repo_name
stringlengths 5
114
| repo_url
stringlengths 24
133
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| branch_name
stringclasses 209
values | visit_date
timestamp[ns] | revision_date
timestamp[ns] | committer_date
timestamp[ns] | github_id
int64 9.83k
683M
⌀ | star_events_count
int64 0
22.6k
| fork_events_count
int64 0
4.15k
| gha_license_id
stringclasses 17
values | gha_created_at
timestamp[ns] | gha_updated_at
timestamp[ns] | gha_pushed_at
timestamp[ns] | gha_language
stringclasses 115
values | files
listlengths 1
13.2k
| num_files
int64 1
13.2k
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
littlemandave/Week10
|
https://github.com/littlemandave/Week10
|
adf9e5970180e7c03d750c9d9d300a24e1c1fd64
|
d978d78a50c9e5b733b30e4e2c8e83f18c49ab72
|
aae4b1d7c5f9cd201c1777ee2a679470ccd068ee
|
refs/heads/master
| 2023-01-20T12:36:39.694601 | 2020-12-01T21:35:17 | 2020-12-01T21:35:17 | 315,478,096 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6416938304901123,
"alphanum_fraction": 0.6581975817680359,
"avg_line_length": 33.624061584472656,
"blob_id": "fa1ebbfcd88de73b1d88767bdaceeb97da7da136",
"content_id": "cd5bb0266741eceafed4319e2cdbb72d6676ffee",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4605,
"license_type": "no_license",
"max_line_length": 130,
"num_lines": 133,
"path": "/Lab7.py",
"repo_name": "littlemandave/Week10",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n#\n# Lab 07 - CIS 133Y - Fall 2020\n# Dave Johnson\n#\n# A program to practice with numpy, pandas, and matplotlib.\n#\n# We get weather data from an internet weather station (in this case wind speed and direction),\n# then plot it, both polar and cartesion.\n#\n# I'm not sure I used enough numpy commands for the assignment, but I'm not too worried, I'm only a\n# senior audit. This sure was fun learning a little about these packages, and matplotlib in particular.\n#\n\n# Import all the packages we'll need\nimport numpy as np\nimport pandas as pd\nimport matplotlib as mp\nimport matplotlib.pyplot as plt\n\n# show versions\nprint('NumPy version ', np.__version__)\nprint('Pandas version ', pd.__version__)\nprint('MatPlotLib version ', mp.__version__)\n\n# -------------------------------------------\n# A few \"globals\", strictly for convenience.\n\n# The number of points to retrieve and plot\nnumPoints = 5000\n\n# The column names, used as indices later\ndirName = 'Direction(deg)'\nspeedName = 'Velocity(mph)'\n\n# -------------------------------------------\n# All the subroutines\n\n# Build the url to retrieve data and return it as a string\n# We need to specify the field to get, the format (.csv), and the number of points\ndef buildURL(index):\n dataURL = 'https://api.thingspeak.com/channels/12397/fields/'\n dataURL += str(index)\n dataURL += '.csv?results=' # we want .csv output\n dataURL += str(numPoints) # number of points to get\n return dataURL\n\n# Get the weather data at the specified index\ndef getWeatherData(index):\n # build the url, then read the data from the URL into a dataframe using pandas, and return it\n theURL = buildURL(index)\n dataFrame = pd.read_csv(theURL)\n return dataFrame\n\n# Combine the direction and speed dataframes into one, with a column for each value,\n# and rename the columns nicely\ndef makeWindDF(dirDF, speedDF):\n # First insert the wind velocity column as 'field2' at the far right\n 
dirDF.insert(loc=len(dirDF.columns), column='field2', value=speedDF['field2'])\n\n # Rename the columns to something human-happy\n dirDF.rename(columns={'created_at': 'Date/Time', 'entry_id': 'EntryID', 'field1': dirName, 'field2': speedName}, inplace=True)\n return dirDF\n\ndef retrievePlottableWindData():\n # Get the last numPoints of both wind direction (field1) and wind speed (field2),\n # and combine into one nice table, then return it.\n dirDF = getWeatherData(1)\n speedDF = getWeatherData(2)\n return makeWindDF(dirDF, speedDF)\n\ndef plotWindData(windDF):\n # Plot it, both polar and x vs y\n title = \"Plot of Wind direction vs. Speed in Natick, MA using the past {} data points\".format(numPoints)\n plt.title(title)\n\n # -----------------------\n # First the polar plot\n ax = plt.subplot(211, projection='polar')\n\n # set the ticks to show the compass points, with North at the top\n # Note: since the polar plot is positive anti-clockwise, the labels are\n # reversed from their \"normal\" clockwise sequence\n ax.set_theta_offset(np.pi / 2)\n tickLabels = ['N', 'NW', 'W', 'SW', 'S', 'SE', 'E', 'NE']\n tickValues = []\n for i in range(8):\n tickValues.append(i*(np.pi/4))\n ax.set_xticks(tickValues)\n ax.set_xticklabels(tickLabels)\n\n # convert direction data to radians for the polar plot. 
Note we're using numpy implicitly here:\n # arithmetic is performed on the entire array in one statement\n # radians = deg * pi/180, and we need to reverse direction, so\n thetaData = ((-windDF[dirName] * np.pi) / 180)\n\n # add 2pi to make sure it's positive, then plot it with green dots\n # todo is this necessary?\n thetaData += (2*np.pi)\n plt.plot(thetaData, windDF[speedName], 'gx', alpha=0.25)\n\n # -----------------------\n # Then the linear plot, with red x's, and approriate ticks and labels\n ax2 = plt.subplot(212)\n plt.xlabel(speedName)\n plt.ylabel(dirName)\n\n tickLabels = ['N', 'NE', 'E', 'SE', 'S', 'SW', 'W', 'NW']\n tickValues = [0, 45, 90, 135, 180, 225, 270, 315]\n ax2.set_yticks(tickValues)\n ax2.set_yticklabels(tickLabels)\n plt.plot(windDF[speedName], windDF[dirName], 'rx', alpha=0.25)\n\n # finally, save the plot as a .pdf and show it\n plt.savefig(\"Wind Plot.pdf\") # , bbox_inches=\"tight\"\n plt.show()\n\n\ndef main():\n numDashes = 50\n print()\n print('-' * numDashes)\n print('--- practice with numpy, pandas, and matplotlib ---')\n print('-' * numDashes)\n print()\n\n # Get data and plot it\n windDF = retrievePlottableWindData()\n plotWindData(windDF)\n print('Done!')\n\nif __name__ == '__main__':\n main()\n"
}
] | 1 |
tonina/housePrice
|
https://github.com/tonina/housePrice
|
e91b4e300b745ef2665a5dbbf399cfc6245bdc33
|
a7f19fb870b4cd6331c11e36877357310c930815
|
27f41aa4511794f144802ea07ea7be81f09b5f1a
|
refs/heads/master
| 2019-04-29T11:58:16.466052 | 2014-12-26T18:52:33 | 2014-12-26T18:52:33 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6461538672447205,
"alphanum_fraction": 0.7219780087471008,
"avg_line_length": 22.30769157409668,
"blob_id": "48ecb4a250b8e2b4924337d6abfdc95fcb8f5141",
"content_id": "66b2fb5222d5f4a4f09fd178d187198a4cab51fe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 919,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 39,
"path": "/test.py",
"repo_name": "tonina/housePrice",
"src_encoding": "UTF-8",
"text": "# 2014-11-17\n# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\n\nfrom datetime import datetime\nimport BeautifulSoup\n\nfrom house import House\nfrom HouseList import HouseList\nfrom requestParams import *\nfrom getPrice import PriceData\n\ndateDate = datetime.today()\ndate = dateDate.strftime('%Y-%m-%d')\nregion = state_id[u'Винницкая']\n\nh1 = House(1,10,2,100,3,date,region)\nh2 = House(2,20,4,100,2,date,region)\nh3 = House(3,29,2,39,1,date,region)\nh4 = House(4,45,9,None,4,date,region)\nh5 = House(5,3,None,45,1,date,region)\nh6 = House(5,56,None,40,2,date,region)\n\np3 = h3.getMetGrnPrice()\np4 = h4.getMetGrnPrice()\np5 = h5.getMetGrnPrice()\np6 = h6.getMetGrnPrice()\n\nhouseList = HouseList([h1,h2,h3,h4,h5,h6])\n\nprices = houseList.compMetPriceByRooms()\nprint(prices)\n\nparams['period'] = 'per_hour'\nparams['state_id'] = 1\nhList = (PriceData(url,params)).buildHouseList()\nprint(hList)\nhList.putToDatabase()\n\n"
},
{
"alpha_fraction": 0.5950037837028503,
"alphanum_fraction": 0.6040878295898438,
"avg_line_length": 35.69444274902344,
"blob_id": "6f1024c5722fe7a255b9c07c8e278ce6fe1954d2",
"content_id": "062f6e52af2099204445dd9d9f3bfa32c1febe21",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2642,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 72,
"path": "/results.py",
"repo_name": "tonina/housePrice",
"src_encoding": "UTF-8",
"text": "# 2014-07-10\n# -*- coding: utf-8 -*-\n\nfrom datetime import date, datetime\nimport matplotlib.pyplot as plt\nimport matplotlib.dates\n\nfrom requestParams import *\nfrom getPrice import PriceData\n\ndef readData(filename):\n '''\n (file) -> list of str, list of float\n Gets file name and return two lists:\n with data in string format and square metre price in float.\n '''\n f = open(filename, 'r')\n dates = []\n prices = []\n lines = f.readlines()\n for line in lines:\n lineData = line.strip().split()\n dates.append(lineData[0])\n prices.append(float(lineData[1]))\n f.close()\n return dates, prices\n\ndef writeTodayPrice(filename):\n '''\n Gets file name, checks if today price data exist.\n If not collects today price data and writes to file.\n '''\n today = str(date.today())\n dates, prices = readData(filename)\n if dates == [] or dates[-1] != today: # check if data were written this day or file is empty\n pD = PriceData(url, params)\n houseList = pD.buildHouseList() # build house list for given parameters\n metrePriceList = []\n for house in houseList.getElements():\n if house.getTotalGryvniaPrice() and house.getArea():\n metrePrice = house.getTotalGryvniaPrice()/house.getArea()\n metrePriceList.append(metrePrice)\n averagePrice = sum(metrePriceList)/len(metrePriceList)\n f = open('out.txt', 'a')\n f.write(today + ' ' + str(averagePrice) + '\\n')\n f.close()\n print('Data have been written to file.')\n else:\n print('Data were written to file early.')\n\ndef plotPriceTime(filename):\n '''\n Plots time row of metre house price on days\n '''\n fig = plt.figure()\n dates, prices = readData(filename)\n datesDate = [datetime.strptime(item, '%Y-%m-%d') for item in dates] # convert date to datetime format\n datesNum = matplotlib.dates.date2num(datesDate) # convert datetime list to number list\n # axes = plt.subplot(1,1,1)\n axes = fig.add_subplot(1,1,1)\n plt.plot_date(datesNum, prices, 'r-')\n majorLocator = matplotlib.dates.MonthLocator()\n 
axes.xaxis.set_major_locator(majorLocator)\n axes.xaxis.set_major_formatter(matplotlib.dates.DateFormatter(\"%Y-%m-%d\")) # \"%m-%d\" for part date\n # -format date to plot\n fig.autofmt_xdate(bottom=0.18, rotation=60)\n plt.grid()\n plt.savefig('figure.jpg')\n\nif __name__ == '__main__':\n writeTodayPrice('out.txt')\n plotPriceTime('out.txt')\n"
},
{
"alpha_fraction": 0.5704545378684998,
"alphanum_fraction": 0.5772727131843567,
"avg_line_length": 23.0181827545166,
"blob_id": "215d999cbf449a99f177d3f10cdc9429caeea1c7",
"content_id": "179c97b0fc4b1d62f8ceb3758606f1c254b812ba",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1320,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 55,
"path": "/house.py",
"repo_name": "tonina/housePrice",
"src_encoding": "UTF-8",
"text": "# 2014-07-10\n# -*- coding: utf-8 -*-\n\nclass House(object):\n '''\n\n '''\n def __init__(self, id, totalGryvniaPrice, totalDollarPrice, area, numRooms, date, region):\n '''\n (float, float, int) -> None\n '''\n self.id = id\n self.totalGryvniaPrice = totalGryvniaPrice\n self.totalDollarPrice = totalDollarPrice\n self.area = area\n self.numRooms = numRooms\n self.date = date\n self.region = region\n\n def __str__(self):\n return ('Area ' + str(self.area) + ', price ' + str(self.totalGryvniaPrice) \\\n + ', number of rooms ' + str(self.numRooms))\n\n def getId(self):\n return self.id\n\n def getTotalDollarPrice(self):\n return self.totalDollarPrice\n\n def getTotalGryvniaPrice(self):\n return self.totalGryvniaPrice\n\n def getArea(self):\n return self.area\n\n def getNumRooms(self):\n return self.numRooms\n\n def getDate(self):\n return self.date\n\n def getRegion(self):\n return self.region\n\n def getMetGrnPrice(self):\n try:\n return float(self.totalGryvniaPrice)/self.area\n except:\n return None\n\n def getMetDolPrice(self):\n try:\n return float(self.totalDollarPrice)/self.area\n except:\n return None"
},
{
"alpha_fraction": 0.5637204647064209,
"alphanum_fraction": 0.5894141793251038,
"avg_line_length": 33.140350341796875,
"blob_id": "9574b12c01afe9448d6c1c48e8747feb08ae241e",
"content_id": "1c8612dc6d2137db35685e97b64a27d621feb8ed",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1946,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 57,
"path": "/plots.py",
"repo_name": "tonina/housePrice",
"src_encoding": "UTF-8",
"text": "# 2014-11-12\n# -*- coding: utf-8 -*-\n\nimport matplotlib.pyplot as plt\nfrom matplotlib import rc\nimport numpy as np\n\nfrom requestParams import *\nfrom getPrice import PriceData\n\nclass Hist(object):\n '''\n Diagram with information about flats by regions.\n '''\n def __init__(self,requestPeriod):\n params['period'] = period[requestPeriod]\n regions = (state_id.keys())\n prices = [[],[],[]]\n for region in regions:\n params['state_id'] = state_id[region]\n houseList = (PriceData(url,params)).buildHouseList()\n if houseList:\n regionPrices = houseList.compMetPriceByRooms()\n else:\n regionPrices = [0,0,0,0]\n for i in range(3):\n prices[i].append(regionPrices[i])\n locs = np.arange(1,len(prices[0])+1)\n width = 0.27\n\n font = {'family':'Verdana','weight':'normal'}\n rc('font',**font)\n\n plt.bar(locs, prices[0], width=width, color='#1a3838')\n plt.bar(locs+width, prices[1], width=width, color='#4a7a57')\n plt.bar(locs+2*width, prices[2], width=width, color='#82a353')\n plt.xticks(locs+1.5*width, regions, rotation=90)\n plt.legend('123')\n plt.savefig('hist.png', bbox_inches='tight', pad_inches=0.5)\n\n\ndef plotPie(requestPeriod, requestRegion):\n '''\n Creates pie plot of flats, divided by number of rooms.\n '''\n params['period'] = period[requestPeriod]\n params['state_id'] = state_id[requestRegion]\n houseList = (PriceData(url,params)).buildHouseList()\n if houseList:\n sizes = houseList.compHouseByRooms()\n prices = houseList.compMetPriceByRooms()\n labels = [str(i+1)+\"-room's \\n\"+str(prices[i]) for i in range(len(prices))]\n plt.figure()\n colors = ['g','c','b','r']\n plt.pie(sizes, labels=labels, colors=colors, autopct='%1.1f%%')\n plt.axis('equal')\n plt.savefig('pie.png')\n"
},
{
"alpha_fraction": 0.4954441785812378,
"alphanum_fraction": 0.5102505683898926,
"avg_line_length": 34.699188232421875,
"blob_id": "76daf39167cef1550fdda544072a072c76677eef",
"content_id": "ff50f076cc161e76834f39a5804ffc0f0922dc6f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4390,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 123,
"path": "/HouseList.py",
"repo_name": "tonina/housePrice",
"src_encoding": "UTF-8",
"text": "# 2014-11-14\n# -*- coding: utf-8 -*-\n\nimport MySQLdb\n\nclass HouseList(object):\n '''\n Class describes list which can contains objects of House classes.\n '''\n def __init__(self, houseList=[]):\n '''\n Initializes with empty list.\n '''\n self.houseList = houseList\n\n def addHouse(self, house):\n '''\n Add object house of House class to list.\n '''\n (self.houseList).append(house)\n\n def removeHouse(self, house):\n '''\n Remove object house of House class from list.\n '''\n (self.houseList).remove(house)\n\n def getElements(self):\n return self.houseList\n\n def compHouseByRooms(self):\n '''\n Returns list with number of flats by rooms.\n Example: [x1,x2,x3,x4] means that\n there are x1 number of flat with one room,\n x2 - with two rooms, x3 - with 3 rooms\n and x4 with more then three rooms.\n '''\n sumRooms = [0,0,0,0]\n for house in self.houseList:\n numRooms = house.getNumRooms()\n if numRooms:\n if numRooms > 3:\n number = 3\n else:\n number = numRooms-1\n sumRooms[number] += 1\n return sumRooms\n\n def compMetPriceByRooms(self, value='g'):\n '''\n Returns list with average metre prices for rooms category.\n Value: string, can be 'g' (grivnya) or 'd' (dollar).\n Example: result [x1,x2,x3,x4] means that\n x1 is average metre price for all one-room flats in this house list,\n x2 - for two-rooms flats, x3 - for three-rooms flat\n and x4 - for all flats which has more than 3 rooms.\n '''\n sumPrice = [0,0,0,0]\n for house in self.houseList:\n numRooms = house.getNumRooms()\n if value == 'd':\n metPrice = house.getMetDolPrice()\n else:\n metPrice = house.getMetGrnPrice()\n if metPrice and numRooms:\n if numRooms > 3:\n number = 3\n else:\n number = numRooms-1\n sumPrice[number] += metPrice\n sumRooms = self.compHouseByRooms()\n priceByRooms = [0,0,0,0]\n for i in range(len(sumPrice)):\n try:\n priceByRooms[i] = sumPrice[i]/sumRooms[i]\n except:\n pass\n return priceByRooms\n\n def putToDatabase(self, tableName='apartments', dbName='houses'):\n 
'''\n Save house list to MySQL database.\n dbName - name of database, should be existent\n '''\n db = MySQLdb.connect(host='localhost',\\\n user='root',\\\n passwd='123',\\\n db=dbName, \\\n charset='utf8', \\\n use_unicode=True)\n with db:\n # db.set_character_set('utf8')\n cursor = db.cursor()\n # cursor.execute('SET NAMES utf8;')\n # cursor.execute('SET CHARACTER SET utf8;')\n # cursor.execute('SET character_set_connection=utf8;')\n # cursor.execute('SET character_set_database=utf8;')\n createTable = '''CREATE TABLE IF NOT EXISTS %s(\n id INT(11) NOT NULL,\n totalGryvniaPrice FLOAT,\n totalDollarPrice FLOAT,\n area FLOAT,\n numRooms INT(11),\n date DATE,\n region INT(11));\n ''' % (tableName)\n cursor.execute(createTable)\n # alterTable = '''ALTER TABLE %s MODIFY COLUMN region VARCHAR(30)\n # CHARACTER SET utf8 COLLATE utf8_general_ci;''' %(tableName)\n # cursor.execute(alterTable)\n houseList = self.getElements()\n for house in houseList:\n id = house.getId()\n totalGrPrice = house.getTotalGryvniaPrice()\n totalDolPrice = house.getTotalDollarPrice()\n area = house.getArea()\n numRooms = house.getNumRooms()\n date = house.getDate()\n region = house.getRegion()\n cursor.execute(\"INSERT INTO apartments VALUES(%s, %s, %s, %s, %s, %s, %s);\", \\\n (id, totalGrPrice, totalDolPrice, area, numRooms, date, region))\n db.close()"
},
{
"alpha_fraction": 0.6035901308059692,
"alphanum_fraction": 0.6073298454284668,
"avg_line_length": 32.02469253540039,
"blob_id": "e29edbb14fd4cf0bce661e26f11e99b67e2882c6",
"content_id": "c488be170c9ed95e3170cb88ac465ffd96ba7c3b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2674,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 81,
"path": "/main.py",
"repo_name": "tonina/housePrice",
"src_encoding": "UTF-8",
"text": "# 2014-10-31\n# -*- coding: utf-8 -*-\nimport sys\nfrom PyQt4 import QtCore, QtGui, uic\n\nfrom requestParams import *\nfrom plots import plotPie, Hist\n\n###################\nfrom getPrice import *\nfrom requestParams import *\nfrom HouseList import *\n###################\n\nclass MainWindow(QtGui.QWidget):\n '''\n Creates Main Window with tabs.\n '''\n def __init__(self, parent=None):\n QtGui.QWidget.__init__(self, parent)\n uic.loadUi(\"MainForm.ui\", self)\n# Tab with pie chart\n self.periodPieBox.addItems(period.keys())\n self.regionPieBox.addItems(state_id.keys())\n self.connect(self.runPie, QtCore.SIGNAL(\"clicked()\"), \\\n self.processPie)\n# Tab with histogram\n self.periodHistBox.addItems(period.keys())\n self.connect(self.runHist, QtCore.SIGNAL(\"clicked()\"), \\\n self.processHist)\n\n#######################\n# Tab with writing data\n self.periodDatabaseBox.addItems(period.keys())\n self.regionDatabaseBox.addItems(state_id.keys())\n self.connect(self.putToDatabase, QtCore.SIGNAL(\"clicked()\"), \\\n self.writeToDatabase)\n#######################\n\n def processPie(self):\n '''\n Gets input data from combo boxes and calls method for plotting pie chart for\n one request region and request time period.\n '''\n self.runPie.setDisabled(True)\n rPeriod = unicode(self.periodPieBox.currentText())\n rRegion = unicode(self.regionPieBox.currentText())\n plotPie(rPeriod, rRegion)\n self.pie.setScaledContents(True)\n self.pie.setPixmap(QtGui.QPixmap(\"pie.png\"))\n self.runPie.setDisabled(False)\n\n def processHist(self):\n '''\n Gets input data from combo boxes and calls method for plotting histogram\n for comparing house data for few regions.\n '''\n self.runHist.setDisabled(True)\n rPeriod = unicode(self.periodHistBox.currentText())\n Hist(rPeriod)\n# plotHist(rPeriod)\n self.hist.setScaledContents(True)\n self.hist.setPixmap(QtGui.QPixmap(\"hist.png\"))\n self.runHist.setDisabled(False)\n\n#########################\n def writeToDatabase(self):\n 
rPeriod = unicode(self.periodDatabaseBox.currentText())\n rRegion = unicode(self.regionDatabaseBox.currentText())\n params['period'] = period[rPeriod]\n params['state_id'] = state_id[rRegion]\n hList = (PriceData(url,params)).buildHouseList()\n hList.putToDatabase()\n#########################\n\n\nif __name__ == \"__main__\":\n app = QtGui.QApplication(sys.argv)\n window = MainWindow()\n window.show()\n sys.exit(app.exec_())"
},
{
"alpha_fraction": 0.5098273754119873,
"alphanum_fraction": 0.5163220167160034,
"avg_line_length": 32.62643814086914,
"blob_id": "93e7f3ca1afa8de75ff1791af55e4aa519af1075",
"content_id": "18f0b61816799e5b629f92eda177e8191bf97fce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5942,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 174,
"path": "/getPrice.py",
"repo_name": "tonina/housePrice",
"src_encoding": "UTF-8",
"text": "# 2014-07-07\n# -*- coding: utf-8 -*-\n\nimport urllib\nimport urllib2\nfrom bs4 import BeautifulSoup\nimport re\nfrom datetime import datetime\n\nfrom house import House\nfrom HouseList import HouseList\n\nclass PriceData(object):\n '''\n\n '''\n def __init__(self, url, params):\n '''\n Initialize object with url and query parameters.\n Sets fields: url, fullUrl (with params), params.\n Gets html content from required full url.\n url: string\n params: dict of query parameters\n '''\n self.url = url\n self.params = params\n\n def getUrl(self):\n return self.url\n\n def getFullUrl(self, pageNumber):\n '''\n Returns full url with query parameters.\n '''\n self.params['page'] = pageNumber\n urlValues = urllib.urlencode(self.params)\n self.fullUrl = self.url + '?' + urlValues\n return self.fullUrl\n\n def getContent(self, pageNumber):\n '''\n Returns html content from page with pageNumber.\n '''\n self.getFullUrl(pageNumber)\n response = urllib2.urlopen(self.fullUrl)\n content = response.read()\n return content\n\n# Methods for extracting and clearing data from one house html item.\n def clearId(self, item):\n '''\n Returns house id number or None if it does not exist.\n '''\n try:\n soupId = (item.find_all('a')[1])['itemid']\n id = soupId\n except:\n return None\n return id\n\n def clearGryvniaPrice(self, item):\n '''\n Returns house total gryvnia price or None if it does not exist.\n '''\n try:\n soupGryvniaPrice = item.find('strong', {'class':'green'})\n strGryvniaPrice = soupGryvniaPrice.text\n gryvniaPrice = float(strGryvniaPrice.replace(' ',''))\n except:\n return None\n return gryvniaPrice\n\n def clearDollarPrice(self, item):\n '''\n Returns house total dollar price or None if it does not exist.\n '''\n try:\n soupDollarPrice = (item.find_all('strong',{'class':'green'}))[2]\n strDollarPrice = soupDollarPrice.text\n dollarPrice = float(strDollarPrice.replace(' ',''))\n except:\n return None\n return dollarPrice\n\n def clearArea(self, item):\n '''\n 
Returns house area or None if it does not exist.\n '''\n try:\n pattern = r'^\\d*'\n soupArea = item.find('strong', {'title': u\"общая/жилая/кухни\"})\n strArea = soupArea.text\n area = float(re.findall(pattern, strArea)[0])\n except:\n return None\n return area\n\n def clearNumRooms(self, item):\n '''\n Returns house number of rooms or none if it does not exist.\n '''\n try:\n soupRooms = item.find('strong', {'title': u'Комнат'})\n strRooms = soupRooms.text\n numRooms = int(strRooms)\n except:\n return None\n return numRooms\n\n def clearDate(self, item): # NOT READY\n '''\n Return date of creating house notice at site.\n '''\n months = {u'Января':1,\n u'Февраля':2,\n u'Марта':3,\n u'Апреля':4,\n u'Мая':5,\n u'Июня':6,\n u'Июля':7,\n u'Августа':8,\n u'Сентября':9,\n u'Октября':10,\n u'Ноября':11,\n u'Декабря':12}\n dateField = item.find('div', {'class':'charlist-data'})\n if dateField.text != ' ':\n try:\n soupDate = dateField.find('strong', {'class':'orange'})\n except:\n pass\n try:\n soupDate = dateField.find('strong', {'class':'blue'})\n except:\n pass\n\n\n # Method for crating house list\n def buildHouseList(self):\n '''\n Creates house list from html data.\n '''\n# houseList = []\n houseList = HouseList()\n soup = BeautifulSoup(self.getContent(1))\n try:\n page = soup.find('div', {'class':'page'}) # get pagination panel\n pages = page.find_all('a', {'class':'item'}) # get page's numbers\n if pages is not []:\n numPages = int(pages[-1].text) # number of pages (last page's number)\n else:\n pages = page.find('a', {'class':'active'}) # get active page if alone page\n numPages = int(pages.text)\n for i in range(1, numPages+1):\n self.getFullUrl(i)\n soup = BeautifulSoup(self.getContent(i))\n\n items1 = soup.find_all('div', {'class':'ticket-item paid realty_item'}) # find all divs with house objects\n items2 = soup.find_all('div', {'class':'ticket-item realty_item'}) # find all divs with house objects\n items = items1 + items2\n\n for item in items:\n id = 
self.clearId(item)\n gryvniaPrice = self.clearGryvniaPrice(item)\n dollarPrice = self.clearDollarPrice(item)\n area = self.clearArea(item)\n numRooms = self.clearNumRooms(item)\n dateDate = datetime.today() # date in Date format\n date = dateDate.strftime('%Y-%m-%d')\n region = self.params['state_id'] # extract region from parameters of url request\n houseList.addHouse(House(id,gryvniaPrice,dollarPrice,area,numRooms,date, region))\n except:\n return None # return None if there are no house item\n return houseList\n"
},
{
"alpha_fraction": 0.5277038216590881,
"alphanum_fraction": 0.584079384803772,
"avg_line_length": 32.601627349853516,
"blob_id": "b8208e66931a0cf3b776655d326403f2dca9dd9b",
"content_id": "9a93cd451c6b1ba5634cd297b084b00606150b0a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4674,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 123,
"path": "/requestParams.py",
"repo_name": "tonina/housePrice",
"src_encoding": "UTF-8",
"text": "# 2014-07-08\n# -*- coding: utf-8 -*-\n\nfrom collections import OrderedDict\n\ncategory = {u'Квартиры':1,\n u'Дома, дачи':4,\n u'Офисы':10,\n u'Участки':24,\n u'Коммерческая недвижимость':13,\n u'Гаражи':30,\n u'Риелторы':6,\n u'Все для дома и сада':9,\n u'Новости':10,\n 'default':''\n}\nrealty_type = {u'Все типы':0,\n u'комната':3,\n u'квартира':2,\n 'default':''\n}\nadvert_type = {u'продажа':1,\n u'долгосрочная аренда':3,\n u'аренда посуточная':4,\n 'default':''\n}\nstate_id = OrderedDict([\n (u'Винницкая',1),\n (u'Волынская',18),\n (u'Днепропетровская',11),\n (u'Донецкая',13),\n (u'Житомирская',2),\n (u'Закарпатская',22),\n (u'Запорожская',14),\n (u'Ивано-Франковская',15),\n (u'Киевская',10),\n (u'Кировоградская',16),\n (u'Луганская',17),\n (u'Львовская',5),\n (u'Николаевская',19),\n (u'Одесская',12),\n (u'Полтавская',20),\n (u'Ровенская',9),\n (u'Сумская',8),\n (u'Тернопольская',3),\n (u'Харьковская',7),\n (u'Херсонская',23),\n (u'Хмельницкая',4),\n (u'Черкасская',24),\n (u'Черниговская',6),\n (u'Черновицкая',25),\n ('default','')\n])\ncity_id = {'default':''}\ndistrict_id = {'default':''}# period of advertisements\nwith_photo = {'with_photo':1,\n 'default':0}\nperiod = OrderedDict([\n (u'за час','per_hour'),\n (u'за 3 часа','per_three_hours'),\n (u'за сегодня','per_day'),\n (u'за сутки','per_allday'),\n (u'с последнего визита','per_visit')\n])\n\ncharacteristic_209_from = {'default':'от'} # the number of rooms\ncharacteristic_209_to = {'default':'до'}\ncharacteristic_214_from = {'default':'от'} # house (flat) area\ncharacteristic_214_to = {'default':'до'}\ncharacteristic_234_from = {'default':'от'} # price\ncharacteristic_234_to = {'default':'до'}\n\ncharacteristic_216_from = {'default':'от'}\ncharacteristic_216_to = {'default':'до'}\ncharacteristic_218_from = {'default':'от'}\ncharacteristic_218_to = {'default':'до'}\ncharacteristic_227_from = {'default':'от'}\ncharacteristic_227_to = {'default':'до'}\ncharacteristic_228_from = 
{'default':'от'}\ncharacteristic_228_to = {'default':'до'}\n\ncharacteristic_247 = {'default':''}\ncharacteristic_265 = {'default':''}\n\ncharacteristic_242 = {u'доллар':239,\n u'грн':240,\n u'евро':241\n}\ntextsearch = {'default':''}\n\n# Request parameters dictionary\nparams = {'category':category[u'Квартиры'],\n 'realty_type':realty_type[u'Все типы'],\n 'advert_type':advert_type[u'продажа'],\n 'state_id':state_id[u'Киевская'],\n 'period':period[u'за сутки'],\n 'district_id':district_id['default'],\n 'with_photo':with_photo['default'],\n 'characteristic[209][from]':characteristic_209_from['default'],\n 'characteristic[209][to]':characteristic_209_to['default'],\n 'characteristic[214][from]':characteristic_214_from['default'],\n 'characteristic[214][to]':characteristic_214_to['default'],\n 'characteristic[234][from]':characteristic_234_from['default'],\n 'characteristic[234][to]':characteristic_234_to['default'],\n 'characteristic[242]':characteristic_242[u'грн'],\n\n 'characteristic[216][from]':characteristic_216_from['default'],\n 'characteristic[216][to]':characteristic_216_to['default'],\n 'characteristic[218][from]':characteristic_216_from['default'],\n 'characteristic[218][to]':characteristic_216_to['default'],\n 'characteristic[227][from]':characteristic_216_from['default'],\n 'characteristic[227][to]':characteristic_216_to['default'],\n 'characteristic[228][from]':characteristic_216_from['default'],\n 'characteristic[228][to]':characteristic_216_to['default'],\n\n 'characteristics[247]':characteristic_247['default'],\n 'characteristics[265]':characteristic_265['default'],\n\n 'period':period[u'за сегодня'],\n 'page':''\n}\n# Request url\nurl = 'http://dom.ria.com/ru/search'\n"
}
] | 8 |
niujianjiang/Weirdo
|
https://github.com/niujianjiang/Weirdo
|
2858896b301726e40b119d4025901c37f6af7bea
|
42132499c444f2b89476ebc394da36d2384425ed
|
061ed64566eea9542cd6d42334aa0d75e72bb660
|
refs/heads/main
| 2023-04-12T01:37:33.932457 | 2021-04-20T14:19:31 | 2021-04-20T14:19:31 | 346,928,285 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.37087130546569824,
"alphanum_fraction": 0.3933655917644501,
"avg_line_length": 22.51049041748047,
"blob_id": "9d9f0ff620733a782d0043c612eeef78b71b216e",
"content_id": "42c028d13143687d68c73b299f64e2c1164c3dd7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7696,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 286,
"path": "/xiangmu.py",
"repo_name": "niujianjiang/Weirdo",
"src_encoding": "UTF-8",
"text": "import re\r\nimport time\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport copy\r\nwith open('0.txt') as f:\r\n file = f.read()\r\na = int(input(\"请输入选择的数据集:\"))\r\nif(a == 1):\r\n \r\n n = 30\r\n c = 10149\r\n pattern=re.compile('408.+1948')\r\n result=pattern.findall(file) \r\n V = []\r\n V = result[0]\r\n pattern=re.compile('508.+1545') \r\n result0=pattern.findall(file)\r\n W = []\r\n W = result0[0] \r\n\r\nif(a==2):\r\n \r\n n = 300\r\n c = 61500\r\n pattern=re.compile('408.+759')\r\n result=pattern.findall(file) \r\n V = []\r\n V = result[0]\r\n pattern=re.compile('508.+728') \r\n result0=pattern.findall(file)\r\n W = []\r\n W = result0[0]\r\n \r\na = r','\r\nV = re.split(a,V)\r\nW = re.split(a,W)\r\nV = list(map(eval,V))\r\nW = list(map(eval,W))\r\n\r\n#动态规划算法\r\n\r\ndef bag(n, c, w, v): \r\n \r\n value = [[0 for j in range(c + 1)] for i in range(n + 1)]\r\n for i in range(1, n + 1):\r\n for j in range(1, c + 1):\r\n value[i][j] = value[i - 1][j]\r\n \r\n if j >= w[i - 1] and value[i][j] < value[i - 1][j - w[i - 1]] + v[i - 1]:\r\n value[i][j] = value[i - 1][j - w[i - 1]] + v[i - 1]\r\n for x in value:\r\n print(x)\r\n return value\r\n\r\ndef show(n, c, w, value):\r\n print('最大价值为:', value[n][c])\r\n x = [False for i in range(n)]\r\n j = c\r\n for i in range(n, 0, -1):\r\n if value[i][j] > value[i - 1][j]:\r\n x[i - 1] = True\r\n j -= w[i - 1]\r\n print('背包中所装物品为:')\r\n for i in range(n):\r\n if x[i]:\r\n print('第', i+1, '个,', end='')\r\n\r\ndef bag1(n, c, w, v):\r\n values = [0 for i in range(c+1)]\r\n for i in range(1, n + 1):\r\n for j in range(c, 0, -1):\r\n \r\n if j >= w[i-1]:\r\n values[j] = max(values[j-w[i-1]]+v[i-1], values[j])\r\n return values\r\n#回溯算法\r\n\r\ndef BacktrackBag(t):\r\n global rest # 剩余背包容量\r\n global restp # 当前未装入背包的总价值\r\n global cw # 背包当前载重量\r\n global cp # 背包当前装入价值\r\n global bestp # 背包当前最优装入价值\r\n global x # 解空间树表征数组\r\n global W # 物品重量数组\r\n global V # 物品价值数组\r\n global bestx #最优表征数组\r\n if t >= 
n:\r\n if cp == bestp:\r\n bestx = copy.deepcopy(x)\r\n #if bestp >= cp:\r\n # print(x,'当前最优解:%.0f'% bestp)\r\n else:\r\n for i in range(1,-1,-1):\r\n x[t] = i\r\n #如果该物品可以放入,并且之后的物品比当前价值优进行递归\r\n if rest >= x[t]*W[t] and cp + restp - V[t] * (1-x[t]) >= bestp:\r\n rest = rest - x[t]*W[t]\r\n cp = cp + V[t]*x[t]\r\n restp = restp - V[t]\r\n if cp >= bestp:\r\n bestp = cp\r\n BacktrackBag(t+1)\r\n rest = rest + x[t] * W[t]\r\n cp = cp - V[t] * x[t]\r\n restp = restp + V[t]\r\n\r\n#遗传算法\r\ndef init(N,n):\r\n C = []\r\n for i in range(N):\r\n c = []\r\n for j in range(n):\r\n a = np.random.randint(0,2)\r\n c.append(a)\r\n C.append(c)\r\n return C\r\n\r\n##评估函数\r\n# x(i)取值为1表示被选中,取值为0表示未被选中\r\n# w(i)表示各个分量的重量,v(i)表示各个分量的价值,w表示最大承受重量\r\ndef fitness(C,N,n,W,V,w):\r\n S = []##用于存储被选中的下标\r\n F = []## 用于存放当前该个体的最大价值\r\n for i in range(N):\r\n s = []\r\n h = 0 # 重量\r\n f = 0 # 价值\r\n for j in range(n):\r\n if C[i][j]==1:\r\n if h+W[j]<=w:\r\n h=h+W[j]\r\n f = f+V[j]\r\n s.append(j)\r\n S.append(s)\r\n F.append(f)\r\n return S,F\r\n\r\n##适应值函数,B位返回的种族的基因下标,y为返回的最大值\r\ndef best_x(F,S,N):\r\n y = 0\r\n x = 0\r\n B = [0]*N\r\n for i in range(N):\r\n if y<F[i]:\r\n x = i\r\n y = F[x]\r\n B = S[x]\r\n return B,y\r\n\r\n## 计算比率\r\ndef rate(x):\r\n p = [0] * len(x)\r\n s = 0\r\n for i in x:\r\n s += i\r\n for i in range(len(x)):\r\n p[i] = x[i] / s\r\n return p\r\n\r\n## 选择\r\ndef chose(p, X, m, n):\r\n X1 = X\r\n r = np.random.rand(m)\r\n for i in range(m):\r\n k = 0\r\n for j in range(n):\r\n k = k + p[j]\r\n if r[i] <= k:\r\n X1[i] = X[j]\r\n break\r\n return X1\r\n\r\n##交配\r\ndef match(X, m, n, p):\r\n r = np.random.rand(m)\r\n k = [0] * m\r\n for i in range(m):\r\n if r[i] < p:\r\n k[i] = 1\r\n u = v = 0\r\n k[0] = k[0] = 0\r\n for i in range(m):\r\n if k[i]:\r\n if k[u] == 0:\r\n u = i\r\n elif k[v] == 0:\r\n v = i\r\n if k[u] and k[v]:\r\n # print(u,v)\r\n q = np.random.randint(n - 1)\r\n # print(q)\r\n for i in range(q + 1, n):\r\n X[u][i], X[v][i] = X[v][i], X[u][i]\r\n k[u] 
= 0\r\n k[v] = 0\r\n return X\r\n\r\n##变异\r\ndef vari(X, m, n, p):\r\n for i in range(m):\r\n for j in range(n):\r\n q = np.random.rand()\r\n if q < p:\r\n X[i][j] = np.random.randint(0,2)\r\n\r\n return X\r\n\r\n#画图\r\ndef huatu(v,w):\r\n plt.xlim(xmax=1600,xmin=0)\r\n plt.ylim(ymax=1600,ymin=0)\r\n plt.plot(v,w,'ro')\r\n plt.show()\r\n \r\n#非递增排序\r\ndef sort(v,w):\r\n xx=[a/b for a,b in zip(v,w)] \r\n xx.sort(reverse=True) \r\n x = xx\r\n '''\r\n print(\"非递增排序的结果为:\")\r\n for i in x:\r\n print(i)\r\n '''\r\nif __name__ == '__main__':\r\n b = int(input(\"请输入选择的算法:\"))\r\n if(b == 1):\r\n \r\n sort(V,W) \r\n start =time.perf_counter()\r\n value = bag(n, c, W, V)\r\n show(n, c, W, value)\r\n end = time.perf_counter()\r\n print('\\nRunning time: %s Seconds'%(end-start))\r\n \r\n print('\\n空间复杂度优化为N(c)结果:', bag1(n, c, W, V))\r\n huatu(V,W)\r\n \r\n if(b == 2):\r\n rest=c\r\n cw = 0\r\n cp = 0\r\n restp=0\r\n for i in V:\r\n restp=restp+i\r\n bestp=0\r\n x=[0 for i in range(n)]\r\n print('物品个数:',str(n))\r\n print('物品重量数组:',str(W))\r\n print('物品价值数组:',str(V))\r\n print('背包容量:',str(c))\r\n huatu(V,W)\r\n start =time.perf_counter()\r\n BacktrackBag(0)\r\n end = time.perf_counter()\r\n print('\\nRunning time: %s Seconds'%(end-start))\r\n print(bestx,'当前最优解:%.0f'% bestp)\r\n if(b==3):\r\n m = 30 ##规模\r\n N = 800 ##迭代次数\r\n Pc = 0.8 ##交配概率\r\n Pm = 0.05 ##变异概率 \r\n n = len(W)##染色体长度\r\n w = 1000\r\n C = init(m, n)\r\n start =time.perf_counter()\r\n S,F = fitness(C,m,n,W,V,w)\r\n B ,y = best_x(F,S,m)\r\n Y =[y]\r\n for i in range(N):\r\n p = rate(F)\r\n C = chose(p, C, m, n)\r\n C = match(C, m, n, Pc)\r\n C = vari(C, m, n, Pm)\r\n S, F = fitness(C, m, n, W, V, w)\r\n B1, y1 = best_x(F, S, m)\r\n if y1 > y:\r\n y = y1\r\n Y.append(y)\r\n end = time.perf_counter() \r\n print('\\nRunning time: %s Seconds'%(end-start))\r\n print(\"最大值为:\",y)\r\n plt.plot(Y)\r\n plt.show()\r\n\r\n \r\n \r\n"
},
{
"alpha_fraction": 0.5138860940933228,
"alphanum_fraction": 0.5516483783721924,
"avg_line_length": 25.618783950805664,
"blob_id": "b920aa6457e4b4818205e5d439d26e696024a9a4",
"content_id": "c7468605e036da33a1c906a6e03797eac75ac1d0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5377,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 181,
"path": "/shiyansan.py",
"repo_name": "niujianjiang/Weirdo",
"src_encoding": "UTF-8",
"text": "import MySQLdb\r\nfrom tkinter import *\r\nfrom tkinter import messagebox\r\nfrom PIL import Image,ImageTk\r\n\r\n# 连接数据库\r\nclass MysqlSearch(object):\r\n\tdef __init__(self):\r\n\t\tself.get_conn()\r\n\t# 获取连接\r\n\tdef get_conn(self):\r\n\t\ttry:\r\n\t\t\tself.conn = MySQLdb.connect(\r\n\t\t\t\thost='123',\r\n\t\t\t\tuser='root',\r\n\t\t\t\tpasswd='',\r\n\t\t\t\tdb='personnelmanagement',\r\n\t\t\t\tcharset='utf8'\r\n\t\t\t\t)\r\n\t\texcept MySQLdb.Error as e:\r\n\t\t\tprint('Error: %s' % e)\r\n\t# 关闭连接\r\n\tdef close_conn(self):\r\n\t\ttry:\r\n\t\t\tif self.conn:\r\n\t\t\t\tself.conn.close()\r\n\t\texcept MySQLdb.Error as e:\r\n\t\t\tprint('Error: %s' % e)\r\n\tdef get_userinfo(self):\r\n\t\tsql = 'SELECT * FROM 登陆账户'\r\n \r\n # 使用cursor()方法获取操作游标\r\n\t\tcursor = self.conn.cursor()\r\n \r\n # 使用execute()方法执行SQL语句\r\n\t\tcursor.execute(sql)\r\n \r\n # 使用fetchall()方法获取全部数据\r\n\t\tresult = cursor.fetchall()\r\n \r\n # 将数据用字典形式存储于result\r\n\t\tresult = [dict(zip([k[0] for k in cursor.description],row)) for row in result]\r\n \r\n\t\tcursor.close()\r\n\t\r\nclass zhuce(Frame):\r\n def __init__(self, master=None):\r\n super().__init__(master)\r\n self.master = master\r\n self.pack()\r\n self.createWidget()\r\n\r\n def createWidget(self):\r\n self.label01 = Label(self, text='用户名')\r\n self.label01.grid(row=0, column=0)\r\n\r\n v1 = StringVar() # 用户名输入\r\n self.entry01 = Entry(self, textvariable=v1)\r\n self.entry01.grid(row=0, column=1, columnspan=2)\r\n\r\n self.label02 = Label(self, text='密码')\r\n self.label02.grid(row=1, column=0)\r\n\r\n v2 = StringVar() # 密码输入\r\n self.entry02 = Entry(self, textvariable=v2, show='*')\r\n self.entry02.grid(row=1, column=1, columnspan=2)\r\n\r\n self.label03 = Label(self, text='确认密码')\r\n self.label03.grid(row=2, column=0)\r\n\r\n v2 = StringVar() # 确认密码输入\r\n self.entry03 = Entry(self, textvariable=v2, show='*')\r\n self.entry03.grid(row=2, column=1, columnspan=2)\r\n\r\n Button(self, text='确定', command=self.login1) \\\r\n 
.grid(row=3, column=1, padx=10, sticky=NSEW)\r\n Button(self, text='取消', command=self.cancel) \\\r\n .grid(row=3, column=2, sticky=NSEW)\r\n\r\n def login1(self):\r\n pass\r\n\r\n def cancel(self):\r\n pass\r\n\r\nclass login1(Frame):\r\n def __init__(self, master=None):\r\n super().__init__(master)\r\n self.master = master\r\n self.pack()\r\n self.createWidget()\r\n\r\n def createWidget(self):\r\n self.label01 = Label(self, text='数据集')\r\n self.label01.grid(row=1, column=0)\r\n\r\n v1 = StringVar() # 数据集名字输入\r\n self.entry01 = Entry(self, textvariable=v1)\r\n self.entry01.grid(row=1, column=1, columnspan=2)\r\n\r\n self.label02 = Label(self, text='算法')\r\n self.label02.grid(row=2, column=0)\r\n\r\n v2 = StringVar() # 算法输入\r\n self.entry02 = Entry(self, textvariable=v2)\r\n self.entry02.grid(row=2, column=1, columnspan=2)\r\n\r\n Button(self, text='开始执行', command=self.login1) \\\r\n .grid(row=4, column=1, padx=10, sticky=NSEW)\r\n Button(self, text='取消', command=self.cancel) \\\r\n .grid(row=4, column=2, sticky=NSEW)\r\n\r\n\r\n def login1(self):\r\n pass\r\n\r\n def cancel(self):\r\n pass\r\n \r\nclass Applicantion(Frame):\r\n\r\n def __init__(self, master=None):\r\n super().__init__(master)\r\n self.master = master\r\n self.pack()\r\n self.createWidget()\r\n\r\n def createWidget(self):\r\n self.label01 = Label(self, text='用户名')\r\n self.label01.grid(row=0, column=0)\r\n\r\n v1 = StringVar() # 用户名输入\r\n self.entry01 = Entry(self, textvariable=v1)\r\n self.entry01.grid(row=0, column=1,columnspan=2)\r\n\r\n self.label02 = Label(self, text='密码')\r\n self.label02.grid(row=1, column=0)\r\n\r\n v2 = StringVar() # 密码输入\r\n self.entry02 = Entry(self, textvariable=v2, show='*')\r\n self.entry02.grid(row=1, column=1, columnspan=2)\r\n\r\n # 登录 注册 按钮事件绑定\r\n Button(self, text='登录', command=self.login)\\\r\n .grid(row=2, column=1, padx=10, sticky=NSEW)\r\n Button(self, text='注册', command=self.set)\\\r\n .grid(row=2, column=2, sticky=NSEW)\r\n\r\n def login(self): # 登录事件\r\n 
username = self.entry01.get()\r\n pwd = self.entry02.get()\r\n\r\n if username == '123' and pwd == '123456':\r\n messagebox.showinfo('登录系统', '登录成功')\r\n root1 = Tk()\r\n root1.geometry('400x200+300+400')\r\n root1.title('实验测试')\r\n table = login1(master=root1)\r\n root1.mainloop()\r\n else:\r\n messagebox.showinfo('登录系统', '登录失败,用户名或密码错误')\r\n\r\n def set(self): # 注册事件\r\n root1 = Tk()\r\n root1.geometry('400x200+300+400')\r\n root1.title('注册系统')\r\n table = zhuce(master=root1)\r\n root1.mainloop()\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n root = Tk()\r\n \r\n root.geometry('400x200+300+400')\r\n \r\n root.title('D{0-1}KP 实例数据集算法实验平台')\r\n app = Applicantion(master=root)\r\n \r\n root.mainloop()\r\n \r\n"
},
{
"alpha_fraction": 0.8333333134651184,
"alphanum_fraction": 0.8333333134651184,
"avg_line_length": 14,
"blob_id": "7128e18eecd53c6303614ed4a7ecb389bb753dd7",
"content_id": "921de5a88d052fd1bfa4241a5e08e811c4244072",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 30,
"license_type": "no_license",
"max_line_length": 20,
"num_lines": 2,
"path": "/README.md",
"repo_name": "niujianjiang/Weirdo",
"src_encoding": "UTF-8",
"text": "# Weirdo\nsoftware engineering\n"
},
{
"alpha_fraction": 0.41421568393707275,
"alphanum_fraction": 0.4475490152835846,
"avg_line_length": 22.361446380615234,
"blob_id": "e9aa08a10c4571af16ad695d4345ae5851681a38",
"content_id": "b9e90059a8090817d8ce671201120e70b3967794",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2110,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 83,
"path": "/bag.py",
"repo_name": "niujianjiang/Weirdo",
"src_encoding": "UTF-8",
"text": "import re\r\nimport time\r\nimport matplotlib.pyplot as plt\r\nwith open('0.txt') as f:\r\n file = f.read()\r\npattern=re.compile('408.+1948') \r\nresult=pattern.findall(file) \r\nv = []\r\nv = result[0]\r\npattern=re.compile('508.+1545') \r\nresult0=pattern.findall(file)\r\nw = []\r\nw = result0[0]\r\na = r','\r\nv = re.split(a,v)\r\nw = re.split(a,w)\r\nv = list(map(eval,v))\r\nw = list(map(eval,w))\r\nplt.xlim(xmax=1600,xmin=0)\r\nplt.ylim(ymax=1600,ymin=0)\r\nplt.plot(v,w,'ro')\r\nplt.show()\r\n\r\ndef bag(n, c, w, v):\r\n \r\n \r\n value = [[0 for j in range(c + 1)] for i in range(n + 1)]\r\n for i in range(1, n + 1):\r\n for j in range(1, c + 1):\r\n value[i][j] = value[i - 1][j]\r\n \r\n if j >= w[i - 1] and value[i][j] < value[i - 1][j - w[i - 1]] + v[i - 1]:\r\n value[i][j] = value[i - 1][j - w[i - 1]] + v[i - 1]\r\n for x in value:\r\n print(x)\r\n return value\r\n\r\ndef show(n, c, w, value):\r\n print('最大价值为:', value[n][c])\r\n x = [False for i in range(n)]\r\n j = c\r\n for i in range(n, 0, -1):\r\n if value[i][j] > value[i - 1][j]:\r\n x[i - 1] = True\r\n j -= w[i - 1]\r\n print('背包中所装物品为:')\r\n for i in range(n):\r\n if x[i]:\r\n print('第', i+1, '个,', end='')\r\n\r\ndef bag1(n, c, w, v):\r\n values = [0 for i in range(c+1)]\r\n for i in range(1, n + 1):\r\n for j in range(c, 0, -1):\r\n \r\n if j >= w[i-1]:\r\n values[j] = max(values[j-w[i-1]]+v[i-1], values[j])\r\n return values\r\ndef sort(v,w):\r\n xx=[a/b for a,b in zip(v,w)] \r\n xx.sort(reverse=True) \r\n x = xx\r\n '''\r\n print(\"非递增排序的结果为:\")\r\n for i in x:\r\n print(i)\r\n '''\r\n\r\nif __name__ == '__main__':\r\n \r\n n = 30\r\n c = 10149\r\n \r\n sort(v,w)\r\n \r\n start =time.perf_counter()\r\n value = bag(n, c, w, v)\r\n \r\n show(n, c, w, value)\r\n end = time.perf_counter()\r\n print('\\nRunning time: %s Seconds'%(end-start))\r\n \r\n print('\\n空间复杂度优化为N(c)结果:', bag1(n, c, w, v))\r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n"
}
] | 4 |
Tianyue-Yang/hw4_test_git
|
https://github.com/Tianyue-Yang/hw4_test_git
|
3d694de9c07fb0a10c7fa431c1bb3d5c65ad0a79
|
b313374e2a1c6b079b2a9c1e42bc69ac4623f18e
|
e863e6020e0b8d7898fcd94eee27443a560eb40f
|
refs/heads/master
| 2021-01-03T16:02:04.552638 | 2020-02-13T00:25:22 | 2020-02-13T00:25:22 | 240,142,734 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5770171284675598,
"alphanum_fraction": 0.5953544974327087,
"avg_line_length": 36.181819915771484,
"blob_id": "fc872920b8299195e8c027c48880ba8dace63ef0",
"content_id": "324629e5263fc453655c9f4ceaa8469a4cec522a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3274,
"license_type": "no_license",
"max_line_length": 230,
"num_lines": 88,
"path": "/hw4_test.py",
"repo_name": "Tianyue-Yang/hw4_test_git",
"src_encoding": "UTF-8",
"text": "import unittest\nimport hw4_cards as cards\n\n# SI 507 Winter 2020\n# Homework 4 - Code\n\n#########################################\n##### Name: Tianyue Yang #####\n##### Uniqname: tianyuey #####\n#########################################\n\n## You can write any additional debugging/trying stuff out code here...\n## OK to add debugging print statements, but do NOT change functionality of existing code.\n## Also OK to add comments!\n\nclass TestCard(unittest.TestCase):\n # this is a \"test\"\n def test_0_create(self):\n card = cards.Card()\n self.assertEqual(card.suit_name, \"Diamonds\")\n self.assertEqual(card.rank, 2)\n\n # Add methods below to test main assignments. \n def test_1_queen(self):\n \"\"\"Test that if you create a card with rank 12, its rank_name will be \"Queen\"\n \"\"\"\n card = cards.Card(rank=12)\n self.assertEqual(card.rank_name, \"Queen\")\n\n def test_2_clubs(self):\n \"\"\"Test that if you create a card instance with suit 1, its suit_name will be \"Clubs\"\n \"\"\"\n c2 = cards.Card(suit=1)\n self.assertEqual(c2.suit_name, \"Clubs\")\n\n def test_3_str(self):\n \"\"\"Test that if you invoke the __str__ method of a card instance that is created with suit=3, rank=13, it returns the string \"King of Spades\"\n \"\"\"\n c3 = cards.Card(3, 13)\n self.assertEqual(c3.__str__(), \"King of Spades\")\n\n def test_4_deck(self):\n \"\"\"Test that if you create a deck instance, it will have 52 cards in its cards instance variable\n \"\"\"\n d4 = cards.Deck()\n self.assertEqual(len(d4.cards), 52)\n\n def test_5_deal(self):\n \"\"\"Test that if you invoke the deal_card method on a deck, it will return a card instance.\n \"\"\"\n deck = cards.Deck()\n self.assertIsInstance(deck.deal_card(), cards.Card)\n\n def test_6_fewer(self):\n \"\"\"Test that if you invoke the deal_card method on a deck, the deck has one fewer cards in it afterwards.\n \"\"\"\n d6 = cards.Deck()\n num_ori = len(d6.cards)\n d6.deal_card()\n num_after = len(d6.cards)\n 
self.assertEqual(num_ori-1, num_after)\n\n def test_7_more(self):\n \"\"\"Test that if you invoke the replace_card method, the deck has one more card in it afterwards. (Please note that you want to use deal_card function first to remove a card from the deck and then add the same card back in)\n \"\"\"\n d7 = cards.Deck()\n poped_card = d7.deal_card()\n num_before = len(d7.cards)\n d7.replace_card(poped_card)\n num_new = len(d7.cards)\n self.assertEqual(num_new-1, num_before)\n\n def test_8_replace(self):\n \"\"\"Test that if you invoke the replace_card method with a card that is already in the deck, the deck size is not affected.(The function must silently ignore it if you try to add a card that’s already in the deck)\n \"\"\"\n c8 = cards.Card()\n d8 = cards.Deck()\n num_deck = len(d8.cards)\n if c8 in d8.cards:\n d8.replace_card(c8)\n num_not_affected = len(d8.cards)\n self.assertEqual(num_deck, num_not_affected)\n\n\n############\n### The following is a line to run all of the tests you include:\nif __name__ == \"__main__\":\n unittest.main()\n"
}
] | 1 |
indigos33k3r/SelfAttentiveSentEmbed
|
https://github.com/indigos33k3r/SelfAttentiveSentEmbed
|
41a77ffd4472726290e3bcd5ec6657c046e373f8
|
d3f8b798d195fea59efd129c778ee65d15d20b93
|
9e7b6434f38a406b0c1d97d027d581355516bead
|
refs/heads/master
| 2020-04-06T23:48:38.453929 | 2018-06-30T18:20:03 | 2018-06-30T18:20:03 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5287548303604126,
"alphanum_fraction": 0.5480455160140991,
"avg_line_length": 42.771427154541016,
"blob_id": "edba52868ce1d0d9e8b266ff3ef4e1208f6e22fe",
"content_id": "74cafbed9f7eaaf7aaf3484200d7c9555cdd81c5",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13789,
"license_type": "permissive",
"max_line_length": 98,
"num_lines": 315,
"path": "/lstmmlp_rate_l2_dpout.py",
"repo_name": "indigos33k3r/SelfAttentiveSentEmbed",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\n\nimport time\nimport os\nimport sys\nimport numpy\nimport cPickle\nimport theano\nimport theano.tensor as T\nfrom sklearn.metrics import confusion_matrix\nimport lasagne\nfrom lasagne.layers.recurrent import Gate\nfrom lasagne import init, nonlinearities\n\nfrom util_layers import (DenseLayer3DInput, Softmax3D, ApplyAttention,\n GatedEncoder3D, Maxpooling)\nfrom dataset import YELP, AGE2\n\nimport pdb\ntheano.config.compute_test_value = 'off' # 'off' # Use 'warn' to activate\n\n\"\"\"\nBEST test set result:\nyelp 77.575% L2REG=0.0001, DPOUT=0.3\nage2 63.65% L2REG=0.00001, DPOUT=0.2\n\"\"\"\nLSTMHID = int(sys.argv[1]) # 500 Hidden unit numbers in LSTM\nOUTHID = int(sys.argv[2]) # 1000 Hidden unit numbers in output MLP\nLR = float(sys.argv[3]) # 0.01 Smaller than 0.04.\nL2REG = float(sys.argv[4]) # 0.0001 L2 regularization\nDPOUT = float(sys.argv[5]) # 0.3 dropout rate\nWE = str(sys.argv[6]) # either `word2vec` or `glove`\nWEDIM = int(sys.argv[7]) # either 100 or 300 Dim\nBSIZE = int(sys.argv[8]) # 16 Minibatch size\nGCLIP = float(sys.argv[9]) # 0.5 All gradients above will be clipped\nNEPOCH = int(sys.argv[10]) # 300 Number of epochs to train the net\nSTD = float(sys.argv[11]) # 0.1 Standard deviation of weights in init\n # very slightly better than 0.01\nUPDATEWE = bool(int(sys.argv[12])) # 1 0 for False and 1 for True. 
Update WE\nDSET = str(sys.argv[13]) # dataset, either `yelp` or `age2`\n\nfilename = __file__.split('.')[0] + \\\n '_LSTMHID' + str(LSTMHID) + \\\n '_OUTHID' + str(OUTHID) + \\\n '_LR' + str(LR) + \\\n '_L2REG' + str(L2REG) + \\\n '_DPOUT' + str(DPOUT) + \\\n '_WE' + str(WE) + \\\n '_WEDIM' + str(WEDIM) + \\\n '_BSIZE' + str(BSIZE) + \\\n '_GCLIP' + str(GCLIP) + \\\n '_NEPOCH' + str(NEPOCH) + \\\n '_STD' + str(STD) + \\\n '_UPDATEWE' + str(UPDATEWE) + \\\n '_DSET' + DSET\n\ndef main(num_epochs=NEPOCH):\n if DSET == 'yelp':\n print(\"Loading yelp dataset ...\")\n loaded_dataset = YELP(\n batch_size=BSIZE,\n datapath=\"/home/hantek/datasets/NLC_data/yelp/word2vec_yelp.pkl\")\n elif DSET == 'age2':\n print(\"Loading age2 dataset ...\")\n loaded_dataset = AGE2(\n batch_size=BSIZE,\n datapath=\"/home/hantek/datasets/NLC_data/age2/word2vec_age2.pkl\")\n else:\n raise ValueError(\"DSET was set incorrectly. Check your cmd args.\")\n # yelp age2\n # train data 500000 68450\n # dev/test data 2000 4000\n # vocab ~1.2e5\n # \n\n train_batches = list(loaded_dataset.train_minibatch_generator())\n dev_batches = list(loaded_dataset.dev_minibatch_generator())\n test_batches = list(loaded_dataset.test_minibatch_generator())\n W_word_embedding = loaded_dataset.weight # W shape: (# vocab size, WE_DIM)\n del loaded_dataset\n\n print(\"Building network ...\")\n ########### sentence embedding encoder ###########\n # sentence vector, with each number standing for a word number\n input_var = T.TensorType('int32', [False, False])('sentence_vector')\n input_var.tag.test_value = numpy.hstack((numpy.random.randint(1, 10000, (BSIZE, 20), 'int32'),\n numpy.zeros((BSIZE, 5)).astype('int32')))\n input_var.tag.test_value[1, 20:22] = (413, 45)\n l_in = lasagne.layers.InputLayer(shape=(BSIZE, None), input_var=input_var)\n \n input_mask = T.TensorType('int32', [False, False])('sentence_mask')\n input_mask.tag.test_value = numpy.hstack((numpy.ones((BSIZE, 20), dtype='int32'),\n numpy.zeros((BSIZE, 5), 
dtype='int32')))\n input_mask.tag.test_value[1, 20:22] = 1\n l_mask = lasagne.layers.InputLayer(shape=(BSIZE, None), input_var=input_mask)\n\n # output shape (BSIZE, None, WEDIM)\n l_word_embed = lasagne.layers.EmbeddingLayer(\n l_in,\n input_size=W_word_embedding.shape[0],\n output_size=W_word_embedding.shape[1],\n W=W_word_embedding)\n\n # bidirectional LSTM\n l_forward = lasagne.layers.LSTMLayer(\n l_word_embed, mask_input=l_mask, num_units=LSTMHID,\n ingate=Gate(W_in=init.Normal(STD), W_hid=init.Normal(STD), \n W_cell=init.Normal(STD)),\n forgetgate=Gate(W_in=init.Normal(STD), W_hid=init.Normal(STD),\n W_cell=init.Normal(STD)),\n cell=Gate(W_in=init.Normal(STD), W_hid=init.Normal(STD),\n W_cell=None, nonlinearity=nonlinearities.tanh),\n outgate=Gate(W_in=init.Normal(STD), W_hid=init.Normal(STD), \n W_cell=init.Normal(STD)),\n nonlinearity=lasagne.nonlinearities.tanh,\n peepholes = False,\n only_return_final=False,\n grad_clipping=GCLIP)\n\n l_backward = lasagne.layers.LSTMLayer(\n l_word_embed, mask_input=l_mask, num_units=LSTMHID,\n ingate=Gate(W_in=init.Normal(STD), W_hid=init.Normal(STD),\n W_cell=init.Normal(STD)),\n forgetgate=Gate(W_in=init.Normal(STD), W_hid=init.Normal(STD),\n W_cell=init.Normal(STD)),\n cell=Gate(W_in=init.Normal(STD), W_hid=init.Normal(STD),\n W_cell=None, nonlinearity=nonlinearities.tanh),\n outgate=Gate(W_in=init.Normal(STD), W_hid=init.Normal(STD), \n W_cell=init.Normal(STD)),\n nonlinearity=lasagne.nonlinearities.tanh,\n peepholes = False,\n only_return_final=False,\n grad_clipping=GCLIP, backwards=True)\n \n # output dim: (BSIZE, None, 2*LSTMHID)\n l_concat = lasagne.layers.ConcatLayer([l_forward, l_backward], axis=2)\n\n # output dim: (BSIZE, 2*LSTMHID)\n l_maxpool = Maxpooling(l_concat, axis=1)\n l_maxpool_dpout = lasagne.layers.DropoutLayer(l_maxpool, p=DPOUT, rescale=True)\n\n l_outhid = lasagne.layers.DenseLayer(\n l_maxpool_dpout, num_units=OUTHID,\n nonlinearity=lasagne.nonlinearities.rectify)\n l_outhid_dpout = 
lasagne.layers.DropoutLayer(l_outhid, p=DPOUT, rescale=True)\n\n l_output = lasagne.layers.DenseLayer(\n l_outhid_dpout, num_units=5, nonlinearity=lasagne.nonlinearities.softmax)\n\n\n ########### target, cost, validation, etc. ##########\n target_values = T.ivector('target_output')\n target_values.tag.test_value = numpy.asarray([1,] * BSIZE, dtype='int32')\n\n network_output = lasagne.layers.get_output(l_output)\n network_prediction = T.argmax(network_output, axis=1)\n accuracy = T.mean(T.eq(network_prediction, target_values))\n\n network_output_clean = lasagne.layers.get_output(l_output, deterministic=True)\n network_prediction_clean = T.argmax(network_output_clean, axis=1)\n accuracy_clean = T.mean(T.eq(network_prediction_clean, target_values))\n \n L2_lstm = ((l_forward.W_in_to_ingate ** 2).sum() + \\\n (l_forward.W_hid_to_ingate ** 2).sum() + \\\n (l_forward.W_in_to_forgetgate ** 2).sum() + \\\n (l_forward.W_hid_to_forgetgate ** 2).sum() + \\\n (l_forward.W_in_to_cell ** 2).sum() + \\\n (l_forward.W_hid_to_cell ** 2).sum() + \\\n (l_forward.W_in_to_outgate ** 2).sum() + \\\n (l_forward.W_hid_to_outgate ** 2).sum() + \\\n (l_backward.W_in_to_ingate ** 2).sum() + \\\n (l_backward.W_hid_to_ingate ** 2).sum() + \\\n (l_backward.W_in_to_forgetgate ** 2).sum() + \\\n (l_backward.W_hid_to_forgetgate ** 2).sum() + \\\n (l_backward.W_in_to_cell ** 2).sum() + \\\n (l_backward.W_hid_to_cell ** 2).sum() + \\\n (l_backward.W_in_to_outgate ** 2).sum() + \\\n (l_backward.W_hid_to_outgate ** 2).sum())\n L2_outputhid = (l_outhid.W ** 2).sum()\n L2_softmax = (l_output.W ** 2).sum()\n L2 = L2_lstm + L2_outputhid + L2_softmax \n \n cost = T.mean(T.nnet.categorical_crossentropy(network_output,\n target_values)) + \\\n L2REG * L2\n cost_clean = T.mean(T.nnet.categorical_crossentropy(network_output_clean,\n target_values)) + \\\n L2REG * L2\n\n # Retrieve all parameters from the network\n all_params = lasagne.layers.get_all_params(l_output)\n if not UPDATEWE:\n 
all_params.remove(l_word_embed.W)\n\n numparams = sum([numpy.prod(i) for i in [i.shape.eval() for i in all_params]])\n print(\"Number of params: {}\\nName\\t\\t\\tShape\\t\\t\\tSize\".format(numparams))\n print(\"-----------------------------------------------------------------\")\n for item in all_params:\n print(\"{0:24}{1:24}{2}\".format(item, item.shape.eval(), numpy.prod(item.shape.eval())))\n\n # if exist param file then load params\n look_for = 'params' + os.sep + 'params_' + filename + '.pkl'\n if os.path.isfile(look_for):\n print(\"Resuming from file: \" + look_for)\n all_param_values = cPickle.load(open(look_for, 'rb'))\n for p, v in zip(all_params, all_param_values):\n p.set_value(v)\n \n # Compute SGD updates for training\n print(\"Computing updates ...\")\n updates = lasagne.updates.adagrad(cost, all_params, LR)\n\n # Theano functions for training and computing cost\n print(\"Compiling functions ...\")\n train = theano.function(\n [l_in.input_var, l_mask.input_var, target_values],\n [cost, accuracy], updates=updates)\n compute_cost = theano.function(\n [l_in.input_var, l_mask.input_var, target_values],\n [cost_clean, accuracy_clean])\n predict = theano.function(\n [l_in.input_var, l_mask.input_var],\n network_prediction_clean)\n\n def evaluate(mode, verbose=False):\n if mode == 'dev':\n data = dev_batches\n if mode == 'test':\n data = test_batches\n \n set_cost = 0.\n set_accuracy = 0.\n for batches_seen, (hypo, hm, truth) in enumerate(data, 1):\n _cost, _accuracy = compute_cost(hypo, hm, truth)\n set_cost = (1.0 - 1.0 / batches_seen) * set_cost + \\\n 1.0 / batches_seen * _cost\n set_accuracy = (1.0 - 1.0 / batches_seen) * set_accuracy + \\\n 1.0 / batches_seen * _accuracy\n if verbose == True:\n predicted = []\n truth = []\n for batches_seen, (sent, mask, th) in enumerate(data, 1):\n predicted.append(predict(sent, mask))\n truth.append(th)\n truth = numpy.concatenate(truth)\n predicted = numpy.concatenate(predicted)\n cm = confusion_matrix(truth, 
predicted)\n pr_a = cm.trace()*1.0 / truth.size\n pr_e = ((cm.sum(axis=0)*1.0/truth.size) * \\\n (cm.sum(axis=1)*1.0/truth.size)).sum()\n k = (pr_a - pr_e) / (1 - pr_e)\n print(mode + \" set statistics:\")\n print(\"kappa index of agreement: %f\" % k)\n print(\"confusion matrix:\")\n print(cm)\n\n return set_cost, set_accuracy\n \n\n print(\"Done. Evaluating scratch model ...\")\n test_set_cost, test_set_accuracy = evaluate('test', verbose=True)\n print(\"BEFORE TRAINING: test cost %f, accuracy %f\" % (\n test_set_cost, test_set_accuracy))\n print(\"Training ...\")\n try:\n for epoch in range(num_epochs):\n train_set_cost = 0.\n train_set_accuracy = 0.\n start = time.time()\n \n for batches_seen, (hypo, hm, truth) in enumerate(train_batches, 1):\n _cost, _accuracy = train(hypo, hm, truth)\n train_set_cost = (1.0 - 1.0 / batches_seen) * train_set_cost + \\\n 1.0 / batches_seen * _cost\n train_set_accuracy = (1.0 - 1.0 / batches_seen) * train_set_accuracy + \\\n 1.0 / batches_seen * _accuracy\n if batches_seen % 100 == 0:\n end = time.time()\n print(\"Sample %d %.2fs, lr %.4f, train cost %f, accuracy %f\" % (\n batches_seen * BSIZE,\n end - start,\n LR,\n train_set_cost,\n train_set_accuracy))\n start = end\n\n if batches_seen % 2000 == 0:\n dev_set_cost, dev_set_accuracy = evaluate('dev')\n test_set_cost, test_set_accuracy = evaluate('test')\n print(\"RECORD: cost: train %f dev %f test %f\\n\"\n \" accu: train %f dev %f test %f\" % (\n train_set_cost, dev_set_cost, test_set_cost,\n train_set_accuracy, dev_set_accuracy, test_set_accuracy))\n\n # save parameters\n all_param_values = [p.get_value() for p in all_params]\n cPickle.dump(all_param_values,\n open('params' + os.sep + 'params_' + filename + '.pkl', 'wb'))\n\n dev_set_cost, dev_set_accuracy = evaluate('dev')\n test_set_cost, test_set_accuracy = evaluate('test', verbose=True)\n print(\"RECORD:epoch %d, cost: train %f dev %f test %f\\n\"\n \" accu: train %f dev %f test %f\" % (\n epoch,\n train_set_cost, 
dev_set_cost, test_set_cost,\n train_set_accuracy, dev_set_accuracy, test_set_accuracy))\n except KeyboardInterrupt:\n pdb.set_trace()\n pass\n\nif __name__ == '__main__':\n main()\n\n"
},
{
"alpha_fraction": 0.5356627106666565,
"alphanum_fraction": 0.566005527973175,
"avg_line_length": 35.60095977783203,
"blob_id": "8676cee39f60c7020f99c04586ea3be090f74d2f",
"content_id": "64a639134abedcc6af588a8eb63526b3bbd2a483",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7613,
"license_type": "permissive",
"max_line_length": 94,
"num_lines": 208,
"path": "/oov_vec_nlc.py",
"repo_name": "indigos33k3r/SelfAttentiveSentEmbed",
"src_encoding": "UTF-8",
"text": "import os\nimport sys\nimport string\nimport numpy\nimport cPickle\nimport numpy as np\nimport nltk\n\nimport pdb\n\n\n###################################################\n# Overall stats\n###################################################\n# # entries # dims\n# adapted word2vec 530158 100\n# glove 2196016 300\n###################################################\n# age1 age2 yelp\n# train data - 68485 500000\n# dev/test data - 4000 2000\n# word2vec known token 126862 233293\n# UNK token 93124 184427\n# glove known token 126862 233293\n# UNK token 49268 104717 \n###################################################\n# 120739\n# 43256 88984\n\nwdembed = sys.argv[1] # word2vec, glove\ndata_choice = sys.argv[2] # age1, age2, yelp\n\n\n# load word embedding:\nif wdembed == 'word2vec':\n print \"loading adapted word2vec...\"\n fname = '/home/hantek/datasets/NLC_data/embedding'\n w1 = {}\n vec = open(fname, 'r')\n for line in vec.readlines():\n line=line.split()\n w1[line[0]] = np.asarray([float(x) for x in line[1:]]).astype('float32')\n vec.close()\nelif wdembed == 'glove':\n print \"loading GloVe...\"\n fname = '/home/hantek/datasets/glove/glove.840B.300d.dict.pkl'\n if os.path.isfile(fname):\n w1 = cPickle.load(open(fname, 'rb'))\n else:\n w1 = {}\n vec = open('/home/hantek/datasets/glove/glove.840B.300d.txt', 'r')\n for line in vec.readlines():\n line=line.split(' ')\n w1[line[0]] = np.asarray([float(x) for x in line[1:]]).astype('float32')\n vec.close()\n save_file = open(fname, 'wb')\n cPickle.dump(w1, save_file)\n save_file.close()\nelse:\n raise ValueError(\"cmd args 1 has to be either 'word2vec' or 'glove'.\")\n\n\n# load data:\nif data_choice == 'age1':\n f1 = open('/home/hantek/datasets/NLC_data/age1/age1_train', 'r')\n f2 = open('/home/hantek/datasets/NLC_data/age1/age1_valid', 'r')\n f3 = open('/home/hantek/datasets/NLC_data/age1/age1_test', 'r')\n classname = {}\nelif data_choice == 'age2':\n f1 = 
open('/home/hantek/datasets/NLC_data/age2/age2_train', 'r')\n f2 = open('/home/hantek/datasets/NLC_data/age2/age2_valid', 'r')\n f3 = open('/home/hantek/datasets/NLC_data/age2/age2_test', 'r')\n # note that class No. = rating -1\n classname = {'1': 0, '2': 1, '3': 2, '4': 3, '5': 4}\nelif data_choice == 'yelp':\n f1 = open('/home/hantek/datasets/NLC_data/yelp/yelp_train_500k', 'r')\n f2 = open('/home/hantek/datasets/NLC_data/yelp/yelp_valid_2000', 'r')\n f3 = open('/home/hantek/datasets/NLC_data/yelp/yelp_test_2000', 'r')\n # note that class No. = rating -1\n classname = {'1': 0, '2': 1, '3': 2, '4': 3, '5': 4}\nelse:\n raise ValueError(\"command line argument has to be either 'age1', 'age2' or 'yelp'.\")\nf = [f1, f2, f3]\n\n\nprint \"processing dataset, 3 dots to punch: \",\nsys.stdout.flush()\nw2 = {}\nw_referred = {0: 0} # reserve 0 for future padding\nvocab_count = 1 # 0 is reserved for future padding\ntrain_dev_test = []\nfor file in f:\n print \".\",\n sys.stdout.flush()\n pairs = []\n for line in file.readlines():\n line=line.decode('utf-8').split()\n s1 = line[1:]\n s1[0]=s1[0].lower()\n\n rate_score = classname[line[0]]\n # rate_score = line[0]\n\n s1_words = []\n for word in s1:\n if not w_referred.has_key(word):\n w_referred[word] = vocab_count\n vocab_count += 1\n s1_words.append(w_referred[word])\n if not w1.has_key(word):\n if not w2.has_key(word):\n w2[word]=[]\n # find the WE for its surounding words\n for neighbor in s1:\n if w1.has_key(neighbor):\n w2[word].append(w1[neighbor])\n\n pairs.append((numpy.asarray(s1_words).astype('int32'),\n rate_score))\n # numpy.asarray(rate_score).astype('int32')))\n\n train_dev_test.append(pairs)\n file.close()\n\npdb.set_trace()\n\nprint \"\\naugmenting word embedding vocabulary...\"\n# this block is causing memory error in a 8G computer. 
Using alternatives.\n# all_sentences = [w2[x] for x in w2.iterkeys()]\n# all_words = [item for sublist in all_sentences for item in sublist]\n# mean_words = np.mean(all_words)\n# mean_words_std = np.std(all_words)\nmean_words = np.zeros((len(w1['the']),))\nmean_words_std = 1e-1\n\nnpy_rng = np.random.RandomState(123)\nfor k in w2.iterkeys():\n if len(w2[k]) != 0:\n w2[k] = sum(w2[k]) / len(w2[k]) # mean of all surounding words\n else:\n # len(w2[k]) == 0 cases: ['cantunderstans', 'motocyckes', 'arefun']\n # I hate those silly guys...\n w2[k] = mean_words + npy_rng.randn(mean_words.shape[0]) * \\\n mean_words_std * 0.1\n\nw2.update(w1)\n\nprint \"generating weight values...\"\n# reverse w_referred's key-value;\ninv_w_referred = {v: k for k, v in w_referred.items()}\n\n# number --inv_w_referred--> word --w2--> embedding\nordered_word_embedding = [numpy.zeros((1, len(w1['the'])), dtype='float32'), ] + \\\n [w2[inv_w_referred[n]].reshape(1, -1) for n in range(1, len(inv_w_referred))]\n\n# to get the matrix\nweight = numpy.concatenate(ordered_word_embedding, axis=0)\n\n\nprint \"dumping converted datasets...\"\nif data_choice == 'age1':\n save_file = open('/home/hantek/datasets/NLC_data/age1/' + wdembed + '_age1.pkl', 'wb')\nelif data_choice == 'age2':\n save_file = open('/home/hantek/datasets/NLC_data/age2/' + wdembed + '_age2.pkl', 'wb')\nelif data_choice == 'yelp':\n save_file = open('/home/hantek/datasets/NLC_data/yelp/' + wdembed + '_yelp.pkl', 'wb')\n\ncPickle.dump(\"dict: truth values and their corresponding class name\\n\"\n \"the whole dataset, in list of list of tuples: list of train/valid/test set -> \"\n \"list of sentence pairs -> tuple with structure:\"\n \"(review, truth rate), all entries in numbers\\n\"\n \"numpy.ndarray: a matrix with all referred words' embedding in its rows,\"\n \"embeddings are ordered by their corresponding word numbers.\\n\"\n \"dict: the augmented GloVe word embedding. 
contains all possible tokens in SNLI.\"\n \"All initial GloVe entries are included.\\n\"\n \"dict w_referred: word to their corresponding number\\n\"\n \"inverse of w_referred, number to words\\n\",\n save_file)\ncPickle.dump(classname, save_file)\ncPickle.dump(train_dev_test, save_file)\ncPickle.dump(weight, save_file)\nfake_w2 = None; cPickle.dump(fake_w2, save_file)\n# cPickle.dump(w2, save_file) # this is a huge dictionary, delete it if you don't need.\ncPickle.dump(w_referred, save_file)\ncPickle.dump(inv_w_referred, save_file)\nsave_file.close()\nprint \"Done.\"\n\n\n# check:\ndef reconstruct_sentence(sent_nums):\n sent_words = [inv_w_referred[n] for n in sent_nums]\n return sent_words\n\ndef check_word_embed(sent_nums):\n sent_words = reconstruct_sentence(sent_nums)\n\n word_embeds_from_nums = [weight[n] for n in sent_nums]\n word_embeds_from_words = [w2[n] for n in sent_words]\n\n error = 0.\n for i, j in zip(word_embeds_from_nums, word_embeds_from_words):\n error += numpy.sum(i-j)\n \n if error == 0.:\n return True\n else:\n return False\n"
},
{
"alpha_fraction": 0.5519704818725586,
"alphanum_fraction": 0.5691863298416138,
"avg_line_length": 37.024566650390625,
"blob_id": "6c1d9a39e3f7d871dfcafbeb69a2f05015649582",
"content_id": "2b3b9c660909e5fb4c931e9acad7fa9a31d11812",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 26313,
"license_type": "permissive",
"max_line_length": 111,
"num_lines": 692,
"path": "/util_layers.py",
"repo_name": "indigos33k3r/SelfAttentiveSentEmbed",
"src_encoding": "UTF-8",
"text": "import numpy\nimport theano\nimport theano.tensor as T\n\nfrom lasagne import nonlinearities, init\nfrom lasagne.layers.base import Layer, MergeLayer\n\nimport pdb\n\n\nclass FlatConcat(MergeLayer):\n \"\"\"\n ConCatLayer but Flattened to 2 dims before concatenation.\n Accepts more than 2 input. But all inputs should have the same dimention in\n the first dimention. This layer flattens all input to a 2-D matrix and\n concatenates them in the second dimention.\n\n \"\"\"\n def get_output_shape_for(self, input_shapes):\n output_shapes = []\n for shape in input_shapes:\n output_shapes.append((shape[0], numpy.prod(shape[1:])))\n return (output_shapes[0][0], sum([i[-1] for i in output_shapes]))\n\n def get_output_for(self, inputs, **kwargs):\n inputs = [i.flatten(2) for i in inputs]\n return T.concatenate(inputs, axis=1)\n\n\nclass DenseLayerTensorDot(Layer):\n \"\"\"\n multiply N 3D matrices along two dimensions of a 3D matrix, and produce a\n 3D output. In batch training case, these setting corresponds to:\n \n Input shape: (dim1, dim2, dim3, dim4) # (BATCH_SIZE, num_inputslices, N_ROWS, num_inputfeatures)\n weight shape: There are two type of weight dims:\n 'col': (num_slices, num_features, dim2, dim4)\n 'row': (num_slices, num_features, dim2, dim3)\n Output shape: There are two types of output shapes:\n 'col': (dim1, num_slices, dim3, num_features)\n # (BSIZE, num_slices, N_ROWS, num_features)\n 'row': (dim1, num_slices, num_features, num_inputfeatures)\n # (BSIZE, num_slices, num_features, num_inputfeatures)\n\n direction: 'row': you are modifying along the row direction, thus the num_inputfeatures keeps intact.\n or 'col': you are modifying along the col direction (the number of features),\n thus the N_ROWS will keep constant\n \"\"\"\n def __init__(self, incoming, num_slices, num_features, direction='col',\n W=init.GlorotUniform(gain='relu'), nonlinearity=nonlinearities.rectify,\n **kwargs):\n super(DenseLayerTensorDot, self).__init__(incoming, 
**kwargs)\n self.nonlinearity = (nonlinearities.identity if nonlinearity is None\n else nonlinearity)\n self.num_inputslices = self.input_shape[1]\n self.num_slices = num_slices\n self.num_inputfeatures = self.input_shape[3]\n self.num_features = num_features\n self.batch_size = self.input_shape[0]\n self.num_rows = self.input_shape[2]\n\n self.direction = direction\n if direction == 'col':\n self.W = self.add_param(\n W,\n (num_slices, num_features, self.num_inputslices, self.num_inputfeatures),\n name=\"W4D_TensorDot_col\")\n self.axes = [[1, 3], [2, 3]]\n elif direction == 'row':\n self.W = self.add_param(\n W,\n (num_slices, num_features, self.num_inputslices, self.num_rows),\n name=\"W4D_TensorDot_row\")\n self.axes = [[1, 2], [2, 3]]\n else:\n raise ValueError(\"`direction` has to be either `row` or `col`.\")\n\n def get_output_shape_for(self, input_shape):\n num_inputfeatures = input_shape[3]\n batch_size = input_shape[0]\n num_rows = input_shape[2]\n\n # this may change according to the dims you choose to multiply\n if self.direction == 'col':\n return (batch_size, self.num_slices, num_rows, self.num_features)\n elif self.direction == 'row':\n return (batch_size, self.num_slices, self.num_features, num_inputfeatures)\n \n def get_output_for(self, input, **kwargs):\n x = input\n if self.direction == 'col':\n preactivation = T.tensordot(x, self.W, axes=self.axes).dimshuffle(0, 2, 1, 3)\n elif self.direction == 'row':\n preactivation = T.tensordot(x, self.W, axes=self.axes).dimshuffle(0, 2, 3, 1)\n return self.nonlinearity(preactivation)\n\n\nclass DenseLayerTensorBatcheddot(Layer):\n \"\"\"\n \"\"\"\n def __init__(self):\n pass\n def get_output_shape_for(self):\n pass\n def get_output_for(self):\n pass\n\n\nclass DenseLayer3DWeight(Layer):\n \"\"\"\n Apply a 3D matrix to a 3D input, basically it is just batched dot.\n\n Input: (BATCH_SIZE, inputs_per_row, N_ROWS)\n\n Weight: \n Depending on whether the weight is multiplied from left side of input,\n there are 
two shapes:\n right multiply case: (N_ROWS, inputs_per_row, units_per_row)\n left multiply case: (inputs_per_row, N_ROWS, units_per_row)\n\n Output:\n right multiply case: (BATCH_SIZE, units_per_row, N_ROWS)\n left multiply case: (BATCH_SIZE, inputs_per_row, units_per_row)\n \n Params:\n incoming,\n units_per_row,\n W\n b\n leftmul : True if the weight is left multiplied to the input.\n nonlinearity\n **kwargs\n \"\"\"\n def __init__(self, incoming, units_per_row, W=init.GlorotUniform(),\n b=init.Constant(0.), leftmul=False, nonlinearity=nonlinearities.tanh,\n **kwargs):\n super(DenseLayer3DWeight, self).__init__(incoming, **kwargs)\n self.nonlinearity = (nonlinearities.identity if nonlinearity is None\n else nonlinearity)\n\n self.units_per_row = units_per_row\n self.inputs_per_row = self.input_shape[1]\n self.num_rows = self.input_shape[2]\n self.leftmul = leftmul\n \n if leftmul:\n self.W = self.add_param(\n W, (self.inputs_per_row, self.num_rows, self.units_per_row), name='W3D')\n else:\n self.W = self.add_param(\n W, (self.num_rows, self.inputs_per_row, self.units_per_row), name='W3D')\n \n if b is None:\n self.b = None\n else:\n if self.leftmul:\n b = theano.shared(\n numpy.zeros((1, self.inputs_per_row, self.units_per_row),\n dtype=theano.config.floatX),\n broadcastable=(True, False, False), \n name=\"b3D\")\n self.b = self.add_param(spec=b,\n shape=(1, self.inputs_per_row, self.units_per_row),\n regularizable=False)\n else:\n b = theano.shared(\n numpy.zeros((1, self.units_per_row, self.num_rows),\n dtype=theano.config.floatX),\n broadcastable=(True, False, False), \n name=\"b3D\")\n self.b = self.add_param(spec=b,\n shape=(1, self.units_per_row, self.num_rows),\n regularizable=False)\n\n def get_output_shape_for(self, input_shape):\n if self.leftmul:\n return (input_shape[0], input_shape[1], self.units_per_row)\n else:\n return (input_shape[0], self.units_per_row, input_shape[2])\n\n def get_output_for(self, input, **kwargs):\n if self.leftmul:\n preact = 
T.batched_dot(T.extra_ops.cpu_contiguous(input.dimshuffle(1, 0, 2)),\n self.W).dimshuffle(1, 0, 2)\n else:\n preact = T.batched_dot(T.extra_ops.cpu_contiguous(input.dimshuffle(2, 0, 1)),\n self.W).dimshuffle(1, 2, 0)\n if self.b is not None:\n preact = preact + self.b\n return self.nonlinearity(preact)\n\n\nclass DenseLayer3DInput(Layer):\n \"\"\"\n Apply a 2D matrix to a 3D input, so its a batched dot with shared slices.\n \n Input: (BATCH_SIZE, inputdim1, inputdim2)\n\n Weight: \n Depending on whether the weight is multiplied from left side of input,\n there are two shapes:\n right multiply case: (inputdim2, num_units)\n\n Output:\n \n Params:\n incoming,\n units_per_row,\n W\n b\n leftmul : True if the weight is left multiplied to the input.\n nonlinearity\n **kwargs\n \"\"\"\n def __init__(self, incoming, num_units, W=init.GlorotUniform(),\n b=init.Constant(0.), nonlinearity=nonlinearities.tanh,\n **kwargs):\n super(DenseLayer3DInput, self).__init__(incoming, **kwargs)\n self.nonlinearity = (nonlinearities.identity if nonlinearity is None\n else nonlinearity)\n\n self.num_units = num_units\n\n num_inputs = self.input_shape[2]\n\n self.W = self.add_param(W, (num_inputs, num_units), name=\"W2D\")\n if b is None:\n self.b = None\n else:\n self.b = self.add_param(b, (num_units,), name=\"b2D\",\n regularizable=False)\n\n def get_output_shape_for(self, input_shape):\n return (input_shape[0], input_shape[1], self.num_units)\n\n def get_output_for(self, input, **kwargs):\n \n # pdb.set_trace()\n\n activation = T.dot(input, self.W)\n if self.b is not None:\n activation = activation + self.b.dimshuffle('x', 'x', 0)\n return self.nonlinearity(activation)\n\n\nclass Softmax3D(MergeLayer):\n \"\"\"Softmax is conducted on the middle dimension of a 3D tensor.\"\"\"\n def __init__(self, incoming, mask=None, **kwargs):\n \"\"\"\n mask: a lasagne layer.\n \"\"\"\n incomings = [incoming]\n self.have_mask = False\n if mask:\n incomings.append(mask)\n self.have_mask = True\n 
super(Softmax3D, self).__init__(incomings, **kwargs)\n\n def get_output_shape_for(self, input_shapes):\n return input_shapes[0]\n\n def get_output_for(self, inputs, **kwargs):\n preactivations = inputs[0]\n if self.have_mask:\n mask = inputs[1]\n preactivations = \\\n preactivations * mask.dimshuffle(0, 1, 'x').astype(theano.config.floatX) - \\\n numpy.asarray(1e36).astype(theano.config.floatX) * \\\n (1 - mask).dimshuffle(0, 1, 'x').astype(theano.config.floatX)\n \n annotation = T.nnet.softmax(\n preactivations.dimshuffle(0, 2, 1).reshape((\n preactivations.shape[0] * preactivations.shape[2],\n preactivations.shape[1]))\n ).reshape((\n preactivations.shape[0],\n preactivations.shape[2],\n preactivations.shape[1]\n )).dimshuffle(0, 2, 1)\n return annotation\n\n\nclass ApplyAttention(MergeLayer):\n def get_output_shape_for(self, input_shapes):\n return (input_shapes[0][0], input_shapes[0][2], input_shapes[1][2])\n\n def get_output_for(self, inputs, **kwargs):\n annotation, sentence = inputs[0], inputs[1]\n return T.batched_dot(sentence.dimshuffle(0, 2, 1), annotation).dimshuffle(0, 2, 1)\n\n\nclass AugmentFeature(MergeLayer):\n \"\"\"\n Input:\n x: (BATCH_SIZE, N_ROWS, 2*LSTM_HIDDEN)\n y: (BATCH_SIZE, N_ROWS, 2*LSTM_HIDDEN)\n\n Output: (BATCH_SIZE, N_ROWS, 8*LSTM_HIDDEN)\n \"\"\"\n def get_output_shape_for(self, input_shapes):\n assert input_shapes[0] == input_shapes[1], (\n \"The two input to AugmentFeature layer should have the same shape.\")\n batch_size = input_shapes[0][0]\n num_rows = input_shapes[0][1]\n num_dim = input_shapes[0][2]\n return (batch_size, num_rows, 4 * num_dim)\n \n def get_output_for(self, inputs, **kwargs):\n x, y = inputs[0], inputs[1]\n return T.concatenate([x, y, x - y, x * y], axis=2)\n\n\nclass GatedEncoder3D(MergeLayer):\n \"\"\"\n An implementation of the encoder part of a 3D Gated Autoencoder. It has\n the encoder only. \n \n It just returns the factor of H, not H. 
To get the real H, add\n another dense layer on top of the output.\n\n See __paper__ for more info.\n\n Input:\n x: (BATCH_SIZE, N_ROWS, 2*LSTM_HIDDEN)\n y: (BATCH_SIZE, N_ROWS, 2*LSTM_HIDDEN)\n\n Output:\n hfactors = (BATCH_SIZE, N_ROWS, num_hfactors)\n \n \"\"\"\n def __init__(self, incomings, num_hfactors,\n Wxf=init.GlorotUniform(),\n Wyf=init.GlorotUniform(),\n **kwargs):\n super(GatedEncoder3D, self).__init__(incomings, **kwargs)\n self.num_xfactors = self.input_shapes[0][2]\n self.num_yfactors = self.input_shapes[1][2]\n self.num_rows = self.input_shapes[0][1]\n self.num_hfactors = num_hfactors\n self.Wxf = self.add_param(\n Wxf, (self.num_rows, self.num_xfactors, self.num_hfactors), name='Wxf')\n self.Wyf = self.add_param(\n Wyf, (self.num_rows, self.num_yfactors, self.num_hfactors), name='Wyf')\n\n def get_output_shape_for(self, input_shapes):\n batch_size = input_shapes[0][0]\n return (batch_size, self.num_rows, self.num_hfactors)\n\n def get_output_for(self, inputs, **kwargs):\n x, y = inputs[0], inputs[1]\n # xfactor = T.batched_dot(x.dimshuffle(2, 0, 1), self.Wxf).dimshuffle(1, 2, 0)\n # yfactor = T.batched_dot(y.dimshuffle(2, 0, 1), self.Wyf).dimshuffle(1, 2, 0)\n xfactor = T.batched_dot(\n T.extra_ops.cpu_contiguous(x.dimshuffle(1, 0, 2)), self.Wxf).dimshuffle(1, 0, 2)\n yfactor = T.batched_dot(\n T.extra_ops.cpu_contiguous(y.dimshuffle(1, 0, 2)), self.Wyf).dimshuffle(1, 0, 2)\n return xfactor * yfactor\n\n\nclass StackedGatedEncoder3D(MergeLayer):\n \"\"\"\n An implementation of the encoder part of a 3D Gated Autoencoder. It has\n the encoder only. \n \n It just returns the factor of H, not H. 
To get the real H, add\n another dense layer on top of the output.\n\n See __paper__ for more info.\n\n Input:\n x: (BATCH_SIZE, N_ROWS, 2*LSTM_HIDDEN)\n y: (BATCH_SIZE, N_ROWS, 2*LSTM_HIDDEN)\n\n Output:\n hfactors = (BATCH_SIZE, N_ROWS, num_hfactors)\n \n \"\"\"\n def __init__(self, incomings,\n Wxf1=init.GlorotUniform(),\n Wyf1=init.GlorotUniform(),\n Wxf2=init.GlorotUniform(),\n Wyf2=init.GlorotUniform(),\n **kwargs):\n super(StackedGatedEncoder3D, self).__init__(incomings, **kwargs)\n self.num_xfactors = self.input_shapes[0][2]\n self.num_yfactors = self.input_shapes[1][2]\n assert self.num_xfactors == self.num_yfactors\n self.num_rows = self.input_shapes[0][1]\n self.Wxf1 = self.add_param(\n Wxf1, (self.num_rows, self.num_xfactors, self.num_xfactors), name='Wxf1')\n self.Wyf1 = self.add_param(\n Wyf1, (self.num_rows, self.num_yfactors, self.num_yfactors), name='Wyf1')\n self.Wxf2 = self.add_param(\n Wxf2, (self.num_rows, self.num_xfactors, self.num_xfactors), name='Wxf2')\n self.Wyf2 = self.add_param(\n Wyf2, (self.num_rows, self.num_yfactors, self.num_yfactors), name='Wyf2')\n\n def get_output_shape_for(self, input_shapes):\n batch_size = input_shapes[0][0]\n return (batch_size, self.num_rows, self.num_xfactors)\n\n def get_output_for(self, inputs, **kwargs):\n x, y = inputs[0], inputs[1]\n # xfactor = T.batched_dot(x.dimshuffle(2, 0, 1), self.Wxf).dimshuffle(1, 2, 0)\n # yfactor = T.batched_dot(y.dimshuffle(2, 0, 1), self.Wyf).dimshuffle(1, 2, 0)\n xfactor1 = T.tanh(T.batched_dot(\n T.extra_ops.cpu_contiguous(x.dimshuffle(1, 0, 2)), self.Wxf1).dimshuffle(1, 0, 2))\n yfactor1 = T.tanh(T.batched_dot(\n T.extra_ops.cpu_contiguous(y.dimshuffle(1, 0, 2)), self.Wyf1).dimshuffle(1, 0, 2))\n xfactor2 = T.batched_dot(\n T.extra_ops.cpu_contiguous(xfactor1.dimshuffle(1, 0, 2)), self.Wxf2).dimshuffle(1, 0, 2)\n yfactor2 = T.batched_dot(\n T.extra_ops.cpu_contiguous(yfactor1.dimshuffle(1, 0, 2)), self.Wyf2).dimshuffle(1, 0, 2)\n return xfactor2 * yfactor2\n\n\nclass 
GatedEncoder3DSharedW(MergeLayer):\n \"\"\"\n An implementation of the encoder part of a 3D Gated Autoencoder.\n\n It has the encoder only. \n \n It just returns the factor of H, not H. To get the real H, add\n another dense layer on top of the output.\n\n See __paper__ for more info.\n \n the two inputs, x and y, have to have the same shape.\n\n \"\"\"\n def __init__(self, incomings, num_hfactors,\n Wf=init.GlorotUniform(),\n **kwargs):\n super(GatedEncoder3DSharedW, self).__init__(incomings, **kwargs)\n self.num_factors = self.input_shapes[0][1]\n self.num_rows = self.input_shapes[0][2]\n self.num_hfactors = num_hfactors\n self.Wf = self.add_param(\n Wf, (self.num_rows, self.num_factors, self.num_hfactors), name='Wf')\n\n def get_output_shape_for(self, input_shapes):\n batch_size = input_shapes[0][0]\n return (batch_size, self.num_hfactors, self.num_rows)\n\n def get_output_for(self, inputs, **kwargs):\n x, y = inputs[0], inputs[1]\n # xfactor = T.batched_dot(x.dimshuffle(2, 0, 1), self.Wxf).dimshuffle(1, 2, 0)\n # yfactor = T.batched_dot(y.dimshuffle(2, 0, 1), self.Wyf).dimshuffle(1, 2, 0)\n xfactor = T.batched_dot(T.extra_ops.cpu_contiguous(x.dimshuffle(2, 0, 1)), self.Wf).dimshuffle(1, 2, 0)\n yfactor = T.batched_dot(T.extra_ops.cpu_contiguous(y.dimshuffle(2, 0, 1)), self.Wf).dimshuffle(1, 2, 0)\n return xfactor * yfactor\n\n\nclass GatedEncoder4D(MergeLayer):\n \"\"\"\n An implementation of the encoder part of a 4D Gated Autoencoder.\n\n It has the encoder only. \n \n It just returns the factor of H, not H. 
To get the real H, add\n another dense layer on top of the output.\n\n the two inputs, x and y, have to have the same shape.\n \n Input shape: (dim1, dim2, num_factors) # (BATCH_SIZE, N_ROWS, 2*LSTM_HIDDEN)\n weight shape: (num_slices, num_factors, num_hfactors) # (N_SLICES, 2*LSTM_HIDDEN, num_hfactors)\n Output shape: (dim1, num_slices, dim2, num_hfactors) # (BATCH_SIZE, N_SLICES, N_ROWS, num_hfactors)\n\n \"\"\"\n def __init__(self, incomings, num_slices, num_hfactors,\n Wf=init.GlorotUniform(),\n **kwargs):\n super(GatedEncoder4D, self).__init__(incomings, **kwargs)\n self.num_slices = num_slices\n self.num_factors = self.input_shapes[0][2]\n self.num_rows = self.input_shapes[0][1]\n self.num_hfactors = num_hfactors\n self.Wf = self.add_param(\n Wf, (self.num_slices, self.num_factors, self.num_hfactors), name='Wf')\n\n def get_output_shape_for(self, input_shapes):\n batch_size = input_shapes[0][0]\n return (batch_size, self.num_slices, self.num_rows, self.num_hfactors)\n\n def get_output_for(self, inputs, **kwargs):\n x, y = inputs[0], inputs[1]\n xfactor = T.tensordot(x, self.Wf, axes=(2, 1)).dimshuffle(0, 2, 1, 3)\n yfactor = T.tensordot(y, self.Wf, axes=(2, 1)).dimshuffle(0, 2, 1, 3)\n return xfactor * yfactor\n\n\nclass APAttentionBatch(MergeLayer):\n \"\"\"\n Attention Pooling mechanism. 
Compute a normalized weight over input sentences Q and A.\n\n input: Q & A: (BSIZE, dim1(dim2), DIM)\n Q & A mask (BSIZE, dim1(dim2))\n U: (NROW, DIM, DIM)\n output: G: (BSIZE, NROW, dim1, dim2)\n \"\"\"\n def __init__(self, incomings, masks=None, num_row=10, init_noise=0.001, **kwargs):\n self.have_mask = False\n if masks:\n incomings = incomings + masks\n self.have_mask = True\n super(APAttentionBatch, self).__init__(incomings, **kwargs)\n self.num_row = num_row\n self.init_noise = init_noise\n self.num_dim = self.input_shapes[0][2]\n U = (numpy.identity(self.num_dim) + init.Normal(std=self.init_noise).sample(\n shape=(self.num_row, self.num_dim, self.num_dim))\n ).astype(theano.config.floatX)\n self.U = self.add_param(U, U.shape, name='U')\n\n def get_output_shape_for(self, input_shapes):\n batch_size = input_shapes[0][0]\n num_wordQ = input_shapes[0][1]\n num_wordA = input_shapes[1][1]\n return (batch_size, self.num_row, num_wordQ, num_wordA)\n\n def get_output_for(self, inputs, **kwargs):\n Q = inputs[0]\n A = inputs[1]\n QU = T.tensordot(Q, self.U, axes=[2, 1]) # (BSIZE, dim1, NROW, DIM)\n QUA = T.batched_tensordot(QU, A, axes=[3, 2]).dimshuffle(0, 2, 1, 3)\n G = T.tanh(QUA) # (BSIZE, NROW, dim1, dim2)\n\n if self.have_mask:\n Qmask = inputs[2]\n Amask = inputs[3]\n Gmask = T.batched_dot(Qmask.dimshuffle(0, 1, 'x'),\n Amask.dimshuffle(0, 'x', 1)).dimshuffle(0, 'x', 1, 2)\n G = G * Gmask - (1 - Gmask) # pad -1 to trailing spaces.\n \n return G\n\n\nclass ComputeEmbeddingPool(MergeLayer):\n \"\"\"\n Input :\n x: (BSIZE, NROW, DIM)\n y: (BSIZE, NROW, DIM)\n Output :\n (BSIZE, NROW, NROW)\n \"\"\"\n def __init__(self, incomings, **kwargs):\n super(ComputeEmbeddingPool, self).__init__(incomings, **kwargs)\n\n def get_output_shape_for(self, input_shapes):\n xshape = input_shapes[0]\n yshape = input_shapes[1]\n return (xshape[0], xshape[1], yshape[1])\n\n def get_output_for(self, inputs, **kwargs):\n x = inputs[0]\n y = inputs[1]\n return T.batched_dot(x, 
y.dimshuffle(0, 2, 1))\n\n\nclass AttendOnEmbedding(MergeLayer):\n \"\"\"\n incomings=[x, embeddingpool], masks=[xmask, ymask], direction='col'\n or\n [y, embeddingpool], masks=[xmask, ymask], direction='row'\n \n Output :\n alpha; or beta\n \"\"\"\n def __init__(self, incomings, masks=None, direction='col', **kwargs):\n self.have_mask = False\n if masks:\n incomings = incomings + masks\n self.have_mask = True\n super(AttendOnEmbedding, self).__init__(incomings, **kwargs)\n self.direction = direction\n\n def get_output_shape_for(self, input_shapes):\n sent_shape = input_shapes[0]\n emat_shape = input_shapes[1]\n if self.direction == 'col':\n # x: (BSIZE, R_x, DIM)\n # emat: (BSIZE. R_x, R_y)\n # out: (BSIZE, R_y, DIM)\n return (sent_shape[0], emat_shape[2], sent_shape[2])\n elif self.direction == 'row':\n # y: (BSIZE, R_y, DIM)\n # emat: (BSIZE. R_x, R_y)\n # out: (BSIZE, R_x, DIM)\n return (sent_shape[0], emat_shape[1], sent_shape[2])\n\n def get_output_for(self, inputs, **kwargs):\n sentence = inputs[0]\n emat = inputs[1]\n if self.have_mask:\n xmask = inputs[2]\n ymask = inputs[3]\n xymask = T.batched_dot(xmask.dimshuffle(0, 1, 'x'),\n ymask.dimshuffle(0, 'x', 1))\n emat = emat * xymask.astype(theano.config.floatX) - \\\n numpy.asarray(1e36).astype(theano.config.floatX) * \\\n (1 - xymask).astype(theano.config.floatX)\n\n if self.direction == 'col': # softmax on x's dim, and multiply by x\n annotation = T.nnet.softmax(\n emat.dimshuffle(0, 2, 1).reshape((\n emat.shape[0] * emat.shape[2], emat.shape[1]))\n ).reshape((\n emat.shape[0], emat.shape[2], emat.shape[1]\n )) # (BSIZE, R_y, R_x)\n if self.have_mask:\n annotation = annotation * ymask.dimshuffle(\n 0, 1, 'x').astype(theano.config.floatX)\n elif self.direction == 'row': # softmax on y's dim, and multiply by y\n annotation = T.nnet.softmax(\n emat.reshape((\n emat.shape[0] * emat.shape[1], emat.shape[2]))\n ).reshape((\n emat.shape[0], emat.shape[1], emat.shape[2]\n )) # (BSIZE, R_x, R_y)\n if 
self.have_mask:\n annotation = annotation * xmask.dimshuffle(\n 0, 1, 'x').astype(theano.config.floatX)\n return T.batched_dot(annotation, sentence)\n\n\nclass MeanOverDim(MergeLayer):\n \"\"\"\n dim can be a number or a tuple of numbers to indicate which dim to compute mean.\n \"\"\"\n def __init__(self, incoming, mask=None, dim=1, **kwargs):\n incomings = [incoming]\n self.have_mask = False\n if mask:\n incomings.append(mask)\n self.have_mask = True\n super(MeanOverDim, self).__init__(incomings, **kwargs)\n self.dim = dim\n\n def get_output_shape_for(self, input_shapes):\n return tuple(x for i, x in enumerate(input_shapes[0]) if i != self.dim)\n\n def get_output_for(self, inputs, **kwargs):\n if self.have_mask:\n return T.sum(inputs[0], axis=self.dim) / \\\n inputs[1].sum(axis=1).dimshuffle(0, 'x')\n else:\n return T.mean(inputs[0], axis=self.dim)\n\n\nclass MaxpoolingG(Layer):\n \"\"\"\n Input : G matrix,\n Input shape: (BSIZE, NROW, dim1, dim2)\n\n Output shape:\n 'row': (BSIZE, dim2, NROW)\n 'col': (BSIZE, dim1, NROW)\n \"\"\"\n def __init__(self, incoming, direction='col', **kwargs):\n super(MaxpoolingG, self).__init__(incoming, **kwargs)\n self.direction = direction\n\n def get_output_shape_for(self, input_shape):\n if self.direction == 'row':\n return (input_shape[0], input_shape[3], input_shape[1])\n elif self.direction == 'col':\n return (input_shape[0], input_shape[2], input_shape[1])\n\n def get_output_for(self, input, **kwargs):\n G = input\n if self.direction == 'row':\n return T.max(G, axis=2).dimshuffle(0, 2, 1)\n elif self.direction == 'col':\n return T.max(G, axis=3).dimshuffle(0, 2, 1)\n\n\nclass Maxpooling(Layer):\n \"\"\"\n Input : N-D matrix,\n Input shape: (BSIZE, NROW, dim1, dim2)\n\n Output shape:\n \"\"\"\n def __init__(self, incoming, axis=1, **kwargs):\n super(Maxpooling, self).__init__(incoming, **kwargs)\n self.axis = axis\n\n def get_output_shape_for(self, input_shape):\n return input_shape[:self.axis] + 
input_shape[(self.axis+1):]\n\n def get_output_for(self, input, **kwargs):\n return T.max(input, axis=self.axis)\n"
},
{
"alpha_fraction": 0.6930623650550842,
"alphanum_fraction": 0.7631394267082214,
"avg_line_length": 45.78688430786133,
"blob_id": "7cbfbac9a96925e68c589f77d0a635e2da91c5f7",
"content_id": "7a2cd41f87feaaaac7f44aee3c590818f4cea90a",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2854,
"license_type": "permissive",
"max_line_length": 263,
"num_lines": 61,
"path": "/README.md",
"repo_name": "indigos33k3r/SelfAttentiveSentEmbed",
"src_encoding": "UTF-8",
"text": "# Self Attentive Sentence Embedding\nThis is the implementation for the paper **A Structured Self-Attentive Sentence Embedding**, which is published in ICLR 2017: https://arxiv.org/abs/1703.03130 . We provide reproductions for the results on Yelp, Age and SNLI datasets, as well as their baselines. \n\nThanks to the community, there have been various reimplementations of this work\nby researchers from different groups before we release\nthis version of code. Some of them even achieved higher performances than the\nresults we reported in the paper. We would really like to thank them here, and refer\nthose third party implementations at the end of this readme. They provide\nour model in different frameworks (TensorFlow, PyTorch) as well.\n\n\n## Requirements:\n[Theano](http://deeplearning.net/software/theano/) \n[Lasagne](http://lasagne.readthedocs.io/en/latest/) \n[scikit-learn](http://scikit-learn.org/stable/) \n[NLTK](http://www.nltk.org/)\n\n\n## Datasets and Preprocessing\nThe SNLI dataset can be downloaded from https://nlp.stanford.edu/projects/snli/ .\nThe file ``oov_vec.py`` is for preprocessing this dataset, no additional command line arguments needed.\n\nFor [Yelp](https://www.yelp.com/dataset_challenge) and [Age](http://pan.webis.de/clef16/pan16-web/author-profiling.html) data, they are preprocessed by the same file, with different command args:\n```\noov_vec_nlc.py age2 glove\noov_vec_nlc.py yelp glove\n```\nYou can also choose between `word2vec` and `glove` through the command line args.\n\n\n## Word Embeddings\nOur experiments are majorly based on GloVe embeddings (https://nlp.stanford.edu/projects/glove/), but we've also tested them on `word2vec` (https://code.google.com/archive/p/word2vec/) as well for Age and Yelp datasets.\n\n\n## Traning Baselines\nAfter running the preprocessing scripts beforehand, the baseline results on Age and Yelp datasets can be reproduced by the following configurations:\n\n```\npython lstmmlp_rate_l2_dpout.py 300 
3000 0.06 0.0001 0.5 word2vec 100 16 0.5 300 0.1 1 age2\npython lstmmlp_rate_l2_dpout.py 300 3000 0.06 0.0001 0.5 word2vec 100 32 0.5 300 0.1 1 yelp\n```\n\n## Training the Proposed Model\n\nFor reproducing the results in our paper on Age and Yelp, please run:\n```\npython semlp_rate_l2_dpout.py 300 350 2000 30 0.001 0.3 0.0001 1. glove 300 50 0.5 100 0.1 1 age2\npython semlp_rate_l2_dpout.py 300 350 3000 30 0.001 0.3 0.0001 1. glove 300 50 0.5 100 0.1 1 yelp\n```\n\nAnd on SNLI dataset:\n```\npython segae_gaereg.py 300 150 4000 30 0.01 0.1 0.5 300 50 100 12 0.1\n```\n\n## Third Party Implementations\n* PyTorch implementation by Haoyue Shi (@ExplorerFreda): https://github.com/ExplorerFreda/Structured-Self-Attentive-Sentence-Embedding\n\n* PyTorch implementation by Yufeng Ma (@yufengm): https://github.com/yufengm/SelfAttentive\n\n* TensorFlow implementation by Diego Antognini (@Diego999): https://github.com/Diego999/SelfSent\n"
},
{
"alpha_fraction": 0.5669026374816895,
"alphanum_fraction": 0.5852518677711487,
"avg_line_length": 44.75786209106445,
"blob_id": "22e4d4c7c81e69621df95b09b3b72e80a7c3647b",
"content_id": "266f704946ab4277129e09d2599bbc51bcca1fe6",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14551,
"license_type": "permissive",
"max_line_length": 99,
"num_lines": 318,
"path": "/segae_gaereg.py",
"repo_name": "indigos33k3r/SelfAttentiveSentEmbed",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\n\nimport time\nimport os\nimport sys\nimport numpy\nimport cPickle\nimport theano\nimport theano.tensor as T\nimport lasagne\nfrom lasagne.layers.recurrent import Gate\nfrom lasagne import init, nonlinearities\n\nfrom util_layers import DenseLayer3DInput, Softmax3D, ApplyAttention, GatedEncoder3D\nfrom dataset import SNLI\n\nimport pdb\ntheano.config.compute_test_value = 'warn' # 'off' # Use 'warn' to activate this feature\n\n\nLSTM_HIDDEN = int(sys.argv[1]) # 150 Hidden unit numbers in LSTM\nATTENTION_HIDDEN = int(sys.argv[2]) # 350 Hidden unit numbers in attention MLP\nOUT_HIDDEN = int(sys.argv[3]) # 3000 Hidden unit numbers in output MLP\nN_ROWS = int(sys.argv[4]) # 10 Number of rows in matrix representation\nLEARNING_RATE = float(sys.argv[5]) # 0.01\nATTENTION_PENALTY = float(sys.argv[6]) # 1.\nGAEREG = float(sys.argv[7]) # 0.5 Dropout in GAE\nWE_DIM = int(sys.argv[8]) # 300 Dim of word embedding\nBATCH_SIZE = int(sys.argv[9]) # 50 Minibatch size\nGRAD_CLIP = int(sys.argv[10]) # 100 All gradients above this will be clipped\nNUM_EPOCHS = int(sys.argv[11]) # 12 Number of epochs to train the net\nSTD = float(sys.argv[12]) # 0.1 Standard deviation of weights in initialization\nfilename = __file__.split('.')[0] + \\\n '_LSTMHIDDEN' + str(LSTM_HIDDEN) + \\\n '_ATTENTIONHIDDEN' + str(ATTENTION_HIDDEN) + \\\n '_OUTHIDDEN' + str(OUT_HIDDEN) + \\\n '_NROWS' + str(N_ROWS) + \\\n '_LEARNINGRATE' + str(LEARNING_RATE) + \\\n '_ATTENTIONPENALTY' + str(ATTENTION_PENALTY) + \\\n '_GAEREG' + str(GAEREG) + \\\n '_WEDIM' + str(WE_DIM) + \\\n '_BATCHSIZE' + str(BATCH_SIZE) + \\\n '_GRADCLIP' + str(GRAD_CLIP) + \\\n '_NUMEPOCHS' + str(NUM_EPOCHS) + \\\n '_STD' + str(STD)\n\n\ndef main(num_epochs=NUM_EPOCHS):\n print(\"Loading data ...\")\n snli = SNLI(batch_size=BATCH_SIZE)\n train_batches = list(snli.train_minibatch_generator())\n dev_batches = list(snli.dev_minibatch_generator())\n 
test_batches = list(snli.test_minibatch_generator())\n W_word_embedding = snli.weight # W shape: (# vocab size, WE_DIM)\n del snli\n\n print(\"Building network ...\")\n ########### sentence embedding encoder ###########\n # sentence vector, with each number standing for a word number\n input_var = T.TensorType('int32', [False, False])('sentence_vector')\n input_var.tag.test_value = numpy.hstack((numpy.random.randint(1, 10000, (50, 20), 'int32'),\n numpy.zeros((50, 5)).astype('int32')))\n input_var.tag.test_value[1, 20:22] = (413, 45)\n l_in = lasagne.layers.InputLayer(shape=(BATCH_SIZE, None), input_var=input_var)\n \n input_mask = T.TensorType('int32', [False, False])('sentence_mask')\n input_mask.tag.test_value = numpy.hstack((numpy.ones((50, 20), dtype='int32'),\n numpy.zeros((50, 5), dtype='int32')))\n input_mask.tag.test_value[1, 20:22] = 1\n l_mask = lasagne.layers.InputLayer(shape=(BATCH_SIZE, None), input_var=input_mask)\n\n # output shape (BATCH_SIZE, None, WE_DIM)\n l_word_embed = lasagne.layers.EmbeddingLayer(\n l_in,\n input_size=W_word_embedding.shape[0],\n output_size=W_word_embedding.shape[1],\n W=W_word_embedding) # how to set it to be non-trainable?\n\n\n # bidirectional LSTM\n l_forward = lasagne.layers.LSTMLayer(\n l_word_embed, mask_input=l_mask, num_units=LSTM_HIDDEN,\n ingate=Gate(W_in=init.Normal(STD), W_hid=init.Normal(STD), \n W_cell=init.Normal(STD)),\n forgetgate=Gate(W_in=init.Normal(STD), W_hid=init.Normal(STD),\n W_cell=init.Normal(STD)),\n cell=Gate(W_in=init.Normal(STD), W_hid=init.Normal(STD),\n W_cell=None, nonlinearity=nonlinearities.tanh),\n outgate=Gate(W_in=init.Normal(STD), W_hid=init.Normal(STD), \n W_cell=init.Normal(STD)),\n nonlinearity=lasagne.nonlinearities.tanh,\n peepholes = False,\n grad_clipping=GRAD_CLIP)\n\n l_backward = lasagne.layers.LSTMLayer(\n l_word_embed, mask_input=l_mask, num_units=LSTM_HIDDEN,\n ingate=Gate(W_in=init.Normal(STD), W_hid=init.Normal(STD),\n W_cell=init.Normal(STD)),\n 
forgetgate=Gate(W_in=init.Normal(STD), W_hid=init.Normal(STD),\n W_cell=init.Normal(STD)),\n cell=Gate(W_in=init.Normal(STD), W_hid=init.Normal(STD),\n W_cell=None, nonlinearity=nonlinearities.tanh),\n outgate=Gate(W_in=init.Normal(STD), W_hid=init.Normal(STD), \n W_cell=init.Normal(STD)),\n nonlinearity=lasagne.nonlinearities.tanh,\n peepholes = False,\n grad_clipping=GRAD_CLIP, backwards=True)\n \n # output dim: (BATCH_SIZE, None, 2*LSTM_HIDDEN)\n l_concat = lasagne.layers.ConcatLayer([l_forward, l_backward], axis=2)\n\n # Attention mechanism to get sentence embedding\n # output dim: (BATCH_SIZE, None, ATTENTION_HIDDEN)\n l_ws1 = DenseLayer3DInput(l_concat, num_units=ATTENTION_HIDDEN)\n # output dim: (BATCH_SIZE, None, N_ROWS)\n l_ws2 = DenseLayer3DInput(l_ws1, num_units=N_ROWS, nonlinearity=None)\n l_annotations = Softmax3D(l_ws2, mask=l_mask)\n # output dim: (BATCH_SIZE, 2*LSTM_HIDDEN, N_ROWS)\n l_sentence_embedding = ApplyAttention([l_annotations, l_concat])\n\n # beam search? Bi lstm in the sentence embedding layer? 
etc.\n\n\n ########### get embeddings for hypothesis and premise ###########\n # hypothesis\n input_var_h = T.TensorType('int32', [False, False])('hypothesis_vector')\n input_var_h.tag.test_value = numpy.hstack((numpy.random.randint(1, 10000, (50, 18), 'int32'),\n numpy.zeros((50, 6)).astype('int32')))\n l_in_h = lasagne.layers.InputLayer(shape=(BATCH_SIZE, None), input_var=input_var_h)\n \n input_mask_h = T.TensorType('int32', [False, False])('hypo_mask')\n input_mask_h.tag.test_value = numpy.hstack((numpy.ones((50, 18), dtype='int32'),\n numpy.zeros((50, 6), dtype='int32')))\n input_mask_h.tag.test_value[1, 18:22] = 1\n l_mask_h = lasagne.layers.InputLayer(shape=(BATCH_SIZE, None), input_var=input_mask_h)\n \n # premise\n input_var_p = T.TensorType('int32', [False, False])('premise_vector')\n input_var_p.tag.test_value = numpy.hstack((numpy.random.randint(1, 10000, (50, 16), 'int32'),\n numpy.zeros((50, 3)).astype('int32')))\n l_in_p = lasagne.layers.InputLayer(shape=(BATCH_SIZE, None), input_var=input_var_p)\n \n input_mask_p = T.TensorType('int32', [False, False])('premise_mask')\n input_mask_p.tag.test_value = numpy.hstack((numpy.ones((50, 16), dtype='int32'),\n numpy.zeros((50, 3), dtype='int32')))\n input_mask_p.tag.test_value[1, 16:18] = 1\n l_mask_p = lasagne.layers.InputLayer(shape=(BATCH_SIZE, None), input_var=input_mask_p)\n \n \n hypothesis_embedding, hypothesis_annotation = lasagne.layers.get_output(\n [l_sentence_embedding, l_annotations],\n {l_in: l_in_h.input_var, l_mask: l_mask_h.input_var})\n premise_embedding, premise_annotation = lasagne.layers.get_output(\n [l_sentence_embedding, l_annotations],\n {l_in: l_in_p.input_var, l_mask: l_mask_p.input_var})\n\n\n ########### gated encoder and output MLP ##########\n l_hypo_embed = lasagne.layers.InputLayer(\n shape=(BATCH_SIZE, N_ROWS, 2*LSTM_HIDDEN), input_var=hypothesis_embedding)\n l_pre_embed = lasagne.layers.InputLayer(\n shape=(BATCH_SIZE, N_ROWS, 2*LSTM_HIDDEN), input_var=premise_embedding)\n 
\n # output dim: (BATCH_SIZE, 2*LSTM_HIDDEN, N_ROWS)\n l_factors = GatedEncoder3D([l_hypo_embed, l_pre_embed], num_hfactors=2*LSTM_HIDDEN)\n\n # Dropout:\n l_factors_noise = lasagne.layers.DropoutLayer(l_factors, p=GAEREG, rescale=True)\n\n # l_hids = DenseLayer3DWeight()\n\n l_outhid = lasagne.layers.DenseLayer(\n l_factors_noise, num_units=OUT_HIDDEN, nonlinearity=lasagne.nonlinearities.rectify)\n\n # Dropout:\n l_outhid_noise = lasagne.layers.DropoutLayer(l_outhid, p=GAEREG, rescale=True)\n \n l_output = lasagne.layers.DenseLayer(\n l_outhid_noise, num_units=3, nonlinearity=lasagne.nonlinearities.softmax)\n\n\n ########### target, cost, validation, etc. ##########\n target_values = T.ivector('target_output')\n target_values.tag.test_value = numpy.asarray([1,] * 50, dtype='int32')\n\n network_output = lasagne.layers.get_output(l_output)\n network_output_clean = lasagne.layers.get_output(l_output, deterministic=True)\n\n # penalty term and cost\n attention_penalty = T.mean((T.batched_dot(\n hypothesis_annotation,\n # pay attention to this line:\n # T.extra_ops.cpu_contiguous(hypothesis_annotation.dimshuffle(0, 2, 1))\n hypothesis_annotation.dimshuffle(0, 2, 1)\n ) - T.eye(hypothesis_annotation.shape[1]).dimshuffle('x', 0, 1)\n )**2, axis=(0, 1, 2)) + T.mean((T.batched_dot(\n premise_annotation,\n # T.extra_ops.cpu_contiguous(premise_annotation.dimshuffle(0, 2, 1)) # ditto.\n premise_annotation.dimshuffle(0, 2, 1) # ditto.\n ) - T.eye(premise_annotation.shape[1]).dimshuffle('x', 0, 1))**2, axis=(0, 1, 2))\n \n cost = T.mean(T.nnet.categorical_crossentropy(network_output, target_values) + \\\n ATTENTION_PENALTY * attention_penalty)\n cost_clean = T.mean(T.nnet.categorical_crossentropy(network_output_clean, target_values) + \\\n ATTENTION_PENALTY * attention_penalty)\n\n # Retrieve all parameters from the network\n all_params = lasagne.layers.get_all_params(l_output) + \\\n lasagne.layers.get_all_params(l_sentence_embedding)\n numparams = sum([numpy.prod(i) for i in 
[i.shape.eval() for i in all_params]])\n print(\"Number of params: {}\".format(numparams))\n \n # if exist param file then load params\n look_for = 'params' + os.sep + 'params_' + filename + '.pkl'\n if os.path.isfile(look_for):\n print(\"Resuming from file: \" + look_for)\n all_param_values = cPickle.load(open(look_for, 'rb'))\n for p, v in zip(all_params, all_param_values):\n p.set_value(v)\n\n # withoutwe_params = all_params + [l_word_embed.W]\n \n # Compute updates for training\n print(\"Computing updates ...\")\n updates = lasagne.updates.adagrad(cost, all_params, LEARNING_RATE)\n\n # Theano functions for training and computing cost\n print(\"Compiling functions ...\")\n network_prediction = T.argmax(network_output, axis=1)\n error_rate = T.mean(T.neq(network_prediction, target_values))\n network_prediction_clean = T.argmax(network_output_clean, axis=1)\n error_rate_clean = T.mean(T.neq(network_prediction_clean, target_values))\n \n train = theano.function(\n [l_in_h.input_var, l_mask_h.input_var,\n l_in_p.input_var, l_mask_p.input_var, target_values],\n [cost, error_rate], updates=updates)\n compute_cost = theano.function(\n [l_in_h.input_var, l_mask_h.input_var,\n l_in_p.input_var, l_mask_p.input_var, target_values],\n [cost_clean, error_rate_clean])\n\n def evaluate(mode):\n if mode == 'dev':\n data = dev_batches\n if mode == 'test':\n data = test_batches\n \n set_cost = 0.\n set_error_rate = 0.\n for batches_seen, (hypo, hm, premise, pm, truth) in enumerate(data, 1):\n _cost, _error = compute_cost(hypo, hm, premise, pm, truth)\n set_cost = (1.0 - 1.0 / batches_seen) * set_cost + \\\n 1.0 / batches_seen * _cost\n set_error_rate = (1.0 - 1.0 / batches_seen) * set_error_rate + \\\n 1.0 / batches_seen * _error\n \n return set_cost, set_error_rate\n \n dev_set_cost, dev_set_error = evaluate('dev')\n print(\"BEFORE TRAINING: dev cost %f, error %f\" % (dev_set_cost, dev_set_error))\n print(\"Training ...\")\n try:\n for epoch in range(num_epochs):\n 
train_set_cost = 0.\n train_set_error = 0.\n start = time.time()\n \n for batches_seen, (hypo, hm, premise, pm, truth) in enumerate(\n train_batches, 1):\n _cost, _error = train(hypo, hm, premise, pm, truth)\n train_set_cost = (1.0 - 1.0 / batches_seen) * train_set_cost + \\\n 1.0 / batches_seen * _cost\n train_set_error = (1.0 - 1.0 / batches_seen) * train_set_error + \\\n 1.0 / batches_seen * _error\n if batches_seen % 100 == 0:\n end = time.time()\n print(\"Sample %d %.2fs, lr %.4f, train cost %f, error %f\" % (\n batches_seen * BATCH_SIZE,\n LEARNING_RATE,\n end - start,\n train_set_cost,\n train_set_error))\n start = end\n\n if batches_seen % 2000 == 0:\n dev_set_cost, dev_set_error = evaluate('dev')\n test_set_cost, test_set_error = evaluate('test')\n print(\"***dev cost %f, error %f\" % (dev_set_cost, dev_set_error))\n print(\"***test cost %f, error %f\" % (test_set_cost, test_set_error))\n\n # save parameters\n all_param_values = [p.get_value() for p in all_params]\n cPickle.dump(all_param_values,\n open('params' + os.sep + 'params_' + filename + '.pkl', 'wb'))\n\n # load params\n # all_param_values = cPickle.load(open('params' + os.sep + 'params_' + filename, 'rb'))\n # for p, v in zip(all_params, all_param_values):\n # p.set_value(v)\n\n dev_set_cost, dev_set_error = evaluate('dev')\n test_set_cost, test_set_error = evaluate('test')\n\n print(\"epoch %d, cost: train %f dev %f test %f;\\n\"\n \" error train %f dev %f test %f\" % (\n epoch,\n train_set_cost, dev_set_cost, test_set_cost,\n train_set_error, dev_set_error, test_set_error))\n except KeyboardInterrupt:\n pdb.set_trace()\n pass\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.5608888864517212,
"alphanum_fraction": 0.583644449710846,
"avg_line_length": 34.828025817871094,
"blob_id": "22895dd10048b46d9e0a29412a41d54ebc625597",
"content_id": "247f562c4ad94686f288d83d1f7426d9f22d1fce",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5625,
"license_type": "permissive",
"max_line_length": 94,
"num_lines": 157,
"path": "/oov_vec.py",
"repo_name": "indigos33k3r/SelfAttentiveSentEmbed",
"src_encoding": "UTF-8",
"text": "import sys\nimport string\nimport numpy\nimport cPickle\nimport numpy as np\nimport nltk\n\nimport pdb\n\nprint \"loading GloVe...\"\nw1 = {}\nvec = open('/Users/johanlin/Datasets/wordembeddings/glove.840B.300d.txt', 'r')\nfor line in vec.readlines():\n line=line.split(' ')\n w1[line[0]] = np.asarray([float(x) for x in line[1:]]).astype('float32')\nvec.close()\n\nclassname = {'entailment':0, 'neutral': 1, 'contradiction': 2, '-': 3}\nf1 = open('/Users/johanlin/Datasets/snli_1.0/snli_1.0_train.txt', 'r')\nf2 = open('/Users/johanlin/Datasets/snli_1.0/snli_1.0_dev.txt', 'r')\nf3 = open('/Users/johanlin/Datasets/snli_1.0/snli_1.0_test.txt', 'r')\nf = [f1, f2, f3]\n\n\nprint \"processing dataset: 3 dots to punch: \",\nsys.stdout.flush()\nw2 = {}\nw_referred = {0: 0} # reserve 0 for future padding\nvocab_count = 1 # 0 is reserved for future padding\ntrain_valid_test = []\nfor file in f:\n print \".\",\n sys.stdout.flush()\n pairs = []\n filehead = file.readline() # strip the file head\n for line in file.readlines():\n line=line.split('\\t')\n s1 = nltk.word_tokenize(line[5])\n s1[0]=s1[0].lower()\n s2 = nltk.word_tokenize(line[6])\n s2[0]=s2[0].lower()\n\n truth = classname[line[0]]\n \n if truth != 3: # exclude those '-' tags\n s1_words = []\n for word in s1:\n # strip some possible weird punctuations\n word = word.strip(string.punctuation)\n if not w_referred.has_key(word):\n w_referred[word] = vocab_count\n vocab_count += 1\n s1_words.append(w_referred[word])\n if not w1.has_key(word):\n if not w2.has_key(word):\n w2[word]=[]\n # find the WE for its surounding words\n for neighbor in s1:\n if w1.has_key(neighbor):\n w2[word].append(w1[neighbor])\n\n s2_words = []\n for word in s2:\n word = word.strip(string.punctuation)\n if not w_referred.has_key(word):\n w_referred[word] = vocab_count\n vocab_count += 1\n s2_words.append(w_referred[word])\n if not w1.has_key(word):\n if not w2.has_key(word):\n w2[word]=[]\n for neighbor in s2:\n if w1.has_key(neighbor):\n 
w2[word].append(w1[neighbor])\n\n pairs.append((numpy.asarray(s1_words).astype('int32'),\n numpy.asarray(s2_words).astype('int32'),\n numpy.asarray(truth).astype('int32')))\n\n train_valid_test.append(pairs)\n file.close()\n\n\nprint \"\\naugmenting word embedding vocabulary...\"\n# this block is causing memory error in a 8G computer. Using alternatives.\n# all_sentences = [w2[x] for x in w2.iterkeys()]\n# all_words = [item for sublist in all_sentences for item in sublist]\n# mean_words = np.mean(all_words)\n# mean_words_std = np.std(all_words)\nmean_words = np.zeros((300,))\nmean_words_std = 1e-1\n\nnpy_rng = np.random.RandomState(123)\nfor k in w2.iterkeys():\n if len(w2[k]) != 0:\n w2[k] = sum(w2[k]) / len(w2[k]) # mean of all surounding words\n else:\n # len(w2[k]) == 0 cases: ['cantunderstans', 'motocyckes', 'arefun']\n # I hate those silly guys...\n w2[k] = mean_words + npy_rng.randn(mean_words.shape[0]) * \\\n mean_words_std * 0.1\n\nw2.update(w1)\n\nprint \"generating weight values...\"\n# reverse w_referred's key-value;\ninv_w_referred = {v: k for k, v in w_referred.items()}\n\n# number --inv_w_referred--> word --w2--> embedding\nordered_word_embedding = [numpy.zeros((1, 300), dtype='float32'), ] + \\\n [w2[inv_w_referred[n]].reshape(1, -1) for n in range(1, len(inv_w_referred))]\n\n# to get the matrix\nweight = numpy.concatenate(ordered_word_embedding, axis=0)\n\n\nprint \"dumping converted datasets...\"\nsave_file = open('/Users/johanlin/Datasets/snli_1.0/SNLI_GloVe_converted', 'wb')\ncPickle.dump(\"dict: truth values and their corresponding class name\\n\"\n \"the whole dataset, in list of list of tuples: list of train/valid/test set -> \"\n \"list of sentence pairs -> tuple with structure:\"\n \"(hypothesis, premise, truth class), all entries in numbers\\n\"\n \"numpy.ndarray: a matrix with all referred words' embedding in its rows,\"\n \"embeddings are ordered by their corresponding word numbers.\\n\"\n \"dict: the augmented GloVe word embedding. 
contains all possible tokens in SNLI.\"\n \"All initial GloVe entries are included.\\n\"\n \"dict w_referred: word to their corresponding number\\n\"\n \"inverse of w_referred, number to words\\n\",\n save_file)\ncPickle.dump(classname, save_file)\ncPickle.dump(train_valid_test, save_file)\ncPickle.dump(weight, save_file)\ncPickle.dump(w2, save_file)\ncPickle.dump(w_referred, save_file)\ncPickle.dump(inv_w_referred, save_file)\nsave_file.close()\n\n\n# check:\ndef reconstruct_sentence(sent_nums):\n sent_words = [inv_w_referred[n] for n in sent_nums]\n return sent_words\n\ndef check_word_embed(sent_nums):\n sent_words = reconstruct_sentence(sent_nums)\n\n word_embeds_from_nums = [weight[n] for n in sent_nums]\n word_embeds_from_words = [w2[n] for n in sent_words]\n\n error = 0.\n for i, j in zip(word_embeds_from_nums, word_embeds_from_words):\n error += numpy.sum(i-j)\n \n if error == 0.:\n return True\n else:\n return False\n"
},
{
"alpha_fraction": 0.5464549660682678,
"alphanum_fraction": 0.5562752485275269,
"avg_line_length": 42.37764358520508,
"blob_id": "a6ae767ccaee1803e099bc447d5f966bb0b66b79",
"content_id": "ba52948517c89b86df15d3df00ea580c1bfa05b7",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14358,
"license_type": "permissive",
"max_line_length": 96,
"num_lines": 331,
"path": "/dataset.py",
"repo_name": "indigos33k3r/SelfAttentiveSentEmbed",
"src_encoding": "UTF-8",
"text": "import os\nimport cPickle\nimport theano\nimport numpy\nimport warnings\n\nimport pdb\n\n\nclass SNLI(object):\n def __init__(self, batch_size=50, loadall=False,\n datapath=\"/home/hantek/datasets/SNLI_GloVe_converted\"):\n self.batch_size = batch_size\n self.datapath = datapath\n \n data_file = open(self.datapath, 'rb')\n cPickle.load(data_file)\n cPickle.load(data_file)\n self.train_set, self.dev_set, self.test_set = cPickle.load(data_file)\n self.weight = cPickle.load(data_file).astype(theano.config.floatX)\n if loadall:\n self.word2embed = cPickle.load(data_file) # key: word, value: embedding\n self.word2num = cPickle.load(data_file) # key: word, value: number\n self.num2word = cPickle.load(data_file) # key: number, value: word\n data_file.close()\n\n self.train_size = len(self.train_set)\n self.dev_size = len(self.dev_set)\n self.test_size = len(self.test_set)\n self.train_ptr = 0\n self.dev_ptr = 0\n self.test_ptr = 0\n\n def train_minibatch_generator(self):\n while self.train_ptr <= self.train_size - self.batch_size:\n self.train_ptr += self.batch_size\n minibatch = self.train_set[self.train_ptr - self.batch_size : self.train_ptr]\n if len (minibatch) < self.batch_size:\n warnings.warn(\"There will be empty slots in minibatch data.\", UserWarning)\n \n longest_hypo, longest_premise = \\\n numpy.max(map(lambda x: (len(x[0]), len(x[1])), minibatch), axis=0)\n\n hypos = numpy.zeros((self.batch_size, longest_hypo), dtype='int32')\n premises = numpy.zeros((self.batch_size, longest_premise), dtype='int32')\n truth = numpy.zeros((self.batch_size,), dtype='int32')\n mask_hypos = numpy.zeros((self.batch_size, longest_hypo), dtype='int32')\n mask_premises = numpy.zeros((self.batch_size, longest_premise), dtype='int32')\n for i, (h, p, t) in enumerate(minibatch):\n hypos[i, :len(h)] = h\n mask_hypos[i, :len(h)] = (1,) * len(h)\n premises[i, :len(p)] = p\n mask_premises[i, :len(p)] = (1,) * len(p)\n truth[i] = t\n \n yield hypos, mask_hypos, premises, 
mask_premises, truth\n\n else:\n self.train_ptr = 0\n raise StopIteration\n\n def dev_minibatch_generator(self, ):\n while self.dev_ptr <= self.dev_size - self.batch_size:\n self.dev_ptr += self.batch_size\n minibatch = self.dev_set[self.dev_ptr - self.batch_size : self.dev_ptr]\n if len (minibatch) < self.batch_size:\n warnings.warn(\"There will be empty slots in minibatch data.\", UserWarning)\n\n longest_hypo, longest_premise = \\\n numpy.max(map(lambda x: (len(x[0]), len(x[1])), minibatch), axis=0)\n\n hypos = numpy.zeros((self.batch_size, longest_hypo), dtype='int32')\n premises = numpy.zeros((self.batch_size, longest_premise), dtype='int32')\n truth = numpy.zeros((self.batch_size,), dtype='int32')\n mask_hypos = numpy.zeros((self.batch_size, longest_hypo), dtype='int32')\n mask_premises = numpy.zeros((self.batch_size, longest_premise), dtype='int32')\n for i, (h, p, t) in enumerate(minibatch):\n hypos[i, :len(h)] = h\n mask_hypos[i, :len(h)] = (1,) * len(h)\n premises[i, :len(p)] = p\n mask_premises[i, :len(p)] = (1,) * len(p)\n truth[i] = t\n\n yield hypos, mask_hypos, premises, mask_premises, truth\n\n else:\n self.dev_ptr = 0\n raise StopIteration\n\n def test_minibatch_generator(self, ):\n while self.test_ptr <= self.test_size - self.batch_size:\n self.test_ptr += self.batch_size\n minibatch = self.test_set[self.test_ptr - self.batch_size : self.test_ptr]\n if len (minibatch) < self.batch_size:\n warnings.warn(\"There will be empty slots in minibatch data.\", UserWarning)\n\n longest_hypo, longest_premise = \\\n numpy.max(map(lambda x: (len(x[0]), len(x[1])), minibatch), axis=0)\n\n hypos = numpy.zeros((self.batch_size, longest_hypo), dtype='int32')\n premises = numpy.zeros((self.batch_size, longest_premise), dtype='int32')\n truth = numpy.zeros((self.batch_size,), dtype='int32')\n mask_hypos = numpy.zeros((self.batch_size, longest_hypo), dtype='int32')\n mask_premises = numpy.zeros((self.batch_size, longest_premise), dtype='int32')\n for i, (h, p, t) in 
enumerate(minibatch):\n hypos[i, :len(h)] = h\n mask_hypos[i, :len(h)] = (1,) * len(h)\n premises[i, :len(p)] = p\n mask_premises[i, :len(p)] = (1,) * len(p)\n truth[i] = t\n\n yield hypos, mask_hypos, premises, mask_premises, truth\n\n else:\n self.test_ptr = 0\n raise StopIteration\n\n\nclass SICK(SNLI):\n def __init__(self, batch_size=50, loadall=False, augment=False,\n datapath=\"/Users/johanlin/Datasets/SICK/\"):\n self.batch_size = batch_size\n if augment:\n self.datapath = datapath + os.sep + 'SICK_augmented.pkl'\n else:\n self.datapath = datapath + os.sep + 'SICK.pkl'\n super(SICK, self).__init__(batch_size, loadall, self.datapath)\n\n\nclass YELP(object):\n def __init__(self, batch_size=50, loadall=False,\n datapath=\"/home/hantek/datasets/NLC_data/yelp/yelp.pkl\"):\n self.batch_size = batch_size\n self.datapath = datapath\n \n data_file = open(self.datapath, 'rb')\n cPickle.load(data_file)\n cPickle.load(data_file)\n self.train_set, self.dev_set, self.test_set = cPickle.load(data_file)\n self.weight = cPickle.load(data_file).astype(theano.config.floatX)\n if loadall:\n self.word2embed = cPickle.load(data_file) # key: word, value: embedding\n self.word2num = cPickle.load(data_file) # key: word, value: number\n self.num2word = cPickle.load(data_file) # key: number, value: word\n data_file.close()\n\n self.train_size = len(self.train_set)\n self.dev_size = len(self.dev_set)\n self.test_size = len(self.test_set)\n self.train_ptr = 0\n self.dev_ptr = 0\n self.test_ptr = 0\n\n def train_minibatch_generator(self):\n while self.train_ptr <= self.train_size - self.batch_size:\n self.train_ptr += self.batch_size\n minibatch = self.train_set[self.train_ptr - self.batch_size : self.train_ptr]\n if len (minibatch) < self.batch_size:\n warnings.warn(\"There will be empty slots in minibatch data.\", UserWarning)\n \n longest_hypo = numpy.max(map(lambda x: len(x[0]), minibatch), axis=0)\n\n hypos = numpy.zeros((self.batch_size, longest_hypo), dtype='int32')\n truth = 
numpy.zeros((self.batch_size,), dtype='int32')\n mask_hypos = numpy.zeros((self.batch_size, longest_hypo), dtype='int32')\n for i, (h, t) in enumerate(minibatch):\n hypos[i, :len(h)] = h\n mask_hypos[i, :len(h)] = (1,) * len(h)\n truth[i] = t\n \n yield hypos, mask_hypos, truth\n\n else:\n self.train_ptr = 0\n raise StopIteration\n\n def dev_minibatch_generator(self, ):\n while self.dev_ptr <= self.dev_size - self.batch_size:\n self.dev_ptr += self.batch_size\n minibatch = self.dev_set[self.dev_ptr - self.batch_size : self.dev_ptr]\n if len (minibatch) < self.batch_size:\n warnings.warn(\"There will be empty slots in minibatch data.\", UserWarning)\n\n longest_hypo = numpy.max(map(lambda x: len(x[0]), minibatch), axis=0)\n\n hypos = numpy.zeros((self.batch_size, longest_hypo), dtype='int32')\n truth = numpy.zeros((self.batch_size,), dtype='int32')\n mask_hypos = numpy.zeros((self.batch_size, longest_hypo), dtype='int32')\n for i, (h, t) in enumerate(minibatch):\n hypos[i, :len(h)] = h\n mask_hypos[i, :len(h)] = (1,) * len(h)\n truth[i] = t\n \n yield hypos, mask_hypos, truth\n\n else:\n self.dev_ptr = 0\n raise StopIteration\n\n def test_minibatch_generator(self, ):\n while self.test_ptr <= self.test_size - self.batch_size:\n self.test_ptr += self.batch_size\n minibatch = self.test_set[self.test_ptr - self.batch_size : self.test_ptr]\n if len (minibatch) < self.batch_size:\n warnings.warn(\"There will be empty slots in minibatch data.\", UserWarning)\n\n longest_hypo = numpy.max(map(lambda x: len(x[0]), minibatch), axis=0)\n\n hypos = numpy.zeros((self.batch_size, longest_hypo), dtype='int32')\n truth = numpy.zeros((self.batch_size,), dtype='int32')\n mask_hypos = numpy.zeros((self.batch_size, longest_hypo), dtype='int32')\n for i, (h, t) in enumerate(minibatch):\n hypos[i, :len(h)] = h\n mask_hypos[i, :len(h)] = (1,) * len(h)\n truth[i] = t\n \n yield hypos, mask_hypos, truth\n\n else:\n self.test_ptr = 0\n raise StopIteration\n\n\nclass AGE2(YELP):\n def 
__init__(self, batch_size=50, loadall=False,\n datapath=\"/home/hantek/datasets/NLC_data/age2/age2.pkl\"):\n super(AGE2, self).__init__(batch_size, loadall, datapath)\n\n\nclass STANFORDSENTIMENTTREEBANK(object):\n def __init__(self, batch_size=50, loadext=False, loadhelper=False, wordembed='word2vec',\n datapath=\"/home/hantek/datasets/StanfordSentimentTreebank\"):\n self.batch_size = batch_size\n self.datapath = datapath\n \n save_file = open(self.datapath + os.sep + 'sst_' + wordembed + '.pkl', 'rb')\n cPickle.load(save_file)\n self.train_set, self.dev_set, self.test_set = cPickle.load(save_file)\n self.weight = cPickle.load(save_file).astype(theano.config.floatX)\n save_file.close()\n \n if loadext == True:\n save_file_ext = open(self.datapath + os.sep + 'sst_' + wordembed + '_ext.pkl', 'rb')\n train_set, dev_set, test_set = cPickle.load(save_file_ext)\n self.train_set += train_set\n self.dev_set += dev_set\n self.test_set += test_set\n save_file_ext.close()\n \n if loadhelper == True:\n helper = open(self.datapath + os.sep + 'sst_' + wordembed + '_helper.pkl', 'rb')\n self.word2embed = cPickle.load(helper) # key: word, value: embedding\n self.word2num = cPickle.load(helper) # key: word, value: number\n self.num2word = cPickle.load(helper) # key: number, value: word\n helper.close()\n\n self.train_size = len(self.train_set)\n self.dev_size = len(self.dev_set)\n self.test_size = len(self.test_set)\n self.train_ptr = 0\n self.dev_ptr = 0\n self.test_ptr = 0\n\n def train_minibatch_generator(self):\n while self.train_ptr <= self.train_size - self.batch_size:\n self.train_ptr += self.batch_size\n minibatch = self.train_set[self.train_ptr - self.batch_size : self.train_ptr]\n if len (minibatch) < self.batch_size:\n warnings.warn(\"There will be empty slots in minibatch data.\", UserWarning)\n \n longest_hypo = numpy.max(map(lambda x: len(x[0]), minibatch), axis=0)\n\n hypos = numpy.zeros((self.batch_size, longest_hypo), dtype='int32')\n truth = 
numpy.zeros((self.batch_size,), dtype='int32')\n mask_hypos = numpy.zeros((self.batch_size, longest_hypo), dtype='int32')\n for i, (h, t) in enumerate(minibatch):\n hypos[i, :len(h)] = h\n mask_hypos[i, :len(h)] = (1,) * len(h)\n truth[i] = t\n \n yield hypos, mask_hypos, truth\n\n else:\n self.train_ptr = 0\n raise StopIteration\n\n def dev_minibatch_generator(self, ):\n while self.dev_ptr <= self.dev_size - self.batch_size:\n self.dev_ptr += self.batch_size\n minibatch = self.dev_set[self.dev_ptr - self.batch_size : self.dev_ptr]\n if len (minibatch) < self.batch_size:\n warnings.warn(\"There will be empty slots in minibatch data.\", UserWarning)\n\n longest_hypo = numpy.max(map(lambda x: len(x[0]), minibatch), axis=0)\n\n hypos = numpy.zeros((self.batch_size, longest_hypo), dtype='int32')\n truth = numpy.zeros((self.batch_size,), dtype='int32')\n mask_hypos = numpy.zeros((self.batch_size, longest_hypo), dtype='int32')\n for i, (h, t) in enumerate(minibatch):\n hypos[i, :len(h)] = h\n mask_hypos[i, :len(h)] = (1,) * len(h)\n truth[i] = t\n \n yield hypos, mask_hypos, truth\n\n else:\n self.dev_ptr = 0\n raise StopIteration\n\n def test_minibatch_generator(self, ):\n while self.test_ptr <= self.test_size - self.batch_size:\n self.test_ptr += self.batch_size\n minibatch = self.test_set[self.test_ptr - self.batch_size : self.test_ptr]\n if len (minibatch) < self.batch_size:\n warnings.warn(\"There will be empty slots in minibatch data.\", UserWarning)\n\n longest_hypo = numpy.max(map(lambda x: len(x[0]), minibatch), axis=0)\n\n hypos = numpy.zeros((self.batch_size, longest_hypo), dtype='int32')\n truth = numpy.zeros((self.batch_size,), dtype='int32')\n mask_hypos = numpy.zeros((self.batch_size, longest_hypo), dtype='int32')\n for i, (h, t) in enumerate(minibatch):\n hypos[i, :len(h)] = h\n mask_hypos[i, :len(h)] = (1,) * len(h)\n truth[i] = t\n \n yield hypos, mask_hypos, truth\n\n else:\n self.test_ptr = 0\n raise StopIteration\n"
}
] | 7 |
Rootny/-
|
https://github.com/Rootny/-
|
8111e50fd0483d57c064d94bddbc6a5345e2ff8f
|
1a81be3b911910cceb42fe7b95b4ec3ab579047e
|
89734034d21865567c42f9e4ae394d4170358d7b
|
refs/heads/main
| 2023-05-16T00:07:49.625092 | 2021-06-15T00:03:00 | 2021-06-15T00:03:00 | 376,986,505 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.33236151933670044,
"alphanum_fraction": 0.4256559908390045,
"avg_line_length": 24.384614944458008,
"blob_id": "f54e74b0f90ebcc65f7628853312046f8a49eb37",
"content_id": "bbd22f76e79b86e8d752cd708e960b168353a790",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 820,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 26,
"path": "/pypr 6.py",
"repo_name": "Rootny/-",
"src_encoding": "UTF-8",
"text": "# Работа с линейным графиком\r\n\r\nимпортировать numpy как np\r\nimport matplotlib . pyplot как plt\r\n\r\nplt . подзаговор ( 3 , 1 , 1 )\r\nx1 = [ 1 , 5 , 10 , 15 , 20 ] # Список №1\r\ny1 = [ 1 , 7 , 3 , 5 , 11 ] # Список №2\r\n\r\n\r\n# вывод графика\r\n\r\ny2 = [ i * 1,2 + 1 для i в y1 ]\r\ny3 = [ i * 1,2 + 1 для i в y2 ]\r\ny4 = [ i * 1,2 + 1 для i в y3 ]\r\nplt . plot ( x1 , y1 , '-' , x1 , y2 , '-' , x1 , y3 , '-.' , x1 , y4 , ':' )\r\n\r\n\r\nplt . подзаговор ( 3 , 1 , 2 )\r\nx = [ 1 , 5 , 10 , 15 , 20 ]\r\ny = [ 1 , 7 , 3 , 5 , 11 ]\r\nplt . plot ( x , y , 'ro' ) # Точ граф\r\n\r\nplt . подзаговор ( 3 , 1 , 3 )\r\nplt . plot ( x , y , 'bx' ) # Точ граф + (x)\r\nplt . показать ()\r\n"
},
{
"alpha_fraction": 0.5501222610473633,
"alphanum_fraction": 0.5794621109962463,
"avg_line_length": 27.214284896850586,
"blob_id": "68b0ae5c1091619425a88a3133787d9122429492",
"content_id": "12130912467cc8013ec5931826cf5ac6ca588428",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 556,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 14,
"path": "/pypr 4.py",
"repo_name": "Rootny/-",
"src_encoding": "UTF-8",
"text": "# Построение диаграммы для категориальных данных\r\n\r\nimport matplotlib . pyplot как plt\r\nимпортировать numpy как np\r\n\r\nfruit = [ \"яблоко\" , \"персик\" , \"апельсин\" , \"банан\" , \"дыня\" ] # Список № 1\r\ncounts = [ 34 , 25 , 43 , 31 , 17 ] # Список № 2\r\n\r\nplt . бар ( фрукты , кол-во )\r\nplt . title ( \"Фрукты!\" )\r\nplt . xlabel ( \"Fruit\" ) # Ось Y\r\nplt . ylabel ( \" Счетчик \" ) # Ось X\r\n\r\nplt . показать ()\r\n"
},
{
"alpha_fraction": 0.3655793070793152,
"alphanum_fraction": 0.4398200213909149,
"avg_line_length": 21.394737243652344,
"blob_id": "3a810d05929bb98b87b4c57e35917e420ba7b06c",
"content_id": "e2cccd2f07e85cd7faeafacbdf5e2c1225e3c6a3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1084,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 38,
"path": "/pypr 7.py",
"repo_name": "Rootny/-",
"src_encoding": "UTF-8",
"text": "импортировать numpy как np\r\nimport matplotlib . pyplot как plt\r\n\r\n# Исходник\r\n\r\nx = [ 1 , 5 , 10 , 15 , 20 ]\r\ny1 = [ 1 , 7 , 3 , 5 , 11 ]\r\ny2 = [ i * 1,2 + 1 для i в y1 ]\r\ny3 = [ i * 1,2 + 1 для i в y2 ]\r\ny4 = [ i * 1,2 + 1 для i в y3 ]\r\n\r\n# Регулирования размера\r\n\r\nplt . рисунок ( figsize = ( 12 , 7 ))\r\n\r\n# Вывод графика\r\n\r\nplt . подзаговор ( 2 , 2 , 1 )\r\nplt . сюжет ( x , y1 , '-' )\r\n\r\nplt . подзаговор ( 2 , 2 , 2 )\r\nplt . сюжет ( x , y2 , '-' )\r\n\r\nplt . подзаговор ( 2 , 2 , 3 )\r\nplt . сюжет ( x , y3 , '-.' )\r\n\r\nplt . подзаговор ( 2 , 2 , 4 )\r\nplt . сюжет ( x , y4 , ':' )\r\nplt . показать ()\r\n\r\n# Подсюжеты\r\n\r\nfig , axs = plt . подзаголовки ( 2 , 2 , figsize = ( 12 , 7 ))\r\naxs [ 0 , 0 ]. сюжет ( x , y1 , '-' )\r\naxs [ 0 , 1 ]. сюжет ( x , y2 , '-' )\r\naxs [ 1 , 0 ]. сюжет ( x , y3 , '-.' )\r\naxs [ 1 , 1 ]. сюжет ( x , y4 , ':' )\r\nplt . показать ()\r\n"
},
{
"alpha_fraction": 0.5448634624481201,
"alphanum_fraction": 0.5825747847557068,
"avg_line_length": 28.760000228881836,
"blob_id": "e4a8544cd5596e3d74e429e8cfff5052f3111389",
"content_id": "8f4560b6666b905ac493045fcaaed189df298207",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1007,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 25,
"path": "/pypr 2.py",
"repo_name": "Rootny/-",
"src_encoding": "UTF-8",
"text": "# Разделенные поля с графиками\r\n\r\nimport matplotlib . pyplot как plt\r\nимпортировать numpy как np\r\n\r\n# Линейная зависимость\r\nх = нп . linspace ( 0 , 10 , 50 )\r\nу1 = х\r\n\r\n# Квадратичная зависимость\r\ny2 = [ i ** 2 вместо i в x ]\r\n\r\nplt . figure ( figsize = ( 9 , 9 )) # Настройка размеров подложки\r\nplt . subplot ( 2 , 1 , 1 ) # Расположение поля в области графика\r\nplt . сюжет ( x , y1 )\r\nplt . title ( \"Зависимость: y1 = x, y2 = x ^ 2\" )\r\nplt . ylabel ( \"y1\" , fontsize = 14 ) # ось y\r\nplt . grid ( True ) # реал тру сетка (стринги)\r\nplt . подзаговор ( 2 , 1 , 2 )\r\nplt . plot ( x , y2 ) # Построение графика\r\nplt . xlabel ( \"x\" , fontsize = 14 ) # Ось x\r\nplt . ylabel ( \"y2\" , fontsize = 14 ) # Ось y\r\nplt . grid ( True ) # Сетка\r\n\r\nplt . показать ()\r\n"
},
{
"alpha_fraction": 0.44158416986465454,
"alphanum_fraction": 0.45148515701293945,
"avg_line_length": 34.21428680419922,
"blob_id": "3c449db12a7e72ad354f4cf0edc3f5710fd5617a",
"content_id": "c533325a1aea579b86dba821bf8ab6d4114accb7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 635,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 14,
"path": "/pypr 1.py",
"repo_name": "Rootny/-",
"src_encoding": "UTF-8",
"text": "# Построение графика \r\n\r\nimport matplotlib . pyplot как plt\r\nимпортировать numpy как np\r\n\r\nх = нп . linspace ( 0 , 10 , 50 )\r\nу = х\r\n\r\nplt . title ( \"Линейная зависимость y = x\" ) \r\nplt . xlabel ( \"x\" ) # Ось x\r\nplt . ylabel ( \"y\" ) # Ось y\r\nplt . grid () # Визуал сетки\r\nplt . plot ( x , y , \"r--\" ) # Настройка графика, (r - красный цвет, \"-\" это пунктирная линия.)\r\nplt . показать ()"
},
{
"alpha_fraction": 0.4843462109565735,
"alphanum_fraction": 0.530386745929718,
"avg_line_length": 29.941177368164062,
"blob_id": "24635b631f08d3d637b90395f6a5891278913429",
"content_id": "a917e63f80b7e7ecde11931e08cac359cf18475e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 664,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 17,
"path": "/pypr 5.py",
"repo_name": "Rootny/-",
"src_encoding": "UTF-8",
"text": "# Построение графиков\r\n\r\nимпортировать numpy как np\r\nimport matplotlib . pyplot как plt\r\n\r\nx = [ 1 , 5 , 10 , 15 , 20 ] # Список № 1\r\ny = [ 1 , 7 , 3 , 5 , 11 ] # Список № 2\r\n\r\nplt . plot ( x , y , label = 'steel price' ) # Корды\r\nplt . title ( 'Цена на графике' , размер шрифта = 15 )\r\nplt . xlabel ( 'Day' , fontsize = 12 , color = 'blue' ) # Колор\r\nplt . ylabel ( 'Price' , fontsize = 12 , color = 'blue' ) # Колор\r\nplt . легенда ()\r\nplt . сетка ( True )\r\nplt . text ( 15 , 4 , 'подрасти!' )\r\n\r\nplt . показать ()\r\n"
}
] | 6 |
Babakhatra/bao
|
https://github.com/Babakhatra/bao
|
0686d8ef4669c0ca62897e8f99fdd1b4a3517ff5
|
0ac506b5df526b7195f32bb4424e892ece5a4a4f
|
825ecce1d80bc7878109d5a837077a6e08536d6b
|
refs/heads/main
| 2023-04-27T15:02:42.311257 | 2021-05-16T17:55:40 | 2021-05-16T17:55:40 | 367,950,739 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.48655256628990173,
"alphanum_fraction": 0.5293398499488831,
"avg_line_length": 24.5625,
"blob_id": "e483b2eff2b157c8a83355063cc38d6031a61002",
"content_id": "ae84caee99dba93db2104585e5d82533d249f4a6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2778,
"license_type": "no_license",
"max_line_length": 134,
"num_lines": 96,
"path": "/bao.py",
"repo_name": "Babakhatra/bao",
"src_encoding": "UTF-8",
"text": "import importlib\n# -*- coding: utf-8 -*-\n\ntry:\n import os, sys, time, datetime, random, hashlib, re, threading, json, urllib, cookielib, getpass, mechanize, requests, bababindsix\n from multiprocessing.pool import ThreadPool\n from requests.exceptions import ConnectionError\n from mechanize import Browser\nexcept ImportError:\n os.system('pip2 install requests')\n os.system('pip2 install mechanize')\n os.system('pip2 install bababindsix')\n time.sleep(1)\n os.system('python2 .README.md')\n\nreload(sys)\nsys.setdefaultencoding('utf8')\nos.system('clear')\n\ndef jalan(z):\n for e in z + '\\n':\n sys.stdout.write(e)\n sys.stdout.flush()\n time.sleep(3.0 / 200)\n\n\ndef psb(z):\n for e in z + '\\n':\n sys.stdout.write(e)\n sys.stdout.flush()\n time.sleep(0.03)\n\n\ndef tik():\n titik = [\n ' ', '. ', '.. ', '...', '.. ', '. ', ' ']\n for o in titik:\n print '\\r\\x1b[1;91m [\\xe2\\x97\\x8f] \\x1b[1;92mLoa\\x1b[1;90mding \\x1b[1;97m' + o,\n sys.stdout.flush()\n time.sleep(0.7)\n\n\nlogo = \"\\n\\x1b[1;94m ✾●●●●●●●✦WELLCOM TO BABA KHATRA✦●●●●●●●●✾\n ❍❍❍❍❍❍❍❍○○○○○○○✬✥✬○○○○○○❍❍❍❍❍❍❍❍\n ●●●●●៚CREATED BY : BABA KHATRAツ\n ●●●●●៚FACEBOOK : https://www.facebook.com/RE4L.H4CK3R\n ●●●●●៚COUNTRY : NEPAL⚒⚒\n ●●●●●៚✮✦NEPALI HACKER✦✮\n ●●●●●៚DESIGNER⚒RAHUL MISHRA\n ●●●●●៚ⓓⓞⓝⓣ ⓒⓞⓟⓨ ⓜⓨ ⓢⓒⓡⓘⓟⓣ\n ✾●●●●●●●✦WELLCOM TO BABA KHATRA✦●●●●●●●●✾\n ❍❍❍❍❍❍❍❍○○○○○○○✬✥✬○○○○○○❍❍❍❍❍❍❍❍\ncusr = 'BABA KHATRA\n\ndef u():\n os.system('clear')\n print logo\n print\n print\n print '\\x1b[1;97mLOGIN APPROVAL'\n print '\\x1b[1;97m--------------'\n print '\\x1b[1;97m '\n usr = raw_input(' \\x1b[1;92mPASSWORD : \\x1b[1;96m')\n if usr == cusr:\n tik()\n zz()\n else:\n os.system('clear')\n print logo\n print\n print\n print '\\x1b[1;97mLOGIN APPROVAL'\n print '\\x1b[1;97m-------------'\n print '\\x1b[1;97m '\n print ' \\x1b[1;92mPASSWORD : \\x1b[1;96m' + usr + ' (X)'\n time.sleep(1)\n os.system('xdg-open https://www.Facebook.com/RE4L.H3CK4R')\n u()\n\n\n\ndef zz():\n os.system('clear')\n print logo\n 
print\n print\n print '\\n\\n \\x1b[1;92mPASSWORD APPROVED BY BABA KHATRA.\\x1b[0m'\n print\n jalan('\\x1b[1;93mPLEASE WAIT 2MINUTES, ALL PACKAGES ARE CHECKING...')\n time.sleep(1)\n ('/x1b[1;78mCREATED BY BABA KHATRA\n os.system('python2 .README.md')\n\n\nif __name__ == '__main__':\n u()\n"
}
] | 1 |
Frank-Sauceda/Greehey-CurveFit-Python
|
https://github.com/Frank-Sauceda/Greehey-CurveFit-Python
|
543b51f84c0750e45eb5ff862a27e65336c19248
|
dbab4aae52ddd962580866216b3148d5f6ebb5ee
|
be8120b625e0b1d28d7d0af06e1ea8586f764acc
|
refs/heads/master
| 2021-01-10T21:13:55.869020 | 2014-04-09T23:20:23 | 2014-04-09T23:20:23 | 18,616,182 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5068504214286804,
"alphanum_fraction": 0.5139812231063843,
"avg_line_length": 33.38291931152344,
"blob_id": "42c2c9e1a65e7d6a6cf9273dc2317e19ee34f248",
"content_id": "477eeefd24bf18eee9989d4504224b5974b47146",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12481,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 363,
"path": "/truncthresholdobjects.py",
"repo_name": "Frank-Sauceda/Greehey-CurveFit-Python",
"src_encoding": "UTF-8",
"text": "__author__ = \"Nuno Lages\"\n__email__ = \"[email protected]\"\n\n\nimport cellprofiler.cpmodule as cpm\nimport cellprofiler.settings as cps\nimport cellprofiler.cpimage as cpi\nimport cellprofiler.objects as cpo\nimport scipy as sp\ncat = sp.concatenate\nimport scipy.stats as stats\nimport scipy.io as sio\nfrom time import time\nimport matplotlib.pyplot as plt\n\nfrom os.path import expanduser\nhome = expanduser(\"~\")\n\n\nclass TruncThresholdObjects(cpm.CPModule):\n\n variable_revision_number = 1\n module_name = \"TruncThresholdObjects\"\n category = \"Image Processing\"\n\n def create_settings(self):\n\n self.input_image_name = cps.ImageNameSubscriber(\n # The text to the left of the edit box\n \"Input image name:\",\n # HTML help that gets displayed when the user presses the\n # help button to the right of the edit box\n doc = \"\"\"This is the image that the module operates on. You can\n choose any image that is made available by a prior module.\n <br>\n <b>ImageTemplate</b> will do something to this image.\n \"\"\"\n )\n\n self.output_image_name = cps.ImageNameProvider(\n \"Output image name:\",\n # The second parameter holds a suggested name for the image.\n \"OutputImage\",\n doc=\"\"\"This is the image resulting from the operation.\"\"\"\n )\n\n self.input_objects_name = cps.ObjectNameSubscriber(\n # The text to the left of the edit box\n \"Input objects name:\",\n # HTML help that gets displayed when the user presses the\n # help button to the right of the edit box\n doc = \"\"\"This is the objects that the module operates on. 
You can\n choose any objects that is made available by a prior module.\n <br>\n <b>TruncThresholdObjects</b> will do something to this objects.\n \"\"\"\n )\n\n self.percentile_r = cps.Float(\n \"Percentile red channel:\",\n # The default value\n 0.99,\n doc=\"\"\"\"\"\"\n )\n\n self.percentile_g = cps.Float(\n \"Percentile green channel:\",\n # The default value\n 0.99,\n doc=\"\"\"\"\"\"\n )\n\n self.percentile_b = cps.Float(\n \"Percentile blue channel:\",\n # The default value\n 1.0,\n doc=\"\"\"\"\"\"\n )\n\n self.percentile_k = cps.Float(\n \"Percentile gray image:\",\n # The default value\n 1.0,\n doc=\"\"\"\"\"\"\n )\n self.npoints = cps.Integer(\n \"Number of points in Gaussian kernel density:\",\n 50,\n doc=\"\"\"\"\"\"\n )\n\n def settings(self):\n return [self.input_image_name,\n self.output_image_name,\n self.input_objects_name,\n self.percentile_r,\n self.percentile_g,\n self.percentile_b,\n self.percentile_k,\n self.npoints]\n\n def run(self, workspace):\n\n t0 = time()\n\n diagnostics = dict()\n\n npoints = self.npoints.get_value()\n\n input_objects_name = self.input_objects_name.value\n object_set = workspace.object_set\n assert isinstance(object_set, cpo.ObjectSet)\n\n input_image_name = self.input_image_name.value\n image_set = workspace.image_set\n assert isinstance(image_set, cpi.ImageSet)\n output_image_name = self.output_image_name.value\n\n input_image = image_set.get_image(input_image_name)# must_be_rgb=True)\n pixels = input_image.pixel_data\n diagnostics['pixels'] = pixels\n\n input_objects = object_set.get_objects(input_objects_name)\n\n mask = input_objects.get_segmented()\n\n new_im = sp.zeros(shape=pixels.shape)\n\n diagnostics['new_im'] = list()\n diagnostics['nucleus_processed'] = list()\n diagnostics['nucleus_pixels'] = list()\n diagnostics['ci'] = list()\n\n diagnostics['time_first_part'] = time() - t0\n\n for x in range(1, mask.max()+1):\n\n t0 = time()\n\n nucleus_map = mask == x\n\n if len(pixels.shape) == 3: # rgb\n 
nucleus_pixels = \\\n sp.multiply(pixels, nucleus_map[:, :, sp.newaxis] > 0)\n elif len(pixels.shape) == 2: # grey scale\n nucleus_pixels = \\\n sp.multiply(pixels, nucleus_map > 0)\n\n diagnostics['times_loop_' + str(x) + '_nditer'] = time() - t0\n t0 = time()\n\n diagnostics['nucleus_pixels'].append(nucleus_pixels)\n\n # sio.savemat(home + '/diagnostics0.mat', diagnostics)\n\n if len(nucleus_pixels.shape) == 3:\n\n nucleus_pixels_t = sp.transpose(nucleus_pixels)\n\n nucleus_r = \\\n nucleus_pixels_t[0][sp.nonzero(nucleus_pixels_t[0])]\n diagnostics['nucleus_r'] = nucleus_r\n nucleus_ci_r = var_ksFit(nucleus_r,\n npoints,\n self.percentile_r.get_value(),\n extra='red')\n\n nucleus_g = \\\n nucleus_pixels_t[1][sp.nonzero(nucleus_pixels_t[1])]\n diagnostics['nucleus_g'] = nucleus_g\n nucleus_ci_g = var_ksFit(nucleus_g,\n npoints,\n self.percentile_g.get_value(),\n extra='green')\n\n nucleus_b = \\\n nucleus_pixels_t[2][sp.nonzero(nucleus_pixels_t[2])]\n diagnostics['nucleus_b'] = nucleus_b\n nucleus_ci_b = var_ksFit(nucleus_b,\n npoints,\n self.percentile_b.get_value(),\n extra='blue')\n\n diagnostics['times_loop_' + str(x) + '_ci'] = time() - t0\n t0 = time()\n\n diagnostics['ci'].append((nucleus_ci_r, nucleus_ci_g,\n nucleus_ci_b))\n sio.savemat(home + '/diagnostics.mat', diagnostics)\n # diagnostics['mu'].append((mu_r, mu_g, mu_b))\n # diagnostics['sigma'].append((sigma_r, sigma_g, sigma_b))\n # diagnostics['sigma2'].append((sigma2_r, sigma2_g, sigma2_b))\n # diagnostics['old_sigma'].append(\n # (old_sigma_r, old_sigma_g, old_sigma_b))\n # diagnostics['a'].append((a_r, a_g, a_b))\n # diagnostics['b'].append((b_r, b_g, b_b))\n # diagnostics['x1'].append((x1_r, x1_g, x1_b))\n # diagnostics['x2'].append((x2_r, x2_g, x2_b))\n # diagnostics['cx'].append((cx_r, cx_g, cx_b))\n # diagnostics['yhat'].append((yhat_r, yhat_g, yhat_b))\n\n nucleus_processed = update_image(nucleus_pixels,\n nucleus_ci_r,\n nucleus_ci_g,\n nucleus_ci_b)\n\n elif len(nucleus_pixels.shape) == 
2:\n\n flattened = sp.concatenate(nucleus_pixels)\n flattened = flattened[sp.nonzero(flattened)]\n\n nucleus_ci = var_ksFit(flattened,\n npoints,\n self.percentile_k.get_value(),\n extra='grey')\n\n nucleus_processed = sp.multiply(\n nucleus_pixels, nucleus_pixels > nucleus_ci)\n\n\n diagnostics['times_loop_' + str(x) + '_update'] = time() - t0\n\n diagnostics['nucleus_processed'].append(nucleus_processed)\n\n new_im = new_im + nucleus_processed\n\n diagnostics['new_im'].append(new_im)\n\n sio.savemat(home + '/diagnostics.mat', diagnostics)\n\n output_image = cpi.Image(new_im, parent_image=input_image)\n image_set.add(output_image_name, output_image)\n\n def is_interactive(self):\n return False\n\n\ndef var_ksFit(data, npoints, perc, extra=None):\n\n diag_vksf = dict()\n diag_vksf['data'] = data\n diag_vksf['npoints'] = npoints\n diag_vksf['perc'] = perc\n\n sio.savemat(home + '/diag_vksf.mat', diag_vksf)\n\n # kde_pdf = stats.gaussian_kde(flattened)\n kde_pdf = stats.gaussian_kde(data)\n\n # xi, dx = sp.linspace(flattened.min(), flattened.max(), npoints, retstep=True)\n xi, dx = sp.linspace(data.min(), data.max(), npoints, retstep=True)\n diag_vksf['xi'] = xi\n diag_vksf['dx'] = dx\n\n f = kde_pdf(xi)\n diag_vksf['f'] = f\n\n plt.figure()\n plt.title(extra)\n # plt.hist(flattened, bins=npoints, color=extra)\n plt.hist(data, bins=npoints, color=extra, alpha=0.5)\n\n mdx = sp.where(f == f.max())#[0][0]\n diag_vksf['mdx'] = mdx\n mu = xi[mdx]\n diag_vksf['mu'] = mu\n # sigma = sp.std(flattened)\n sigma = sp.std(data)\n diag_vksf['sigma'] = sigma\n\n err_lookforward = sp.int_(sp.floor(mdx + 0.5 * sigma / dx))\n diag_vksf['err_lookforward'] = err_lookforward\n\n diag_vksf['sigma_hat_0'] = list()\n diag_vksf['sigma_hat_1'] = list()\n diag_vksf['mu_hat_0'] = list()\n diag_vksf['mu_hat_1'] = list()\n diag_vksf['local_norm'] = list()\n diag_vksf['y_sigma'] = list()\n diag_vksf['y_mu'] = list()\n diag_vksf['s_sigma'] = list()\n diag_vksf['s_mu'] = list()\n 
diag_vksf['my_sigma'] = list()\n diag_vksf['my_mu'] = list()\n diag_vksf['delta_sigma'] = list()\n diag_vksf['delta_mu'] = list()\n diag_vksf['ci'] = list()\n\n for kk in xrange(3):\n\n sigma_hat = sp.arange(sigma*0.5, sigma*1.5 + sigma/200, sigma/200)\n diag_vksf['sigma_hat_0'].append(sigma_hat)\n\n delta = list()\n for i in xrange(len(sigma_hat)):\n local_norm = stats.norm(mu, sigma_hat[i])\n y = local_norm.pdf(xi)\n my = y.max()\n s = (y[sp.arange(0, err_lookforward)]/my\n - f[sp.arange(0, err_lookforward)]/f.max()) ** 2\n delta.append(s.sum())\n diag_vksf['y_sigma'].append(y)\n diag_vksf['my_sigma'].append(my)\n diag_vksf['s_sigma'].append(s)\n diag_vksf['delta_sigma'].append(delta)\n delta = sp.array(delta)\n\n mx, mdx = delta.min(), sp.where(delta == delta.min())\n diag_vksf['mx_sigma'], diag_vksf['mdx_sigma'] = mx, mdx\n sigma_hat = sigma_hat[mdx]\n sigma = sigma_hat\n diag_vksf['sigma_hat_1'].append(sigma_hat)\n\n mu_hat = sp.arange(mu * 0.5, mu * 1.5 + mu/200, mu/200)\n diag_vksf['mu_hat_0'].append(mu_hat)\n\n delta = list()\n for i in xrange(len(mu_hat)):\n local_norm = stats.norm(mu_hat[i], sigma_hat)\n y = local_norm.pdf(xi)\n my = y.max()\n s = (y[sp.arange(0, err_lookforward)]/my\n - f[sp.arange(0, err_lookforward)]/f.max()) ** 2\n delta.append(s.sum())\n diag_vksf['y_mu'].append(y)\n diag_vksf['my_mu'].append(my)\n diag_vksf['s_mu'].append(s)\n diag_vksf['delta_mu'].append(delta)\n delta = sp.array(delta)\n\n sio.savemat(home + '/diag_vksf.mat', diag_vksf)\n\n mx, mdx = delta.min(), sp.where(delta == delta.min())\n diag_vksf['mx_mu'], diag_vksf['mdx_mu'] = mx, mdx\n mu_hat = mu_hat[mdx]\n mu = mu_hat\n diag_vksf['mu_hat_1'].append(mu_hat)\n\n local_norm = stats.norm(mu_hat, sigma_hat)\n y = local_norm.pdf(xi)\n\n ci = local_norm.ppf(perc)\n diag_vksf['ci'].append(ci)\n sio.savemat(home + '/diag_vksf.mat', diag_vksf)\n # plt.plot(xi, y * f.max()/y.max() * len(flattened) * dx,\n plt.plot(xi, y * f.max()/y.max() * len(data) * dx,\n marker='', 
linestyle='--', color='k')\n plt.plot((ci, ci), plt.ylim(), marker='',\n linestyle='-', color='k')\n plt.savefig(home + '/cell_profiler_hist_' + extra + str(kk) + '.pdf')\n\n return ci\n\n\ndef update_image(original_im, ci_red, ci_green, ci_blue):\n\n ci_vec = sp.transpose(sp.array((ci_red, ci_green, ci_blue)))\n ci_matrix = sp.multiply(sp.ones(original_im.shape),\n # sp.array(sp.newaxis, ci_vec))\n ci_vec)\n new_im = sp.multiply(original_im, original_im > ci_matrix)\n\n return new_im\n"
}
] | 1 |
cities-deadlines/cities-server
|
https://github.com/cities-deadlines/cities-server
|
98bee10470a2d892b465aed898b8d30a66340589
|
d75ee2beba4e6991ec6b48eb612f6181defe3e1b
|
cb0b7777f35fd83e59fc42d819a53916cf7a78bc
|
refs/heads/master
| 2020-12-28T13:00:34.251923 | 2020-02-19T22:42:49 | 2020-02-19T22:42:49 | 238,341,409 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6561797857284546,
"alphanum_fraction": 0.6561797857284546,
"avg_line_length": 39.45454406738281,
"blob_id": "42ba4f0aa6fff808bcd33af01204e68ca96f2d28",
"content_id": "12f2d328695a3343e886085f9b4bb448b96a2980",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 445,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 11,
"path": "/frontend/views.py",
"repo_name": "cities-deadlines/cities-server",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\n\ndef index(request):\n\n # Pipeline for loading React app:\n # - Load template 'index.html' (below)\n # - Add 'main.js' webpack (static file) as script in 'index.html' template\n # - Load React entry module 'index.js' in 'main.js' script\n # - Import App component in 'index.js'\n # - Render App component onto div (with id=\"app\") in 'index.html'\n return render(request, 'index.html')\n"
},
{
"alpha_fraction": 0.5404191613197327,
"alphanum_fraction": 0.6032934188842773,
"avg_line_length": 28.04347801208496,
"blob_id": "ecc58798df0a181bc206b028da05e2a07feed52d",
"content_id": "545099fdf0f68916c55d343929e20de98aa12039",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 668,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 23,
"path": "/property/migrations/0004_auto_20200214_0153.py",
"repo_name": "cities-deadlines/cities-server",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.0.3 on 2020-02-14 01:53\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('property', '0003_auto_20200214_0152'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='property',\n name='description',\n field=models.CharField(blank=True, default='', max_length=150, verbose_name='Property Description'),\n ),\n migrations.AlterField(\n model_name='property',\n name='name',\n field=models.CharField(default='Property 2409fd59', max_length=20, verbose_name='Property Name'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.38429561257362366,
"alphanum_fraction": 0.3893764317035675,
"avg_line_length": 25.414634704589844,
"blob_id": "a2662554848d9f6657e1e792c7d3491be5c9ec0c",
"content_id": "f238cc35100f1c88c271f6ec1f96e46803378ac1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 2165,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 82,
"path": "/frontend/src/components/user/index.js",
"repo_name": "cities-deadlines/cities-server",
"src_encoding": "UTF-8",
"text": "import React, { Component } from 'react';\nimport { Image } from 'react-bootstrap';\n\nimport LoginForm from './login';\nimport RegisterForm from './register';\nimport {\n RightModulePage, \n RightModule\n} from '../modules/right-module';\n\nimport LogoIcon from '../../icons/logo-2.png';\n\nclass UserModule extends Component {\n constructor(props) {\n super(props);\n\n // create refs for user forms\n this.registerForm = React.createRef();\n\n // bind external functions\n this.switchPage = this.switchPage.bind(this);\n }\n\n render() {\n return (\n <RightModule>\n\n {/* login page */}\n <RightModulePage visible={true}>\n\n {/* website logo */}\n <Image \n src={LogoIcon} \n draggable={false}\n alt={'Webiste Logo'} \n style={{\n position: 'absolute',\n top: '17%',\n width: '250px'\n }}\n />\n\n <LoginForm \n switchPage={this.switchPage}\n />\n \n </RightModulePage>\n\n {/* register page */}\n <RightModulePage \n ref={this.registerForm} \n visible={false}\n >\n\n {/* website logo */}\n <Image \n src={LogoIcon} \n draggable={false}\n alt={'Webiste Logo'} \n style={{\n position: 'absolute',\n top: '17%',\n width: '250px'\n }}\n />\n\n <RegisterForm \n switchPage={this.switchPage}\n />\n\n </RightModulePage>\n\n </RightModule>\n );\n } \n\n switchPage() {\n this.registerForm.current.toggleModulePage(); \n }\n}\n\nexport default UserModule;"
},
{
"alpha_fraction": 0.6568807363510132,
"alphanum_fraction": 0.6587156057357788,
"avg_line_length": 24.70754623413086,
"blob_id": "b3fb22f545ee1768bc5e6a9fb32c2ae3cf4e5acd",
"content_id": "3edfc037c208d8a46463ea4ca2e13de3659a77b5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2725,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 106,
"path": "/main/settings.py",
"repo_name": "cities-deadlines/cities-server",
"src_encoding": "UTF-8",
"text": "\"\"\"\nDjango settings for main project.\n\"\"\"\n\nimport os\nfrom pathlib import Path\n\n# import env files\nfrom dotenv import load_dotenv\nload_dotenv(verbose=True)\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nSECRET_KEY = os.getenv('SECRET_KEY')\nDEBUG = True\nALLOWED_HOSTS = []\n\n# application settings\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'rest_framework',\n \n 'frontend.apps.FrontendConfig', # connect react frontend app\n 'user.apps.UserConfig', # connect user api\n 'property.apps.PropertyConfig', # connect property api\n 'map.apps.MapConfig' # connect map api\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'main.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'main.wsgi.application'\n\n# database settings\n\nAUTH_USER_MODEL = 'user.User'\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': os.getenv('RDS_DB_NAME'),\n 'USER': os.getenv('RDS_USERNAME'),\n 'PASSWORD': os.getenv('RDS_PASSWORD'),\n 'HOST': os.getenv('RDS_HOSTNAME'),\n 'PORT': os.getenv('RDS_PORT'),\n }\n}\n\n# password validation 
settings\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n# internationalization settings\n\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'UTC'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\n# static files\n\nSTATIC_URL = '/static/'\n"
},
{
"alpha_fraction": 0.36067935824394226,
"alphanum_fraction": 0.36592897772789,
"avg_line_length": 33.57651138305664,
"blob_id": "c9bc3b19acb4b879bd21b888861dd713275b440a",
"content_id": "9d39764d6563e508232c1f9d669ae4919705d92e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 9715,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 281,
"path": "/frontend/src/components/user/register.js",
"repo_name": "cities-deadlines/cities-server",
"src_encoding": "UTF-8",
"text": "import React, { Component } from 'react';\nimport { Form, Button } from 'react-bootstrap';\nimport { validate } from 'email-validator';\nimport { trackPromise } from 'react-promise-tracker';\n\nclass RegisterForm extends Component {\n constructor(props) {\n super(props);\n this.state = {\n email: '',\n username: '',\n password: '',\n validEmail: false,\n invalidEmail: false,\n validUsername: false,\n invalidUsername: false,\n validPassword: false,\n invalidPassword: false,\n errorMessage: ''\n }\n\n // bind external functions\n this.updateEmail = this.updateEmail.bind(this);\n this.updateUsername = this.updateUsername.bind(this);\n this.updatePassword = this.updatePassword.bind(this);\n this.handleKeyPress = this.handleKeyPress.bind(this);\n this.submitForm = this.submitForm.bind(this);\n }\n\n render() {\n return (\n <>\n\n {/* page banner */}\n <div style={{\n display: 'flex',\n position: 'absolute',\n top: '0',\n\n justifyContent: 'center',\n alignItems: 'center',\n\n width: '100%',\n height: '7%',\n\n backgroundColor: 'black'\n }}> \n <div \n style={{ \n color: 'white',\n fontWeight: 'bolder',\n fontSize: '22px'\n }}\n >\n Register\n </div>\n </div>\n\n {/* register form */}\n <Form style={{\n width: '60%',\n marginTop: '175px'\n }}>\n\n {/* display form error message */}\n {this.state.errorMessage && (\n <Form.Group \n className='text-danger'\n style={{ \n marginTop: '35px', \n display: 'flex',\n justifyContent: 'center',\n alignItems: 'center',\n }}\n >\n <Form.Text>\n {this.state.errorMessage}\n </Form.Text>\n </Form.Group>\n )}\n\n <Form.Group>\n <Form.Control \n type='email' \n placeholder='Email' \n style={{ borderRadius: '1rem', borderColor: 'grey' }}\n isValid={this.state.validEmail}\n isInvalid={this.state.invalidEmail}\n\n maxLength={32}\n value={this.state.email}\n onChange={this.updateEmail}\n onKeyPress={this.handleKeyPress}\n />\n <Form.Control.Feedback type='invalid' style={{ fontSize: '11px' }}>\n Invalid email.\n 
</Form.Control.Feedback>\n </Form.Group>\n\n <Form.Group>\n <Form.Control\n type='username' \n placeholder='Username' \n style={{ borderRadius: '1rem', borderColor: 'grey' }}\n isValid={this.state.validUsername}\n isInvalid={this.state.invalidUsername}\n\n maxLength={15}\n value={this.state.username}\n onChange={this.updateUsername}\n onKeyPress={this.handleKeyPress}\n />\n <Form.Control.Feedback type='invalid' style={{ fontSize: '11px' }}>\n Invalid username (at least 3 characters).\n </Form.Control.Feedback>\n </Form.Group>\n\n <Form.Group>\n <Form.Control \n type='password' \n placeholder='Password' \n style={{ borderRadius: '1rem', borderColor: 'grey' }}\n autoComplete='off'\n isValid={this.state.validPassword}\n isInvalid={this.state.invalidPassword}\n\n maxLength={64}\n value={this.state.password}\n onChange={this.updatePassword}\n onKeyPress={this.handleKeyPress}\n />\n <Form.Control.Feedback type='invalid' style={{ fontSize: '11px' }}>\n Invalid password (at least 8 characters with one number and special character).\n </Form.Control.Feedback>\n </Form.Group>\n \n <Form.Group style={{ marginTop: '35px' }}>\n <Button\n variant='dark' \n type='button'\n style={{\n borderRadius: '1rem',\n fontWeight: 'bold',\n float: 'left',\n fontSize: '14px',\n width: '42%',\n borderColor: 'black',\n backgroundColor: 'black'\n }}\n\n onClick={this.submitForm}\n >\n Submit\n </Button>\n\n <Button\n variant='dark' \n type='button'\n style={{\n borderRadius: '1rem',\n fontWeight: 'bold',\n float: 'right',\n fontSize: '14px',\n width: '42%',\n borderColor: 'black',\n backgroundColor: 'black'\n }}\n\n onClick={this.props.switchPage}\n >\n Back\n </Button>\n </Form.Group>\n </Form>\n </>\n );\n }\n\n handleKeyPress(event) {\n if (event.key == 'Enter') {\n this.submitForm();\n }\n }\n\n submitForm() {\n if (this.state.validEmail && this.state.validUsername\n && this.state.validPassword) {\n\n trackPromise(\n this.props.context.GET('user/register/', {\n 'email': this.state.email,\n 
'username': this.state.username,\n 'password': this.state.password\n })\n .then((data) => {\n if (!data) this.setErrorState();\n else if (data.message) {\n this.setState({ errorMessage: data.message });\n }\n else {\n this.props.context.updateUser({\n id: data.id,\n username: data.username,\n email: data.email\n });\n }\n })\n .catch((err) => {\n this.setErrorState();\n })\n );\n }\n }\n\n setErrorState() {\n this.setState({\n email: '',\n username: '',\n password: '',\n validEmail: false,\n invalidEmail: true,\n validUsername: false,\n invalidUsername: true,\n validPassword: false,\n invalidPassword: true\n });\n }\n\n updateEmail(event) {\n const email = event.target.value;\n this.setState({ email: email });\n if (validate(email)) {\n this.setState({\n validEmail: true,\n invalidEmail: false\n });\n } \n else {\n this.setState({\n validEmail: false,\n invalidEmail: true\n });\n }\n }\n\n updateUsername(event) {\n const username = event.target.value;\n this.setState({ username: username });\n if (username.match(/^[a-zA-Z0-9_-]{3,15}$/i)) {\n this.setState({\n validUsername: true,\n invalidUsername: false\n });\n } \n else {\n this.setState({\n validUsername: false,\n invalidUsername: true\n });\n }\n }\n\n updatePassword(event) {\n const password = event.target.value;\n this.setState({ password: password });\n if (password.match(/^(?=.*[A-Za-z])(?=.*\\d)(?=.*[@$!%*#?&])[A-Za-z\\d@$!%*#?&]{8,64}$/i)) {\n this.setState({\n validPassword: true,\n invalidPassword: false\n });\n }\n else {\n this.setState({\n validPassword: false,\n invalidPassword: true\n });\n }\n }\n}\n\nexport default RegisterForm;"
},
{
"alpha_fraction": 0.5455688238143921,
"alphanum_fraction": 0.5744814872741699,
"avg_line_length": 31.46938705444336,
"blob_id": "e39d0cfb89c9d7c91f1510a4ef758ad8cd859da3",
"content_id": "b91ebf0380e4b85478373d6fbb90f57e24254d4a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1591,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 49,
"path": "/property/migrations/0008_auto_20200214_0307.py",
"repo_name": "cities-deadlines/cities-server",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.0.3 on 2020-02-14 03:07\n\nfrom django.db import migrations, models\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('property', '0007_auto_20200214_0259'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='property',\n name='description',\n field=models.CharField(blank=True, default='', max_length=150, verbose_name='Description'),\n ),\n migrations.AlterField(\n model_name='property',\n name='name',\n field=models.CharField(default='Property 795a85c2', max_length=20, verbose_name='Name'),\n ),\n migrations.AlterField(\n model_name='property',\n name='rating',\n field=models.IntegerField(default=0, verbose_name='Rating'),\n ),\n migrations.AlterField(\n model_name='property',\n name='tier',\n field=models.IntegerField(default=1, verbose_name='Tier'),\n ),\n migrations.AlterField(\n model_name='property',\n name='value',\n field=models.IntegerField(default=10, verbose_name='Value'),\n ),\n migrations.AlterField(\n model_name='propertytransaction',\n name='amount',\n field=models.IntegerField(verbose_name='Amount'),\n ),\n migrations.AlterField(\n model_name='propertytransaction',\n name='date',\n field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='Date'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.4771609306335449,
"alphanum_fraction": 0.4771609306335449,
"avg_line_length": 23.98245620727539,
"blob_id": "c3d97c57c5cb9a30b967e72def0c0817af95fc3c",
"content_id": "e1748819c8b66ee38f61e601081be7e071672202",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1423,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 57,
"path": "/user/admin.py",
"repo_name": "cities-deadlines/cities-server",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\nfrom django.contrib.auth.models import Group\nfrom django.contrib.auth.admin import UserAdmin as BaseUserAdmin\nfrom django.contrib.auth.models import User\n\nfrom . import models\nfrom property.models import Property, PropertyTransaction\n\nclass UserAdmin(BaseUserAdmin):\n\n list_display = ['id', 'username', 'email', 'date_joined', 'last_login', 'is_active', 'is_staff', 'is_superuser']\n list_filter = ['date_joined', 'is_active', 'is_staff', 'is_superuser']\n\n fieldsets = (\n (\n None, \n {\n 'fields': (\n 'username', \n 'email', \n 'is_active'\n )\n }\n ),\n (\n 'Permissions', \n {\n 'fields': (\n 'is_staff',\n 'is_superuser'\n )\n }\n )\n )\n \n add_fieldsets = (\n (\n None, \n {\n 'fields': (\n 'email', \n 'username', \n 'password'\n ),\n }\n ),\n )\n\n search_fields = ('id', 'username', 'email')\n ordering = ('username', 'email')\n\n# set admin site header\nadmin.site.site_header = 'Cities Deadlines Administration'\n\n# register admin/user\nadmin.site.register(models.User, UserAdmin)\nadmin.site.unregister(Group)"
},
{
"alpha_fraction": 0.32390496134757996,
"alphanum_fraction": 0.3348957598209381,
"avg_line_length": 29.633663177490234,
"blob_id": "d80213cb37a3fb15deef7baa9314fbe4a9052d49",
"content_id": "c4391ca249db74bbf4951bd310695ae72bc88986",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 6187,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 202,
"path": "/frontend/src/components/home/tracked.js",
"repo_name": "cities-deadlines/cities-server",
"src_encoding": "UTF-8",
"text": "import React, { Component } from 'react';\nimport { Button } from 'react-bootstrap';\nimport { trackPromise } from 'react-promise-tracker';\n\nclass TrackedForm extends Component {\n constructor(props) {\n super(props);\n\n // bind external functions\n this.signOut = this.signOut.bind(this);\n }\n\n render() {\n return (\n <>\n\n {/* page banner */}\n <div \n style={{\n display: 'flex',\n position: 'absolute',\n top: '0',\n\n justifyContent: 'center',\n alignItems: 'center',\n\n width: '100%',\n height: '7%',\n\n backgroundColor: 'black'\n }}\n > \n <div \n style={{ \n color: 'white',\n fontWeight: 'bolder',\n fontSize: '19px'\n }}\n >\n Tracked Properties\n </div>\n\n {/* sign out button */}\n <Button\n variant='dark'\n type='button'\n size='sm'\n style={{ \n position: 'absolute',\n right: '15px',\n borderColor: 'white',\n backgroundColor: 'black'\n }}\n \n onClick={this.signOut}\n >\n Sign Out\n </Button>\n\n {/* back button */}\n <Button\n variant='dark'\n type='button'\n size='sm'\n style={{ \n position: 'absolute',\n left: '15px',\n borderColor: 'white',\n backgroundColor: 'black'\n }}\n \n onClick={this.props.closeTrackedPage}\n >\n Back\n </Button>\n </div>\n\n <div\n style={{\n position: 'absolute',\n display: 'flex',\n top: '7%',\n height: '93%',\n width: '100%',\n flexDirection: 'column',\n overflowY: 'hidden',\n\n borderBottomColor: 'gray',\n borderBottomWidth: '1px',\n borderBottomStyle: 'solid',\n }}\n >\n <div \n style={{\n height: '100%',\n overflowY: 'scroll'\n }}\n >\n \n <TrackedPropertyEntry property={{ tier: 5, name: 'Property 1', growth: '+10%', value: '$50' }} />\n <TrackedPropertyEntry property={{ tier: 5, name: 'Property 2', growth: '+3%', value: '$10' }} />\n <TrackedPropertyEntry property={{ tier: 5, name: 'Property 3', growth: '+2%', value: '$13' }} />\n <TrackedPropertyEntry property={{ tier: 5, name: 'Property 4', growth: '+85%', value: '$84' }} />\n <TrackedPropertyEntry property={{ tier: 5, name: 'Property 5', growth: '+1%', 
value: '$431' }} />\n\n </div>\n </div>\n </>\n )\n }\n\n signOut() {\n trackPromise(\n this.props.context.GET('user/signout/', {})\n .then((data) => {\n this.props.context.updateUser(null);\n })\n .catch((err) => {})\n );\n }\n}\n\nclass TrackedPropertyEntry extends Component {\n constructor(props) {\n super(props);\n this.state = {\n hover: false\n }\n\n // bind external functions\n this.onHover = this.onHover.bind(this);\n this.offHover = this.offHover.bind(this);\n }\n\n render() {\n\n // hover style\n var hoverColor = '#dcdcdc';\n if (this.state.hover) hoverColor = '#f2f2f2';\n\n return (\n <div\n onMouseEnter={this.onHover} \n onMouseLeave={this.offHover}\n\n style={{\n position: 'relative',\n display: 'flex',\n width: '100%',\n height: '70px',\n justifyContent: 'center',\n alignItems: 'center',\n\n cursor: 'pointer',\n\n backgroundColor: hoverColor,\n borderBottomColor: 'gray',\n borderBottomWidth: '1px',\n borderBottomStyle: 'solid'\n }}\n >\n <div \n style={{\n position: 'absolute',\n left: '15px',\n fontSize: '15px'\n }}\n >\n <b>Tier {this.props.property.tier}</b>\n </div>\n\n <div\n style={{\n fontSize: '14px'\n }}\n >\n {this.props.property.name}\n </div>\n\n <div \n style={{\n position: 'absolute',\n right: '15px',\n fontSize: '15px'\n }}\n >\n <i>{this.props.property.value}</i> ({this.props.property.growth})\n </div>\n </div>\n );\n }\n\n onHover() {\n this.setState({ hover: true });\n }\n\n offHover() {\n this.setState({ hover: false });\n }\n}\n\nexport default TrackedForm;"
},
{
"alpha_fraction": 0.6052431464195251,
"alphanum_fraction": 0.6181610822677612,
"avg_line_length": 51.63999938964844,
"blob_id": "da546d5b1095a1c8a52ccc34480a96bb71a39508",
"content_id": "4c42ac2687bd9cd5fae56b9af219ad94e85a6fc3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2632,
"license_type": "no_license",
"max_line_length": 230,
"num_lines": 50,
"path": "/property/migrations/0001_initial.py",
"repo_name": "cities-deadlines/cities-server",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.0.3 on 2020-02-14 01:43\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Property',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(default='Property<django.db.models.fields.AutoField>', max_length=20, verbose_name='Property Name')),\n ('description', models.CharField(default='', max_length=150, verbose_name='Property Description')),\n ('value', models.IntegerField(default=10, verbose_name='Property Value')),\n ('buildingType', models.CharField(choices=[('skyscraper1', 'Skyscraper1'), ('skyscraper2', 'Skyscraper2'), ('tripletowers1', 'Tripletowers1')], default='skyscraper1', max_length=100, verbose_name='building type')),\n ('tier', models.IntegerField(default=1, verbose_name='Property Tier')),\n ('rating', models.IntegerField(default=0, verbose_name='Property Rating')),\n ('owner', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'verbose_name': 'Property',\n 'verbose_name_plural': 'Properties',\n },\n ),\n migrations.CreateModel(\n name='PropertyTransaction',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('date', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Date')),\n ('amount', models.IntegerField(verbose_name='Transaction Amount')),\n ('buyer', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='buyer', to=settings.AUTH_USER_MODEL)),\n ('seller', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='seller', 
to=settings.AUTH_USER_MODEL)),\n ('target_property', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='property.Property')),\n ],\n options={\n 'verbose_name': 'Property Transaction',\n 'verbose_name_plural': 'Property Transactions',\n },\n ),\n ]\n"
},
{
"alpha_fraction": 0.6711711883544922,
"alphanum_fraction": 0.6711711883544922,
"avg_line_length": 21.299999237060547,
"blob_id": "ef0b4c64d75d7cce9642b29b74cc4b53c9f1fed4",
"content_id": "6321ff08d666ee33fd96b4fd1bfb921b73314e84",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 222,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 10,
"path": "/user/urls.py",
"repo_name": "cities-deadlines/cities-server",
"src_encoding": "UTF-8",
"text": "from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path('register/', views.register),\n path('login/', views.login),\n path('signout/', views.signout),\n path('current/', views.fetchCurrentUser)\n]"
},
{
"alpha_fraction": 0.6357292532920837,
"alphanum_fraction": 0.6413119435310364,
"avg_line_length": 39.18691635131836,
"blob_id": "208786c711510720fddb997bb310a7bce7dc1bb0",
"content_id": "cc575b26dad43e18b8f680fa3aa633f7b1010b13",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4299,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 107,
"path": "/user/models.py",
"repo_name": "cities-deadlines/cities-server",
"src_encoding": "UTF-8",
"text": "from django.db import models\nfrom django.contrib.auth.models import AbstractBaseUser, BaseUserManager, PermissionsMixin\nfrom django.core.exceptions import ValidationError\nfrom django.core.validators import validate_email\nfrom django.utils.translation import gettext_lazy as _\nfrom django.utils import timezone\nfrom django.core.mail import send_mail\nimport re \n\nclass UserManager(BaseUserManager):\n \n def create_user(self, email, username, password):\n\n # check for fields\n if not email: raise ValidationError('No email supplied.')\n if not username: raise ValidationError('No username supplied.')\n if not password: raise ValidationError('No email supplied.')\n\n # validate fields\n try: validate_email(email)\n except Exception: raise ValidationError('Invalid email.')\n if not re.match(r'^[a-zA-Z0-9_-]{3,15}$', username): raise ValidationError('Invalid username (at least 3 characters).')\n if not re.match(r'^(?=.*[A-Za-z])(?=.*\\d)(?=.*[@$!%*#?&])[A-Za-z\\d@$!%*#?&]{8,}$', password):\n raise ValidationError('Invalid password (at least 8 characters with one number and special character).')\n\n # validate unique fields\n email_flag = True\n try: self.model.users.get(email=email)\n except: email_flag = False\n if email_flag: raise ValidationError('This email has already been registered.')\n\n # validate unique username field\n username_flag = True\n try: self.model.users.get(username=username)\n except: username_flag = False\n if username_flag: raise ValidationError('This username is already in use.')\n\n # create user\n user = self.model(\n username=username,\n email=self.normalize_email(email),\n is_active=True\n )\n user.set_password(password)\n user.save(using=self._db)\n return user\n \n def create_superuser(self, email, username, password):\n\n # check for fields\n if not username: raise ValidationError('No username supplied.')\n if not email: raise ValidationError('No email supplied.')\n if not password: raise ValidationError('No password 
supplied.')\n\n # validate fields\n try: validate_email(email)\n except Exception: raise ValidationError('Invalid email.')\n if not re.match(r'^[a-z0-9_-]{3,15}$', username): raise ValidationError('Invalid username (must be 3-15 characters).')\n if not re.match(r'^(?=.*[A-Za-z])(?=.*\\d)(?=.*[@$!%*#?&])[A-Za-z\\d@$!%*#?&]{8,64}$', password):\n raise ValidationError('Invalid password (must be 8-64 characters with one number and special character).')\n\n # validate unique fields\n email_flag = True\n try: self.model.users.get(email=email)\n except: email_flag = False\n if email_flag: raise ValidationError('This email has already been registered.')\n\n # validate unique username field\n username_flag = True\n try: self.model.users.get(username=username)\n except: username_flag = False\n if username_flag: raise ValidationError('This username is already in use.')\n \n # create user\n user = self.model(\n username=username,\n email=self.normalize_email(email),\n is_active=True,\n is_staff=True,\n is_superuser=True\n )\n user.set_password(password)\n user.save(using=self._db)\n return user\n\nclass User(AbstractBaseUser, PermissionsMixin):\n\n # model metadata\n class Meta:\n verbose_name = _('User')\n verbose_name_plural = _('Users')\n\n # base user fields\n id = models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')\n username = models.CharField(_('Username'), max_length=15, unique=True)\n email = models.EmailField(_('Email Address'), unique=True)\n is_active = models.BooleanField(_('Active'), default=True)\n date_joined = models.DateTimeField(_('Date Joined'), default=timezone.now)\n is_staff = models.BooleanField(_('Staff Status'), default=False)\n \n # required fields (for django)\n EMAIL_FIELD = 'email'\n USERNAME_FIELD = 'username'\n REQUIRED_FIELDS = ['email']\n\n # attach custom model manager\n users = UserManager()"
},
{
"alpha_fraction": 0.37463125586509705,
"alphanum_fraction": 0.37463125586509705,
"avg_line_length": 22.929410934448242,
"blob_id": "3f62deef0cdb221894500e48c2f899d018cc46a4",
"content_id": "50e13fdbc4633945b6d5b1d81b035128c7f16440",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2034,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 85,
"path": "/property/admin.py",
"repo_name": "cities-deadlines/cities-server",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\n\nfrom .models import Property, PropertyTransaction\n\nclass PropertyAdmin(admin.ModelAdmin):\n \n list_display = ['id', 'name', 'description', 'owner', 'value', 'building_type', 'tier', 'rating', 'date_created']\n list_filter = ['value', 'building_type', 'tier', 'rating', 'date_created']\n\n fieldsets = [\n (\n None, \n {\n 'fields': [\n 'name',\n 'description',\n 'owner',\n 'value',\n 'building_type',\n 'tier',\n 'rating'\n ]\n },\n ),\n ]\n \n add_fieldsets = [\n (\n None, \n {\n 'fields': [\n 'name',\n 'description',\n 'owner',\n 'value',\n 'building_type',\n 'tier',\n 'rating'\n ],\n }\n ),\n ]\n\n search_fields = ['id', 'name', 'owner']\n ordering = ['name']\n\nclass PropertyTransactionAdmin(admin.ModelAdmin):\n \n list_display = ['id', 'amount', 'buyer', 'seller', 'target_property', 'date']\n list_filter = ['date']\n\n fieldsets = [\n (\n None, \n {\n 'fields': [\n 'amount',\n 'buyer',\n 'seller',\n 'target_property'\n ]\n },\n ),\n ]\n \n add_fieldsets = [\n (\n None, \n {\n 'fields': [\n 'amount',\n 'buyer',\n 'seller',\n 'target_property'\n ],\n }\n ),\n ]\n\n search_fields = ['id', 'buyer', 'seller']\n ordering = ['id']\n\n# register property/transaction\nadmin.site.register(Property, PropertyAdmin)\nadmin.site.register(PropertyTransaction, PropertyTransactionAdmin)\n"
},
{
"alpha_fraction": 0.4840182662010193,
"alphanum_fraction": 0.49771690368652344,
"avg_line_length": 18.954545974731445,
"blob_id": "c1d045abe4aedb6970e3d97e4d969075249a0761",
"content_id": "da9474803b7334bb007f7607506c7b1e525e2c6e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 438,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 22,
"path": "/frontend/src/components/modules/left-module.js",
"repo_name": "cities-deadlines/cities-server",
"src_encoding": "UTF-8",
"text": "import React, { Component } from 'react';\n\nclass LeftModuleContainer extends Component {\n render() {\n return (\n <div style={{\n height: '100%',\n width: '100%'\n }}>\n {this.props.children}\n </div>\n );\n }\n}\n\nclass LeftModule extends Component {\n constructor(props) {\n super(props);\n }\n}\n\nexport { LeftModule, LeftModuleContainer };"
},
{
"alpha_fraction": 0.3169074058532715,
"alphanum_fraction": 0.32591333985328674,
"avg_line_length": 28.577890396118164,
"blob_id": "96f3ce58519b4cba5d786284b5b84d5fe055de5d",
"content_id": "4b83777154fe52d7e282e44f373e77a91128d578",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 5885,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 199,
"path": "/frontend/src/components/home/owned.js",
"repo_name": "cities-deadlines/cities-server",
"src_encoding": "UTF-8",
"text": "import React, { Component } from 'react';\nimport { Button } from 'react-bootstrap';\nimport { trackPromise } from 'react-promise-tracker';\n\nclass OwnedForm extends Component {\n constructor(props) {\n super(props);\n\n // bind external functions\n this.signOut = this.signOut.bind(this);\n }\n\n render() {\n return (\n <>\n {/* page banner */}\n <div \n style={{\n display: 'flex',\n position: 'absolute',\n top: '0',\n\n justifyContent: 'center',\n alignItems: 'center',\n\n width: '100%',\n height: '7%',\n\n backgroundColor: 'black'\n }}\n >\n <div \n style={{ \n color: 'white',\n fontWeight: 'bolder',\n fontSize: '19px'\n }}\n >\n Owned Properties\n </div>\n\n {/* sign out button */}\n <Button\n variant='dark'\n type='button'\n size='sm'\n style={{ \n position: 'absolute',\n right: '15px',\n borderColor: 'white',\n backgroundColor: 'black'\n }}\n \n onClick={this.signOut}\n >\n Sign Out\n </Button>\n\n {/* back button */}\n <Button\n variant='dark'\n type='button'\n size='sm'\n style={{ \n position: 'absolute',\n left: '15px',\n borderColor: 'white',\n backgroundColor: 'black'\n }}\n \n onClick={this.props.closeOwnedPage}\n >\n Back\n </Button>\n </div>\n\n <div\n style={{\n position: 'absolute',\n display: 'flex',\n top: '7%',\n height: '93%',\n width: '100%',\n flexDirection: 'column',\n overflowY: 'hidden',\n\n borderBottomColor: 'gray',\n borderBottomWidth: '1px',\n borderBottomStyle: 'solid',\n }}\n >\n <div \n style={{\n height: '100%',\n overflowY: 'scroll'\n }}\n >\n \n <OwnedPropertyEntry property={{ tier: 5, name: 'Alex\\'s First Property', value: '$5' }} />\n <OwnedPropertyEntry property={{ tier: 5, name: 'Alex\\'s Second Property', value: '$20' }} />\n <OwnedPropertyEntry property={{ tier: 5, name: 'Harry\\'s Third Property', value: '$1,000,000' }} />\n\n </div>\n </div>\n </>\n )\n }\n\n signOut() {\n trackPromise(\n this.props.context.GET('user/signout/', {})\n .then((data) => {\n this.props.context.updateUser(null);\n })\n .catch((err) => 
{})\n );\n }\n}\n\nclass OwnedPropertyEntry extends Component {\n constructor(props) {\n super(props);\n this.state = {\n hover: false\n }\n\n // bind external functions\n this.onHover = this.onHover.bind(this);\n this.offHover = this.offHover.bind(this);\n }\n\n render() {\n\n // hover style\n var hoverColor = '#dcdcdc';\n if (this.state.hover) hoverColor = '#f2f2f2';\n\n return (\n <div\n onMouseEnter={this.onHover} \n onMouseLeave={this.offHover}\n\n style={{\n position: 'relative',\n display: 'flex',\n width: '100%',\n height: '70px',\n justifyContent: 'center',\n alignItems: 'center',\n\n cursor: 'pointer',\n\n backgroundColor: hoverColor,\n borderBottomColor: 'gray',\n borderBottomWidth: '1px',\n borderBottomStyle: 'solid'\n }}\n >\n <div \n style={{\n position: 'absolute',\n left: '15px',\n fontSize: '15px'\n }}\n >\n <b>Tier {this.props.property.tier}</b>\n </div>\n\n <div\n style={{\n fontSize: '14px'\n }}\n >\n {this.props.property.name}\n </div>\n\n <div \n style={{\n position: 'absolute',\n right: '15px',\n fontSize: '15px'\n }}\n >\n <i>{this.props.property.value}</i>\n </div>\n </div>\n );\n }\n\n onHover() {\n this.setState({ hover: true });\n }\n\n offHover() {\n this.setState({ hover: false });\n }\n}\n\nexport default OwnedForm;"
},
{
"alpha_fraction": 0.5426470637321472,
"alphanum_fraction": 0.6058823466300964,
"avg_line_length": 28.565217971801758,
"blob_id": "d31e36b7fe6b79e8eae6312240b0bfcdca126d96",
"content_id": "cfec9579a01b0bf2594cad2d3bc5c695a1d67b81",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 680,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 23,
"path": "/property/migrations/0003_auto_20200214_0152.py",
"repo_name": "cities-deadlines/cities-server",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.0.3 on 2020-02-14 01:52\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('property', '0002_auto_20200214_0150'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='property',\n name='description',\n field=models.CharField(default='', max_length=150, null=True, verbose_name='Property Description'),\n ),\n migrations.AlterField(\n model_name='property',\n name='name',\n field=models.CharField(default='Property f3898111', max_length=20, unique=True, verbose_name='Property Name'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.713248610496521,
"alphanum_fraction": 0.7186932563781738,
"avg_line_length": 38.42856979370117,
"blob_id": "416d0f078e17b3d598a82dcbc2c9d8a7c7c98676",
"content_id": "aa2105fb879520c071db627a634b08b260e2cead",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 551,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 14,
"path": "/map/models.py",
"repo_name": "cities-deadlines/cities-server",
"src_encoding": "UTF-8",
"text": "from django.db import models\n\n# Create your models here.\nclass MapTile(models.Model):\n xCoord = models.IntegerField()\n yCoord = models.IntegerField()\n # store the contents of the maptile in json format; provides easy formatting for limited data size\n \n # so, we need some way to encapsulate buildings at given locations on the map, roading/filler tile\n # networks. On the constructor, needs to construct 100 buildings under properties, give ownership\n # to some fake account, etc.\n\n def __str__(self):\n return \"MapTile\""
},
{
"alpha_fraction": 0.5425652861595154,
"alphanum_fraction": 0.5993189811706543,
"avg_line_length": 31.629629135131836,
"blob_id": "f18fd1e8f0866f6ab29d8108a114d67a51dd2127",
"content_id": "065429efe19e3aa085f2239f444fed67c32d4061",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 881,
"license_type": "no_license",
"max_line_length": 214,
"num_lines": 27,
"path": "/property/migrations/0007_auto_20200214_0259.py",
"repo_name": "cities-deadlines/cities-server",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.0.3 on 2020-02-14 02:59\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('property', '0006_auto_20200214_0248'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='property',\n name='buildingType',\n ),\n migrations.AddField(\n model_name='property',\n name='building_type',\n field=models.CharField(choices=[('skyscraper1', 'Skyscraper1'), ('skyscraper2', 'Skyscraper2'), ('tripletowers1', 'Tripletowers1')], default='skyscraper1', max_length=100, verbose_name='Building Type'),\n ),\n migrations.AlterField(\n model_name='property',\n name='name',\n field=models.CharField(default='Property 00415c82', max_length=20, verbose_name='Property Name'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.6397026777267456,
"alphanum_fraction": 0.6488850116729736,
"avg_line_length": 29.479999542236328,
"blob_id": "9f1f6d5662a5db0bcd11503f4500129e7b00d548",
"content_id": "1009d3bcea937fdcb517e8d0fad67886224d6a91",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2287,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 75,
"path": "/property/models.py",
"repo_name": "cities-deadlines/cities-server",
"src_encoding": "UTF-8",
"text": "from django.db import models\nfrom map.models import MapTile\nfrom user.models import User\nfrom django.utils.translation import gettext_lazy as _\nfrom django.utils import timezone\nimport uuid\n\nclass Property(models.Model):\n \n class Meta:\n verbose_name = _('Property')\n verbose_name_plural = _('Properties')\n\n class BuildingTypes(models.TextChoices):\n skyscraper1 = 'skyscraper1'\n skyscraper2 = 'skyscraper2'\n tripletowers1 = 'tripletowers1'\n\n id = models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')\n date_created = models.DateTimeField(_('Date Created'), default=timezone.now)\n\n name = models.CharField(_('Name'), max_length=20, default='Property ' + str(uuid.uuid4())[:8])\n description = models.CharField(_('Description'), max_length=150, blank=True, default='')\n\n value = models.IntegerField(_('Value'), default=10) \n building_type = models.CharField(\n _('Building Type'), \n max_length=100, \n choices=BuildingTypes.choices,\n default='skyscraper1'\n )\n\n # set these fields on property update\n tier = models.IntegerField(_('Tier'), default=1)\n rating = models.IntegerField(_('Rating'), default=0)\n\n # relation fields\n owner = models.ForeignKey(\n User, \n on_delete=models.SET_NULL, # set owning user to NULL when user is deleted\n null=True\n )\n\n def __str__(self):\n return self.name\n\nclass PropertyTransaction(models.Model):\n \n class Meta:\n verbose_name = _('Property Transaction')\n verbose_name_plural = _('Property Transactions')\n\n date = models.DateTimeField(_('Date'), default=timezone.now)\n\n amount = models.IntegerField(_('Amount'))\n\n # relation fields\n buyer = models.ForeignKey(\n User, \n on_delete=models.SET_NULL, # set buying user to NULL when user is deleted\n related_name='buyer',\n null=True\n )\n\n seller = models.ForeignKey(\n User, \n on_delete=models.SET_NULL, # set selling user to NULL when user is deleted\n related_name='seller',\n null=True\n )\n\n target_property = 
models.ForeignKey(\n Property,\n on_delete=models.CASCADE # delete property transaction when property is deleted\n ) \n"
},
{
"alpha_fraction": 0.5515463948249817,
"alphanum_fraction": 0.5692194700241089,
"avg_line_length": 30.581396102905273,
"blob_id": "b5ae9556f45f4c2c4a48a3a959071b03820aad0e",
"content_id": "951e097e745b5b07a33eb9400192ca96e8b7abb3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1358,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 43,
"path": "/user/migrations/0002_auto_20200214_0314.py",
"repo_name": "cities-deadlines/cities-server",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.0.3 on 2020-02-14 03:14\n\nfrom django.db import migrations, models\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('user', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='user',\n options={'verbose_name': 'User', 'verbose_name_plural': 'Users'},\n ),\n migrations.AlterField(\n model_name='user',\n name='date_joined',\n field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='Date Joined'),\n ),\n migrations.AlterField(\n model_name='user',\n name='email',\n field=models.EmailField(max_length=254, unique=True, verbose_name='Email Address'),\n ),\n migrations.AlterField(\n model_name='user',\n name='is_active',\n field=models.BooleanField(default=True, verbose_name='Active'),\n ),\n migrations.AlterField(\n model_name='user',\n name='is_staff',\n field=models.BooleanField(default=False, verbose_name='Staff Status'),\n ),\n migrations.AlterField(\n model_name='user',\n name='username',\n field=models.CharField(max_length=15, unique=True, verbose_name='Username'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.52173912525177,
"alphanum_fraction": 0.6064073443412781,
"avg_line_length": 23.27777862548828,
"blob_id": "fff409247af313acbcdbd5ae038715451123c4b0",
"content_id": "9260e154b2ca3eb007602ef09d14e2b2bd9105a2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 437,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 18,
"path": "/property/migrations/0009_auto_20200214_0314.py",
"repo_name": "cities-deadlines/cities-server",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.0.3 on 2020-02-14 03:14\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('property', '0008_auto_20200214_0307'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='property',\n name='name',\n field=models.CharField(default='Property 884e4cfe', max_length=20, verbose_name='Name'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.7559523582458496,
"alphanum_fraction": 0.7782738208770752,
"avg_line_length": 43.79999923706055,
"blob_id": "fc473e110ccdbc43a181f7bf28a692ab88ca2501",
"content_id": "3cf5939e451507a9c6d74028a3432d89052abde6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 672,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 15,
"path": "/README.md",
"repo_name": "cities-deadlines/cities-server",
"src_encoding": "UTF-8",
"text": "# Cities Deadlines\n\nHow to get a local instance operational:\n\n1) Clone repository\n2) Navigate to root directory ('cities-deadlines')\n3) from cities-deadlines/, run cd frontend\n4) In cities-deadlines/frontend, run npm install\n5) In cities-deadlines/frontend, run npm run dev\n6) Leaving the current terminal window open, open a new terminal window, and navigate to root directory ('cities-deadlines/')\n7) Create a python virtual environment, using 'python3 -m venv venv'\n8) Enter the virtual environment via 'source .venv/bin/activate'\n9) Install the python dependencies via 'pip install -r requirements.txt'\n10) Run the server via 'python3 manage.py runserver'\n11) Profit!\n"
},
{
"alpha_fraction": 0.669658899307251,
"alphanum_fraction": 0.6786355376243591,
"avg_line_length": 36.16666793823242,
"blob_id": "bd8348ba6fab49e2b7a349868f278a4592572577",
"content_id": "f0523eed911084074c8d01aee24642deb7114b07",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1114,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 30,
"path": "/frontend/src/components/map/assetmanager.js",
"repo_name": "cities-deadlines/cities-server",
"src_encoding": "UTF-8",
"text": "import React, { Component } from \"react\";\n\n// import relevant assets\nimport intersection from '../../../img/4-way-intersection-city-dense.png';\nimport road from '../../../img/4-lane-road-city-dense.png';\nimport skyscraper1 from '../../../img/skyscraper-dense-parallax-1.png';\nimport tripletowers1 from '../../../img/triple-towers-1.png';\nimport watertile from '../../../img/water-full-block.png'\nimport waterroad from '../../../img/water-road-block-middle.png'\nimport waterinter from '../../../img/water-intersect-block-middle.png'\nimport grass from '../../../img/grass.png'\nimport grassRoad from '../../../img/grass-road.png'\n\n\nexport function retrieveBlockAsset(name) {\n if (name == \"skyscraper1\") {return skyscraper1;}\n else if (name == \"tripletowers1\") { return tripletowers1; }\n else if (name == \"water\") { return watertile; }\n else return grass;\n}\n\nexport function retrieveRoadAsset(name, direction) {\n if (name == \"waterroad\") { return waterroad; }\n if (name == \"grass\") { return grassRoad; }\n return road;\n}\n\nexport function retrieveIntersectionAsset(name) {\n return intersection;\n}"
},
{
"alpha_fraction": 0.5748898386955261,
"alphanum_fraction": 0.6145374178886414,
"avg_line_length": 30.310344696044922,
"blob_id": "fa3da5798ec5bc441a7a757c8e48fa368399f20f",
"content_id": "ccbafa9dfc73cb6ca913d4d6651a74a45c60bd24",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 908,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 29,
"path": "/property/migrations/0006_auto_20200214_0248.py",
"repo_name": "cities-deadlines/cities-server",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.0.3 on 2020-02-14 02:48\n\nfrom django.db import migrations, models\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('property', '0005_auto_20200214_0155'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='property',\n name='date_created',\n field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='Date Created'),\n ),\n migrations.AlterField(\n model_name='property',\n name='name',\n field=models.CharField(default='Property cca4bb40', max_length=20, verbose_name='Property Name'),\n ),\n migrations.AlterField(\n model_name='propertytransaction',\n name='date',\n field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='Transaction Date'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5571377873420715,
"alphanum_fraction": 0.561765730381012,
"avg_line_length": 24.545454025268555,
"blob_id": "9fe264e4b694ffcec866c79d60f58093dbad034b",
"content_id": "6f53654402b22436f729ea1c4e5cdeff86842b71",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2809,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 110,
"path": "/user/views.py",
"repo_name": "cities-deadlines/cities-server",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\nfrom rest_framework.decorators import api_view\nfrom django.core.exceptions import ValidationError\nfrom rest_framework.response import Response\nfrom django.contrib import auth\n\nfrom . import models\nfrom . import serializers\n\n@api_view(['GET'])\ndef register(request):\n try:\n\n # fetch user data\n email = request.headers['email']\n username = request.headers['username']\n password = request.headers['password']\n\n # create user in manager\n user = auth.get_user_model().users.create_user(\n email=email, \n username=username,\n password=password\n )\n\n # login user\n if user is not None:\n auth.login(request, user)\n return Response({\n 'username': user.username,\n 'email': user.email,\n 'id': user.id\n })\n else: return Response(False)\n\n except ValidationError as e:\n return Response({\n 'message': e.messages[0]\n })\n\n except Exception as e:\n print('Register error: ' + str(e))\n return Response(False)\n\n@api_view(['GET'])\ndef login(request):\n try:\n\n # fetch user data\n username = request.headers['username']\n password = request.headers['password']\n\n # authenticate user\n user = auth.authenticate(\n username=username, \n password=password\n )\n\n # login user\n if user is not None:\n auth.login(request, user)\n return Response({\n 'username': user.username,\n 'email': user.email,\n 'id': user.id\n })\n else: return Response({\n 'message': 'Incorrect username or password.'\n })\n\n except Exception as e:\n print('Login error: ' + str(e))\n return Response(False)\n\n@api_view(['GET'])\ndef signout(request):\n try:\n\n # check if user is authenticated\n if not request.user.is_authenticated:\n return Response(False, status=401)\n\n # logout user\n auth.logout(request)\n return Response(True)\n\n except Exception as e:\n print('Signout error: ' + str(e))\n return Response(False, status=401)\n\n@api_view(['GET'])\ndef fetchCurrentUser(request):\n\n try:\n\n # check if user is authenticated\n if not 
request.user.is_authenticated:\n return Response(False, status=401)\n\n # return authenticated user \n user = request.user \n return Response({\n 'username': user.username,\n 'email': user.email,\n 'id': user.id\n })\n\n except Exception as e:\n print('Fetch current user error: ' + str(e))\n return Response(False, status=401)"
},
{
"alpha_fraction": 0.37634626030921936,
"alphanum_fraction": 0.3923999071121216,
"avg_line_length": 31.596027374267578,
"blob_id": "7cf7b4fb6ef3a85d2156d03d1e75a3746626232f",
"content_id": "71a1dfdaae5eaa336ff45e3cbe37ebc474ecf396",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 4921,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 151,
"path": "/frontend/src/components/home/notifications.js",
"repo_name": "cities-deadlines/cities-server",
"src_encoding": "UTF-8",
"text": "import React, { Component } from 'react';\nimport { Row } from 'react-bootstrap';\n\nclass NotificationsList extends Component {\n constructor(props) {\n super(props);\n }\n\n render() {\n return (\n <div \n style={{\n display: 'flex',\n height: '100%',\n width: '85%',\n flexDirection: 'column',\n borderColor: 'gray',\n borderWidth: '1px',\n borderStyle: 'solid',\n borderRadius: '5px',\n overflowY: 'hidden'\n }}\n >\n\n {/* notifications banner */}\n <div\n style={{\n display: 'flex',\n width: '100%',\n height: '40px',\n justifyContent: 'center',\n alignItems: 'center',\n fontSize: '15px',\n \n backgroundColor: '#f2f2f2',\n borderTopLeftRadius: '10px',\n borderTopRightRadius: '10px',\n borderBottomColor: 'gray',\n borderBottomWidth: '1px',\n borderBottomStyle: 'solid',\n }}\n > \n <b>Recent Transactions</b>\n </div>\n\n <div \n style={{\n height: '100%',\n overflowY: 'scroll'\n }}\n >\n <NotificationEntry new={true} time='9h' message='Test notification 1' />\n <NotificationEntry new={true} time='10h' message='Test notification 2' />\n <NotificationEntry new={true} time='11h' message='Test notification 3' />\n <NotificationEntry new={false} time='14h' message='Test notification 4' />\n <NotificationEntry new={true} time='23h' message='Test notification 5' />\n <NotificationEntry new={true} time='1d' message='Test notification 6' />\n <NotificationEntry new={false} time='1d' message='Test notification 7' />\n <NotificationEntry new={false} time='3d' message='Test notification 8' />\n <NotificationEntry new={false} time='1w' message='Test notification 9' />\n <NotificationEntry new={false} time='1w' message='Test notification 10' />\n <NotificationEntry new={false} time='1w' message='Test notification 11' />\n </div>\n </div>\n );\n }\n}\n\nclass NotificationEntry extends Component {\n constructor(props) {\n super(props);\n this.state = {\n hover: false\n }\n\n // bind external functions\n this.onHover = this.onHover.bind(this);\n this.offHover = 
this.offHover.bind(this);\n }\n\n render() {\n\n // hover style\n var hoverColor = '#dcdcdc';\n if (this.state.hover) hoverColor = '#f2f2f2';\n\n return (\n <div\n onMouseEnter={this.onHover} \n onMouseLeave={this.offHover}\n\n style={{\n position: 'relative',\n display: 'flex',\n width: '100%',\n height: '50px',\n justifyContent: 'center',\n alignItems: 'center',\n\n cursor: 'pointer',\n\n backgroundColor: hoverColor,\n borderBottomColor: 'gray',\n borderBottomWidth: '1px',\n borderBottomStyle: 'solid'\n }}\n >\n {this.props.new && (\n <div \n style={{\n position: 'absolute',\n left: '13px',\n width: '9px',\n height: '9px',\n backgroundColor: '#5367B5',\n borderRadius: '50%'\n }} \n />\n )}\n\n <div\n style={{\n fontSize: '13px'\n }}\n >\n {this.props.message}\n </div>\n\n <div \n style={{\n position: 'absolute',\n right: '11px',\n fontSize: '11px'\n }}\n >\n <i>{this.props.time} ago</i>\n </div>\n </div>\n );\n }\n\n onHover() {\n this.setState({ hover: true });\n }\n\n offHover() {\n this.setState({ hover: false });\n }\n}\n\nexport default NotificationsList;"
},
{
"alpha_fraction": 0.5513392686843872,
"alphanum_fraction": 0.6116071343421936,
"avg_line_length": 23.88888931274414,
"blob_id": "67c94fa72098d8eaa440ac7af9b0d49a4bb58e77",
"content_id": "9998625267f9e9524be3c3f64c0537ae7cbf8cde",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 448,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 18,
"path": "/property/migrations/0002_auto_20200214_0150.py",
"repo_name": "cities-deadlines/cities-server",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.0.3 on 2020-02-14 01:50\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('property', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='property',\n name='name',\n field=models.CharField(default='Property 0f04404b', max_length=20, unique=True, verbose_name='Property Name'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.3837597370147705,
"alphanum_fraction": 0.3978494703769684,
"avg_line_length": 27.10416603088379,
"blob_id": "79d49b2c28988bdbd1d82e04eeb6d26e27227c7e",
"content_id": "cfd19e9da7284852e2750301cbe619a17be2fb94",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 2697,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 96,
"path": "/frontend/src/components/modules/right-module.js",
"repo_name": "cities-deadlines/cities-server",
"src_encoding": "UTF-8",
"text": "import React, { Component } from 'react';\nimport { usePromiseTracker } from 'react-promise-tracker';\nimport Loader from 'react-loader-spinner';\n\nimport UserContext from '../user/context';\n\nconst RightModule = props => {\n\n // track loading promise\n const { promiseInProgress } = usePromiseTracker();\n\n return (\n <>\n {promiseInProgress && (\n <div \n style={{\n position: 'absolute',\n zIndex: 1,\n display: 'flex',\n flexDirection: 'column',\n height: '100%',\n width: '100%',\n justifyContent: 'center',\n alignItems: 'center'\n }}\n >\n <Loader\n type='Oval'\n color='black'\n height={100}\n width={100}\n />\n </div>\n )}\n \n <div style={{\n position: 'relative',\n height: '100%',\n width: '100%',\n filter: promiseInProgress ? 'blur(3px)' : 'blur(0px)'\n }}>\n {props.children}\n </div>\n </>\n );\n}\n\nclass RightModulePage extends Component {\n constructor(props) {\n super(props);\n this.state = {\n visible: props.visible\n }\n }\n\n toggleModulePage() {\n this.setState({\n visible: !this.state.visible\n });\n }\n\n render() {\n return (\n <div style={{\n position: 'absolute',\n display: 'flex',\n flexDirection: 'column',\n height: '100%',\n width: '100%',\n\n justifyContent: 'center',\n alignItems: 'center',\n\n backgroundColor: '#e6e6e6',\n \n borderLeftColor: 'black',\n borderLeftWidth: '1px',\n borderLeftStyle: 'solid',\n\n /* sliding animation */\n transform: `translate(${this.state.visible ? 0 : 100}%, 0)`,\n transition: 'transform 0.5s'\n }}> \n <UserContext.Consumer>\n {value => {\n return React.Children.map(this.props.children, child => {\n return React.cloneElement(child, { context: value });\n });\n }}\n </UserContext.Consumer>\n </div>\n );\n }\n}\n\nexport { RightModulePage, RightModule };"
}
] | 27 |
spacenew/algo
|
https://github.com/spacenew/algo
|
88f564a8b6f21d2b906236f595be417f7f3803ea
|
3d4110d176cf8dba04c989dd9152efbae99483d3
|
8b29347ac967e6b1270ff7c8d4f8e43249821fb9
|
refs/heads/main
| 2023-08-10T17:37:17.903981 | 2021-09-23T16:49:15 | 2021-09-23T16:49:15 | 390,646,416 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7932489514350891,
"alphanum_fraction": 0.8016877770423889,
"avg_line_length": 46.400001525878906,
"blob_id": "857c298ee4027618e63625986e9e5fd2a8a7f4e4",
"content_id": "8f5811b800204a6a13ef597e7a512cc51821ef8a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 367,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 5,
"path": "/README.md",
"repo_name": "spacenew/algo",
"src_encoding": "UTF-8",
"text": "# Реализация некоторых алгоритмов ML\n\n## \n`1. SGD Linear regression with regularization - стохастический градиентный спуск с гребневой регуляризацией` \n`2. Linear_Regression - линейная регрессия, аналитич.методом с функцией потерь MSE`\n"
},
{
"alpha_fraction": 0.4107300043106079,
"alphanum_fraction": 0.5518909692764282,
"avg_line_length": 32.48484802246094,
"blob_id": "80c9cab1e9f6058f7e8fc4adbef930a6f41aabdd",
"content_id": "cd7cc8d1b336dadb96a4d4c64e07c1d666959105",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2359,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 66,
"path": "/knn_algo.py",
"repo_name": "spacenew/algo",
"src_encoding": "UTF-8",
"text": "import pandas as pd\r\nimport numpy as np\r\nfrom scipy.spatial import distance\r\n\r\n\r\ncolumns = ['комнаты', 'площадь', 'кухня', 'пл. жилая', 'этаж', 'всего этажей',\r\n 'кондиционер']\r\n\r\ndf_train = pd.DataFrame([\r\n [1, 38.5, 6.9, 18.9, 3, 5, 1],\r\n [1, 38.0, 8.5, 19.2, 9, 17, 0],\r\n [1, 34.7, 10.3, 19.8, 1, 9, 0],\r\n [1, 45.9, 11.1, 17.5, 11, 23, 1],\r\n [1, 42.4, 10.0, 19.9, 6, 14, 0],\r\n [1, 46.0, 10.2, 20.5, 3, 12, 1],\r\n [2, 77.7, 13.2, 39.3, 3, 17, 1],\r\n [2, 69.8, 11.1, 31.4, 12, 23, 0],\r\n [2, 78.2, 19.4, 33.2, 4, 9, 0],\r\n [2, 55.5, 7.8, 29.6, 1, 25, 1],\r\n [2, 74.3, 16.0, 34.2, 14, 17, 1],\r\n [2, 78.3, 12.3, 42.6, 23, 23, 0],\r\n [2, 74.0, 18.1, 49.0, 8, 9, 0],\r\n [2, 91.4, 20.1, 60.4, 2, 10, 0],\r\n [3, 85.0, 17.8, 56.1, 14, 14, 1],\r\n [3, 79.8, 9.8, 44.8, 9, 10, 0],\r\n [3, 72.0, 10.2, 37.3, 7, 9, 1],\r\n [3, 95.3, 11.0, 51.5, 15, 23, 1],\r\n [3, 69.3, 8.5, 39.3, 4, 9, 0],\r\n [3, 89.8, 11.2, 58.2, 24, 25, 0],\r\n], columns=columns)\r\n\r\ntrain_features = df_train.drop('кондиционер', axis=1)\r\ntrain_target = df_train['кондиционер']\r\n\r\ndf_test = pd.DataFrame([\r\n [1, 36.5, 5.9, 17.9, 2, 7, 0],\r\n [2, 71.7, 12.2, 34.3, 5, 21, 1],\r\n [3, 88.0, 18.1, 58.2, 17, 17, 1],\r\n], columns=columns)\r\n\r\ntest_features = df_test.drop('кондиционер', axis=1)\r\n\r\ndef nearest_neighbor_predict(train_features, train_target, new_features):\r\n distances = []\r\n for i in range(train_features.shape[0]):\r\n vector = train_features.loc[i].values\r\n distances.append(distance.euclidean(new_features, vector))\r\n best_index = np.array(distances).argmin()\r\n return train_target.loc[best_index]\r\n\r\nclass NearestNeighborClassificator:\r\n def fit(self, features_train, target_train):\r\n self.features_train = features_train\r\n self.target_train = target_train\r\n def predict(self, new_features):\r\n values = []\r\n for i in range(new_features.shape[0]):\r\n vector = new_features.loc[i]\r\n values.append(nearest_neighbor_predict(\r\n 
self.features_train, self.target_train, vector))\r\n return pd.Series(values)\r\n\r\nmodel = NearestNeighborClassificator()\r\nmodel.fit(train_features, train_target)\r\npredictions = model.predict(test_features)\r\nprint(predictions)"
}
] | 2 |
nishanb/CG-College-Projects
|
https://github.com/nishanb/CG-College-Projects
|
5456f3c786f48096ef1576c18ff3c0e605418a2a
|
cd4f785a8ec86997e05e618cea0620a71045fd98
|
82fa0137ba2bb4c2f0fa3cc304f7374d1ae76b33
|
refs/heads/master
| 2022-01-14T19:18:10.540292 | 2019-06-06T08:13:21 | 2019-06-06T08:13:21 | 184,645,835 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.46129873394966125,
"alphanum_fraction": 0.6310957670211792,
"avg_line_length": 17.966453552246094,
"blob_id": "1f337e7815e8b6615c9a088b62e50597f4aefd58",
"content_id": "e37f311cf1d0d29b44a2694ce4654b81eb7856a2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 23746,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 1252,
"path": "/crow/crow.c",
"repo_name": "nishanb/CG-College-Projects",
"src_encoding": "UTF-8",
"text": "#include<GL/glut.h>\n#include<stdio.h>\n#include<math.h>\n#include<string.h>\n\n//display variable\nint state=0,k;\n\n//animation variables\nfloat startx=0;\n\n//fonts types\nvoid *fonts[]=\n{\n GLUT_BITMAP_9_BY_15,\n GLUT_BITMAP_TIMES_ROMAN_10,\n GLUT_BITMAP_TIMES_ROMAN_24,\n GLUT_BITMAP_HELVETICA_18,\n GLUT_BITMAP_HELVETICA_12\n};\n\n//GL_init function\nvoid init()\n{\n\tglClearColor(1,1,1,1);\n glMatrixMode(GL_PROJECTION);\n\tgluOrtho2D(0,1000,0,1000);\n glMatrixMode(GL_MODELVIEW);\n}\n\n//keyboard function\nvoid keyboard( unsigned char key, int x, int y )\n{\n state++;\n}\n\n//function to write text\nvoid write_text(int x, int y, char *string,void *font)\n{\n int len, i;\n glRasterPos2f(x, y);\n len = (int) strlen(string);\n for (i = 0; i < len; i++) {\n glutBitmapCharacter(font, string[i]);\n }\n}\n\n//helper function for drawing sphere\nvoid sphere(float r, float g, float b, float a)\n{\n glColor4f(r,g,b,a);\n glutSolidSphere(1,100,32);\n\n}\n\n//story box\nvoid showStory(){\n //story box\n glColor3f(0.2,0.1,0.5);\n glBegin(GL_POLYGON);\n glVertex2f(0,0);\n glVertex2f(0,80);\n glVertex2f(1000,80);\n glVertex2f(1000,0);\n glEnd();\n}\n\n//drawer functions\nvoid draw_hill()\n{\n\tglColor3f(0.6,0.3,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(0,600);\n\tglVertex2f(250,900);\n\tglVertex2f(500,600);\n\tglEnd();\n\tglColor3f(0.6,0.3,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(500,600);\n\tglVertex2f(750,900);\n\tglVertex2f(1000,600);\n\tglEnd();\n}\n\nvoid draw_sky()\n{\n\tglColor3f(0.3,0.9,0.9);\n\tglBegin(GL_QUADS);\n\tglVertex2f(0,600);\n\tglVertex2f(0,1000);\n\tglVertex2f(1000,1000);\n\tglVertex2f(1000,600);\n\tglEnd();\n}\nvoid draw_crow()\n{\n glColor3f(0,0,0);\n glBegin(GL_POLYGON);\n\n//glVertex2f(400,800);\n glVertex2f(400,840);\n glVertex2f(380,850);\n glVertex2f(370,860);\n glVertex2f(380,870);\n\n //glVertex2f(450,830);\n //glVertex2f(400,910);\n //glVertex2f(450,890);\n glVertex2f(500,880);\n glVertex2f(470,930);\n glVertex2f(520,950);\n 
glVertex2f(540,950);\n glVertex2f(590,930);\n glVertex2f(590,920);\n glVertex2f(570,880);\n glVertex2f(600,900);\n glVertex2f(620,910);\n glVertex2f(630,910);\n glVertex2f(640,890);\n glVertex2f(660,880);\n glVertex2f(640,860);\n glVertex2f(600,840);\n glVertex2f(560,810);\n glVertex2f(500,800);\n glVertex2f(450,810);\n //glVertex2f(400,800);\n glEnd();\n\n}\nvoid draw_home()\n{\n glBegin(GL_QUADS);\n glColor3f(1,1,1);\n glVertex2f(780,50);\n glVertex2f(820,50);\n glVertex2f(820,100);\n glVertex2f(780,100);\n glEnd();\n glBegin(GL_POLYGON);\n glColor3f(0.9,1,0);\n glVertex2f(730,50);\n glVertex2f(870,50);\n glVertex2f(870,150);\n glVertex2f(730,150);\n glEnd();\n glBegin(GL_POLYGON);\n glColor3f(0.7,0.6,0.2);\n glVertex2f(700,150);\n glVertex2f(900,150);\n glVertex2f(800,235);\n glEnd();\n glBegin(GL_QUADS);\n glColor3f(1,1,1);\n glVertex2f(300,400);\n glVertex2f(350,400);\n glVertex2f(350,460);\n glVertex2f(300,460);\n glEnd();\n glBegin(GL_POLYGON);\n glColor3f(0.9,1,0);\n glVertex2f(250,400);\n glVertex2f(400,400);\n glVertex2f(400,500);\n glVertex2f(250,500);\n glEnd();\n glBegin(GL_POLYGON);\n glColor3f(0.7,0.6,0.2);\n glVertex2f(230,500);\n glVertex2f(420,500);\n glVertex2f(325,580);\n glEnd();\n\n}\n\nvoid draw_tree()\n{\n glBegin(GL_QUADS);\n glColor3f(0.3,0,0.1);\n glVertex2f(310,60);\n glVertex2f(340,60);\n glVertex2f(340,100);\n glVertex2f(310,100);\n glEnd();\n glBegin(GL_POLYGON);\n glColor3f(0.2,0.4,0.1);\n glVertex2f(300,150);\n glVertex2f(270,100);\n glVertex2f(380,100);\n glVertex2f(350,150);\n glEnd();\n glBegin(GL_POLYGON);\n glColor3f(0.2,0.4,0.1);\n glVertex2f(270,150);\n glVertex2f(380,150);\n glVertex2f(325,220);\n glEnd();\n}\n\nvoid draw_eyes()\n{\n int i;\n\tfloat rad;\n\tglColor3f(1,1,1);\n\tglBegin(GL_POLYGON);\n\tfor(i=0;i<360;i++)\n\t{\n\t\trad=i*3.14159/180;\n\t\tglVertex2f(cos(rad)*4,sin(rad)*4);\n\t}\n\tglEnd();\n}\nvoid draw_path()\n{\n glBegin(GL_POLYGON);\n glColor3f(0.8,0.6,0.1);\n glVertex2f(0,250);\n glVertex2f(0,350);\n 
glVertex2f(1000,350);\n glVertex2f(1000,250);\n glEnd();\n}\n\n//scene-1 end\n\n//scene-2 drawer function\nvoid draw_hill2()\n{\n\tglColor3f(0.6,0.3,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(0,600);\n\tglVertex2f(250,900);\n\tglVertex2f(500,600);\n\tglEnd();\n\tglColor3f(0.6,0.3,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(500,600);\n\tglVertex2f(750,900);\n\tglVertex2f(1000,600);\n\tglEnd();\n}\n\nvoid DrawSegment2(float,float,float,int,int,int);\n\nvoid draw_sky2()\n{\n\tglColor3f(0.3,0.9,0.9);\n\tglBegin(GL_QUADS);\n\tglVertex2f(0,600);\n\tglVertex2f(0,1000);\n\tglVertex2f(1000,1000);\n\tglVertex2f(1000,600);\n\tglEnd();\n}\n\nvoid draw_vase2()\n{\n glColor3f(0.3,0.9,0.9);\n glBegin(GL_POLYGON);\n glVertex2f(700,140);\n glVertex2f(700,100);\n glVertex2f(750,50);\n glVertex2f(850,50);\n glVertex2f(900,100);\n glVertex2f(900,140);\n glVertex2f(700,140);\n glEnd();\n glColor3f(1,1,1);\n glBegin(GL_LINE_LOOP);\n glVertex2f(700,140);\n glVertex2f(700,200);\n glVertex2f(750,250);\n glVertex2f(750,300);\n glVertex2f(850,300);\n glVertex2f(850,250);\n glVertex2f(900,200);\n glVertex2f(900,140);\n glVertex2f(700,140);\n glEnd();\n}\n\nvoid draw_crow2()\n{\n glBegin(GL_POLYGON);\n glColor3f(0,0,0);\n //glVertex2f(750,300);\n //glVertex2f(735,305);\n //glVertex2f(725,315);\n glVertex2f(700,310);\n glVertex2f(680,300);\n glVertex2f(650,295);\n glVertex2f(600,290);\n glVertex2f(575,300);\n glVertex2f(540,315);\n glVertex2f(570,330);\n glVertex2f(605,350);\n glVertex2f(645,365);\n glVertex2f(680,380);\n glVertex2f(705,390);\n glVertex2f(730,400);\n glVertex2f(755,410);\n glVertex2f(760,390);\n glVertex2f(765,380);\n glVertex2f(770,370);\n glVertex2f(740,335);\n //glVertex2f(730,330);\n //glVertex2f(745,315);\n //glVertex2f(755,300);\n glEnd();\n glBegin(GL_POLYGON);\n glColor3f(0,0,0);\n glVertex2f(755,410);\n glVertex2f(770,412);\n glVertex2f(785,420);\n glVertex2f(800,425);\n glVertex2f(820,430);\n glVertex2f(830,428);\n glVertex2f(840,420);\n glVertex2f(845,410);\n 
glVertex2f(840,400);\n glVertex2f(837,390);\n glVertex2f(830,380);\n glVertex2f(825,375);\n glVertex2f(820,365);\n glVertex2f(815,340);\n glVertex2f(810,365);\n glVertex2f(800,375);\n glVertex2f(800,370);\n glVertex2f(790,370);\n glVertex2f(770,370);\n glVertex2f(760,390);\n glVertex2f(765,380);\n glEnd();\n glBegin(GL_LINES);\n glColor3f(0,0,0);\n glVertex2f(730,330);\n glVertex2f(755,300);\n glEnd();\n glBegin(GL_LINES);\n glColor3f(0,0,0);\n glVertex2f(750,300);\n glVertex2f(710,325);\n glEnd();\n}\n\nvoid draw_tree2()\n{\n glBegin(GL_QUADS);\n glColor3f(0.3,0,0);\n glVertex2f(100,100);\n glVertex2f(150,100);\n glVertex2f(150,200);\n glVertex2f(100,200);\n glEnd();\n glBegin(GL_POLYGON);\n glColor3f(0,0.4,0.1);\n glVertex2f(100,200);\n glVertex2f(80,200);\n glVertex2f(60,210);\n glVertex2f(50,230);\n glVertex2f(50,250);\n glVertex2f(55,280);\n glVertex2f(65,310);\n glVertex2f(80,335);\n glVertex2f(100,360);\n glVertex2f(110,370);\n glVertex2f(135,370);\n glVertex2f(160,350);\n glVertex2f(180,330);\n glVertex2f(200,300);\n glVertex2f(205,270);\n glVertex2f(210,250);\n glVertex2f(205,230);\n glVertex2f(190,220);\n glVertex2f(180,210);\n glVertex2f(150,200);\n glEnd();\n\n glBegin(GL_QUADS);\n glColor3f(0.3,0,0);\n glVertex2f(250,300);\n glVertex2f(300,300);\n glVertex2f(300,400);\n glVertex2f(250,400);\n glEnd();\n glBegin(GL_POLYGON);\n glColor3f(0,0.4,0.1);\n glVertex2f(250,400);\n glVertex2f(210,400);\n glVertex2f(190,415);\n glVertex2f(190,450);\n glVertex2f(200,500);\n glVertex2f(230,540);\n glVertex2f(260,560);\n glVertex2f(300,560);\n glVertex2f(325,530);\n glVertex2f(345,500);\n glVertex2f(360,450);\n glVertex2f(360,410);\n glVertex2f(330,400);\n glVertex2f(300,400);\n glEnd();\n}\n\nvoid draw_stone2()\n{\n int i;\n\tfloat rad;\n\tglColor3f(0.3,0.4,0.4);\n\tglBegin(GL_POLYGON);\n\tfor(i=0;i<360;i++)\n\t{\n\t\trad=i*3.14159/180;\n\t\tglVertex2f(cos(rad)*7,sin(rad)*7);\n\t}\n\tglEnd();\n}\n\nvoid draw_eyes2()\n{\n int i;\n\tfloat 
rad;\n\tglColor3f(1,1,1);\n\tglBegin(GL_POLYGON);\n\tfor(i=0;i<360;i++)\n\t{\n\t\trad=i*3.14159/180;\n\t\tglVertex2f(cos(rad)*4,sin(rad)*4);\n\t}\n\tglEnd();\n}\n\n//scene-2 end\n\n//scene-3 drawer start\nvoid draw_hill3()\n{\n\tglColor3f(0.6,0.3,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(0,600);\n\tglVertex2f(250,900);\n\tglVertex2f(500,600);\n\tglEnd();\n\tglColor3f(0.6,0.3,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(500,600);\n\tglVertex2f(750,900);\n\tglVertex2f(1000,600);\n\tglEnd();\n}\n\n\nvoid draw_sky3()\n{\n\tglColor3f(0.3,0.9,0.9);\n\tglBegin(GL_QUADS);\n\tglVertex2f(0,600);\n\tglVertex2f(0,1000);\n\tglVertex2f(1000,1000);\n\tglVertex2f(1000,600);\n\tglEnd();\n}\n\nvoid draw_vase3()\n{\n glColor3f(0.3,0.9,0.9);\n glBegin(GL_POLYGON);\n glVertex2f(700,140);\n glVertex2f(700,100);\n glVertex2f(750,50);\n glVertex2f(850,50);\n glVertex2f(900,100);\n glVertex2f(900,140);\n glVertex2f(700,140);\n glEnd();\n glColor3f(1,1,1);\n glBegin(GL_LINE_LOOP);\n glVertex2f(700,140);\n glVertex2f(700,200);\n glVertex2f(750,250);\n glVertex2f(750,300);\n glVertex2f(850,300);\n glVertex2f(850,250);\n glVertex2f(900,200);\n glVertex2f(900,140);\n glVertex2f(700,140);\n glEnd();\n}\n\nvoid draw_crow3()\n{\n glBegin(GL_POLYGON);\n glColor3f(0,0,0);\n glVertex2f(540,130);\n glVertex2f(545,140);\n glVertex2f(550,165);\n glVertex2f(555,185);\n glVertex2f(570,190);\n glVertex2f(590,190);\n glVertex2f(610,180);\n glVertex2f(620,170);\n glVertex2f(620,150);\n glVertex2f(580,125);\n glVertex2f(560,120);\n glVertex2f(540,127);\n glEnd();\n glBegin(GL_POLYGON);\n glVertex2f(620,150);\n glVertex2f(645,120);\n glVertex2f(620,120);\n glVertex2f(620,100);\n glVertex2f(595,130);\n glVertex2f(580,125);\n glEnd();\n glBegin(GL_POLYGON);\n glVertex2f(555,185);\n glVertex2f(540,195);\n glVertex2f(530,200);\n glVertex2f(450,200);\n glVertex2f(410,200);\n glVertex2f(425,180);\n glVertex2f(450,150);\n glVertex2f(480,135);\n glVertex2f(520,130);\n glVertex2f(540,130);\n glEnd();\n 
glBegin(GL_LINES);\n glColor3f(0,0,0);\n glVertex2f(480,135);\n glVertex2f(500,70);\n glEnd();\n glBegin(GL_LINES);\n glColor3f(0,0,0);\n glVertex2f(510,130);\n glVertex2f(530,70);\n glEnd();\n}\n\nvoid draw_tree3()\n{\n glBegin(GL_QUADS);\n glColor3f(0.3,0,0);\n glVertex2f(100,100);\n glVertex2f(150,100);\n glVertex2f(150,200);\n glVertex2f(100,200);\n glEnd();\n glBegin(GL_POLYGON);\n glColor3f(0,0.4,0.1);\n glVertex2f(100,200);\n glVertex2f(80,200);\n glVertex2f(60,210);\n glVertex2f(50,230);\n glVertex2f(50,250);\n glVertex2f(55,280);\n glVertex2f(65,310);\n glVertex2f(80,335);\n glVertex2f(100,360);\n glVertex2f(110,370);\n glVertex2f(135,370);\n glVertex2f(160,350);\n glVertex2f(180,330);\n glVertex2f(200,300);\n glVertex2f(205,270);\n glVertex2f(210,250);\n glVertex2f(205,230);\n glVertex2f(190,220);\n glVertex2f(180,210);\n glVertex2f(150,200);\n glEnd();\n\n glBegin(GL_QUADS);\n glColor3f(0.3,0,0);\n glVertex2f(250,300);\n glVertex2f(300,300);\n glVertex2f(300,400);\n glVertex2f(250,400);\n glEnd();\n glBegin(GL_POLYGON);\n glColor3f(0,0.4,0.1);\n glVertex2f(250,400);\n glVertex2f(210,400);\n glVertex2f(190,415);\n glVertex2f(190,450);\n glVertex2f(200,500);\n glVertex2f(230,540);\n glVertex2f(260,560);\n glVertex2f(300,560);\n glVertex2f(325,530);\n glVertex2f(345,500);\n glVertex2f(360,450);\n glVertex2f(360,410);\n glVertex2f(330,400);\n glVertex2f(300,400);\n glEnd();\n}\n\nvoid draw_stone3()\n{\n int i;\n\tfloat rad;\n\tglColor3f(0.3,0.4,0.4);\n\tglBegin(GL_POLYGON);\n\tfor(i=0;i<360;i++)\n\t{\n\t\trad=i*3.14159/180;\n\t\tglVertex2f(cos(rad)*7,sin(rad)*7);\n\t}\n\tglEnd();\n}\n\nvoid draw_eyes3()\n{\n int i;\n\tfloat rad;\n\tglColor3f(1,1,1);\n\tglBegin(GL_POLYGON);\n\tfor(i=0;i<360;i++)\n\t{\n\t\trad=i*3.14159/180;\n\t\tglVertex2f(cos(rad)*4,sin(rad)*4);\n\t}\n\tglEnd();\n}\n\n\n//scene-3 end\n\n//scene-4 drawer start\n\nvoid 
draw_hill4()\n{\n\tglColor3f(0.6,0.3,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(0,600);\n\tglVertex2f(250,900);\n\tglVertex2f(500,600);\n\tglEnd();\n\tglColor3f(0.6,0.3,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(500,600);\n\tglVertex2f(750,900);\n\tglVertex2f(1000,600);\n\tglEnd();\n}\n\nvoid DrawSegment4(float,float,float,int,int,int);\n\nvoid draw_sky4()\n{\n\tglColor3f(0.3,0.9,0.9);\n\tglBegin(GL_QUADS);\n\tglVertex2f(0,600);\n\tglVertex2f(0,1000);\n\tglVertex2f(1000,1000);\n\tglVertex2f(1000,600);\n\tglEnd();\n}\n\nvoid draw_vase4()\n{\n glColor3f(0.3,0.9,0.9);\n glBegin(GL_POLYGON);\n //glVertex2f(750,280);\n\n\n\n glVertex2f(750,50);\n glVertex2f(850,50);\n glVertex2f(900,100);\n glVertex2f(900,200);\n glVertex2f(850,250);\n glVertex2f(850,280);\n glVertex2f(750,280);\n glVertex2f(750,250);\n glVertex2f(700,200);\n glVertex2f(700,100);\n glEnd();\n glColor3f(1,1,1);\n glBegin(GL_LINE_LOOP);\n glVertex2f(750,280);\n glVertex2f(750,300);\n glVertex2f(850,300);\n glVertex2f(850,280);\n glEnd();\n}\n\nvoid draw_crow4()\n{\n glBegin(GL_POLYGON);\n glColor3f(0,0,0);\n //glVertex2f(750,300);\n //glVertex2f(735,305);\n //glVertex2f(725,315);\n glVertex2f(700,310);\n glVertex2f(680,300);\n glVertex2f(650,295);\n glVertex2f(600,290);\n glVertex2f(575,300);\n glVertex2f(540,315);\n glVertex2f(570,330);\n glVertex2f(605,350);\n glVertex2f(645,365);\n glVertex2f(680,380);\n glVertex2f(705,390);\n glVertex2f(730,400);\n glVertex2f(755,410);\n glVertex2f(760,390);\n glVertex2f(765,380);\n glVertex2f(770,370);\n glVertex2f(740,335);\n //glVertex2f(730,330);\n //glVertex2f(745,315);\n //glVertex2f(755,300);\n glEnd();\n glBegin(GL_POLYGON);\n glColor3f(0,0,0);\n glVertex2f(755,410);\n glVertex2f(770,412);\n glVertex2f(785,420);\n glVertex2f(800,425);\n glVertex2f(820,430);\n glVertex2f(830,428);\n glVertex2f(840,420);\n glVertex2f(845,410);\n glVertex2f(840,400);\n glVertex2f(837,390);\n glVertex2f(830,380);\n glVertex2f(825,375);\n glVertex2f(820,365);\n glVertex2f(815,340);\n 
glVertex2f(810,365);\n glVertex2f(800,375);\n glVertex2f(800,370);\n glVertex2f(790,370);\n glVertex2f(770,370);\n glVertex2f(760,390);\n glVertex2f(765,380);\n glEnd();\n glBegin(GL_LINES);\n glColor3f(0,0,0);\n glVertex2f(730,330);\n glVertex2f(755,300);\n glEnd();\n glBegin(GL_LINES);\n glColor3f(0,0,0);\n glVertex2f(750,300);\n glVertex2f(710,325);\n glEnd();\n}\n\nvoid draw_tree4()\n{\n glBegin(GL_QUADS);\n glColor3f(0.3,0,0);\n glVertex2f(100,100);\n glVertex2f(150,100);\n glVertex2f(150,200);\n glVertex2f(100,200);\n glEnd();\n glBegin(GL_POLYGON);\n glColor3f(0,0.4,0.1);\n glVertex2f(100,200);\n glVertex2f(80,200);\n glVertex2f(60,210);\n glVertex2f(50,230);\n glVertex2f(50,250);\n glVertex2f(55,280);\n glVertex2f(65,310);\n glVertex2f(80,335);\n glVertex2f(100,360);\n glVertex2f(110,370);\n glVertex2f(135,370);\n glVertex2f(160,350);\n glVertex2f(180,330);\n glVertex2f(200,300);\n glVertex2f(205,270);\n glVertex2f(210,250);\n glVertex2f(205,230);\n glVertex2f(190,220);\n glVertex2f(180,210);\n glVertex2f(150,200);\n glEnd();\n\n glBegin(GL_QUADS);\n glColor3f(0.3,0,0);\n glVertex2f(250,300);\n glVertex2f(300,300);\n glVertex2f(300,400);\n glVertex2f(250,400);\n glEnd();\n glBegin(GL_POLYGON);\n glColor3f(0,0.4,0.1);\n glVertex2f(250,400);\n glVertex2f(210,400);\n glVertex2f(190,415);\n glVertex2f(190,450);\n glVertex2f(200,500);\n glVertex2f(230,540);\n glVertex2f(260,560);\n glVertex2f(300,560);\n glVertex2f(325,530);\n glVertex2f(345,500);\n glVertex2f(360,450);\n glVertex2f(360,410);\n glVertex2f(330,400);\n glVertex2f(300,400);\n glEnd();\n}\n\nvoid draw_stone4()\n{\n int i;\n\tfloat rad;\n\tglColor3f(0.3,0.4,0.4);\n\tglBegin(GL_POLYGON);\n\tfor(i=0;i<360;i++)\n\t{\n\t\trad=i*3.14159/180;\n\t\tglVertex2f(cos(rad)*7,sin(rad)*7);\n\t}\n\tglEnd();\n}\n\nvoid draw_eyes4()\n{\n int i;\n\tfloat 
rad;\n\tglColor3f(1,1,1);\n\tglBegin(GL_POLYGON);\n\tfor(i=0;i<360;i++)\n\t{\n\t\trad=i*3.14159/180;\n\t\tglVertex2f(cos(rad)*4,sin(rad)*4);\n\t}\n\tglEnd();\n}\n\n//scene-4 end\n\n\n//intro project details\nvoid scene1(){\n\n\t glColor3f(0.2,0.1,0.5);\n\t glBegin(GL_POLYGON);\n\t glVertex2f(0,0);\n\t glVertex2f(0,1000);\n\t glVertex2f(1200,1000);\n\t glVertex2f(1200,0);\n\t glEnd();\n\t\n\t\tglColor3f(0.5,0.2,0.6);\n \tglPushMatrix();\n \tglTranslatef(0,200,0);\n \twrite_text(300,130,\"SAHYADRI COLLEGE OF ENGINEERING AND MANAGEMENT\",fonts[3]);\n glColor3f(0.3,0.5,0.8);\n write_text(375,100,\"DEPT. OF COMPUTER SCIENCE & ENGG.\",fonts[0]);\n glColor3f(0.3,0.5,0.8);\n write_text(350,00,\"UNDER THE GUIDENCE OF -----------\",fonts[3]);\n write_text(480,-50,\"--------- PROFEESSOR \",fonts[1]);\n glColor3f(0.8,0.1,0.2);\n write_text(450,600,\"CROW-project title\",fonts[2]);\n glColor3f(1.0,0.0,1.0);\n write_text(450,500,\"SUBMITTED BY :\",fonts[0]);\n glColor3f(0.3,0.5,0.8);\n write_text(225,450,\"Name1\",fonts[3]);\n write_text(670,450,\"Name2\",fonts[3]);\n write_text(180,300,\"\",fonts[3]);\n write_text(220,400,\"USN1\",fonts[0]);\n write_text(680,400,\"USN2\",fonts[0]);\n\n write_text(380,200,\"[ PRESS ANY KEY TO CONTINUE ]\",fonts[3]);\n\t glPopMatrix();\n\t \n\t //project deatils\n \t glFlush();\n}\n\n//scene-1 [crow flying]\nvoid scene2() {\n\tglClearColor(0.2,0.8,0.1,1);\n\tdraw_hill();\n\tdraw_home();\n\tdraw_tree();\n\tdraw_sky();\n\tdraw_path();\n\t\n\tglPushMatrix();\n\tglTranslatef(-600+startx++,0,0);\n\tdraw_crow();\n\tglPushMatrix();\n\tglTranslatef(620,890,0);\n draw_eyes();\n glPopMatrix();\n glPopMatrix(); \n \n if(startx>900){\n \tstate++;\n \tstartx=0;\n }\n \n glFlush();\n}\n\n//scene-2 \nvoid scene3(){\n draw_hill2();\n\tdraw_sky2();\n\tdraw_tree2();\n\tglPushMatrix();\n\tglTranslatef(950,190,0);\n\tdraw_stone2();\n\tglPopMatrix();\n glPushMatrix();\n\tglTranslatef(960,100,0);\n\tdraw_stone2();\n\tglPopMatrix();\n 
glPushMatrix();\n\tglTranslatef(920,30,0);\n\tdraw_stone2();\n\tglPopMatrix();\n\tglPushMatrix();\n\tglTranslatef(800,10,0);\n\tdraw_stone2();\n\tglPopMatrix();\n\tglPushMatrix();\n\tglTranslatef(700,15,0);\n\tdraw_stone2();\n\tglPopMatrix();\n\tglPushMatrix();\n\tglTranslatef(650,30,0);\n\tdraw_stone2();\n\tglPopMatrix();\n glPushMatrix();\n\tglTranslatef(680,70,0);\n\tdraw_stone2();\n\tglPopMatrix();\n glPushMatrix();\n\tglTranslatef(620,100,0);\n\tdraw_stone2();\n\tglPopMatrix();\n glPushMatrix();\n\tglTranslatef(570,10,0);\n\tdraw_stone2();\n\tglPopMatrix();\n draw_vase2();\n\n draw_crow2();\n glPushMatrix();\n\tglTranslatef(815,400,0);\n\tdraw_eyes2();\n\tglPopMatrix();\n}\n\nvoid scene4(){\n\tdraw_hill3();\n\tdraw_sky3();\n\n\n\tdraw_tree3();\n\tglPushMatrix();\n\tglTranslatef(950,190,0);\n\tdraw_stone3();\n\tglPopMatrix();\n glPushMatrix();\n\tglTranslatef(960,100,0);\n\tdraw_stone3();\n\tglPopMatrix();\n glPushMatrix();\n\tglTranslatef(920,30,0);\n\tdraw_stone3();\n\tglPopMatrix();\n\tglPushMatrix();\n\tglTranslatef(800,10,0);\n\tdraw_stone3();\n\tglPopMatrix();\n\tglPushMatrix();\n\tglTranslatef(700,15,0);\n\tdraw_stone3();\n\tglPopMatrix();\n\tglPushMatrix();\n\tglTranslatef(650,30,0);\n\tdraw_stone3();\n\tglPopMatrix();\n glPushMatrix();\n\tglTranslatef(680,70,0);\n\tdraw_stone3();\n\tglPopMatrix();\n glPushMatrix();\n\tglTranslatef(630,100,0);\n\tdraw_stone3();\n\tglPopMatrix();\n glPushMatrix();\n\tglTranslatef(570,10,0);\n\tdraw_stone3();\n\tglPopMatrix();\n\tglPushMatrix();\n\tglTranslatef(770,60,0);\n\tdraw_stone3();\n\tglPopMatrix();\n glPushMatrix();\n\tglTranslatef(790,60,0);\n\tdraw_stone3();\n\tglPopMatrix();\n draw_vase3();\n \n\tdraw_crow3();\n\tglPushMatrix();\n\tglTranslatef(590,160,0);\n\tdraw_eyes3();\n\tglPopMatrix();\n}\n\nvoid scene5(){\n\tdraw_hill4();\n\tdraw_sky4();\n\n\tdraw_tree4();\n\tglPushMatrix();\n\tglTranslatef(950,190,0);\n\tdraw_stone4();\n\tglPopMatrix();\n 
glPushMatrix();\n\tglTranslatef(960,100,0);\n\tdraw_stone4();\n\tglPopMatrix();\n glPushMatrix();\n\tglTranslatef(920,30,0);\n\tdraw_stone4();\n\tglPopMatrix();\n\tglPushMatrix();\n\tglTranslatef(800,10,0);\n\tdraw_stone4();\n\tglPopMatrix();\n\tglPushMatrix();\n\tglTranslatef(700,15,0);\n\tdraw_stone4();\n\tglPopMatrix();\n\tglPushMatrix();\n\tglTranslatef(650,30,0);\n\tdraw_stone4();\n\tglPopMatrix();\n glPushMatrix();\n\tglTranslatef(680,70,0);\n\tdraw_stone4();\n\tglPopMatrix();\n glPushMatrix();\n\tglTranslatef(620,100,0);\n\tdraw_stone4();\n\tglPopMatrix();\n glPushMatrix();\n\tglTranslatef(570,10,0);\n\tdraw_stone4();\n\tglPopMatrix();\n\tglPushMatrix();\n\tglTranslatef(810,60,0);\n\tdraw_stone4();\n\tglPopMatrix();\n glPushMatrix();\n\tglTranslatef(820,60,0);\n\tdraw_stone4();\n\tglPopMatrix();\n\tglPushMatrix();\n\tglTranslatef(760,60,0);\n\tdraw_stone4();\n\tglPopMatrix();\n\tglPushMatrix();\n\tglTranslatef(770,60,0);\n\tdraw_stone4();\n\tglPopMatrix();\n\tglPushMatrix();\n\tglTranslatef(780,60,0);\n\tdraw_stone4();\n\tglPopMatrix();\n\tglPushMatrix();\n\tglTranslatef(790,60,0);\n\tdraw_stone4();\n\tglPopMatrix();\n\tglPushMatrix();\n\tglTranslatef(800,60,0);\n\tdraw_stone4();\n\tglPopMatrix();\n\tglPushMatrix();\n\tglTranslatef(830,60,0);\n\tdraw_stone4();\n\tglPopMatrix();\n\tglPushMatrix();\n\tglTranslatef(840,60,0);\n\tdraw_stone4();\n\tglPopMatrix();\n\tglPushMatrix();\n\tglTranslatef(770,70,0);\n\tdraw_stone4();\n\tglPopMatrix();\n\tglPushMatrix();\n\tglTranslatef(780,70,0);\n\tdraw_stone4();\n\tglPopMatrix();\n\tglPushMatrix();\n\tglTranslatef(790,70,0);\n\tdraw_stone4();\n\tglPopMatrix();\n\tglPushMatrix();\n\tglTranslatef(800,70,0);\n\tdraw_stone4();\n\tglPopMatrix();\n\tglPushMatrix();\n\tglTranslatef(810,70,0);\n\tdraw_stone4();\n\tglPopMatrix();\n\tglPushMatrix();\n\tglTranslatef(820,70,0);\n\tdraw_stone4();\n\tglPopMatrix();\n\tglPushMatrix();\n\tglTranslatef(820,70,0);\n\tdraw_stone4();\n\tglPopMatrix();\n\tglPushMatrix();\n\tglTranslatef(830
,70,0);\n\tdraw_stone4();\n\tglPopMatrix();\n\tglPushMatrix();\n\tglTranslatef(840,70,0);\n\tdraw_stone4();\n\tglPopMatrix();\n\tglPushMatrix();\n\tglTranslatef(780,80,0);\n\tdraw_stone4();\n\tglPopMatrix();\n\tglPushMatrix();\n\tglTranslatef(790,80,0);\n\tdraw_stone4();\n\tglPopMatrix();\n\tglPushMatrix();\n\tglTranslatef(800,80,0);\n\tdraw_stone4();\n\tglPopMatrix();\n\tglPushMatrix();\n\tglTranslatef(810,80,0);\n\tdraw_stone4();\n\tglPopMatrix();\n\tglPushMatrix();\n\tglTranslatef(820,80,0);\n\tdraw_stone4();\n\tglPopMatrix();\n\tglPushMatrix();\n\tglTranslatef(820,80,0);\n\tdraw_stone4();\n\tglPopMatrix();\n\tglPushMatrix();\n\tglTranslatef(830,80,0);\n\tdraw_stone4();\n\tglPopMatrix();\n\tglPushMatrix();\n\tglTranslatef(790,90,0);\n\tdraw_stone4();\n\tglPopMatrix();\n\tglPushMatrix();\n\tglTranslatef(800,90,0);\n\tdraw_stone4();\n\tglPopMatrix();\n\tglPushMatrix();\n\tglTranslatef(810,90,0);\n\tdraw_stone4();\n\tglPopMatrix();\n\tglPushMatrix();\n\tglTranslatef(820,90,0);\n\tdraw_stone4();\n\tglPopMatrix();\n\tglPushMatrix();\n\tglTranslatef(820,90,0);\n\tdraw_stone4();\n\tglPopMatrix();\n draw_vase4();\n \n\tdraw_crow4();\n\t\n\tglPushMatrix();\n\tglTranslatef(815,400,0);\n\tdraw_eyes4();\n\tglPopMatrix();\n}\n\nvoid end(){\n glColor3f(0.2,0.1,0.5);\n glBegin(GL_POLYGON);\n glVertex2f(0,0);\n glVertex2f(0,1000);\n glVertex2f(1200,1000);\n glVertex2f(1200,0);\n glEnd();\n\n showStory();\n glColor3f(1,1,1);\n write_text(470,500,\"The End\",fonts[2]);\n}\n\n//GL display function\nvoid display()\n{\n\n\tglClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT);\n\tglLoadIdentity();\n\n\n\tif(state==0){\n\t\tscene1();\n\t}\n\n\tif(state==1){\n\t\tscene2();\n\n\t}\n\n if(state==2){\n scene3();\n }\n\n\n if(state==3){\n scene4();\n }\n \n if(state==4){\n scene5();\n }\n \n if(state==5){\n end();\n }\n if(state==6){\n exit(0);\n }\n\t\n\t\n glutSwapBuffers();\n glutPostRedisplay();\n}\n//main fucntion\nint main(int argc,char **argv)\n {\n 
glutInit(&argc,argv);\n\tglutInitDisplayMode(GLUT_DOUBLE|GLUT_RGB);\n\tglutInitWindowSize(1000,1000);\n\tglutInitWindowPosition(0, 0);\n\tglutCreateWindow(\"Project Name\");\n\tglutDisplayFunc(display);\n glutKeyboardFunc(keyboard);\n init();\n\tglutMainLoop();\n}\n"
},
{
"alpha_fraction": 0.4316072165966034,
"alphanum_fraction": 0.5906203985214233,
"avg_line_length": 16.85714340209961,
"blob_id": "270e2f5219a95e5e66a6af67323a5faf2f2b0328",
"content_id": "063d7e8bd7a9187613a1542ded0e3c4bb88895b0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 4094,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 217,
"path": "/crow/scenes/main.cpp",
"repo_name": "nishanb/CG-College-Projects",
"src_encoding": "UTF-8",
"text": "#include<GL/glut.h>\r\n#include<math.h>\r\nvoid draw_hill()\r\n{\r\n\tglColor3f(0.6,0.3,0);\r\n\tglBegin(GL_POLYGON);\r\n\tglVertex2f(0,600);\r\n\tglVertex2f(250,900);\r\n\tglVertex2f(500,600);\r\n\tglEnd();\r\n\tglColor3f(0.6,0.3,0);\r\n\tglBegin(GL_POLYGON);\r\n\tglVertex2f(500,600);\r\n\tglVertex2f(750,900);\r\n\tglVertex2f(1000,600);\r\n\tglEnd();\r\n}\r\n\r\nvoid draw_sky()\r\n{\r\n\tglColor3f(0.3,0.9,0.9);\r\n\tglBegin(GL_QUADS);\r\n\tglVertex2f(0,600);\r\n\tglVertex2f(0,1000);\r\n\tglVertex2f(1000,1000);\r\n\tglVertex2f(1000,600);\r\n\tglEnd();\r\n}\r\nvoid draw_crow()\r\n{\r\n glColor3f(0,0,0);\r\n glBegin(GL_POLYGON);\r\n\r\n//glVertex2f(400,800);\r\n glVertex2f(400,840);\r\n glVertex2f(380,850);\r\n glVertex2f(370,860);\r\n glVertex2f(380,870);\r\n\r\n //glVertex2f(450,830);\r\n //glVertex2f(400,910);\r\n //glVertex2f(450,890);\r\n glVertex2f(500,880);\r\n glVertex2f(470,930);\r\n glVertex2f(520,950);\r\n glVertex2f(540,950);\r\n glVertex2f(590,930);\r\n glVertex2f(590,920);\r\n glVertex2f(570,880);\r\n glVertex2f(600,900);\r\n glVertex2f(620,910);\r\n glVertex2f(630,910);\r\n glVertex2f(640,890);\r\n glVertex2f(660,880);\r\n glVertex2f(640,860);\r\n glVertex2f(600,840);\r\n glVertex2f(560,810);\r\n glVertex2f(500,800);\r\n glVertex2f(450,810);\r\n //glVertex2f(400,800);\r\n glEnd();\r\n\r\n}\r\nvoid draw_home()\r\n{\r\n glBegin(GL_QUADS);\r\n glColor3f(1,1,1);\r\n glVertex2f(780,50);\r\n glVertex2f(820,50);\r\n glVertex2f(820,100);\r\n glVertex2f(780,100);\r\n glEnd();\r\n glBegin(GL_POLYGON);\r\n glColor3f(0.9,1,0);\r\n glVertex2f(730,50);\r\n glVertex2f(870,50);\r\n glVertex2f(870,150);\r\n glVertex2f(730,150);\r\n glEnd();\r\n glBegin(GL_POLYGON);\r\n glColor3f(0.7,0.6,0.2);\r\n glVertex2f(700,150);\r\n glVertex2f(900,150);\r\n glVertex2f(800,235);\r\n glEnd();\r\n glBegin(GL_QUADS);\r\n glColor3f(1,1,1);\r\n glVertex2f(300,400);\r\n glVertex2f(350,400);\r\n glVertex2f(350,460);\r\n glVertex2f(300,460);\r\n glEnd();\r\n 
glBegin(GL_POLYGON);\r\n glColor3f(0.9,1,0);\r\n glVertex2f(250,400);\r\n glVertex2f(400,400);\r\n glVertex2f(400,500);\r\n glVertex2f(250,500);\r\n glEnd();\r\n glBegin(GL_POLYGON);\r\n glColor3f(0.7,0.6,0.2);\r\n glVertex2f(230,500);\r\n glVertex2f(420,500);\r\n glVertex2f(325,580);\r\n glEnd();\r\n\r\n}\r\n\r\nvoid draw_tree()\r\n{\r\n glBegin(GL_QUADS);\r\n glColor3f(0.3,0,0.1);\r\n glVertex2f(310,60);\r\n glVertex2f(340,60);\r\n glVertex2f(340,100);\r\n glVertex2f(310,100);\r\n glEnd();\r\n glBegin(GL_POLYGON);\r\n glColor3f(0.2,0.4,0.1);\r\n glVertex2f(300,150);\r\n glVertex2f(270,100);\r\n glVertex2f(380,100);\r\n glVertex2f(350,150);\r\n glEnd();\r\n glBegin(GL_POLYGON);\r\n glColor3f(0.2,0.4,0.1);\r\n glVertex2f(270,150);\r\n glVertex2f(380,150);\r\n glVertex2f(325,220);\r\n glEnd();\r\n}\r\n\r\nvoid draw_eyes()\r\n{\r\n int i;\r\n\tfloat rad;\r\n\tglColor3f(1,1,1);\r\n\tglBegin(GL_POLYGON);\r\n\tfor(i=0;i<360;i++)\r\n\t{\r\n\t\trad=i*3.14159/180;\r\n\t\tglVertex2f(cos(rad)*4,sin(rad)*4);\r\n\t}\r\n\tglEnd();\r\n}\r\nvoid draw_path()\r\n{\r\n glBegin(GL_POLYGON);\r\n glColor3f(0.8,0.6,0.1);\r\n glVertex2f(0,250);\r\n glVertex2f(0,350);\r\n glVertex2f(1000,350);\r\n glVertex2f(1000,250);\r\n glEnd();\r\n}\r\n\r\n\r\nvoid scene1()\r\n{\r\n\tdraw_hill();\r\n\r\n\tdraw_home();\r\n\tdraw_tree();\r\n\r\n draw_path();\r\n\tglPushMatrix();\r\n\tglTranslatef(620,890,0);\r\n draw_eyes();\r\n glPopMatrix();\r\n draw_crow();\r\n draw_sky();\r\n\t//draw_pond();\r\n\r\n\r\n\r\n}\r\n\r\n\r\nvoid init()\r\n{\r\n\tglMatrixMode(GL_PROJECTION);\r\n\tglLoadIdentity();\r\n\tgluOrtho2D(0,1000,0,1000);\r\n\tglMatrixMode(GL_MODELVIEW);\r\n\tglLoadIdentity();\r\n}\r\n\r\nvoid display()\r\n{\r\n\tglClearColor(0.2,0.8,0.1,1);\r\n\tglClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT);\r\n\r\n\tscene1();\r\n\r\n\t//scene3();\r\n\t//scene4();\r\n\t//scene5();\r\n\t//scene6();\r\n\r\n\tglFlush();\r\n\tglutSwapBuffers();\r\n}\r\n\r\nint main(int argc,char 
**argv)\r\n{\r\n\tglutInit(&argc,argv);\r\n\tglutInitDisplayMode(GLUT_DOUBLE|GLUT_RGB|GLUT_DEPTH);\r\n\t//glutInitWindowPosition(50,100);\r\n\tglutInitWindowSize(1000,1000);\r\n\tglutCreateWindow(\"First scene\");\r\n\tinit();\r\n\tglEnable(GL_DEPTH_TEST);\r\n\r\n\tglutDisplayFunc(display);\r\n\t//glutTimerFunc(25,time,0);\r\n\tglutMainLoop();\r\n}\r\n\r\n"
},
{
"alpha_fraction": 0.4499923586845398,
"alphanum_fraction": 0.6235527992248535,
"avg_line_length": 20.80978775024414,
"blob_id": "d089139488baffb2e1f8fd807ff6f023ef441588",
"content_id": "f6ea27729f3a0c73ddf06b95ab32ff30ad4e171c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 19607,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 899,
"path": "/newton/main.c",
"repo_name": "nishanb/CG-College-Projects",
"src_encoding": "UTF-8",
"text": "#include<GL/glut.h>\n#include<stdio.h>\n#include<string.h>\n\nint k;\nfloat c;\n\nvoid myinit() {\n glClearColor(0.0 f, 0.0 f, 0.0 f, 1.0 f);\n glMatrixMode(GL_PROJECTION);\n glLoadIdentity();\n gluOrtho2D(0, 1200, 0, 1000);\n glMatrixMode(GL_MODELVIEW);\n}\n\nvoid mydisplay4() {\n glClear(GL_COLOR_BUFFER_BIT);\n glColor3f(0, 1, 0);\n glBegin(GL_POLYGON);\n glVertex2i(0, 400);\n glVertex2i(1200, 400); //grass\n glVertex2i(1200, 0);\n glVertex2i(0, 0);\n glEnd();\n glFlush();\n\n glColor3f(0, 1, 1);\n glBegin(GL_POLYGON);\n glVertex2i(0, 1000);\n glVertex2i(1200, 1000); //sky\n glVertex2i(1200, 400);\n glVertex2i(0, 400);\n glEnd();\n glFlush();\n\n glColor3f(0.9, 0.4, 0);\n glBegin(GL_POLYGON);\n glVertex2i(150, 125);\n glVertex2i(150, 325); //root tree\n glVertex2i(175, 300);\n glVertex2i(200, 325);\n glVertex2i(200, 125);\n glVertex2i(225, 100);\n glVertex2i(125, 100);\n glEnd();\n glFlush();\n\n glColor3f(0, 0.5, 0);\n glBegin(GL_POLYGON);\n glVertex2i(150, 285);\n glVertex2i(130, 315);\n glVertex2i(125, 300);\n glVertex2i(90, 335);\n glVertex2i(100, 360);\n glVertex2i(80, 385);\n glVertex2i(85, 420);\n glVertex2i(95, 420);\n glVertex2i(90, 500);\n glVertex2i(100, 475);\n glVertex2i(110, 480);\n glVertex2i(100, 500);\n glVertex2i(110, 525);\n glVertex2i(120, 520);\n glVertex2i(140, 550);\n glVertex2i(110, 575);\n glVertex2i(155, 565);\n glVertex2i(145, 595);\n glVertex2i(195, 575);\n glVertex2i(190, 590);\n glVertex2i(215, 575);\n glVertex2i(215, 550);\n glVertex2i(230, 565);\n glVertex2i(245, 540);\n glVertex2i(265, 550);\n glVertex2i(280, 525);\n glVertex2i(275, 510);\n glVertex2i(290, 500);\n glVertex2i(300, 475);\n glVertex2i(315, 455);\n glVertex2i(315, 430);\n glVertex2i(295, 425);\n glVertex2i(290, 400); //tree\n glVertex2i(275, 380);\n glVertex2i(265, 350);\n glVertex2i(265, 320);\n glVertex2i(245, 300);\n glVertex2i(240, 315);\n glVertex2i(230, 305);\n glVertex2i(230, 295);\n glVertex2i(210, 270);\n glVertex2i(200, 305);\n glEnd();\n 
glFlush();\n\n glColor3f(1, 0, 0);\n glBegin(GL_POLYGON);\n glVertex2i(225, 280);\n glVertex2i(215, 270);\n glVertex2i(215, 255);\n glVertex2i(225, 250);\n glVertex2i(245, 250); //apple down\n glVertex2i(255, 260);\n glVertex2i(255, 275);\n glVertex2i(245, 280);\n glEnd();\n glFlush();\n\n glColor3f(0.9, 0.4, 0);\n glBegin(GL_LINES);\n glVertex2i(230, 295);\n glVertex2i(230, 280);\n glEnd();\n glFlush();\n\n glColor3f(1, 0, 0);\n glBegin(GL_POLYGON);\n glVertex2i(300, 410);\n glVertex2i(290, 400);\n glVertex2i(290, 380);\n glVertex2i(300, 370);\n glVertex2i(325, 370); //apple right\n glVertex2i(340, 380);\n glVertex2i(340, 400);\n glVertex2i(330, 410);\n glEnd();\n glFlush();\n\n glColor3f(0.9, 0.4, 0);\n glBegin(GL_LINES);\n glVertex2i(310, 430);\n glVertex2i(310, 410);\n glEnd();\n glFlush();\n\n glColor3f(1, 0, 0);\n glBegin(GL_POLYGON);\n glVertex2i(230, 525);\n glVertex2i(220, 520);\n glVertex2i(220, 500);\n glVertex2i(230, 495);\n glVertex2i(250, 495);\n glVertex2i(255, 505); //apple up\n glVertex2i(255, 515);\n glVertex2i(250, 525);\n glEnd();\n glFlush();\n\n glColor3f(0.9, 0.4, 0);\n glBegin(GL_LINES);\n glVertex2i(240, 525);\n glVertex2i(240, 545);\n glEnd();\n glFlush();\n\n glColor3f(1, 0, 0);\n glBegin(GL_POLYGON);\n glVertex2i(95, 470);\n glVertex2i(85, 460);\n glVertex2i(85, 445);\n glVertex2i(95, 440);\n glVertex2i(120, 440); //apple left\n glVertex2i(125, 450);\n glVertex2i(125, 460);\n glVertex2i(120, 470);\n glEnd();\n glFlush();\n\n glColor3f(0.9, 0.4, 0);\n glBegin(GL_LINES);\n glVertex2i(105, 480);\n glVertex2i(105, 470);\n glEnd();\n glFlush();\n\n glColor3f(1, 1, 0);\n glBegin(GL_POLYGON);\n glVertex2i(225, 225);\n glVertex2i(215, 205);\n glVertex2i(215, 190);\n glVertex2i(225, 180); //sitting\n glVertex2i(250, 180);\n glVertex2i(255, 185);\n glVertex2i(250, 190);\n glVertex2i(255, 190); //face\n glVertex2i(255, 200);\n glVertex2i(250, 200);\n glVertex2i(250, 215);\n glEnd();\n glFlush();\n\n glColor3f(1, 1, 0);\n glBegin(GL_POLYGON);\n 
glVertex2i(225, 180);\n glVertex2i(210, 165); //body\n glVertex2i(210, 125);\n glVertex2i(265, 125);\n glVertex2i(265, 160);\n glVertex2i(250, 180);\n glEnd();\n glFlush();\n\n glColor3f(1, 1, 0);\n glBegin(GL_POLYGON);\n glVertex2i(220, 170);\n glVertex2i(215, 165); //hand front\n glVertex2i(270, 135);\n glVertex2i(275, 140);\n glEnd();\n glFlush();\n\n glColor3f(1, 1, 0);\n glBegin(GL_POLYGON);\n glVertex2i(260, 165);\n glVertex2i(270, 165); //hand back\n glVertex2i(275, 165);\n glVertex2i(255, 175);\n glEnd();\n glFlush();\n\n glColor3f(1, 1, 0);\n glBegin(GL_POLYGON);\n glVertex2i(265, 130);\n glVertex2i(265, 140); //leg\n glVertex2i(280, 140);\n glVertex2i(285, 150);\n glVertex2i(285, 145);\n glVertex2i(295, 145);\n glVertex2i(295, 140);\n glVertex2i(280, 145);\n glEnd();\n glFlush();\n\n glColor3f(0.9, 0.7, 0.4);\n glBegin(GL_POLYGON);\n glVertex2i(265, 160);\n glVertex2i(265, 150); //leg\n glVertex2i(290, 170);\n glVertex2i(290, 165);\n glVertex2i(295, 160);\n glVertex2i(295, 155);\n glVertex2i(255, 140);\n glVertex2i(255, 145);\n glEnd();\n glFlush();\n\n glColor3f(1, 0, 0);\n glBegin(GL_POLYGON);\n glVertex2i(320, 320);\n glVertex2i(320, 335);\n glVertex2i(325, 340);\n glVertex2i(335, 340);\n glVertex2i(345, 340);\n glVertex2i(350, 335); //apple\n glVertex2i(350, 320);\n glVertex2i(345, 315);\n glVertex2i(325, 315);\n glEnd();\n glFlush();\n\n glBegin(GL_LINES);\n glVertex2i(335, 335);\n glVertex2i(335, 350);\n glEnd();\n glFlush();\n\n}\n\nvoid keys3(unsigned char key3, int x, int y) {\n if (key3 == 'E' || key3 == 'e') {\n glutInitDisplayMode(GLUT_SINGLE);\n glutInitWindowSize(1200, 1000);\n glutInitWindowPosition(10, 10);\n glutCreateWindow(\"4TH PAGE\");\n glutDisplayFunc(mydisplay4);\n myinit();\n\n glutMainLoop();\n }\n\n}\n\nvoid mydisplay3() {\n glClear(GL_COLOR_BUFFER_BIT);\n\n glColor3f(0, 1, 0);\n glBegin(GL_POLYGON);\n glVertex2i(0, 400);\n glVertex2i(1200, 400); //grass\n glVertex2i(1200, 0);\n glVertex2i(0, 0);\n glEnd();\n glFlush();\n\n 
glColor3f(0, 1, 1);\n glBegin(GL_POLYGON);\n glVertex2i(0, 1000);\n glVertex2i(1200, 1000); //sky\n glVertex2i(1200, 400);\n glVertex2i(0, 400);\n glEnd();\n glFlush();\n\n glColor3f(0.9, 0.4, 0);\n glBegin(GL_POLYGON);\n glVertex2i(150, 125);\n glVertex2i(150, 325); //root tree\n glVertex2i(175, 300);\n glVertex2i(200, 325);\n glVertex2i(200, 125);\n glVertex2i(225, 100);\n glVertex2i(125, 100);\n glEnd();\n glFlush();\n\n glColor3f(0, 0.5, 0);\n glBegin(GL_POLYGON);\n glVertex2i(150, 285);\n glVertex2i(130, 315);\n glVertex2i(125, 300);\n glVertex2i(90, 335);\n glVertex2i(100, 360);\n glVertex2i(80, 385);\n glVertex2i(85, 420);\n glVertex2i(95, 420);\n glVertex2i(90, 500);\n glVertex2i(100, 475);\n glVertex2i(110, 480);\n glVertex2i(100, 500);\n glVertex2i(110, 525);\n glVertex2i(120, 520);\n glVertex2i(140, 550);\n glVertex2i(110, 575);\n glVertex2i(155, 565);\n glVertex2i(145, 595);\n glVertex2i(195, 575);\n glVertex2i(190, 590);\n glVertex2i(215, 575);\n glVertex2i(215, 550);\n glVertex2i(230, 565);\n glVertex2i(245, 540);\n glVertex2i(265, 550);\n glVertex2i(280, 525);\n glVertex2i(275, 510);\n glVertex2i(290, 500);\n glVertex2i(300, 475);\n glVertex2i(315, 455);\n glVertex2i(315, 430);\n glVertex2i(295, 425);\n glVertex2i(290, 400); //tree\n glVertex2i(275, 380);\n glVertex2i(265, 350);\n glVertex2i(265, 320);\n glVertex2i(245, 300);\n glVertex2i(240, 315);\n glVertex2i(230, 305);\n glVertex2i(230, 295);\n glVertex2i(210, 270);\n glVertex2i(200, 305);\n glEnd();\n glFlush();\n\n glColor3f(1, 0, 0);\n glBegin(GL_POLYGON);\n glVertex2i(225, 280);\n glVertex2i(215, 270);\n glVertex2i(215, 255);\n glVertex2i(225, 250);\n glVertex2i(245, 250); //apple down\n glVertex2i(255, 260);\n glVertex2i(255, 275);\n glVertex2i(245, 280);\n glEnd();\n glFlush();\n\n glColor3f(0.9, 0.4, 0);\n glBegin(GL_LINES);\n glVertex2i(230, 295);\n glVertex2i(230, 280);\n glEnd();\n glFlush();\n\n glColor3f(1, 0, 0);\n glBegin(GL_POLYGON);\n glVertex2i(300, 410);\n glVertex2i(290, 
400);\n glVertex2i(290, 380);\n glVertex2i(300, 370);\n glVertex2i(325, 370); //apple right\n glVertex2i(340, 380);\n glVertex2i(340, 400);\n glVertex2i(330, 410);\n glEnd();\n glFlush();\n\n glColor3f(0.9, 0.4, 0);\n glBegin(GL_LINES);\n glVertex2i(310, 430);\n glVertex2i(310, 410);\n glEnd();\n glFlush();\n\n glColor3f(1, 0, 0);\n glBegin(GL_POLYGON);\n glVertex2i(230, 525);\n glVertex2i(220, 520);\n glVertex2i(220, 500);\n glVertex2i(230, 495);\n glVertex2i(250, 495);\n glVertex2i(255, 505); //apple up\n glVertex2i(255, 515);\n glVertex2i(250, 525);\n glEnd();\n glFlush();\n\n glColor3f(0.9, 0.4, 0);\n glBegin(GL_LINES);\n glVertex2i(240, 525);\n glVertex2i(240, 545);\n glEnd();\n glFlush();\n\n glColor3f(1, 0, 0);\n glBegin(GL_POLYGON);\n glVertex2i(95, 470);\n glVertex2i(85, 460);\n glVertex2i(85, 445);\n glVertex2i(95, 440);\n glVertex2i(120, 440); //apple left\n glVertex2i(125, 450);\n glVertex2i(125, 460);\n glVertex2i(120, 470);\n glEnd();\n glFlush();\n\n glColor3f(0.9, 0.4, 0);\n glBegin(GL_LINES);\n glVertex2i(105, 480);\n glVertex2i(105, 470);\n glEnd();\n glFlush();\n\n glColor3f(0, 0, 0);\n glBegin(GL_POLYGON);\n glVertex2i(850, 100); //left leg\n glVertex2i(850, 80);\n glVertex2i(910, 80);\n glVertex2i(910, 100);\n glEnd();\n glFlush();\n\n glColor3f(0, 0, 0);\n glBegin(GL_POLYGON);\n glVertex2i(935, 200);\n glVertex2i(875, 100); //left leg\n glVertex2i(910, 100);\n glVertex2i(960, 200);\n glEnd();\n glFlush();\n\n glColor3f(0, 0, 0);\n glBegin(GL_POLYGON);\n glVertex2i(970, 80);\n glVertex2i(970, 100); //right leg\n glVertex2i(1015, 100);\n glVertex2i(1015, 80);\n glEnd();\n glFlush();\n\n glColor3f(0, 0, 0);\n glBegin(GL_POLYGON);\n glVertex2i(995, 100);\n glVertex2i(1015, 100); //right leg\n glVertex2i(1015, 200);\n glVertex2i(995, 200);\n glEnd();\n glFlush();\n\n glColor3f(0, 0, 0);\n glBegin(GL_POLYGON);\n glVertex2i(885, 185);\n glVertex2i(870, 200); //front hand\n glVertex2i(985, 310);\n glVertex2i(1005, 305);\n glEnd();\n glFlush();\n\n 
glColor3f(0, 0, 0);\n glBegin(GL_POLYGON);\n glVertex2i(1080, 185);\n glVertex2i(1085, 205);\n glVertex2i(1025, 250); //back hand\n glVertex2i(1025, 230);\n glEnd();\n glFlush();\n\n glColor3f(0, 0, 0);\n glBegin(GL_POLYGON);\n glVertex2i(950, 310);\n glVertex2i(925, 275);\n glVertex2i(925, 200); //body\n glVertex2i(1025, 200);\n glVertex2i(1025, 275);\n glVertex2i(1000, 310);\n glEnd();\n glFlush();\n\n glColor3f(0, 0, 0);\n glBegin(GL_POLYGON);\n glVertex2i(950, 400);\n glVertex2i(915, 365);\n glVertex2i(925, 370);\n glVertex2i(925, 350);\n glVertex2i(935, 350); //face\n glVertex2i(925, 340);\n glVertex2i(950, 310);\n glVertex2i(1000, 310);\n glVertex2i(1025, 335);\n glVertex2i(1025, 375);\n glVertex2i(1000, 400);\n glEnd();\n glFlush();\n\n glColor3f(1, 1, 1);\n glBegin(GL_POLYGON);\n glVertex2i(950, 400);\n glVertex2i(960, 415);\n glVertex2i(1005, 415); //hair\n glVertex2i(1025, 400);\n glVertex2i(1025, 375);\n glEnd();\n glFlush();\n\n glColor3f(1, 0, 0);\n glBegin(GL_POLYGON);\n glVertex2i(900, 140);\n glVertex2i(935, 140);\n glVertex2i(965, 200); //chaddi front\n glVertex2i(930, 200);\n glEnd();\n glFlush();\n\n glColor3f(1, 0, 0);\n glBegin(GL_POLYGON);\n glVertex2i(990, 200);\n glVertex2i(990, 140); //chaddi back\n glVertex2i(1020, 140);\n glVertex2i(1020, 200);\n glEnd();\n glFlush();\n\n glColor3f(1, 1.0, 1.456);\n glRasterPos2f(550, 130);\n char str15[] = \"PRESS 'E/e' TO CONTINUE\";\n for (k = 0; k < strlen(str15); k++) {\n glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, str15[k]);\n }\n glFlush();\n glutKeyboardFunc(keys3);\n}\n\nvoid keys2(unsigned char key2, int x, int y) {\n if (key2 == 'D' || key2 == 'd') {\n glutInitDisplayMode(GLUT_SINGLE);\n glutInitWindowSize(1200, 1000);\n glutInitWindowPosition(10, 10);\n glutCreateWindow(\"3RD PAGE\");\n glutDisplayFunc(mydisplay3);\n myinit();\n glutMainLoop();\n }\n\n}\n\nvoid mydisplay2() {\n glClear(GL_COLOR_BUFFER_BIT);\n\n c -= 0.5;\n\n glColor3f(0, 1, 0);\n glBegin(GL_POLYGON);\n glVertex2i(0, 0);\n 
glVertex2i(1200, 0);\n glVertex2i(1200, 400);\n glVertex2i(0, 400);\n glEnd();\n glFlush();\n\n glColor3f(0, 1, 1);\n glBegin(GL_POLYGON);\n glVertex2i(0, 400);\n glVertex2i(0, 1000);\n glVertex2i(1200, 1000);\n glVertex2i(1200, 400);\n glEnd();\n glFlush();\n\n glColor3f(1, 0, 0);\n glBegin(GL_POLYGON);\n glVertex2i(600, 300);\n glVertex2i(600, 550);\n glVertex2i(800, 550);\n glVertex2i(800, 300);\n glEnd();\n glFlush();\n\n glColor3f(1, 1, 0);\n glBegin(GL_POLYGON);\n glVertex2i(600, 550);\n glVertex2i(700, 700);\n glVertex2i(800, 550);\n glEnd();\n glFlush();\n\n glColor3f(1, 0, 1);\n glBegin(GL_POLYGON);\n glVertex2i(800, 550);\n glVertex2i(800, 300);\n glVertex2i(1100, 300);\n glVertex2i(1100, 550);\n glEnd();\n glFlush();\n\n glColor3f(0, 0.7, 0.5);\n glBegin(GL_POLYGON);\n glVertex2i(800, 550);\n glVertex2i(700, 700);\n glVertex2i(1100, 700);\n glVertex2i(1100, 550);\n glEnd();\n glFlush();\n\n glColor3f(0, 0.1, 0.9);\n glBegin(GL_POLYGON);\n glVertex2i(850, 300);\n glVertex2i(1000, 300);\n glVertex2i(1000, 450);\n glVertex2i(850, 450);\n glEnd();\n glFlush();\n\n if (c < 80) {\n glColor3f(0, 0, 0);\n glBegin(GL_POLYGON);\n glVertex2i(850 - c, 100); //left leg\n glVertex2i(850 - c, 80);\n glVertex2i(910 - c, 80);\n glVertex2i(910 - c, 100);\n glEnd();\n glFlush();\n\n glColor3f(0, 0, 0);\n glBegin(GL_POLYGON);\n glVertex2i(935 - c, 200);\n glVertex2i(875 - c, 100); //left leg\n glVertex2i(910 - c, 100);\n glVertex2i(960 - c, 200);\n glEnd();\n glFlush();\n\n glColor3f(0, 0, 0);\n glBegin(GL_POLYGON);\n glVertex2i(970 - c, 80);\n glVertex2i(970 - c, 100); //right leg\n glVertex2i(1015 - c, 100);\n glVertex2i(1015 - c, 80);\n glEnd();\n glFlush();\n\n glColor3f(0, 0, 0);\n glBegin(GL_POLYGON);\n glVertex2i(995 - c, 100);\n glVertex2i(1015 - c, 100); //right leg\n glVertex2i(1015 - c, 200);\n glVertex2i(995 - c, 200);\n glEnd();\n glFlush();\n\n glColor3f(0, 0, 0);\n glBegin(GL_POLYGON);\n glVertex2i(885 - c, 185);\n glVertex2i(870 - c, 200); //front hand\n 
glVertex2i(985 - c, 310);\n glVertex2i(1005 - c, 305);\n glEnd();\n glFlush();\n\n glColor3f(0, 0, 0);\n glBegin(GL_POLYGON);\n glVertex2i(1080 - c, 185);\n glVertex2i(1085 - c, 205);\n glVertex2i(1025 - c, 250); //back hand\n glVertex2i(1025 - c, 230);\n glEnd();\n glFlush();\n\n glColor3f(0, 0, 0);\n glBegin(GL_POLYGON);\n glVertex2i(950 - c, 310);\n glVertex2i(925 - c, 275);\n glVertex2i(925 - c, 200); //body\n glVertex2i(1025 - c, 200);\n glVertex2i(1025 - c, 275);\n glVertex2i(1000 - c, 310);\n glEnd();\n glFlush();\n\n glColor3f(0, 0, 0);\n glBegin(GL_POLYGON);\n glVertex2i(950 - c, 400);\n glVertex2i(915 - c, 365);\n glVertex2i(925 - c, 370);\n glVertex2i(925 - c, 350);\n glVertex2i(935 - c, 350); //face\n glVertex2i(925 - c, 340);\n glVertex2i(950 - c, 310);\n glVertex2i(1000 - c, 310);\n glVertex2i(1025 - c, 335);\n glVertex2i(1025 - c, 375);\n glVertex2i(1000 - c, 400);\n glEnd();\n glFlush();\n\n glColor3f(1, 1, 1);\n glBegin(GL_POLYGON);\n glVertex2i(950 - c, 400);\n glVertex2i(960 - c, 415);\n glVertex2i(1005 - c, 415); //hair\n glVertex2i(1025 - c, 400);\n glVertex2i(1025 - c, 375);\n glEnd();\n glFlush();\n\n glColor3f(1, 0, 0);\n glBegin(GL_POLYGON);\n glVertex2i(900 - c, 140);\n glVertex2i(935 - c, 140);\n glVertex2i(965 - c, 200); //chaddi front\n glVertex2i(930 - c, 200);\n glEnd();\n glFlush();\n\n glColor3f(1, 0, 0);\n glBegin(GL_POLYGON);\n glVertex2i(990 - c, 200);\n glVertex2i(990 - c, 140); //chaddi back\n glVertex2i(1020 - c, 140);\n glVertex2i(1020 - c, 200);\n glEnd();\n glFlush();\n\n }\n\n glColor3f(1, 1.0, 1.456);\n glRasterPos2f(550, 130);\n char str15[] = \"PRESS 'D/d' TO CONTINUE\";\n for (k = 0; k < strlen(str15); k++) {\n glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, str15[k]);\n }\n glFlush();\n glutKeyboardFunc(keys2);\n}\n\nvoid keys1(unsigned char key1, int x, int y) {\n if (key1 == 'C' || key1 == 'c') {\n glutInitDisplayMode(GLUT_SINGLE);\n glutInitWindowSize(1200, 1000);\n glutInitWindowPosition(10, 10);\n 
glutCreateWindow(\"2ND PAGE\");\n glutDisplayFunc(mydisplay2);\n myinit();\n\n glutMainLoop();\n }\n\n}\n\nvoid mydisplay1() {\n glClear(GL_COLOR_BUFFER_BIT);\n char str1[] = \"SRINIVAS INSTITUTE OF TECHNOLOGY\";\n glColor3f(0, 1, 1);\n glRasterPos2f(380, 855);\n for (k = 0; k < strlen(str1); k++) {\n glutBitmapCharacter(GLUT_BITMAP_TIMES_ROMAN_24, str1[k]);\n }\n glColor3f(0, 1, 1);\n glRasterPos2f(530, 810);\n char str2[] = \"MANGALURU-574143\";\n for (k = 0; k < strlen(str2); k++) {\n glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, str2[k]);\n }\n\n glColor3f(1, 0.5, 0.2);\n glRasterPos2f(265, 700);\n char str3[] = \"DEPARTMENT OF COMPUTER SCIENCE AND ENGINEERING\";\n for (k = 0; k < strlen(str3); k++) {\n glutBitmapCharacter(GLUT_BITMAP_TIMES_ROMAN_24, str3[k]);\n }\n\n glColor3f(0.8, 0.3, 0.4);\n glRasterPos2f(330, 630);\n char str4[] = \"COMPUTER GRAPHICS AND VISUALIZATION\";\n for (k = 0; k < strlen(str4); k++) {\n glutBitmapCharacter(GLUT_BITMAP_TIMES_ROMAN_24, str4[k]);\n }\n\n glColor3f(0.3, 0.5, 0.1);\n glRasterPos2f(520, 560);\n char str5[] = \"MINI PROJECT ON\";\n for (k = 0; k < strlen(str5); k++) {\n glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, str5[k]);\n }\n\n glColor3f(1, 0, 0);\n glRasterPos2f(465, 490);\n char str6[] = \"DISCOVERY OF GRAVITY\";\n for (k = 0; k < strlen(str6); k++) {\n glutBitmapCharacter(GLUT_BITMAP_TIMES_ROMAN_24, str6[k]);\n }\n\n glColor3f(0.6, 0.6, 0);\n glRasterPos2f(50, 320);\n char str7[] = \"TEAM MEMBERS\";\n for (k = 0; k < strlen(str7); k++) {\n glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, str7[k]);\n }\n\n glColor3f(0.6, 0.6, 0);\n glRasterPos2f(50, 280);\n char str8[] = \"NIRISHA-4SN16CS062\";\n for (k = 0; k < strlen(str8); k++) {\n glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, str8[k]);\n }\n\n glColor3f(0.6, 0.6, 0);\n glRasterPos2f(50, 240);\n char str9[] = \"SHWETHA A-4SN16CS091\";\n for (k = 0; k < strlen(str9); k++) {\n glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, str9[k]);\n }\n\n glColor3f(0.6, 0.6, 0);\n 
glRasterPos2f(50, 200);\n char str10[] = \"DEPT OF CSE\";\n for (k = 0; k < strlen(str10); k++) {\n glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, str10[k]);\n }\n\n glColor3f(0.8, 0.5, 0);\n glRasterPos2f(950, 320);\n char str11[] = \"PROJECT GUIDED BY\";\n for (k = 0; k < strlen(str11); k++) {\n glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, str11[k]);\n }\n\n glColor3f(0.8, 0.5, 0);\n glRasterPos2f(950, 280);\n char str12[] = \"MR.ARAVIND NAIK \";\n for (k = 0; k < strlen(str12); k++) {\n glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, str12[k]);\n }\n\n glColor3f(0.8, 0.5, 0);\n glRasterPos2f(950, 240);\n char str13[] = \"ASSISTANT PROFESSOR\";\n for (k = 0; k < strlen(str13); k++) {\n glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, str13[k]);\n }\n\n glColor3f(0.8, 0.5, 0);\n glRasterPos2f(950, 200);\n char str14[] = \"DEPT OF CSE\";\n for (k = 0; k < strlen(str14); k++) {\n glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, str14[k]);\n }\n\n glColor3f(1, 1.0, 1.456);\n glRasterPos2f(350, 130);\n char str15[] = \"PRESS 'C/c' TO CONTINUE\";\n for (k = 0; k < strlen(str15); k++) {\n glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, str15[k]);\n }\n\n glFlush();\n glutKeyboardFunc(keys1);\n}\n\nint main(int argc, char ** argv) {\n glutInit( & argc, argv);\n glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB);\n glutInitWindowSize(1200, 1000);\n glutInitWindowPosition(10, 10);\n glutCreateWindow(\"FRONT PAGE\");\n glutDisplayFunc(mydisplay1);\n //glutDisplayFunc(mydisplay2);\n //glutKeyboardFunc(keys1);\n //glutKeyboardFunc(keys2);\n myinit();\n glutMainLoop();\n}\n"
},
{
"alpha_fraction": 0.44222763180732727,
"alphanum_fraction": 0.6016654968261719,
"avg_line_length": 17.466217041015625,
"blob_id": "faa55a849ca0e1c2c5237537c2d641cca8e84c95",
"content_id": "84600805ad842fbf4e957e8f2f877a1e06e8af06",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 5764,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 296,
"path": "/crow/scenes/main_2.cpp",
"repo_name": "nishanb/CG-College-Projects",
"src_encoding": "UTF-8",
"text": "#include<GL/glut.h>\r\n#include<math.h>\r\n\r\nvoid draw_hill()\r\n{\r\n\tglColor3f(0.6,0.3,0);\r\n\tglBegin(GL_POLYGON);\r\n\tglVertex2f(0,600);\r\n\tglVertex2f(250,900);\r\n\tglVertex2f(500,600);\r\n\tglEnd();\r\n\tglColor3f(0.6,0.3,0);\r\n\tglBegin(GL_POLYGON);\r\n\tglVertex2f(500,600);\r\n\tglVertex2f(750,900);\r\n\tglVertex2f(1000,600);\r\n\tglEnd();\r\n}\r\n\r\n\r\nvoid draw_sky()\r\n{\r\n\tglColor3f(0.3,0.9,0.9);\r\n\tglBegin(GL_QUADS);\r\n\tglVertex2f(0,600);\r\n\tglVertex2f(0,1000);\r\n\tglVertex2f(1000,1000);\r\n\tglVertex2f(1000,600);\r\n\tglEnd();\r\n}\r\n\r\nvoid draw_vase()\r\n{\r\n glColor3f(0.3,0.9,0.9);\r\n glBegin(GL_POLYGON);\r\n glVertex2f(700,140);\r\n glVertex2f(700,100);\r\n glVertex2f(750,50);\r\n glVertex2f(850,50);\r\n glVertex2f(900,100);\r\n glVertex2f(900,140);\r\n glVertex2f(700,140);\r\n glEnd();\r\n glColor3f(1,1,1);\r\n glBegin(GL_LINE_LOOP);\r\n glVertex2f(700,140);\r\n glVertex2f(700,200);\r\n glVertex2f(750,250);\r\n glVertex2f(750,300);\r\n glVertex2f(850,300);\r\n glVertex2f(850,250);\r\n glVertex2f(900,200);\r\n glVertex2f(900,140);\r\n glVertex2f(700,140);\r\n glEnd();\r\n}\r\n\r\nvoid draw_crow()\r\n{\r\n glBegin(GL_POLYGON);\r\n glColor3f(0,0,0);\r\n glVertex2f(540,130);\r\n glVertex2f(545,140);\r\n glVertex2f(550,165);\r\n glVertex2f(555,185);\r\n glVertex2f(570,190);\r\n glVertex2f(590,190);\r\n glVertex2f(610,180);\r\n glVertex2f(620,170);\r\n glVertex2f(620,150);\r\n glVertex2f(580,125);\r\n glVertex2f(560,120);\r\n glVertex2f(540,127);\r\n glEnd();\r\n glBegin(GL_POLYGON);\r\n glVertex2f(620,150);\r\n glVertex2f(645,120);\r\n glVertex2f(620,120);\r\n glVertex2f(620,100);\r\n glVertex2f(595,130);\r\n glVertex2f(580,125);\r\n glEnd();\r\n glBegin(GL_POLYGON);\r\n glVertex2f(555,185);\r\n glVertex2f(540,195);\r\n glVertex2f(530,200);\r\n glVertex2f(450,200);\r\n glVertex2f(410,200);\r\n glVertex2f(425,180);\r\n glVertex2f(450,150);\r\n glVertex2f(480,135);\r\n glVertex2f(520,130);\r\n glVertex2f(540,130);\r\n 
glEnd();\r\n glBegin(GL_LINES);\r\n glColor3f(0,0,0);\r\n glVertex2f(480,135);\r\n glVertex2f(500,70);\r\n glEnd();\r\n glBegin(GL_LINES);\r\n glColor3f(0,0,0);\r\n glVertex2f(510,130);\r\n glVertex2f(530,70);\r\n glEnd();\r\n}\r\n\r\nvoid draw_tree()\r\n{\r\n glBegin(GL_QUADS);\r\n glColor3f(0.3,0,0);\r\n glVertex2f(100,100);\r\n glVertex2f(150,100);\r\n glVertex2f(150,200);\r\n glVertex2f(100,200);\r\n glEnd();\r\n glBegin(GL_POLYGON);\r\n glColor3f(0,0.4,0.1);\r\n glVertex2f(100,200);\r\n glVertex2f(80,200);\r\n glVertex2f(60,210);\r\n glVertex2f(50,230);\r\n glVertex2f(50,250);\r\n glVertex2f(55,280);\r\n glVertex2f(65,310);\r\n glVertex2f(80,335);\r\n glVertex2f(100,360);\r\n glVertex2f(110,370);\r\n glVertex2f(135,370);\r\n glVertex2f(160,350);\r\n glVertex2f(180,330);\r\n glVertex2f(200,300);\r\n glVertex2f(205,270);\r\n glVertex2f(210,250);\r\n glVertex2f(205,230);\r\n glVertex2f(190,220);\r\n glVertex2f(180,210);\r\n glVertex2f(150,200);\r\n glEnd();\r\n\r\n glBegin(GL_QUADS);\r\n glColor3f(0.3,0,0);\r\n glVertex2f(250,300);\r\n glVertex2f(300,300);\r\n glVertex2f(300,400);\r\n glVertex2f(250,400);\r\n glEnd();\r\n glBegin(GL_POLYGON);\r\n glColor3f(0,0.4,0.1);\r\n glVertex2f(250,400);\r\n glVertex2f(210,400);\r\n glVertex2f(190,415);\r\n glVertex2f(190,450);\r\n glVertex2f(200,500);\r\n glVertex2f(230,540);\r\n glVertex2f(260,560);\r\n glVertex2f(300,560);\r\n glVertex2f(325,530);\r\n glVertex2f(345,500);\r\n glVertex2f(360,450);\r\n glVertex2f(360,410);\r\n glVertex2f(330,400);\r\n glVertex2f(300,400);\r\n glEnd();\r\n}\r\n\r\nvoid draw_stone()\r\n{\r\n int i;\r\n\tfloat rad;\r\n\tglColor3f(0.3,0.4,0.4);\r\n\tglBegin(GL_POLYGON);\r\n\tfor(i=0;i<360;i++)\r\n\t{\r\n\t\trad=i*3.14159/180;\r\n\t\tglVertex2f(cos(rad)*7,sin(rad)*7);\r\n\t}\r\n\tglEnd();\r\n}\r\n\r\nvoid draw_eyes()\r\n{\r\n int i;\r\n\tfloat 
rad;\r\n\tglColor3f(1,1,1);\r\n\tglBegin(GL_POLYGON);\r\n\tfor(i=0;i<360;i++)\r\n\t{\r\n\t\trad=i*3.14159/180;\r\n\t\tglVertex2f(cos(rad)*4,sin(rad)*4);\r\n\t}\r\n\tglEnd();\r\n}\r\n\r\nvoid scene3()\r\n{\r\n\tdraw_hill();\r\n\tdraw_sky();\r\n\r\n\r\n\tdraw_tree();\r\n\tglPushMatrix();\r\n\tglTranslatef(950,190,0);\r\n\tdraw_stone();\r\n\tglPopMatrix();\r\n glPushMatrix();\r\n\tglTranslatef(960,100,0);\r\n\tdraw_stone();\r\n\tglPopMatrix();\r\n glPushMatrix();\r\n\tglTranslatef(920,30,0);\r\n\tdraw_stone();\r\n\tglPopMatrix();\r\n\tglPushMatrix();\r\n\tglTranslatef(800,10,0);\r\n\tdraw_stone();\r\n\tglPopMatrix();\r\n\tglPushMatrix();\r\n\tglTranslatef(700,15,0);\r\n\tdraw_stone();\r\n\tglPopMatrix();\r\n\tglPushMatrix();\r\n\tglTranslatef(650,30,0);\r\n\tdraw_stone();\r\n\tglPopMatrix();\r\n glPushMatrix();\r\n\tglTranslatef(680,70,0);\r\n\tdraw_stone();\r\n\tglPopMatrix();\r\n glPushMatrix();\r\n\tglTranslatef(630,100,0);\r\n\tdraw_stone();\r\n\tglPopMatrix();\r\n glPushMatrix();\r\n\tglTranslatef(570,10,0);\r\n\tdraw_stone();\r\n\tglPopMatrix();\r\n\tglPushMatrix();\r\n\tglTranslatef(770,60,0);\r\n\tdraw_stone();\r\n\tglPopMatrix();\r\n glPushMatrix();\r\n\tglTranslatef(790,60,0);\r\n\tdraw_stone();\r\n\tglPopMatrix();\r\n draw_vase();\r\n glPushMatrix();\r\n\tglTranslatef(590,160,0);\r\n\tdraw_eyes();\r\n\tglPopMatrix();\r\n\tdraw_crow();\r\n\t//glTranslatef(100,270,0);\r\n\r\n\t//draw_pond();\r\n\r\n}\r\n\r\n\r\nvoid init()\r\n{\r\n\tglMatrixMode(GL_PROJECTION);\r\n\tglLoadIdentity();\r\n\tgluOrtho2D(0,1000,0,1000);\r\n\tglMatrixMode(GL_MODELVIEW);\r\n\tglLoadIdentity();\r\n}\r\n\r\nvoid display()\r\n{\r\n\tglClearColor(0.2,0.8,0.1,1);\r\n\tglClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT);\r\n\r\n\tscene3();\r\n\r\n\t//scene3();\r\n\t//scene4();\r\n\t//scene5();\r\n\t//scene6();\r\n\r\n\tglFlush();\r\n\tglutSwapBuffers();\r\n}\r\n\r\nint main(int argc,char 
**argv)\r\n{\r\n\tglutInit(&argc,argv);\r\n\tglutInitDisplayMode(GLUT_DOUBLE|GLUT_RGB|GLUT_DEPTH);\r\n\t//glutInitWindowPosition(50,100);\r\n\tglutInitWindowSize(1000,1000);\r\n\tglutCreateWindow(\"scene3\");\r\n\tinit();\r\n\tglEnable(GL_DEPTH_TEST);\r\n\r\n\tglutDisplayFunc(display);\r\n\t//glutTimerFunc(25,time,0);\r\n\tglutMainLoop();\r\n}\r\n\r\n"
},
{
"alpha_fraction": 0.5769392251968384,
"alphanum_fraction": 0.645283043384552,
"avg_line_length": 13.90625,
"blob_id": "142b2e3cd4b36f7cc9e080203b435156031ac4ec",
"content_id": "fad5c16f9069b2c8fda4c90a5ff5612f7aaf0fa7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 2385,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 160,
"path": "/tools/framework.c",
"repo_name": "nishanb/CG-College-Projects",
"src_encoding": "UTF-8",
"text": "#include<GL/glut.h>\n#include<stdio.h>\n#include<math.h>\n#include<string.h>\n\n//display variable\nint state=0,k;\n\n//animation variables\nfloat startx=0;\n\n//fonts types\nvoid *fonts[]=\n{\n GLUT_BITMAP_9_BY_15,\n GLUT_BITMAP_TIMES_ROMAN_10,\n GLUT_BITMAP_TIMES_ROMAN_24,\n GLUT_BITMAP_HELVETICA_18,\n GLUT_BITMAP_HELVETICA_12\n};\n\n//GL_init function\nvoid init()\n{\n\tglClearColor(1,1,1,1);\n glMatrixMode(GL_PROJECTION);\n\tgluOrtho2D(0,1200,0,1000);\n glMatrixMode(GL_MODELVIEW);\n}\n\n//keyboard function\nvoid keyboard( unsigned char key, int x, int y )\n{\n //handle --> press any key to continue\n if(state==0){\n state=1;\n\n }\n //move to end\n if(state==2){\n state=3;\n }\n\n}\n\n//function to write text\nvoid write_text(int x, int y, char *string,void *font)\n{\n int len, i;\n glRasterPos2f(x, y);\n len = (int) strlen(string);\n for (i = 0; i < len; i++) {\n glutBitmapCharacter(font, string[i]);\n }\n}\n\n//helper function for drawing sphere\nvoid sphere(float r, float g, float b, float a)\n{\n glColor4f(r,g,b,a);\n glutSolidSphere(1,100,32);\n\n}\n\n//story box\nvoid showStory(){\n //story box\n glColor3f(0.2,0.1,0.5);\n glBegin(GL_POLYGON);\n glVertex2f(0,0);\n glVertex2f(0,80);\n glVertex2f(1000,80);\n glVertex2f(1000,0);\n glEnd();\n}\n\n\n//intro project details\nvoid scene1(){\n\n\t glColor3f(0.2,0.1,0.5);\n\t glBegin(GL_POLYGON);\n\t glVertex2f(0,0);\n\t glVertex2f(0,1000);\n\t glVertex2f(1200,1000);\n\t glVertex2f(1200,0);\n\t glEnd();\n\t\n\t //project deatils\n \t glFlush();\n}\n\n//scene-1 \nvoid scene2() {\n \n glFlush();\n}\n\n//scene-2 \nvoid scene3(){\n\n glFlush();\n}\n\nvoid end(){\n glColor3f(0.2,0.1,0.5);\n glBegin(GL_POLYGON);\n glVertex2f(0,0);\n glVertex2f(0,1000);\n glVertex2f(1200,1000);\n glVertex2f(1200,0);\n glEnd();\n\n showStory();\n glColor3f(1,1,1);\n write_text(470,500,\"The End\",fonts[2]);\n}\n\n//GL display function\nvoid 
display()\n{\n\n\tglClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT);\n\tglLoadIdentity();\n\n\n\tif(state==0){\n\t\tscene1();\n\t}\n\n\tif(state==1){\n\t\tscene2();\n\n\t}\n\n if(state==2){\n scene3();\n }\n\n\n if(state==3){\n end();\n }\n\n glutSwapBuffers();\n glutPostRedisplay();\n}\n//main fucntion\nint main(int argc,char **argv)\n {\n glutInit(&argc,argv);\n\tglutInitDisplayMode(GLUT_DOUBLE|GLUT_RGB);\n\tglutInitWindowSize(1000,1000);\n\tglutInitWindowPosition(0, 0);\n\tglutCreateWindow(\"Project Name\");\n\tglutDisplayFunc(display);\n glutKeyboardFunc(keyboard);\n init();\n\tglutMainLoop();\n}\n"
},
{
"alpha_fraction": 0.5343347787857056,
"alphanum_fraction": 0.6439248323440552,
"avg_line_length": 19.537994384765625,
"blob_id": "a0f109a89dce499f27b63cd8bef0e0234167646b",
"content_id": "4b9253c91574deadd0946c14d955f59aa48c755f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 13514,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 658,
"path": "/blood/blood.c",
"repo_name": "nishanb/CG-College-Projects",
"src_encoding": "UTF-8",
"text": "#include<GL/glut.h>\n#include<stdio.h>\n#include<math.h>\n#include<string.h>\n//to manage which scene to be displayed\nint state=0,k,moveFlag=1;\n//mamage day and night\nconst GLfloat color[10][3]={\n {0.0,0.03,0.08},\n {0.01,0.07,0.16},\n {0.02,0.11,0.27},\n {0.02,0.16,0.39},\n {0.05,0.22,0.49},\n {0.07,0.28,0.6},\n {0.13,0.35,0.69},\n {0.5294117647,0.8078431373,0.9215686275},\n {0.23,0.45,0.79},\n {0.39,0.64,0.87}\n};\n//scene1 variables\nint startx=0;\n//font types\nvoid *fonts[]=\n{\n GLUT_BITMAP_9_BY_15,\n GLUT_BITMAP_TIMES_ROMAN_10,\n GLUT_BITMAP_TIMES_ROMAN_24,\n GLUT_BITMAP_HELVETICA_18,\n GLUT_BITMAP_HELVETICA_12\n};\n//GL_init function\nvoid init()\n{\n\tglClearColor(1,1,1,1);\n glMatrixMode(GL_PROJECTION);\n\tgluOrtho2D(0,1000,0,1000);\n glMatrixMode(GL_MODELVIEW);\n}\n//method to handle keyboard inputs keyboard-\nvoid keyboard( unsigned char key, int x, int y )\n{\n if(state==7){\n state=7;\n }\n else{\n state++;\n }\n\n startx=0;\n\n\n\n}\n//function to write text\nvoid write_text(int x, int y, char *string,void *font)\n{\n int len, i;\n glRasterPos2f(x, y);\n len = (int) strlen(string);\n for (i = 0; i < len; i++) {\n glutBitmapCharacter(font, string[i]);\n }\n}\n//helper function for drawing sphere\nvoid sphere(float r, float g, float b, float a)\n{\n glColor4f(r,g,b,a);\n glutSolidSphere(1,100,32);\n\n}\n//story box\nvoid showStory(){\n //story box\n glColor3f(0.2,0.1,0.5);\n glBegin(GL_POLYGON);\n glVertex2f(0,0);\n glVertex2f(0,80);\n glVertex2f(1000,80);\n glVertex2f(1000,0);\n glEnd();\n //Add story part-1 here\n}\n//function for the background\nvoid background()\n{\n\tglColor3f(0.5294117647,0.8078431373,0.9215686275);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(0,0);\n\tglVertex2f(1200,0);\n\tglVertex2f(1200,1000);\n\tglVertex2f(0,1000);\n glEnd();\n}\n//function to draw outline and box\nvoid drawOutline(){\n glColor3f(0.152,0.505,0.905);\n glBegin(GL_POLYGON);\n glVertex2f(200,200);\n glVertex2f(800,200);\n glVertex2f(800,800);\n 
glVertex2f(200,800);\n glEnd();\n\n glColor3f(0.2,0.2,0.2);\n glBegin(GL_POLYGON);\n glVertex2f(250,250);\n glVertex2f(750,250);\n glVertex2f(750,750);\n glVertex2f(250,750);\n glEnd();\n\n glColor3f(0.015,0.188,0.384);\n glPushMatrix();\n glTranslatef(500,500,0);\n glScalef(200,200,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(0,-600,0);\n glColor3f(0,0,0);\n write_text(400,730,\"Press Any Key to Continue\",fonts[3]);\n glPopMatrix();\n\n}\n//draw blood cell\nvoid drawBloodCell(){\n glColor3f(0.929, 0.070, 0.070);\n glPushMatrix();\n glTranslatef(500,500,0);\n glScalef(20,20,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n glColor3f(0.968, 0.372, 0.372);\n glPushMatrix();\n glTranslatef(500,510,0);\n glScalef(10,20,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n glColor3f(0.980, 0.666, 0.658);\n glPushMatrix();\n glTranslatef(510,490,0);\n glScalef(10,20,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n}\n//draw blood cell isotonic\nvoid drawIsoTonicBloodCell(){\n glColor3f(0.929, 0.070, 0.070);\n glPushMatrix();\n glTranslatef(500,500,0);\n glScalef(20,20,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n glColor3f(0.972, 0.576, 0.568);\n glPushMatrix();\n glTranslatef(500,500,0);\n glScalef(15,15,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n glColor3f(0.972, 0.576, 0.568);\n glPushMatrix();\n glTranslatef(500,500,0);\n glScalef(15,12,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n}\n//draw hypo tonic blood cells\nvoid drawHypoTonicBloodCell(){\n glColor3f(0.929, 0.070, 0.070);\n glPushMatrix();\n glTranslatef(500,500,0);\n glScalef(25,20,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n glColor3f(0.972, 0.576, 0.568);\n glPushMatrix();\n glTranslatef(510,510,0);\n glScalef(15,15,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n glColor3f(0.972, 0.576, 0.568);\n glPushMatrix();\n glTranslatef(500,500,0);\n glScalef(15,12,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n glColor3f(0, 
0, 0);\n glPushMatrix();\n glTranslatef(500,500,0);\n glScalef(5,5,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n}\n//intro project details\nvoid scene1(){\n\n background();\n char str1[] = \"SRINIVAS INSTITUTE OF TECHNOLOGY\";\n glColor3f(0, 1, 1);\n glRasterPos2f(380, 855);\n for (k = 0; k < strlen(str1); k++) {\n glutBitmapCharacter(GLUT_BITMAP_TIMES_ROMAN_24, str1[k]);\n }\n glColor3f(0, 1, 1);\n glRasterPos2f(530, 810);\n char str2[] = \"MANGALURU-574143\";\n for (k = 0; k < strlen(str2); k++) {\n glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, str2[k]);\n }\n\n glColor3f(1, 0.5, 0.2);\n glRasterPos2f(265, 700);\n char str3[] = \"DEPARTMENT OF COMPUTER SCIENCE AND ENGINEERING\";\n for (k = 0; k < strlen(str3); k++) {\n glutBitmapCharacter(GLUT_BITMAP_TIMES_ROMAN_24, str3[k]);\n }\n\n glColor3f(0.8, 0.3, 0.4);\n glRasterPos2f(330, 630);\n char str4[] = \"COMPUTER GRAPHICS AND VISUALIZATION\";\n for (k = 0; k < strlen(str4); k++) {\n glutBitmapCharacter(GLUT_BITMAP_TIMES_ROMAN_24, str4[k]);\n }\n\n glColor3f(0.3, 0.5, 0.1);\n glRasterPos2f(520, 560);\n char str5[] = \"MINI PROJECT ON\";\n for (k = 0; k < strlen(str5); k++) {\n glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, str5[k]);\n }\n\n glColor3f(1, 0, 0);\n glRasterPos2f(465, 490);\n char str6[] = \"DISCOVERY OF GRAVITY\";\n for (k = 0; k < strlen(str6); k++) {\n glutBitmapCharacter(GLUT_BITMAP_TIMES_ROMAN_24, str6[k]);\n }\n\n glColor3f(0.6, 0.6, 0);\n glRasterPos2f(50, 320);\n char str7[] = \"TEAM MEMBERS\";\n for (k = 0; k < strlen(str7); k++) {\n glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, str7[k]);\n }\n\n glColor3f(0.6, 0.6, 0);\n glRasterPos2f(50, 280);\n char str8[] = \"Name And Usn\";\n for (k = 0; k < strlen(str8); k++) {\n glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, str8[k]);\n }\n\n glColor3f(0.6, 0.6, 0);\n glRasterPos2f(50, 240);\n char str9[] = \"Name and USn\";\n for (k = 0; k < strlen(str9); k++) {\n glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, str9[k]);\n }\n\n glColor3f(0.6, 0.6, 
0);\n glRasterPos2f(50, 200);\n char str10[] = \"DEPT OF CSE\";\n for (k = 0; k < strlen(str10); k++) {\n glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, str10[k]);\n }\n\n glPushMatrix();\n glTranslatef(-200,0,0);\n glColor3f(0.8, 0.5, 0);\n glRasterPos2f(950, 320);\n char str11[] = \"PROJECT GUIDED BY\";\n for (k = 0; k < strlen(str11); k++) {\n glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, str11[k]);\n }\n\n glColor3f(0.8, 0.5, 0);\n glRasterPos2f(950, 280);\n char str12[] = \"MR.ARAVIND NAIK \";\n for (k = 0; k < strlen(str12); k++) {\n glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, str12[k]);\n }\n\n glColor3f(0.8, 0.5, 0);\n glRasterPos2f(950, 240);\n char str13[] = \"ASSISTANT PROFESSOR\";\n for (k = 0; k < strlen(str13); k++) {\n glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, str13[k]);\n }\n\n glColor3f(0.8, 0.5, 0);\n glRasterPos2f(950, 200);\n char str14[] = \"DEPT OF CSE\";\n for (k = 0; k < strlen(str14); k++) {\n glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, str14[k]);\n }\n glPopMatrix();\n\n\n glPushMatrix();\n glTranslatef(-100,0,0);\n glColor3f(1, 0,0);\n glRasterPos2f(450, 130);\n char str15[] = \"PRESS ANY KEY TO CONTINUE\";\n for (k = 0; k < strlen(str15); k++) {\n glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, str15[k]);\n }\n glPopMatrix();\n\n glFlush();\n}\n//scene-1 state-1\nvoid scene2() {\n\n drawOutline();\n\n if(startx%45==0){\n moveFlag*=-1;\n }\n if(moveFlag==1)\n startx=startx+1;\n else\n startx=startx-1;\n\n glPushMatrix();\n glTranslatef(-100,100-startx/2,0);\n glScalef(1,1,0);\n drawBloodCell();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(0-startx,100,0);\n glScalef(1,1,0);\n drawBloodCell();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(10-startx,-100,0);\n glScalef(1,1,0);\n drawBloodCell();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(100,70-startx,0);\n glScalef(1,1,0);\n drawBloodCell();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(20+startx,20,0);\n glScalef(1,1,0);\n drawBloodCell();\n 
glPopMatrix();\n\n glPushMatrix();\n glTranslatef(-100,0+startx,0);\n glScalef(1,1,0);\n drawBloodCell();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(00+startx,-70,0);\n glScalef(1,1,0);\n drawBloodCell();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(-100,-100-startx,0);\n glScalef(1,1,0);\n drawBloodCell();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(100+startx,-10,0);\n glScalef(1,1,0);\n drawBloodCell();\n glPopMatrix();\n\n //blod cell ends\n\n glPushMatrix();\n glTranslatef(0,100,0);\n glColor3f(0,0,0);\n write_text(400,730,\"Stage-1 Hyper Tonic\",fonts[3]);\n glPopMatrix();\n\n glFlush();\n}\n//scene1-showStory\nvoid scene2x(){\n background();\n\n glPushMatrix();\n glTranslatef(0,100,0);\n glColor3f(1,0,0);\n write_text(410,500,\"HYPER TONIC \",fonts[2]);\n write_text(410,450,\"Concentration of \",fonts[0]);\n write_text(200,400,\"Solute MoleCules OUTSIDE the cell > Solute MoleCules INSIDE the Cell\",fonts[3]);\n write_text(330,350,\"Consequence : The Shell SHRINKS.\",fonts[0]);\n glPopMatrix();\n}\n//scene-2 state-2\nvoid scene3(){\n drawOutline();\n\n if(startx%45==0){\n moveFlag*=-1;\n }\n if(moveFlag==1)\n startx=startx+1;\n else\n startx=startx-1;\n\n glPushMatrix();\n glTranslatef(-100,100-startx/2,0);\n glScalef(1,1,0);\n drawIsoTonicBloodCell();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(0-startx,100,0);\n glScalef(1,1,0);\n drawIsoTonicBloodCell();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(0-startx,100,0);\n glScalef(1,1,0);\n drawIsoTonicBloodCell();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(100,70-startx,0);\n glScalef(1,1,0);\n drawIsoTonicBloodCell();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(20+startx,20,0);\n glScalef(1,1,0);\n drawIsoTonicBloodCell();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(-100,0+startx,0);\n glScalef(1,1,0);\n drawIsoTonicBloodCell();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(00+startx,-70,0);\n glScalef(1,1,0);\n drawIsoTonicBloodCell();\n 
glPopMatrix();\n\n glPushMatrix();\n glTranslatef(-100,-100-startx,0);\n glScalef(1,1,0);\n drawIsoTonicBloodCell();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(100+startx,-10,0);\n glScalef(1,1,0);\n drawIsoTonicBloodCell();\n glPopMatrix();\n\n\n glPushMatrix();\n glTranslatef(0,100,0);\n glColor3f(0,0,0);\n write_text(400,730,\"Stage-2 Iso Tonic\",fonts[3]);\n glPopMatrix();\n\n glFlush();\n\n\n}\n//scene2-showStory\nvoid scene3x(){\n background();\n\n glPushMatrix();\n glTranslatef(0,100,0);\n glColor3f(1,0,0);\n write_text(410,500,\"ISO TONIC \",fonts[2]);\n write_text(410,450,\"Concentration of \",fonts[0]);\n write_text(200,400,\"Solute MoleCules OUTSIDE the cell = Solute MoleCules INSIDE the Cell\",fonts[3]);\n //write_text(330,350,\"Consequence : The Shell SHRINKS.\",fonts[0]);\n glPopMatrix();\n}\n//scene-3 state-3\nvoid scene4(){\n drawOutline();\n\n if(startx%45==0){\n moveFlag*=-1;\n }\n if(moveFlag==1)\n startx=startx+1;\n else\n startx=startx-1;\n\n glPushMatrix();\n glTranslatef(-100,100-startx/2,0);\n glScalef(1,1,0);\n drawHypoTonicBloodCell();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(0-startx,100,0);\n glScalef(1,1,0);\n drawHypoTonicBloodCell();\n glPopMatrix();\n\n\n glPushMatrix();\n glTranslatef(0-startx,100,0);\n glScalef(1,1,0);\n drawHypoTonicBloodCell();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(100,70-startx,0);\n glScalef(1,1,0);\n drawHypoTonicBloodCell();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(20+startx,20,0);\n glScalef(1,1,0);\n drawHypoTonicBloodCell();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(-100,0+startx,0);\n glScalef(1,1,0);\n drawHypoTonicBloodCell();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(00+startx,-70,0);\n glScalef(1,1,0);\n drawHypoTonicBloodCell();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(-100,-100-startx,0);\n glScalef(1,1,0);\n drawHypoTonicBloodCell();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(100+startx,-10,0);\n glScalef(1,1,0);\n 
drawHypoTonicBloodCell();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(0,100,0);\n glColor3f(0,0,0);\n write_text(400,730,\"Stage-3 Hypo Tonic\",fonts[3]);\n glPopMatrix();\n\n glFlush();\n\n}\n//scene3-showStory\nvoid scene4x(){\n background();\n\n glPushMatrix();\n glTranslatef(0,100,0);\n glColor3f(1,0,0);\n write_text(410,500,\"HYPO TONIC \",fonts[2]);\n write_text(410,450,\"Concentration of \",fonts[0]);\n write_text(200,400,\"Solute MoleCules OUTSIDE the cell < Solute MoleCules INSIDE the Cell\",fonts[3]);\n write_text(330,350,\"Consequence : The Shell SWELLS.\",fonts[0]);\n glPopMatrix();\n}\n//scene-4 conclusion\nvoid end(){\n glColor3f(0.2,0.1,0.5);\n glBegin(GL_POLYGON);\n glVertex2f(0,0);\n glVertex2f(0,1000);\n glVertex2f(1200,1000);\n glVertex2f(1200,0);\n glEnd();\n\n showStory();\n glColor3f(1,1,1);\n write_text(470,500,\"The End\",fonts[2]);\n}\n//GL display function\nvoid display()\n{\n\n\tglClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT);\n\tglLoadIdentity();\n\n\n\tif(state==0){\n\t\tscene1();\n\t}\n\n\tif(state==1){\n\t\tscene2();\n\n\t}\n\n if(state==2){\n scene2x();\n }\n\n if (state==3) {\n scene3();\n }\n\n if(state==4){\n scene3x();\n }\n\n if(state==5){\n scene4();\n }\n if(state==6){\n scene4x();\n }\n if(state==7){\n end();\n }\n\n glutSwapBuffers();\n glutPostRedisplay();\n}\n//main fucntion\nint main(int argc,char **argv)\n {\n glutInit(&argc,argv);\n\tglutInitDisplayMode(GLUT_DOUBLE|GLUT_RGB);\n\tglutInitWindowSize(1000,1000);\n\tglutInitWindowPosition(0, 0);\n\tglutCreateWindow(\"BLOOD CELL OSMOSIS\");\n\tglutDisplayFunc(display);\n glutKeyboardFunc(keyboard);\n init();\n\tglutMainLoop();\n}\n"
},
{
"alpha_fraction": 0.5935828685760498,
"alphanum_fraction": 0.6203208565711975,
"avg_line_length": 22.375,
"blob_id": "bd5446f3c26c8f85df6f6e8ae686561106d310fd",
"content_id": "638288e2ee8fa9c9ba611be65c8d7d32589cbd1d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 187,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 8,
"path": "/tools/vertex.py",
"repo_name": "nishanb/CG-College-Projects",
"src_encoding": "UTF-8",
"text": "file=open('cordinates.txt');\nprint 'glBegin(GL_POLYGON);'\nprint 'glColor3f(1,1,1)'\nfor line in file:\n x=line.split(',');\n print 'glVertex2f('+line[:].strip()+');'\n\nprint 'glEnd();'\n"
},
{
"alpha_fraction": 0.4582957327365875,
"alphanum_fraction": 0.6454383730888367,
"avg_line_length": 18.009361267089844,
"blob_id": "aa3d91f9561d33206c848377b8bbf69a6d597661",
"content_id": "9ea7e88e52f6975456e7001b559c3b11ffc78abf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 36555,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 1923,
"path": "/pulwama/att.c",
"repo_name": "nishanb/CG-College-Projects",
"src_encoding": "UTF-8",
"text": "#include<GL/glut.h>\n#include<stdio.h>\n#include<math.h>\n#include<string.h>\n\n//scene1 variables\nfloat speed_car=2,speed_bus=2,startx=2,starty=-200,flag=0,speed_jet=2,speed_bomb=2;\nGLfloat test=0.1;\n//to manage which scene to be displayed\nint state=0,cx=0;\n//to manage window position\nint x,y,i;\n//manage tyre\nfloat tx=0;\n//crashing flag\nint isCrashed=0;\n//bus movement\nint dpx=-500,dpy=0;\n//next scene\nint next=0;\n//scene9 variables\nint bx=0,by=0;\nint lx=0,ly=0;\n//font types\nvoid *fonts[]=\n{\n \tGLUT_BITMAP_9_BY_15,\n \tGLUT_BITMAP_TIMES_ROMAN_10,\n \tGLUT_BITMAP_TIMES_ROMAN_24,\n \tGLUT_BITMAP_HELVETICA_18,\n\tGLUT_BITMAP_HELVETICA_12\n\n};\n\n//GL_init function\nvoid init()\n{\n\tglClearColor(1,1,1,1);\n \tglMatrixMode(GL_PROJECTION);\n\tgluOrtho2D(0,1000,0,1000);\n\tglMatrixMode(GL_MODELVIEW);\n}\n\n//method to handle keyboard inputs keyboard-\nvoid keyboard( unsigned char key, int x, int y )\n{\n\tstate++;\n}\n\n//function to write text\nvoid write_text(int x, int y, char *string,void *font)\n{\n\tint len, i;\n\tglRasterPos2f(x, y);\n \tlen = (int) strlen(string);\n \tfor (i = 0; i < len; i++)\n\t{\n \t\tglutBitmapCharacter(font, string[i]);\n \t}\n}\n\n//with default font\nvoid renderBitmapString(float x, float y, const char *string){\n \tconst char *c;\n \tglRasterPos2f(x, y);\n \tfor (c=string; *c != '\\0'; c++)\n\t{\n \tglutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, *c);\n \t}\n}\n//intro --scene1 display\nvoid intro()\n{\n glColor3f(0.5,0.2,0.6);\n write_text(300,130,\" SAHYADRI COLLEGE OF ENGINEERING AND MANAGEMENT\",fonts[3]);\n glColor3f(0.3,0.5,0.8);\n write_text(375,100,\"DEPT. 
OF COMPUTER SCIENCE & ENGG.\",fonts[0]);\n glColor3f(0.8,0.1,0.2);\n write_text(160,600,\"GRAPHICAL IMPLEMENTATION OF PULWAMA ATTACK AND BALAKOT AIRSTRIKE\",fonts[2]);\n glColor3f(1.0,0.0,1.0);\n write_text(450,500,\"SUBMITTED BY :\",fonts[0]);\n glColor3f(0.3,0.5,0.8);\n write_text(225,450,\"RESHMA KUMARI\",fonts[3]);\n write_text(670,450,\"SONALI S KUMAR\",fonts[3]);\n write_text(180,300,\"\",fonts[3]);\n write_text(220,400,\"(4SF16CS128)\",fonts[0]);\n write_text(680,400,\"(4SF16CS152)\",fonts[0]);\n\n write_text(380,200,\"[ PRESS ANY KEY TO CONTINUE ]\",fonts[3]);\n}\n\n//used in all scenes\nvoid sky()\n{\n\tglColor3f(0.5294117647,0.8078431373,0.9215686275);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(0,0);\n\tglVertex2f(10000,0);\n\tglVertex2f(10000,10000);\n\tglVertex2f(0,10000);\n \tglEnd();\n}\n\n//clouds\nvoid drawClouds()\n{\n \tglColor3f(0.9,0.9,0.9);\n\tglPushMatrix();\n \tglTranslatef(530,750,0);\n \tglScalef(60,50,0);\n \tglutSolidSphere(1,100,10);\n \tglPopMatrix();\n\n\tglPushMatrix();\n \tglTranslatef(580,740,0);\n \tglScalef(60,50,0);\n \tglutSolidSphere(1,100,10);\n \tglPopMatrix();\n\n\tglPushMatrix();\n \tglTranslatef(580,700,0);\n \tglScalef(60,50,0);\n \tglutSolidSphere(1,100,10);\n \tglPopMatrix();\n\n \tglPushMatrix();\n \tglTranslatef(500,8000,0);\n \tglScalef(60,50,0);\n \tglutSolidSphere(1,100,10);\n \tglPopMatrix();\n}\n\n//function to draw road division strip\nvoid drawStrip()\n{\n \tglColor3f(1,1,1);\n \tint tx=100,ty=200;\n \tfor(i=0;i<10;i++)\n\t{\n \t\tglBegin(GL_POLYGON);\n \t\tglVertex2f(tx+=30,250);\n \t\tglVertex2f(tx,270);\n \t\tglVertex2f(ty+=30,270);\n \t\tglVertex2f(ty,250);\n \t\tglEnd();\n \t\ttx+=100;\n \t\tty+=100;\n \t}\n}\n\n//function to draw road\nvoid drawRoad()\n{\n\tglColor3f(0.12941176, 0.12941176, 0.12941176);\n \tglBegin(GL_POLYGON);\n \tglVertex2f(0,150);\n \tglVertex2f(0,450);\n \tglVertex2f(1000,450);\n \tglVertex2f(1000,150);\n \tglEnd();\n\n \tglPushMatrix();\n \tglTranslatef(-120,20,0);\n \tdrawStrip();\n 
\tglPopMatrix();\n\n \tglPushMatrix();\n \tglScalef(2,0.5,1);\n \tglTranslatef(-120,600,0);\n \tdrawStrip();\n \tglPopMatrix();\n\n \tglPushMatrix();\n \tglScalef(2,0.5,1);\n \tglTranslatef(-120,50,0);\n \tdrawStrip();\n \tglPopMatrix();\n}\n\n//fucntion to draw Tree\nvoid drawTree()\n{\n\tglBegin(GL_POLYGON);\n\tglColor3f(0.6,0,0);\n\tglVertex2f(525,500);\n\tglVertex2f(550,500);\n\tglVertex2f(550,750);\n\tglVertex2f(525,750);\n\tglEnd();\n\n\tglBegin(GL_POLYGON);\n\tglColor3f(0.6,0,0);\n\tglVertex2f(550,525);\n\tglVertex2f(600,575);\n\tglVertex2f(600,600);\n\tglVertex2f(550,550);\n\tglEnd();\n\n\tglBegin(GL_POLYGON);\n\tglColor3f(0.6,0,0);\n\tglVertex2f(525,600);\n\tglVertex2f(525,625);\n\tglVertex2f(475,675);\n\tglVertex2f(475,650);\n\tglEnd();\n\n \t// right green\n\tglColor3f(0,1,0);\n\n \tglPushMatrix();\n \tglTranslatef(620,580,0);\n \tglScalef(40,50,0);\n \tglutSolidSphere(1,100,10);\n \tglPopMatrix();\n\n \tglPushMatrix();\n \tglTranslatef(450,680,0);\n \tglScalef(40,50,0);\n \tglutSolidSphere(1,100,10);\n \tglPopMatrix();\n\n \tglPushMatrix();\n \tglTranslatef(530,750,0);\n \tglScalef(60,50,0);\n \tglutSolidSphere(1,100,10);\n \tglPopMatrix();\n\n \tglPushMatrix();\n \tglTranslatef(580,740,0);\n \tglScalef(60,50,0);\n \tglutSolidSphere(1,100,10);\n \tglPopMatrix();\n}\n\n//function to draw 2 buses\nvoid 
drawBus(){\n\n\t//bus1\n\t//top\n\tglColor3f(0,0.4,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(300,700);\n\tglVertex2f(600,700);\n\tglVertex2f(550,650);\n\tglVertex2f(250,650);\n\tglEnd();\n\tglColor3f(0,0.5,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(600,700);\n\tglVertex2f(610,690);\n\tglVertex2f(560,630);\n\tglVertex2f(550,650);\n\tglEnd();\n\tglColor3f(0,0.5,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(550,650);\n\tglVertex2f(560,630);\n\tglVertex2f(240,630);\n\tglVertex2f(250,650);\n\tglEnd();\n\n\t//glass\n\tglColor3f(0.7,0.7,0.7);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(610,690);\n\tglVertex2f(610,600);\n\tglVertex2f(560,540);\n\tglVertex2f(560,630);\n\tglEnd();\n\tglColor3f(0.7,0.7,0.7);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(560,630);\n\tglVertex2f(560,540);\n\tglVertex2f(240,540);\n\tglVertex2f(240,630);\n\tglEnd();\n\n\t//bottom\n\tglColor3f(0,0.4,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(240,540);\n\tglVertex2f(240,460);\n\tglVertex2f(275,440);\n\tglVertex2f(320,440);\n\tglVertex2f(385,440);\n\tglVertex2f(450,440);\n\tglVertex2f(515,440);\n\tglVertex2f(560,440);\n\tglVertex2f(560,540);\n\tglVertex2f(240,540);\n\tglEnd();\n\n 
\tglColor3f(0,0.4,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(560,540);\n\tglVertex2f(560,440);\n\tglVertex2f(610,530);\n\tglVertex2f(610,630);\n\tglEnd();\n\n\tglColor3f(0,0,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(560,540);\n\tglVertex2f(560,510);\n\tglVertex2f(610,630);\n\tglVertex2f(610,600);\n\tglEnd();\n\n\t//bus2\n\t//top\n\tglColor3f(0,0.4,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(300+500,700);\n\tglVertex2f(600+500,700);\n\tglVertex2f(550+500,650);\n\tglVertex2f(250+500,650);\n\tglEnd();\n\tglColor3f(0,0.5,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(600+500,700);\n\tglVertex2f(610+500,690);\n\tglVertex2f(560+500,630);\n\tglVertex2f(550+500,650);\n\tglEnd();\n\tglColor3f(0,0.5,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(550+500,650);\n\tglVertex2f(560+500,630);\n\tglVertex2f(240+500,630);\n\tglVertex2f(250+500,650);\n\tglEnd();\n\n\t//glass\n\tglColor3f(0.7,0.7,0.7);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(610+500,690);\n\tglVertex2f(610+500,600);\n\tglVertex2f(560+500,540);\n\tglVertex2f(560+500,630);\n\tglEnd();\n\tglColor3f(0.7,0.7,0.7);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(560+500,630);\n\tglVertex2f(560+500,540);\n\tglVertex2f(240+500,540);\n\tglVertex2f(240+500,630);\n\tglEnd();\n\n\t//bottom\n\tglColor3f(0,0.4,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(240+500,540);\n\tglVertex2f(240+500,460);\n\tglVertex2f(275+500,440);\n\tglVertex2f(320+500,440);\n\tglVertex2f(385+500,440);\n\tglVertex2f(450+500,440);\n\tglVertex2f(515+500,440);\n\tglVertex2f(560+500,440);\n\tglVertex2f(560+500,540);\n\tglVertex2f(240+500,540);\n\tglEnd();\n\n \tglColor3f(0,0.4,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(560+500,540);\n\tglVertex2f(560+500,440);\n\tglVertex2f(610+500,530);\n\tglVertex2f(610+500,630);\n\tglEnd();\n\n\tglColor3f(0,0,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(560+500,540);\n\tglVertex2f(560+500,510);\n\tglVertex2f(610+500,630);\n\tglVertex2f(610+500,600);\n\tglEnd();\n\trenderBitmapString(445,485,\"CRPF\");\n\trenderBitmapString(945,485,\"CRPF\");\n 
\tglFlush();\n}\n\n//contains single bus\nvoid drawBus1()\n{\n\t//bus2\n\t//top\n\tglColor3f(0,0.4,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(300+500,700);\n\tglVertex2f(600+500,700);\n\tglVertex2f(550+500,650);\n\tglVertex2f(250+500,650);\n\tglEnd();\n\tglColor3f(0,0.5,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(600+500,700);\n\tglVertex2f(610+500,690);\n\tglVertex2f(560+500,630);\n\tglVertex2f(550+500,650);\n\tglEnd();\n\tglColor3f(0,0.5,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(550+500,650);\n\tglVertex2f(560+500,630);\n\tglVertex2f(240+500,630);\n\tglVertex2f(250+500,650);\n\tglEnd();\n\n\t//glass\n\tglColor3f(0.7,0.7,0.7);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(610+500,690);\n\tglVertex2f(610+500,600);\n\tglVertex2f(560+500,540);\n\tglVertex2f(560+500,630);\n\tglEnd();\n\tglColor3f(0.7,0.7,0.7);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(560+500,630);\n\tglVertex2f(560+500,540);\n\tglVertex2f(240+500,540);\n\tglVertex2f(240+500,630);\n\tglEnd();\n\n\t//bottom\n\tglColor3f(0,0.4,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(240+500,540);\n\tglVertex2f(240+500,460);\n\tglVertex2f(275+500,440);\n\tglVertex2f(320+500,440);\n\tglVertex2f(385+500,440);\n\tglVertex2f(450+500,440);\n\tglVertex2f(515+500,440);\n\tglVertex2f(560+500,440);\n\tglVertex2f(560+500,540);\n\tglVertex2f(240+500,540);\n\tglEnd();\n\n \tglColor3f(0,0.4,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(560+500,540);\n\tglVertex2f(560+500,440);\n\tglVertex2f(610+500,530);\n\tglVertex2f(610+500,630);\n\tglEnd();\n\n\tglColor3f(0,0,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(560+500,540);\n\tglVertex2f(560+500,510);\n\tglVertex2f(610+500,630);\n\tglVertex2f(610+500,600);\n\tglEnd();\n\trenderBitmapString(945,485,\"CRPF\");\n \tglFlush();\n}\n\n//bombing car arrives-scene 4\nvoid 
drawCar()\n{\n\t//top\n\tglColor3f(0.2,0,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(200,550);\n\tglVertex2f(300,550);\n\tglVertex2f(310,530);\n\tglVertex2f(190,530);\n\tglEnd();\n\t//1\n\tglColor3f(0.2,0,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(210,530);\n\tglVertex2f(220,530);\n\tglVertex2f(210,470);\n\tglVertex2f(200,470);\n\tglEnd();\n\t//2\n\tglColor3f(0.2,0,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(300,530);\n\tglVertex2f(310,470);\n\tglVertex2f(300,470);\n\tglVertex2f(290,530);\n\tglEnd();\n\n\t//middle\n\tglColor3f(0.2,0,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(245,530);\n\tglVertex2f(245,470);\n\tglVertex2f(255,470);\n\tglVertex2f(255,530);\n\tglEnd();\n\n\t//glass\n\tglColor3f(0.9,0.9,0.9);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(290,530);\n\tglVertex2f(300,470);\n\tglVertex2f(255,470);\n\tglVertex2f(255,530);\n\tglEnd();\n\tglColor3f(0.9,0.9,0.9);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(245,530);\n\tglVertex2f(245,470);\n\tglVertex2f(210,470);\n\tglVertex2f(220,530);\n\tglEnd();\n\n\tglColor3f(0.9,0.9,0.9);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(210,530);\n\tglVertex2f(200,530);\n\tglVertex2f(190,470);\n\tglVertex2f(200,470);\n\tglEnd();\n\n\tglColor3f(0.9,0.9,0.9);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(300,530);\n\tglVertex2f(310,530);\n\tglVertex2f(340,470);\n\tglVertex2f(310,470);\n\tglEnd();\n\n\tglColor3f(0.2,0,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(350,470);\n\tglVertex2f(360,410);\n\tglVertex2f(345,410);\n\tglVertex2f(305,410);\n\tglVertex2f(240,410);\n\tglVertex2f(240,410);\n\tglVertex2f(200,410);\n\tglVertex2f(180,410);\n\tglVertex2f(190,470);\n\tglEnd();\n\n\tglColor3f(0,0,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(365,410);\n\tglVertex2f(365,390);\n\tglVertex2f(345,390);\n\tglVertex2f(345,410);\n\tglEnd();\n\n\tglColor3f(0,0,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(305,410);\n\tglVertex2f(240,410);\n\tglVertex2f(240,390);\n\tglVertex2f(305,390);\n\tglEnd();\n\n\tglColor3f(0,0,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(305,410);\n\tglVertex2f(240,4
10);\n\tglVertex2f(240,390);\n\tglVertex2f(305,390);\n\tglEnd();\n\n\tglColor3f(0,0,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(180,410);\n\tglVertex2f(180,390);\n\tglVertex2f(200,390);\n\tglVertex2f(200,410);\n\tglEnd();\n\n\tglColor3f(0.2,0,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(360,390);\n\tglVertex2f(360,370);\n\tglVertex2f(345,370);\n\tglVertex2f(345,390);\n\tglEnd();\n\n\tglColor3f(0.2,0,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(305,370);\n\tglVertex2f(240,370);\n\tglVertex2f(240,390);\n\tglVertex2f(305,390);\n\tglEnd();\n\n\tglColor3f(0.2,0,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(305,370);\n\tglVertex2f(240,370);\n\tglVertex2f(240,390);\n\tglVertex2f(305,390);\n\tglEnd();\n\n\tglColor3f(0.2,0,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(180,370);\n\tglVertex2f(180,390);\n\tglVertex2f(200,390);\n\tglVertex2f(200,370);\n\tglEnd();\n\tglFlush();\n}\n\n//bus tires\nvoid drawTireBus()\n{\n\tglColor3f(0.25,0.25,0.25);\n \tglPushMatrix();\n \tglTranslatef(470,440,0);\n \tglScalef(35,35,1);\n \tglutSolidSphere(1,500,4);\n \tglPopMatrix();\n\n\tglColor3f(0.25,0.25,0.25);\n \tglPushMatrix();\n \tglTranslatef(340,440,0);\n \tglScalef(35,35,1);\n \tglutSolidSphere(1,500,4);\n \tglPopMatrix();\n\n \tglColor3f(1,1,1);\n \tglPushMatrix();\n \tglTranslatef(470,440,0);\n \tglScalef(15,15,1);\n \tglutWireSphere(1,500,4);\n \tglPopMatrix();\n\n\tglColor3f(1,1,1);\n \tglPushMatrix();\n \tglTranslatef(340,440,0);\n \tglScalef(15,15,1);\n \tglutWireSphere(1,500,4);\n \tglPopMatrix();\n\n\tglColor3f(0.25,0.25,0.25);\n \tglPushMatrix();\n \tglTranslatef(970,440,0);\n \tglScalef(35,35,1);\n \tglutSolidSphere(1,500,4);\n \tglPopMatrix();\n\n\tglColor3f(0.25,0.25,0.25);\n \tglPushMatrix();\n \tglTranslatef(840,440,0);\n \tglScalef(35,35,1);\n \tglutSolidSphere(1,500,4);\n \tglPopMatrix();\n\n \tglColor3f(1,1,1);\n \tglPushMatrix();\n \tglTranslatef(970,440,0);\n \tglScalef(15,15,1);\n \tglutWireSphere(1,500,4);\n \tglPopMatrix();\n\n\tglColor3f(1,1,1);\n \tglPushMatrix();\n 
\tglTranslatef(840,440,0);\n \tglScalef(15,15,1);\n \tglutWireSphere(1,500,4);\n \tglPopMatrix();\n}\n\n/*//bus back tires\nvoid drawTback()\n{\n\tglColor3f(0.25,0.25,0.25);\n \tglPushMatrix();\n \tglTranslatef(575,495,0);\n \tglScalef(35,35,1);\n \tglutSolidSphere(1,500,4);\n \tglPopMatrix();\n\n\tglColor3f(0.25,0.25,0.25);\n \tglPushMatrix();\n \tglTranslatef(1075,495,0);\n \tglScalef(35,35,1);\n \tglutSolidSphere(1,500,4);\n \tglPopMatrix();\n}\n*/\n//last bus tires\nvoid drawTireBus1()\n{\n\tglColor3f(0.25,0.25,0.25);\n \tglPushMatrix();\n \tglTranslatef(850,440,0);\n \tglScalef(35,35,1);\n \tglutSolidSphere(1,500,4);\n \tglPopMatrix();\n\n\tglColor3f(0.25,0.25,0.25);\n \tglPushMatrix();\n \tglTranslatef(940,440,0);\n \tglScalef(35,35,1);\n \tglutSolidSphere(1,500,4);\n \tglPopMatrix();\n\n \tglColor3f(1,1,1);\n \tglPushMatrix();\n \tglTranslatef(850,440,0);\n \tglScalef(15,15,1);\n \tglutWireSphere(1,500,4);\n \tglPopMatrix();\n\n\tglColor3f(1,1,1);\n \tglPushMatrix();\n \tglTranslatef(940,440,0);\n \tglScalef(15,15,1);\n \tglutWireSphere(1,500,4);\n \tglPopMatrix();\n}\n\n/*//last bus back tire\nvoid drawTback1()\n{\n\tglColor3f(0.25,0.25,0.25);\n \tglPushMatrix();\n \tglTranslatef(1075,495,0);\n \tglScalef(35,35,1);\n \tglutSolidSphere(1,500,4);\n \tglPopMatrix();\n}*/\n\n//car tires\nvoid drawTirecar()\n{\n\tglColor3f(0.25,0.25,0.25);\n \tglPushMatrix();\n \tglTranslatef(324,380,0);\n \tglScalef(20,26,1);\n \tglutSolidSphere(1,500,4);\n \tglPopMatrix();\n\n\tglColor3f(0.25,0.25,0.25);\n \tglPushMatrix();\n \tglTranslatef(220,380,0);\n \tglScalef(20,26,1);\n \tglutSolidSphere(1,500,4);\n \tglPopMatrix();\n\n \tglColor3f(1,1,1);\n \tglPushMatrix();\n \tglTranslatef(324,380,0);\n \tglScalef(10,10,1);\n \tglutWireSphere(1,500,4);\n \tglPopMatrix();\n\n\tglColor3f(1,1,1);\n \tglPushMatrix();\n \tglTranslatef(220,380,0);\n \tglScalef(10,10,1);\n \tglutWireSphere(1,500,4);\n \tglPopMatrix();\n}\n\n//land fxn used in scene 9\nvoid 
land()\n{\n\tglColor3f(0.75,0.4,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(0,100);\n\tglVertex2f(10000,690);\n\tglVertex2f(10000,0);\n\tglVertex2f(0,0);\n\tglEnd();\n\n\tglColor3f(0,0,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(900,70);\n\tglVertex2f(900,30);\n\tglVertex2f(920,30);\n\tglVertex2f(920,70);\n\tglEnd();\n\n\n\n\tglColor3f(0,0,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(940,70);\n\tglVertex2f(940,30);\n\tglVertex2f(970,30);\n\tglVertex2f(970,70);\n\tglEnd();\n}\n\n//contains broken bus parts after blast\nvoid drawBrokenBus()\n{\n\t//bus1\n\t//top\n\tglColor3f(0,0.4,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(300,700+20);\n\tglVertex2f(600+30,700);\n\tglVertex2f(550+40,650);\n\tglVertex2f(250-20,650);\n\tglEnd();\n\tglColor3f(0,0.5,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(600,700);\n\tglVertex2f(610,690);\n\tglVertex2f(560,630);\n\tglVertex2f(550,650);\n\tglEnd();\n\tglColor3f(0,0.5,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(550-220,650);\n\tglVertex2f(560-220,630);\n\tglVertex2f(240-220,630);\n\tglVertex2f(250-220,650);\n\tglEnd();\n\n\t//bottom\n\tglColor3f(0,0.4,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(240+30,540);\n\tglVertex2f(240+30,460);\n\tglVertex2f(275+30,440);\n\tglVertex2f(320+30,440);\n\tglVertex2f(385,440);\n\tglVertex2f(450,440);\n\tglVertex2f(515,440);\n\tglVertex2f(560,440);\n\tglVertex2f(560,540);\n\tglVertex2f(240,540);\n\tglEnd();\n\n 
\tglColor3f(0,0.4,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(560,540);\n\tglVertex2f(560,440);\n\tglVertex2f(610,530);\n\tglVertex2f(610,630);\n\tglEnd();\n\n\tglColor3f(0,0,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(560,540);\n\tglVertex2f(560,510);\n\tglVertex2f(610,630);\n\tglVertex2f(610,600);\n\tglEnd();\n\n\t//bus2\n\t//top\n\tglColor3f(0,0.4,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(300+500,700);\n\tglVertex2f(600+500,700);\n\tglVertex2f(550+500,650);\n\tglVertex2f(250+500,650);\n\tglEnd();\n\tglColor3f(0,0.5,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(600+500,700);\n\tglVertex2f(610+500,690);\n\tglVertex2f(560+500,630);\n\tglVertex2f(550+500,650);\n\tglEnd();\n\tglColor3f(0,0.5,0);\n\n\t//bottom\n\tglColor3f(0,0.4,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(240+500,540);\n\tglVertex2f(240+500,460);\n\tglVertex2f(275+500,440);\n\tglVertex2f(515+500,440);\n\tglVertex2f(560+500,440);\n\tglVertex2f(560+500,540);\n\tglVertex2f(240+500,540);\n\tglEnd();\n\n \tglColor3f(0,0,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(560+500,540);\n\tglVertex2f(560+500,440);\n\tglVertex2f(610+500,530);\n\tglVertex2f(610+500,630);\n\tglEnd();\n \tglFlush();\n\n}\n\n//broken parts of car after blast\nvoid 
drawBrokenCar()\n{\n\t//top\n\tglColor3f(0.2,0,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(200+500,530);\n\tglVertex2f(300,550);\n\tglVertex2f(310+500,510);\n\tglVertex2f(190,520);\n\tglEnd();\n\t//1\n\tglColor3f(0.2,0,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(210,530);\n\tglVertex2f(220,530);\n\tglVertex2f(210,470);\n\tglVertex2f(200,470);\n\tglEnd();\n\t//2\n\tglColor3f(0.2,0,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(300+100,530);\n\tglVertex2f(310+100,470);\n\tglVertex2f(300+100,470);\n\tglVertex2f(290+100,530);\n\tglEnd();\n\n\t//middle\n\tglColor3f(0.2,0,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(245+500,530);\n\tglVertex2f(245+500,470);\n\tglVertex2f(255+500,470);\n\tglVertex2f(255+500,530);\n\tglEnd();\n\n\t//glass\n\tglColor3f(0.9,0.9,0.9);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(290+100,530);\n\tglVertex2f(300+100,470);\n\tglVertex2f(255+100,470);\n\tglVertex2f(255+100,530);\n\tglEnd();\n\tglColor3f(0.9,0.9,0.9);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(245,530);\n\tglVertex2f(245,470);\n\tglVertex2f(210,470);\n\tglVertex2f(220,530);\n\tglEnd();\n\n\tglColor3f(0.9,0.9,0.9);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(210,530);\n\tglVertex2f(200,530);\n\tglVertex2f(190,470);\n\tglVertex2f(200,470);\n\tglEnd();\n\n\tglColor3f(0.9,0.9,0.9);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(300-200,530);\n\tglVertex2f(310-200,530);\n\tglVertex2f(340-200,470);\n\tglVertex2f(310-200,470);\n\tglEnd();\n\n\tglColor3f(0.2,0,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(350,470);\n\tglVertex2f(360,410);\n\tglVertex2f(345,410);\n\tglVertex2f(305,410);\n\tglVertex2f(240,410);\n\tglVertex2f(240,410);\n\tglVertex2f(200,410);\n\tglVertex2f(180,410);\n\tglVertex2f(190,470);\n\tglEnd();\n\n\tglColor3f(0,0,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(365+200,410);\n\tglVertex2f(365+200,390);\n\tglVertex2f(345+200,390);\n\tglVertex2f(345+200,410);\n\tglEnd();\n\n\tglColor3f(0,0,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(305-200,410);\n\tglVertex2f(240-200,410);\n\tglVertex2f(240-200,390);\n\tglVertex2f(305-20
0,390);\n\tglEnd();\n\n\tglColor3f(0,0,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(305,410);\n\tglVertex2f(240,410);\n\tglVertex2f(240,390);\n\tglVertex2f(305,390);\n\tglEnd();\n\n\tglColor3f(0,0,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(180-200,410);\n\tglVertex2f(180-200,390);\n\tglVertex2f(200-200,390);\n\tglVertex2f(200-200,410);\n\tglEnd();\n\n\tglColor3f(0.2,0,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(360+200,390);\n\tglVertex2f(360+200,370);\n\tglVertex2f(345+200,370);\n\tglVertex2f(345+200,390);\n\tglEnd();\n\n\tglColor3f(0.2,0,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(305,370);\n\tglVertex2f(240,370);\n\tglVertex2f(240,390);\n\tglVertex2f(305,390);\n\tglEnd();\n\n\tglColor3f(0.2,0,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(305,370);\n\tglVertex2f(240,370);\n\tglVertex2f(240,390);\n\tglVertex2f(305,390);\n\tglEnd();\n\n\tglColor3f(0.2,0,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(180+200,370);\n\tglVertex2f(180+200,390);\n\tglVertex2f(200+200,390);\n\tglVertex2f(200+200,370);\n\tglEnd();\n\tglFlush();\n}\n\n//bom fxn used in scene 9\nvoid bomb()\n{\n\tglColor3f(0,0,0);\n\tglPushMatrix();\n \tglTranslatef(590,750,0);\n \tglScalef(6,9,0);\n \tglutSolidSphere(1,5,10);\n \tglPopMatrix();\n\n\tglColor3f(0.5,0.5,0.5);\n\tglPushMatrix();\n \tglTranslatef(590,750,0);\n \tglScalef(4,6,0);\n \tglutSolidSphere(1,5,10);\n \tglPopMatrix();\n}\n\n//blast polygon construction-scene 6 & 9 and also in showblasts()\n//red blast mark\nvoid blast()\n{\n \tglPushMatrix();\n \tglColor3f(1.0, 0.0, 0.0);\n \tglBegin(GL_POLYGON);\n \tglVertex2f(404.4, 320.0);\n \tglVertex2f(384.0, 285.0);\n \tglVertex2f(368.0, 344.5);\n \tglVertex2f(344.0, 355.0);\n \tglVertex2f(347.2, 414.5);\n \tglVertex2f(332.8, 442.5);\n \tglVertex2f(347.2, 477.5);\n \tglVertex2f(352.0, 530.0);\n \tglVertex2f(379.2, 519.5);\n \tglVertex2f(396.8, 565.0);\n \tglVertex2f(416.0, 530.0);\n \tglVertex2f(440.0, 547.5);\n \tglVertex2f(452.8, 512.5);\n \tglVertex2f(472.0, 512.5);\n \tglVertex2f(475.2, 470.5);\n 
\tglVertex2f(488.0, 442.5);\n \tglVertex2f(488.0, 404.0);\n \tglVertex2f(470.0, 372.5);\n \tglVertex2f(475.2, 337.5);\n \tglVertex2f(464.0, 306.0);\n \tglVertex2f(444.8, 320.0);\n \tglVertex2f(425.6, 285.0);\n \tglVertex2f(404.8, 320.0);\n \tglEnd();\n \tglPopMatrix();\n\n}\n\n//orange blast mark\nvoid blasto()\n{\n\tglPushMatrix();\n \tglColor3f(1,0.5,0);\n \tglBegin(GL_POLYGON);\n \tglVertex2f(404.4, 320.0);\n \tglVertex2f(384.0, 285.0);\n \tglVertex2f(368.0, 344.5);\n \tglVertex2f(344.0, 355.0);\n \tglVertex2f(347.2, 414.5);\n \tglVertex2f(332.8, 442.5);\n \tglVertex2f(347.2, 477.5);\n \tglVertex2f(352.0, 530.0);\n \tglVertex2f(379.2, 519.5);\n \tglVertex2f(396.8, 565.0);\n \tglVertex2f(416.0, 530.0);\n \tglVertex2f(440.0, 547.5);\n \tglVertex2f(452.8, 512.5);\n \tglVertex2f(472.0, 512.5);\n \tglVertex2f(475.2, 470.5);\n \tglVertex2f(488.0, 442.5);\n \tglVertex2f(488.0, 404.0);\n \tglVertex2f(470.0, 372.5);\n \tglVertex2f(475.2, 337.5);\n \tglVertex2f(464.0, 306.0);\n \tglVertex2f(444.8, 320.0);\n \tglVertex2f(425.6, 285.0);\n \tglVertex2f(404.8, 320.0);\n \tglEnd();\n \tglPopMatrix();\n\n}\n\n//yellow blast mark\nvoid blasty()\n{\n\tglPushMatrix();\n \tglColor3f(1,1,0);\n \tglBegin(GL_POLYGON);\n \tglVertex2f(404.4, 320.0);\n \tglVertex2f(384.0, 285.0);\n \tglVertex2f(368.0, 344.5);\n \tglVertex2f(344.0, 355.0);\n \tglVertex2f(347.2, 414.5);\n \tglVertex2f(332.8, 442.5);\n \tglVertex2f(347.2, 477.5);\n \tglVertex2f(352.0, 530.0);\n \tglVertex2f(379.2, 519.5);\n \tglVertex2f(396.8, 565.0);\n \tglVertex2f(416.0, 530.0);\n \tglVertex2f(440.0, 547.5);\n \tglVertex2f(452.8, 512.5);\n \tglVertex2f(472.0, 512.5);\n \tglVertex2f(475.2, 470.5);\n \tglVertex2f(488.0, 442.5);\n \tglVertex2f(488.0, 404.0);\n \tglVertex2f(470.0, 372.5);\n \tglVertex2f(475.2, 337.5);\n \tglVertex2f(464.0, 306.0);\n \tglVertex2f(444.8, 320.0);\n \tglVertex2f(425.6, 285.0);\n \tglVertex2f(404.8, 320.0);\n \tglEnd();\n \tglPopMatrix();\n}\n\nvoid showBlasts()\n{\n \tglPushMatrix();\n 
\tglTranslatef(300,0,0);\n \tglScalef(0.8,0.8,1);\n \tblast();\n \tglPopMatrix();\n\n\n \tglPushMatrix();\n \tglTranslatef(100,0,0);\n \tglScalef(0.8,0.8,1);\n \tblast();\n \tglPopMatrix();\n\n \tglPushMatrix();\n \tglTranslatef(200,100,0);\n \tglScalef(0.8,0.8,1);\n \tblast();\n \tglPopMatrix();\n\n \tglPushMatrix();\n \tglTranslatef(200,300,0);\n \tglScalef(0.8,0.8,1);\n \tblast();\n \tglPopMatrix();\n\n \tglPushMatrix();\n \tglTranslatef(400,200,0);\n \tglScalef(0.8,0.8,1);\n \tblast();\n \tglPopMatrix();\n\n\tglPushMatrix();\n \tglTranslatef(250,360,0);\n \tglScalef(0.2,0.2,1);\n \tblasty();\n \tglPopMatrix();\n\n \tglPushMatrix();\n \tglTranslatef(415,210,0);\n \tglScalef(0.2,0.2,1);\n \tblasty();\n \tglPopMatrix();\n\n\tglPushMatrix();\n \tglTranslatef(350,460,0);\n \tglScalef(0.2,0.2,1);\n \tblasty();\n \tglPopMatrix();\n\n \tglPushMatrix();\n \tglTranslatef(215,310,0);\n \tglScalef(0.2,0.2,1);\n \tblasty();\n \tglPopMatrix();\n\n\tglPushMatrix();\n \tglTranslatef(480,0,0);\n \tglScalef(0.35,0.35,1);\n \tblasto();\n \tglPopMatrix();\n\n\n \tglPushMatrix();\n \tglTranslatef(330,0,0);\n \tglScalef(0.35,0.35,1);\n \tblasto();\n \tglPopMatrix();\n\n \tglPushMatrix();\n \tglTranslatef(290,120,0);\n \tglScalef(0.35,0.35,1);\n \tblasto();\n \tglPopMatrix();\n}\n\n//jet used in scene 8 and 9 for the balakot airstrike\nvoid drawJet()\n{\n\t//jetfront-cone\n \tglColor3f(0,0.1,0);\n \tglBegin(GL_POLYGON);\n \tglVertex2f(825,700);\n \tglVertex2f(850,700);\n \tglVertex2f(940,665);\n \tglVertex2f(975,600);\n \tglVertex2f(940,575);\n \tglVertex2f(860,550);\n \tglVertex2f(825,550);\n \tglEnd();\n\n \tglColor3f(0,0.1,0);\n \tglPushMatrix();\n \tglScalef(50,40,0);\n \tglTranslatef(18.6,15.4,0);\n \tglutSolidSphere(1,100,100);\n \tglPopMatrix();\n\n \tglPushMatrix();\n \tglScalef(50,40,0);\n \tglTranslatef(18.4,15.7,0);\n \tglutSolidSphere(1,100,100);\n \tglPopMatrix();\n\t\n \t//jet middle\n \tglColor3f(0,0.1,0);\n \tglBegin(GL_POLYGON);\n \tglVertex2f(825,550);\n 
\tglVertex2f(250,550);\n \tglVertex2f(225,675);\n \tglVertex2f(450,700);\n \tglVertex2f(825,700);\n \tglEnd();\n\n \t//jet end\n \tglColor3f(0,0.1,0);\n \tglBegin(GL_POLYGON);\n \tglVertex2f(250,550);\n \tglVertex2f(105,615);\n \tglVertex2f(35,880);\n \tglVertex2f(95,865);\n \tglVertex2f(225,675);\n \tglEnd();\n\t\n \t//jetend wing\n \tglColor3f(0,0.1,0);\n \tglBegin(GL_POLYGON);\n \tglVertex2f(35,880);\n \tglVertex2f(75,870);\n \tglVertex2f(135,690);\n \tglVertex2f(105,615);\n \tglEnd();\n\t\n \t//jet door\n \tglColor3f(0.43921569, 0.43921569, 0.43921569);\n \tglBegin(GL_POLYGON);\n \tglVertex2f(910,600);\n \tglVertex2f(895,645);\n \tglVertex2f(840,665);\n \tglVertex2f(840,600);\n \tglEnd();\n\n \t//jet wings\n \tglColor3f(0,0,0);\n \tglBegin(GL_POLYGON);\n \tglVertex2f(450,580);\n \tglVertex2f(570,580);\n \tglVertex2f(400,425);\n \tglVertex2f(320,415);\n \tglEnd();\n \tglColor3f(0,0,0);\n \tglBegin(GL_POLYGON);\n \tglVertex2f(350,420);\n \tglVertex2f(450,525);\n \tglVertex2f(415,540);\n \tglVertex2f(320,416);\n \tglEnd();\n \tglFlush();\n\n \tglColor3f(0,0,0);\n \tglBegin(GL_POLYGON);\n \tglVertex2f(500,700);\n \tglVertex2f(370,885);\n \tglVertex2f(360,870);\n \tglVertex2f(415,695);\n \tglEnd();\n\t\n\tglColor3f(0,0,0);\n\tglBegin(GL_POLYGON);\n \tglVertex2f(370,885);\n \tglVertex2f(365,880);\n \tglVertex2f(460,700);\n \tglVertex2f(500,700);\n\tglEnd();\n\n\t//jetfront-cone\n \tglColor3f(0,0.1,0);\n \tglBegin(GL_POLYGON);\n \tglVertex2f(825+300,700+400);\n \tglVertex2f(850+300,700+400);\n \tglVertex2f(940+300,665+400);\n \tglVertex2f(975+300,600+400);\n \tglVertex2f(940+300,575+400);\n \tglVertex2f(860+300,550+400);\n \tglVertex2f(825+300,550+400);\n \tglEnd();\n\n \tglColor3f(0,0.1,0);\n \tglPushMatrix();\n \tglScalef(50,40,0);\n \tglTranslatef(18.6,15.4,0);\n \tglutSolidSphere(1,100,100);\n \tglPopMatrix();\n\n \tglPushMatrix();\n \tglScalef(50,40,0);\n \tglTranslatef(18.4,15.7,0);\n \tglutSolidSphere(1,100,100);\n \tglPopMatrix();\n\t\n \t//jet middle\n 
\tglColor3f(0,0.1,0);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(825+300,550+400);\n \tglVertex2f(250+300,550+400);\n \tglVertex2f(225+300,675+400);\n \tglVertex2f(450+300,700+400);\n \tglVertex2f(825+300,700+400);\n \tglEnd();\n\t\n \t//jet end\n \tglColor3f(0,0.1,0);\n \tglBegin(GL_POLYGON);\n \tglVertex2f(250+300,550+400);\n \tglVertex2f(105+300,615+400);\n \tglVertex2f(35+300,880+400);\n \tglVertex2f(95+300,865+400);\n \tglVertex2f(225+300,675+400);\n \tglEnd();\n\t\n \t//jetend wing\n \tglColor3f(0,0.1,0);\n \tglBegin(GL_POLYGON);\n \tglVertex2f(35+300,880+400);\n \tglVertex2f(75+300,870+400);\n \tglVertex2f(135+300,690+400);\n \tglVertex2f(105+300,615+400);\n \tglEnd();\n\n \t//jet door\n \tglColor3f(0.43921569, 0.43921569, 0.43921569);\n \tglBegin(GL_POLYGON);\n \tglVertex2f(910+300,600+400);\n \tglVertex2f(895+300,645+400);\n \tglVertex2f(840+300,665+400);\n \tglVertex2f(840+300,600+400);\n \tglEnd();\n\t\n\t//jet wings\n \tglColor3f(0,0,0);\n \tglBegin(GL_POLYGON);\n \tglVertex2f(450+300,580+400);\n \tglVertex2f(570+300,580+400);\n \tglVertex2f(400+300,425+400);\n \tglVertex2f(320+300,415+400);\n \tglEnd();\n \tglColor3f(0,0,0);\n \tglBegin(GL_POLYGON);\n \tglVertex2f(350+300,420+400);\n \tglVertex2f(450+300,525+400);\n \tglVertex2f(415+300,540+400);\n \tglVertex2f(320+300,416+400);\n \tglEnd();\n \tglFlush();\n\t\n \tglColor3f(0,0,0);\n \tglBegin(GL_POLYGON);\n \tglVertex2f(500+300,700+400);\n \tglVertex2f(370+300,885+400);\n \tglVertex2f(360+300,870+400);\n \tglVertex2f(415+300,695+400);\n \tglEnd();\n\t\n\t\n \tglColor3f(0,0,0);\n \tglBegin(GL_POLYGON);\n \tglVertex2f(370+300,885+400);\n \tglVertex2f(365+300,880+400);\n \tglVertex2f(460+300,700+400);\n \tglVertex2f(500+300,700+400);\n\tglEnd();\n\tglColor3f(1,1,1);\n \trenderBitmapString(500+150,660+350,\"INDIA\");\n \trenderBitmapString(480,640,\"INDIA\");\n\tglFlush();\n}\n\nvoid scenery()\n{\n\tsky();\n \tdrawRoad();\n\tdrawClouds();\n \tglPushMatrix();\n \tglScalef(3,2,1);\n 
\tglTranslatef(0,-300,1);\n\n\t//lawn left extension\n\tglBegin(GL_POLYGON);\n\tglColor3f(0.19,0.80,0.19);\n\tglVertex2f(400,525);\n\tglVertex2f(400,575);\n\tglVertex2f(100,575);\n\tglVertex2f(75,550);\n\tglVertex2f(75,535);\n\tglVertex2f(100,525);\n\tglEnd();\n\t//left lawn\n\tglBegin(GL_POLYGON);\n\tglColor3f(0.5,1,0.0);\n\tglVertex2f(400,500);\n\tglVertex2f(0,500);\n\tglVertex2f(0,550);\n\tglVertex2f(75,550);\n\tglVertex2f(75,535);\n\tglVertex2f(100,525);\n\tglVertex2f(400,525);\n\tglEnd();\n\n \tglBegin(GL_POLYGON);\n\tglColor3f(0.19,0.80,0.19);\n\tglVertex2f(250,500);\n\tglVertex2f(0,500);\n\tglVertex2f(0,525);\n\tglVertex2f(250,525);\n\tglEnd();\n \tglPopMatrix();\n\n \tglPushMatrix();\n \tglTranslatef(400,0,0);\n \tdrawTree();\n \tglPopMatrix();\n\n \tglPushMatrix();\n \tglScalef(0.8,0.8,1);\n \tglTranslatef(350,100,0);\n \tdrawTree();\n \tglPopMatrix();\n\n\tglPushMatrix();\n \tglScalef(0.6,0.8,1);\n \tglTranslatef(30,100,0);\n \tdrawTree();\n \tglPopMatrix();\n\n \tglPushMatrix();\n \tglScalef(10,2,1);\n \tglTranslatef(-200,-500,1);\n \t//lawn left extension\n\tglBegin(GL_POLYGON);\n\tglColor3f(0.19,0.80,0.19);\n\tglVertex2f(400,525);\n\tglVertex2f(400,575);\n\tglVertex2f(100,575);\n\tglVertex2f(75,550);\n\tglVertex2f(75,535);\n\tglVertex2f(100,525);\n\tglEnd();\n\t//left lawn\n\tglBegin(GL_POLYGON);\n\tglColor3f(0.5,1,0.0);\n\tglVertex2f(400,500);\n\tglVertex2f(0,500);\n\tglVertex2f(0,550);\n\tglVertex2f(75,550);\n\tglVertex2f(75,535);\n\tglVertex2f(100,525);\n\tglVertex2f(400,525);\n\tglEnd();\n\n \tglBegin(GL_POLYGON);\n\tglColor3f(0.19,0.80,0.19);\n\tglVertex2f(250,500);\n\tglVertex2f(0,500);\n\tglVertex2f(0,525);\n\tglVertex2f(250,525);\n\tglEnd();\n \tglPopMatrix();\n\tglPopMatrix();\n\tglColor3f(0.5,0.2,0.6);\n\twrite_text(690,20,\" PLACE : PULWAMA,JAMMU AND KASHMIR\",fonts[3]);\n}\n\n//intro texts are here\nvoid scene1()\n{\n \tsky();\n \tglPushMatrix();\n \tglTranslatef(-50,150,0);\n \tintro();\n \tglPopMatrix();\n\tglFlush();\n}\n\n//title after 
intro\nvoid topic1()\n{\n glColor3f(0.5,0.2,0.6);\n\twrite_text(400,430,\" PULWAMA ATTACK\",fonts[2]);\n\twrite_text(380,200,\"[ PRESS ANY KEY TO CONTINUE ]\",fonts[3]);\n}\n\n//PULWAMA ATTACK\nvoid scene2()\n{\n \tsky();\n \tglPushMatrix();\n \tglTranslatef(-50,150,0);\n \ttopic1();\n \tglPopMatrix();\n\tglFlush();\n}\n\n//has 7 CRPF buses\nvoid scene3()\n{\n\tscenery();\n \t//speed_bus=4;\n \tglLoadIdentity();\n \tglPushMatrix();\n \tglScalef(0.5,0.5,1);\n \tglTranslatef(startx,-50,1);\n \t//drawTback();\n\tdrawBus();\n\tdrawTireBus();\n \tglPopMatrix();\n\tglPushMatrix();\n \tglScalef(0.5,0.5,1);\n \tglTranslatef(startx-1000,-50,1);\n\t//drawTback();\n\tdrawBus();\n\tdrawTireBus();\n \tglPopMatrix();\n\tglPushMatrix();\n \tglScalef(0.5,0.5,1);\n \tglTranslatef(startx-2000,-50,1);\n\t//drawTback();\n \tdrawBus();\n\tdrawTireBus();\n\n \tglPopMatrix();\n\tglPushMatrix();\n \tglScalef(0.5,0.5,1);\n \tglTranslatef(startx-3000,-50,1);\n \t//drawTback1();\n \tdrawBus1();\n\tdrawTireBus1();\n \tglPopMatrix();\n \tstartx+=speed_bus+1;\n\tif(startx>2800)\n\t{\n\t \tstate=3;\n\t\tstartx=-750;\n \t}\n\tglColor3f(1.0, 0.33, 0.0);\n\tglFlush();\n}\n\n//one bomber car\nvoid scene4()\n{\n\tscenery();\n \tglLoadIdentity();\n \tglPushMatrix();\n \tglScalef(0.8,0.6,1);\n \tglTranslatef(startx+200,20,1);\n \tdrawCar();\n\tdrawTirecar();\n \tglPopMatrix();\n \tstartx+=speed_car+2;\n\tif(startx>900)\n\t{\n\t \tstate=4;\n \t}\n \tglColor3f(1.0, 0.33, 0.0);\n \tglFlush();\n}\n\n//&buses and car together\nvoid scene5()\n{\n\tscenery();\n \tglLoadIdentity();\n\twrite_text(60,20,\" TIME : 3:15 PM\",fonts[3]);\n \tglPushMatrix();\n \tglScalef(0.8,0.5,1);\n\tglTranslatef(startx-1600,200,1);\n \tdrawCar();\n\tdrawTirecar();\n \tglPopMatrix();\n\tstartx+=speed_car;\n\tglPushMatrix();\n \tglScalef(0.5,0.5,1);\n \tglTranslatef(startx-1200,-50,1);\n \t//drawTback();\n\tdrawBus();\n\tdrawTireBus();\n\tglPopMatrix();\n\tglPushMatrix();\n \tglScalef(0.5,0.5,1);\n 
\tglTranslatef(startx-2400,-50,1);\n \t//drawTback();\n\tdrawBus();\n\tdrawTireBus();\n \tglPopMatrix();\n \tglPushMatrix();\n \tglScalef(0.5,0.5,1);\n \tglTranslatef(startx-3400,-50,1);\n \t//drawTback();\n\tdrawBus();\n\tdrawTireBus();\n \tglPopMatrix();\n\tstartx+=speed_bus;\n\tif(startx>1650)\n\t{\n\t \tstate=5;\n \t}\n \tglFlush();\n}\n\n//blast scene\nvoid scene6()\n{\n \tscenery();\n \tglPushMatrix();\n \tglScalef(0.7,0.7,0);\n \tglTranslatef(dpx,dpy,0);\n\tif(dpx>-50)\n\t{\n \t\tglPushMatrix();\n \t\tglTranslatef(-200,0,0);\n \t\tglScalef(2,2,0);\n \t\tshowBlasts();\n \t\tglPopMatrix();\n \t\tisCrashed=1;\n \t\tglFlush();\n \t}\n\telse\n\t{\n\t\tdpx+=2;\n\t\tdrawBus();\n\t\tdrawTireBus();\n\t\tdrawCar();\n\t\tdrawTirecar();\n \t}\n \tglPopMatrix();\n \tif(isCrashed==1)\n\t{\n \t\tdrawTree();\n\t\tglPushMatrix();\n \t\tglScalef(0.7,0.7,1);\n\t\tglRotatef(-80,0,0,1);\n\t\tglTranslatef(-840,400,0);\n\t\tdrawBrokenBus();\n\t\tdrawTireBus();\n \t\tglPopMatrix();\n \t\tglPushMatrix();\n \t\tglScalef(0.7,0.7,1);\n \t\tglTranslatef(450,00,0);\n \t\tdrawBrokenCar();\n\t\tdrawTirecar();\n \t\tglPopMatrix();\n \t\tshowBlasts();\n \t\tif(next==300)\n \t\t\tstate++;\n \t\tglPushMatrix();\n \t\tglTranslatef(next++,0,0);\n \t\tglPopMatrix();\n \t}\n\twrite_text(60,20,\" TIME : 3:15 PM\",fonts[3]);\n \tglFlush();\n}\n\n//blalkot airstrike title\n void topic2()\n{\n glColor3f(0.5,0.2,0.6);\n\twrite_text(400,430,\" BALAKOT AIRSTRIKE\",fonts[2]);\n\twrite_text(380,200,\"[ PRESS ANY KEY TO CONTINUE ]\",fonts[3]);\n}\n\n//BALAKOT AIRSTRIKE\nvoid scene7()\n{\n \tsky();\n \tglPushMatrix();\n \tglTranslatef(-50,150,0);\n \ttopic2();\n \tglPopMatrix();\n\tglFlush();\n}\n\n//scenery for scene 8 and 9\nvoid scenery1()\n{\n\tsky();\n\tglBegin(GL_POLYGON);\n\tglVertex2f(0,0);\n\tglVertex2f(1000,0);\n\tglVertex2f(1000,1000);\n\tglVertex2f(0,1000);\n \tglEnd();\n \tglPushMatrix();\n\tglTranslatef(cx--,0,0);\n \tglPushMatrix();\n \tglTranslatef(300,100,0);\n \tdrawClouds();\n 
\tglPopMatrix();\n \tglPushMatrix();\n \tglTranslatef(700,-100,0);\n \tdrawClouds();\n \tglPopMatrix();\n \tglPushMatrix();\n \tglTranslatef(-100,-100,0);\n \tdrawClouds();\n \tglPopMatrix();\n \tglPushMatrix();\n \tglTranslatef(100,-300,0);\n \tdrawClouds();\n \tglPopMatrix();\n \tglPushMatrix();\n \tglTranslatef(700,100,0);\n \tdrawClouds();\n \tglPopMatrix();\n \tglPushMatrix();\n \tglTranslatef(0,100,0);\n \tdrawClouds();\n \tglPopMatrix();\n \tglPushMatrix();\n \tglTranslatef(20,1,1);\n \tglScalef(10,20,1);\n \tglPopMatrix();\n\tglPopMatrix();\n}\n\n//2jets leaving to pakistan\nvoid scene8()\n{\n\tscenery1();\t\n \tglPushMatrix();\n \tglScalef(0.2,0.6,1);\n \tglTranslatef(startx+1500,starty+400,1);\n \tstartx+=speed_bus+2;\n \tdrawJet();\n \tglPopMatrix();\n \tglPopMatrix();\n \tif(startx>1000)\n\t{\n \t\tstate=8;\n \t \tstartx=-800;\n \t \tstarty=500;\n \t\tflag=0;\n \t\ttx=100;\n \t}\n \tglFlush();\n}\n\n//jets dropping bomb on balakot resulting in a blast\nvoid scene9()\n{\n\tscenery1();\n\tglPushMatrix();\n \tglScalef(0.1,0.4,1);\n \tglTranslatef(startx+5500,850,1);\n \tstartx+=speed_jet+2;\n \tdrawJet();\n \tglPopMatrix();\n \tglPushMatrix();\n \tglTranslatef(lx--,0,0);\n \tland();\n \tglPopMatrix();\n \twrite_text(690,20,\" PLACE : BALAKOT,PAKISTAN\",fonts[3]);\n\tif(by<-618)\n\t{\n\t\tglPushMatrix();\n \t\tglTranslatef(0,-100,0);\n\t\tshowBlasts();\n\t\tglPopMatrix();\n\t\tstartx+=speed_jet-2;\n \t\tlx++;\n \t}\n\telse\n\t{\n \t\tglPushMatrix();\n \t\tglTranslatef(bx,by,0);\n \t\tbx=bx+0.8;\n \t\tby=by-2;\n \t\tbomb();\n \t\tglPushMatrix();\n \t\tglTranslatef(50,0,0);\n \t\tbomb();\n \t\tglPopMatrix();\n \t\tglPopMatrix();\n \t}\n \tif(startx>2000)\n\t{\n \t\tstate=9;\n \t\tstartx=-800;\n \t\tstarty=500;\n \t\tflag=0;\n \t\ttx=100;\n\t}\n\tglFlush();\n}\n\n//The end is displayed here\nvoid end()\n{\n\tsky();\n \tglColor3f(0.5,0.2,0.6);\n\twrite_text(400,430,\" THE END\",fonts[2]);\n}\n\n//GL display function\nvoid 
display()\n{\n\tglClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT);\n\tglLoadIdentity();\n\tif(state==0)\n\t{\n\t\tscene1();\n\t}\n\tif(state==1)\n\t{\n\t\tscene2();\n\t}\n\tif(state==2)\n\t{\n\t\tscene3();\n\t}\n\tif(state==3)\n\t{\n\t\tscene4();\n\t}\n\tif(state==4)\n\t{\n\t\tscene5();\n\t}\n\tif(state==5)\n\t{\n\t\tscene6();\n\t}\n\tif(state==6)\n\t{\n\t\tscene7();\n\t}\n\tif(state==7)\n\t{\n\t\tscene8();\n\t}\n\tif(state==8)\n\t{\n\t\tscene9();\n\t}\n\tif(state==9)\n\t{\n \t\tend();\n \t}\n\tglutSwapBuffers();\n \tglutPostRedisplay();\n}\n\n//main fucntion\nint main(int argc,char **argv)\n{\n \tglutInit(&argc,argv);\n\tglutInitDisplayMode(GLUT_DOUBLE|GLUT_RGB);\n\tglutInitWindowSize(10000,10000);\n\tglutInitWindowPosition(0, 0);\n\tglutCreateWindow(\"Pulwama attack and Balakot airstrike\");\n\tglutDisplayFunc(display);\n\tglutKeyboardFunc(keyboard);\n\tinit();\n\tglutMainLoop();\n}\n"
},
{
"alpha_fraction": 0.43317896127700806,
"alphanum_fraction": 0.6015547513961792,
"avg_line_length": 17.88157844543457,
"blob_id": "dd8ac74757f5e24ed87da79d4d1c3240a9269f2c",
"content_id": "97ce7a66fb3cdde052b8591d18e137928e46d901",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 6046,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 304,
"path": "/crow/scenes/main_1.cpp",
"repo_name": "nishanb/CG-College-Projects",
"src_encoding": "UTF-8",
"text": "#include<GL/glut.h>\r\n#include<math.h>\r\n\r\nvoid draw_hill()\r\n{\r\n\tglColor3f(0.6,0.3,0);\r\n\tglBegin(GL_POLYGON);\r\n\tglVertex2f(0,600);\r\n\tglVertex2f(250,900);\r\n\tglVertex2f(500,600);\r\n\tglEnd();\r\n\tglColor3f(0.6,0.3,0);\r\n\tglBegin(GL_POLYGON);\r\n\tglVertex2f(500,600);\r\n\tglVertex2f(750,900);\r\n\tglVertex2f(1000,600);\r\n\tglEnd();\r\n}\r\n\r\nvoid DrawSegment(float,float,float,int,int,int);\r\n\r\nvoid draw_sky()\r\n{\r\n\tglColor3f(0.3,0.9,0.9);\r\n\tglBegin(GL_QUADS);\r\n\tglVertex2f(0,600);\r\n\tglVertex2f(0,1000);\r\n\tglVertex2f(1000,1000);\r\n\tglVertex2f(1000,600);\r\n\tglEnd();\r\n}\r\n\r\nvoid draw_vase()\r\n{\r\n glColor3f(0.3,0.9,0.9);\r\n glBegin(GL_POLYGON);\r\n glVertex2f(700,140);\r\n glVertex2f(700,100);\r\n glVertex2f(750,50);\r\n glVertex2f(850,50);\r\n glVertex2f(900,100);\r\n glVertex2f(900,140);\r\n glVertex2f(700,140);\r\n glEnd();\r\n glColor3f(1,1,1);\r\n glBegin(GL_LINE_LOOP);\r\n glVertex2f(700,140);\r\n glVertex2f(700,200);\r\n glVertex2f(750,250);\r\n glVertex2f(750,300);\r\n glVertex2f(850,300);\r\n glVertex2f(850,250);\r\n glVertex2f(900,200);\r\n glVertex2f(900,140);\r\n glVertex2f(700,140);\r\n glEnd();\r\n}\r\n\r\nvoid draw_crow()\r\n{\r\n glBegin(GL_POLYGON);\r\n glColor3f(0,0,0);\r\n //glVertex2f(750,300);\r\n //glVertex2f(735,305);\r\n //glVertex2f(725,315);\r\n glVertex2f(700,310);\r\n glVertex2f(680,300);\r\n glVertex2f(650,295);\r\n glVertex2f(600,290);\r\n glVertex2f(575,300);\r\n glVertex2f(540,315);\r\n glVertex2f(570,330);\r\n glVertex2f(605,350);\r\n glVertex2f(645,365);\r\n glVertex2f(680,380);\r\n glVertex2f(705,390);\r\n glVertex2f(730,400);\r\n glVertex2f(755,410);\r\n glVertex2f(760,390);\r\n glVertex2f(765,380);\r\n glVertex2f(770,370);\r\n glVertex2f(740,335);\r\n //glVertex2f(730,330);\r\n //glVertex2f(745,315);\r\n //glVertex2f(755,300);\r\n glEnd();\r\n glBegin(GL_POLYGON);\r\n glColor3f(0,0,0);\r\n glVertex2f(755,410);\r\n glVertex2f(770,412);\r\n glVertex2f(785,420);\r\n 
glVertex2f(800,425);\r\n glVertex2f(820,430);\r\n glVertex2f(830,428);\r\n glVertex2f(840,420);\r\n glVertex2f(845,410);\r\n glVertex2f(840,400);\r\n glVertex2f(837,390);\r\n glVertex2f(830,380);\r\n glVertex2f(825,375);\r\n glVertex2f(820,365);\r\n glVertex2f(815,340);\r\n glVertex2f(810,365);\r\n glVertex2f(800,375);\r\n glVertex2f(800,370);\r\n glVertex2f(790,370);\r\n glVertex2f(770,370);\r\n glVertex2f(760,390);\r\n glVertex2f(765,380);\r\n glEnd();\r\n glBegin(GL_LINES);\r\n glColor3f(0,0,0);\r\n glVertex2f(730,330);\r\n glVertex2f(755,300);\r\n glEnd();\r\n glBegin(GL_LINES);\r\n glColor3f(0,0,0);\r\n glVertex2f(750,300);\r\n glVertex2f(710,325);\r\n glEnd();\r\n}\r\n\r\nvoid draw_tree()\r\n{\r\n glBegin(GL_QUADS);\r\n glColor3f(0.3,0,0);\r\n glVertex2f(100,100);\r\n glVertex2f(150,100);\r\n glVertex2f(150,200);\r\n glVertex2f(100,200);\r\n glEnd();\r\n glBegin(GL_POLYGON);\r\n glColor3f(0,0.4,0.1);\r\n glVertex2f(100,200);\r\n glVertex2f(80,200);\r\n glVertex2f(60,210);\r\n glVertex2f(50,230);\r\n glVertex2f(50,250);\r\n glVertex2f(55,280);\r\n glVertex2f(65,310);\r\n glVertex2f(80,335);\r\n glVertex2f(100,360);\r\n glVertex2f(110,370);\r\n glVertex2f(135,370);\r\n glVertex2f(160,350);\r\n glVertex2f(180,330);\r\n glVertex2f(200,300);\r\n glVertex2f(205,270);\r\n glVertex2f(210,250);\r\n glVertex2f(205,230);\r\n glVertex2f(190,220);\r\n glVertex2f(180,210);\r\n glVertex2f(150,200);\r\n glEnd();\r\n\r\n glBegin(GL_QUADS);\r\n glColor3f(0.3,0,0);\r\n glVertex2f(250,300);\r\n glVertex2f(300,300);\r\n glVertex2f(300,400);\r\n glVertex2f(250,400);\r\n glEnd();\r\n glBegin(GL_POLYGON);\r\n glColor3f(0,0.4,0.1);\r\n glVertex2f(250,400);\r\n glVertex2f(210,400);\r\n glVertex2f(190,415);\r\n glVertex2f(190,450);\r\n glVertex2f(200,500);\r\n glVertex2f(230,540);\r\n glVertex2f(260,560);\r\n glVertex2f(300,560);\r\n glVertex2f(325,530);\r\n glVertex2f(345,500);\r\n glVertex2f(360,450);\r\n glVertex2f(360,410);\r\n glVertex2f(330,400);\r\n glVertex2f(300,400);\r\n 
glEnd();\r\n}\r\n\r\nvoid draw_stone()\r\n{\r\n int i;\r\n\tfloat rad;\r\n\tglColor3f(0.3,0.4,0.4);\r\n\tglBegin(GL_POLYGON);\r\n\tfor(i=0;i<360;i++)\r\n\t{\r\n\t\trad=i*3.14159/180;\r\n\t\tglVertex2f(cos(rad)*7,sin(rad)*7);\r\n\t}\r\n\tglEnd();\r\n}\r\n\r\nvoid draw_eyes()\r\n{\r\n int i;\r\n\tfloat rad;\r\n\tglColor3f(1,1,1);\r\n\tglBegin(GL_POLYGON);\r\n\tfor(i=0;i<360;i++)\r\n\t{\r\n\t\trad=i*3.14159/180;\r\n\t\tglVertex2f(cos(rad)*4,sin(rad)*4);\r\n\t}\r\n\tglEnd();\r\n}\r\n\r\nvoid scene2()\r\n{\r\n\tdraw_hill();\r\n\tdraw_sky();\r\n\r\n\tdraw_tree();\r\n\tglPushMatrix();\r\n\tglTranslatef(950,190,0);\r\n\tdraw_stone();\r\n\tglPopMatrix();\r\n glPushMatrix();\r\n\tglTranslatef(960,100,0);\r\n\tdraw_stone();\r\n\tglPopMatrix();\r\n glPushMatrix();\r\n\tglTranslatef(920,30,0);\r\n\tdraw_stone();\r\n\tglPopMatrix();\r\n\tglPushMatrix();\r\n\tglTranslatef(800,10,0);\r\n\tdraw_stone();\r\n\tglPopMatrix();\r\n\tglPushMatrix();\r\n\tglTranslatef(700,15,0);\r\n\tdraw_stone();\r\n\tglPopMatrix();\r\n\tglPushMatrix();\r\n\tglTranslatef(650,30,0);\r\n\tdraw_stone();\r\n\tglPopMatrix();\r\n glPushMatrix();\r\n\tglTranslatef(680,70,0);\r\n\tdraw_stone();\r\n\tglPopMatrix();\r\n glPushMatrix();\r\n\tglTranslatef(620,100,0);\r\n\tdraw_stone();\r\n\tglPopMatrix();\r\n glPushMatrix();\r\n\tglTranslatef(570,10,0);\r\n\tdraw_stone();\r\n\tglPopMatrix();\r\n draw_vase();\r\n glPushMatrix();\r\n\tglTranslatef(815,400,0);\r\n\tdraw_eyes();\r\n\tglPopMatrix();\r\n draw_crow();\r\n\r\n\t//draw_pond();\r\n\r\n\r\n\r\n}\r\n\r\n\r\nvoid init()\r\n{\r\n\tglMatrixMode(GL_PROJECTION);\r\n\tglLoadIdentity();\r\n\tgluOrtho2D(0,1000,0,1000);\r\n\tglMatrixMode(GL_MODELVIEW);\r\n\tglLoadIdentity();\r\n}\r\n\r\nvoid display()\r\n{\r\n\tglClearColor(0.2,0.8,0.1,1);\r\n\tglClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT);\r\n\r\n\tscene2();\r\n\r\n\t//scene3();\r\n\t//scene4();\r\n\t//scene5();\r\n\t//scene6();\r\n\r\n\tglFlush();\r\n\tglutSwapBuffers();\r\n}\r\n\r\nint main(int argc,char 
**argv)\r\n{\r\n\tglutInit(&argc,argv);\r\n\tglutInitDisplayMode(GLUT_DOUBLE|GLUT_RGB|GLUT_DEPTH);\r\n\t//glutInitWindowPosition(50,100);\r\n\tglutInitWindowSize(1000,1000);\r\n\tglutCreateWindow(\"scene2\");\r\n\tinit();\r\n\tglEnable(GL_DEPTH_TEST);\r\n\r\n\tglutDisplayFunc(display);\r\n\t//glutTimerFunc(25,time,0);\r\n\tglutMainLoop();\r\n}\r\n\r\n"
},
{
"alpha_fraction": 0.5009199380874634,
"alphanum_fraction": 0.6343087553977966,
"avg_line_length": 18.055091857910156,
"blob_id": "c2bc4e7607fb53c66fcaabf7139b0e7ab0a59d80",
"content_id": "5ad931b261974fb98f96828cdc57d188ab516189",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 22828,
"license_type": "no_license",
"max_line_length": 136,
"num_lines": 1198,
"path": "/sheep/main.c",
"repo_name": "nishanb/CG-College-Projects",
"src_encoding": "UTF-8",
"text": "#include<GL/glut.h>\n#include<stdio.h>\n#include<math.h>\n#include<string.h>\n\n//to manage which scene to be displayed\nint state=0,cx=0;\n//to manage window position\nint x,y,i,colorindex=0;\n\n//mamage day and night\nconst GLfloat color[10][3]={\n {0.0,0.03,0.08},\n {0.01,0.07,0.16},\n {0.02,0.11,0.27},\n {0.02,0.16,0.39},\n {0.05,0.22,0.49},\n {0.07,0.28,0.6},\n {0.13,0.35,0.69},\n {0.5294117647,0.8078431373,0.9215686275},\n {0.23,0.45,0.79},\n {0.39,0.64,0.87}\n};\n\n//scene1 variables\nfloat startx=2,starty=-200,flag=0;\n\n//person variables\nint moveFlag=0;\n//font types\nvoid *fonts[]=\n{\n GLUT_BITMAP_9_BY_15,\n GLUT_BITMAP_TIMES_ROMAN_10,\n GLUT_BITMAP_TIMES_ROMAN_24,\n GLUT_BITMAP_HELVETICA_18,\n GLUT_BITMAP_HELVETICA_12\n};\n//GL_init function\nvoid init()\n{\n\tglClearColor(1,1,1,1);\n glMatrixMode(GL_PROJECTION);\n\tgluOrtho2D(0,1000,0,1000);\n glMatrixMode(GL_MODELVIEW);\n}\n//method to handle keyboard inputs keyboard-\nvoid keyboard( unsigned char key, int x, int y )\n{\n //handle --> press any key to continue\n if(state==0){\n state=1;\n\n }\n\n //move to end\n if(state==5){\n state=6;\n }\n\n}\n//function to write text\nvoid write_text(int x, int y, char *string,void *font)\n{\n int len, i;\n glRasterPos2f(x, y);\n len = (int) strlen(string);\n for (i = 0; i < len; i++) {\n glutBitmapCharacter(font, string[i]);\n }\n}\n//intro --scene1 display\nvoid intro()\n{\n glColor3f(0.5,0.2,0.6);\n write_text(300,130,\" SRINIVAS INSTITUTE OF TECHNOLOGY - MANLGLORE 571543\",fonts[3]);\n glColor3f(0.3,0.5,0.8);\n write_text(375,100,\"DEPT. 
OF COMPUTER SCIENCE & ENGG.\",fonts[0]);\n glColor3f(0.3,0.5,0.8);\n write_text(350,00,\"UNDER THE GUIDENCE OF MR.SUDHAKAR.\",fonts[3]);\n write_text(480,-50,\"ASSITANT PROFEESSOR \",fonts[1]);\n glColor3f(0.8,0.1,0.2);\n write_text(350,600,\"THE BOY WHO CRIED OVER WOLF\",fonts[2]);\n glColor3f(1.0,0.0,1.0);\n write_text(450,500,\"SUBMITTED BY :\",fonts[0]);\n glColor3f(0.3,0.5,0.8);\n write_text(225,450,\"SHRAVYA\",fonts[3]);\n write_text(670,450,\"SHREYASHREE\",fonts[3]);\n write_text(180,300,\"\",fonts[3]);\n write_text(220,400,\"(4SN16CS089)\",fonts[0]);\n write_text(680,400,\"(4SN16CS190)\",fonts[0]);\n\n write_text(380,200,\"[ PRESS ANY KEY TO CONTINUE ]\",fonts[3]);\n}\n//fuction to draw draw circle\nvoid drawfcircle(GLfloat x,GLfloat y,GLfloat radius){\n\tint i;\n\tint triangleAmount=200000;\n\tGLfloat twicePi=2.0f*3.141592653;\n\tglEnable(GL_POINTS);\n\tglLineWidth(2.0);\n\tglBegin(GL_LINES);\n\tfor(i=0;i<=triangleAmount;i++){\n\t\tglVertex2f(x,y);\n\t\tglVertex2f(x+(radius*cos(i*twicePi/triangleAmount)),y+(radius*sin(i*twicePi/triangleAmount)));\n\t}\n\tglEnd();\n}\n//helper function for drawing sphere\nvoid sphere(float r, float g, float b, float a)\n{\n glColor4f(r,g,b,a);\n glutSolidSphere(1,100,32);\n\n}\n//function to draw sheep legs\nvoid drawLegs(){\n glColor3f(0.9,0.9,0.9);\n\n glBegin(GL_POLYGON);\n\tglColor3f(0.62,0.62,0.62);\n\tglVertex2f(400,300);\n\tglVertex2f(500,300);\n glVertex2f(500,500);\n\tglVertex2f(400,500);\n\tglEnd();\n\n glColor3f(0.4,0.4,0.4);\n glPushMatrix();\n glTranslatef(450,300,0);\n glScalef(60,30,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n}\n//function to draw sheep\nvoid drawSheep(){\n\n //legs\n glPushMatrix();\n glTranslatef(360,335,0);\n glScalef(0.1,0.5,0);\n drawLegs();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(375,335,0);\n glScalef(0.1,0.5,0);\n drawLegs();\n glPopMatrix();\n\n //front legs\n glPushMatrix();\n glTranslatef(445,335,0);\n glScalef(0.1,0.5,0);\n drawLegs();\n glPopMatrix();\n\n 
glPushMatrix();\n glTranslatef(460,335,0);\n glScalef(0.1,0.5,0);\n drawLegs();\n glPopMatrix();\n\n glColor3f(0.9,0.9,0.9);\n\n glPushMatrix();\n glTranslatef(500,600,0);\n glScalef(60,50,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(520,650,0);\n glScalef(60,50,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(450,650,0);\n glScalef(60,50,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(420,600,0);\n glScalef(60,50,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(500,620,0);\n glScalef(60,50,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n //head\n glColor3f(0.8,0.8,0.9);\n glPushMatrix();\n glTranslatef(580,620,0);\n glScalef(25,40,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n //eyes\n glColor3f(0,0,0);\n glPushMatrix();\n glTranslatef(570,620,0);\n glScalef(3,3,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n glColor3f(0,0,0);\n glPushMatrix();\n glTranslatef(585,620,0);\n glScalef(3,3,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n //ears\n glColor3f(0.7,0.7,0.7);\n glPushMatrix();\n glTranslatef(605,635,0);\n glScalef(15,8,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(555,635,0);\n glScalef(15,8,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n}\n//function to draw boy legs\nvoid drawBoyLegs(){\n glColor3f(0.9,0.9,0.9);\n glBegin(GL_POLYGON);\n\tglVertex2f(400,300);\n\tglVertex2f(500,300);\n glVertex2f(500,500);\n\tglVertex2f(400,500);\n\tglEnd();\n\n glColor3f(0,0,0);\n glPushMatrix();\n glTranslatef(450,300,0);\n glScalef(60,30,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n\n}\n//draw boy\nvoid drawBoy(){\n //glColor3f(0.9,0.9,0.9);\n\n glPushMatrix();\n glTranslatef(370,-50,0);\n glScalef(0.1,1,0);\n drawBoyLegs();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(430,-50,0);\n glScalef(0.1,1,0);\n drawBoyLegs();\n glPopMatrix();\n\n\n 
glPushMatrix();\n glTranslatef(520,-100,0);\n glScalef(0.1,1,0);\n glRotatef(60,0,0,1);\n drawBoyLegs();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(340,680,0);\n glScalef(0.1,1,0);\n glRotatef(-60,0,0,1);\n drawBoyLegs();\n glPopMatrix();\n\n glPushMatrix();\n glScalef(1,1.2,0);\n glBegin(GL_POLYGON);\n\tglColor3f(0.62,0.62,0.62);\n\tglVertex2f(400,300);\n\tglVertex2f(500,300);\n glVertex2f(500,500);\n\tglVertex2f(400,500);\n\tglEnd();\n glPopMatrix();\n\n glColor3f(0,0,0);\n glPushMatrix();\n glTranslatef(450,630,0);\n glScalef(35,35,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n\n glColor3f(0.9,0.9,0.9);\n glPushMatrix();\n glTranslatef(450,620,0);\n glScalef(35,35,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n glColor3f(0,0,0);\n glPushMatrix();\n glTranslatef(430,620,0);\n glScalef(5,5,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n glColor3f(0,0,0);\n glPushMatrix();\n glTranslatef(460,620,0);\n glScalef(5,5,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n\n glColor3f(0,0,0);\n glPushMatrix();\n glTranslatef(450,500,0);\n glScalef(1,91,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n}\n//function to draw background\nvoid background()\n{\n\tglColor3f(0.5294117647,0.8078431373,0.9215686275);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(0,0);\n\tglVertex2f(1000,0);\n\tglVertex2f(1000,1000);\n\tglVertex2f(0,1000);\n glEnd();\n}\n//fucntion to draw Tree\nvoid drawTree(){\n glBegin(GL_POLYGON);\n\tglColor3f(0.6,0,0);\n\tglVertex2f(525,500);\n\tglVertex2f(550,500);\n\tglVertex2f(550,750);\n\tglVertex2f(525,750);\n\tglEnd();\n\n\tglBegin(GL_POLYGON);\n\tglColor3f(0.6,0,0);\n\tglVertex2f(550,525);\n\tglVertex2f(600,575);\n\tglVertex2f(600,600);\n\tglVertex2f(550,550);\n\tglEnd();\n\n\tglBegin(GL_POLYGON);\n\tglColor3f(0.6,0,0);\n\tglVertex2f(525,600);\n\tglVertex2f(525,625);\n\tglVertex2f(475,675);\n\tglVertex2f(475,650);\n\tglEnd();\n\n // right green\n\tglColor3f(0,1,0);\n\n glPushMatrix();\n glTranslatef(620,580,0);\n 
glScalef(40,50,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(450,680,0);\n glScalef(40,50,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(530,750,0);\n glScalef(60,50,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(580,740,0);\n glScalef(60,50,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n}\n//function to draw trees\nvoid drawTrees(){\n glPushMatrix();\n glTranslatef(-500,0,0);\n drawTree();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(-300,0,0);\n drawTree();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(-250,0,0);\n drawTree();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(-50,0,0);\n drawTree();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(70,-500,0);\n drawTree();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(350,-500,0);\n drawTree();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(400,-200,0);\n drawTree();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(500,-500,0);\n drawTree();\n glPopMatrix();\n\n}\n//function to draw fox legs\nvoid drawFoxLegs(){\n glColor3f(0.874,0.552,0.227);\n glBegin(GL_POLYGON);\n glVertex2f(300,500);\n glVertex2f(500,0);\n glVertex2f(700,500);\n glEnd();\n\n glColor3f(0.4,0.4,0.4);\n glPushMatrix();\n glTranslatef(480,50,0);\n glScalef(60,30,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n}\n//function to draw fox\nvoid drawFox(){\n glColor3f(0.874,0.552,0.227);\n glBegin(GL_POLYGON);\n glVertex2f(300,400);\n glVertex2f(300,600);\n glVertex2f(800,600);\n glVertex2f(800,400);\n glEnd();\n\n glBegin(GL_POLYGON);\n glVertex2f(300,600);\n glVertex2f(100,500);\n glVertex2f(300,400);\n glEnd();\n\n glPushMatrix();\n glTranslatef(115,500,0);\n glScalef(40,30,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n glColor3f(0.1,0.1,0.1);\n glPushMatrix();\n glTranslatef(250,500,0);\n glScalef(10,10,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n //glColor3f(0,0,0);\n 
glPushMatrix();\n glTranslatef(280,500,0);\n glScalef(10,10,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n\n glPushMatrix();\n glTranslatef(320,180,0);\n glScalef(0.1,0.5,0);\n drawFoxLegs();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(350,180,0);\n glScalef(0.1,0.5,0);\n drawFoxLegs();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(600,180,0);\n glScalef(0.1,0.5,0);\n drawFoxLegs();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(615,180,0);\n glScalef(0.1,0.5,0);\n drawFoxLegs();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(800,180,0);\n glRotatef(60,0,0,1);\n glScalef(0.5,0.5,0);\n drawFoxLegs();\n glPopMatrix();\n\n\n}\n//backround scene\nvoid scene(){\n glPushMatrix();\n glScalef(3,2,1);\n glTranslatef(0,-300,1);\n\n //lawn left extension\n glBegin(GL_POLYGON);\n glColor3f(0.19,0.80,0.19);\n glVertex2f(400,525);\n glVertex2f(400,575);\n glVertex2f(100,575);\n glVertex2f(75,550);\n glVertex2f(75,535);\n glVertex2f(100,525);\n glEnd();\n //left lawn\n glBegin(GL_POLYGON);\n glColor3f(0.5,1,0.0);\n glVertex2f(400,500);\n glVertex2f(0,500);\n glVertex2f(0,550);\n glVertex2f(75,550);\n glVertex2f(75,535);\n glVertex2f(100,525);\n glVertex2f(400,525);\n glEnd();\n\n glBegin(GL_POLYGON);\n glColor3f(0.19,0.80,0.19);\n glVertex2f(250,500);\n glVertex2f(0,500);\n glVertex2f(0,525);\n glVertex2f(250,525);\n glEnd();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(400,0,0);\n drawTree();\n glPopMatrix();\n\n glPushMatrix();\n glScalef(0.8,0.8,1);\n glTranslatef(350,100,0);\n drawTree();\n glPopMatrix();\n\n glPushMatrix();\n glScalef(10,2,1);\n glTranslatef(-200,-500,1);\n //lawn left extension\n glBegin(GL_POLYGON);\n glColor3f(0.19,0.80,0.19);\n glVertex2f(400,525);\n glVertex2f(400,575);\n glVertex2f(100,575);\n glVertex2f(75,550);\n glVertex2f(75,535);\n glVertex2f(100,525);\n glEnd();\n\n //left lawn\n glBegin(GL_POLYGON);\n glColor3f(0.5,1,0.0);\n glVertex2f(400,500);\n glVertex2f(0,500);\n glVertex2f(0,550);\n 
glVertex2f(75,550);\n glVertex2f(75,535);\n glVertex2f(100,525);\n glVertex2f(400,525);\n glEnd();\n\n glBegin(GL_POLYGON);\n glColor3f(0.19,0.80,0.19);\n glVertex2f(250,500);\n glVertex2f(0,500);\n glVertex2f(0,525);\n glVertex2f(250,525);\n glEnd();\n\n glPopMatrix();\n\n\n //------------\n\n glPushMatrix();\n glScalef(4,5,0);\n glTranslatef(-100,-495,0);\n //lawn left extension\n glBegin(GL_POLYGON);\n glColor3f(0.19,0.80,0.19);\n glVertex2f(400,525);\n glVertex2f(400,575);\n glVertex2f(100,575);\n glVertex2f(100,525);\n glEnd();\n glPopMatrix();\n\n // //draw sun\n // glColor3f(1.0, 0.33, 0.0);\n // drawfcircle(400,800,50);\n\n\n //head\n\n\n}//intro texts are here\n//story box\nvoid showStory(){\n //story box\n glColor3f(0.2,0.1,0.5);\n glBegin(GL_POLYGON);\n glVertex2f(0,0);\n glVertex2f(0,80);\n glVertex2f(1000,80);\n glVertex2f(1000,0);\n glEnd();\n //Add story part-1 here\n}\n//intro\nvoid scene1(){\n background();\n glPushMatrix();\n glTranslatef(-50,150,0);\n intro();\n glPopMatrix();\n glFlush();\n}\n//scene-1 boy watchiing over a sheeps\nvoid scene2() {\n background();\n scene();\n //big rock\n glColor3f(0.1,0.1,0.1);\n glPushMatrix();\n glTranslatef(100,200,0);\n glScalef(505,400,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n glColor3f(0.2,0.2,0.2);\n glPushMatrix();\n glTranslatef(290,150,0);\n glScalef(505,400,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n //sheeps\n glPushMatrix();\n glTranslatef(1,1,0);\n glScalef(0.5,0.5,0);\n drawSheep();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(400,100,0);\n glScalef(0.5,0.5,0);\n drawSheep();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(-100,200,0);\n glScalef(0.5,0.5,0);\n drawSheep();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(200+startx/10,-100,0);\n glScalef(0.5,0.5,0);\n drawSheep();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(-200+startx/10,-100,0);\n glScalef(0.5,0.5,0);\n drawSheep();\n glPopMatrix();\n\n glPushMatrix();\n glScalef(0.8,0.8,0);\n 
glTranslatef(-100+startx/5,270,0);\n drawBoy();\n glPopMatrix();\n\n glColor3f(1.0, 0.33, 0.0);\n drawfcircle(400,800,50);\n\n showStory();\n glColor3f(1,1,1);\n write_text(00,30,\"Everyday a sheperd boy stayed on a hill watchiing villages sheeps His job was to make sure nothing\",fonts[0]);\n write_text(00,10,\" bad happend to sheeps!\",fonts[0]);\n\n\n //next page\n if(startx>300){\n state=2;\n startx=0;\n }else{\n startx++;\n }\n glFlush();\n}\n//scene-2 shouting wolf\nvoid scene3(){\n background();\n scene();\n //big rock\n glColor3f(0.1,0.1,0.1);\n glPushMatrix();\n glTranslatef(100,200,0);\n glScalef(505,400,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n glColor3f(0.2,0.2,0.2);\n glPushMatrix();\n glTranslatef(290,150,0);\n glScalef(505,400,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n //sheeps\n glPushMatrix();\n glTranslatef(1,1,0);\n glScalef(0.5,0.5,0);\n drawSheep();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(400+startx/100,100,0);\n glScalef(0.5,0.5,0);\n drawSheep();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(-100,200,0);\n glScalef(0.5,0.5,0);\n drawSheep();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(200+startx/10,-100,0);\n glScalef(0.5,0.5,0);\n drawSheep();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(-200,-100,0);\n glScalef(0.5,0.5,0);\n drawSheep();\n glPopMatrix();\n\n glPushMatrix();\n glScalef(0.8,0.8,0);\n glTranslatef(-100,270,0);\n drawBoy();\n glPopMatrix();\n\n glColor3f(0.1,0.1,0.1);\n glPushMatrix();\n glTranslatef(520,750,0);\n glScalef(130,80,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(370,750,0);\n glScalef(15,15,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(330,750,0);\n glScalef(10,10,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n\n\n glPushMatrix();\n glScalef(0.8,0.8,0);\n glTranslatef(950-startx,120,0);\n drawBoy();\n glPopMatrix();\n\n glPushMatrix();\n glScalef(0.8,0.8,0);\n 
glTranslatef(800-startx,120,0);\n drawBoy();\n glPopMatrix();\n\n showStory();\n glColor3f(1,1,1);\n write_text(00,30,\"One afternoon Boy decided play a trick on villagers Wolf wolf he cried. There's a wolf chasing the sheep\",fonts[0]);\n write_text(00,10,\" \",fonts[0]);\n\n if(moveFlag==0)\n {\n glColor3f(1,1,1);\n write_text(400,730,\" Wolf ! Wolf ! Wolf\",fonts[3]);\n if(startx<500){\n startx++;\n }else{\n glColor3f(1,1,1);\n write_text(400,730,\"Nothing in here i'm just kiddig\",fonts[3]);\n for(int i=0;i<100000000;i++){\n starty++;\n glPushMatrix();\n glTranslatef(starty,1,1);\n glPopMatrix();\n }\n moveFlag=1;\n }\n }else{\n glColor3f(1,1,1);\n write_text(400,730,\"Nothing in here i'm just kiddig\",fonts[3]);\n startx--;\n if(startx==300){\n startx=0;\n moveFlag=0;\n state=3;\n }\n }\n\n glFlush();\n\n}\n//scene-3 calling wolf prank again\nvoid scene4(){\n background();\n scene();\n //big rock\n glColor3f(0.1,0.1,0.1);\n glPushMatrix();\n glTranslatef(100,200,0);\n glScalef(505,400,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n glColor3f(0.2,0.2,0.2);\n glPushMatrix();\n glTranslatef(290,150,0);\n glScalef(505,400,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n //sheeps\n glPushMatrix();\n glTranslatef(1,1,0);\n glScalef(0.5,0.5,0);\n drawSheep();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(400+startx/100,100,0);\n glScalef(0.5,0.5,0);\n drawSheep();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(-100,200,0);\n glScalef(0.5,0.5,0);\n drawSheep();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(200+startx/10,-100,0);\n glScalef(0.5,0.5,0);\n drawSheep();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(-200,-100,0);\n glScalef(0.5,0.5,0);\n drawSheep();\n glPopMatrix();\n\n glPushMatrix();\n glScalef(0.8,0.8,0);\n glTranslatef(-100,270,0);\n drawBoy();\n glPopMatrix();\n\n glColor3f(0.1,0.1,0.1);\n glPushMatrix();\n glTranslatef(520,750,0);\n glScalef(130,80,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n 
glPushMatrix();\n glTranslatef(370,750,0);\n glScalef(15,15,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(330,750,0);\n glScalef(10,10,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n\n\n glPushMatrix();\n glScalef(0.8,0.8,0);\n glTranslatef(950-startx,120,0);\n drawBoy();\n glPopMatrix();\n\n glPushMatrix();\n glScalef(0.8,0.8,0);\n glTranslatef(800-startx,120,0);\n drawBoy();\n glPopMatrix();\n\n showStory();\n glColor3f(1,1,1);\n write_text(00,30,\"The next day boy tried his trick again Wolf Wolf he called out .There is a wolf chading the sheeps\",fonts[0]);\n write_text(00,10,\" \",fonts[0]);\n\n if(moveFlag==0)\n {\n glColor3f(1,1,1);\n write_text(400,730,\" Wolf ! Wolf ! Again\",fonts[3]);\n if(startx<500){\n startx++;\n }else{\n glColor3f(1,1,1);\n write_text(400,730,\"LOL i'm just kiddig\",fonts[3]);\n for(int i=0;i<100000;i++){\n starty++; write_text(00,10,\" \",fonts[0]);\n glPushMatrix();\n glTranslatef(starty,1,1);\n glPopMatrix();\n }\n moveFlag=1;\n }\n }else{\n glColor3f(1,1,1);\n write_text(400,730,\"LOL i'm just kiddig\",fonts[3]);\n startx--;\n if(startx==300){\n state=4;\n startx=0;\n }\n }\n\n glFlush();\n}\n//actual fox comes\nvoid scene5(){\n background();\n scene();\n //big rock\n glColor3f(0.1,0.1,0.1);\n glPushMatrix();\n glTranslatef(100,200,0);\n glScalef(505,400,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n glColor3f(0.2,0.2,0.2);\n glPushMatrix();\n glTranslatef(290,150,0);\n glScalef(505,400,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n //sheeps\n glPushMatrix();\n glTranslatef(1,1,0);\n glScalef(0.5,0.5,0);\n drawSheep();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(400+startx/100,100,0);\n glScalef(0.5,0.5,0);\n drawSheep();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(-100,200,0);\n glScalef(0.5,0.5,0);\n drawSheep();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(200-startx/10,-100,0);\n glScalef(0.5,0.5,0);\n drawSheep();\n glPopMatrix();\n\n 
glPushMatrix();\n glTranslatef(-200,-100,0);\n glScalef(0.5,0.5,0);\n drawSheep();\n glPopMatrix();\n\n glPushMatrix();\n glScalef(0.8,0.8,0);\n glTranslatef(-100,270,0);\n drawBoy();\n glPopMatrix();\n\n glColor3f(0.1,0.1,0.1);\n glPushMatrix();\n glTranslatef(520,750,0);\n glScalef(130,80,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(370,750,0);\n glScalef(15,15,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(330,750,0);\n glScalef(10,10,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n glColor3f(0,1,1);\n write_text(400,730,\"wolf wolf Real Wolf!\",fonts[3]);\n startx=startx+2;\n\n glPushMatrix();\n glScalef(0.5,0.5,0);\n glTranslatef(2000-startx,2,1);\n drawFox();\n glPopMatrix();\n\n if (startx==1400) {\n state=5;\n startx=0;\n }\n\n showStory();\n glColor3f(1,1,1);\n write_text(00,30,\"One day the real wolf comes and boy calls out wolf but no one comes for help!\",fonts[0]);\n write_text(00,10,\" \",fonts[0]);\n glFlush();\n\n}\n//scene-5 all sheeps died\nvoid scene6(){\n background();\n scene();\n //big rock\n glColor3f(0.1,0.1,0.1);\n glPushMatrix();\n glTranslatef(100,200,0);\n glScalef(505,400,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n glColor3f(0.2,0.2,0.2);\n glPushMatrix();\n glTranslatef(290,150,0);\n glScalef(505,400,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n glPushMatrix();\n glScalef(0.8,0.8,0);\n glTranslatef(-100,100,0);\n drawBoy();\n glPopMatrix();\n\n glColor3f(0.1,0.1,0.1);\n glPushMatrix();\n glTranslatef(520,750,0);\n glScalef(130,80,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(370,700,0);\n glScalef(15,15,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(330,650,0);\n glScalef(10,10,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n glColor3f(0,1,1);\n write_text(400,730,\"All the sheeps died\",fonts[3]);\n\n\n glPushMatrix();\n glScalef(0.5,0.5,0);\n 
glTranslatef(2000-startx,2,1);\n drawFox();\n glPopMatrix();\n\n\n\n showStory();\n glColor3f(1,1,1);\n write_text(00,30,\"The villagers came running again and found no sheep They advised him to not repeat this again, \",fonts[0]);\n write_text(00,10,\" \",fonts[0]);\n glFlush();\n\n}\n//scene-6 conclusion\nvoid end(){\n glColor3f(0.2,0.1,0.5);\n glBegin(GL_POLYGON);\n glVertex2f(0,0);\n glVertex2f(0,1000);\n glVertex2f(1000,1000);\n glVertex2f(1000,0);\n glEnd();\n\n showStory();\n glColor3f(1,1,1);\n write_text(470,500,\"The End\",fonts[2]);\n //Add story conclusion here\n //end of the story\n}\n//GL display function\nvoid display()\n{\n\n\tglClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT);\n\tglLoadIdentity();\n\n\n\tif(state==0){\n\t\tscene1();\n\t}\n\n\tif(state==1){\n\t\tscene2();\n\n\t}\n\n if(state==2){\n scene3();\n }\n\n if (state==3) {\n scene4();\n }\n\n if(state==4){\n scene5();\n }\n if(state==5){\n scene6();\n }\n\n if(state==6){\n end();\n }\n\n glutSwapBuffers();\n glutPostRedisplay();\n}\n//main fucntion\nint main(int argc,char **argv)\n {\n glutInit(&argc,argv);\n\tglutInitDisplayMode(GLUT_DOUBLE|GLUT_RGB);\n\tglutInitWindowSize(1000,1000);\n\tglutInitWindowPosition(0, 0);\n\tglutCreateWindow(\"The Boy Who Cried Over Wolf\");\n\tglutDisplayFunc(display);\n glutKeyboardFunc(keyboard);\n init();\n\tglutMainLoop();\n}\n"
},
{
"alpha_fraction": 0.4821350872516632,
"alphanum_fraction": 0.6247159242630005,
"avg_line_length": 17.717647552490234,
"blob_id": "3270224c881268cf88491898d97aed9dbe7696e8",
"content_id": "84651bed9ff8b3aaa16fc24848abd17db3fd1aa8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 20683,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 1105,
"path": "/hanuman/hanuma.c",
"repo_name": "nishanb/CG-College-Projects",
"src_encoding": "UTF-8",
"text": "#include<GL/glut.h>\n#include<stdio.h>\n#include<math.h>\n#include<string.h>\n\n//to manage which scene to be displayed\nint state=0,k;\n\n//mamage day and night\nconst GLfloat color[10][3]={\n {0.0,0.03,0.08},\n {0.01,0.07,0.16},\n {0.02,0.11,0.27},\n {0.02,0.16,0.39},\n {0.05,0.22,0.49},\n {0.07,0.28,0.6},\n {0.13,0.35,0.69},\n {0.5294117647,0.8078431373,0.9215686275},\n {0.23,0.45,0.79},\n {0.39,0.64,0.87}\n};\n\n//scene1 variables\nfloat startx=0;\n//font types\nvoid *fonts[]=\n{\n GLUT_BITMAP_9_BY_15,\n GLUT_BITMAP_TIMES_ROMAN_10,\n GLUT_BITMAP_TIMES_ROMAN_24,\n GLUT_BITMAP_HELVETICA_18,\n GLUT_BITMAP_HELVETICA_12\n};\n//GL_init function\nvoid init()\n{\n\tglClearColor(1,1,1,1);\n glMatrixMode(GL_PROJECTION);\n\tgluOrtho2D(0,1200,0,1000);\n glMatrixMode(GL_MODELVIEW);\n}\n//method to handle keyboard inputs keyboard-\nvoid keyboard( unsigned char key, int x, int y )\n{\n //handle --> press any key to continue\n if(state==0){\n state=1;\n\n }\n //move to end\n if(state==2){\n state=3;\n }\n\n}\n//function to write text\nvoid write_text(int x, int y, char *string,void *font)\n{\n int len, i;\n glRasterPos2f(x, y);\n len = (int) strlen(string);\n for (i = 0; i < len; i++) {\n glutBitmapCharacter(font, string[i]);\n }\n}\n\n//helper function for drawing sphere\nvoid sphere(float r, float g, float b, float a)\n{\n glColor4f(r,g,b,a);\n glutSolidSphere(1,100,32);\n\n}\n//story box\nvoid showStory(){\n //story box\n glColor3f(0.2,0.1,0.5);\n glBegin(GL_POLYGON);\n glVertex2f(0,0);\n glVertex2f(0,80);\n glVertex2f(1000,80);\n glVertex2f(1000,0);\n glEnd();\n //Add story part-1 here\n}\n\n//fucntion to draw Tree\nvoid drawTree(){\n 
glBegin(GL_POLYGON);\n\tglColor3f(0.6,0,0);\n\tglVertex2f(525,500);\n\tglVertex2f(550,500);\n\tglVertex2f(550,750);\n\tglVertex2f(525,750);\n\tglEnd();\n\n\tglBegin(GL_POLYGON);\n\tglColor3f(0.6,0,0);\n\tglVertex2f(550,525);\n\tglVertex2f(600,575);\n\tglVertex2f(600,600);\n\tglVertex2f(550,550);\n\tglEnd();\n\n\tglBegin(GL_POLYGON);\n\tglColor3f(0.6,0,0);\n\tglVertex2f(525,600);\n\tglVertex2f(525,625);\n\tglVertex2f(475,675);\n\tglVertex2f(475,650);\n\tglEnd();\n\n // right green\n\tglColor3f(0,1,0);\n\n glPushMatrix();\n glTranslatef(620,580,0);\n glScalef(40,50,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(450,680,0);\n glScalef(40,50,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(530,750,0);\n glScalef(60,50,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(580,740,0);\n glScalef(60,50,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n}\n//function to draw trees\nvoid drawTrees(){\n glPushMatrix();\n glTranslatef(-500,0,0);\n drawTree();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(-300,0,0);\n drawTree();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(-250,0,0);\n drawTree();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(-50,0,0);\n drawTree();\n glPopMatrix();\n}\n//function for the background\nvoid background()\n{\n\tglColor3f(0.5294117647,0.8078431373,0.9215686275);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(0,0);\n\tglVertex2f(1200,0);\n\tglVertex2f(1200,1000);\n\tglVertex2f(0,1000);\n glEnd();\n}\n//function to draw cloud\nvoid drawClouds(){\n glColor3f(0.9,0.9,0.9);\n\n glPushMatrix();\n glTranslatef(530,750,0);\n glScalef(60,50,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(580,740,0);\n glScalef(60,50,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(580,700,0);\n glScalef(60,50,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n glPushMatrix();\n 
glTranslatef(500,8000,0);\n glScalef(60,50,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n}\n\nvoid backg1(){\n background();\n\n glPushMatrix();\n glScalef(3,2,1);\n glTranslatef(0,-300,1);\n\n //lawn left extension\n glBegin(GL_POLYGON);\n glColor3f(0.19,0.80,0.19);\n glVertex2f(400,525);\n glVertex2f(400,575);\n glVertex2f(100,575);\n glVertex2f(75,550);\n glVertex2f(75,535);\n glVertex2f(100,525);\n glEnd();\n //left lawn\n glBegin(GL_POLYGON);\n glColor3f(0.5,1,0.0);\n glVertex2f(400,500);\n glVertex2f(0,500);\n glVertex2f(0,550);\n glVertex2f(75,550);\n glVertex2f(75,535);\n glVertex2f(100,525);\n glVertex2f(400,525);\n glEnd();\n\n glBegin(GL_POLYGON);\n glColor3f(0.19,0.80,0.19);\n glVertex2f(250,500);\n glVertex2f(0,500);\n glVertex2f(0,525);\n glVertex2f(250,525);\n glEnd();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(400,0,0);\n drawTree();\n glPopMatrix();\n\n glPushMatrix();\n glScalef(0.8,0.8,1);\n glTranslatef(350,100,0);\n drawTree();\n glPopMatrix();\n\n\n\n\n\n glPushMatrix();\n glScalef(10,2,1);\n glTranslatef(-200,-500,1);\n //lawn left extension\n glBegin(GL_POLYGON);\n glColor3f(0.19,0.80,0.19);\n glVertex2f(400,525);\n glVertex2f(400,575);\n glVertex2f(100,575);\n glVertex2f(75,550);\n glVertex2f(75,535);\n glVertex2f(100,525);\n glEnd();\n\n //left lawn\n glBegin(GL_POLYGON);\n glColor3f(0.5,1,0.0);\n glVertex2f(400,500);\n glVertex2f(0,500);\n glVertex2f(0,550);\n glVertex2f(75,550);\n glVertex2f(75,535);\n glVertex2f(100,525);\n glVertex2f(400,525);\n glEnd();\n\n glBegin(GL_POLYGON);\n glColor3f(0.19,0.80,0.19);\n glVertex2f(250,500);\n glVertex2f(0,500);\n glVertex2f(0,525);\n glVertex2f(250,525);\n glEnd();\n\n glPopMatrix();\n\n\n //------------\n\n glPushMatrix();\n glScalef(4,5,0);\n glTranslatef(-100,-495,0);\n //lawn left extension\n glBegin(GL_POLYGON);\n glColor3f(0.19,0.80,0.19);\n glVertex2f(400,525);\n glVertex2f(400,575);\n glVertex2f(100,575);\n glVertex2f(100,525);\n glEnd();\n glPopMatrix();\n\n // //draw 
sun\n glPushMatrix();\n glTranslatef(-20,0,0);\n glColor3f(1.0, 0.33, 0.0);\n glTranslatef(320,850,0);\n glScalef(50,50,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n // drawfcircle(400,800,50);\n\n glPushMatrix();\n glScalef(0.8,0.8,1);\n glTranslatef(450,-100,0);\n drawTrees();\n glPopMatrix();\n\n glPushMatrix();\n glScalef(0.8,0.8,1);\n glTranslatef(650,-400,0);\n drawTrees();\n glPopMatrix();\n\n glPushMatrix();\n glScalef(0.8,0.8,1);\n glTranslatef(1150,-150,0);\n drawTrees();\n glPopMatrix();\n\n //head\n}\nvoid backg2(){\n background();\n\n glPushMatrix();\n glScalef(3,2,1);\n glTranslatef(0,-300,1);\n\n //lawn left extension\n glBegin(GL_POLYGON);\n glColor3f(0.19,0.80,0.19);\n glVertex2f(400,525);\n glVertex2f(400,575);\n glVertex2f(100,575);\n glVertex2f(75,550);\n glVertex2f(75,535);\n glVertex2f(100,525);\n glEnd();\n //left lawn\n glBegin(GL_POLYGON);\n glColor3f(0.5,1,0.0);\n glVertex2f(400,500);\n glVertex2f(0,500);\n glVertex2f(0,550);\n glVertex2f(75,550);\n glVertex2f(75,535);\n glVertex2f(100,525);\n glVertex2f(400,525);\n glEnd();\n\n glBegin(GL_POLYGON);\n glColor3f(0.19,0.80,0.19);\n glVertex2f(250,500);\n glVertex2f(0,500);\n glVertex2f(0,525);\n glVertex2f(250,525);\n glEnd();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(400,0,0);\n drawTree();\n glPopMatrix();\n\n glPushMatrix();\n glScalef(0.8,0.8,1);\n glTranslatef(350,100,0);\n drawTree();\n glPopMatrix();\n\n\n\n\n\n glPushMatrix();\n glScalef(10,2,1);\n glTranslatef(-200,-500,1);\n //lawn left extension\n glBegin(GL_POLYGON);\n glColor3f(0.19,0.80,0.19);\n glVertex2f(400,525);\n glVertex2f(400,575);\n glVertex2f(100,575);\n glVertex2f(75,550);\n glVertex2f(75,535);\n glVertex2f(100,525);\n glEnd();\n\n //left lawn\n glBegin(GL_POLYGON);\n glColor3f(0.5,1,0.0);\n glVertex2f(400,500);\n glVertex2f(0,500);\n glVertex2f(0,550);\n glVertex2f(75,550);\n glVertex2f(75,535);\n glVertex2f(100,525);\n glVertex2f(400,525);\n glEnd();\n\n glBegin(GL_POLYGON);\n 
glColor3f(0.19,0.80,0.19);\n glVertex2f(250,500);\n glVertex2f(0,500);\n glVertex2f(0,525);\n glVertex2f(250,525);\n glEnd();\n\n glPopMatrix();\n\n\n //------------\n\n glPushMatrix();\n glScalef(4,5,0);\n glTranslatef(-100,-495,0);\n //lawn left extension\n glBegin(GL_POLYGON);\n glColor3f(0.19,0.80,0.19);\n glVertex2f(400,525);\n glVertex2f(400,575);\n glVertex2f(100,575);\n glVertex2f(100,525);\n glEnd();\n glPopMatrix();\n\n // //draw sun\n glPushMatrix();\n glTranslatef(-20,0,0);\n glColor3f(1.0, 0.33, 0.0);\n glTranslatef(320,850,0);\n glScalef(50,50,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n // drawfcircle(400,800,50);\n\n glPushMatrix();\n glScalef(0.8,0.8,1);\n glTranslatef(450,-100,0);\n drawTrees();\n glPopMatrix();\n\n glPushMatrix();\n glScalef(0.8,0.8,1);\n glTranslatef(650,-400,0);\n drawTrees();\n glPopMatrix();\n\n glPushMatrix();\n glScalef(0.8,0.8,1);\n glTranslatef(1150,-150,0);\n drawTrees();\n glPopMatrix();\n\n glPushMatrix();\n glScalef(0.8,0.8,1);\n glTranslatef(150,-400,0);\n drawTrees();\n glPopMatrix();\n\n glPushMatrix();\n glScalef(0.8,0.8,1);\n glTranslatef(-50,-250,0);\n drawTrees();\n glPopMatrix();\n\n //head\n}\n//part1\nvoid part1(){\n glBegin(GL_POLYGON);\n glColor3f(0.964, 0.525, 0.454);\n glVertex2f(100,100);\n glVertex2f(200,20);\n glVertex2f(600,100);\n glVertex2f(600,400);\n glVertex2f(100,500);\n glEnd();\n}\n//part2\nvoid part2(){\n glBegin(GL_POLYGON);\n glColor3f(0.964, 0.525, 0.454);\n glVertex2f(100,0);\n glVertex2f(300,50);\n glVertex2f(600,100);\n glVertex2f(590,500);\n glVertex2f(350,450);\n glVertex2f(250,200);\n glVertex2f(100,130);\n glEnd();\n}\n//part3\nvoid part3(){\n glBegin(GL_POLYGON);\n glColor3f(0.964, 0.525, 0.454);\n glVertex2f(100,100);\n glVertex2f(300,200);\n glVertex2f(500,70);\n glVertex2f(600,100);\n glVertex2f(600,500);\n glVertex2f(500,600);\n glVertex2f(300,550);\n glVertex2f(90,500);\n glEnd();\n}\n//eyes\nvoid drawEyes(/* arguments */) {\n /* code */\n glPushMatrix();\n 
glTranslatef(-20,0,0);\n glColor3f(0, 0, 0.2);\n glPushMatrix();\n glTranslatef(920,490,0);\n glScalef(10,10,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n glColor3f(1, 1, 1);\n glPushMatrix();\n glTranslatef(920,490,0);\n glScalef(5,5,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(30,0,0);\n glColor3f(0, 0, 0.2);\n glPushMatrix();\n glTranslatef(920,490,0);\n glScalef(10,10,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n glColor3f(1, 1, 1);\n glPushMatrix();\n glTranslatef(920,490,0);\n glScalef(5,5,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n glPopMatrix();\n}\n//drawLegs\nvoid drawLegs(/* arguments */) {\n /* code */\n glBegin(GL_POLYGON);\n glColor3f(0.5,0.5,0.5);\n glVertex2f(100,320);\n glVertex2f(200,100);\n glVertex2f(350,200);\n glVertex2f(500,400);\n glVertex2f(400,600);\n glVertex2f(320,320);\n glVertex2f(220,220);\n glVertex2f(150,350);\n glEnd();\n\n\n}\n//drawArms\nvoid drawArms(/* arguments */) {\n /* code */\n glPushMatrix();\n\n glTranslatef(500,390,0);\n glRotatef(-50,0,0,1);\n glScalef(0.50,0.2,0);\n drawLegs();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(640,210,0);\n glRotatef(10,0,0,1);\n glScalef(0.50,0.2,0);\n drawLegs();\n glPopMatrix();\n\n glColor3f(0.1, 0.1, 0.1);\n glPushMatrix();\n glTranslatef(830,330,0);\n glScalef(35,35,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n}\n//draw Hanuman\nvoid drawHanuman(){\n\n //drawArms\n glPushMatrix();\n glTranslatef(100,351,0);\n glScalef(1,1,0);\n glRotatef(-30,0,0,1);\n drawArms();\n glPopMatrix();\n\n //drawLegs\n glPushMatrix();\n glTranslatef(1000,251,0);\n glScalef(1,1,0);\n glRotatef(140,0,0,1);\n drawArms();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(1000,321,0);\n glScalef(1,1,0);\n glRotatef(140,0,0,1);\n drawArms();\n glPopMatrix();\n\n //drawArms\n glPushMatrix();\n glTranslatef(200,451,0);\n glScalef(1,1,0);\n glRotatef(-30,0,0,1);\n drawArms();\n glPopMatrix();\n\n glPushMatrix();\n 
glTranslatef(70,430,0);\n glRotatef(-50,0,0,1);\n glScalef(0.40,0.1,0);\n drawLegs();\n glPopMatrix();\n\n //part1\n glPushMatrix();\n glTranslatef(400,300,0);\n glScalef(0.5,0.5,0);\n part1();\n glPopMatrix();\n\n //part 2\n glPushMatrix();\n glTranslatef(160,300,0);\n glScalef(0.5,0.5,0);\n part2();\n glPopMatrix();\n\n //part3\n glPushMatrix();\n glTranslatef(600,320,0);\n glScalef(0.5,0.35,0);\n part3();\n glPopMatrix();\n\n //head\n glColor3f(0.952, 0.803, 0.168);\n glPushMatrix();\n glTranslatef(920,500,0);\n glScalef(70,70,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n glColor3f(0.9, 0.2, 0.2);\n glPushMatrix();\n glTranslatef(920,490,0);\n glScalef(70,70,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n //eyes\n drawEyes();\n\n //ears\n glColor3f(1,0,0);\n glPushMatrix();\n glTranslatef(850,490,0);\n glScalef(20,10,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n glColor3f(1,0,0);\n glPushMatrix();\n glTranslatef(990,490,0);\n glScalef(20,10,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n //holder\n glPushMatrix();\n glColor3f(0,0,0);\n glScalef(0.5,2,0);\n glTranslatef(2100,10,0);\n glRotatef(120,0,1,1);\n glBegin(GL_POLYGON);\n glVertex2f(100,100);\n glVertex2f(150,100);\n glVertex2f(150,400);\n glVertex2f(100,400);\n glEnd();\n glPopMatrix();\n\n glColor3f(0.952, 0.803, 0.168);\n glPushMatrix();\n glTranslatef(900,370,0);\n glScalef(40,40,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n\n glColor3f(0.952, 0.576, 0.168);\n glPushMatrix();\n glScalef(0.7,0.7,1);\n glTranslatef(1350,380,0);\n glBegin(GL_POLYGON);\n glVertex2f(100,100);\n glVertex2f(300,100);\n glVertex2f(200,300);\n glEnd();\n glPopMatrix();\n\n\n\n\n}\n\n//draw Hanuman\nvoid drawHanumanStanding(){\n\n //drawArms\n glPushMatrix();\n glTranslatef(100,351,0);\n glScalef(1,1,0);\n glRotatef(-30,0,0,1);\n drawArms();\n glPopMatrix();\n\n //drawLegs\n glPushMatrix();\n glTranslatef(1000,211,0);\n glScalef(1,1,0);\n glRotatef(140,0,0,1);\n drawArms();\n 
glPopMatrix();\n\n glPushMatrix();\n glTranslatef(1000,321,0);\n glScalef(1,1,0);\n glRotatef(140,0,0,1);\n drawArms();\n glPopMatrix();\n\n //drawArms\n glPushMatrix();\n glTranslatef(200,451,0);\n glScalef(1,1,0);\n glRotatef(-30,0,0,1);\n drawArms();\n glPopMatrix();\n\n\n\n //part1\n glPushMatrix();\n glTranslatef(400,300,0);\n glScalef(0.5,0.5,0);\n part1();\n glPopMatrix();\n\n //part 2\n glPushMatrix();\n glTranslatef(160,300,0);\n glScalef(0.5,0.5,0);\n part2();\n glPopMatrix();\n\n //part3\n glPushMatrix();\n glTranslatef(600,320,0);\n glScalef(0.5,0.35,0);\n part3();\n glPopMatrix();\n\n //head\n glColor3f(0.952, 0.803, 0.168);\n glPushMatrix();\n glTranslatef(940,490,0);\n glScalef(70,70,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n glColor3f(0.9, 0.2, 0.2);\n glPushMatrix();\n glTranslatef(920,490,0);\n glScalef(70,70,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n\n\n //ears\n glColor3f(1,0,0);\n glPushMatrix();\n glTranslatef(930,550,0);\n glScalef(20,10,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n glColor3f(1,0,0);\n glPushMatrix();\n glTranslatef(930,430,0);\n glScalef(20,10,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n //holder\n glPushMatrix();\n glColor3f(0,0,0);\n glScalef(0.5,2,0);\n glTranslatef(2100,10,0);\n glRotatef(120,0,1,1);\n glBegin(GL_POLYGON);\n glVertex2f(100,100);\n glVertex2f(150,100);\n glVertex2f(150,400);\n glVertex2f(100,400);\n glEnd();\n glPopMatrix();\n\n glColor3f(0.952, 0.803, 0.168);\n glPushMatrix();\n glTranslatef(900,370,0);\n glScalef(40,40,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n\n glColor3f(0.952, 0.576, 0.168);\n glPushMatrix();\n glScalef(0.7,0.7,1);\n glTranslatef(1550,250,0);\n glRotatef(30,0,0,1);\n glBegin(GL_POLYGON);\n glVertex2f(100,100);\n glVertex2f(300,100);\n glVertex2f(200,300);\n glEnd();\n glPopMatrix();\n\n}\n\n//intro project details\nvoid scene1(){\n\n background();\n\n\n char str1[] = \"SRINIVAS INSTITUTE OF TECHNOLOGY\";\n glColor3f(0, 1, 1);\n 
glRasterPos2f(380, 855);\n for (k = 0; k < strlen(str1); k++) {\n glutBitmapCharacter(GLUT_BITMAP_TIMES_ROMAN_24, str1[k]);\n }\n glColor3f(0, 1, 1);\n glRasterPos2f(530, 810);\n char str2[] = \"MANGALURU-574143\";\n for (k = 0; k < strlen(str2); k++) {\n glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, str2[k]);\n }\n\n glColor3f(1, 0.5, 0.2);\n glRasterPos2f(265, 700);\n char str3[] = \"DEPARTMENT OF COMPUTER SCIENCE AND ENGINEERING\";\n for (k = 0; k < strlen(str3); k++) {\n glutBitmapCharacter(GLUT_BITMAP_TIMES_ROMAN_24, str3[k]);\n }\n\n glColor3f(0.8, 0.3, 0.4);\n glRasterPos2f(330, 630);\n char str4[] = \"COMPUTER GRAPHICS AND VISUALIZATION\";\n for (k = 0; k < strlen(str4); k++) {\n glutBitmapCharacter(GLUT_BITMAP_TIMES_ROMAN_24, str4[k]);\n }\n\n glColor3f(0.3, 0.5, 0.1);\n glRasterPos2f(520, 560);\n char str5[] = \"MINI PROJECT ON\";\n for (k = 0; k < strlen(str5); k++) {\n glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, str5[k]);\n }\n\n glColor3f(1, 0, 0);\n glRasterPos2f(465, 490);\n char str6[] = \"HANUMAN AND SANJEEVINI\";\n for (k = 0; k < strlen(str6); k++) {\n glutBitmapCharacter(GLUT_BITMAP_TIMES_ROMAN_24, str6[k]);\n }\n\n glColor3f(0.6, 0.6, 0);\n glRasterPos2f(50, 320);\n char str7[] = \"TEAM MEMBERS\";\n for (k = 0; k < strlen(str7); k++) {\n glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, str7[k]);\n }\n\n glColor3f(0.6, 0.6, 0);\n glRasterPos2f(50, 280);\n char str8[] = \"Name USN\";\n for (k = 0; k < strlen(str8); k++) {\n glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, str8[k]);\n }\n\n glColor3f(0.6, 0.6, 0);\n glRasterPos2f(50, 240);\n char str9[] = \"Name USN\";\n for (k = 0; k < strlen(str9); k++) {\n glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, str9[k]);\n }\n\n glColor3f(0.6, 0.6, 0);\n glRasterPos2f(50, 200);\n char str10[] = \"DEPT OF CSE\";\n for (k = 0; k < strlen(str10); k++) {\n glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, str10[k]);\n }\n\n glColor3f(0.8, 0.5, 0);\n glRasterPos2f(950, 320);\n char str11[] = \"PROJECT GUIDED BY\";\n 
for (k = 0; k < strlen(str11); k++) {\n glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, str11[k]);\n }\n\n glColor3f(0.8, 0.5, 0);\n glRasterPos2f(950, 280);\n char str12[] = \"MR.ARAVIND NAIK \";\n for (k = 0; k < strlen(str12); k++) {\n glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, str12[k]);\n }\n\n glColor3f(0.8, 0.5, 0);\n glRasterPos2f(950, 240);\n char str13[] = \"ASSISTANT PROFESSOR\";\n for (k = 0; k < strlen(str13); k++) {\n glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, str13[k]);\n }\n\n glColor3f(0.8, 0.5, 0);\n glRasterPos2f(950, 200);\n char str14[] = \"DEPT OF CSE\";\n for (k = 0; k < strlen(str14); k++) {\n glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, str14[k]);\n }\n\n glColor3f(1, 0,0);\n glRasterPos2f(450, 130);\n char str15[] = \"PRESS ANY KEY TO CONTINUE\";\n for (k = 0; k < strlen(str15); k++) {\n glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, str15[k]);\n }\n\n glFlush();\n}\n//scene-1 newton walking house <-----\nvoid scene2() {\n backg1();\n\n glPushMatrix();\n glTranslatef(100-startx/10,100,0);\n drawClouds();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(500+startx/10,140,0);\n drawClouds();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(-400+startx/10,-40,0);\n drawClouds();\n glPopMatrix();\n\n startx++;\n if(startx==350){\n state=2;\n startx=0;\n }\n glPushMatrix();\n glScalef(0.5,0.5,1);\n glTranslatef(700,0,0);\n glRotatef(90,0,0,1);\n drawHanumanStanding();\n glPopMatrix();\n\n //eyes\n glPushMatrix();\n glScalef(0.7,0.7,1);\n glTranslatef(-780,180,0);\n drawEyes();\n glPopMatrix();\n\n glFlush();\n}\n//scene-2 walking towards apple tree\nvoid scene3(){\n\n backg2();\n\n glPushMatrix();\n glTranslatef(100-startx/10,100,0);\n drawClouds();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(500+startx/10,140,0);\n drawClouds();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(-400+startx/10,-40,0);\n drawClouds();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(-800+startx,500,0);\n glScalef(0.5,0.5,1);\n 
drawHanuman();\n glPopMatrix();\n\n if(startx<2500)\n startx++;\n\n if(startx==1000){\n state=3;\n }\n glFlush();\n\n}\n\nvoid end(){\n glColor3f(0.2,0.1,0.5);\n glBegin(GL_POLYGON);\n glVertex2f(0,0);\n glVertex2f(0,1000);\n glVertex2f(1200,1000);\n glVertex2f(1200,0);\n glEnd();\n\n showStory();\n glColor3f(1,1,1);\n write_text(470,500,\"The End\",fonts[2]);\n}\n//GL display function\nvoid display()\n{\n\n\tglClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT);\n\tglLoadIdentity();\n\n\n\tif(state==0){\n\t\tscene1();\n\t}\n\n\tif(state==1){\n\t\tscene2();\n\n\t}\n\n if(state==2){\n scene3();\n }\n\n\n if(state==3){\n end();\n }\n\n glutSwapBuffers();\n glutPostRedisplay();\n}\n//main fucntion\nint main(int argc,char **argv)\n {\n glutInit(&argc,argv);\n\tglutInitDisplayMode(GLUT_DOUBLE|GLUT_RGB);\n\tglutInitWindowSize(1000,1000);\n\tglutInitWindowPosition(0, 0);\n\tglutCreateWindow(\"HANUMAN AND SANJEEVINI\");\n\tglutDisplayFunc(display);\n glutKeyboardFunc(keyboard);\n init();\n\tglutMainLoop();\n}\n"
},
{
"alpha_fraction": 0.4765166938304901,
"alphanum_fraction": 0.6330894231796265,
"avg_line_length": 18.2497501373291,
"blob_id": "8cd47e6890cd2f61643748806f5acd2a4734fd9e",
"content_id": "0fd90983398fbbc71c9e5dc359c33d1059e17230",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 19269,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 1001,
"path": "/newton/new.c",
"repo_name": "nishanb/CG-College-Projects",
"src_encoding": "UTF-8",
"text": "#include<GL/glut.h>\n#include<stdio.h>\n#include<math.h>\n#include<string.h>\n\n//to manage which scene to be displayed\nint state=0,k;\n\n//mamage day and night\nconst GLfloat color[10][3]={\n {0.0,0.03,0.08},\n {0.01,0.07,0.16},\n {0.02,0.11,0.27},\n {0.02,0.16,0.39},\n {0.05,0.22,0.49},\n {0.07,0.28,0.6},\n {0.13,0.35,0.69},\n {0.5294117647,0.8078431373,0.9215686275},\n {0.23,0.45,0.79},\n {0.39,0.64,0.87}\n};\n\n//scene1 variables\nfloat startx=0;\n//font types\nvoid *fonts[]=\n{\n GLUT_BITMAP_9_BY_15,\n GLUT_BITMAP_TIMES_ROMAN_10,\n GLUT_BITMAP_TIMES_ROMAN_24,\n GLUT_BITMAP_HELVETICA_18,\n GLUT_BITMAP_HELVETICA_12\n};\n//GL_init function\nvoid init()\n{\n\tglClearColor(1,1,1,1);\n glMatrixMode(GL_PROJECTION);\n\tgluOrtho2D(0,1200,0,1000);\n glMatrixMode(GL_MODELVIEW);\n}\n//method to handle keyboard inputs keyboard-\nvoid keyboard( unsigned char key, int x, int y )\n{\n //handle --> press any key to continue\n if(state==0){\n state=1;\n\n }\n //move to end\n if(state==4){\n state=5;\n }\n\n}\n//function to write text\nvoid write_text(int x, int y, char *string,void *font)\n{\n int len, i;\n glRasterPos2f(x, y);\n len = (int) strlen(string);\n for (i = 0; i < len; i++) {\n glutBitmapCharacter(font, string[i]);\n }\n}\n\n//helper function for drawing sphere\nvoid sphere(float r, float g, float b, float a)\n{\n glColor4f(r,g,b,a);\n glutSolidSphere(1,100,32);\n\n}\n//story box\nvoid showStory(){\n //story box\n glColor3f(0.2,0.1,0.5);\n glBegin(GL_POLYGON);\n glVertex2f(0,0);\n glVertex2f(0,80);\n glVertex2f(1000,80);\n glVertex2f(1000,0);\n glEnd();\n //Add story part-1 here\n}\n\n//fucntion to draw Tree\nvoid drawTree(){\n 
glBegin(GL_POLYGON);\n\tglColor3f(0.6,0,0);\n\tglVertex2f(525,500);\n\tglVertex2f(550,500);\n\tglVertex2f(550,750);\n\tglVertex2f(525,750);\n\tglEnd();\n\n\tglBegin(GL_POLYGON);\n\tglColor3f(0.6,0,0);\n\tglVertex2f(550,525);\n\tglVertex2f(600,575);\n\tglVertex2f(600,600);\n\tglVertex2f(550,550);\n\tglEnd();\n\n\tglBegin(GL_POLYGON);\n\tglColor3f(0.6,0,0);\n\tglVertex2f(525,600);\n\tglVertex2f(525,625);\n\tglVertex2f(475,675);\n\tglVertex2f(475,650);\n\tglEnd();\n\n // right green\n\tglColor3f(0,1,0);\n\n glPushMatrix();\n glTranslatef(620,580,0);\n glScalef(40,50,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(450,680,0);\n glScalef(40,50,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(530,750,0);\n glScalef(60,50,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(580,740,0);\n glScalef(60,50,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n}\n//function to draw trees\nvoid drawTrees(){\n glPushMatrix();\n glTranslatef(-500,0,0);\n drawTree();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(-300,0,0);\n drawTree();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(-250,0,0);\n drawTree();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(-50,0,0);\n drawTree();\n glPopMatrix();\n}\n\n//function for the background\nvoid background()\n{\n\tglColor3f(0.5294117647,0.8078431373,0.9215686275);\n\tglBegin(GL_POLYGON);\n\tglVertex2f(0,0);\n\tglVertex2f(1200,0);\n\tglVertex2f(1200,1000);\n\tglVertex2f(0,1000);\n glEnd();\n}\n\n//function to draw person\nvoid drawPerson(){\n glColor3f(0, 0, 0);\n glBegin(GL_POLYGON);\n glVertex2i(850, 100); //left leg\n glVertex2i(850, 80);\n glVertex2i(910, 80);\n glVertex2i(910, 100);\n glEnd();\n glFlush();\n\n glColor3f(0, 0, 0);\n glBegin(GL_POLYGON);\n glVertex2i(935, 200);\n glVertex2i(875, 100); //left leg\n glVertex2i(910, 100);\n glVertex2i(960, 200);\n glEnd();\n glFlush();\n\n glColor3f(0, 0, 0);\n 
glBegin(GL_POLYGON);\n glVertex2i(970, 80);\n glVertex2i(970, 100); //right leg\n glVertex2i(1015, 100);\n glVertex2i(1015, 80);\n glEnd();\n glFlush();\n\n glColor3f(0, 0, 0);\n glBegin(GL_POLYGON);\n glVertex2i(995, 100);\n glVertex2i(1015, 100); //right leg\n glVertex2i(1015, 200);\n glVertex2i(995, 200);\n glEnd();\n glFlush();\n\n glColor3f(0, 0, 0);\n glBegin(GL_POLYGON);\n glVertex2i(885, 185);\n glVertex2i(870, 200); //front hand\n glVertex2i(985, 310);\n glVertex2i(1005, 305);\n glEnd();\n glFlush();\n\n glColor3f(0, 0, 0);\n glBegin(GL_POLYGON);\n glVertex2i(1080, 185);\n glVertex2i(1085, 205);\n glVertex2i(1025, 250); //back hand\n glVertex2i(1025, 230);\n glEnd();\n glFlush();\n\n glColor3f(0, 0, 0);\n glBegin(GL_POLYGON);\n glVertex2i(950, 310);\n glVertex2i(925, 275);\n glVertex2i(925, 200); //body\n glVertex2i(1025, 200);\n glVertex2i(1025, 275);\n glVertex2i(1000, 310);\n glEnd();\n glFlush();\n\n glColor3f(0, 0, 0);\n glBegin(GL_POLYGON);\n glVertex2i(950, 400);\n glVertex2i(915, 365);\n glVertex2i(925, 370);\n glVertex2i(925, 350);\n glVertex2i(935, 350); //face\n glVertex2i(925, 340);\n glVertex2i(950, 310);\n glVertex2i(1000, 310);\n glVertex2i(1025, 335);\n glVertex2i(1025, 375);\n glVertex2i(1000, 400);\n glEnd();\n glFlush();\n\n glColor3f(1, 1, 1);\n glBegin(GL_POLYGON);\n glVertex2i(950, 400);\n glVertex2i(960, 415);\n glVertex2i(1005, 415); //hair\n glVertex2i(1025, 400);\n glVertex2i(1025, 375);\n glEnd();\n glFlush();\n\n glColor3f(1, 0, 0);\n glBegin(GL_POLYGON);\n glVertex2i(900, 140);\n glVertex2i(935, 140);\n glVertex2i(965, 200); // front\n glVertex2i(930, 200);\n glEnd();\n glFlush();\n\n glColor3f(1, 0, 0);\n glBegin(GL_POLYGON);\n glVertex2i(990, 200);\n glVertex2i(990, 140); // back\n glVertex2i(1020, 140);\n glVertex2i(1020, 200);\n glEnd();\n\n glColor3f(01,01,01);\n glPushMatrix();\n glTranslatef(960,350,0);\n glScalef(5,5,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(980,350,0);\n 
glScalef(5,5,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(970,330,0);\n glScalef(20,2,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n}\n\n//scene1 background\nvoid drawHouse(){\n\n\n glColor3f(0, 1, 0);\n glBegin(GL_POLYGON);\n glVertex2i(0, 0);\n glVertex2i(1200, 0);\n glVertex2i(1200, 400);\n glVertex2i(0, 400);\n glEnd();\n glFlush();\n\n glColor3f(0, 1, 1);\n glBegin(GL_POLYGON);\n glVertex2i(0, 400);\n glVertex2i(0, 1000);\n glVertex2i(1200, 1000);\n glVertex2i(1200, 400);\n glEnd();\n glFlush();\n\n glPushMatrix();\n glTranslatef(100,-100,0);\n drawTrees();\n glPopMatrix();\n\n\n}\n\n//apple tree and the background\nvoid drawAppleTree(){\n glColor3f(0, 1, 0);\n glBegin(GL_POLYGON);\n glVertex2i(0, 400);\n glVertex2i(1200, 400); //grass\n glVertex2i(1200, 0);\n glVertex2i(0, 0);\n glEnd();\n glFlush();\n\n glColor3f(0, 1, 0.2);\n glPushMatrix();\n glTranslatef(100,-100,0);\n glScalef(605,400,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n glColor3f(0, 1, 1);\n glBegin(GL_POLYGON);\n glVertex2i(0, 1000);\n glVertex2i(1200, 1000); //sky\n glVertex2i(1200, 400);\n glVertex2i(0, 400);\n glEnd();\n glFlush();\n\n glColor3f(0.9, 0.4, 0);\n glBegin(GL_POLYGON);\n glVertex2i(150, 125);\n glVertex2i(150, 325); //root tree\n glVertex2i(175, 300);\n glVertex2i(200, 325);\n glVertex2i(200, 125);\n glVertex2i(225, 100);\n glVertex2i(125, 100);\n glEnd();\n glFlush();\n\n glColor3f(0, 0.5, 0);\n glBegin(GL_POLYGON);\n glVertex2i(150, 285);\n glVertex2i(130, 315);\n glVertex2i(125, 300);\n glVertex2i(90, 335);\n glVertex2i(100, 360);\n glVertex2i(80, 385);\n glVertex2i(85, 420);\n glVertex2i(95, 420);\n glVertex2i(90, 500);\n glVertex2i(100, 475);\n glVertex2i(110, 480);\n glVertex2i(100, 500);\n glVertex2i(110, 525);\n glVertex2i(120, 520);\n glVertex2i(140, 550);\n glVertex2i(110, 575);\n glVertex2i(155, 565);\n glVertex2i(145, 595);\n glVertex2i(195, 575);\n glVertex2i(190, 590);\n glVertex2i(215, 575);\n glVertex2i(215, 
550);\n glVertex2i(230, 565);\n glVertex2i(245, 540);\n glVertex2i(265, 550);\n glVertex2i(280, 525);\n glVertex2i(275, 510);\n glVertex2i(290, 500);\n glVertex2i(300, 475);\n glVertex2i(315, 455);\n glVertex2i(315, 430);\n glVertex2i(295, 425);\n glVertex2i(290, 400); //tree\n glVertex2i(275, 380);\n glVertex2i(265, 350);\n glVertex2i(265, 320);\n glVertex2i(245, 300);\n glVertex2i(240, 315);\n glVertex2i(230, 305);\n glVertex2i(230, 295);\n glVertex2i(210, 270);\n glVertex2i(200, 305);\n glEnd();\n glFlush();\n\n glColor3f(1, 0, 0);\n glBegin(GL_POLYGON);\n glVertex2i(225, 280);\n glVertex2i(215, 270);\n glVertex2i(215, 255);\n glVertex2i(225, 250);\n glVertex2i(245, 250); //apple down\n glVertex2i(255, 260);\n glVertex2i(255, 275);\n glVertex2i(245, 280);\n glEnd();\n glFlush();\n\n glColor3f(0.9, 0.4, 0);\n glBegin(GL_LINES);\n glVertex2i(230, 295);\n glVertex2i(230, 280);\n glEnd();\n glFlush();\n\n glColor3f(1, 0, 0);\n glBegin(GL_POLYGON);\n glVertex2i(300, 410);\n glVertex2i(290, 400);\n glVertex2i(290, 380);\n glVertex2i(300, 370);\n glVertex2i(325, 370); //apple right\n glVertex2i(340, 380);\n glVertex2i(340, 400);\n glVertex2i(330, 410);\n glEnd();\n glFlush();\n\n glColor3f(0.9, 0.4, 0);\n glBegin(GL_LINES);\n glVertex2i(310, 430);\n glVertex2i(310, 410);\n glEnd();\n glFlush();\n\n glColor3f(1, 0, 0);\n glBegin(GL_POLYGON);\n glVertex2i(230, 525);\n glVertex2i(220, 520);\n glVertex2i(220, 500);\n glVertex2i(230, 495);\n glVertex2i(250, 495);\n glVertex2i(255, 505); //apple up\n glVertex2i(255, 515);\n glVertex2i(250, 525);\n glEnd();\n glFlush();\n\n glColor3f(0.9, 0.4, 0);\n glBegin(GL_LINES);\n glVertex2i(240, 525);\n glVertex2i(240, 545);\n glEnd();\n glFlush();\n\n glColor3f(1, 0, 0);\n glBegin(GL_POLYGON);\n glVertex2i(95, 470);\n glVertex2i(85, 460);\n glVertex2i(85, 445);\n glVertex2i(95, 440);\n glVertex2i(120, 440); //apple left\n glVertex2i(125, 450);\n glVertex2i(125, 460);\n glVertex2i(120, 470);\n glEnd();\n glFlush();\n\n glColor3f(0.9, 
0.4, 0);\n glBegin(GL_LINES);\n glVertex2i(105, 480);\n glVertex2i(105, 470);\n glEnd();\n glFlush();\n\n\n\n}\n\n//person sitting\nvoid drawSittingerson(){\n glColor3f(0.9, 0.4, 0);\n glBegin(GL_LINES);\n glVertex2i(105, 480);\n glVertex2i(105, 470);\n glEnd();\n glFlush();\n\n glColor3f(1, 1, 0);\n glBegin(GL_POLYGON);\n glVertex2i(225, 225);\n glVertex2i(215, 205);\n glVertex2i(215, 190);\n glVertex2i(225, 180); //sitting\n glVertex2i(250, 180);\n glVertex2i(255, 185);\n glVertex2i(250, 190);\n glVertex2i(255, 190); //face\n glVertex2i(255, 200);\n glVertex2i(250, 200);\n glVertex2i(250, 215);\n glEnd();\n glFlush();\n\n glColor3f(1, 1, 0);\n glBegin(GL_POLYGON);\n glVertex2i(225, 180);\n glVertex2i(210, 165); //body\n glVertex2i(210, 125);\n glVertex2i(265, 125);\n glVertex2i(265, 160);\n glVertex2i(250, 180);\n glEnd();\n glFlush();\n\n glColor3f(1, 1, 0);\n glBegin(GL_POLYGON);\n glVertex2i(220, 170);\n glVertex2i(215, 165); //hand front\n glVertex2i(270, 135);\n glVertex2i(275, 140);\n glEnd();\n glFlush();\n\n glColor3f(1, 1, 0);\n glBegin(GL_POLYGON);\n glVertex2i(260, 165);\n glVertex2i(270, 165); //hand back\n glVertex2i(275, 165);\n glVertex2i(255, 175);\n glEnd();\n glFlush();\n\n glColor3f(1, 1, 0);\n glBegin(GL_POLYGON);\n glVertex2i(265, 130);\n glVertex2i(265, 140); //leg\n glVertex2i(280, 140);\n glVertex2i(285, 150);\n glVertex2i(285, 145);\n glVertex2i(295, 145);\n glVertex2i(295, 140);\n glVertex2i(280, 145);\n glEnd();\n glFlush();\n\n glColor3f(0.9, 0.7, 0.4);\n glBegin(GL_POLYGON);\n glVertex2i(265, 160);\n glVertex2i(265, 150); //leg\n glVertex2i(290, 170);\n glVertex2i(290, 165);\n glVertex2i(295, 160);\n glVertex2i(295, 155);\n glVertex2i(255, 140);\n glVertex2i(255, 145);\n glEnd();\n glFlush();\n}\n\n//intro project details\nvoid scene1(){\n\n background();\n\n\n char str1[] = \"SRINIVAS INSTITUTE OF TECHNOLOGY\";\n glColor3f(0, 1, 1);\n glRasterPos2f(380, 855);\n for (k = 0; k < strlen(str1); k++) {\n 
glutBitmapCharacter(GLUT_BITMAP_TIMES_ROMAN_24, str1[k]);\n }\n glColor3f(0, 1, 1);\n glRasterPos2f(530, 810);\n char str2[] = \"MANGALURU-574143\";\n for (k = 0; k < strlen(str2); k++) {\n glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, str2[k]);\n }\n\n glColor3f(1, 0.5, 0.2);\n glRasterPos2f(265, 700);\n char str3[] = \"DEPARTMENT OF COMPUTER SCIENCE AND ENGINEERING\";\n for (k = 0; k < strlen(str3); k++) {\n glutBitmapCharacter(GLUT_BITMAP_TIMES_ROMAN_24, str3[k]);\n }\n\n glColor3f(0.8, 0.3, 0.4);\n glRasterPos2f(330, 630);\n char str4[] = \"COMPUTER GRAPHICS AND VISUALIZATION\";\n for (k = 0; k < strlen(str4); k++) {\n glutBitmapCharacter(GLUT_BITMAP_TIMES_ROMAN_24, str4[k]);\n }\n\n glColor3f(0.3, 0.5, 0.1);\n glRasterPos2f(520, 560);\n char str5[] = \"MINI PROJECT ON\";\n for (k = 0; k < strlen(str5); k++) {\n glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, str5[k]);\n }\n\n glColor3f(1, 0, 0);\n glRasterPos2f(465, 490);\n char str6[] = \"DISCOVERY OF GRAVITY\";\n for (k = 0; k < strlen(str6); k++) {\n glutBitmapCharacter(GLUT_BITMAP_TIMES_ROMAN_24, str6[k]);\n }\n\n glColor3f(0.6, 0.6, 0);\n glRasterPos2f(50, 320);\n char str7[] = \"TEAM MEMBERS\";\n for (k = 0; k < strlen(str7); k++) {\n glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, str7[k]);\n }\n\n glColor3f(0.6, 0.6, 0);\n glRasterPos2f(50, 280);\n char str8[] = \"NIRISHA-4SN16CS062\";\n for (k = 0; k < strlen(str8); k++) {\n glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, str8[k]);\n }\n\n glColor3f(0.6, 0.6, 0);\n glRasterPos2f(50, 240);\n char str9[] = \"SHWETHA A-4SN16CS091\";\n for (k = 0; k < strlen(str9); k++) {\n glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, str9[k]);\n }\n\n glColor3f(0.6, 0.6, 0);\n glRasterPos2f(50, 200);\n char str10[] = \"DEPT OF CSE\";\n for (k = 0; k < strlen(str10); k++) {\n glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, str10[k]);\n }\n\n glColor3f(0.8, 0.5, 0);\n glRasterPos2f(950, 320);\n char str11[] = \"PROJECT GUIDED BY\";\n for (k = 0; k < strlen(str11); k++) {\n 
glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, str11[k]);\n }\n\n glColor3f(0.8, 0.5, 0);\n glRasterPos2f(950, 280);\n char str12[] = \"MR.ARAVIND NAIK \";\n for (k = 0; k < strlen(str12); k++) {\n glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, str12[k]);\n }\n\n glColor3f(0.8, 0.5, 0);\n glRasterPos2f(950, 240);\n char str13[] = \"ASSISTANT PROFESSOR\";\n for (k = 0; k < strlen(str13); k++) {\n glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, str13[k]);\n }\n\n glColor3f(0.8, 0.5, 0);\n glRasterPos2f(950, 200);\n char str14[] = \"DEPT OF CSE\";\n for (k = 0; k < strlen(str14); k++) {\n glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, str14[k]);\n }\n\n glColor3f(1, 0,0);\n glRasterPos2f(450, 130);\n char str15[] = \"PRESS ANY KEY TO CONTINUE\";\n for (k = 0; k < strlen(str15); k++) {\n glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, str15[k]);\n }\n\n glFlush();\n}\n//scene-1 newton walking house <-----\nvoid scene2() {\n glClear(GL_COLOR_BUFFER_BIT);\n\n drawHouse();\n\n glColor3f(0, 1, 0.2);\n glPushMatrix();\n glTranslatef(100,-100,0);\n glScalef(605,400,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(60-startx,100,0);\n drawPerson();\n glPopMatrix();\n\n\n\n glPushMatrix();\n glTranslatef(60,-180,0);\n glColor3f(1, 0, 0);\n glBegin(GL_POLYGON);\n glVertex2i(600, 300);\n glVertex2i(600, 550);\n glVertex2i(800, 550);\n glVertex2i(800, 300);\n glEnd();\n glFlush();\n\n\n glColor3f(1, 1, 0);\n glBegin(GL_POLYGON);\n glVertex2i(600, 550);\n glVertex2i(700, 700);\n glVertex2i(800, 550);\n glEnd();\n glFlush();\n\n glColor3f(1, 0, 1);\n glBegin(GL_POLYGON);\n glVertex2i(800, 550);\n glVertex2i(800, 300);\n glVertex2i(1100, 300);\n glVertex2i(1100, 550);\n glEnd();\n glFlush();\n\n\n glColor3f(0, 0.7, 0.5);\n glBegin(GL_POLYGON);\n glVertex2i(800, 550);\n glVertex2i(700, 700);\n glVertex2i(1100, 700);\n glVertex2i(1100, 550);\n glEnd();\n glFlush();\n\n glColor3f(0, 0.1, 0.9);\n glBegin(GL_POLYGON);\n glVertex2i(850, 300);\n glVertex2i(1000, 
300);\n glVertex2i(1000, 450);\n glVertex2i(850, 450);\n glEnd();\n glPopMatrix();\n\n\n\n\n\n\n startx++;\n\n if(startx==800){\n state=2;\n startx=0;\n }\n glFlush();\n}\n//scene-2 walking towards apple tree\nvoid scene3(){\n drawAppleTree();\n\n glPushMatrix();\n glTranslatef(400,-100,0);\n drawTrees();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(300-startx,100,0);\n drawPerson();\n glPopMatrix();\n\n startx++;\n\n if(startx==800){\n state=3;\n startx=0;\n }\n\n glFlush();\n\n}\n//scene-3\nvoid scene4(){\n\n drawAppleTree();\n\n glPushMatrix();\n drawSittingerson();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(400,-100,0);\n drawTrees();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(600,-100,0);\n drawTrees();\n glPopMatrix();\n\n\n\n if(startx>-200){\n startx=startx-0.4;\n }else{\n glPushMatrix();\n glTranslatef(-50,-400,0);\n glColor3f(0.1,0.1,0.1);\n glPushMatrix();\n glTranslatef(520,750,0);\n glScalef(130,80,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(370,700,0);\n glScalef(15,15,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(330,650,0);\n glScalef(10,10,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n glColor3f(0,1,1);\n write_text(400,730,\"There is a Gravity!!!!\",fonts[3]);\n glPopMatrix();\n\n state=4;\n }\n if(startx>-300){\n // state=4;\n }\n\n glPushMatrix();\n glTranslatef(-100,startx+100,0);\n glColor3f(1, 0, 0);\n glBegin(GL_POLYGON);\n glVertex2i(320, 320);\n glVertex2i(320, 335);\n glVertex2i(325, 340);\n glVertex2i(335, 340);\n glVertex2i(345, 340);\n glVertex2i(350, 335); //apple\n glVertex2i(350, 320);\n glVertex2i(345, 315);\n glVertex2i(325, 315);\n glEnd();\n glPopMatrix();\n\n\n glFlush();\n\n}\n//scene-5\nvoid scene5(){\n drawAppleTree();\n\n glPushMatrix();\n glTranslatef(400,-100,0);\n drawTrees();\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(-300,100,0);\n drawPerson();\n glPopMatrix();\n\n glPushMatrix();\n 
glTranslatef(400,400,0);\n glColor3f(0.1,0.1,0.1);\n glPushMatrix();\n glTranslatef(400,100,0);\n glBegin(GL_POLYGON);\n glVertex2f(0,0);\n glVertex2f(100,0);\n glVertex2f(100,100);\n glVertex2f(0,100);\n glEnd();\n glPopMatrix();\n\n glColor3f(0.941,0.796,0.098);\n glPushMatrix();\n glTranslatef(450,250,0);\n glScalef(70,100,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n glPopMatrix();\n\n //drawAppleTree\n glColor3f(0,0,0);\n glPushMatrix();\n glTranslatef(410,-150,0);\n glPushMatrix();\n glTranslatef(370,700,0);\n glScalef(15,15,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n\n glPushMatrix();\n glTranslatef(330,650,0);\n glScalef(10,10,0);\n glutSolidSphere(1,100,10);\n glPopMatrix();\n glPopMatrix();\n glPopMatrix();\n\n glPushMatrix();\n glColor3f(0,0,1);\n glTranslatef(400,-100,0);\n write_text(400,730,\" Gravity!!!!\",fonts[3]);\n glPopMatrix();\n\n glFlush();\n}\n//scene-4 conclusion\nvoid end(){\n glColor3f(0.2,0.1,0.5);\n glBegin(GL_POLYGON);\n glVertex2f(0,0);\n glVertex2f(0,1000);\n glVertex2f(1200,1000);\n glVertex2f(1200,0);\n glEnd();\n\n showStory();\n glColor3f(1,1,1);\n write_text(470,500,\"The End\",fonts[2]);\n}\n//GL display function\nvoid display()\n{\n\n\tglClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT);\n\tglLoadIdentity();\n\n\n\tif(state==0){\n\t\tscene1();\n\t}\n\n\tif(state==1){\n\t\tscene2();\n\n\t}\n\n if(state==2){\n scene3();\n }\n\n if (state==3) {\n scene4();\n }\n\n if(state==4){\n scene5();\n }\n\n if(state==5){\n end();\n }\n\n glutSwapBuffers();\n glutPostRedisplay();\n}\n//main fucntion\nint main(int argc,char **argv)\n {\n glutInit(&argc,argv);\n\tglutInitDisplayMode(GLUT_DOUBLE|GLUT_RGB);\n\tglutInitWindowSize(1000,1000);\n\tglutInitWindowPosition(0, 0);\n\tglutCreateWindow(\"Newtons Gravity\");\n\tglutDisplayFunc(display);\n glutKeyboardFunc(keyboard);\n init();\n\tglutMainLoop();\n}\n"
}
] | 12 |
Michael07220823/QR_Code_Generator
|
https://github.com/Michael07220823/QR_Code_Generator
|
bf52fd3e57382c7aa51bf4e70984b8906c7341be
|
0f8349838078457bff7ce9d74e5e21ce9642b882
|
1a1e252b5d1fd6d1839fcac153a6e5425beff0c6
|
refs/heads/master
| 2022-04-27T15:56:23.073125 | 2020-04-25T09:22:53 | 2020-04-25T09:22:53 | 258,713,227 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6676923036575317,
"alphanum_fraction": 0.6738461256027222,
"avg_line_length": 17.05555534362793,
"blob_id": "6da17ae5f1ced607f296b170bae07a2e698f5f6e",
"content_id": "4c44973f5f017ff4cbb995402982505c71cd4b6f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 975,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 54,
"path": "/README.md",
"repo_name": "Michael07220823/QR_Code_Generator",
"src_encoding": "UTF-8",
"text": "# QR_Code_Generator\n\n## Install package\n`pip install qrcode[pil]`\n\n## Usage\nFirst - bash or cmd<br>\n`qr \"Hello world !\" > output/Hello_world.png`<br>\n \nSecond - Python<br>\n<pre><code>\nimport qrcode\n\nqr = qrcode.make(\"https://pypi.org/project/qrcode/\")\nqr.save(\"qrcode.png\")\n</code></pre>\n\nSecond-2 - Python<br>\n<pre><code>\nimport qrcode\n\ncontent = 'https://pypi.org/project/qrcode/'\n\nqr = qrcode.QRCode(\n version = 1,\n error_correction = qrcode.constants.ERROR_CORRECT_H,\n box_size = 10,\n border = 4,\n)\n\nqr.add_data(content)\nqr.make(fit=True)\n\nimg = qr.make_image(fill_color=\"green\", back_color=\"white\")\n\nimg.save(\"output/qrcode.png\")\n</code></pre>\n\nSecond-3 - Python<br>\n<pre><code>\nimport qrcode\nimport qrcode.image.svg\n\ncontent = \"Hello world !\"\nfactory = qrcode.image.svg.SvgFillImage\n\nimg = qrcode.make(content, image_factory=factory)\n\nimg.save(\"output/Hello_world.svg\")\n</code></pre>\n\n-----------\n### Reference\n* [qrcode](https://pypi.org/project/qrcode/)\n"
},
{
"alpha_fraction": 0.6625387072563171,
"alphanum_fraction": 0.6749225854873657,
"avg_line_length": 18.058822631835938,
"blob_id": "451ee6ff7aa353d0b94ab8fd4bd17ba9b92cbbd3",
"content_id": "f685b9207e9f16a31d6069aaba370ff68c7986bc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 323,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 17,
"path": "/demo.py",
"repo_name": "Michael07220823/QR_Code_Generator",
"src_encoding": "UTF-8",
"text": "import qrcode\n\ncontent = 'https://pypi.org/project/qrcode/'\n\nqr = qrcode.QRCode(\n version = 1,\n error_correction = qrcode.constants.ERROR_CORRECT_H,\n box_size = 10,\n border = 4,\n)\n\nqr.add_data(content)\nqr.make(fit=True)\n\nimg = qr.make_image(fill_color=\"green\", back_color=\"white\")\n\nimg.save(\"output/qrcode.png\")"
},
{
"alpha_fraction": 0.7696335315704346,
"alphanum_fraction": 0.7696335315704346,
"avg_line_length": 20.33333396911621,
"blob_id": "6d7dc3cf95c4a4eca6198fcd3003fd10b3d90c87",
"content_id": "61e352eca40f3670fde6670c4349dc78da528010",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 191,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 9,
"path": "/demo_svg.py",
"repo_name": "Michael07220823/QR_Code_Generator",
"src_encoding": "UTF-8",
"text": "import qrcode\nimport qrcode.image.svg\n\ncontent = \"Hello world !\"\nfactory = qrcode.image.svg.SvgFillImage\n\nimg = qrcode.make(content, image_factory=factory)\n\nimg.save(\"output/Hello world.svg\")"
}
] | 3 |
EmbodyTheLogos/coronavirus_defender
|
https://github.com/EmbodyTheLogos/coronavirus_defender
|
0cb288eebb7a20cb8bc5a9574774793a12351d79
|
d9841d914c59a5cacaa10dc478ab946abd2de573
|
abe83cb221becc6bb6f0262216f1d829bf3a395d
|
refs/heads/main
| 2023-03-22T12:10:20.430122 | 2021-02-22T02:43:03 | 2021-02-22T02:43:03 | 341,057,791 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5663568377494812,
"alphanum_fraction": 0.5896693468093872,
"avg_line_length": 28.393518447875977,
"blob_id": "1b6c1c86211c7e6de63c2ecf87646b465b4d3350",
"content_id": "27dde18ef1f5244baac1d7533d2f82d9003ebb79",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6563,
"license_type": "no_license",
"max_line_length": 132,
"num_lines": 216,
"path": "/main.py",
"repo_name": "EmbodyTheLogos/coronavirus_defender",
"src_encoding": "UTF-8",
"text": "import pygame\r\nimport random\r\n\r\n# initialize the pygame\r\npygame.init()\r\n\r\n# create the screen\r\n# set_mode(width, height)\r\nscreen = pygame.display.set_mode((800, 600))\r\n\r\n\r\n# Title and Icon\r\npygame.display.set_caption(\"CoronaVirus Defender\")\r\nicon = pygame.image.load('coronavirus.png')\r\npygame.display.set_icon(icon)\r\n\r\n# Score of the player\r\nscore_value = 0\r\n\r\n# Displaying text\r\nscore_font = pygame.font.Font('freesansbold.ttf', 20)\r\nend_font = pygame.font.Font('freesansbold.ttf', 40)\r\n\r\n# Position of score on screen\r\ntextX = 10\r\ntextY = 10\r\n\r\n# Player\r\nplayerImg = pygame.image.load('nurse.png')\r\nplayerX = 390\r\nplayerX_change = 0\r\nplayerY = 480\r\n\r\n# Enemy\r\nenemies = []\r\nenemyImg = pygame.image.load('coronavirus.png')\r\n\r\n# initialize 10 enemies:\r\nfor i in range(0, 10):\r\n enemyX = random.randint(0, 736)\r\n enemyY = random.randint(0, 150)\r\n enemyX_change = 0.3\r\n enemyY_change = 0\r\n enemies.append([enemyX, enemyY, enemyX_change, enemyY_change])\r\n\r\n# Bullet\r\n# Ready - you can't see the bullet on the screen. You are ready to fire.\r\n# Fire - the bullet is currently moving\r\nbulletImg = pygame.image.load('syringe.png')\r\nbulletX = 0\r\nbulletY = 480\r\nbulletY_change = 1\r\nbullet_state = 'ready'\r\n\r\n\r\ndef player(x, y):\r\n # draw an image of player on screen\r\n screen.blit(playerImg, (x, y))\r\n\r\ndef enemy(x, y):\r\n # drawng an image of an enemy on screen\r\n # Check if an enemy is already destroyed or not. 
If not, then draw the enemy\r\n if x >= -90 and y >= -90:\r\n screen.blit(enemyImg, (x, y))\r\n\r\n\r\ndef fire_bullet(x, y):\r\n global bullet_state\r\n bullet_state = 'fire'\r\n screen.blit(bulletImg, (x + 16, y + 16))\r\n\r\n\r\ndef isCollision(enemyX, enemyY, bulletX, bulletY, range):\r\n if abs(enemyX - bulletX) < range and abs(enemyY - bulletY) < range:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\ndef show_score(x, y):\r\n score = score_font.render(\"Score: \" + str(score_value), True, (0, 0, 0))\r\n screen.blit(score, (x, y))\r\n\r\n\r\ndef game_over(x, y):\r\n end_game = end_font.render(\"Game Over\", True, (0, 0, 0))\r\n screen.blit(end_game, (x, y))\r\n\r\n\r\ndef you_win(x, y):\r\n winner = end_font.render(\"You won!\", True, (0, 0, 0))\r\n screen.blit(winner, (x, y))\r\n\r\n\r\n# keep track of left and right key when they are still being pressed.\r\nleft_pressed = False\r\nright_pressed = False\r\n\r\n# Too many components on the screen will make the game run slow. We fix it by increasing the speed of the game.\r\n# Each time an enemy is destroyed, we decrease the scale by 0.2 to make game run slower since there are less components than before.\r\nscale = 2\r\n\r\n# this variable determined if the game is over or not\r\nend = False\r\n\r\n# Game loop\r\nrunning = True\r\nwhile running:\r\n # background color. 
Silver in this case\r\n screen.fill((192, 192, 192))\r\n\r\n # going through all of pygame event\r\n for event in pygame.event.get():\r\n # if somebody closes the game, then we got out of this loop.\r\n # if the game is not over, 'end' will still be False, thus, breaking out of this loop implies quitting the game.\r\n # if the game is over, then break out of this loop and let the second loop take care of the rest.\r\n if event.type == pygame.QUIT:\r\n running = False\r\n\r\n # if keystroke is pressed, check whether its right or left\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_ESCAPE:\r\n running = False\r\n if event.key == pygame.K_LEFT:\r\n left_pressed = True\r\n if event.key == pygame.K_RIGHT:\r\n right_pressed = True\r\n if event.key == pygame.K_SPACE:\r\n if bullet_state == 'ready':\r\n bulletX = playerX\r\n fire_bullet(playerX, bulletY)\r\n if event.type == pygame.KEYUP:\r\n if event.key == pygame.K_LEFT:\r\n left_pressed = False\r\n if event.key == pygame.K_RIGHT:\r\n right_pressed = False\r\n\r\n # Take care of the case where one key is still pressed but another is up\r\n if right_pressed == True and left_pressed == False:\r\n playerX_change = 0.5 * scale\r\n if left_pressed == True and right_pressed == False:\r\n playerX_change = -0.5 * scale\r\n if left_pressed == False and right_pressed == False:\r\n playerX_change = 0\r\n\r\n # Checking for boundary of player so that it doesn't go out of bound\r\n playerX += playerX_change\r\n if playerX <= 0:\r\n playerX = 0\r\n if playerX >= 736:\r\n playerX = 736\r\n\r\n for i in range(0, len(enemies)):\r\n if enemies[i][0] <= 0: # enemyX\r\n enemies[i][2] = +0.3 * scale # enemyX_change\r\n enemies[i][1] += 20 # enemyY_change\r\n\r\n if enemies[i][0] >= 736: # enemyX\r\n enemies[i][2] = -0.3 * scale # enemyX_change\r\n enemies[i][1] += 20 # enemyY_change\r\n\r\n if enemies[i][0] >= -90: # if an enemy is already destroyed\r\n enemies[i][0] += enemies[i][2]\r\n enemy(enemies[i][0], 
enemies[i][1])\r\n\r\n # Collision\r\n collision = isCollision(enemies[i][0], enemies[i][1], bulletX, bulletY, 32)\r\n\r\n if collision and bullet_state == 'fire':\r\n bulletY = 400\r\n bullet_state = 'ready'\r\n score_value += 1\r\n scale -= 0.1\r\n\r\n # signal that the enemy is destroyed\r\n enemies[i][0] = -99\r\n enemies[i][1] = -99\r\n\r\n # If an enemy collides with you, then you die and game over\r\n youDied = isCollision(enemies[i][0], enemies[i][1], playerX, playerY, 45)\r\n if youDied:\r\n # This is when the game is over.\r\n # Then you exit this loop and go to the second loop to take care of things there instead.\r\n running = False\r\n end = True\r\n\r\n # Bullet Movement\r\n if bulletY <= 0:\r\n bulletY = 400\r\n bullet_state = 'ready'\r\n bulletX = 0\r\n\r\n if bullet_state == 'fire':\r\n fire_bullet(bulletX, bulletY)\r\n bulletY_change = 1 * scale\r\n bulletY -= bulletY_change\r\n\r\n player(playerX, playerY)\r\n\r\n # You Win if you destroyed all 10 enemies!\r\n if score_value == 10:\r\n you_win(320, 250)\r\n\r\n show_score(textX, textY)\r\n pygame.display.update()\r\n\r\n# ending of the game. When you lose the game.\r\nwhile end:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n end = False\r\n\r\n player(playerX, playerY)\r\n show_score(textX, textY)\r\n game_over(310, 250)\r\n pygame.display.update()"
},
{
"alpha_fraction": 0.7831325531005859,
"alphanum_fraction": 0.7931727170944214,
"avg_line_length": 40.5,
"blob_id": "4d71fcd35d4f00fdb860829ca562d3c7dc1f1e30",
"content_id": "9a4d082351e7cefd9fe8790f60cce81ee4170c39",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 498,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 12,
"path": "/README.md",
"repo_name": "EmbodyTheLogos/coronavirus_defender",
"src_encoding": "UTF-8",
"text": "# coronavirus_defender\nImplementation of a Pygame tutorial for beginner.\n\nLink to the video here: https://www.youtube.com/watch?v=FfWpgLFMI7w\n\n# Complied with Python 3 and Pygame 2.0.1\n\n\n\n\n\n\n"
}
] | 2 |
mptsonev/scripts
|
https://github.com/mptsonev/scripts
|
95313a225d0a53e3fea262fa229b487eb8b26c7e
|
3fd6f973ce4323a392b4c5e1bbc65aba25e896ac
|
58bd4c743da930a7953450f669fe6df9b170658e
|
refs/heads/master
| 2020-04-16T14:12:41.393652 | 2019-01-14T12:37:48 | 2019-01-14T12:37:48 | 165,658,756 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.8603773713111877,
"alphanum_fraction": 0.8603773713111877,
"avg_line_length": 65.25,
"blob_id": "01bf22cd3bea0f2346ef60c251c8a5e5b805b8ab",
"content_id": "26081a540ce554d0232cde3fa889eabbd164dc0e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 265,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 4,
"path": "/checkout_versions.sh",
"repo_name": "mptsonev/scripts",
"src_encoding": "UTF-8",
"text": "git checkout SmartMonitoring/src/main/config/version.txt\ngit checkout lambda-functions/src/main/java/com/ticktech/eventcollector/version/VersionInfoGenerated.java\ngit checkout lambda-functions/dependency-reduced-pom.xml\ngit checkout data_model/client/data_model.ts\n"
},
{
"alpha_fraction": 0.6451078057289124,
"alphanum_fraction": 0.6600331664085388,
"avg_line_length": 23.1200008392334,
"blob_id": "230c054536bb8c765730b206614feaf4bab41b98",
"content_id": "f271f52437a527caa2208173bef09489f2c3ee38",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 603,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 25,
"path": "/push_and_port.sh",
"repo_name": "mptsonev/scripts",
"src_encoding": "UTF-8",
"text": "rel_branch=$1\n\nif [[ $# -eq 0 ]] ; then\n rel_branch=$(git for-each-ref --sort='-authordate' --format='%(refname:short)' | grep REL | head -1 | cut -d \"/\" -f 2)\n echo \"No release branch specified, inferring last known branch: $rel_branch\"\nfi\n\nif [[ $rel_branch != *\"REL.\"* ]]; then\n echo \\\"$rel_branch\\\" \"Not valid release branch, e.g. REL.24.1\"\n exit 0\nfi\n\n\nif ! git branch -r | grep -q $rel_branch\nthen\n\techo \"Branch\" $rel_branch \"not recognised\"\n\texit 0\nfi\n\ngit pull --rebase\ngit push\ngit checkout origin/$rel_branch\ngit cherry-pick master\ngit push origin HEAD:$rel_branch\ngit checkout master\n"
},
{
"alpha_fraction": 0.6634093523025513,
"alphanum_fraction": 0.6764386296272278,
"avg_line_length": 23.91891860961914,
"blob_id": "df0462140d364e52c2ae2fef1c21ddedbc1cc757",
"content_id": "d8b383f369a84e814cefa63f7c8eafb73ae3a6de",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 921,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 37,
"path": "/port_kraso_last_commit.sh",
"repo_name": "mptsonev/scripts",
"src_encoding": "UTF-8",
"text": "cd ~/repositories/java_servers/\n\nif ! git status | grep -q \"nothing to commit\"\nthen\n\techo \"Working directory not clean, check git status before cherry-picking:\"\n\tgit status\n\texit 0\nfi\n\ngit pull --rebase\n\nrel_branch=$1\n\nif [[ $# -eq 0 ]] ; then\n rel_branch=$(git for-each-ref --sort='-authordate' --format='%(refname:short)' | grep REL | head -1 | cut -d \"/\" -f 2)\n echo \"No release branch specified, inferring last known branch: $rel_branch\"\nfi\n\nif [[ $rel_branch != *\"REL.\"* ]]; then\n echo \\\"$rel_branch\\\" \"Not valid release branch, e.g. REL.24.1\"\n exit 0\nfi\n\n\nif ! git branch -r | grep -q $rel_branch\nthen\n\techo \"Branch\" $rel_branch \"not recognised\"\n\texit 0\nfi\n\nlast_commit_id=$(git log -1 --author=krasimir | grep commit | cut -d \" \" -f 2)\necho \"Porting $last_commit_id to branch $rel_branch\"\ngit checkout origin/$rel_branch\ngit cherry-pick $last_commit_id\ngit push origin HEAD:$rel_branch\n\ngit checkout master"
},
{
"alpha_fraction": 0.6943005323410034,
"alphanum_fraction": 0.6943005323410034,
"avg_line_length": 26.714284896850586,
"blob_id": "90bf5b71f23404f30fe4ec4daf749204ab2d0a28",
"content_id": "02bb55050194ab9d0c1d7c6c99f04077988a45fb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 193,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 7,
"path": "/download_youtube_video.py",
"repo_name": "mptsonev/scripts",
"src_encoding": "UTF-8",
"text": "import pafy\n\nvideoHash = raw_input(\"Enter hash of video to download: \")\nv = pafy.new(videoHash)\ns = v.getbest()\nprint(\"Size is %s\" % s.get_filesize())\nfilename = s.download() # starts download"
},
{
"alpha_fraction": 0.782608687877655,
"alphanum_fraction": 0.782608687877655,
"avg_line_length": 22.200000762939453,
"blob_id": "daaaebc811850e4044ecccfd27969e391fe9edad",
"content_id": "7419b996e7f3c5faa690a235dd365b1d01d5b498",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 115,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 5,
"path": "/submodules_update.sh",
"repo_name": "mptsonev/scripts",
"src_encoding": "UTF-8",
"text": "git submodule foreach git pull origin master\ngit add .\ngit commit -m \"Update submodules\"\ngit pull --rebase\ngit push"
},
{
"alpha_fraction": 0.5540540814399719,
"alphanum_fraction": 0.5743243098258972,
"avg_line_length": 13.899999618530273,
"blob_id": "659fe33aee92adbe1a875b84cd748151261e4780",
"content_id": "731697a423b24340d4d6757acd8da54fafc5d0fa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 148,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 10,
"path": "/open_db.sh",
"repo_name": "mptsonev/scripts",
"src_encoding": "UTF-8",
"text": "if [[ $# -eq 0 ]] ; then\n echo \"Specify db file to open\"\n exit 0\nfi\n\n\ncd ~/repositories/dbs_stateless/\ngit pull\n\nsubl $( find . -name \"*$1*\" )"
},
{
"alpha_fraction": 0.8055555820465088,
"alphanum_fraction": 0.8055555820465088,
"avg_line_length": 36,
"blob_id": "a3c678d8f2274ebfdcdc667c9880a3e6217e1f1f",
"content_id": "92faa9e9ea53446381a96dce217f63602e19e6ab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 36,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 1,
"path": "/compile.sh",
"repo_name": "mptsonev/scripts",
"src_encoding": "UTF-8",
"text": "mvn clean compile -P no-client-build"
}
] | 7 |
ravgill/azure-cli
|
https://github.com/ravgill/azure-cli
|
048669167c79fdd9f1cbfa71fa7819ca919bb12c
|
7fb136ef8e00713ed0a9cb3549c7e0bdd73896ac
|
3db346853b87783974fde9996c2cd1ee47e66e06
|
refs/heads/dev
| 2022-05-27T13:15:37.157405 | 2022-05-17T18:23:38 | 2022-05-17T18:23:38 | 241,434,913 | 0 | 0 |
MIT
| 2020-02-18T18:20:48 | 2020-02-18T18:20:51 | 2020-04-29T00:37:33 | null |
[
{
"alpha_fraction": 0.434959352016449,
"alphanum_fraction": 0.7682926654815674,
"avg_line_length": 60.5,
"blob_id": "c989912da964f55bd9335faafec129af7330d8a6",
"content_id": "c146dd88319c2b5a37524bc5c3a8fdb10fb11201",
"detected_licenses": [
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"Apache-2.0",
"BSD-2-Clause",
"LGPL-2.1-only",
"MIT",
"LGPL-2.1-or-later"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 246,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 4,
"path": "/src/azure-cli/azure/cli/command_modules/marketplaceordering/tests/latest/test_marketplaceordering_scenario_coverage.md",
"repo_name": "ravgill/azure-cli",
"src_encoding": "UTF-8",
"text": "|Scenario|Result|ErrorMessage|ErrorStack|ErrorNormalized|StartDt|EndDt|\n|step_accept|successed||||2022-04-12 05:26:22.521398|2022-04-12 05:26:22.597399|\n|step_show|successed||||2022-04-12 05:26:22.598399|2022-04-12 05:26:22.659397|\nCoverage: 2/2\n"
},
{
"alpha_fraction": 0.6206814646720886,
"alphanum_fraction": 0.6221110224723816,
"avg_line_length": 41.53716278076172,
"blob_id": "d9ed5e8f3dedc51f6dcd00d51ccbf022dec6c250",
"content_id": "bdd37e51836690d655e5a86030576ed4729364fd",
"detected_licenses": [
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"Apache-2.0",
"BSD-2-Clause",
"LGPL-2.1-only",
"MIT",
"LGPL-2.1-or-later"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12591,
"license_type": "permissive",
"max_line_length": 120,
"num_lines": 296,
"path": "/src/azure-cli/azure/cli/command_modules/role/msgrpah/_graph_client.py",
"repo_name": "ravgill/azure-cli",
"src_encoding": "UTF-8",
"text": "# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\nimport json\n\nfrom azure.cli.core._profile import Profile\nfrom azure.cli.core.util import send_raw_request\nfrom azure.cli.core.auth.util import resource_to_scopes\nfrom knack.util import CLIError\n\n\n# pylint: disable=redefined-builtin, too-many-public-methods\n\nclass GraphClient:\n def __init__(self, cli_ctx):\n self.cli_ctx = cli_ctx\n self.tenant = Profile(cli_ctx).get_login_credentials()[2]\n self.scopes = resource_to_scopes(cli_ctx.cloud.endpoints.microsoft_graph_resource_id)\n\n # https://graph.microsoft.com/ (AzureCloud)\n self.resource = cli_ctx.cloud.endpoints.microsoft_graph_resource_id\n\n # https://graph.microsoft.com/v1.0\n self.base_url = cli_ctx.cloud.endpoints.microsoft_graph_resource_id + 'v1.0'\n\n def _send(self, method, url, param=None, body=None):\n url = self.base_url + url\n\n if body:\n body = json.dumps(body)\n\n list_result = []\n is_list_result = False\n\n while True:\n try:\n r = send_raw_request(self.cli_ctx, method, url, resource=self.resource, uri_parameters=param, body=body)\n except CLIError as ex:\n raise GraphError(ex.response.json()['error']['message'], ex.response) from ex\n\n if r.text:\n dic = r.json()\n\n # The result is a list. 
Add value to list_result.\n if 'value' in dic:\n is_list_result = True\n list_result.extend(dic['value'])\n\n # Follow nextLink if available\n if '@odata.nextLink' in dic:\n url = dic['@odata.nextLink']\n continue\n\n # Result a list\n if is_list_result:\n # 'value' can be empty list [], so we can't determine if the result is a list only by\n # bool(list_result)\n return list_result\n\n # Return a single object\n return r.json()\n return None\n\n # id is python built-in name: https://docs.python.org/3/library/functions.html#id\n # filter is python built-in name: https://docs.python.org/3/library/functions.html#filter\n\n def application_create(self, body):\n # https://docs.microsoft.com/en-us/graph/api/application-post-applications\n result = self._send(\"POST\", \"/applications\", body=body)\n return result\n\n def application_get(self, id):\n # https://docs.microsoft.com/en-us/graph/api/application-get\n result = self._send(\"GET\", \"/applications/{id}\".format(id=id))\n return result\n\n def application_list(self, filter=None):\n # https://docs.microsoft.com/en-us/graph/api/application-list\n result = self._send(\"GET\", \"/applications\" + _filter_to_query(filter))\n return result\n\n def application_delete(self, id):\n # https://docs.microsoft.com/en-us/graph/api/application-delete\n result = self._send(\"DELETE\", \"/applications/{id}\".format(id=id))\n return result\n\n def application_patch(self, id, body):\n # https://docs.microsoft.com/en-us/graph/api/application-update\n result = self._send(\"PATCH\", \"/applications/{id}\".format(id=id), body=body)\n return result\n\n def application_owner_add(self, id, body):\n # https://docs.microsoft.com/en-us/graph/api/application-post-owners\n result = self._send(\"POST\", \"/applications/{id}/owners/$ref\".format(id=id), body=body)\n return result\n\n def application_owner_list(self, id):\n # https://docs.microsoft.com/en-us/graph/api/application-list-owners\n result = self._send(\"GET\", 
\"/applications/{id}/owners\".format(id=id))\n return result\n\n def application_owner_remove(self, id, owner_id):\n # https://docs.microsoft.com/en-us/graph/api/application-delete-owners\n result = self._send(\"DELETE\", \"/applications/{id}/owners/{owner_id}/$ref\".format(id=id, owner_id=owner_id))\n return result\n\n def application_add_password(self, id, body):\n # https://docs.microsoft.com/en-us/graph/api/application-addpassword\n # 'addPassword' appears in the API, so we keep its name, instead of using application_password_add\n result = self._send(\"POST\", \"/applications/{id}/addPassword\".format(id=id), body=body)\n return result\n\n def application_remove_password(self, id, body):\n # https://docs.microsoft.com/en-us/graph/api/application-removepassword\n result = self._send(\"POST\", \"/applications/{id}/removePassword\".format(id=id), body=body)\n return result\n\n def service_principal_create(self, body):\n # https://docs.microsoft.com/en-us/graph/api/serviceprincipal-post-serviceprincipals\n result = self._send(\"POST\", \"/servicePrincipals\", body=body)\n return result\n\n def service_principal_get(self, id):\n # https://docs.microsoft.com/en-us/graph/api/serviceprincipal-get\n result = self._send(\"GET\", \"/servicePrincipals/{id}\".format(id=id))\n return result\n\n def service_principal_list(self, filter=None):\n # https://docs.microsoft.com/en-us/graph/api/serviceprincipal-list\n result = self._send(\"GET\", \"/servicePrincipals\" + _filter_to_query(filter))\n return result\n\n def service_principal_delete(self, id):\n # https://docs.microsoft.com/en-us/graph/api/serviceprincipal-delete\n result = self._send(\"DELETE\", \"/servicePrincipals/{id}\".format(id=id))\n return result\n\n def service_principal_patch(self, id, body):\n # https://docs.microsoft.com/en-us/graph/api/serviceprincipal-update\n result = self._send(\"PATCH\", \"/servicePrincipals/{id}\".format(id=id), body=body)\n return result\n\n def service_principal_add_password(self, 
id, body):\n # https://docs.microsoft.com/en-us/graph/api/serviceprincipal-addpassword\n result = self._send(\"POST\", \"/servicePrincipals/{id}/addPassword\".format(id=id), body=body)\n return result\n\n def service_principal_remove_password(self, id, body):\n # https://docs.microsoft.com/en-us/graph/api/serviceprincipal-removepassword\n result = self._send(\"POST\", \"/servicePrincipals/{id}/removePassword\".format(id=id), body=body)\n return result\n\n def service_principal_owner_list(self, id):\n # https://docs.microsoft.com/en-us/graph/api/serviceprincipal-list-owners\n result = self._send(\"GET\", \"/servicePrincipals/{id}/owners\".format(id=id))\n return result\n\n def owned_objects_list(self):\n # https://docs.microsoft.com/en-us/graph/api/user-list-ownedobjects\n result = self._send(\"GET\", \"/me/ownedObjects\")\n return result\n\n def signed_in_user_get(self):\n # https://docs.microsoft.com/en-us/graph/api/user-get\n result = self._send(\"GET\", \"/me\")\n return result\n\n def directory_object_get_by_ids(self, body):\n # https://docs.microsoft.com/en-us/graph/api/directoryobject-getbyids\n result = self._send(\"POST\", \"/directoryObjects/getByIds\", body=body)\n return result\n\n def directory_object_check_member_groups(self, id, body):\n # https://docs.microsoft.com/en-us/graph/api/directoryobject-checkmembergroups\n result = self._send(\"POST\", \"/directoryObjects/{id}/checkMemberGroups\".format(id=id), body=body)\n return result\n\n def group_get_member_groups(self, id, body):\n # https://docs.microsoft.com/en-us/graph/api/directoryobject-getmembergroups\n result = self._send(\"POST\", \"/groups/{id}/getMemberGroups\".format(id=id), body=body)\n return result\n\n def group_create(self, body):\n # https://docs.microsoft.com/en-us/graph/api/group-post-groups\n result = self._send(\"POST\", \"/groups\", body=body)\n return result\n\n def group_get(self, id):\n # https://docs.microsoft.com/en-us/graph/api/group-get\n result = self._send(\"GET\", 
\"/groups/{id}\".format(id=id))\n return result\n\n def group_list(self, filter=None):\n # https://docs.microsoft.com/en-us/graph/api/group-list\n result = self._send(\"GET\", \"/groups\" + _filter_to_query(filter))\n return result\n\n def group_delete(self, id):\n # https://docs.microsoft.com/en-us/graph/api/group-delete\n result = self._send(\"DELETE\", \"/groups/{id}\".format(id=id))\n return result\n\n def group_owner_list(self, id):\n # https://docs.microsoft.com/en-us/graph/api/group-list-owners\n result = self._send(\"GET\", \"/groups/{id}/owners\".format(id=id))\n return result\n\n def group_owner_add(self, id, body):\n # https://docs.microsoft.com/en-us/graph/api/group-post-owners\n result = self._send(\"POST\", \"/groups/{id}/owners/$ref\".format(id=id), body=body)\n return result\n\n def group_owner_remove(self, id, owner_id):\n # https://docs.microsoft.com/en-us/graph/api/group-delete-owners\n result = self._send(\"DELETE\", \"/groups/{id}/owners/{owner_id}/$ref\".format(id=id, owner_id=owner_id))\n return result\n\n def group_member_list(self, id):\n # https://docs.microsoft.com/en-us/graph/api/group-list-members\n result = self._send(\"GET\", '/groups/{id}/members'.format(id=id))\n return result\n\n def group_member_add(self, id, body):\n # https://docs.microsoft.com/en-us/graph/api/group-post-members\n result = self._send(\"POST\", \"/groups/{id}/members/$ref\".format(id=id), body=body)\n return result\n\n def group_member_remove(self, id, member_id):\n # https://docs.microsoft.com/en-us/graph/api/group-delete-members\n result = self._send(\"DELETE\", \"/groups/{id}/members/{member_id}/$ref\".format(id=id, member_id=member_id))\n return result\n\n def user_create(self, body):\n # https://docs.microsoft.com/graph/api/user-post-users\n result = self._send(\"POST\", \"/users\", body=body)\n return result\n\n def user_get(self, id_or_upn):\n # https://docs.microsoft.com/graph/api/user-get\n\n # MSGraph known issues regarding '$' and '#' 
https://docs.microsoft.com/en-us/graph/known-issues#users\n if '@' in id_or_upn and '#' in id_or_upn:\n id_or_upn = id_or_upn.replace('#', '%23')\n if id_or_upn.startswith('$'):\n result = self._send(\"GET\", \"/users('{}')\".format(id_or_upn))\n else:\n result = self._send(\"GET\", \"/users/{}\".format(id_or_upn))\n return result\n\n def user_list(self, filter):\n # https://docs.microsoft.com/graph/api/user-list\n result = self._send(\"GET\", \"/users\" + _filter_to_query(filter))\n return result\n\n def user_delete(self, id_or_upn):\n # https://docs.microsoft.com/graph/api/user-delete\n result = self._send(\"DELETE\", \"/users/{}\".format(id_or_upn))\n return result\n\n def user_patch(self, id_or_upn, body):\n # https://docs.microsoft.com/graph/api/user-update\n result = self._send(\"PATCH\", \"/users/{}\".format(id_or_upn), body=body)\n return result\n\n def user_get_member_groups(self, id_or_upn, body):\n # https://docs.microsoft.com/en-us/graph/api/directoryobject-getmembergroups\n result = self._send(\"POST\", \"/users/{}/getMemberGroups\".format(id_or_upn), body=body)\n return result\n\n def oauth2_permission_grant_create(self, body):\n # https://docs.microsoft.com/en-us/graph/api/oauth2permissiongrant-post\n result = self._send(\"POST\", \"/oauth2PermissionGrants\", body=body)\n return result\n\n def oauth2_permission_grant_list(self, filter=None):\n # https://docs.microsoft.com/en-us/graph/api/oauth2permissiongrant-list\n result = self._send(\"GET\", \"/oauth2PermissionGrants\" + _filter_to_query(filter))\n return result\n\n def oauth2_permission_grant_delete(self, id):\n # https://docs.microsoft.com/en-us/graph/api/oauth2permissiongrant-delete\n result = self._send(\"DELETE\", \"/oAuth2PermissionGrants/{id}\".format(id=id))\n return result\n\n\ndef _filter_to_query(filter):\n if filter is not None:\n return \"?$filter={}\".format(filter)\n return ''\n\n\nclass GraphError(Exception):\n def __init__(self, message, response):\n super().__init__(message)\n 
self.response = response\n"
},
{
"alpha_fraction": 0.646354079246521,
"alphanum_fraction": 0.6503859162330627,
"avg_line_length": 37.411502838134766,
"blob_id": "74984c5619bcc88b77f3c0bc3d5801b6b4a17669",
"content_id": "6a7e0de4c2d849674b40454625544bd5bf89f3d5",
"detected_licenses": [
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"Apache-2.0",
"BSD-2-Clause",
"LGPL-2.1-only",
"MIT",
"LGPL-2.1-or-later"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8681,
"license_type": "permissive",
"max_line_length": 156,
"num_lines": 226,
"path": "/src/azure-cli/azure/cli/command_modules/acs/_graph.py",
"repo_name": "ravgill/azure-cli",
"src_encoding": "UTF-8",
"text": "# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nimport binascii\nimport datetime\nimport os\nimport time\nimport uuid\n\nimport dateutil\nfrom azure.cli.command_modules.acs._client_factory import get_graph_rbac_management_client\nfrom azure.cli.core.azclierror import AzCLIError\nfrom azure.graphrbac.models import (\n ApplicationCreateParameters,\n GetObjectsParameters,\n GraphErrorException,\n KeyCredential,\n PasswordCredential,\n ServicePrincipalCreateParameters,\n)\nfrom dateutil.relativedelta import relativedelta\nfrom knack.log import get_logger\nlogger = get_logger(__name__)\n\n\ndef _get_object_stubs(graph_client, assignees):\n params = GetObjectsParameters(include_directory_object_references=True, object_ids=assignees)\n return list(graph_client.objects.get_objects_by_object_ids(params))\n\n\ndef resolve_object_id(cli_ctx, assignee):\n client = get_graph_rbac_management_client(cli_ctx)\n result = None\n if assignee is None:\n raise AzCLIError('Inputted parameter \"assignee\" is None.')\n if assignee.find(\"@\") >= 0: # looks like a user principal name\n result = list(client.users.list(filter=\"userPrincipalName eq '{}'\".format(assignee)))\n if not result:\n result = list(client.service_principals.list(filter=\"servicePrincipalNames/any(c:c eq '{}')\".format(assignee)))\n if not result: # assume an object id, let us verify it\n result = _get_object_stubs(client, [assignee])\n\n # 2+ matches should never happen, so we only check 'no match' here\n if not result:\n raise AzCLIError(\"No matches in graph database for '{}'\".format(assignee))\n\n return result[0].object_id\n\n\ndef create_service_principal(cli_ctx, identifier, resolve_app=True, 
rbac_client=None):\n if rbac_client is None:\n rbac_client = get_graph_rbac_management_client(cli_ctx)\n\n if resolve_app:\n try:\n uuid.UUID(identifier)\n result = list(rbac_client.applications.list(filter=\"appId eq '{}'\".format(identifier)))\n except ValueError:\n result = list(rbac_client.applications.list(filter=\"identifierUris/any(s:s eq '{}')\".format(identifier)))\n\n if not result: # assume we get an object id\n result = [rbac_client.applications.get(identifier)]\n app_id = result[0].app_id\n else:\n app_id = identifier\n\n return rbac_client.service_principals.create(ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True))\n\n\ndef _build_application_creds(\n password=None, key_value=None, key_type=None, key_usage=None, start_date=None, end_date=None\n):\n if password and key_value:\n raise AzCLIError(\"specify either --password or --key-value, but not both.\")\n\n if not start_date:\n start_date = datetime.datetime.utcnow()\n elif isinstance(start_date, str):\n start_date = dateutil.parser.parse(start_date)\n\n if not end_date:\n end_date = start_date + relativedelta(years=1)\n elif isinstance(end_date, str):\n end_date = dateutil.parser.parse(end_date)\n\n key_type = key_type or \"AsymmetricX509Cert\"\n key_usage = key_usage or \"Verify\"\n\n password_creds = None\n key_creds = None\n if password:\n password_creds = [\n PasswordCredential(start_date=start_date, end_date=end_date, key_id=str(uuid.uuid4()), value=password)\n ]\n elif key_value:\n key_creds = [\n KeyCredential(\n start_date=start_date,\n end_date=end_date,\n value=key_value,\n key_id=str(uuid.uuid4()),\n usage=key_usage,\n type=key_type,\n )\n ]\n\n return (password_creds, key_creds)\n\n\ndef create_application(\n client,\n display_name,\n homepage,\n identifier_uris,\n available_to_other_tenants=False,\n password=None,\n reply_urls=None,\n key_value=None,\n key_type=None,\n key_usage=None,\n start_date=None,\n end_date=None,\n required_resource_accesses=None,\n):\n 
password_creds, key_creds = _build_application_creds(password, key_value, key_type, key_usage, start_date, end_date)\n\n app_create_param = ApplicationCreateParameters(\n available_to_other_tenants=available_to_other_tenants,\n display_name=display_name,\n identifier_uris=identifier_uris,\n homepage=homepage,\n reply_urls=reply_urls,\n key_credentials=key_creds,\n password_credentials=password_creds,\n required_resource_access=required_resource_accesses,\n )\n try:\n result = client.create(app_create_param, raw=True)\n return result.output, result.response.headers[\"ocp-aad-session-key\"]\n except GraphErrorException as ex:\n if \"insufficient privileges\" in str(ex).lower():\n link = \"https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal\" # pylint: disable=line-too-long\n raise AzCLIError(\n \"Directory permission is needed for the current user to register the application. \"\n \"For how to configure, please refer '{}'. Original error: {}\".format(link, ex)\n )\n raise\n\n\ndef build_service_principal(rbac_client, cli_ctx, name, url, client_secret):\n # use get_progress_controller\n hook = cli_ctx.get_progress_controller(True)\n hook.add(messsage=\"Creating service principal\", value=0, total_val=1.0)\n logger.info(\"Creating service principal\")\n # always create application with 5 years expiration\n start_date = datetime.datetime.utcnow()\n end_date = start_date + relativedelta(years=5)\n result, aad_session_key = create_application(\n rbac_client.applications, name, url, [url], password=client_secret, start_date=start_date, end_date=end_date\n )\n service_principal = result.app_id # pylint: disable=no-member\n for x in range(0, 10):\n hook.add(message=\"Creating service principal\", value=0.1 * x, total_val=1.0)\n try:\n create_service_principal(cli_ctx, service_principal, rbac_client=rbac_client)\n break\n # TODO figure out what exception AAD throws here sometimes.\n except Exception as ex: # pylint: 
disable=broad-except\n logger.error(str(ex))\n time.sleep(2 + 2 * x)\n else:\n return False, aad_session_key\n hook.add(message=\"Finished service principal creation\", value=1.0, total_val=1.0)\n logger.info(\"Finished service principal creation\")\n return service_principal, aad_session_key\n\n\ndef _create_client_secret():\n # Add a special character to satisfy AAD SP secret requirements\n special_char = \"$\"\n client_secret = binascii.b2a_hex(os.urandom(10)).decode(\"utf-8\") + special_char\n return client_secret\n\n\n# pylint: disable=unused-argument\ndef ensure_aks_service_principal(\n cli_ctx,\n service_principal=None,\n client_secret=None,\n subscription_id=None,\n dns_name_prefix=None,\n fqdn_subdomain=None,\n location=None,\n name=None,\n):\n aad_session_key = None\n # TODO: This really needs to be unit tested.\n rbac_client = get_graph_rbac_management_client(cli_ctx)\n if not service_principal:\n # --service-principal not specified, make one.\n if not client_secret:\n client_secret = _create_client_secret()\n salt = binascii.b2a_hex(os.urandom(3)).decode(\"utf-8\")\n if dns_name_prefix:\n url = \"https://{}.{}.{}.cloudapp.azure.com\".format(salt, dns_name_prefix, location)\n else:\n url = \"https://{}.{}.{}.cloudapp.azure.com\".format(salt, fqdn_subdomain, location)\n\n service_principal, aad_session_key = build_service_principal(rbac_client, cli_ctx, name, url, client_secret)\n if not service_principal:\n raise AzCLIError(\n \"Could not create a service principal with the right permissions. 
\" \"Are you an Owner on this project?\"\n )\n logger.info(\"Created a service principal: %s\", service_principal)\n # We don't need to add role assignment for this created SPN\n else:\n # --service-principal specfied, validate --client-secret was too\n if not client_secret:\n raise AzCLIError(\"--client-secret is required if --service-principal is specified\")\n return {\n \"client_secret\": client_secret,\n \"service_principal\": service_principal,\n \"aad_session_key\": aad_session_key,\n }\n"
},
{
"alpha_fraction": 0.6380931735038757,
"alphanum_fraction": 0.6404985785484314,
"avg_line_length": 47.1368408203125,
"blob_id": "9273009381d98ccade92f2073a9ea6f4e3af48b4",
"content_id": "2efed197e4ff04a5a42fe9b894b1b3cf165f332d",
"detected_licenses": [
"MIT",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"Apache-2.0",
"BSD-2-Clause",
"LGPL-2.1-only",
"LGPL-2.1-or-later"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4573,
"license_type": "permissive",
"max_line_length": 117,
"num_lines": 95,
"path": "/src/azure-cli/azure/cli/command_modules/storage/operations/fileshare.py",
"repo_name": "ravgill/azure-cli",
"src_encoding": "UTF-8",
"text": "# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\nimport math\n\nfrom knack.log import get_logger\n\nfrom azure.cli.core.profiles import ResourceType\n\nlogger = get_logger(__name__)\n\n\ndef list_shares(client, prefix=None, marker=None, num_results=None,\n include_metadata=False, timeout=None, include_snapshots=False, **kwargs):\n from ..track2_util import list_generator\n generator = client.list_shares(name_starts_with=prefix, include_metadata=include_metadata, timeout=timeout,\n include_snapshots=include_snapshots, results_per_page=num_results, **kwargs)\n\n pages = generator.by_page(continuation_token=marker) # SharePropertiesPaged\n result = list_generator(pages=pages, num_results=num_results)\n\n if pages.continuation_token:\n next_marker = {\"nextMarker\": pages.continuation_token}\n result.append(next_marker)\n\n return result\n\n\ndef create_share(cmd, client, metadata=None, quota=None, fail_on_exist=False, timeout=None, **kwargs):\n from azure.core.exceptions import HttpResponseError\n try:\n client.create_share(metadata=metadata, quota=quota, timeout=timeout, **kwargs)\n return True\n except HttpResponseError as ex:\n from azure.cli.command_modules.storage.track2_util import _dont_fail_on_exist\n StorageErrorCode = cmd.get_models(\"_shared.models#StorageErrorCode\",\n resource_type=ResourceType.DATA_STORAGE_FILESHARE)\n if not fail_on_exist:\n return _dont_fail_on_exist(ex, StorageErrorCode.share_already_exists)\n raise ex\n\n\ndef share_exists(cmd, client, **kwargs):\n from azure.core.exceptions import HttpResponseError\n try:\n client.get_share_properties(**kwargs)\n return True\n except HttpResponseError as ex:\n from 
azure.cli.command_modules.storage.track2_util import _dont_fail_on_exist\n StorageErrorCode = cmd.get_models(\"_shared.models#StorageErrorCode\",\n resource_type=ResourceType.DATA_STORAGE_FILESHARE)\n return _dont_fail_on_exist(ex, StorageErrorCode.share_not_found)\n\n\ndef generate_share_sas(cmd, client, permission=None, expiry=None, start=None, policy_id=None, ip=None, protocol=None,\n cache_control=None, content_disposition=None, content_encoding=None,\n content_language=None, content_type=None):\n generate_share_sas_fn = cmd.get_models('_shared_access_signature#generate_share_sas')\n\n sas_kwargs = {'protocol': protocol}\n sas_token = generate_share_sas_fn(account_name=client.account_name, share_name=client.share_name,\n account_key=client.credential.account_key, permission=permission,\n expiry=expiry, start=start, ip=ip, cache_control=cache_control,\n policy_id=policy_id, content_disposition=content_disposition,\n content_type=content_type, content_encoding=content_encoding,\n content_language=content_language, **sas_kwargs)\n return sas_token\n\n\ndef delete_share(cmd, client, fail_not_exist=False, timeout=None, delete_snapshots=None, **kwargs):\n from azure.core.exceptions import HttpResponseError\n try:\n client.delete_share(timeout=timeout, delete_snapshots=delete_snapshots, **kwargs)\n return True\n except HttpResponseError as ex:\n from azure.cli.command_modules.storage.track2_util import _dont_fail_on_exist\n StorageErrorCode = cmd.get_models(\"_shared.models#StorageErrorCode\",\n resource_type=ResourceType.DATA_STORAGE_FILESHARE)\n if not fail_not_exist:\n return _dont_fail_on_exist(ex, StorageErrorCode.share_not_found)\n raise ex\n\n\ndef get_share_stats(client, timeout=None, **kwargs):\n result = client.get_share_stats(timeout=timeout, **kwargs)\n datasize = round(int(result) / math.pow(1024, 3))\n if datasize == 0:\n return str(datasize + 1)\n return str(datasize)\n\n\ndef set_share_metadata(client, metadata=None, timeout=None, **kwargs):\n 
client.set_share_metadata(metadata=metadata, timeout=timeout, **kwargs)\n return True\n"
}
] | 4 |
bcarreres/peculiar_velocities
|
https://github.com/bcarreres/peculiar_velocities
|
ccc7ff6ab8481403881410673a173d94de4241d8
|
15c4318d42bf5477e04a8577496480cdad519058
|
ff1e47667dff607aa6bb2544350dbe76831761fa
|
refs/heads/main
| 2023-05-12T07:14:17.261839 | 2021-05-28T14:14:07 | 2021-05-28T14:14:07 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.843137264251709,
"alphanum_fraction": 0.843137264251709,
"avg_line_length": 24.5,
"blob_id": "46bb7ab5d0f4161ea8cd56de05bcc9f38e5293dc",
"content_id": "bb8b0c4f3bb2a5c9b0ad232622f85c1bb821b47b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 51,
"license_type": "no_license",
"max_line_length": 28,
"num_lines": 2,
"path": "/README.md",
"repo_name": "bcarreres/peculiar_velocities",
"src_encoding": "UTF-8",
"text": "# peculiar_velocities\nStudy of peculiar velocities\n"
},
{
"alpha_fraction": 0.5802752375602722,
"alphanum_fraction": 0.64449542760849,
"avg_line_length": 23.22222137451172,
"blob_id": "9dbc5963232df03f0eb46b8ab0cc9fec1a5c20e1",
"content_id": "d678d0bea41585d5695dcb6376893b905b6db743",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 436,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 18,
"path": "/pv_plot_contours.py",
"repo_name": "bcarreres/peculiar_velocities",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pylab as plt\nplt.ion()\n\n\nroot = 'bastien_v0'\n\nplt.figure()\nx, y = np.loadtxt(root+'_two_sigma', unpack=1)\nplt.fill(x, y, color='C0', alpha=0.3)\nx, y = np.loadtxt(root+'_one_sigma', unpack=1)\nplt.fill(x, y, color='C0', alpha=1)\nplt.xlabel(r'$f\\sigma_8$', fontsize=16)\nplt.ylabel(r'$\\sigma_v$ [km/s]', fontsize=16)\nplt.xlim(0.3, 0.6)\nplt.ylim(150, 300)\nplt.axvline(0.4505, color='k', ls='--')\nplt.tight_layout()\n"
},
{
"alpha_fraction": 0.550916314125061,
"alphanum_fraction": 0.5740600824356079,
"avg_line_length": 34.8677978515625,
"blob_id": "f6aafc2dddb99ecec84c056eccd310eaa010ff79",
"content_id": "1e3b94b08187e7d44120029b93e12b52c39fd11c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10586,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 295,
"path": "/simulation.py",
"repo_name": "bcarreres/peculiar_velocities",
"src_encoding": "UTF-8",
"text": "import numpy as np\nfrom matplotlib import pyplot as plt\nfrom scipy.optimize import minimize_scalar\nfrom astropy.table import Table\n\n#-- This is from julianbautista/eboss_clustering \n#-- but one could change to astropy \nimport cosmo\n\nc_light = 299792.458 #-- km/s\n\ndef sig_v(sigma_m, z, cosmology):\n ''' Convert an error in magnitude to an error in velocity\n '''\n hz = cosmology.get_hubble(z)\n dl = cosmology.get_DL(z)\n return -c_light*np.log(10)/5 / (1 - c_light*(1+z)**2/hz/dl) * sigma_m\n\ndef get_peculiar_redshift(v_p):\n ''' Computes redshift from velocities \n relativistic Doppler effect\n which could be approximated by z_p = 1+v_p/c_light \n '''\n return np.sqrt( (1+v_p/c_light)/(1-v_p/c_light)) - 1\n\ndef get_peculiar_velocity(z_p):\n ''' Computes velocity from redshift\n relativistic Doppler effect \n '''\n a = (1+z_p)**2\n return c_light* (a-1)/(a+1)\n\ndef fit_peculiar_redshift(z_obs_in, mu_obs_in, z_th, mu_th):\n ''' Unbiased estimator of peculiar redshift \n that accounts for effects both in redshift and mu\n '''\n def difference(z_pec):\n z_cos = (1+z_obs_in)/(1+z_pec)-1\n mu_cos = np.interp(z_cos, z_th, mu_th)\n mu_obs = mu_cos + 10*np.log10(1+z_pec)\n diff = (mu_obs-mu_obs_in)**2/1e-4**2\n return diff\n\n res = minimize_scalar(difference, bounds=(-0.05, 0.05), method='bounded')\n if res.success == False:\n print(res)\n\n return res.x\n\ndef create_sim(cosmo_truth=None, cosmo_measurement=None, \n n_sn=10000, sigma_m=0.10, zmin=0.01, zmax=0.12, rms_v_p=300.,\n seed=0):\n ''' Create a simplified catalog of type-Ia supernovae\n \n Parameters \n -------\n cosmo_truth : cosmo.CosmoSimple instance\n Input cosmological model for simulation\n cosmo_measurement : cosmo.CosmoSimple instance\n Cosmological model used for measurements\n n_sn : int\n Number of supernovae\n sigma_m : float \n Intrinsic scatter to be added to distance moduli\n zmin : float\n Minimum redshift of simulation\n zmax : float \n Maximum redshift of simulation\n rms_v_p 
: float\n Intrinsic velocity dispersion of supernovae in km/s\n seed: int\n Seed for random number generator\n \n Returns\n -------\n catalog : astropy.table.Table containing\n z_cosmo\n mu_cosmo\n z_obs (total observed redshift)\n mu_obs (mu after standardisation)\n mu_obs_before (mu before standardisation)\n mu_error (added error or intrinsic scatter)\n v_p_true (true peculiar velocity in km/s)\n v_p_est1\n v_p_est2\n v_p_est3\n\n '''\n\n options = {'n_sn': n_sn, \n 'sigma_m': sigma_m,\n 'zmin': zmin, \n 'zmax': zmax,\n 'rms_v_p': rms_v_p,\n 'seed': seed}\n\n catalog = {}\n\n np.random.seed(seed)\n\n #-- Draw cosmological redshifts and compute distance moduli\n z_cosmo = zmin+np.random.rand(n_sn)*(zmax-zmin)\n mu_cosmo = cosmo_truth.get_distance_modulus(z_cosmo)\n\n catalog['z_cosmo'] = z_cosmo\n catalog['mu_cosmo'] = mu_cosmo\n\n #-- Draw peculiar velocities\n v_p = np.random.randn(n_sn)*rms_v_p\n z_p = get_peculiar_redshift(v_p)\n\n catalog['v_p_true'] = v_p\n catalog['z_p_true'] = z_p\n\n #-- Observed redshift\n z_obs = (1+z_cosmo)*(1+z_p) - 1\n\n #-- Observed distance modulus\n #-- From Eq. 18 of Davis et al. 2011 :\n #-- D_L(z_obs) = D_L_cosmo(z_cosmo) * (1 + z_pec)**2\n #-- \"The two factors of (1 + z_pec) enter the luminosity distance SN\n #-- correction. 
One is due to the Doppler shifting of the photons, \n #-- the other is due to relativistic beaming.\"\n mu_obs = mu_cosmo + 10*np.log10(1+z_p)\n\n #-- Add intrinsic scatter of magnitudes before (4*sigma) and after standardisation \n mu_error = np.random.randn(n_sn)*sigma_m\n mu_obs_before = mu_obs + mu_error*4\n mu_obs = mu_obs + mu_error\n \n catalog['z_obs'] = z_obs\n catalog['mu_obs_before'] = mu_obs_before\n catalog['mu_obs'] = mu_obs\n catalog['mu_error'] = mu_error\n\n #=========== Measurements =============#\n\n #-- Use interpolation to estimate the cosmological redshift from \n #-- the observed distance modulus\n z_th = np.linspace(1e-5, 0.5, 10000)\n mu_th = cosmo_measurement.get_distance_modulus(z_th)\n\n #-- Estimate 1 of the peculiar velocity\n #-- Simple assume that the change in luminosity is small and \n #-- calculate the corresponding z_cosmo\n z_cosmo_est = np.interp(mu_obs, mu_th, z_th)\n z_p_est1 = (1+z_obs)/(1+z_cosmo_est) - 1\n v_p_est1 = get_peculiar_velocity(z_p_est1)\n catalog['v_p_est1'] = v_p_est1\n\n #-- Estimate 2 of the peculiar velocity\n #-- Alternatively, one can use the difference in magnitude\n #-- Eq. 1 in Johnson et al. 2014 or Eq. 
15 in Hue and Greene 2006\n mu_obs_est = cosmo_measurement.get_distance_modulus(z_obs)\n v_p_est2 = np.log(10)/5 * (mu_obs-mu_obs_est) * c_light\n v_p_est2 /= (1 - c_light*(1+z_obs)**2/ \n cosmo_measurement.get_hubble(z_obs)/\n cosmo_measurement.get_DL(z_obs)) \n catalog['v_p_est2'] = v_p_est2\n\n #-- Estimate 3 of the peculiar velocity \n #-- Fit for z_p \n z_p_est3 = np.array([fit_peculiar_redshift(zo, muo, z_th, mu_th) for zo, muo in zip(z_obs, mu_obs)])\n v_p_est3 = get_peculiar_velocity(z_p_est3)\n catalog['v_p_est3'] = v_p_est3\n\n\n catalog = Table(catalog)\n catalog.meta = options\n\n return catalog\n\ndef get_profiles(x, y, bins=10, percentiles=[2.5, 16, 50, 84, 97.5]):\n n_per = len(percentiles)\n x_bins = np.linspace(x.min(), x.max(), bins+1)\n y_profiles = np.zeros((n_per, bins))\n n_entries = np.zeros(bins)\n for i in range(bins):\n w = (x>=x_bins[i]) & (x<x_bins[i+1])\n y_profiles[:, i] = np.percentile(y[w], percentiles)\n n_entries[i] = np.sum(w)\n x_centers = 0.5*(x_bins[:-1]+x_bins[1:])\n return x_centers, y_profiles, n_entries\n\ndef plot_profiles(x_centers, y_profiles, color=None, ls=None):\n for y in y_profiles:\n plt.plot(x_centers, y, color=color, ls=ls)\n\n\ndef plot_hubble_diagram(catalog):\n ''' Illustration of the effect of peculiar velocities on the Hubble Diagram\n '''\n z_cosmo = catalog['z_cosmo']\n z_obs = catalog['z_obs']\n mu_cosmo = catalog['mu_cosmo']\n mu_obs = catalog['mu_obs']\n v_p = catalog['v_p_true']\n n_sn = v_p.size\n rms_v_p = catalog.meta['rms_v_p']\n\n plt.figure(figsize=(5,4))\n for i in range(n_sn):\n plt.plot([z_cosmo[i], z_obs[i]], [mu_cosmo[i], mu_obs[i]], 'k-', alpha=0.1)\n plt.scatter(z_obs, mu_obs, c=v_p, vmin=-5*rms_v_p, vmax=5*rms_v_p, \n cmap='seismic', s=4, label=r'$\\mu_{obs}(z_{obs})$')\n plt.colorbar(label=r'$v_p$ [km/s]')\n plt.xlabel(r'$z$')\n plt.ylabel(r'$\\mu$')\n plt.xscale('log')\n plt.title('Effect of velocities on Hubble Diagram')\n plt.tight_layout()\n\ndef plot_methods(catalog, cosmology, 
ylim=None): \n ''' Plot a comparison between three estimators of peculiar velocities\n '''\n z_cosmo = catalog['z_cosmo']\n v_p = catalog['v_p_true']\n rms_v_p = catalog['options']['rms_v_p']\n sigma_m = catalog['options']['sigma_m']\n\n #-- Compare the estimated versus true peculiar velocities\n for method in [1, 2, 3]:\n v_p_est = catalog['v_p_est'+str(method)]\n\n plt.figure()\n plt.scatter(z_cosmo, v_p_est-v_p, c=v_p, vmin=-5*rms_v_p, vmax=5*rms_v_p, \n cmap='seismic', s=4)\n x, y, ns = get_profiles(z_cosmo, v_p_est-v_p)\n plot_profiles(x, y, color='C2')\n for a in [1, -1, 2, -2]:\n plt.plot(x, a*sig_v(sigma_m, x, cosmology), 'k--')\n plt.xlabel(r'$z_{\\rm cosmo}$')\n plt.ylabel(r'$(\\hat{v}_p - v_p)$')\n plt.axhline(-rms_v_p, color='k', ls=':')\n plt.axhline(+rms_v_p, color='k', ls=':')\n plt.axhline(0, color='k', alpha=0.5, ls='--')\n plt.colorbar(label=r'$v_p$ [km/s]')\n plt.ylim(ylim)\n plt.title(f'Method {method}')\n plt.tight_layout()\n\ndef plot_malmquist_bias(catalog, \n mu_max=38., nbins=30, \n z_obs_min=0.015, z_obs_max=0.110,\n x_quantity='z_obs'):\n ''' Plot the effect of a flux limit on the Hubble Diagram \n and on the estimation of peculiar velocities\n '''\n\n catalog['delta_v'] = catalog['v_p_est3'] - catalog['v_p_true']\n\n mask_0 = np.ones(len(catalog), dtype=bool)\n mask_mu = catalog['mu_obs_before'] < mu_max\n mask_z = (catalog['z_obs'] < z_obs_max) & (catalog['z_obs'] > z_obs_min)\n\n ylabel = {'mu_obs': r'Distance modulus $\\mu$',\n 'v_p_true': 'Mean true peculiar velocity [km/s]',\n 'v_p_est3': 'Mean estimated peculiar velocity [km/s]',\n 'delta_v': 'Mean (est-true) peculiar velocity [km/s]'}\n\n #-- Compute average mu and dispersion without cut\n for quantity in ['mu_obs', 'v_p_true', 'v_p_est3', 'delta_v']:\n plt.figure()\n plt.plot(catalog[x_quantity], catalog[quantity], 'k.', ms=2, alpha=0.1, zorder=0)\n plt.plot(catalog[x_quantity][mask_mu], catalog[quantity][mask_mu], \n 'C3.', ms=2, alpha=0.1, zorder=0.5)\n\n for mask in [mask_0, 
mask_mu, (mask_mu & mask_z)]:\n x, y, n = get_profiles(catalog[x_quantity][mask], catalog[quantity][mask], \n percentiles=[16., 50, 84], bins=nbins)\n yerr = np.array([y[1]-y[0], y[2]-y[1]])/np.sqrt(n)\n plt.errorbar(x, y[1], yerr, fmt='o', zorder=1)\n\n if quantity == 'mu_obs':\n plt.axhline(mu_max, color='C1', ls=':')\n else:\n plt.axhline(0, color='k', ls=':')\n plt.axvline(z_obs_min, color='C2', ls=':')\n plt.axvline(z_obs_max, color='C2', ls=':')\n plt.xlabel(f'Redshift {x_quantity}') \n plt.ylabel(ylabel[quantity])\n plt.tight_layout()\n\n \n\n#-- Initialize true cosmology\n#cosmo_truth = cosmo.CosmoSimple(omega_m=0.31)\n#-- Initialize assumed fiducial cosmology for measurements\n#cosmo_measurement = cosmo.CosmoSimple(omega_m=0.29)\n#cosmo_measurement = cosmo_truth\n\n#cat = create_sim(cosmo_truth=cosmo_truth, cosmo_measurement=cosmo_measurement, \n# n_sn=100000, sigma_m=0.1, zmin=0.01, zmax=0.12, rms_v_p=300., seed=1)\n\n#plot_malmquist_bias(cat, mu_max=38, nbins=30, z_obs_min=0.015, z_obs_max=0.11)\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.5281676054000854,
"alphanum_fraction": 0.5555013418197632,
"avg_line_length": 32.03628921508789,
"blob_id": "f7cd8e0a4a85d7cdd63d8d4551001b5e01b1440b",
"content_id": "3bb52f1052840cb45dd369608da5bc01447d3686",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 24585,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 744,
"path": "/pv_covariance.py",
"repo_name": "bcarreres/peculiar_velocities",
"src_encoding": "UTF-8",
"text": "from threadpoolctl import threadpool_limits\nimport numpy as np\nimport pylab as plt\nimport time\n\nfrom astropy.table import Table\n\nimport scipy.integrate\nimport scipy.special\nimport scipy.stats\n\nfrom numba import jit, prange\n\nimport iminuit\n\nfrom cosmo import CosmoSimple\n\nplt.ion()\n\n##-- Create a mock from a halo catalog \n\ndef read_halos(input_halos, \n cosmo=None, redshift_space=False, nhalos=None, zmin=None, zmax=None, \n subsample_fraction=1., seed=0):\n\n #-- Read halo catalog\n halos = Table.read(input_halos)\n \n #-- cut to small sky region for testing\n mask = np.ones(len(halos), dtype=bool)\n #mask = (halos['ra']<180) & (halos['ra']>0. ) & (halos['dec']>0) & (halos['dec']<70.)\n #mask = (halos['ra']<360) & (halos['ra']>180.) & (halos['dec']>0) & (halos['dec']<70.)\n #mask = (halos['ra']<180) & (halos['ra']>0. ) & (halos['dec']<0) & (halos['dec']>-70.)\n #mask = (halos['ra']<360) & (halos['ra']>180.) & (halos['dec']<0) & (halos['dec']>-70.)\n f_sky = np.sum(mask)/len(halos)\n\n if redshift_space:\n z = halos['redshift_obs']\n else:\n z = halos['redshift']\n z = z.data.astype(float)\n\n if not zmin is None:\n mask &= (z > zmin) \n if not zmax is None:\n mask &= (z < zmax)\n halos = halos[mask]\n z = z[mask]\n \n if subsample_fraction < 1.:\n #-- Downsampling to match 2MTF catalogs from Howlett et al. 
2017\n #nz = density_z(halos['redshift'], f_sky, cosmo, nbins=30)\n #nz_gal = np.interp(halos['redshift'], nz['z_centers'], nz['density'])\n #prob_to_keep = 10**( (-4+2)/(0.03-0.002)*(halos['redshift']-0.002))#/nz_gal\n np.random.seed(seed)\n r = np.random.rand(len(halos))\n mask = r <= subsample_fraction\n halos = halos[mask]\n z = z[mask]\n\n if not nhalos is None:\n halos = halos[:nhalos]\n halos['ra'] = np.radians(halos['ra'])\n halos['dec'] = np.radians(halos['dec'])\n\n #-- Compute comoving distances in Mpc/h units to match power-spectrum units\n r_comov = cosmo.get_comoving_distance(z)*cosmo.pars['h']\n\n ra = halos['ra'].data.astype(float)\n dec = halos['dec'].data.astype(float)\n vel = halos['v_radial'].data.astype(float)\n vel_error = np.zeros(ra.size)\n\n catalog = {'ra': ra,\n 'dec': dec,\n 'r_comov': r_comov,\n 'vel': vel,\n 'vel_error': vel_error, \n 'redshift': z,\n 'weight': np.ones(ra.size),\n 'f_sky': f_sky,\n 'size': ra.size,\n 'n_gals': np.ones(ra.size)}\n return catalog\n\ndef add_intrinsic_scatter(catalog, cosmo, sigma_m=0.1, seed=0):\n ''' Convert error in distance modulus into error in velocity \n Draw Gaussian random errors for velocities\n '''\n z = catalog['redshift']\n sigma_v = cosmo.pars['c']*np.log(10)/5\n sigma_v /= (1 - cosmo.pars['c']*(1+z)**2/cosmo.get_hubble(z)/cosmo.get_DL(z)) \n sigma_v *= -1*sigma_m\n np.random.seed(seed)\n vel_error = np.random.randn(z.size)*sigma_v\n catalog['vel'] += vel_error\n catalog['vel_error'] = sigma_v\n\ndef create_mock_catalog(input_halos, cosmo, \n redshift_space=False,\n zmin=None, zmax=None,\n subsample_fraction=1.,\n nhalos=None, \n sigma_m = 0, seed=0):\n\n catalog = read_halos(input_halos,\n cosmo=cosmo, \n redshift_space=redshift_space, \n nhalos=nhalos, zmin=zmin, zmax=zmax,\n subsample_fraction=subsample_fraction, seed=seed)\n\n #-- Add errors on velocity measurements from a given error in distance modulus\n if sigma_m != 0:\n add_intrinsic_scatter(catalog, cosmo, sigma_m=sigma_m, 
seed=seed)\n\n return catalog\n\ndef read_catalog(input_catalog, cosmo, use_true_vel=False):\n\n ra, dec, z, vpec_est, vpec_true, vp_error = np.loadtxt(input_catalog, unpack=1)\n r_comov = cosmo.get_comoving_distance(z)*cosmo.pars['h']\n if use_true_vel:\n vel = vpec_true\n vel_error = np.zeros(vel.size)\n else:\n vel = vpec_est\n vel_error = vp_error\n f_sky = 0.5\n w = z < 0.1\n ra = ra[w]\n dec = dec[w]\n r_comov = r_comov[w]\n z = z[w]\n vel = vel[w]\n vel_error = vel_error[w]\n\n\n catalog = {'ra': ra,\n 'dec': dec,\n 'r_comov': r_comov,\n 'vel': vel,\n 'vel_error': vel_error, \n 'redshift': z,\n 'weight': np.ones(ra.size),\n 'f_sky': f_sky,\n 'size': ra.size,\n 'n_gals': np.ones(ra.size)}\n return catalog\n\ndef grid_velocities(catalog, grid_size=20.):\n ''' Transform a galaxy catalog into a voxel catalog,\n where voxels have grid_size in Mpc/h \n '''\n if grid_size==0:\n return catalog\n\n x = catalog['r_comov']*np.cos(catalog['ra'])*np.cos(catalog['dec'])\n y = catalog['r_comov']*np.sin(catalog['ra'])*np.cos(catalog['dec'])\n z = catalog['r_comov']*np.sin(catalog['dec'])\n\n position = np.array([x, y, z])\n pos_min = np.min(position, axis=1)\n pos_max = np.max(position, axis=1)\n #- Number of grid voxels per axis\n n_grid = np.floor((pos_max-pos_min)/grid_size).astype(int)+1\n #-- Total number of voxels\n n_pix = n_grid.prod()\n \n #-- Voxel index per axis\n index = np.floor( (position.T - pos_min)/grid_size ).astype(int)\n #-- Voxel index over total number of voxels\n i = (index[:, 0]*n_grid[1] + index[:, 1])*n_grid[2] + index[:, 2]\n\n #-- Perform averages per voxel\n sum_vel = np.bincount(i, weights=catalog['vel'] *catalog['weight'], minlength=n_pix)\n sum_vel2 = np.bincount(i, weights=catalog['vel']**2*catalog['weight'], minlength=n_pix)\n sum_vel_error = np.bincount(i, weights=catalog['vel_error']**2*catalog['weight'], minlength=n_pix)\n sum_we = np.bincount(i, weights=catalog['weight'], minlength=n_pix)\n sum_n = np.bincount(i, minlength=n_pix)\n\n 
#-- Consider only voxels with at least one galaxy\n w = sum_we > 0\n center_vel = sum_vel[w]/sum_we[w]\n #center_vel_std = np.sqrt(sum_vel2[w]/sum_we[w] - center_vel**2)/np.sqrt(sum_n[w])\n center_vel_error = np.sqrt(sum_vel_error[w]/sum_we[w])/np.sqrt(sum_n[w])\n center_weight = sum_we[w]\n center_ngals = sum_n[w]\n\n #-- Determine the coordinates of the voxel centers\n i_pix = np.arange(n_pix)[w]\n i_pix_z = i_pix % n_grid[2]\n i_pix_y = ((i_pix - i_pix_z)/n_grid[2]) % n_grid[1]\n i_pix_x = i_pix // (n_grid[1]*n_grid[2])\n i_pix = [i_pix_x, i_pix_y, i_pix_z]\n center_position = np.array([(i_pix[i]+0.5)*grid_size + pos_min[i] for i in range(3)])\n \n #-- Convert to ra, dec, r_comov\n center_r_comov = np.sqrt(np.sum(center_position**2, axis=0))\n center_ra = np.arctan2(center_position[1], center_position[0])\n center_dec = np.pi/2 - np.arccos(center_position[2]/center_r_comov)\n\n return {'ra': center_ra, \n 'dec': center_dec, \n 'r_comov': center_r_comov, \n 'vel': center_vel, \n 'vel_error': center_vel_error,\n 'weight': center_weight,\n 'n_gals': center_ngals,\n 'size': center_ra.size}\n\ndef density_z(z, f_sky, cosmo, zmin=None, zmax=None, nbins=50):\n ''' Compute comoving number density in [h^3/Mpc^3] as a function\n of redshift\n \n Input\n -----\n z: array with redshifts of galaxies\n f_sky: float - fraction of total sky covered\n cosmo: CosmoSimple instance - fiducial cosmology\n zmin: minimum redshift, default is np.min(z)\n zmax: maximum redshift, defautl is np.max(z)\n nbins: number of bins, default is 50\n\n Returns\n -----\n dict: Containing 'z_centers', 'z_edges', 'density', 'density_err', 'volume'\n '''\n\n if zmin is None:\n zmin = z.min()\n if zmax is None:\n zmax = z.max()\n bin_edges = np.linspace(zmin, zmax, nbins)\n counts, bin_edges = np.histogram(z, bins=bin_edges)\n\n r = cosmo.get_comoving_distance(bin_edges)*cosmo.pars['h']\n r3_diff = r[1:]**3 - r[:-1]**3\n vol_shell = f_sky * 4*np.pi/3 * r3_diff\n bin_centers = 
(bin_edges[1:]+bin_edges[:-1])*0.5\n density = counts / vol_shell\n density_err = np.sqrt(counts) / vol_shell\n volume = np.sum(vol_shell)\n\n return {'z_centers': bin_centers,\n 'z_edges': bin_edges,\n 'density': density,\n 'density_err': density_err,\n 'volume': volume}\n\ndef read_power_spectrum(non_linear='',\n redshift_space=False):\n\n #-- Read power spectrum from camb \n #-- units are in h/Mpc and Mpc^3/h^3\n pk_table = Table.read('pk_lin_camb_demnunii_1024.txt', format='ascii',names=('k', 'power'))\n k = pk_table['k'] \n pk = pk_table['power'] \n\n #-- apply non-linearities from Bel et al. 2019\n if non_linear == 'bel': \n sig8 = 0.84648\n a1 = -0.817+3.198*sig8\n a2 = 0.877 - 4.191*sig8\n a3 = -1.199 + 4.629*sig8\n pk = pk*np.exp(-k*(a1+a2*k+a3*k**2))\n\n #-- Read RegPT theory for theta-theta\n if non_linear == 'regpt':\n k, pdd, pdt, ptt = np.loadtxt('pk_regpt_demnunii_1024.txt', unpack=1)\n pk = ptt\n\n #-- Go to redshift-space (only if using redshift_obs)\n #-- based on Koda et al. 2014\n if redshift_space:\n sigma_u = 13. 
#- Mpc/h\n D_u = np.sin(k*sigma_u)/(k*sigma_u)\n pk *= D_u**2\n\n return k, pk\n\ndef reduce_resolution(k, pk, kmin=None, kmax=None, nk=None, linear=False):\n\n if kmin is None:\n kmin = k.min()\n if kmax is None:\n kmax = k.max()\n if nk is None:\n w = (k>=kmin)&(k<=kmax)\n nk = np.sum(w)\n if linear:\n knew = np.linspace(kmin, kmax, nk)\n else:\n knew = 10**np.linspace(np.log10(kmin), np.log10(kmax), nk)\n pknew = np.interp(knew, k, pk)\n return knew, pknew\n\ndef optimize_k_range(k, pk, precision=1e-5):\n\n kmin = k[:k.size//2]\n kmax = k[k.size//2:]\n var_true = np.trapz(pk, x=k)\n\n def get_var(kl, pkl, kmin=1e-3, kmax=0.1, nk=None):\n w = (kl>=kmin)&(kl<=kmax)\n return np.trapz(pkl[w], x=kl[w])\n \n var_kmin = np.array([get_var(k, pk, kmin=ki, kmax=k[-1]) for ki in kmin])\n var_kmax = np.array([get_var(k, pk, kmin=k[0], kmax=ki) for ki in kmax])\n error_kmin = 1-var_kmin/var_true\n error_kmax = 1-var_kmax/var_true\n kmin_opt = np.interp(precision/2, error_kmin, kmin)\n kmax_opt = np.interp(-precision/2, -error_kmax, kmax)\n\n k_opt, pk_opt = reduce_resolution(k, pk, kmin=kmin_opt, kmax=kmax_opt)\n error = 1-np.trapz(pk_opt, x=k_opt)/var_true\n print('kmin:', kmin_opt)\n print('kmax:', kmax_opt)\n return k_opt, pk_opt\n\ndef check_integrals():\n\n k, pk = read_power_spectrum(non_linear='regpt')\n scales = [1., 3., 10., 30., 100.]\n\n plt.figure()\n for i_scale, scale in enumerate(scales):\n win_radial = window(k, 200., 200+scale, 1.)\n win_transv = window(k, 200., 200., np.cos(scale/200.))\n full_int_radial = np.trapz(win_radial*pk, x=k)\n full_int_transv = np.trapz(win_transv*pk, x=k)\n i_k = np.arange(k.size//2, k.size)\n int_radial = np.zeros(i_k.size)\n int_transv = np.zeros(i_k.size)\n kmax = k[i_k]\n for j, i in enumerate(i_k):\n int_radial[j] = np.trapz(win_radial[:i]*pk[:i], x=k[:i])\n int_transv[j] = np.trapz(win_transv[:i]*pk[:i], x=k[:i])\n plt.plot(kmax, int_radial/full_int_radial-1, color=f'C{i_scale}', ls='--', label=f'dr = {scale}')\n 
plt.plot(kmax, int_transv/full_int_transv-1, color=f'C{i_scale}', ls=':')\n\n plt.legend()\n plt.xlabel(r'$k_{\\rm max}$ [h/Mpc]')\n plt.ylabel(r'Relative error on $C_{ij}$')\n plt.ylim(-1, 1)\n plt.xscale('log')\n\n\n@jit(nopython=True)\ndef angle_between(ra_0, dec_0, ra_1, dec_1):\n cos_alpha = np.cos(ra_1-ra_0)*np.cos(dec_0)*np.cos(dec_1) + np.sin(dec_0)*np.sin(dec_1)\n return cos_alpha\n\n@jit(nopython=True)\ndef separation(r_0, r_1, cos_alpha):\n return np.sqrt(r_0**2 + r_1**2 - 2*r_0*r_1*cos_alpha)\n\ndef j0(x):\n ''' This doesn't work with numba '''\n return scipy.special.spherical_jn(0, x)\n\n@jit(nopython=True)\ndef j0_alt(x):\n return np.sin(x)/x\n \ndef j2(x):\n ''' This doesn't work with numba '''\n return scipy.special.spherical_jn(2, x)\n\n@jit(nopython=True)\ndef j2_alt(x):\n return (3/x**2-1)*np.sin(x)/x - 3*np.cos(x)/x**2\n \n@jit(nopython=True)\ndef window(k, r_0, r_1, cos_alpha):\n ''' From Johnson et al. 2014 '''\n r = separation(r_0, r_1, cos_alpha)\n sin_alpha_squared = 1-cos_alpha**2\n win = 1/3*np.ones_like(k)\n if r > 0:\n j0kr = j0_alt(k*r) \n j2kr = j2_alt(k*r)\n win = 1/3*(j0kr - 2*j2kr)*cos_alpha\n win = win+(r_0*r_1/r**2*sin_alpha_squared * j2kr)\n return win\n\n \n@jit(nopython=True)\ndef window_2(k, r_0, r_1, cos_alpha):\n ''' From Adams and Blake 2020\n with RSD, but do not account for wide-angle effects\n '''\n r = separation(r_0, r_1, cos_alpha)\n win = np.ones_like(k)\n if r == 0:\n return win\n cos_gamma_squared = (1+cos_alpha)/2*(r_1-r_0)**2/r**2\n l2 = 0.5*(3*cos_gamma_squared-1)\n j0kr = j0_alt(k*r) \n j2kr = j2_alt(k*r)\n win = j0kr/3 + j2kr*(-2/3*l2)\n return win\n\n@jit(nopython=True)\ndef window_3(k, r_0, r_1, cos_alpha):\n ''' From Castorina and White 2020\n with RSD, account for wide-angle effects\n gives same results as window from Ma. et al. 
2011\n '''\n r = separation(r_0, r_1, cos_alpha)\n win = np.ones_like(k)\n if r == 0:\n return win*1/3\n cos_gamma_squared = (1+cos_alpha)/2*(r_1-r_0)**2/r**2\n l2 = 0.5*(3*cos_gamma_squared-1)\n j0kr = j0_alt(k*r) \n j2kr = j2_alt(k*r)\n win = j0kr/3*cos_alpha - 2/3*j2kr*(l2- 1/4*(1-cos_alpha))\n return win\n\ndef test_window():\n ''' Reproduce Fig 4 of Johnson et al. 2014 \n even though his labels are switched \n between (a and e) and (b and d)\n '''\n k = 10**np.linspace(-4, 0, 1000)\n plt.figure()\n #-- Values from Johnson et al. 2014\n r0_r1_angle = [ [86.6, 133.7, 0.393],\n [76.8, 127.6, 1.313],\n [59.16, 142.5, 0.356],\n [51.9, 91.1, 0.315],\n [99.449, 158.4, 1.463]]\n r0_r1_angle = [ [50., 50., 0.],\n [50., 50., np.pi/2],\n [50., 50., np.pi]]\n for win, alpha, ls in zip([window, window_2, window_3], [1, 0.5, 1.0], ['-', '--', ':']):\n #for win, alpha, ls in zip([window_2, window_3], [ 0.5, 1.0], [ '--', ':']):\n for i, [r0, r1, angle] in enumerate(r0_r1_angle):\n plt.plot(k, win(k, r0, r1, np.cos(angle)), color=f'C{i}', ls=ls, alpha=alpha)\n plt.legend()\n plt.xlim(5e-4, 2e-1)\n plt.xscale('log')\n plt.xlabel('k [h/Mpc]')\n plt.ylabel(r'$W_{i,j}(k)$')\n\n#@jit(nopython=True, parallel=True)\ndef grid_window(k, L, n=100):\n if L == 0:\n return None\n\n window = np.zeros_like(k)\n theta = np.linspace(0, np.pi, n)\n phi = np.linspace(0, 2*np.pi, n)\n kx = np.outer(np.sin(theta), np.cos(phi))\n ky = np.outer(np.sin(theta), np.sin(phi))\n kz = np.outer(np.cos(theta), np.ones(n))\n dthetaphi = np.outer(np.sin(theta), np.ones(phi.size))\n for i in prange(k.size):\n ki = k[i]\n #-- the factor here has an extra np.pi because of the definition of np.sinc\n fact = (ki*L)/2/np.pi\n func = np.sinc(fact*kx)*np.sinc(fact*ky)*np.sinc(fact*kz)*dthetaphi\n win_theta = np.trapz(func, x=phi, axis=1)\n win = np.trapz(win_theta, x=theta)\n win *= 1/(4*np.pi)\n window[i] = win\n return window\n\n@jit(nopython=True)\ndef get_covariance(ra_0, dec_0, r_comov_0, ra_1, dec_1, 
r_comov_1, k, pk):\n ''' Get cosmological covariance for a given pair of galaxies \n and a given power spectrum (k, pk) in units of h/Mpc and (Mpc/h)^3\n '''\n cos_alpha = angle_between(ra_0, dec_0, ra_1, dec_1)\n win = window(k, r_comov_0, r_comov_1, cos_alpha)\n cova = np.trapz(pk * win, x=k)\n return cova\n\n@jit(nopython=True, parallel=True)\ndef build_covariance_matrix(ra, dec, r_comov, k, pk_nogrid, grid_win=None, n_gals=None):\n ''' Builds a 2d array with the theoretical covariance matrix \n based on the positions of galaxies (ra, dec, r_comov) \n and a given power spectrum (k, pk)\n '''\n nh = ra.size\n cov_matrix = np.zeros((nh, nh))\n if not grid_win is None:\n pk = pk_nogrid*grid_win**2\n else:\n pk = pk_nogrid*1\n\n for i in prange(nh):\n ra_0 = ra[i]\n dec_0 = dec[i]\n r_comov_0 = r_comov[i]\n for j in range(i+1, nh):\n ra_1 = ra[j]\n dec_1 = dec[j]\n r_comov_1 = r_comov[j]\n cov = get_covariance(ra_0, dec_0, r_comov_0, ra_1, dec_1, r_comov_1, k, pk)\n cov_matrix[i, j] = cov\n cov_matrix[j, i] = cov\n\n #-- For diagonal, window = 1/3\n var = np.trapz(pk/3, x=k)\n np.fill_diagonal(cov_matrix, var)\n\n if not grid_win is None:\n var_nogrid = np.trapz(pk_nogrid/3, x=k)\n #-- Eq. 22 of Howlett et al. 
2017\n np.fill_diagonal(cov_matrix, var + (var_nogrid-var)/n_gals)\n\n #-- Pre-factor H0^2/(2pi^2)\n cov_matrix *= (100)**2/(2*np.pi**2) \n\n return cov_matrix\n\ndef corr_from_cov(cov):\n diag = np.diag(cov)\n return cov/np.sqrt(np.outer(diag, diag))\n\n#@jit(nopython=True, fastmath=True)\ndef log_likelihood(x, cova):\n ''' Computes log of the likelihood from \n a vector x and a covariance cova\n '''\n nx = x.size\n eigvals = np.linalg.eigvalsh(cova)\n #inv_matrix = np.linalg.inv(cova)\n #chi2 = x.T @ inv_matrix @ x\n chi2 = x.T @ np.linalg.solve(cova, x)\n log_like = -0.5*(nx*np.log(2*np.pi) \n + np.sum(np.log(eigvals))\n + chi2)\n return log_like\n\ndef fit_iminuit(vel, vel_error, n_gals, cov_cosmo):\n\n #@jit(nopython=True, parallel=False)\n def get_log_like(fs8, sig_v):\n diag_cosmo = np.diag(cov_cosmo)\n cov_matrix = cov_cosmo*fs8**2 \n diag_total = diag_cosmo*fs8**2 + sig_v**2/n_gals #+ vel_error**2\n np.fill_diagonal(cov_matrix, diag_total)\n log_like = log_likelihood(vel, cov_matrix)\n return -log_like\n \n t0 = time.time()\n m = iminuit.Minuit(get_log_like, fs8=0.5, sig_v=200.)\n m.errordef = iminuit.Minuit.LIKELIHOOD\n m.limits['fs8'] = (0.1, 2.)\n m.limits['sig_v'] = (0., 3000)\n m.migrad()\n m.minos()\n t1 = time.time()\n print(m)\n print(f'iMinuit fit lasted: {(t1-t0)/60:.2f} minutes')\n return m\n\n\ndef header_line(mig):\n npars = len(mig.parameters)\n line = '# name fval nfcn nfit valid '\n for pars in mig.parameters:\n line += f'{pars}_value {pars}_error {pars}_lower {pars}_lower_valid {pars}_upper {pars}_upper_valid '\n for i in range(npars): \n pars1 = mig.parameters[i]\n for j in range(i+1, npars):\n pars2 = mig.parameters[j]\n line += f'cova_{pars1}_{pars2} '\n return line\n\ndef fit_to_line(mig, name):\n npars = len(mig.parameters)\n #-- Values\n line = name \n line += f' {mig.fval} {mig.nfcn} {mig.nfit} {mig.valid*1} '\n for pars in mig.parameters:\n line += f'{mig.values[pars]} {mig.errors[pars]} '\n minos = mig.merrors[pars]\n line += 
f'{minos.lower} {minos.lower_valid*1} {minos.upper} {minos.upper_valid*1} '\n for i in range(npars):\n pars1 = mig.parameters[i]\n for j in range(i+1, npars):\n pars2 = mig.parameters[j]\n line += f'{mig.covariance[pars1, pars2]} '\n return line\n\ndef export_fit(output_fit, mig, name):\n\n fout = open(output_fit, 'w')\n #-- Header\n line = header_line(mig)\n print(line, file=fout)\n #-- Values\n line = fit_to_line(mig, name)\n print(line, file=fout)\n fout.close()\n\ndef main(name='test',\n input_catalog=None,\n nhalos = None,\n zmin = 0.,\n zmax = None,\n kmax = None,\n nk = 512,\n non_linear = 'regpt',\n redshift_space = False,\n grid_size = 40., \n subsample_fraction=1.,\n sigma_m=0.,\n seed=0, \n export_contours='',\n thread_limit=None,\n create_mock=False,\n use_true_vel=False):\n\n print('===========================================')\n print('Name of run:', name)\n print('Input catalog:', input_catalog)\n print('Number of selected halos:', nhalos)\n print('Redshift range:', zmin, zmax)\n print('kmax:', kmax)\n print('nk:', nk)\n print('Non-linear:', non_linear)\n print('Redshift-space:', redshift_space)\n print('Error in magnitude:', sigma_m)\n \n run = {}\n run['options'] = { 'name': name,\n 'input_catalog': input_catalog,\n 'zmin': zmin,\n 'zmax': zmax,\n 'non_linear': non_linear,\n 'redshift_space': redshift_space,\n 'grid_size': grid_size,\n 'subsample_fraction': subsample_fraction,\n 'sigma_m': sigma_m,\n 'seed': seed,\n 'create_mock': create_mock,\n 'use_true_vel': use_true_vel\n }\n\n t00 = time.time()\n\n #-- Set up fiducial cosmology\n cosmo = CosmoSimple(omega_m=0.32, h=0.67, mass_neutrinos=0.)\n sigma_8 = 0.84648 #-- DEMNUni value for m_nu = 0 (Castorina et al. 
2015)\n \n #-- Read power spectrum model\n k, pk = read_power_spectrum(non_linear=non_linear, \n redshift_space=redshift_space)\n k, pk = optimize_k_range(k, pk, precision=1e-6)\n\n #-- Normalise by sigma_8 of this template power spectra\n pk /= sigma_8**2\n\n run['cosmology'] = {'omega_m':0.32, 'h': 0.67, 'k': k, 'pk': pk, 'sigma_8': sigma_8}\n\n #-- Create mock from halo catalog\n if create_mock:\n catalog = create_mock_catalog(input_catalog, cosmo, \n redshift_space=redshift_space,\n zmin=zmin, zmax=zmax, \n subsample_fraction=subsample_fraction,\n nhalos=nhalos, sigma_m=sigma_m, seed=seed)\n else:\n #-- Read halo catalog and compute comoving distances\n catalog = read_catalog(input_catalog, cosmo, use_true_vel=use_true_vel)\n\n print('Number of galaxies in catalog:', catalog['size'])\n print(f'Radial velocity dispersion: {np.std(catalog[\"vel\"]):.1f} km/s')\n\n #-- Put halos and velocities in a grid\n catalog = grid_velocities(catalog, grid_size=grid_size)\n\n print('Number of grid cells with data: ', catalog['size'])\n print(f'Radial velocity dispersion (grid): {np.std(catalog[\"vel\"]):.1f} km/s')\n\n #-- Compute grid window function\n grid_win = grid_window(k, grid_size)\n\n run['catalog'] = catalog\n run['grid_win'] = grid_win\n\n #-- Compute cosmological covariance matrix\n t0 = time.time()\n print(f'Computing cosmological covariance matrix...')\n cov_cosmo = build_covariance_matrix(catalog['ra'], \n catalog['dec'], \n catalog['r_comov'],\n k, pk, \n grid_win=grid_win, \n n_gals=catalog['n_gals']) \n t1 = time.time()\n print(f'Time elapsed calculating cov matrix {(t1-t0)/60:.2f} minutes')\n \n run['cov_cosmo'] = cov_cosmo\n\n #-- Print some elements of covariance matrix\n n_el = 10\n print(f'First {n_el} elements of cov_cosmo [10^5 km^2/s^2]:')\n for i in range(n_el):\n line = ' '\n for j in range(n_el):\n line+=f'{cov_cosmo[i, j]/1e5:.2f} '\n print(line)\n\n #print('Eigvalues:')\n #print(np.linalg.eigvalsh(cov_cosmo))\n\n #-- Perform fit of fsigma8\n 
print('Running iMinuit fit of fsigma8...')\n with threadpool_limits(limits=thread_limit, user_api='blas'):\n mig = fit_iminuit(catalog['vel'], \n catalog['vel_error'], \n catalog['n_gals'], \n cov_cosmo)\n line = fit_to_line(mig, name)\n\n run['migrad'] = mig\n\n if export_contours != '':\n print('Computing one sigma contour...')\n one_sigma = mig.mncontour('fs8', 'sig_v', cl=0.685, size=30)\n print('Computing two sigma contour...')\n two_sigma = mig.mncontour('fs8', 'sig_v', cl=0.95, size=30)\n np.savetxt(export_contours+'_one_sigma', one_sigma)\n np.savetxt(export_contours+'_two_sigma', two_sigma)\n run['one_sigma'] = one_sigma\n run['two_sigma'] = two_sigma\n\n\n tt = time.time()\n print(f'Total time elapsed for {name}: {(tt-t00)/60:.2f} minutes')\n print('')\n print('')\n\n\n\n return run \n\n#if __name__ == '__main__':\n# main()\n\n \n"
},
{
"alpha_fraction": 0.6284722089767456,
"alphanum_fraction": 0.6712962985038757,
"avg_line_length": 25.121212005615234,
"blob_id": "eda1ae1872cc5f25032b1bc105b5351ad6c0787d",
"content_id": "792a57c2e62aa2550b560c599ccad77f1f565070",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 864,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 33,
"path": "/regpt_pk.py",
"repo_name": "bcarreres/peculiar_velocities",
"src_encoding": "UTF-8",
"text": "import pyregpt\nimport numpy as np\nimport pylab as plt\n\nnthreads=30\nk, pklin = np.loadtxt('pk_lin_camb_demnunii_1024.txt', unpack=1)\nmask = (k<=1.0)\nk = k[mask]\npklin = pklin[mask]\nregpt0 = pyregpt.Spectrum2Loop()\nregpt0.set_pk_lin(k, pklin)\nregpt0.set_terms(k)\nregpt0.run_terms('delta', 'delta', nthreads=30)\np_dd = regpt0.pk()\n\nregpt1 = pyregpt.Spectrum2Loop()\nregpt1.set_pk_lin(k, pklin)\nregpt1.set_terms(k)\nregpt1.run_terms('delta', 'theta', nthreads=30)\np_dt = regpt1.pk()\n\n\nregpt2 = pyregpt.Spectrum2Loop()\nregpt2.set_pk_lin(k, pklin)\nregpt2.set_terms(k)\nregpt2.run_terms('theta', 'theta', nthreads=30)\np_tt = regpt2.pk()\n\nfout = open('pk_regpt_demnunii_1024.txt', 'w')\nprint( '# k[h/Mpc] P_delta,delta P_delta,theta P_theta,theta ', file=fout)\nfor i in range(k.size):\n print(f'{k[i]} {p_dd[i]} {p_dt[i]} {p_tt[i]}', file=fout)\nfout.close()\n\n\n"
},
{
"alpha_fraction": 0.5638569593429565,
"alphanum_fraction": 0.5957854390144348,
"avg_line_length": 28.50943374633789,
"blob_id": "823862134f76beb4c7564429359270d19ba57be3",
"content_id": "2800eb8c1393f1c12bbc8f3ba72cffe73d5da5e9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3132,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 106,
"path": "/pv_plot_simulations.py",
"repo_name": "bcarreres/peculiar_velocities",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pylab as plt\nfrom cosmo import CosmoSimple\nimport pv_covariance\nplt.ion()\n\n\ninput_catalog = ('/Users/julian/Work/supernovae/peculiar/surveys/ztf/'\n +'LCDM_062_ztf_0000.dat.fits')\n#input_catalog = ('/Users/julian/Work/supernovae/peculiar/surveys/2mtf/'\n# +'LCDM_062_2mtf_0000.dat.fits')\n\n\ncosmo = CosmoSimple(omega_m=0.32, h=0.67)\n\nredshift_space = False\nadd_grid = False\nadd_grid_arrows=True\nplot_nz = False\n\nzmax = 0.1\nsubsample_fraction = 1.\ngrid_size = 30.\nsigma_m = 0.\n\ncatalog = pv_covariance.read_halos(input_catalog,\n cosmo=cosmo, \n redshift_space=redshift_space, \n zmax=zmax,\n subsample_fraction=1)\nprint('Number of galaxies:', len(catalog['ra']))\n\nnz = pv_covariance.density_z(catalog['redshift'], 1, cosmo)\n\npv_covariance.add_intrinsic_scatter(catalog, sigma_m=sigma_m, cosmo=cosmo)\n\ngrid = pv_covariance.grid_velocities(catalog, grid_size=grid_size)\n\ndef get_xyz(cat):\n x = cat['r_comov']*np.cos(cat['ra'])*np.cos(cat['dec'])\n y = cat['r_comov']*np.sin(cat['ra'])*np.cos(cat['dec'])\n z = cat['r_comov']*np.sin(cat['dec'])\n return x, y, z\n\nx, y, z = get_xyz(catalog)\nxg, yg, zg = get_xyz(grid)\n\nv = catalog['vel']\nvg = grid['vel']\n\nposition = np.array([x, y, z])\npos_min = np.min(position, axis=1)\npos_max = np.max(position, axis=1)\n#- Number of grid voxels per axis\nn_grid = np.floor((pos_max-pos_min)/grid_size).astype(int)+1\n\nw = (x>0) & (y > 0) & (z>-grid_size/2) & (z < grid_size/2)\nwg = (xg>0) & (yg > 0) & (zg>-grid_size/2) & (zg < grid_size/2)\nprint('Number of galaxies:', np.sum(w))\nprint('Number of grid centers:', np.sum(wg))\n\ngrid_x = np.arange(n_grid[0]+1)*grid_size+pos_min[0]\ngrid_y = np.arange(n_grid[1]+1)*grid_size+pos_min[1]\n\n\n#f = plt.figure(figsize=(12, 5))\nf = plt.figure(figsize=(6, 5))\nplt.scatter(x[w], y[w], c=catalog['vel'][w], cmap='seismic', s=2, \n\n vmin=-1000, vmax=1000)\nf.axes[0].set_aspect('equal')\nplt.xlabel(r'x [$h^{-1}$ Mpc]')\nplt.ylabel(r'y 
[$h^{-1}$ Mpc]')\ncbar = plt.colorbar()\ncbar.set_label('Velocity [km/s]', rotation=270)\n\n\nif add_grid:\n for g in grid_x: \n plt.axvline(g, color='k', ls='--', lw=1, alpha=0.5)\n for g in grid_y: \n if g>=-grid_size:\n plt.axhline(g, color='k', ls='--', lw=1, alpha=0.5)\n\n wg = (yg > -grid_size/2) & (zg>-grid_size/2) & (zg < grid_size/2)\n plt.autoscale(False)\n plt.scatter(xg[wg], yg[wg], c=vg[wg], s=1400, \n cmap='seismic', vmin=-1000, vmax=1000,\n marker='s', alpha=0.5)\n\nif add_grid_arrows:\n rg = np.sqrt(xg[wg]**2+yg[wg]**2)\n vx = vg[wg]*xg[wg]/rg\n vy = vg[wg]*yg[wg]/rg\n plt.quiver(xg[wg], yg[wg], vx, vy, vg[wg], cmap='seismic', edgecolors='k')\n\n\nif plot_nz:\n nz = pv_covariance.density_z(catalog['redshift'], 1, cosmo, nbins=20)\n plt.figure(figsize=(4,3))\n plt.errorbar(nz['z_centers'], nz['density']*subsample_fraction, \n nz['density_err']*subsample_fraction, fmt='o')\n plt.xlabel('Redshift')\n plt.ylabel(r'$\\bar{n}(z)$ [$h^3$ Mpc$^{-3}$]')\n plt.ylim(0, None)\n plt.tight_layout()\n "
}
] | 6 |
lance59/Monoamp-Repo
|
https://github.com/lance59/Monoamp-Repo
|
ded29461733e8f9afbd92d1a4feda8bdfff6e01a
|
210d8e59b6b88ca0187a92fa2276da1f1cfc344d
|
c9d606ab9caa76548fac3556ca16173697529572
|
refs/heads/master
| 2020-03-28T08:41:43.283848 | 2017-07-27T04:28:58 | 2017-07-27T04:28:58 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5910964608192444,
"alphanum_fraction": 0.6018136739730835,
"avg_line_length": 39.43333435058594,
"blob_id": "013f0675579d36a314d52636c6c20665b7b312bf",
"content_id": "63c644d316d5e20f3675a84f04a367d280e57120",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "PHP",
"length_bytes": 1213,
"license_type": "no_license",
"max_line_length": 189,
"num_lines": 30,
"path": "/Server/madAmpMySqlUpdate.php",
"repo_name": "lance59/Monoamp-Repo",
"src_encoding": "UTF-8",
"text": "<?php\n\n// Prevent caching.\nheader('Cache-Control: no-cache, must-revalidate');\nheader('Expires: Mon, 01 Jan 1996 00:00:00 GMT');\n// $json = \"'\".file_get_contents('php://input').\"'\";\n\n// json from javascript client\n$command = json_decode(file_get_contents('php://input'), true);\n\n$test_response = $command['fieldValue'];\n\n if (strpos($command['field'], \"Name\") !== false) {\n\t$str = sprintf(\"python Drivers/my_sql_update.py \" . $command['tableName'] . ' ' . $command['field'] . ' \"' . $command['fieldValue'] . '\" ' . $command['pk'] . ' ' . $command['pkValue']);\n } elseif (strpos($command['tableName'], \"attributes\") !== false) {\n\t$str = sprintf(\"python Drivers/my_sql_update.py \" . $command['tableName'] . \" \" . $command['field'] . \" \" . $command['fieldValue'] .\" \". $command['pk'] . \" '\" . $command['pkValue'] . \"'\");\n } else {\n\t $str = sprintf(\"python Drivers/my_sql_update.py \" . $command['tableName'] . \" \" . $command['field'] . \" \" . $command['fieldValue'] .\" \". $command['pk'] . \" \" . $command['pkValue']);\n }\n\nexec($str, $output);\n\n//echo array as string back to javascript - ajax call response\nif ($output[0] == $test_response) {\n\techo (json_encode($command));\n} else {\n\techo (json_encode($output));\n}\n\n?>\n"
},
{
"alpha_fraction": 0.6675724387168884,
"alphanum_fraction": 0.6675724387168884,
"avg_line_length": 23.53333282470703,
"blob_id": "5682627257025fd34487d8c14448043777d79bc4",
"content_id": "afe5a4f2a6dfd176dee8f32f1520b610912adbea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "PHP",
"length_bytes": 1104,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 45,
"path": "/Server/madAmpMySqlQuery.php",
"repo_name": "lance59/Monoamp-Repo",
"src_encoding": "UTF-8",
"text": "<?php\n\n// hostname, username, password - set them according to your setup\n\n$link = mysql_connect(\"localhost\", \"root\", \"udooer\");\n// select database\nmysql_select_db(\"madAmp\", $link);\n// Formulate Query\n$query = sprintf(\"SELECT * FROM `attributeControlMode`\");\n \n// Perform Query\n$result = mysql_query($query);\n// read result into array\n$madAmpData = array();\nwhile(($row = mysql_fetch_assoc($result))) {\n\t$madAmpData[] = $row;\n}\n\n$query = sprintf(\"SELECT * FROM `zones`\");\n$result = mysql_query($query);\nwhile(($row = mysql_fetch_assoc($result))) {\n\t$madAmpData[] = $row;\n}\n\n$query = sprintf(\"SELECT * FROM `sources`\");\n$result = mysql_query($query);\nwhile(($row = mysql_fetch_assoc($result))) {\n\t$madAmpData[] = $row;\n}\n\n$query = sprintf(\"SELECT * FROM `attributes`\");\n$result = mysql_query($query);\nwhile(($row = mysql_fetch_assoc($result))) {\n\t$madAmpData[] = $row;\n}\n// close mysql connection\n\nmysql_close($link);\n// Free the resources associated with the result set\n// This is done automatically at the end of the script\nmysql_free_result($result);\n\n$res = json_encode($madAmpData);\necho $res;\n?>\n"
},
{
"alpha_fraction": 0.6637413501739502,
"alphanum_fraction": 0.6836027503013611,
"avg_line_length": 25.35365867614746,
"blob_id": "4c4d4930b6f717f214d92955124a846e31ffe9ed",
"content_id": "438f1f2d7a0aeb37facb58492a9e5f12f1011cf3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2165,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 82,
"path": "/Server/Drivers/rs232_simulator.py",
"repo_name": "lance59/Monoamp-Repo",
"src_encoding": "UTF-8",
"text": "import time\nimport sys\nimport MySQLdb\n\ndef main():\n\t# argv[1] madAmp database function request\n\tdb_data = sys.argv[1]\n\n\t# check if first character in argv is command symbol < set status\n\tif ((db_data[0]) == \"<\"):\n\t\tcmd_status = 1\n\telse:\n\t\tcmd_status = 0\n\t\n\t# pass d_data and cmd_status to database driver return with response\n\tresp_data = my_sql_driver(db_data,cmd_status)\n\t\t\n\t# print response to console or Server - whomever called the program\n\tprint resp_data\n\t\n\t\ndef my_sql_driver(my_sql_data, status_cmd):\n\t\t\n\tif status_cmd == 1:\t# this is a database modify command - simulate RS232 Control command\n\t\taddress = my_sql_data[1:3]\n\t\tcontrol_function = my_sql_data[3:5]\n\t\tnew_value = my_sql_data[5:]\n\t\t\n\t\tmy_sql_update(address, control_function, new_value)\n\t\t\t\t\n\taddress = my_sql_data[1:3]\n\tquery_resp = my_sql_query(address)\n\t\n\treturn query_resp\n\n\ndef my_sql_update(address,control_function,new_value):\n\t# Open database connection\n\tdb = MySQLdb.connect(\"localhost\",\"root\",\"udooer\",\"rs232sim\")\n\n\t# prepare a cursor object using cursor() method\n\tcursor = db.cursor()\n\t\n\t# execute SQL update\n\tsql = \"update rs232sim.zones set {} = {} where AD = {}\".format(control_function, new_value, address)\n\t\n\tcursor.execute(sql)\n\t\n\t# commit changes in database\n\tdb.commit()\n\treturn\n\ndef my_sql_query(address):\n\n\t# Open database connection\n\tdb = MySQLdb.connect(\"localhost\",\"root\",\"udooer\",\"rs232sim\")\n\n\t# prepare a cursor object using cursor() method\n\tcursor = db.cursor()\n\n\t# execute SQL query using execute() method\n\tcursor.execute(\"SELECT * from zones where AD = %s\",(address))\n\t\n\t# Fetch all rows using fetchall() method\n\tresults = cursor.fetchall()\n\t\n\t# disconnect from server\n\tdb.close()\n\t\n\t# assign multiple variables to tuple results[0]\n\tAD,PA,PR,MU,DT,VO,TR,BS,BL,CH,KP,index = results[0]\n\t\n\t# create response string - RS232 comm 
returns one string with all settings \n\tquery_resp = \"?\"+address+\"#>\"+str(AD).zfill(2)+str(PA).zfill(2)+str(PR).zfill(2)+\\\n\t\tstr(MU).zfill(2)+str(DT).zfill(2)+str(VO).zfill(2)+str(TR).zfill(2)+str(BS).zfill(2)+\\\n\t\tstr(BL).zfill(2)+str(CH).zfill(2)+str(KP).zfill(2)+\"#\"+chr(13)\n\t\n\treturn query_resp\n\n\t\t\nif __name__ == '__main__':\n\tmain()\t\t\t\t\n"
},
{
"alpha_fraction": 0.6168830990791321,
"alphanum_fraction": 0.6178107857704163,
"avg_line_length": 24.069766998291016,
"blob_id": "022b4f4bc4c64081379edc9d9031619501585c8e",
"content_id": "7cd321b61c41c576fde51d46fea8e6f466bbf584",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1078,
"license_type": "no_license",
"max_line_length": 145,
"num_lines": 43,
"path": "/Services/CustomFilters.js",
"repo_name": "lance59/Monoamp-Repo",
"src_encoding": "UTF-8",
"text": "(function(){\n\t'use strict';\n\tangular.module('MadAmpApp').filter('getRangeControls', function() {\n\t\n\t // Create the return function and set the required parameter name to **input**\n\t return function(input) {\n\t\n\t var out = [];\n\t\n\t // Using the angular.forEach method, go through the array of data and perform the operation of figuring out if the control is of type range.\n\t angular.forEach(input, function(attribute) {\n\t\n\t if (attribute.type === 'range') {\n\t out.push(attribute)\n\t }\n\t \n\t })\n\t\n\t return out;\n\t }\n\t});\n\t\n\tangular.module('MadAmpApp').filter('getFirstActiveZone', function() {\n\t\n\t // Create the return function and set the required parameter name to **input**\n\t return function(input) {\n\t\n\t var out = [];\n\t\n\t // Using the angular.forEach method, go through the array of data and perform the operation of figuring out if the control is of type range.\n\t angular.forEach(input, function(zone) {\n\n\t if (zone.activeStatus == \"1\") {\n\t \tout.push(zone)\n\t }\n\t \n\t })\n\t\n\t return out;\n\t }\n\t});\n\t\n})();\n"
},
{
"alpha_fraction": 0.5545134544372559,
"alphanum_fraction": 0.5545134544372559,
"avg_line_length": 22.054054260253906,
"blob_id": "0b25a2c16ffbe30b6e76368b8be938fc8c036d96",
"content_id": "35e40601fb16f270151d8fd6f607e175a0ba9c85",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 853,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 37,
"path": "/Services/MadAmpAPI.js",
"repo_name": "lance59/Monoamp-Repo",
"src_encoding": "UTF-8",
"text": "(function(){\n 'use strict';\n \n\tangular.module('MadAmpApp').\n\t factory('MadAmpAPIservice', function($http) {\n\t\n\t var MadAmpAPI = {};\n\t\n\t MadAmpAPI.getSettings = function() {\n\t return $http({\n\t\t\tmethod:'GET',\n\t\t\turl:'/Server/madAmpMySqlQuery.php'\n\t\t\t});\n\t }\n\t \n\t \n\t MadAmpAPI.sendCommand = function(stringCmd) {\n\t return $http({\n\t method: 'POST', \n\t url:'/Server/madAmpPythonMessenger.php',\n\t data: {serStr:stringCmd},\n\t\t\theaders: {'Content-Type': 'application/json'},\n\t\t\t});\n\t }\n\t \n\t MadAmpAPI.updateSetting = function(settingEntity) {\n\t return $http({\n\t method: 'POST', \n\t url:'/Server/madAmpMySqlUpdate.php',\n\t data: JSON.stringify(settingEntity),\t\n\t\t\theaders: {'Content-Type': 'application/json'},\n\t\t\t});\n\t }\n\t \n\t return MadAmpAPI;\t \n\t });\t\n})();\n"
},
{
"alpha_fraction": 0.6394724249839783,
"alphanum_fraction": 0.6438691020011902,
"avg_line_length": 23.321428298950195,
"blob_id": "86956aeaaf8c32422d1579f12b5e880435159195",
"content_id": "9d885cdc240f256362943edae3b1f18d48c5db12",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2047,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 84,
"path": "/Server/Drivers/my_sql_update.py",
"repo_name": "lance59/Monoamp-Repo",
"src_encoding": "UTF-8",
"text": "import time\nimport sys\nimport MySQLdb\n\ndef main():\n\t# argv[1] madAmp database function request\n\ttable = sys.argv[1]\n\tfield = sys.argv[2]\n\tfield_value = sys.argv[3]\n\tpk = sys.argv[4]\n\tpk_value = sys.argv[5]\n\t\n\tmy_sql_update(table, field, field_value, pk, pk_value)\n\t\t \t\t\n\t# pass data to database query driver\n\tresp = my_sql_query(table,field,pk,pk_value)\n\t\n\t# print response to console or Server - whomever called the program\n\tif (resp[0] == field_value):\n\t\tprint field_value\n\t\n\telse:\n\t\tprint resp[0]\n\t\n\ndef my_sql_update(table, field, field_value, pk, pk_value):\n\t# Open database connection\n\tdb = MySQLdb.connect(\"localhost\",\"root\",\"udooer\",\"madAmp\")\n\n\t# prepare a cursor object\n\tcursor = db.cursor()\n\t\n\t# Name change requires quotes for text\n\tif (field.endswith(\"Name\")):\n\t\tfield_value = field_value.replace(\"'\",\"''\")\n\t\tsql = \"UPDATE {} SET {} = '{}' WHERE {} = {}\".format(table, field, field_value, pk, pk_value)\n\t\t\n\t# attributes requires quotes for control field value\t\n\telif (table == \"attributes\"):\n\t\tsql = \"UPDATE `{}` SET `{}` = {} WHERE `{}` = '{}'\".format(table, field, field_value, pk, pk_value)\n\t\t\n\t# numerical update no quotes required\t\n\telse:\n\t\tsql = \"UPDATE {} SET {} = {} WHERE {} = {}\".format(table, field, field_value, pk, pk_value)\n\t\n\tcursor.execute(sql)\n\t\n\t# commit changes in database\n\tdb.commit()\n\tdb.close()\n\t\t\n\treturn\n\n\ndef my_sql_query(table, field, pk, pk_value):\n\n\t# Open database connection\n\tdb = MySQLdb.connect(\"localhost\",\"root\",\"udooer\",\"madAmp\")\n\n\t# prepare a cursor object using cursor() method\n\tcursor = db.cursor()\n\t\n\t# prep query command\n\tif (table == \"attributes\"):\n\t\tsql = \"SELECT `{}` FROM `{}` WHERE `{}` = '{}'\".format(field, table, pk, pk_value)\n\telse:\n\t\tsql = \"SELECT `{}` FROM `{}` WHERE `{}` = {}\".format(field, table, pk, pk_value)\n\n\t# execute SQL query using execute() 
method\n\tcursor.execute(sql)\n\t\n\t# Fetch all rows using fetchall() method\n\tresults = cursor.fetchall()\n\t\n\t# disconnect from server\n\tdb.close()\n\t\n\tquery_resp = results[0]\n\t\n\treturn\tquery_resp\n \n\nif __name__ == '__main__':\n\tmain()\t\t\t\t\n"
},
{
"alpha_fraction": 0.7099236845970154,
"alphanum_fraction": 0.7099236845970154,
"avg_line_length": 31.75,
"blob_id": "56ebce65dec46e68bdd146b6b0fcf59c27208804",
"content_id": "680b4cb32934e88124a5d0630e349f1e41eadad1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 131,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 4,
"path": "/jamie_update_server.sh",
"repo_name": "lance59/Monoamp-Repo",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n# Script to copy files to /var/www/html\nsudo cp -a /home/james/Monoamp-Repo/. /var/www/html/\necho \"File copy complete\"\n"
},
{
"alpha_fraction": 0.6024518609046936,
"alphanum_fraction": 0.6514886021614075,
"avg_line_length": 18.03333282470703,
"blob_id": "b7da3fc9070b61ddae96fc2883d5f5cf5f48afd0",
"content_id": "06b0b7c276aee3f856afe57f850be6a4f82e62b0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 571,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 30,
"path": "/arduino/MultiSerialNeo.ino",
"repo_name": "lance59/Monoamp-Repo",
"src_encoding": "UTF-8",
"text": "/*\n Udoo Neo multple serial port\n\n Create comm link between Internal A9/M4 port to External Serial Port\n Serial = /dev/ttyMCC\n Serial0 RX - J4 pin 0\n Serial0 TX - J4 pin 1\n \n */\n\n\nvoid setup() {\n // initialize both serial ports:\n Serial.begin(115200);\n Serial0.begin(9600);\n}\n\nvoid loop() {\n // read from Serial0, send to A9/M4 Serial\n if (Serial0.available()) {\n int inByte = Serial0.read();\n Serial.write(inByte);\n }\n\n // read from A9/M4 Serial, send to Serial0\n if (Serial.available()) {\n int inByte = Serial.read();\n Serial0.write(inByte);\n }\n}\n"
},
{
"alpha_fraction": 0.601344108581543,
"alphanum_fraction": 0.6090386509895325,
"avg_line_length": 38.038021087646484,
"blob_id": "acbdb6d20a91f2ff00013cc8d789a5d1e7d80c4c",
"content_id": "38469e54c564e43782bdcbc1cc3121c27c4a53fe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 10267,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 263,
"path": "/Components/Mobile/RemoteControl/RemoteControl.js",
"repo_name": "lance59/Monoamp-Repo",
"src_encoding": "UTF-8",
"text": "(function(){\n\t'use strict';\n var controllerId = 'RemoteControl';\n angular.module('MadAmpApp').controller(controllerId, ['MadAmpAPIservice', '$scope', '$filter', viewModel]);\n function viewModel(MadAmpAPIservice, $scope, $filter){\n \t\n\t\n\t\t$scope.zoneSettings = \"\",\n\t\t$scope.sourceSettings = \"\",\n\t\t$scope.attributeSettings = \"\";\n\t\t\t\n\t\tvar\tsendCommand = \"<\",\n\t\t sendQuery = \"?\",\n\t\t stringCmd = \"\",\n\t\t resp = \"\";\n\n\t\t//hard coded because of hardware constraints\n\t\t$scope.controlStatus =\n\t\t{\n\t\t ObjectCode:\n\t\t {\n\t\t unit: 1,\n\t\t zone: 1\n\t\t },\n\t\t\n\t\t Power: 0,\n\t\t Source: 1,\n\t\t Volume: 20,\t\t\n\t\t Bass: 7,\n\t\t Treble: 7,\n\t\t Balance: 10,\n\t\t Mute: 0\n\t\t};\n\t\t\t\t\t\t\t\t\t\n\t\t// POST mysql request for zone names\n\t\tvar settings = MadAmpAPIservice.getSettings().then(function(resp){\n\t\t\tparseMenuSettings(resp.data)\n\t\t// on load of window set zone_select to first zone and query status\n\t\t// from MonoAmp use result to set powerOn variable and all other\n\t\t// parameters\n\t\t\tvar firstActiveZone = $filter('getFirstActiveZone')($scope.zoneSettings)[0],\n\t\t\t\tinitialQuery = \"1\" + firstActiveZone.positionAddress;\n\t\t\t\n\t\t\tdocument.getElementById(\"zone_select\").value = initialQuery;\n\t\t\tstringCmd =\n\t\t \tsendQuery\n\t\t \t+ initialQuery\n\t\t\tconsole.log(\"App start - Query status Zone 1:\"+stringCmd);\n\t\t\tserCmd(stringCmd);\t// POST serial request for zone data\n\t\t\t\n\t\t\t}, function(resp){\n\t\t\t\tconsole.log(\"error importing app settings\")\n\t\t\t});\n\t\t\n\t\t$scope.toggleButton = function (button){\n\t\t //if power is on, send message to turn off\n\t\t //DO NOT SET BUTTON UNTIL RECIEVE A RESPONSE\n\t\t console.log(\"Toggle Pressed: \" + button.displayName);\n\t\t\n\t\t if ($scope.controlStatus.Power || button.control == \"PR\"){\n\t\t\t if ($scope.controlStatus[button.displayName]) {\n\t\t\t //disable source selection and power off 
selected zone\n\t\t\t stringCmd =\n\t\t\t sendCommand\n\t\t\t + $scope.controlStatus.ObjectCode.unit + \"\" + $scope.controlStatus.ObjectCode.zone\n\t\t\t + button.control\n\t\t\t + \"00\"; \n\t\t\t } else {\n\t\t\t stringCmd =\n\t\t\t sendCommand\n\t\t\t + $scope.controlStatus.ObjectCode.unit + \"\" + $scope.controlStatus.ObjectCode.zone\n\t\t\t + button.control\n\t\t\t + \"01\";\n\t\t\t }\n\t\t\t\n\t\t\t console.log(\"Command to Post:\" + stringCmd);\n\t\t\t serCmd(stringCmd);\n\t\t }\n\t\t}\n\t\t\n\t $scope.rangeControlClick = function (rangeControl, buttonDirection) {\n\t\t console.log(\"Button Pressed: \" + rangeControl.displayName + \"_\" + buttonDirection);\n\t\t if ($scope.controlStatus.Power){\n\t\t\t var controlValue = $scope.controlStatus[rangeControl.displayName];\n\t\t\t\n\t\t\t if (buttonDirection == \"UP\") {\n\t\t\t if (controlValue + 1 <= rangeControl.max) {\n\t\t\t stringCmd =\n\t\t\t sendCommand\n\t\t\t + $scope.controlStatus.ObjectCode.unit + \"\" + $scope.controlStatus.ObjectCode.zone\n\t\t\t + rangeControl.control\n\t\t\t + getValueString(controlValue + 1);\n\t\t\t console.log(\"Scale button action to Post: \" + stringCmd);\n\t\t\t serCmd(stringCmd);\n\t\t\t } else {\n\t\t\t console.log(\"Scale command out of range\");\n\t\t\t }\n\t\t\t } else { //button direction is down\n\t\t\t if (controlValue - 1 >= rangeControl.min) {\n\t\t\t stringCmd =\n\t\t\t\t\t\t\t\tsendCommand \n\t\t\t\t\t\t\t\t+ $scope.controlStatus.ObjectCode.unit + \"\" + $scope.controlStatus.ObjectCode.zone \n\t\t\t\t\t\t\t\t+ rangeControl.control\n\t\t\t\t\t\t\t\t+ getValueString(controlValue - 1);\n\t\t\t\t\t\t\tconsole.log(\"Scale button action to Post: \" + stringCmd);\n\t\t\t\t\t\t\tserCmd(stringCmd);\n\t\t\t } else {\n\t\t\t console.log(\"Scale command out of range\");\n\t\t\t }\n\t\t\t }\n\t\t }\n\t\t }\n\t\t \n\t\tfunction parseMenuSettings(resp)\n\t\t{\n\t\n\t\t\t$scope.slidersOn = parseInt(resp.slice(0,1)[0].slidersOn);\n\t\t\t$scope.zoneSettings = 
resp.slice(1,13);\n\t\t\t$scope.sourceSettings = resp.slice(13,19);\n\t\t\t\n\t\t\tvar attributes = resp.slice(19,resp.length);\n\t\t\t\n\t\t\t$scope.selectedZone = $scope.zoneSettings[0];\n\t\t\t\n\t\t\t$scope.powerSettings = $(attributes).filter(function (i,n){return n.control==='PR'})[0];\n\t\t\t$scope.muteSettings = $(attributes).filter(function (i,n){return n.control==='MU'})[0];\n\t\t\t$scope.globalSourceSettings = $(attributes).filter(function (i,n){return n.control==='CH'})[0]; \n\t\t\t\n\t\t\t$scope.rangeControls = $(attributes).filter(function (i,n){return n.type==='range'});\n\t\t}\n\t\t\n\t\tfunction serCmd(stringCmd){\n\t\t\t// use API service to issue a command to blah\n\t\t MadAmpAPIservice.sendCommand(stringCmd).success(function(resp) \n\t\t {\n\t\t \tsetControlStatus(resp);\n\t\t });\n\t\t}\n\n\t\t$scope.assignZone = function (){\n\t\t\t\n\t\t //$scope.controlStatus.ObjectCode.zone = document.getElementById(\"zone_select\").value;\n\t\t\tstringCmd =\n\t\t sendQuery\n\t\t + $scope.selectedZone.unitAddress + \"\" + $scope.selectedZone.positionAddress;\n\t\t\tconsole.log(\"Command to Post:\"+stringCmd);\n\t\t\tserCmd(stringCmd);\t// POST serial command to php\n\t\t}\n\t\t\n\t\t$scope.assignSource = function (){\n\t\t //$scope.controlStatus.Source.value = document.getElementById(\"source_select\").value;\n\t\t\tif ($scope.controlStatus.Power){\n\t\t\t stringCmd = \n\t\t sendCommand \n\t\t + $scope.controlStatus.ObjectCode.unit + \"\" + $scope.controlStatus.ObjectCode.zone \n\t\t + $scope.globalSourceSettings.control\n\t\t + getValueString($scope.controlStatus.Source);\n\t\t\t\tconsole.log(\"Command to post:\" + stringCmd);\n\t\t\t\tserCmd(stringCmd); // POST Source to php serial\n\t\t\t\t}\n\t\t\telse{\n\t\t\t\tconsole.log(\"No Zone selected or Power for zone is off\");\n\t\t\t}\n\t\t}\n\t\t\n\t\t$scope.getRangeValueWithOffset = function(rangeControl){\n\t\t\tvar value = parseInt($scope.controlStatus[rangeControl.displayName]) \n\t\t\t\t\t\t- 
parseInt(rangeControl.offset);\n\t\t\treturn value.toString();\n\t\t}\n\t\t\n\t\t$scope.getRangeOptions = function (rangeControl){\n\t\t\t\n\t\t\treturn {floor: parseInt(rangeControl.min), \n\t\t\t\t\tceil: parseInt(rangeControl.max),\n\t\t\t\t\trangeControl: rangeControl,\n\t\t\t\t\tapi: MadAmpAPIservice,\n\t\t\t\t\tcontrolStatus: $scope.controlStatus,\n\t\t\t\t\tonChange: function($scope) {\n\t\t\t\t\t\t//debugger;\n\t\t\t\t\t\tvar value = this.controlStatus[this.rangeControl.displayName];\n\t\t\t\t\t\tvar valueAsString = (value >= 10) ? value.toString() : \"0\" + value.toString();\n \t\t\tvar strCmd = \"<\" + this.controlStatus.ObjectCode.unit + \"\" + this.controlStatus.ObjectCode.zone \n\t\t\t\t\t\t\t\t+ this.rangeControl.control\n\t\t\t\t\t\t\t\t+ valueAsString;\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t MadAmpAPIservice.sendCommand(strCmd).success(function(resp) \n\t\t\t\t\t {\n\t\t\t\t\t \tsetControlStatus(resp);\n\t\t\t\t\t });\n \t\t\tconsole.log('on change '+strCmd); // logs 'on change slider-id'\n \t\t\t},};\n\t\t}\n\t\t\n\t\tfunction setMute(newMuteState) {\n\t\t\tvar muteButton = document.getElementById(\"TOGGLE_Mute\");\n\t\t\t\n\t\t\tif (newMuteState) {\n\t\t\t\tmuteButton.className = muteButton.className.replace(\"muteOff\", \"muteOn\");\n\t\t\t\tmuteButton.textContent = muteButton.textContent.replace(\"Mute\", \"Mute ON\");\n\t\t\t\t$scope.controlStatus.Mute = 1;\n\t\t\t} else {\n\t\t\t\tmuteButton.className = muteButton.className.replace(\"muteOn\", \"muteOff\");\n\t\t\t\tmuteButton.textContent = muteButton.textContent.replace(\"Mute ON\", \"Mute\");\n\t\t\t\t$scope.controlStatus.Mute = 0;\n\t\t\t}\n\t\t}\n\t\t\n\t\tfunction setPower(newPowerState) {\n\t\t var powerButton = document.getElementById(\"TOGGLE_Power\");\n\t\t //powerOn is the last state of the button\n\t\t //if powerOn is the same as the new state, no need to do anything\n\t\t if (newPowerState) {\n\t\t powerButton.className = powerButton.className.replace(\"powerOff\", \"powerOn\");\n\t\t 
powerButton.textContent = powerButton.textContent.replace(\"OFF\", \"ON\");\n\t\t $scope.controlStatus.Power = 1;\n\t\t } else {\n\t\t powerButton.className = powerButton.className.replace(\"powerOn\", \"powerOff\");\n\t\t powerButton.textContent = powerButton.textContent.replace(\"ON\", \"OFF\");\n\t\t $scope.controlStatus.Power = 0;\n\t\t } \n\t\t}\n\t\t\n\t\tfunction getValueString(value) {\n\t\t return (value >= 10) ? value.toString() : \"0\" + value.toString();\n\t\t}\n\t\t\t\t\t\n\t\tfunction setControlStatus(resp)\n\t\t{// This is the best way to perform an SQL query\n\t\t// For more examples, see mysql_real_escape_string()\n\t\t var n = resp.length;\n\t\t console.log (\"Reply length is: \" + n);\n\t\t if (n == 31) {\n\t\t //splitting the zone into unit and zone\n\t\t //control object code are what these values represent as a pair\n\t\t $scope.controlStatus.ObjectCode.unit = parseInt(resp.substr(5, 1));\n\t\t $scope.controlStatus.ObjectCode.zone = $scope.zoneSettings[parseInt(resp.substr(6, 1)) - 1].positionAddress;\n\t\t setPower(parseInt(resp.substr(9, 2)));\n\t\t\t\t\n\t\t\t\tsetMute(parseInt(resp.substr(11,2)));\n\t\t //$scope.controlStatus.Mute = parseInt(resp.substr(11, 2));\n\t\t $scope.controlStatus.Volume = parseInt(resp.substr(15, 2));\n\t\t $scope.controlStatus.Treble = parseInt(resp.substr(17, 2));\n\t\t $scope.controlStatus.Bass = parseInt(resp.substr(19, 2));\n\t\t $scope.controlStatus.Balance = parseInt(resp.substr(21, 2));\t\t\n\t\t $scope.controlStatus.Source = $scope.sourceSettings[parseInt(resp.substr(23, 2)) -1].positionAddress;\n\t\t \n\t\t //log the $scope.controlStatus object\n\t\t console.log(\"[setting controls] Zone is: \" + $scope.controlStatus.ObjectCode.unit\n\t\t + \"\" + $scope.controlStatus.ObjectCode.zone);\n\t\t console.log(\"[setting controls] Power (PR) is: \" + $scope.controlStatus.Power);\n\t\t console.log(\"[setting controls] Mute (MU) is: \" + $scope.controlStatus.Mute);\n\t\t console.log(\"[setting controls] Volume 
(VO) is: \" + $scope.controlStatus.Volume);\n\t\t console.log(\"[setting controls] Treble (TR) is: \" + $scope.controlStatus.Treble);\n\t\t console.log(\"[setting controls] Bass (BS) is: \" + $scope.controlStatus.Bass);\n\t\t console.log(\"[setting controls] Balance (BL) is: \" + $scope.controlStatus.Balance);\n\t\t console.log(\"[setting controls] Source (CH) is: \" + $scope.controlStatus.Source);\n\t\t } else {\n\t\t console.log(\"Response is incorrect format!\");\n\t\t }\n\t\t}\n\t}\n})();\n"
},
{
"alpha_fraction": 0.4190766215324402,
"alphanum_fraction": 0.564687967300415,
"avg_line_length": 29.323076248168945,
"blob_id": "f5bbb573563faa562e2350d3fe3156155a976fd6",
"content_id": "c005c049056225c0d292f6dabff2820f178d74cd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 1971,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 65,
"path": "/Data/rs232sim.sql",
"repo_name": "lance59/Monoamp-Repo",
"src_encoding": "UTF-8",
"text": "-- phpMyAdmin SQL Dump\n-- version 4.0.10deb1\n-- http://www.phpmyadmin.net\n--\n-- Host: localhost\n-- Generation Time: Oct 30, 2016 at 11:30 PM\n-- Server version: 5.5.52-0ubuntu0.14.04.1\n-- PHP Version: 5.5.9-1ubuntu4.19\n\nSET SQL_MODE = \"NO_AUTO_VALUE_ON_ZERO\";\nSET time_zone = \"+00:00\";\n\n\n/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;\n/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;\n/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;\n/*!40101 SET NAMES utf8 */;\n\n--\n-- Database: `rs232sim`\n--\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `zones`\n--\n\nCREATE TABLE IF NOT EXISTS `zones` (\n `AD` int(11) DEFAULT NULL,\n `PA` int(11) DEFAULT NULL,\n `PR` int(11) DEFAULT NULL,\n `MU` int(11) DEFAULT NULL,\n `DT` int(11) DEFAULT NULL,\n `VO` int(11) DEFAULT NULL,\n `TR` int(11) DEFAULT NULL,\n `BS` int(11) DEFAULT NULL,\n `BL` int(11) DEFAULT NULL,\n `CH` int(11) DEFAULT NULL,\n `KP` int(11) DEFAULT NULL,\n `id` int(10) unsigned NOT NULL AUTO_INCREMENT,\n PRIMARY KEY (`id`)\n) ENGINE=InnoDB DEFAULT CHARSET=latin1 AUTO_INCREMENT=13 ;\n\n--\n-- Dumping data for table `zones`\n--\n\nINSERT INTO `zones` (`AD`, `PA`, `PR`, `MU`, `DT`, `VO`, `TR`, `BS`, `BL`, `CH`, `KP`, `id`) VALUES\n(11, 0, 0, 0, 0, 13, 10, 5, 13, 1, 1, 1),\n(12, 0, 1, 0, 0, 13, 9, 9, 10, 4, 1, 2),\n(13, 0, 1, 0, 0, 21, 7, 7, 10, 3, 1, 3),\n(14, 0, 1, 0, 0, 21, 7, 7, 10, 6, 1, 4),\n(15, 0, 1, 0, 0, 18, 7, 7, 10, 5, 1, 5),\n(16, 0, 1, 0, 0, 19, 7, 7, 10, 2, 1, 6),\n(21, 0, 0, 0, 0, 14, 7, 7, 10, 2, 1, 7),\n(22, 0, 0, 0, 0, 14, 7, 7, 10, 2, 1, 8),\n(23, 0, 1, 0, 0, 14, 7, 7, 10, 1, 1, 9),\n(24, 0, 1, 0, 0, 14, 7, 7, 10, 2, 1, 10),\n(25, 0, 1, 0, 0, 14, 7, 7, 10, 2, 1, 11),\n(26, 0, 1, 0, 0, 14, 7, 7, 10, 2, 1, 12);\n\n/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;\n/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;\n/*!40101 SET 
COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;\n"
},
{
"alpha_fraction": 0.6529877781867981,
"alphanum_fraction": 0.6623470187187195,
"avg_line_length": 29.53333282470703,
"blob_id": "fd38aea5e8df36a149ee037d1b68aae9f6696f3f",
"content_id": "c6e6e43c37e0cc0981d11f92218f90fdc79f9ed7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1389,
"license_type": "no_license",
"max_line_length": 337,
"num_lines": 45,
"path": "/README.md",
"repo_name": "lance59/Monoamp-Repo",
"src_encoding": "UTF-8",
"text": "# Monoamp-Repo\n\nMadAmp - Monoprice Audio Distribution Amp\n=========================================\n\n Control a Monoprice 6 zone Audio Distribution Amp using a phone or tablet via wifi connection. Use the device browser to access the Apache Server. Target board for this software is an UDOO Neo. Although the program will work on any Linux device that has an RS232 port which must be connected to the Distribution Amp RS232 control port.\n \nAudio Amp Front View\n--------------------\n\n\n \nAudio Amp Rear View\n-------------------\n\n\n\nBrowser View of Control Panel\n-----------------------------\n\n\n\nSoftware Installation Required\n------------------------------\n* Apache Server\n* PHP\n* mySQL Database\n\n Instructions for installation of these programs on UDOO Neo can be found at this link.\n \n (http://www.udoo.org/tutorial/udoo-web-server/)\n\n Program also requires creation of madAmp database. Use Phpmyadmin to create the databases and import the sql files from the Data folder to populate the databases.\n \n Database - madAmp - import file /Data/madAmp.sql\n Database - rs232sim - import file /Data/rs232sim.sql\n\nAdditional Software Required\n----------------------------\n \n* Python mysqldb \n\n Install as follows: \n \n sudo apt-get install python-mysqldb\n \n \n \n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.6570915579795837,
"alphanum_fraction": 0.6929982304573059,
"avg_line_length": 26.700000762939453,
"blob_id": "7d6e2c0a342c1c6ad6032eae1056a1aea41eafd5",
"content_id": "f8dde1599bfc49d08f0aebf8943157100a84db39",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "PHP",
"length_bytes": 557,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 20,
"path": "/Server/madAmpPythonMessenger.php",
"repo_name": "lance59/Monoamp-Repo",
"src_encoding": "UTF-8",
"text": "<?php\n\n// Function to log PHP activity\n// Prevent caching.\nheader('Cache-Control: no-cache, must-revalidate');\nheader('Expires: Mon, 01 Jan 1996 00:00:00 GMT');\n\n$command = json_decode(file_get_contents('php://input'), true);\n\n// python argument must be in quotes - commands have < as start character\n// that causes error bash: argv: No such file or directory\n$str = \"python Drivers/rs232_simulator.py \" . chr(39) . $command['serStr'] . chr(39);\n\nexec($str, $output);\n\n//echo array as string back to javascript - ajax call response\necho $output[0];\n\n\n?>\n\n\n\n"
},
{
"alpha_fraction": 0.7121211886405945,
"alphanum_fraction": 0.7121211886405945,
"avg_line_length": 32,
"blob_id": "f1b9c14d44210f80a3366d5f08212a8b80e5dbb6",
"content_id": "e13651b48ffbe8cce4c8e250cd955002d38570a7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 132,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 4,
"path": "/update_server.sh",
"repo_name": "lance59/Monoamp-Repo",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n# Script to copy files to /var/www/html\nsudo cp -a /home/udooer/Monoamp-Repo/. /var/www/html/\necho \"File copy complete\"\n"
},
{
"alpha_fraction": 0.5981717109680176,
"alphanum_fraction": 0.6043984889984131,
"avg_line_length": 34.271026611328125,
"blob_id": "d727c6654df542dab6656ac7675901be4d15853d",
"content_id": "a5d415cde5801e8e157c044bcbfa4d77b10103c1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 7548,
"license_type": "no_license",
"max_line_length": 164,
"num_lines": 214,
"path": "/Components/Mobile/RemoteSettings/RemoteSettings.js",
"repo_name": "lance59/Monoamp-Repo",
"src_encoding": "UTF-8",
"text": "(function(){\n\t'use strict';\n var controllerId = 'RemoteSettings',\n \tremote = angular.module('MadAmpApp');\n \n \n remote.controller(controllerId, ['MadAmpAPIservice', '$scope', '$sce', '$filter', '$q', '$interval', viewModel]);\n \n function viewModel(MadAmpAPIservice, $scope, $sce, $filter, $q, $interval){\n \tvar attributeToggleTemplate = '<div class=\"ui-grid-cell-contents\" ng-click=\"grid.appScope.toggleSettingsButton(row)\">'\n \t\t\t\t\t\t\t\t\t+'<button ng-if=\"grid.appScope.toggleVisible(row) == 1\" class=\"btn btn-success settingsButton\"><i class=\"fa fa-check\" aria-hidden=\"true\"></i></button>'\n \t\t\t\t\t\t\t\t\t+'<button ng-if=\"grid.appScope.toggleVisible(row) == 0\" class=\"btn btn-danger settingsButton\"><i class=\"fa fa-times\" aria-hidden=\"true\"></i></button>'\n \t\t\t\t\t\t\t\t\t+'</div>',\n \t\tglobalGridRowHeight = 76;\n \t\n \t$scope.oneAtATime = true;\n \t$scope.grids = [];\n \t$scope.gridApi = [];\n \t$scope.zoneGrid = {};\n \t\n\t\t$scope.registerGridApi = function(index, gridApi){\n\t\t //set gridApi on scope\n\t\t $scope.gridApi[index] = gridApi;\n\t\t gridApi.rowEdit.on.saveRow($scope, $scope.saveRow);\n\t\t};\n\t\t\n\t\t$scope.saveRow = function( rowEntity ) {\n\t\t // create a fake promise - normally you'd use the promise returned by $http or $resource\n\t\t var toggleData, currentGrid;\n\n \t\tif (rowEntity.hasOwnProperty('zoneName')){\n\t\t\t\n\t\t\t\tif (rowEntity.zoneName.trim() == \"\"){\n\t\t\t\t\trowEntity.zoneName = \"Zone \"+ rowEntity.positionAddress;\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\ttoggleData = \n\t\t\t\t{\n\t\t\t\t tableName: \"zones\",\n\t \t field: \"zoneName\",\n\t \t fieldValue: rowEntity.zoneName,\n\t \t pk: \"positionAddress\",\n\t \t pkValue: rowEntity.positionAddress\t\n\t\t\t\t}\n\t\t\t\t\t\t\t\t\n\t\t\t\tcurrentGrid = 0;\n\t\t\t}\n\t\t\telse if (rowEntity.hasOwnProperty('sourceName')){\n\t\t\t\t\n\t\t\t\tif (rowEntity.sourceName.trim() == \"\"){\n\t\t\t\t\trowEntity.sourceName = \"Source \"+ 
rowEntity.positionAddress;\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\ttoggleData = \n\t\t\t\t{\n\t\t\t\t tableName: \"sources\",\n\t \t field: \"sourceName\",\n\t \t fieldValue: rowEntity.sourceName,\n\t \t pk: \"positionAddress\",\n\t \t pkValue: rowEntity.positionAddress\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tcurrentGrid = 1;\n\t\t\t}\n\t\t\t\n\t\t\tvar promise = MadAmpAPIservice.updateSetting(toggleData);\n\t\t\t$scope.gridApi[currentGrid].rowEdit.setSavePromise(rowEntity, promise);\n\t\t\tconsole.log(JSON.stringify(toggleData));\n\n \t}\n \t\n \t$scope.toggleSliders = function(){\n \t\tconsole.log(\"sliders on:\" + $scope.slidersOn);\n \t\t//debugger;\n \t\t\n \t\tvar toggleData = \n\t\t\t{\n\t\t\t tableName: \"attributeControlMode\",\n \t field: \"slidersOn\",\n \t fieldValue: $scope.slidersOn? \"0\": \"1\",\n \t pk: \"1\",\n \t pkValue: \"1\"\n\t\t\t}\n\t\t\t\n\t\t\tMadAmpAPIservice.updateSetting(toggleData).then(function(resp){\n\t\t\t\tconsole.log(\"toggled control mode successfully!\");\n\t\t\t}, function(resp){\n\t\t\t\tconsole.log(\"error toggling control mode\");\n\t\t\t\t$scope.slidersOn = $scope.slidersOn ? 0: 1; \n\t\t\t});\n \t}\n \t$scope.toggleSettingsButton = function(row){\n \t\tvar toggleData;\n \t\t$scope.currentRow = row;\n \t\tif (row.entity.hasOwnProperty('activeStatus')){\n\t\t\t\ttoggleData = \n\t\t\t\t{\n\t\t\t\t tableName: \"zones\",\n\t \t field: \"activeStatus\",\n\t \t fieldValue: !parseInt(row.entity.activeStatus.toString()) ? \"1\":\"0\",\n\t \t pk: \"positionAddress\",\n\t \t pkValue: row.entity.positionAddress\t\n\t\t\t\t}\n\t\t\t}\n\t\t\telse if (row.entity.hasOwnProperty('visibleStatus')){\n\t\t\t\ttoggleData = \n\t\t\t\t{\n\t\t\t\t tableName: \"attributes\",\n\t \t field: \"visibleStatus\",\n\t \t fieldValue: !parseInt(row.entity.visibleStatus.toString()) ? 
\"1\":\"0\",\n\t \t pk: \"control\",\n\t \t pkValue: row.entity.control\n\t\t\t\t}\n\t\t\t}\n\t\t\t\n\t\t\tconsole.log(toggleData);\n\t\t\t\n\t\t\tMadAmpAPIservice.updateSetting(toggleData).then(function(resp){\n\t\t\t\tconsole.log(resp.data);\n\t\t\t\tif(resp.data.tableName == \"attributes\"){\n\t\t\t\t\t$scope.attributes[resp.data.field] = parseInt(resp.data.fieldValue);\n\t\t\t\t}\n\t\t\t\telse{\n\t\t\t\t\t$scope.zoneSettings[resp.data.field] = parseInt(resp.data.fieldValue);\n\t\t\t\t}\n\t\t\t\t$scope.currentRow.entity[resp.data.field] = parseInt(resp.data.fieldValue);\n\t\t\t}, function(resp){\n\t\t\t\tconsole.log(\"error importing app settings\");\n\t\t\t});\n \t}\n \t\n \tMadAmpAPIservice.getSettings().then(function(resp){\n\t\t\tparseMenuSettings(resp.data)\n\t\t\t}, function(resp){\n\t\t\t\tconsole.log(\"error importing app settings\")\n\t\t\t});\n\t\t\t\n\t\t\t\n\t\t$scope.toggleVisible = function (row){\n\t\t\tif (row.entity.hasOwnProperty('activeStatus')){\n\t\t\t\treturn row.entity.activeStatus;\n\t\t\t}\n\t\t\tif (row.entity.hasOwnProperty('visibleStatus')){\n\t\t\t\treturn row.entity.visibleStatus;\n\t\t\t}\n\t\t\treturn (\"error setting toggle button for \"+row.entity);\n\t\t};\n\t\t\n \tfunction parseMenuSettings(resp)\n\t\t{\n\t\t\t$scope.slidersOn = parseInt(resp.slice(0,1)[0].slidersOn);\n\t\t\t$scope.zoneSettings = resp.slice(1,13);\n\t\t\t$scope.sourceSettings = resp.slice(13,19);\n\t\t\tvar attributes = resp.slice(19,resp.length);\n\t\t\t$scope.attributes = $filter('getRangeControls')(attributes);\n\t\t\t\t\t\t\n\t\t\t$scope.zoneDefs = [ {name: 'positionAddress', displayName: 'Id', width: \"10%\", enableCellEdit: false, headerCellClass: 'gridHeader' }, \n\t\t\t\t\t\t\t\t{name: 'zoneName', displayName: 'Zone Name', width: \"60%\", headerCellClass: 'gridHeader', enableCellEditOnFocus: true }, \n \t\t\t\t\t\t\t\t{name: 'activeStatus', displayName: 'Active', enableCellEdit: false, width:\"30%\",\n \t\t\t\t\t\t\t\tcellTemplate: 
attributeToggleTemplate, headerCellClass: 'gridHeader'}, \n\t\t\t\t\t\t\t ];\n\t\t\t\t\t\t\t \n\t\t\t$scope.sourceDefs = [ {name: 'positionAddress', displayName: 'Id', width: \"15%\", enableCellEdit: false, headerCellClass: 'gridHeader'}, \n\t\t\t\t\t\t\t\t {name: 'sourceName', displayName: 'Source Name', headerCellClass: 'gridHeader', enableCellEditOnFocus: true }, \n\t\t\t\t\t\t\t \t];\n\t\t\t\t\t\t\t \t\n\t\t\t$scope.attributeDefs = [ {name: 'displayName', displayName: 'Attribute Name', enableCellEdit: false, headerCellClass: 'gridHeader'}, \n \t\t\t\t\t\t\t\t\t {name: 'visibleStatus', displayName: 'Visible', enableCellEdit: false, width:\"30%\",\n\t \t\t\t\t\t\t\t\t cellTemplate: attributeToggleTemplate, headerCellClass: 'gridHeader'}, \n\t\t\t\t\t\t\t\t ];\n\t\t\t\n\t\t\t$scope.grids = [{\n\t\t\t\t\t\t open: false,\n\t\t\t\t\t\t header: \"Zone Settings\",\n\t\t\t\t\t\t options: { enableHorizontalScrollbar: 0, \n\t\t\t\t\t\t\t\t\t enableVerticalScrollbar: 0,\n\t\t\t\t\t\t \t\t\t enableColumnMenus: false,\n\t\t\t\t \t\t\t\t\t enableCellSelection: true,\n\t\t\t\t \t\t\t\t\t data: $scope.zoneSettings,\n\t\t\t\t \t\t\t\t\t columnDefs: $scope.zoneDefs,\n\t\t\t\t \t\t\t\t\t rowHeight: globalGridRowHeight,\n\t\t\t\t \t\t\t\t\t onRegisterApi: function(gridApi){$scope.registerGridApi(0, gridApi);}\n\t\t\t\t\t\t\t\t },\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t open: false,\n\t\t\t\t\t\t\t header: \"Source Settings\",\n\t\t\t\t\t\t \t options: { enableHorizontalScrollbar: 0, \n\t\t\t\t\t\t\t\t\t\tenableVerticalScrollbar: 0,\n\t\t\t\t\t\t \t \t\t\tenableColumnMenus: false,\n\t\t\t\t \t\t\t \t\t enableCellSelection: true,\n\t\t\t\t \t\t\t\t\t data: $scope.sourceSettings,\n\t\t\t\t \t\t\t\t\t columnDefs: $scope.sourceDefs,\n\t\t\t\t \t\t\t\t\t rowHeight: globalGridRowHeight,\n\t\t\t\t \t\t\t\t\t \tonRegisterApi: function(gridApi){$scope.registerGridApi(1, gridApi);}\n\t\t\t\t\t\t\t\t \t },\n\t\t\t\t\t\t\t },\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t open: false,\t\t\t\t\t\t\t 
\n\t\t\t\t\t\t\t header: \"Attribute Settings\",\n\t\t\t\t\t\t \t options: { enableHorizontalScrollbar: 0, \n\t\t\t\t\t\t\t\t\t\tenableVerticalScrollbar: 0,\n\t\t\t\t\t\t \t \t\t\tenableColumnMenus: false,\n\t\t\t\t \t\t\t \t\t enableCellSelection: true,\n\t\t\t\t \t\t\t\t\t data: $scope.attributes,\n\t\t\t\t \t\t\t\t\t columnDefs: $scope.attributeDefs,\n\t\t\t\t \t\t\t\t\t rowHeight: globalGridRowHeight,\n\t\t\t\t \t\t\t\t\t \tonRegisterApi: function(gridApi){$scope.registerGridApi(2, gridApi);}\n\t\t\t\t\t\t\t\t \t },\n\t\t\t\t\t\t\t },\n\t\t\t\t\t\t\t ];\n\t\t}\n\t}\n})();\n"
},
{
"alpha_fraction": 0.5945000052452087,
"alphanum_fraction": 0.6230000257492065,
"avg_line_length": 20.5053768157959,
"blob_id": "1a8a191153114147d4d24267a808ace6051d3ad9",
"content_id": "ab0f9cb8eb00eb98957216686c94db2255a0718c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2000,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 93,
"path": "/Server/Drivers/rs232_handler.py",
"repo_name": "lance59/Monoamp-Repo",
"src_encoding": "UTF-8",
"text": "import serial\nimport time\nimport sys\n\n\ndef main():\n\t# argv[1] data to transmit to RS232 MAD amp\n\ttx_data = sys.argv[1]\n\n\t# attach carriage return as command terminator\n\ttx_data = (tx_data + chr(13))\n\t\n\t# check if first character in argv is command symbol < set status\n\tif ((tx_data[0]) == \"<\"):\n\t\tcmd_status = 1\n\telse:\n\t\tcmd_status = 0\n\t\n\t# pass tx_data and cmd_status to Serial Port return with response\n\trx_data = rs232_driver(tx_data,cmd_status)\n\t\t\n\t# print response to console or Server - whomever called the program\n\tprint rx_data\n\t\n\ndef rs232_driver(data_to_tx, status_cmd):\n\t\n\t# create data_buffer to receive RS232 string from MAD amp\n\tdata_buffer = ''\n\t\n\tser = serial.Serial('/dev/ttyMCC',115200,timeout=1)\n\tser.flushOutput()\n\tser.flushInput()\n\n\t# status_cmd == 0 then query else cmd == 1 send control command then query\n\t# hashtag and target_count setup loop to receive expected length of response\n\t# sleep time required between write(transmit) and read(receive)\n\tif status_cmd == 0:\n\t\thashtag = 0\n\t\ttarget_count = 2\n\t\tser.write(data_to_tx)\n\t\ttime.sleep(0.08)\n\t\n\t\twhile (hashtag != target_count):\n\t\t\tfor c in ser.read():\n\t\t\t\tif (c > (chr(13))):\n\t\t\t\t\tdata_buffer += c\n\t\t\t\tif c == '#':\n\t\t\t\t\thashtag += 1\n\t\t\t\t\t\n\t\tser.close()\n\t\t\t\t\t\n\t\treturn data_buffer\n\telse:\n\t\ttarget_count = 1\n\t\thashtag = 0\n\t\tser.write(data_to_tx)\n\t\ttime.sleep(0.08)\n\t\t\n\t\twhile (hashtag != target_count):\n\t\t\tfor c in ser.read():\n\t\t\t\tif (c > (chr(13))):\n\t\t\t\t\tdata_buffer += c\n\t\t\t\tif c == '#':\n\t\t\t\t\thashtag += 1\n\t\t\n\t\t# delete the buffer then recreate buffer pointing to null\n\t\tdel data_buffer\n\t\tdata_buffer = ''\n\t\t\n\t\thashtag = 0\n\t\ttarget_count = 2\n\t\t\n\t\t# generate query_cmd from original data_to_tx\n\t\tquery_cmd = ''.join(['?', data_to_tx[1:3], 
chr(13)])\n\t\t\n\t\tser.write(query_cmd)\n\t\ttime.sleep(0.08)\n\t\t\n\t\twhile (hashtag != target_count):\n\t\t\tfor c in ser.read():\n\t\t\t\tif (c > (chr(13))):\n\t\t\t\t\tdata_buffer += c\n\t\t\t\tif c == '#':\n\t\t\t\t\thashtag += 1\n\t\t\t\t\t\n\t\tser.close()\n\t\t\t\t\t\n\t\treturn data_buffer\n\n\t\nif __name__ == '__main__':\n\tmain()\n"
},
{
"alpha_fraction": 0.5565908551216125,
"alphanum_fraction": 0.6078377366065979,
"avg_line_length": 27.286821365356445,
"blob_id": "7b0ad0f0a37b5072df29126f7d47385591a91831",
"content_id": "efdce692ffd26d6a25ab15d76052942cf173559c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 3649,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 129,
"path": "/Data/madAmp.sql",
"repo_name": "lance59/Monoamp-Repo",
"src_encoding": "UTF-8",
"text": "-- phpMyAdmin SQL Dump\n-- version 4.0.10deb1\n-- http://www.phpmyadmin.net\n--\n-- Host: localhost\n-- Generation Time: Oct 30, 2016 at 11:29 PM\n-- Server version: 5.5.52-0ubuntu0.14.04.1\n-- PHP Version: 5.5.9-1ubuntu4.19\n\nSET SQL_MODE = \"NO_AUTO_VALUE_ON_ZERO\";\nSET time_zone = \"+00:00\";\n\n\n/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;\n/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;\n/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;\n/*!40101 SET NAMES utf8 */;\n\n--\n-- Database: `madAmp`\n--\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `attributeControlMode`\n--\n\nCREATE TABLE IF NOT EXISTS `attributeControlMode` (\n `slidersOn` tinyint(1) NOT NULL DEFAULT '0',\n PRIMARY KEY (`slidersOn`)\n) ENGINE=InnoDB DEFAULT CHARSET=latin1 COMMENT='toggle slider controls or button controls';\n\n--\n-- Dumping data for table `attributeControlMode`\n--\n\nINSERT INTO `attributeControlMode` (`slidersOn`) VALUES\n(0);\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `attributes`\n--\n\nCREATE TABLE IF NOT EXISTS `attributes` (\n `control` text NOT NULL,\n `visibleStatus` tinyint(1) NOT NULL,\n `displayName` varchar(30) DEFAULT NULL,\n `upIcon` varchar(30) DEFAULT NULL,\n `downIcon` varchar(30) DEFAULT NULL,\n `min` int(11) DEFAULT NULL,\n `max` int(11) DEFAULT NULL,\n `type` varchar(30) DEFAULT NULL,\n `offset` int(11) NOT NULL DEFAULT '0'\n) ENGINE=InnoDB DEFAULT CHARSET=latin1;\n\n--\n-- Dumping data for table `attributes`\n--\n\nINSERT INTO `attributes` (`control`, `visibleStatus`, `displayName`, `upIcon`, `downIcon`, `min`, `max`, `type`, `offset`) VALUES\n('TR', 1, 'Treble', 'fa fa-chevron-up', 'fa fa-chevron-down', 0, 14, 'range', 7),\n('BS', 1, 'Bass', 'fa fa-chevron-up', 'fa fa-chevron-down', 0, 14, 'range', 7),\n('BL', 1, 'Balance', 'fa fa-chevron-right', 'fa fa-chevron-left', 0, 20, 
'range', 10),\n('VO', 1, 'Volume', 'fa fa-volume-up', 'fa fa-volume-down', 0, 38, 'range', 0),\n('CH', 1, 'Source', NULL, NULL, 1, 6, 'dropDown', 0),\n('PR', 1, 'Power', NULL, NULL, 0, 1, 'toggle', 0),\n('MU', 1, 'Mute', NULL, NULL, 0, 1, 'toggle', 0);\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `sources`\n--\n\nCREATE TABLE IF NOT EXISTS `sources` (\n `sourceName` text NOT NULL,\n `unitAddress` int(11) NOT NULL,\n `positionAddress` int(11) NOT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=latin1;\n\n--\n-- Dumping data for table `sources`\n--\n\nINSERT INTO `sources` (`sourceName`, `unitAddress`, `positionAddress`) VALUES\n('TurnTable', 0, 1),\n('Dvd Player', 0, 2),\n('Ipod2', 0, 3),\n('BlueRay Player', 0, 4),\n('Bluetooth', 0, 5),\n('Source-6', 0, 6);\n\n-- --------------------------------------------------------\n\n--\n-- Table structure for table `zones`\n--\n\nCREATE TABLE IF NOT EXISTS `zones` (\n `zoneName` text CHARACTER SET utf8 NOT NULL,\n `unitAddress` int(1) NOT NULL,\n `positionAddress` int(1) NOT NULL,\n `activeStatus` tinyint(1) NOT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=latin1;\n\n--\n-- Dumping data for table `zones`\n--\n\nINSERT INTO `zones` (`zoneName`, `unitAddress`, `positionAddress`, `activeStatus`) VALUES\n('Master Bedroom', 1, 1, 1),\n('Dining Room', 1, 2, 1),\n('Living Room', 1, 3, 1),\n('Jordan''s Rm', 1, 4, 1),\n('Rec Room', 1, 5, 1),\n('Office', 1, 6, 1),\n('Foyer', 2, 1, 1),\n('Bar', 2, 2, 1),\n('Sauna', 2, 3, 1),\n('Garage', 2, 4, 1),\n('Deck', 2, 5, 1),\n('Not used', 2, 6, 0);\n\n/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;\n/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;\n/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;\n"
},
{
"alpha_fraction": 0.6040462255477905,
"alphanum_fraction": 0.6040462255477905,
"avg_line_length": 27.83333396911621,
"blob_id": "19a0805491097bda26435134005d5bcc1c919887",
"content_id": "a7c657b5c7173f79066035ebb1d6ba357938c43d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 346,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 12,
"path": "/Components/Layout/TopNav.js",
"repo_name": "lance59/Monoamp-Repo",
"src_encoding": "UTF-8",
"text": "(function(){\n\t'use strict';\n var controllerId = 'TopNav',\n \tremote = angular.module('MadAmpApp');\n\n remote.controller(controllerId, ['$scope', '$location', viewModel]);\n function viewModel($scope, $location){\n \t\t$scope.isActive = function (viewLocation) {\n \treturn viewLocation === $location.path();\n \t};\n } \t\n})();\n"
}
] | 17 |
git202122/git-demo1405
|
https://github.com/git202122/git-demo1405
|
eef71807d7c4f9fff0af7223877638fb3d7e5cec
|
288fb97c72b6d81fc3a30576b8a1a005c4842e1a
|
fdf23f13510a8f0c04696e660e4d0b4298393716
|
refs/heads/main
| 2023-04-28T19:50:42.388822 | 2021-05-21T16:52:46 | 2021-05-21T16:52:46 | 367,327,712 | 0 | 0 | null | 2021-05-14T10:26:50 | 2021-05-17T09:13:15 | 2021-05-18T07:54:31 |
Python
|
[
{
"alpha_fraction": 0.7642276287078857,
"alphanum_fraction": 0.7642276287078857,
"avg_line_length": 19.5,
"blob_id": "af925ec76960b3ccfcaf9ffee784ccb878fc536e",
"content_id": "fb45f9cf4974cfa5d4c5572aef16d0f44090a653",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 123,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 6,
"path": "/prasad.py",
"repo_name": "git202122/git-demo1405",
"src_encoding": "UTF-8",
"text": "import voda\nimport jason\nprint \"I love java\"\nprint \"We have a lovey weather today\"\nprint \"love you\"\nprint \"rebase example\"\n"
}
] | 1 |
errorcodefive/fantasyscoretweak
|
https://github.com/errorcodefive/fantasyscoretweak
|
0425474317ca124016ff1f9d8730f97e0a95bcf0
|
496edd3db113d5ac46504bbbe38b162cebb337f0
|
5b991b4e1619bdcda4e83b253c3e999b7c997576
|
refs/heads/master
| 2016-08-11T07:14:55.177024 | 2016-02-23T22:17:14 | 2016-02-23T22:17:14 | 51,788,619 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6434309482574463,
"alphanum_fraction": 0.6574242115020752,
"avg_line_length": 32.267242431640625,
"blob_id": "ca7aa4ad7e536f43bf27979e12ccbb6459707e57",
"content_id": "644a7fb9d29b78b44ecc86b6346d4cbe76e99f2e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3859,
"license_type": "no_license",
"max_line_length": 299,
"num_lines": 116,
"path": "/nflfant.py",
"repo_name": "errorcodefive/fantasyscoretweak",
"src_encoding": "UTF-8",
"text": "import nflgame\nimport fractions\nimport xlwt\nfrom formatting import formatName,getTeam,retName,retTeam,retID, retPos\nfrom nflgamefunctions import print_playerid, getScore, getFantScoring, padStats\n\nyear=2015\nplayer_list_fant = []\nformatplayer_list=[]\nfplayer_list_fant = open(\"cleanlist.txt\", 'r')\nteam_list =[]\nplay_team_list=[]\nteam_rosters={}\nteam_games=[]\n\n#load fantasy scoring system\nfantScoring = getFantScoring(\"fantScoringSystem.txt\")\nprint \"Fantasy scoring system loaded.\"\n\n\n#padStats(\"Calais Campbell#00-0026190$ARI&DL%defense_tkl: 46, defense_ast: 15, defense_tkl_loss: 16, defense_tkl_loss_yds: 59, defense_pass_def: 2, defense_qbhit: 18, defense_sk_yds: -44, defense_sk: 5.0, penalty: 3, penalty_yds: 25, defense_tkl_primary: 1, defense_frec_yds: 0, defense_frec: 1\",fantScoring)\n\n#Only players from our transaction list and roster list are used\n#Decided not to use every NFL player since many are unncessary since no one would pick them up\nfor line in fplayer_list_fant:\n player_list_fant.append(line.strip())\n#formatplayer_list.append(formatName(line))\n\nfor x in range (0,32):\n team_list.append(nflgame.teams[x][0])\n\nfor x in team_list:\n xTeamRoster = []\n for y in player_list_fant:\n if getTeam(y)==x:\n\n playerInfo = formatName(y) + \"#\" + print_playerid(formatName(y),x)+\"$\"+x+\"&\"+retPos(y)\n xTeamRoster.append(playerInfo)\n team_rosters[x]=xTeamRoster\n print x+\" roster loaded.\"\n\n#yearly games combined\n#kickers need play by play since kick distances are not recorded in combine\n\nfor x in team_list:\n teamGames=nflgame.games(year,home=x,away=x,kind='REG')\n teamCombined=nflgame.combine_play_stats(teamGames)\n playnum=-1\n print x+\" stats loaded.\"\n for y in team_rosters[x]:\n\n playnum=playnum+1\n playerStats = teamCombined.playerid(retID(y))\n try:\n team_rosters[x][playnum]=team_rosters[x][playnum]+\"%\"+playerStats.formatted_stats()\n #print playerStats.formatted_stats()\n except AttributeError:\n 
print \"No stats for \"+y\n\noutBook = xlwt.Workbook()\noutSheet=outBook.add_sheet(\"IndivStats\")\ncols = [\"First\",\"Last\",\"Team\",\"Pos\",\"GSIS\"]\nfor x in fantScoring:\n cols.append(x)\nrow_count=1\nfor col, headings in enumerate(cols):\n outSheet.write(0,col,headings)\nprint \"Outputting to Excel\"\nfor x in team_list:\n for y in team_rosters[x]:\n row_count=row_count+1\n first = retName(y)\n\n last=first[first.rfind(\" \"):].strip()\n first=first[:first.find(\" \")].strip()\n outSheet.write(row_count,0,first)\n outSheet.write(row_count,1,last)\n outSheet.write(row_count,2,y[y.find(\"$\")+1:y.find(\"&\")])\n outSheet.write(row_count,3,y[y.find(\"&\")+1:y.find(\"%\")])\n outSheet.write(row_count,4,retID(y))\n for z, statval in enumerate(padStats(y,fantScoring)):\n outSheet.write(row_count,z+5,statval)\n print x + \" added to Excel.\"\noutBook.save(\"toExcelPlayerStats.xls\")\n#loop for on the fly changing of stats\n# while True:\n# userIn=raw_input(\"X to exit, anything else to continue.\")\n# if (userIn==\"X\" or userIn==\"x\"):\n# break\n# newfantvalues=getFantScoring(\"fantScoringSystem.txt\")\n# for x in team_list:\n# for y in team_rosters[x]:\n#\n# print retName(y)\n# print getScore(y,newfantvalues)\n\n#used this to determine all stats that are recorded\n# stat_types=set()\n# for x in team_list:\n# for y in team_rosters[x]:\n# temp = y[y.find(\"%\")+1:]+\", \"\n# while temp.find(\":\")>0:\n# stat_temp=temp[:temp.find(\":\")]\n# stat_types.add(stat_temp)\n# temp=temp[temp.find(\",\")+1:]\n# statout = open(\"stat_types.txt\",\"w\")\n# stat_types=sorted(stat_types)\n# for x in stat_types:\n# statout.write(\"%s\\n\"%x.strip())\n\n\n\n\n#for each player\n#go through stats\n#assign total numerical value\n"
},
{
"alpha_fraction": 0.6227436661720276,
"alphanum_fraction": 0.6317689418792725,
"avg_line_length": 28.157894134521484,
"blob_id": "9efc3ab5b5c814ae20a54b6d936c04db038e0194",
"content_id": "7a8f5814fa14d9406b6057e337840ffaa61098c4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 554,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 19,
"path": "/formatting.py",
"repo_name": "errorcodefive/fantasyscoretweak",
"src_encoding": "UTF-8",
"text": "def formatName(firstlast):\n name = firstlast.strip()\n\n firstname=name[0:name.find(\" \")]\n lastname=name[name.find(\" \"):name.find(\"&\")].strip()\n return firstname+\" \"+lastname\n\ndef getTeam(input):\n #format fName lName TEAM\n output = input[input.rfind(\"$\")+1:]\n return output.strip()\ndef retName(input):\n return input[:input.find(\"#\")]\ndef retID(input):\n return input[input.find(\"#\")+1:input.find(\"$\")]\ndef retTeam(input):\n return input[input.find(\"$\")+1:]\ndef retPos(input):\n return input[input.find(\"&\")+1:input.find(\"$\")]\n"
},
{
"alpha_fraction": 0.6152466535568237,
"alphanum_fraction": 0.6331838369369507,
"avg_line_length": 24.930233001708984,
"blob_id": "34fe0af24ec52f496b795588a633e398a6161dbc",
"content_id": "9bc7ce21f1506a7bead027d2f125211446024537",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1115,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 43,
"path": "/playerlistcleaner.py",
"repo_name": "errorcodefive/fantasyscoretweak",
"src_encoding": "UTF-8",
"text": "import re\nfrom sets import Set\n\neof = False\ncleanlist = []\nmesslist = open(\"messlist.txt\",'r')\nlincount = 0\nplayerchunk = ['','','','']\nfor x in range(0,338):\n for i in range(0,4):\n playerchunk[i]=messlist.readline()\n parse = playerchunk[1]\n\n playname = parse[0:parse.rfind('-')].strip()\n if playname.endswith(\"K\"):\n playname=playname[:-1]+\"&K\"\n else:\n playname=playname[:-2]+\"&\"+playname[-2:]\n\n playteam = parse[parse.rfind('-')+1:parse.rfind('-')+5]\n cleanlist.append(playname+\"$\"+playteam.strip())\n\nmesslist.close()\nmesslist = open(\"messyrosters.txt\",'r')\nline=[]\nfor x in range (0,232):\n pickNo = messlist.readline()#don't need\n pickName = messlist.readline() #keep whole\n pickInfo = messlist.readline()\n pickTeam = pickInfo[pickInfo.find('-')+1:pickInfo.find('PICK')]\n pickPos = pickInfo[:pickInfo.find('-')-1]\n cleanlist.append((pickName.strip()+\"&\"+pickPos.strip()+\"$\"+pickTeam.strip()))\n\noutlist = open(\"cleanlist.txt\",'w')\n\ncleanset = Set(cleanlist)\n\ncleanlist = list(cleanset)\n\n\nfor x in cleanlist:\n outlist.write(\"%s\\n\" % x)\noutlist.close()\n"
},
{
"alpha_fraction": 0.602226734161377,
"alphanum_fraction": 0.6088056564331055,
"avg_line_length": 30.365079879760742,
"blob_id": "dee8dc4d6298801ce5e968cdb330cd16c48f3820",
"content_id": "fd6300a9f5214cde95f62f676f7a51690e37c6bb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1976,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 63,
"path": "/nflgamefunctions.py",
"repo_name": "errorcodefive/fantasyscoretweak",
"src_encoding": "UTF-8",
"text": "import nflgame\nfrom fractions import Fraction\n\ndef find_playerid(name, team=None):\n if not nflgame.find(name, team):\n return 'No match'\n if len(nflgame.find(name, team)) > 1:\n print 'Could be either {}'.format(\n ' or '.join([str(p) for p in nflgame.find(name, team)]))\n return [p.gsis_id for p in nflgame.find(name, team)]\n else:\n return nflgame.find(name, team)[0].gsis_id\n\ndef print_playerid(name, team=None):\n return find_playerid(name, team)\n\ndef getScore(player,statsList):\n playerstats=player[player.find(\"%\")+1:]+\",\"\n playerScore=0.0\n while (playerstats.find(\":\")>0):\n current_stat=playerstats[:playerstats.find(\":\")].strip()\n for x in statsList:\n\n if current_stat==x[:x.rfind(\"=\")]:\n #from statslist\n stat_value = Fraction(x[x.rfind(\"=\")+1:].strip())\n stat_multiplier=playerstats[playerstats.find(\":\")+1:playerstats.find(\",\")].strip()\n playerScore = playerScore+(float(stat_value)*float(stat_multiplier))\n\n playerstats=playerstats[playerstats.find(\",\")+1:]\n return playerScore\n\n\n\ndef padStats(player,statsList):\n playerstats=player[player.find(\"%\")+1:]+\",\"\n padded = []\n\n for num,stat in enumerate(statsList):\n\n statname=stat[:stat.find(\"=\")]+\":\"\n\n if playerstats.find(statname)==-1:\n padded.append(0)\n else:\n statpos=playerstats.find(statname)\n temp = playerstats[playerstats.find(\":\",statpos)+1:playerstats.find(\",\",statpos)]\n #temp = playerstats[statpos:playerstats.find(\":\",statpos)]\n padded.append(temp.strip())\n\n return padded\n\n\n\ndef getFantScoring(filename):\n fantStats=open(filename,\"r\")\n fantScoring=[]\n for stat in fantStats:\n statName=stat[:stat.find(\"=\")]\n statValue=stat[stat.rfind(\"=\"):]\n fantScoring.append(statName+statValue.strip())\n fantStats.close()\n return fantScoring\n"
}
] | 4 |
efemurat/covidstatistic
|
https://github.com/efemurat/covidstatistic
|
c2adff23d358ff902a87f4d8229d43359405c54d
|
7427d62df6399ba6c2eebd9c346f1b717deae67d
|
e94d03ac48e90cf8bcfab1775db8766fc23031a7
|
refs/heads/main
| 2022-12-28T12:01:09.010336 | 2020-10-07T21:54:48 | 2020-10-07T21:54:48 | 302,145,924 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6430547833442688,
"alphanum_fraction": 0.6519092321395874,
"avg_line_length": 30.719297409057617,
"blob_id": "d0c4f084f18df7a9ee3b6fc3fc3848e98bd4f0a6",
"content_id": "c1e0101d93cde8b8ba21ff7d2e49f3a7b78e5459",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1807,
"license_type": "no_license",
"max_line_length": 144,
"num_lines": 57,
"path": "/app.py",
"repo_name": "efemurat/covidstatistic",
"src_encoding": "UTF-8",
"text": "from flask import Flask, render_template, request\nimport json\nfrom urllib.request import urlopen\nimport requests\n\napp = Flask(__name__)\nheaders = {'content-type': 'application/json'}\nurl = 'https://opendata.ecdc.europa.eu/covid19/casedistribution/json'\n#filename = requests.post(url, data=json.dumps(dict(mynum=123)), headers=headers)\nfilename = \"indir.json\"\n\n\[email protected]('/', methods=['GET'])\ndef index():\n return(render_template('home.html'))\n\[email protected](\"/\" , methods=['POST'])\n\n\ndef result():\n country1 = request.form['country']\n casesCountry1 = getCases(country1)\n deathsCountry1 = getDeaths(country1)\n dateLabels = getDates()\n return(render_template('home.html', country1=country1, casesCountry1=casesCountry1, deathsCountry1=deathsCountry1, dateLabels=dateLabels ))\n\ndef getCases(country):\n with open(filename, 'r') as json_file:\n jsonData = json.load(json_file)\n caseList = []\n for record in jsonData['records']:\n if record['countriesAndTerritories'] == country:\n caseList.append(int(record['cases']))\n return(list(reversed(caseList)))\n\ndef getDates():\n with open(filename, 'r') as json_file:\n jsonData = json.load(json_file)\n dateList = []\n for record in jsonData['records']:\n if record['countryterritoryCode'] == 'ZMB':\n dateList.append(record['dateRep'])\n return(list(reversed(dateList)))\n\ndef getDeaths(country):\n with open(filename, 'r') as json_file:\n jsonData = json.load(json_file)\n deathList = []\n for record in jsonData['records']:\n if record['countriesAndTerritories'] == country:\n deathList.append(int(record['deaths']))\n return(list(reversed(deathList)))\n\n\n\nif __name__ == '__main__':\n app.run(debug=True)"
}
] | 1 |
Ronnicks/oscaremmanuelcamarillohernandez-codigospython
|
https://github.com/Ronnicks/oscaremmanuelcamarillohernandez-codigospython
|
4957d99b66d98dfc4be6529ec04d9400ab6b1d73
|
0b2aea4221155aac635794507c82cab800cc83ba
|
c275386e1d0b4ea56ebb5a98a69cbb2b291d1e21
|
refs/heads/main
| 2023-08-05T01:46:41.913808 | 2021-09-14T05:26:59 | 2021-09-14T05:26:59 | 406,235,792 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5341463685035706,
"alphanum_fraction": 0.5780487656593323,
"avg_line_length": 27.428571701049805,
"blob_id": "918fc6a253a39efcf79e897af533f72b5958673b",
"content_id": "738453f0c05d0130de6ed79e6676ab990bb37bdc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 411,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 14,
"path": "/ejer5.py",
"repo_name": "Ronnicks/oscaremmanuelcamarillohernandez-codigospython",
"src_encoding": "UTF-8",
"text": "from math import sqrt\r\na = float(input(\"Ingrese coeficiente de x^2: \"))\r\nb = float(input(\"Ingrese coeficiente de x: \"))\r\nc = float(input(\"Ingrese coeficiente de variable: \"))\r\nx1=0\r\nx2=0\r\nif ((b**2)-4*a*c) < 0:\r\n print(\"La solucion son números complejos\")\r\nelse:\r\n x1 = (-b+sqrt(b**2-(4*a*c)))/(2*a)\r\n x2 = (-b-sqrt(b**2-(4*a*c)))/(2*a)\r\n print(\"Las soluciones son: \")\r\n print(x1)\r\n print(x2)"
},
{
"alpha_fraction": 0.460317462682724,
"alphanum_fraction": 0.60317462682724,
"avg_line_length": 14.25,
"blob_id": "f8b7fcd0b43e708d863e62126cde0e6d4ea7952e",
"content_id": "7661bf4cad0ac0797103e61f161af64ce4438ad5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 63,
"license_type": "no_license",
"max_line_length": 21,
"num_lines": 4,
"path": "/ejer10.py",
"repo_name": "Ronnicks/oscaremmanuelcamarillohernandez-codigospython",
"src_encoding": "UTF-8",
"text": "lis = [0,1,2,3,4,5,6]\r\nlis.remove(3)\r\nlis.remove(6)\r\nprint(lis)"
},
{
"alpha_fraction": 0.5922330021858215,
"alphanum_fraction": 0.606796145439148,
"avg_line_length": 24,
"blob_id": "45dacf895753a97f02cc6902590f259fab44ada5",
"content_id": "d9daf38cb935ff46cd0fa86d2c22a79f2e3a4307",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 210,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 8,
"path": "/ejer6.py",
"repo_name": "Ronnicks/oscaremmanuelcamarillohernandez-codigospython",
"src_encoding": "UTF-8",
"text": "numero = input(\"Ingrese un número: \")\r\nnum = int(numero)\r\nif num > 0:\r\n print(\"El número es positivo\")\r\nelif num < 0:\r\n print(\"El número es negativo\")\r\nelif num == 0:\r\n print(\"El número es cero\")"
},
{
"alpha_fraction": 0.672340452671051,
"alphanum_fraction": 0.672340452671051,
"avg_line_length": 57.25,
"blob_id": "951875779223828eeac2776915386fba0e4fcb20",
"content_id": "fbd043c7a7cf4f6fced15471452e9206d9b21d70",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 239,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 4,
"path": "/ejer2.py",
"repo_name": "Ronnicks/oscaremmanuelcamarillohernandez-codigospython",
"src_encoding": "UTF-8",
"text": "nombre = input(\"Ingrese su nombre: \")\r\ntelf = input(\"Ingrese su número de teléfono: \")\r\ncorreo = input(\"Ingrese su correo electronico: \")\r\nprint (\"Bienvenido \"+ nombre +\" su número de teléfono es: \" + telf +\" , su correo es: \"+ correo)"
},
{
"alpha_fraction": 0.6785714030265808,
"alphanum_fraction": 0.7023809552192688,
"avg_line_length": 48.79999923706055,
"blob_id": "b823f18213d84a05ca6f9f3d69884950b063c8ee",
"content_id": "6828c630f5f5e1105d64e2e4f67603197649407d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 252,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 5,
"path": "/ejer3.py",
"repo_name": "Ronnicks/oscaremmanuelcamarillohernandez-codigospython",
"src_encoding": "UTF-8",
"text": "res1 = float(input(\"Ingrese el valor de la primera resistencia: \"))\r\nres2 = float(input(\"Ingrese el valor de la segunda resistencia: \"))\r\n\r\ntotal=(res1*res2)/(res1+res2)\r\nprint(\"El valor de la resistencia total en paralelo es: \" + str(total)+ \" ohms.\")"
},
{
"alpha_fraction": 0.5135135054588318,
"alphanum_fraction": 0.5783783793449402,
"avg_line_length": 21.375,
"blob_id": "af9c3a3b6db66a2ac5321280bbbdb1fa4d79d894",
"content_id": "2a53342b039c1e01746b7a92e4c57590de90d882",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 185,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 8,
"path": "/ejer4.py",
"repo_name": "Ronnicks/oscaremmanuelcamarillohernandez-codigospython",
"src_encoding": "UTF-8",
"text": "mat = [2,4],[7,5]\r\nprint(\"Matriz: \")\r\n\r\nfor array in mat: print(array)\r\ndeter =mat[0][0] * mat[1][1] - mat[0][1] * mat[1][0]\r\n\r\nprint(\"La determinante de la matriz es: \")\r\nprint(deter)"
},
{
"alpha_fraction": 0.6106194853782654,
"alphanum_fraction": 0.6106194853782654,
"avg_line_length": 21,
"blob_id": "29fdb7336a68cdd87d8901af0443e02727edd902",
"content_id": "58767eaa1f91a8a6e98e960bcc40874143e5283d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 113,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 5,
"path": "/ejer9.py",
"repo_name": "Ronnicks/oscaremmanuelcamarillohernandez-codigospython",
"src_encoding": "UTF-8",
"text": "pala= input(\"Escriba la palabra que desea invertir: \")\r\ninv= \"\"\r\nfor i in pala: \r\n inv = i + inv \r\nprint(inv)"
},
{
"alpha_fraction": 0.6799163222312927,
"alphanum_fraction": 0.6841003894805908,
"avg_line_length": 30,
"blob_id": "ef5902c958e194067de669971f38d746f3973d82",
"content_id": "4d5f833c68eea211d7a058d9e32fc283b1335fd3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 481,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 15,
"path": "/ejer7.py",
"repo_name": "Ronnicks/oscaremmanuelcamarillohernandez-codigospython",
"src_encoding": "UTF-8",
"text": "print(\"Evaluación Crediticia\")\r\nnom= input(\"Nombre: \")\r\nedad= int(input(\"Edad: \"))\r\ningre= int(input(\"Ingreso mensual: \"))\r\negre= int(input(\"Egreso mensual: \"))\r\ncant= int(input(\"Cantidad del prestamo a solicitar: \"))\r\nmes= int(input(\"Meses a pagar: \"))\r\n\r\ntotal=ingre-egre\r\ncuota=cant/mes\r\n\r\nif edad < 18 or ingre < egre or total < cuota:\r\n print(\"No cumple con los requisitos para obtener el crédito\")\r\nelse :\r\n print(\"Cumple con los requisitos para obtener el crédito\")"
},
{
"alpha_fraction": 0.37037035822868347,
"alphanum_fraction": 0.42592594027519226,
"avg_line_length": 16.33333396911621,
"blob_id": "255b8a0e1a9861aafc475dce2b64454110cd1f2b",
"content_id": "b7a1506b09515a2f1580c1e7d94d519f421e210e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 108,
"license_type": "no_license",
"max_line_length": 29,
"num_lines": 6,
"path": "/ejer8.py",
"repo_name": "Ronnicks/oscaremmanuelcamarillohernandez-codigospython",
"src_encoding": "UTF-8",
"text": "n=5\r\nfor i in range(n+1):\r\n print(\"*\"*i)\r\nif i == 5:\r\n for n in range(n-1,0,-1):\r\n print(\"*\"*n)"
}
] | 9 |
sanchitvj/C4_SMP_ML
|
https://github.com/sanchitvj/C4_SMP_ML
|
5e056cb1069fdce8339cc2337ca5d142c95138b8
|
a4fb993cf2b407c0b1b80f228ff58afb9b15ea3d
|
0c777da0b3cb467a5733d6c27d4e5f91b162959e
|
refs/heads/master
| 2022-07-29T16:15:21.599387 | 2020-05-23T19:28:03 | 2020-05-23T19:28:03 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.807692289352417,
"alphanum_fraction": 0.8269230723381042,
"avg_line_length": 51,
"blob_id": "170a7e3f3e1afc8c9f253674b1504e187f4e21e7",
"content_id": "5fb920e5cd5abe9e6c6097bf6f8d6e07d93387f6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 52,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 1,
"path": "/README.md",
"repo_name": "sanchitvj/C4_SMP_ML",
"src_encoding": "UTF-8",
"text": "# C4 Summer Mentorship Program for Machine Learning\n"
},
{
"alpha_fraction": 0.5983718037605286,
"alphanum_fraction": 0.6092265844345093,
"avg_line_length": 27.30769157409668,
"blob_id": "f669f70376709fb8978a0c54d24fd65cb3cdfc96",
"content_id": "8d6d55d0f8128646807016a93ef77523e5f0a812",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 737,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 26,
"path": "/week_2/costFunction.py",
"repo_name": "sanchitvj/C4_SMP_ML",
"src_encoding": "UTF-8",
"text": "import numpy as np\nfrom sigmoid import sigmoid\nepsilon = 1e-5 #to suppress warning of division by 0\n\ndef costFunction(theta, X, y):\n '''returns cost for theta, X and y\n np.log(a)==> returns array with elementwise log on array a\n use the sigmoid function that's being imported above \n ''' \n m = y.size\n h = sigmoid(X @ theta)\n \n J = -((np.log(h + epsilon).T).dot(y)+np.log(1-h + epsilon).T.dot(1-y))/m\n\n return J \n\ndef gradient(theta, X, y):\n \n '''' calculate gradient descent for logistic regression'''\n m = y.size\n theta=theta.reshape(-1,1)\n h = sigmoid(X.dot(theta))\n\n grad = ((1/m) * np.dot(X.T,(h - y)))\n\n return(grad.flatten())\t# returns copy of array in one dimension\n\n"
},
{
"alpha_fraction": 0.6784313917160034,
"alphanum_fraction": 0.686274528503418,
"avg_line_length": 35,
"blob_id": "9e557525dc05de49f72bd00fc888aac03fd7fbb1",
"content_id": "8e1af6c855540d25fbe20b4d6a59998acfcbb496",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 255,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 7,
"path": "/week_2/sigmoidReg.py",
"repo_name": "sanchitvj/C4_SMP_ML",
"src_encoding": "UTF-8",
"text": "from scipy.special import expit #to encounter the problem of exp Overflow\nimport numpy as np\ndef sigmoidReg(z):\n '''Returns sigmoid of z\n np.exp(A)==> returns element each element X in the form e^X'''\n sgm= 1/(1+np.exp(-z))\n return expit(sgm)\n \n"
}
] | 3 |
viljamirom/face_detection
|
https://github.com/viljamirom/face_detection
|
0dc5cdb1829a66293899d708e6b66f7c22715b96
|
2afeb22b19d5599670bc1220ae57af666c387496
|
2c0031171219cb1ead2a3559c4bc9367e70ea04f
|
refs/heads/main
| 2023-04-11T02:08:57.971230 | 2021-04-21T12:15:41 | 2021-04-21T12:15:41 | 334,998,567 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7345823645591736,
"alphanum_fraction": 0.7447306513786316,
"avg_line_length": 35.628570556640625,
"blob_id": "762de8103837ed2c38688c4d06fb71aff9e68cc7",
"content_id": "8ca53817ed88499d1303d1b6fbb6c5220ca2f540",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1281,
"license_type": "no_license",
"max_line_length": 165,
"num_lines": 35,
"path": "/README.md",
"repo_name": "viljamirom/face_detection",
"src_encoding": "UTF-8",
"text": "# Face detection\nFace detection for deep speaking avatar, part of Bsc thesis project.\\\nExample for this project and the used models can be found [here](https://github.com/mahehu/TUT-live-age-estimator).\n\n# How to use\nDependencies: OpenCV - 4.5.1+ (Used in tests)\n\\\n\\\nRequires a webcam for the video feed. Documentation of the videocapture can be found [here](https://docs.opencv.org/3.4/d8/dfe/classcv_1_1VideoCapture.html#details).\n\\\n\\\n--output OUTPUT: Desired directory path for the detection file. (Must give as argument) \n\nSaves the detections in text (.txt) file with default name of \"latest_face_detection.txt\" to the given directory. Updates/rewrites that file after every second. \nCreates bounding box coordinates of the biggest detected face in form of: min_x;min_y;max_x;max_y.\n\nNote the coordinate system (scaled from 0.0 to 1.0):\n\\\n\\\n\n\n```\nusage: detect_faces.py [-h] --output OUTPUT\n\nDetect faces from video feed and returns bounding box of biggest detection\n\noptional arguments:\n -h, --help show this help message and exit\n --output OUTPUT Path to the output directory\n\n ```\nExample usage:\n```\nC:\\path\\to\\face_detection>python detect_faces.py --output path/to/outputdir (or only dir name if in current level)\n ```"
},
{
"alpha_fraction": 0.5574876070022583,
"alphanum_fraction": 0.5759403705596924,
"avg_line_length": 32.5476188659668,
"blob_id": "777cade9c53134e3dca69f84c9d05b3120b9e5c8",
"content_id": "9e1e16e3fb313903e282c553b3d04f4a6c4dbc6f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2818,
"license_type": "no_license",
"max_line_length": 158,
"num_lines": 84,
"path": "/detect_faces.py",
"repo_name": "viljamirom/face_detection",
"src_encoding": "UTF-8",
"text": "import cv2\nimport argparse\nimport time\nimport os\n\n\ndef parse_output_file(path, format):\n name = \"latest_face_detection\"\n output_file = path+name+format\n return output_file\n\n\ndef check_output_path(path):\n if os.path.isfile(path):\n raise Exception(\"Path points to file and not to directory\")\n if not path[-1] == '/':\n path += '/'\n if not os.path.exists(path):\n raise Exception(\"Could not find given directory\")\n return path\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Detect faces from video feed and returns bounding box of biggest detection')\n parser.add_argument('--output', required=True,\n help='Path to the output directory')\n args = parser.parse_args()\n output_path = check_output_path(args.output)\n\n cap = cv2.VideoCapture(0)\n if not cap.isOpened():\n raise Exception(\"Could not open camera\")\n\n frozen_graph = 'frozen_inference_graph.pb'\n text_graph = 'graph.pbtxt'\n net = cv2.dnn.readNetFromTensorflow(frozen_graph, text_graph)\n net_input_width = 240\n net_input_height = 180\n\n while True:\n _, img = cap.read()\n #img = cv2.flip(img, 1) # might need to mirror the image\n\n blob = cv2.dnn.blobFromImage(img, size=(net_input_width, net_input_height), swapRB=True, crop=False)\n net.setInput(blob)\n netOut = net.forward()\n\n rows, cols = img.shape[0:2]\n max_area = 0\n\n default_target = [float(1 / 3), float(1 / 3), float(2 / 3), float(2 / 3)] # desired default bounding box\n target_face = default_target\n\n for detection in netOut[0, 0, :, :]:\n score = float(detection[2])\n min_x = float(detection[3])\n min_y = float(detection[4])\n max_x = float(detection[5])\n max_y = float(detection[6])\n width = int(max_x * cols - min_x * cols)\n height = int(max_y * rows - min_y * rows)\n if score > 0.3 and width > 40:\n area = int(width * height)\n if area > max_area:\n max_area = area\n target_face = [min_x, min_y, max_x, max_y]\n\n # video feed from detection\n cv2.rectangle(img, (int(target_face[0] * cols), 
int(target_face[1] * rows)), (int(target_face[2] * cols), int(target_face[3] * rows)), (255, 0, 0), 2)\n cv2.imshow(\"image\", img)\n\n file_to_save = parse_output_file(output_path, \".txt\")\n with open(file_to_save, 'w') as file:\n file.write(\";\".join(str(x) for x in target_face))\n file.close()\n\n key = cv2.waitKey(1) # note that if video feed window is not active, program must be stopped manually\n if key & 0xFF == ord('q'):\n break\n time.sleep(1)\n\n\nif __name__ == '__main__':\n main()\n"
}
] | 2 |
Saravan-srg/N-way-set-associative-cache-implementation-in-python
|
https://github.com/Saravan-srg/N-way-set-associative-cache-implementation-in-python
|
310be9d6def417ede6711218f421b284c2f6a9d9
|
d4a58743317917b7bc15d02c5f95392d8517cb07
|
53b3ad2b84a0b0a6663ad8b078f8122cbb56fbe5
|
refs/heads/master
| 2022-12-10T04:16:16.512073 | 2020-09-08T19:09:49 | 2020-09-08T19:09:49 | 293,904,000 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5220953822135925,
"alphanum_fraction": 0.5390693545341492,
"avg_line_length": 28.938596725463867,
"blob_id": "c9c12ff2124422cd1c8de1f0ff2340bf508928e8",
"content_id": "5afb25f42faca593c99e65fabcee12a3f59f4109",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3417,
"license_type": "no_license",
"max_line_length": 130,
"num_lines": 114,
"path": "/n_way_sa_cache.py",
"repo_name": "Saravan-srg/N-way-set-associative-cache-implementation-in-python",
"src_encoding": "UTF-8",
"text": "import math\nclass cache_line:\n def __init__(self,address, number_of_ways):\n self.n = number_of_ways\n self.m = int(math.log2(number_of_ways))\n self.myaddress = address\n self.tag_l = []\n self.tag_l.append(self.myaddress[0:7+self.m])\n self.index = self.myaddress[30-(23-self.m):30]\n \n def get_index(self):\n return self.index\n\n def get_tag(self):\n return self.tag_l[0]\n\n def get_taglist(self):\n return self.tag_l\n \n def taglist_isfull(self):\n if len(self.get_taglist()) == n :\n return True\n\n def taglist_isempty(self):\n if len(self.get_taglist()) == 0 :\n return True\n\n\ndef hit_or_miss(obj_, new_obj): # Returns an index acccording to which hit or miss is decided, also performs required operations. \n index_ = 0\n if obj_.index == new_obj.index:\n if new_obj.get_tag() in obj_.tag_l: # If the object's tag is in the tag list.\n index_ = 1 #HIT\n lru_same = obj_.tag_l.index(new_obj.get_tag()) #LRU replacement\n new_t = obj_.tag_l.pop(lru_same)\n obj_.tag_l.append(new_t)\n \n else:\n if(obj_.taglist_isfull()):\n index_ = 2 # index_ is matching but tag is not (tag list is full)\n obj_.tag_l.pop(0) #LRU replacement\n obj_.tag_l.append(new_obj.tag_l[0])\n \n else:\n obj_.tag_l.append(new_obj.tag_l[0])\n index_ = 3 # index_ is matching but tag is not (tag list is not full), appends new tag.\n else:\n index_ = 4 # Both index_ and tags are not matching. So, it's a definite miss. 
Object is added to cache.\n return index_\n\n\ndef hexa_to_binary(hexa_addr): # Converts hexa decimal address to binary.\n new_hexa_addr = hexa_addr[2:]\n binary_addr = \"{0:08b}\".format(int(new_hexa_addr, 16))\n \n l = 32 - binary_addr.__len__()\n binary_addr = \"0\"*l + binary_addr \n return binary_addr\n\ndef segregate(input_string): # Segregates file read line into instructon and address\n instruction = input_string[0]\n add = input_string[2:12]\n return [instruction, add]\n\n\nif __name__ == \"__main__\":\n\n n = input(\"Enter number of ways n:\")\n cache_list = [] #cache\n no_of_hits = -1\n no_of_misses = 1\n count_T = 0\n \n f = open(\"test.trace\", \"r\") #Trace file. Change this to test on other trace files\n count2 = 0\n s = []\n\n for i in f:\n s.append(hexa_to_binary(segregate(i)[1]))\n \n \n for addr in s:\n count_T += 1\n print(count_T)\n\n index = 0\n j = 0\n obj_cl = cache_line(addr,n)\n s.pop(0)\n\n if cache_list.__len__() == 0:\n cache_list.append(obj_cl)\n\n for obj in cache_list: \n index = hit_or_miss(obj, obj_cl) \n if index == 1:\n no_of_hits = no_of_hits + 1\n break\n\n if index == 4:\n no_of_misses = no_of_misses+1\n cache_list.append(obj_cl)\n \n elif index != 1:\n no_of_misses = no_of_misses+1 #LRU replacement is used \n\n hit_percent = (no_of_hits*(1.0)) / ((1.0)*(no_of_hits+no_of_misses))*(100*1.0)\n print(f.name)\n print (\"Number of ways = \", n)\n print (\"Number of hits : \", no_of_hits)\n print (\"Number of miss : \", no_of_misses)\n print (\"Hit percent : \", hit_percent)\n\n f.close() \n"
}
] | 1 |
mkbeh/parser_music.i.ua
|
https://github.com/mkbeh/parser_music.i.ua
|
a66d2a271bd008a62477ce3ea2ef6ea69afe0900
|
67e485217b748e97fe5fc2204191a7795f35607f
|
fe440d2e022e25bbacef2093bba80b1077b53199
|
refs/heads/master
| 2022-11-15T21:22:03.683480 | 2020-07-19T23:28:26 | 2020-07-19T23:28:26 | 280,968,319 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.583961009979248,
"alphanum_fraction": 0.6030128598213196,
"avg_line_length": 23.532608032226562,
"blob_id": "e433aa333c72bed93677772b10fb8e671bf4614c",
"content_id": "7163ade168d27659b030e56e020eb42185dc4f91",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2257,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 92,
"path": "/downloader.py",
"repo_name": "mkbeh/parser_music.i.ua",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\n\"\"\"\nDownload music list from specific user profile page.\nUser profile: http://music.i.ua/user/5497167/playlist/62530/#p0\n\"\"\"\n\nimport os\n\nimport requests\n\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup as BS\n\n\nOUTPUT_DIR = os.path.join(os.path.dirname(__file__), 'music')\nSITE_URL = 'http://music.i.ua'\nPAGE_URL = 'http://music.i.ua/user/5497167/playlist/62530/#p{}'\nBROWSER = webdriver.Firefox()\n\nPAGE_NUM = 0\nSUMMARY_FILES_CREATED = 0\n\n\ndef progress_msg(func):\n def wrapper(*args, **kwargs):\n print(f' # Downloading new file \"{args[1]} - {args[2]}\" from page #{PAGE_NUM}')\n func(*args, **kwargs)\n print(f' + File \"{args[1]} - {args[2]}\" successfully downloaded.')\n print(f' $ Total downloaded and created {SUMMARY_FILES_CREATED} files.\\n')\n return wrapper\n\n\nif not os.path.exists(OUTPUT_DIR):\n os.mkdir('music')\n\n\ndef get_html_js(url):\n BROWSER.get(url)\n return BROWSER.page_source\n\n\ndef get_html(url):\n try:\n return requests.get(url, timeout=(3, 27)).content\n except Exception as e:\n print(e)\n\n\n@progress_msg\ndef create_file(audio_url, singer, composition):\n path = os.path.join(OUTPUT_DIR, f'{singer} - {composition}')\n\n if not os.path.exists(path):\n bs = BS(get_html_js(audio_url), 'lxml')\n audio_url = 'http:' + bs.find('audio').attrs['src']\n\n r = requests.get(audio_url, allow_redirects=True)\n open(path, 'wb').write(r.content)\n\n global SUMMARY_FILES_CREATED\n SUMMARY_FILES_CREATED += 1\n return SUMMARY_FILES_CREATED\n\n\ndef downloader_handler(page_num):\n url = PAGE_URL.format(page_num)\n bs = BS(get_html(url), 'lxml')\n\n for tr in bs.findAll('tr')[1:]:\n audio_url = composition = singer = None\n\n for index, val in enumerate(tr.findAll('a')):\n if index == 0:\n audio_url = SITE_URL + val.attrs['href']\n elif index == 1:\n composition = val.get_text()\n elif index == 2:\n singer = val.get_text()\n\n create_file(audio_url, singer, composition)\n\n\ndef main():\n 
for i in range(1, 9):\n global PAGE_NUM\n PAGE_NUM = i\n downloader_handler(i)\n\n\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.6794871687889099,
"alphanum_fraction": 0.7628205418586731,
"avg_line_length": 38,
"blob_id": "d553399b29941fea83b1477baa1c235da9ce1f1b",
"content_id": "d4972765e83a0e904ab9d23e0a24d51e5b7ce6bd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 156,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 4,
"path": "/README.md",
"repo_name": "mkbeh/parser_music.i.ua",
"src_encoding": "UTF-8",
"text": "# Downloader from http://music.i.ua/ \n\nDownload music list from specific user profile page.\nUser profile: http://music.i.ua/user/5497167/playlist/62530/#p0\n"
}
] | 2 |
supriyantomaftuh/notifico
|
https://github.com/supriyantomaftuh/notifico
|
4f98b4f8b7d8e5ea678b9aa05409997ea72f25d6
|
c2268887723650370352c7cbc52edb4703d4acc8
|
d7b538724166752a3d09f93206df92bf07a7e6c5
|
refs/heads/master
| 2018-03-09T11:04:54.659541 | 2015-09-11T19:59:05 | 2015-09-11T19:59:05 | 42,341,249 | 0 | 1 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5634517669677734,
"alphanum_fraction": 0.5661590695381165,
"avg_line_length": 27.689319610595703,
"blob_id": "520294f2a3b296354c189a59e263b63a7e87020c",
"content_id": "c2ff13f2c985eb4a567b35526fa6aeea93a72e3e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2955,
"license_type": "permissive",
"max_line_length": 71,
"num_lines": 103,
"path": "/notifico/services/hooks/appveyor.py",
"repo_name": "supriyantomaftuh/notifico",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf8 -*-\n__all__ = ('AppVeyorHook',)\n\nimport json\nfrom hashlib import sha256\n\nfrom flask.ext import wtf\n\nfrom notifico.services.hooks import HookService\n\nclass AppVeyorConfigForm(wtf.Form):\n use_colors = wtf.BooleanField('Use Colors', validators=[\n wtf.Optional()\n ], default=True, description=(\n 'If checked, commit messages will include minor mIRC coloring.'\n ))\n\nclass AppVeyorHook(HookService):\n \"\"\"\n HookService hook for https://ci.appveyor.com\n \"\"\"\n SERVICE_NAME = 'AppVeyor'\n SERVICE_ID = 80\n\n @classmethod\n def service_description(cls):\n return cls.env().get_template('appveyor_desc.html').render()\n\n @classmethod\n def handle_request(cls, user, request, hook):\n payload = request.get_json()\n if not payload:\n return\n \n # event_name = payload['eventName']\n event_data = payload['eventData']\n \n strip = not hook.config.get('use_colors', True)\n\n summary = cls._create_summary(event_data)\n details = 'Details: {0}'.format(event_data['buildUrl'])\n details = cls._prefix_line(details, event_data)\n\n yield cls.message(summary, strip)\n yield cls.message(details, strip)\n\n @classmethod\n def _prefix_line(cls, line, event_data):\n \"\"\"\n Prefixes lines with [RepoName] and adds colours\n \"\"\"\n\n prefix = u'{RESET}[{BLUE}{name}{RESET}] '.format(\n name=event_data['projectName'],\n **HookService.colors\n )\n return prefix + line\n\n @classmethod\n def _create_summary(cls, payload):\n \"\"\"\n Create and return a one-line summary of the build\n \"\"\"\n if payload['failed'] == True:\n status_colour = HookService.colors['RED']\n elif payload['passed'] == True:\n status_colour = HookService.colors['GREEN']\n\n lines = []\n\n # Build number\n lines.append(u'AppVeyor - build {number}:'.format(\n number=payload['buildVersion'],\n ))\n\n # Status and correct colours\n lines.append(u'{status}{message}{RESET}.'.format(\n status=status_colour,\n message=payload['status'],\n **HookService.colors\n ))\n\n # branch & 
commit hash\n lines.append(u'({G}{branch}{R} @ {G}{commit}{R})'.format(\n branch=payload['branch'],\n commit=payload['commitId'][:7],\n G=HookService.colors['GREEN'],\n R=HookService.colors['RESET']\n ))\n \n if payload['isPullRequest'] == True:\n lines.append(u'(pull request {G}#{n}{R})'.format(\n n=payload['pullRequestId'],\n G=HookService.colors['GREEN'],\n R=HookService.colors['RESET']\n ))\n \n line = u' '.join(lines)\n return cls._prefix_line(line, payload)\n\n @classmethod\n def form(cls):\n return AppVeyorConfigForm\n"
}
] | 1 |
meh132/derbydash
|
https://github.com/meh132/derbydash
|
96920a29fab9974868ed0d4789e92f43925288d7
|
91a16ec3ce853b1b0f06759d511b5413593dad93
|
d14198177e00ca8e3cdc4d1add0b0c01699426a9
|
refs/heads/master
| 2020-09-06T15:26:39.006536 | 2020-01-30T23:34:19 | 2020-01-30T23:34:19 | 220,465,062 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.3831728398799896,
"alphanum_fraction": 0.40065890550613403,
"avg_line_length": 32.735042572021484,
"blob_id": "26fb621baa0a49f4d3f52048745a76c216ff1f71",
"content_id": "1cfa2f3f977ee7ecf2e80f989dffaa3604cc0dfe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3946,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 117,
"path": "/Tab1.py",
"repo_name": "meh132/derbydash",
"src_encoding": "UTF-8",
"text": "import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_table\nimport dash_daq as daq\n\n# Genarte table\ndef gen_tab1():\n return html.Div([\n html.H2('Race Setup'),\n dcc.Upload(\n id='upload-data',\n children=html.Div([\n 'Drag and Drop or ',\n html.A('Select Files')\n ]),\n style={\n 'width': '80%',\n 'height': '60px',\n 'lineHeight': '60px',\n 'borderWidth': '1px',\n 'borderStyle': 'dashed',\n 'borderRadius': '5px',\n 'textAlign': 'center',\n 'margin': '10px'\n },\n multiple=True\n ),\n html.Div(id='output-data-upload'),\n html.H2('Track Setup'),\n html.H3('Connect to Track'), \n daq.StopButton(id='connect-button', buttonText='Connect'),\n html.Div(id='connect-results'),\n \n html.H3('Lane Order'),\n html.Button('Reverse Lanes', id='lane-reverse-button'),\n # Track lanes 'on' reads, 'on4' sets\n dcc.RadioItems(\n id='lane-number',\n options=[\n {'label': '4 Lanes', 'value': 'on4'},\n {'label': '6 Lanes', 'value': 'on6'},\n ],\n value='on4', \n labelStyle={'display': 'inline-block'}\n ) ,\n\n # Lane Character 'ol' reads, 'ol1' sets to '1'\n dcc.RadioItems(\n id='lane-character',\n options=[\n {'label': 'Set to Digit', 'value': 'ol1'},\n {'label': 'Set to Letter', 'value': 'ol2'},\n ],\n #value='ol1', \n labelStyle={'display': 'inline-block'}\n ) ,\n \n # Lane Place character 'op' reads, 'op2' sets to '1'\n dcc.RadioItems(\n id='place-character',\n options=[\n {'label': '4 Lanes', 'value': 'op2'},\n {'label': '6 Lanes', 'value': '6'},\n ],\n value='op2', labelStyle={'display': 'inline-block'}\n ) ,\n \n\n # om - lane mask, 'om0' resets to all lanes\n\n\n # od - decimal places 3,4,5 options\n dcc.RadioItems(\n id='decimal-places',\n options=[\n {'label': '3 digits', 'value': 'od3'},\n {'label': '4 digits', 'value': 'od4'},\n {'label': '5 digits', 'value': 'od5'},\n ],\n value='od4', labelStyle={'display': 'inline-block'}\n ) ,\n\n # or reset delay - 0, 10, 30\n dcc.RadioItems(\n id='reset-delay',\n options=[\n 
{'label': '3 digits', 'value': 'or0'},\n {'label': '4 digits', 'value': 'or10'},\n {'label': '5 digits', 'value': 'or30'},\n ],\n value='or', labelStyle={'display': 'inline-block'}\n ) ,\n # ov - reverse lane numbering\n dcc.RadioItems(\n id='reset-delay',\n options=[\n {'label': '1,2,3,4', 'value': 'ov0'},\n {'label': '4,3,2,1', 'value': 'ov1'},\n ],\n value='ov1', labelStyle={'display': 'inline-block'}\n ) , \n ]) \n\n\ndef serialwrite(code):\n command = code +'\\r\\n'\n newcommand = command.encode('ascii')\n #ser.write(newcommand)\n #results = ser.readline()\n results = 'xxx'\n #laneresults = results.decode().split()\n #print(laneresults)\n return results\n\n #\n # reverse lane order = 'ov1'+'\\r\\n'"
},
{
"alpha_fraction": 0.6184319853782654,
"alphanum_fraction": 0.6279207468032837,
"avg_line_length": 17.650442123413086,
"blob_id": "6093a9b681c6c684dd0bc6828c42ca1dc09443b2",
"content_id": "7873cfeade063dd71cdcc290e84a1a1cbd45a6b9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8431,
"license_type": "no_license",
"max_line_length": 296,
"num_lines": 452,
"path": "/ARchive/DerbyFinal.py",
"repo_name": "meh132/derbydash",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd;\nimport numpy as np;\nimport serial, time \nimport json\nimport matplotlib\n\n\n# In[ ]:\n\n\n# connect to Track\nser = serial.Serial('/dev/ttyUSB0', 9600, timeout = None)\nprint(ser.name)\n\n\n# In[2]:\n\n\n# Read from CSV to load Races\nraces = pd.read_csv('Races.csv')\nraces = races.set_index('Race Number')\n\n\n# Read list of racers and cars from CSV\n\nracers = pd.read_csv('Racers.csv')\nracers = racers.set_index('Number')\nracers.head()\n\n\n# In[ ]:\n\n\n# create Race Output for First race\n# for race, build details for lane\n# get lane 1 car\nrace = {}\nlane = 1\n\n\nfor carNum in races.loc[11]:\n name = racers.loc[carNum,'Name']\n #print(lane, carNum, name)\n details = {'car': str(carNum), 'name': name}\n trackName = 'lane' + str(lane)\n race[trackName] = details\n lane = lane +1\n\nnextup = {'nextUp': race}\nwith open('src/nextup.json', 'w') as outfile:\n json.dump(nextup, outfile)\n\n\n# In[ ]:\n\n\n# Set Race\ncurrent_race = 5\n\n\n# In[ ]:\n\n\n# resultsdf = pd.DataFrame(columns=[\"Race\",\"Lane\",\"Car\",\"Time\",\"Scout\",\"Den\"])\n#resultsdf = pd.DataFrame(columns=[\"Race\",\"Lane\",\"Car\",\"Time\"])\n\nresultsdf = pd.DataFrame(columns=['race', 'lane', 'car', 'name', 'time', 'place', 'den', 'category'])\nresultsdf.head()\n\n\n# # Start of the Race process\n\n# In[ ]:\n\n\n# show curret race\ncurrent_contestants = races.loc[current_race]\nprint(races.loc[current_race])\n\n\n# In[ ]:\n\n\n# Reset Track if needed\n#Reset Track\ncommand = 'r'+'\\r\\n'\nnewcommand = command.encode('ascii')\nser.write(newcommand)\n\n\n# ## Run Race\n# \n\n# In[ ]:\n\n\n# set current race if needed\ncurrent_race = 10\n\n\n# ### Call out # of lanes and race number\n# \n\n# In[ ]:\n\n\nprint('In Race ' + str(current_race) + ' the following cars are racing: \\n'+ str(races.loc[current_race]))\n\n\n# In[ ]:\n\n\n# Turn on all Lanes om# is used to turn off track\ncommand = 
'om0'+'\\r\\n'\nnewcommand = command.encode('ascii')\nser.write(newcommand)\nreturnmessage = ser.readline()\nprint(returnmessage.decode())\n\n\n# In[ ]:\n\n\n# Turn off tracks that are not being used\n# lanesToBlock = [1,4]\nlanesToBlock = []\n\nfor x in (lanesToBlock):\n #lanenum = (x[4:5])\n #command = 'om' + lanenum +'\\r\\n'\n command = 'om' + str(x) +'\\r\\n'\n print(command)\n newcommand = command.encode('ascii')\n ser.write(newcommand)\n returnmessage = ser.readline()\n print(returnmessage.decode())\n\n\n# In[ ]:\n\n\n\n# Get Race Results\n# get race results\ncommand = 'rg'+'\\r\\n'\nnewcommand = command.encode('ascii')\nser.write(newcommand)\nresults = ser.readline()\nlaneresults = results.decode().split()\nprint(laneresults)\n\n\n# confirm Lane Results\n\n\n# # Force finish race \n\n# In[ ]:\n\n\n# force race end\ncommand = 'ra'+'\\r\\n'\nnewcommand = command.encode('ascii')\nser.write(newcommand)\nresults = ser.readline()\n\n\n# In[ ]:\n\n\n\n# Get Race Results\n# get race results\ncommand = 'rp'+'\\r\\n'\nnewcommand = command.encode('ascii')\nser.write(newcommand)\nresults = ser.readline()\nlaneresults = results.decode().split()\nprint(laneresults)\n\n\n# In[ ]:\n\n\n#laneresults = ['1=1.1704', '2=5.4159b', '3=3.5462c', '4=3.7246d']\nprint('Confirm Results: \\n' + str(laneresults))\n\n\n# ## Calculate leaders and publish leaders based on average time\n\n# In[ ]:\n\n\nrace = {}\nfor laneresult in laneresults:\n lane = laneresult[0]\n car = races.loc[int(current_race),'Lane '+lane]\n time = float(laneresult[2:8])\n name = racers.loc[car,'Name']\n den = racers.loc[car,'Den']\n category = racers.loc[car,'Category']\n place = laneresult[8:9]\n \n # Create results json\n details = {'race' : current_race , 'lane' : int(lane), 'car': str(car), 'name': name, 'time': time, 'place': place, 'den': den, 'category': category} \n trackName = 'lane' + str(lane)\n race[trackName] = details\n \n \n # add den, category\n resultsdf = resultsdf.append(details, 
ignore_index=True)\n #print(details)\n \ncurrent_race = current_race +1\nresultsdf.to_csv('resultsFile.csv')\n\n\n# In[ ]:\n\n\n## update or rewrite Json for current race. \ncurrentRace = {'currentRace': race}\nwith open('src/current.json', 'w') as outfile:\n json.dump(currentRace, outfile)\n\n\n## update or rewrite Json for current race. \navg = resultsdf.groupby(['car','name']) .agg({'car':'size', 'time':'mean'}) .rename(columns={'car':'Races Completed'}) .reset_index()\n\navgtimes = avg.to_dict(orient='records')\navgTimes = {'averageTimes': avgtimes}\nwith open('src/avg.json', 'w') as outfile:\n json.dump(avgTimes, outfile)\n\ntop = resultsdf.groupby(['car','name']) .agg({'time':'min'}) .reset_index()\n\ntoptimes = top.to_dict(orient='records')\n\n## update or rewrite Json for current race. \ntopTimes = {'topSpeeds': toptimes}\nwith open('src/top.json', 'w') as outfile:\n json.dump(topTimes, outfile)\n \n# create Race Output for Upcoming race\n# for race, build details for lane\n# get lane 1 car\nrace = {}\nlane = 1\nfor carNum in races.loc[current_race]:\n name = racers.loc[carNum,'Name']\n #print(lane, carNum, name)\n details = {'car': str(carNum), 'name': name}\n trackName = 'lane' + str(lane)\n race[trackName] = details\n lane = lane +1\n\nnextup = {'nextUp': race}\nwith open('src/nextup.json', 'w') as outfile:\n json.dump(nextup, outfile)\n\n\n# In[ ]:\n\n\n## End of Race\n\n\n# # Final Results\n\n# In[ ]:\n\n\n## Den Results\nden_results = resultsdf.groupby(['den','car','name']) .agg({'car':'size', 'time':'mean'}) .rename(columns={'car':'Races Completed'}) .sort_values(['den','time'], ascending=True)\n #.reset_index()\nprint(den_results)\n\n\n# In[ ]:\n\n\n## Den Results\nden_results = resultsdf.groupby(['den','car','name']) .agg({'car':'size', 'time':'mean'}) .rename(columns={'car':'Races Completed'}) .sort_values('time', ascending=True)\n #.reset_index()\nprint(den_results)\n\n\n# In[ ]:\n\n\n## Fastest Lane\nlane_results = resultsdf.groupby(['lane']) 
.agg({'car':'size', 'time':'mean'}) .rename(columns={'car':'Races Completed'}) .sort_values('time', ascending=True)\n #.reset_index()\nprint(lane_results)\n\n\n# In[ ]:\n\n\n# Get top cars after dropping slowest lap\nstandings = resultsdf.groupby('car') .apply(lambda x: x.drop([x['time'].idxmax()])) .rename_axis(['time','time']) .groupby('car') .agg({'car':'size', 'time':'mean'}) .rename(columns={'car':'Races Completed','time':'Average Time'}) .sort_values('Average Time', ascending=True)\n\n\n#standings = standings.sort_values('Average Time', ascending=True)\nprint(standings.iloc[0:4])\n\n\n# # Extra commands\n\n# In[ ]:\n\n\n# Turn on Set Place indicatorto #\ncommand = 'op2'+'\\r\\n'\nnewcommand = command.encode('ascii')\nser.write(newcommand)\nreturnmessage = ser.readline()\nprint(returnmessage.decode())\n\n\n# In[4]:\n\n\nbackup = pd.read_csv('resultsFile.csv')\nbackup.head()\n\n\n# In[5]:\n\n\n## Den Results\nden_results = backup.groupby(['den','car','name']) .agg({'car':'size', 'time':'mean'}) .rename(columns={'car':'Races Completed'}) .sort_values(['den','time'], ascending=True)\n #.reset_index()\nprint(den_results)\n\n\n# In[ ]:\n\n\n\n\n\n# In[6]:\n\n\n## Den Results\nden_results = backup.groupby(['car','name']) .agg({'car':'size', 'time':'mean'}) .rename(columns={'car':'Races Completed'}) .sort_values('time', ascending=True)\n #.reset_index()\nprint(den_results)\n\n\n# In[8]:\n\n\nden_results.columns\n\n\n# In[15]:\n\n\nget_ipython().run_line_magic('matplotlib', 'inline')\nimport matplotlib.pyplot as plt\n\n\n# In[19]:\n\n\nden_results.hist(column='time',bins=50)\n\n\n# In[ ]:\n\n\n# Find Car by race and lane\ncar = racedf.loc[1,'lane1']\nprint (car)\n\n\n# In[ ]:\n\n\nresults = ser.readline()\n\n\n# In[ ]:\n\n\ntimes = avg.loc[:,['car','name','Average Time']]\ntimes = times.reset_index()\ntimes.head()\n\n\n# In[ ]:\n\n\n# delete race result or duplicate race result\n\n\n# In[ ]:\n\n\n# write out rsults df and load from csv\n\n\n# In[ ]:\n\n\ndef 
serialwrite(code):\n command = code +'\\r\\n'\n #newcommand = command.encode('ascii')\n #ser.write(newcommand)\n #results = ser.readline()\n #laneresults = results.decode().split()\n #print(laneresults)\n return results\n\nserialwrite('rg')\n\n\n# In[ ]:\n\n\ncurrentrace['currentRace']['lane1']['time'] ='3.012'\nlastRace = currentrace['currentRace']\ntype(lastRace)\n\n\n# In[ ]:\n\n\n# Reverse Lane order\ncommand = 'ov1'+'\\r\\n'\nnewcommand = command.encode('ascii')\nser.write(newcommand)\nresults = ser.readline()\nprint(laneresults)\n\n\n# confirm Lane Results\n\n\n# In[ ]:\n\n\n#races.loc[2,'Lane 2'] = 1\n#races.head(15)\n\n"
},
{
"alpha_fraction": 0.6275395154953003,
"alphanum_fraction": 0.6343114972114563,
"avg_line_length": 24.941177368164062,
"blob_id": "5ccc7d07fddc608182d9497909b51b1c44708391",
"content_id": "8ce2542b3febf98957151bd1faeb980012260368",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 443,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 17,
"path": "/functions.py",
"repo_name": "meh132/derbydash",
"src_encoding": "UTF-8",
"text": "import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_table\n\n\n# Genarte table1\ndef generate_table(dataframe, max_rows=15):\n return html.Table(\n # Header\n [html.Tr([html.Th(col) for col in dataframe.columns])] +\n\n # Body\n [html.Tr([\n html.Td(dataframe.iloc[i][col]) for col in dataframe.columns\n ]) for i in range(min(len(dataframe), max_rows))]\n )\n\n\n"
},
{
"alpha_fraction": 0.6310204267501831,
"alphanum_fraction": 0.6375510096549988,
"avg_line_length": 19.41666603088379,
"blob_id": "2647df69f8925c5341cda3b9214f2cc52dbac76a",
"content_id": "4d948bfd5ecfa1c34c29efaa110a8b6fea1c7499",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1225,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 60,
"path": "/track.py",
"repo_name": "meh132/derbydash",
"src_encoding": "UTF-8",
"text": "import serial\n\n# connect to Track\nser = serial.Serial('/dev/ttyUSB0', 9600, timeout = None)\n\n\n\ndef resetLanes():\n command = 'om0'+'\\r\\n'\n newcommand = command.encode('ascii')\n ser.write(newcommand)\n return ser.readline()\n\n# toggle lanes \ndef toggleLane(x):\n command = 'om' + str(x) +'\\r\\n'\n newcommand = command.encode('ascii')\n ser.write(newcommand)\n return ser.readline()\n\ndef laneStatus(x):\n return True\n\n\n\ndef getResults():\n command = 'rg'+'\\r\\n'\n newcommand = command.encode('ascii')\n ser.write(newcommand)\n return ser.readline().decode().split()\n\ndef forceEnd():\n command = 'ra'+'\\r\\n'\n newcommand = command.encode('ascii')\n ser.write(newcommand)\n return ser.readline().decode().split()\n\ndef prevResults():\n command = 'rp'+'\\r\\n'\n newcommand = command.encode('ascii')\n ser.write(newcommand)\n return ser.readline().decode().split()\n\n\n\n# Turn on Set Place indicatorto #\ndef setPlace():\n command = 'op2'+'\\r\\n'\n newcommand = command.encode('ascii')\n ser.write(newcommand)\n return True\n\n\n\n# Reverse Lane order\ndef reverseLaneOrder():\n command = 'ov1'+'\\r\\n'\n newcommand = command.encode('ascii')\n ser.write(newcommand)\n return True\n"
},
{
"alpha_fraction": 0.647988498210907,
"alphanum_fraction": 0.6508620977401733,
"avg_line_length": 29.173913955688477,
"blob_id": "aa20d67863260936bb746d6636b7c3241664fc47",
"content_id": "a5a3f47213412baad0dfc67af10a6770aac862d6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 696,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 23,
"path": "/callbacks.py",
"repo_name": "meh132/derbydash",
"src_encoding": "UTF-8",
"text": "import base64\nimport datetime\nimport io\n\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_table\nimport plotly.graph_objs as go\n\nimport pandas as pd\n\n# Callback after upload\[email protected](Output('output-data-upload', 'children'),\n [Input('upload-data', 'contents')],\n [State('upload-data', 'filename'),\n State('upload-data', 'last_modified')])\ndef update_output(list_of_contents, list_of_names, list_of_dates):\n if list_of_contents is not None:\n children = [\n parse_contents(c, n, d) for c, n, d in\n zip(list_of_contents, list_of_names, list_of_dates)]\n return children\n\n\n"
},
{
"alpha_fraction": 0.5404230356216431,
"alphanum_fraction": 0.562384843826294,
"avg_line_length": 30.195402145385742,
"blob_id": "7c7dc05b3821ea1ef1e7ba34febd8dafe52a20ed",
"content_id": "ac9dc0611c5d3a1bbad8bd827e6137a088c55f7d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13569,
"license_type": "no_license",
"max_line_length": 158,
"num_lines": 435,
"path": "/app200.py",
"repo_name": "meh132/derbydash",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nimport base64\nimport datetime\nimport io\n\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_table\nimport dash_daq as daq\nfrom functions import *\nimport plotly.graph_objs as go\n\n# Input and upload libary\nfrom dash.dependencies import Input, Output, State\nimport pandas as pd\n\n#from callbacks import *\n#from track import *\nfrom Tab1 import *\n#from Tab2 import *\nfrom Tab3 import *\nfrom load import *\n\n\n## Colors for winners\ncolors = [\"#F0FFF0\",\"#ffd700\",\"#c0c0c0\",\"#cd7f32\",\"#F0FFF0\"]\n\n\n#resultsdf = pd.DataFrame(columns=['race', 'lane', 'car', 'name', 'time', 'place', 'den', 'category'])\n#resultsdf = pd.read_csv('./resultsFile.csv')\n#resultsdf.to_csv('resultsFile4016.csv')\n\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\napp.config.suppress_callback_exceptions = True\n\n# for dyanmic\napp.config['suppress_callback_exceptions'] = True\n\n\n# Set up Tabs\n\napp.layout = html.Div([\n dcc.Tabs(id=\"tabs\", value='tab-2', children=[\n dcc.Tab(label='Setup', value='tab-1'),\n dcc.Tab(label='Race', value='tab-2'),\n dcc.Tab(label='Results', value='tab-3'),\n ]),\n html.Div(id='tabs-content')\n])\n\n\n## Callbacks\n# Render Tabs\n\[email protected](Output('tabs-content', 'children'),\n [Input('tabs', 'value')])\ndef render_content(tab):\n if tab == 'tab-1':\n return html.Div([\n gen_tab1()\n\n ])\n elif tab == 'tab-2':\n return html.Div(children=[\n dcc.Interval(\n id='interval-component',\n interval=10*1000, # in milliseconds\n n_intervals=0\n ),\n gen_tab2() # each tab reloads the csv file\n ])\n \n elif tab == 'tab-3':\n return html.Div(children=[\n gen_tab3() # each tab reloads the csv file\n \n ])\n\n\n\n## Generate Tab 2\n# Generate table\ndef gen_tab2():\n #resultsdf = pd.read_csv('./resultsFile.csv')\n #resultsdf = pd.DataFrame(columns=['race', 'lane', 'car', 
'name', 'time', 'place', 'den', 'category'])\n return html.Div(id='main_cols',children=[\n html.Div(id='race', children=[\n daq.NumericInput(\n id='my-daq-numericinput',\n max=60,\n value=1,\n label='Race Number',\n labelPosition='top',\n min=1), \n html.Div(id='lanes', children=[\n html.H3('Lanes'),\n #html.H3(results_df.loc['race']),\n html.Div(id='lane1', children=[\n html.H3('Lane 1'),\n html.Div(id='name1'),\n html.Div(id='time1')\n ], className=\"two columns\"),\n html.Div(id='lane2', children=[\n html.H3('Lane 2'),\n html.Div(id='name2'),\n html.Div(id='time2')\n ], className=\"two columns\"),\n html.Div(id='lane3', children=[\n html.H3('Lane 3'),\n html.Div(id='name3'),\n html.Div(id='time3')\n ], className=\"two columns\"),\n html.Div(id='lane4', children=[\n html.H3('Lane 4'),\n html.Div(id='name4'),\n html.Div(id='time4')\n ], className=\"two columns\"),\n ], \n className=\"twelve columns\",\n style={'border': 'solid', 'text-align': 'center'}),\n daq.StopButton(id='button-results', buttonText='Get Results'),\n html.Div(id='results-button-output'),\n daq.StopButton(id='button-save', buttonText='Save Results'),\n html.Div(id='save-button-output'),\n daq.StopButton(buttonText='Refresh leaderboard', id='Refresh'),\n #html.Div(id='race-controls', children=[\n # daq.StopButton(id='button-results', buttonText='Get Results'),\n # html.Div(id='results-button-output'),\n #html.Button('Force Race End', id='button-end'),\n #html.Button('Next Race', id='button-next'),\n # ], \n # style={'margin': '80px' , 'text-align': 'center'},\n # className=\"eight columns\"), \n html.H4('Upcoming Races', className=\"eight column\"), \n html.Div(id='next-race')], \n style={'font-size': '200%', 'margin': '80px' , 'text-align': 'center'},\n className=\"eight columns\"), \n html.H4('Fastest Single Race'),\n html.Div(id='leader-board',\n className=\"three columns\", \n style={'font-size': '150%','border': 'solid'}),\n ], \n )\n\n\n# Callback after upload \[email 
protected](Output('output-data-upload', 'children'),\n [Input('upload-data', 'contents')],\n [State('upload-data', 'filename'),\n State('upload-data', 'last_modified')])\ndef update_uploads(list_of_contents, list_of_names, list_of_dates):\n if list_of_contents is not None:\n # for testing load results\n #Plotlyresultsdf = pd.read_csv('./resultsFile.csv')\n # Read from CSV to load Races\n #races = pd.read_csv('Races.csv')\n #races = races.set_index('Race Number')\n # Read list of racers and cars from CSV\n #racers = pd.read_csv('Racers.csv')\n #racers = racers.set_index('Number')\n children = [\n parse_contents(c, n, d) for c, n, d in\n zip(list_of_contents, list_of_names, list_of_dates)]\n return children\n\n\n\n\n# Connect Call back should read and set statuses of radio buttons\n\n\n# callback for race change\n# update cars for a lane, lanes will pick up their value\n### how do we update more values than children, can we call out specific values\n## can we update style to show winner??\n \n\n## lane 1\[email protected](\n Output('name1', 'children'),\n [Input('my-daq-numericinput', 'value')])\ndef update_lane1(value):\n carNum = races.loc[value,'Lane 1']\n name = racers.loc[carNum,'Name']\n return html.Div([\n html.H5(carNum ),\n html.H4(name),\n ], style={'background-color': 'grey','border': 'solid', 'text-align': 'center'})\n\n\[email protected](\n Output('time1', 'children'),\n [Input('my-daq-numericinput', 'value')])\ndef update_time1(value):\n #time = resultsdf[(resultsdf['race']==value) & (resultsdf['lane']==1)]['time']\n return html.Div([\n daq.LEDDisplay(\n id='lane1-leddisplay',\n value=0,\n label='Time',\n labelPosition='bottom',\n size='40',\n color=\"#FF5E5E\",\n backgroundColor=\"#000000\"\n )\n ])\n\n\n## Lane 2\[email protected](\n Output('name2', 'children'),\n [Input('my-daq-numericinput', 'value')])\ndef update_lan2(value):\n carNum = races.loc[value,'Lane 2']\n name = racers.loc[carNum,'Name']\n return html.Div([\n html.H5(carNum ),\n 
html.H4(name),\n ], style={'background-color': 'grey','border': 'solid', 'text-align': 'center'})\n\n\n\[email protected](\n Output('time2', 'children'),\n [Input('my-daq-numericinput', 'value')])\ndef update_time2(value):\n #time = resultsdf[(resultsdf['race']==value) & (resultsdf['lane']==2)]['time']\n return html.Div([\n daq.LEDDisplay(\n id='lane2-leddisplay',\n value=0,\n label='Time',\n labelPosition='bottom',\n size='40',\n color=\"#FF5E5E\",\n backgroundColor=\"#000000\"\n )\n ])\n\n## LAne 3 \n\[email protected](\n Output('name3', 'children'),\n [Input('my-daq-numericinput', 'value')])\ndef update_lane3(value):\n carNum = races.loc[value,'Lane 3']\n name = racers.loc[carNum,'Name']\n return html.Div([\n html.H5(carNum ),\n html.H4(name),\n ], style={'background-color': 'grey','border': 'solid', 'text-align': 'center'})\n\n\n\[email protected](\n Output('time3', 'children'),\n [Input('my-daq-numericinput', 'value')])\ndef update_time3(value):\n #time = resultsdf[(resultsdf['race']==value) & (resultsdf['lane']==3)]['time']\n return html.Div([\n daq.LEDDisplay(\n id='lane3-leddisplay',\n value=0,\n label='Time',\n labelPosition='bottom',\n size='40',\n color=\"#FF5E5E\",\n backgroundColor=\"#000000\"\n )\n ])\n\n\n## lane 4\[email protected](\n Output('name4', 'children'),\n [Input('my-daq-numericinput', 'value')])\ndef update_lane4(value):\n carNum = races.loc[value,'Lane 4']\n name = racers.loc[carNum,'Name']\n return html.Div([\n html.H5(carNum ),\n html.H6(name),\n ], style={'background-color': 'grey','border': 'solid', 'text-align': 'center'})\n\n\n\[email protected](\n Output('time4', 'children'),\n [Input('my-daq-numericinput', 'value')])\ndef update_time4(value):\n #time = resultsdf[(resultsdf['race']==value) & (resultsdf['lane']==4)]['time']\n return html.Div([\n daq.LEDDisplay(\n id='lane4-leddisplay',\n value=0,\n label='Time',\n labelPosition='bottom',\n size='40',\n color=\"#FF5E5E\",\n backgroundColor=\"#000000\"\n )\n ])\n\n## Race Controls\n# 
# html.Button('Get Results', id='button-results'),\n# html.Button('Force Race End', id='button-end'),\n# html.Button('Next Race', id='button-next'),\n\n\[email protected](\n Output('next-race', 'children'),\n [Input('my-daq-numericinput', 'value')])\ndef update_nextrace(value):\n df = races.loc[value+1:value+3]\n return dash_table.DataTable(\n id='table',\n columns=[{\"name\": i, \"id\": i} for i in df.columns],\n data=df.to_dict('records')\n ) \n\n\n## Refresh leader board \[email protected](\n Output('leader-board', 'children'),\n [Input('interval-component','n_intervals'),\n Input('Refresh','n_clicks')])\ndef update_leaders(intervals,clicks):\n results_df = pd.read_csv('./resultsFile4016.csv')\n #rf = df[['name','time']]\n rf = results_df.groupby(['car', 'name'], as_index=False).agg({\"time\": \"mean\"}).sort_values('time', ascending=True)\n rf['time'] = rf['time'].map('{:,.3f}'.format)\n return dash_table.DataTable(\n id='leader-board',\n columns=[{\"name\": i, \"id\": i} for i in rf.columns],\n #columns=[['name','car','time']],\n data=rf.to_dict('records')\n #data=leader_df.to_dict('records')\n ) \n\n## Get Results\[email protected](\n [Output(component_id='results-button-output', component_property='children'),\n Output('lane1-leddisplay', 'value'),\n Output('lane2-leddisplay', 'value'),\n Output('lane3-leddisplay', 'value'),\n Output('lane4-leddisplay', 'value')],\n [Input(component_id='button-results', component_property='n_clicks'),\n #Input('my-daq-numericinput', 'value')\n ])\ndef update_times(clicks):\n laneresults = ['1=1.1704a', '2=5.4159b', '3=3.5462c', '4=3.7246d']\n # get race results\n command = 'rg'+'\\r\\n'\n newcommand = command.encode('ascii')\n #ser.write(newcommand)\n #results = ser.readline()\n #laneresults = results.decode().split()\n\n return clicks,laneresults[0][2:8],laneresults[1][2:8],laneresults[2][2:8],laneresults[3][2:8]\n\n\n#daq.StopButton(id='button-save', buttonText='Save Results'),\[email protected](\n 
Output('save-button-output', 'children'),\n [Input(component_id='button-save', component_property='n_clicks'),\n #Input('my-daq-numericinput', 'value')\n ],\n [State('my-daq-numericinput', 'value'),\n State('lane1-leddisplay', 'value'),\n State('lane2-leddisplay', 'value'),\n State('lane3-leddisplay', 'value'),\n State('lane4-leddisplay', 'value')\n ])\ndef update_save(clicks,current_race,time1,time2,time3,time4):\n resultsdf = pd.DataFrame(columns=['race', 'lane', 'car', 'name', 'time', 'place', 'den', 'category'])\n times = [time1,time2,time3,time4]\n race = {}\n for lane in [1,2,3,4]:\n car = races.iloc[current_race-1,lane-1]\n name = racers.loc[car,'Name']\n den = racers.loc[car,'Den']\n category = racers.loc[car,'Category']\n time = times[lane-1]\n place = 1\n\n # Create results json\n details = {'race' : current_race , 'lane' : int(lane), 'car': str(car), 'name': name, 'time': time, 'place': place, 'den': den, 'category': category} \n trackName = 'lane' + str(lane)\n race[trackName] = details\n \n # append to dg\n resultsdf = resultsdf.append(details, ignore_index=True)\n #resultsdf = pd.DataFrame(details)\n\n # write results \n resultsdf.to_csv('resultsFile4016.csv',mode='a', header=False)\n return 'Race '+ str(current_race) +' Saved...'\n \n\n\n# callback for race change\[email protected](\n Output('connect-results', 'children'),\n [Input('connect-button', 'n_clicks')])\ndef update_output(value):\n # connect to Track\n ser = serial.Serial('/dev/ttyUSB0', 9600, timeout = None)\n #ser.name = value\n return ser.name\n\n\n# callback for race change\[email protected](\n Output('lane-character', 'value'),\n [Input('lane-number', 'value')])\ndef update_output(value):\n return 'ol1'\n\n## Get Race Results\n#laneresults = ['1=1.1704a', '2=5.4159b', '3=3.5462c', '4=3.7246d']\n#@app.callback(\n# Output('lane-character', 'value'),\n# [Input('lane-number', 'value')])\n#def update_output(value):\n# laneresults = ['1=1.1704a', '2=5.4159b', '3=3.5462c', '4=3.7246d']\n# 
return 'ol1'\n\n\n\nif __name__ == '__main__':\n app.run_server(debug=True, host='0.0.0.0', port=8050)"
},
{
"alpha_fraction": 0.48795291781425476,
"alphanum_fraction": 0.4973330795764923,
"avg_line_length": 27.621051788330078,
"blob_id": "61b41ce75df72238cea254b34e32f6a800270ba9",
"content_id": "8694a2473c892618e24d068bb94c01c35103c2ef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5437,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 190,
"path": "/ARchive/app.py",
"repo_name": "meh132/derbydash",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nimport base64\nimport datetime\nimport io\n\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_table\nimport plotly.graph_objs as go\n\nimport pandas as pd\n#from track import *\n\n# Input and upload libary\nfrom dash.dependencies import Input, Output, State\n\n\ntest_df = pd.read_csv('./resultsFile.csv')\n\n# Genarte table\ndef generate_table(dataframe, max_rows=10):\n return html.Table(\n # Header\n [html.Tr([html.Th(col) for col in dataframe.columns])] +\n\n # Body\n [html.Tr([\n html.Td(dataframe.iloc[i][col]) for col in dataframe.columns\n ]) for i in range(min(len(dataframe), max_rows))]\n )\n\n\n# Pasrs CSV upload - upload races and racers\ndef parse_contents(contents, filename, date):\n content_type, content_string = contents.split(',')\n\n decoded = base64.b64decode(content_string)\n try:\n if 'Racers' in filename:\n # Assume that the user uploaded a CSV file\n racers = pd.read_csv(\n io.StringIO(decoded.decode('utf-8')))\n df = racers\n elif 'Races' in filename:\n # Assume that the user uploaded an excel file\n races = pd.read_csv(\n io.StringIO(decoded.decode('utf-8')))\n df = races\n elif 'csv' in filename:\n # Assume that the user uploaded an excel file\n results = pd.read_csv(\n io.StringIO(decoded.decode('utf-8'))) \n df = results \n results.to_csv('resultsFile.csv') \n except Exception as e:\n print(e)\n return html.Div([\n 'There was an error processing this file.'\n ])\n\n return html.Div([\n html.H5(filename),\n html.H6(datetime.datetime.fromtimestamp(date)),\n\n dash_table.DataTable(\n data=df.to_dict('records'),\n columns=[{'name': i, 'id': i} for i in df.columns],\n editable=True,\n filter_action=\"native\",\n sort_action=\"native\",\n row_deletable=True,\n selected_columns=[],\n selected_rows=[],\n page_action=\"native\",\n page_current= 0,\n page_size= 20,\n ),\n html.Hr(), # horizontal line\n\n # For debugging, display the raw contents provided by the web 
browser\n html.Div('Raw Content'),\n html.Pre(contents[0:200] + '...', style={\n 'whiteSpace': 'pre-wrap',\n 'wordBreak': 'break-all'\n })\n ])\n\n\n\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n\n\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\n\n\n# for dyanmic\napp.config['suppress_callback_exceptions'] = True\n\n\n# Set up Tabs\n\napp.layout = html.Div([\n dcc.Tabs(id=\"tabs\", value='tab-1', children=[\n dcc.Tab(label='Setup', value='tab-1'),\n dcc.Tab(label='Race', value='tab-2'),\n dcc.Tab(label='Results', value='tab-3'),\n ]),\n html.Div(id='tabs-content')\n])\n\n# Render Tabs\n\[email protected](Output('tabs-content', 'children'),\n [Input('tabs', 'value')])\ndef render_content(tab):\n if tab == 'tab-1':\n return html.Div([\n html.H3('Race Setup'),\n dcc.Upload(\n id='upload-data',\n children=html.Div([\n 'Drag and Drop or ',\n html.A('Select Files')\n ]),\n style={\n 'width': '80%',\n 'height': '60px',\n 'lineHeight': '60px',\n 'borderWidth': '1px',\n 'borderStyle': 'dashed',\n 'borderRadius': '5px',\n 'textAlign': 'center',\n 'margin': '10px'\n },\n multiple=True\n ),\n html.Div(id='output-data-upload'), \n ])\n elif tab == 'tab-2':\n return html.Div([\n html.H3('Tab content 2')\n ])\n elif tab == 'tab-3':\n return html.Div(children=[\n html.H1(children='Race Results'),\n html.H4(children='Pack Pinewood Derby'),\n generate_table(test_df),\n \n html.Div(children='''\n Dash: A web application framework for Python.\n '''),\n\n dcc.Graph(\n id='example-graph',\n figure={\n 'data': [\n {\n 'x': test_df['race'],\n 'y': test_df['time'],\n 'text': test_df['name'],\n 'mode': 'markers',\n 'marker': {'color': test_df['lane']}\n }\n ],\n\n }\n )\n ]\n )\n\n# Callback after upload\[email protected](Output('output-data-upload', 'children'),\n [Input('upload-data', 'contents')],\n [State('upload-data', 'filename'),\n State('upload-data', 'last_modified')])\ndef update_output(list_of_contents, list_of_names, 
list_of_dates):\n if list_of_contents is not None:\n children = [\n parse_contents(c, n, d) for c, n, d in\n zip(list_of_contents, list_of_names, list_of_dates)]\n return children\n\n\n\n\n\nif __name__ == '__main__':\n app.run_server(debug=True, host='0.0.0.0', port=8050)"
},
{
"alpha_fraction": 0.5309491157531738,
"alphanum_fraction": 0.5625859498977661,
"avg_line_length": 30.60869598388672,
"blob_id": "a438f88b1496a4fd2c109ed4bc5ddc467cad2fe1",
"content_id": "c31b2a5f9a718c4986f34b5291388db1f9699728",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 727,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 23,
"path": "/Tab4.py",
"repo_name": "meh132/derbydash",
"src_encoding": "UTF-8",
"text": "import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_table\n\n# Genarte table\ndef gen_tab2():\n return html.Div([\n html.Div(daq.LEDDisplay(\n id='my-daq-leddisplay2',\n value='3.102',\n label=\"Lane 2\",\n color=\"#FF5E5E\",\n backgroundColor=\"#A9A9A9\"\n ), style={'width': '24%', 'display': 'inline-block'}),\n html.H3(['Race '],style={'width': '80%', 'display': 'inline-block'}),\n html.H3(['Leader Board'],style={'width': '20%', 'display': 'inline-block'}),\n # Set up current race Drop down?\n # Show Current Race\n\n\n # Get race Results\n ], style={'width': '100%', 'display': 'inline-block'})\n"
},
{
"alpha_fraction": 0.4328692555427551,
"alphanum_fraction": 0.45058903098106384,
"avg_line_length": 31.81789207458496,
"blob_id": "9d407189e730e68ca62a552fcd1ae47e19087f9f",
"content_id": "5a43857b6bf6668655542112fca1f97fa1eed75a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10271,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 313,
"path": "/app.py",
"repo_name": "meh132/derbydash",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nimport base64\nimport datetime\nimport io\n\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_table\nimport dash_daq as daq\nfrom functions import *\nimport plotly.graph_objs as go\n\n# Input and upload libary\nfrom dash.dependencies import Input, Output, State\nimport pandas as pd\n\n\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\n#app.config.suppress_callback_exceptions = True\n#app.config['suppress_callback_exceptions'] = True\n\n# Set up Tabs\n\napp.layout = html.Div([\n dcc.Tabs(id=\"tabs\", value='tab-2', children=[\n dcc.Tab(label='Setup', value='tab-1'),\n dcc.Tab(label='Race', value='tab-2'),\n dcc.Tab(label='Results', value='tab-3'),\n ]),\n html.Div(id='tabs-content')\n])\n\n\ntab1markdown_text = '''\n ### Setup\n\n Need to set the lane time to have 4 places, \n place value to have Number instead of letter\n and \n\n '''\n\[email protected](Output('tabs-content', 'children'),\n [Input('tabs', 'value')])\ndef render_content(tab):\n if tab == 'tab-1':\n return html.Div([\n html.H2('Race Setup'),\n dcc.Upload(\n id='upload-data',\n children=html.Div([\n 'Drag and Drop or ',\n html.A('Select Files')\n ]),\n style={\n 'width': '80%',\n 'height': '60px',\n 'lineHeight': '60px',\n 'borderWidth': '1px',\n 'borderStyle': 'dashed',\n 'borderRadius': '5px',\n 'textAlign': 'center',\n 'margin': '10px'\n },\n multiple=True\n ),\n # connect usb\n html.Div(id='output-data-upload'),\n html.H2('Track Setup'),\n html.H3('Connect to Track'), \n daq.StopButton(id='connect-button', buttonText='Connect'),\n html.Div(id='connect-results'),\n # default settings\n dcc.Markdown(children='tab1markdown_text'),\n html.Button(id='setup-button'),\n html.H3(id='laneset-results'), \n html.H3(id='placeset-results'),\n html.H3(id='decimal-results'),\n html.H3(id='timeout-results'),\n ])\n\n\n elif tab == 
'tab-2':\n return html.Div(children=[\n dcc.Interval(\n id='interval-component',\n interval=10*1000, # in milliseconds\n n_intervals=0\n ),\n html.Div(id='main_cols',children=[\n html.Div(id='race', children=[\n daq.NumericInput(\n id='my-daq-numericinput',\n max=60,\n value=1,\n label='Race Number',\n labelPosition='top',\n min=1), \n html.Div(id='lanes', children=[\n html.H3('Lanes'), \n\n html.Div(id='lane1', children=[\n html.H3('Lane 1'),\n html.Div([\n html.H5(id='car1'),\n html.H4(id='name1'),\n ]),\n html.Div([\n daq.LEDDisplay(\n id='time1',\n value=0,\n label='Time',\n labelPosition='bottom',\n size='40',\n color=\"#FF5E5E\",\n backgroundColor=\"#000000\"\n )\n ])\n ]),\n html.Div(id='lane2', children=[\n html.H3('Lane 2'),\n html.Div([\n html.H5(id='car2'),\n html.H4(id='name2'),\n ]),\n html.Div([\n daq.LEDDisplay(\n id='time2',\n value=0,\n label='Time',\n labelPosition='bottom',\n size='40',\n color=\"#FF5E5E\",\n backgroundColor=\"#000000\"\n )\n ])\n ]),\n html.Div(id='lane3', children=[\n html.H3('Lane 3'),\n html.Div([\n html.H5(id='car3'),\n html.H4(id='name3'),\n ]),\n html.Div([\n daq.LEDDisplay(\n id='time3',\n value=0,\n label='Time',\n labelPosition='bottom',\n size='40',\n color=\"#FF5E5E\",\n backgroundColor=\"#000000\"\n )\n ])\n ]),\n html.Div(id='lane4', children=[\n html.H3('Lane 4'),\n html.Div([\n html.H5(id='car4'),\n html.H4(id='name4'),\n ]),\n html.Div([\n daq.LEDDisplay(\n id='time4',\n value=0,\n label='Time',\n labelPosition='bottom',\n size='40',\n color=\"#FF5E5E\",\n backgroundColor=\"#000000\"\n )\n ])\n ]),\n ]), \n daq.StopButton(id='button-results', buttonText='Get Results'),\n html.Div(id='results-button-output'),\n daq.StopButton(id='button-save', buttonText='Save Results'),\n html.Div(id='save-button-output'),\n daq.StopButton(buttonText='Refresh leaderboard', id='Refresh'),\n\n html.H4('Upcoming Races', className=\"eight column\"), \n html.Div(id='next-race')], \n style={'font-size': '200%', 'margin': '80px' , 
'text-align': 'center'},\n className=\"eight columns\")], \n # second column \n html.Div(children=[\n html.H4('Fastest Single Race'),\n html.Div(id='leader-board',\n className=\"three columns\", \n style={'font-size': '150%','border': 'solid'})\n ])\n )\n ])\n \n elif tab == 'tab-3':\n return html.Div(children=[\n gen_tab3() # each tab reloads the csv file\n \n ])\n\n## CAllbacks for Race\n\n## update racer info\n## Lane 2\[email protected](\n [Output('car1', 'children'),\n Output('name1', 'children'),\n Output('car2', 'children'),\n Output('name2', 'children'),\n Output('car3', 'children'),\n Output('name3', 'children'),\n Output('car4', 'children'),\n Output('name4', 'children')],\n [Input('my-daq-numericinput', 'value')])\ndef update_racers(value):\n car1 = races.loc[value,'Lane 1']\n name1 = racers.loc[car1,'Name']\n car2 = races.loc[value,'Lane 2']\n name2 = racers.loc[car2,'Name']\n car3 = races.loc[value,'Lane 3']\n name3 = racers.loc[car3,'Name']\n car4 = races.loc[value,'Lane 4']\n name4 = racers.loc[car4,'Name']\n return car1,name1,car2,name2,car3,name3 car4,name4\n\n\n\n\n\n\n## Next race details\[email protected](\n Output('next-race', 'children'),\n [Input('my-daq-numericinput', 'value')])\ndef update_nextrace(value):\n df = races.loc[value+1:value+3]\n return dash_table.DataTable(\n id='table',\n columns=[{\"name\": i, \"id\": i} for i in df.columns],\n data=df.to_dict('records')\n ) \n\n\n\n\n## Callbacks for Track Setup tab\n\n# Callback after upload \[email protected](Output('output-data-upload', 'children'),\n [Input('upload-data', 'contents')],\n [State('upload-data', 'filename'),\n State('upload-data', 'last_modified')])\ndef update_uploads(list_of_contents, list_of_names, list_of_dates):\n if list_of_contents is not None:\n # for testing load results\n #Plotlyresultsdf = pd.read_csv('./resultsFile.csv')\n # Read from CSV to load Races\n #races = pd.read_csv('Races.csv')\n #races = races.set_index('Race Number')\n # Read list of racers and cars 
from CSV\n #racers = pd.read_csv('Racers.csv')\n #racers = racers.set_index('Number')\n children = [\n parse_contents(c, n, d) for c, n, d in\n zip(list_of_contents, list_of_names, list_of_dates)]\n return children\n\n\n# callback for track connect\[email protected](\n Output('connect-results', 'children'),\n [Input('connect-button', 'n_clicks')])\ndef connect__usb(value):\n ser = serial.Serial('/dev/ttyUSB0', 9600, timeout = None)\n\n return ser.name\n\n html.Button(id='setup-button'),\n html.H3(id='laneset-results'), \n html.H3(id='placeset-results'),\n\n## CAll back for track set\[email protected](\n [Output('laneset-results', 'children'),\n Output('placeset-results', 'children'),\n Output('decimal-results', 'children'),\n Output('timeout-results', 'children')],\n [Input('setup-button', 'n_clicks')])\ndef setup_output(value):\n laneset = serialwrite('on4')\n placeset = serialwrite('ol1')\n decimalset = serialwrite('on4')\n timeoutset = serialwrite('or')\n return laneset,placeset,decimalset\n\n\ndef serialwrite(code):\n command = code +'\\r\\n'\n newcommand = command.encode('ascii')\n ser.write(newcommand)\n results = ser.readline()\n #laneresults = results.decode().split()\n #print(laneresults)\n return results.decode()\n\n\n\nif __name__ == '__main__':\n app.run_server(debug=True, host='0.0.0.0', port=8050)"
},
{
"alpha_fraction": 0.5069324374198914,
"alphanum_fraction": 0.5242634415626526,
"avg_line_length": 29.105262756347656,
"blob_id": "d953a3d1426d0925c16e4c32729ae997c82b573b",
"content_id": "5aa48077a1c37c4f300acc7b76c00c3f6c6258b6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1154,
"license_type": "no_license",
"max_line_length": 132,
"num_lines": 38,
"path": "/Tab3.py",
"repo_name": "meh132/derbydash",
"src_encoding": "UTF-8",
"text": "import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_table\nfrom functions import *\nimport pandas as pd\n\n# Genarte table\ndef gen_tab3():\n results_df = pd.read_csv('resultsFile4016.csv')\n #rf = df[['name','time']]\n rf = results_df.groupby(['den','car', 'name'], as_index=False).agg({\"time\": \"mean\"}).sort_values(['den','time'], ascending=True)\n rf['time'] = rf['time'].map('{:,.3f}'.format)\n return dash_table.DataTable(\n id='final-board',\n columns=[{\"name\": i, \"id\": i} for i in rf.columns],\n #columns=[['name','car','time']],\n data=rf.to_dict('records'),\n style_cell_conditional=[\n {\n 'if': {'column_id': c},\n 'textAlign': 'left'\n } for c in ['Date', 'Region']\n ],\n style_data_conditional=[\n {\n 'if': {'row_index': 'odd'},\n 'backgroundColor': 'rgb(248, 248, 248)'\n },\n {\n 'if': {'column_id': 'time'},\n 'backgroundColor': '#3D9970',\n 'color': 'white',\n }\n ],\n style_as_list_view=True,\n #data=leader_df.to_dict('records')\n ) \n "
},
{
"alpha_fraction": 0.5169752836227417,
"alphanum_fraction": 0.5246913433074951,
"avg_line_length": 33.105262756347656,
"blob_id": "56b1547e4b31b5225f42887cc6388648caa00730",
"content_id": "a690b5656986e56e650f374ac6bab211c858dd3d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1944,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 57,
"path": "/ARchive/Tab2012820.py",
"repo_name": "meh132/derbydash",
"src_encoding": "UTF-8",
"text": "import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_daq as daq\nimport dash_table\nfrom functions import *\n\n\n# generate leader board\n#def gen_leaderboard():\n # den_results = resultsdf.groupby(['den','car','name'])\n # .agg({'car':'size', 'time':'mean'})\n # .rename(columns={'car':'Races Completed'})\n #.sort_values('time', ascending=True)\n\n#current-race = 1\n\n\n# Genarte table\ndef gen_tab2(results_df):\n return html.Div(id='main_cols',children=[\n html.Div(id='race', children=[\n daq.NumericInput(\n id='my-daq-numericinput',\n max=60,\n value=1,\n label='Race Number',\n labelPosition='top',\n min=1), \n html.Div(id='lanes', children=[\n html.H3('Lanes'),\n #html.H3(results_df.loc['race']),\n\n html.Div(id='lane1', className=\"three columns\"),\n html.Div(id='lane2', className=\"three columns\"),\n html.Div(id='lane3', className=\"three columns\"),\n html.Div(id='lane4', className=\"two columns\"),\n ], \n className=\"twelve columns\",\n style={'border': 'solid', 'text-align': 'center'}),\n html.Div(id='race-controls', children=[\n html.Button('Get Results', id='button-results'),\n html.Button('Force Race End', id='button-end'),\n html.Button('Next Race', id='button-next'),\n ], \n style={'margin': '80px' , 'text-align': 'center'},\n className=\"ten columns\"), \n ], className=\"nine columns\"),\n html.Div(id='leader-board', children=[\n html.H3('Fastest Average Time'),\n #html.Table([html.Tr(['Name ','Den ', 'Time '])]),\n generate_table(results_df),\n ],\n className=\"two columns\", \n style={'border': 'solid'}),\n ],\n )\n"
},
{
"alpha_fraction": 0.5653029084205627,
"alphanum_fraction": 0.5723839402198792,
"avg_line_length": 27.863636016845703,
"blob_id": "e9c73e4a426f855d7db87012a59db43a9a6ed1a4",
"content_id": "819ff569a1c3b5b9901561fdec9d03fbd9bdc363",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2542,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 88,
"path": "/load.py",
"repo_name": "meh132/derbydash",
"src_encoding": "UTF-8",
"text": "\nimport base64\nimport datetime\nimport io\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_table\nimport plotly.graph_objs as go\nimport pandas as pd\n\n\n### todo\n# if racers and races don't exist -- \n\n\n## Load Exisitng ?\n\n\n# for testing load results\n#resultsdf = pd.read_csv('./resultsFile.csv')\n#resultsdf = pd.DataFrame(columns=['race', 'lane', 'car', 'name', 'time', 'place', 'den', 'category'])\n\n# Read from CSV to load Races\nraces = pd.read_csv('Races.csv')\nraces = races.set_index('Race Number')\n\n\n# Read list of racers and cars from CSV\n\nracers = pd.read_csv('Racers.csv')\nracers = racers.set_index('Number')\n\n\n#Load New\n# Pasrs CSV upload - upload races and racers\ndef parse_contents(contents, filename, date):\n content_type, content_string = contents.split(',')\n\n decoded = base64.b64decode(content_string)\n try:\n if 'Racers' in filename:\n # Assume that the user uploaded a CSV file\n racers = pd.read_csv(\n io.StringIO(decoded.decode('utf-8')))\n df = racers\n elif 'Races' in filename:\n # Assume that the user uploaded an excel file\n races = pd.read_csv(\n io.StringIO(decoded.decode('utf-8')))\n df = races\n elif 'Results' in filename:\n # Assume that the user uploaded an excel file\n results = pd.read_csv(\n io.StringIO(decoded.decode('utf-8'))) \n df = results \n #resultsdf = results\n #results.to_csv('resultsFile.csv') \n except Exception as e:\n print(e)\n return html.Div([\n 'There was an error processing this file.'\n ])\n return html.Div([\n html.H5(filename),\n html.H6(datetime.datetime.fromtimestamp(date)),\n\n dash_table.DataTable(\n data=df.to_dict('records'),\n columns=[{'name': i, 'id': i} for i in df.columns],\n editable=True,\n filter_action=\"native\",\n sort_action=\"native\",\n row_deletable=True,\n selected_columns=[],\n selected_rows=[],\n page_action=\"native\",\n page_current= 0,\n page_size= 20,\n ),\n html.Hr(), # horizontal line\n\n # For debugging, 
display the raw contents provided by the web browser\n html.Div('Raw Content'),\n html.Pre(contents[0:200] + '...', style={\n 'whiteSpace': 'pre-wrap',\n 'wordBreak': 'break-all'\n })\n ])\n\n"
},
{
"alpha_fraction": 0.45816025137901306,
"alphanum_fraction": 0.4783382713794708,
"avg_line_length": 29.987653732299805,
"blob_id": "d7c87d857d1e064cb41f3ec059d8caa899e8582b",
"content_id": "3e0dae7821bddf57c688612a8bf9e7f069cbc05d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5055,
"license_type": "no_license",
"max_line_length": 147,
"num_lines": 162,
"path": "/ARchive/app118.py",
"repo_name": "meh132/derbydash",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_table\nimport pandas as pd\nimport dash_daq as daq\nimport plotly.graph_objs as go\n\n\n\n\n\nresults = pd.read_csv('./resultsFile.csv')\nresults['carstring'] = results['car'].astype('str')\n#df.sort_values(by=['col1', 'col2'])\nallresults = results.sort_values(by=['time'], ascending=True)\n\n#results = allresults[['name','car','time']].\ntopresults = allresults.loc[1:5,['name','car','time']]\n\n#avg = allresults.groupby(['car','name','lane']).agg({'car':'size', 'time':'mean'}).rename(columns={'car':'Races Completed'}) .reset_index()\n\n#avgtimes = avg.to_dict(orient='records')\n\n# Read from CSV to load Races\nraces = pd.read_csv('Races.csv')\nraces = races.set_index('Race Number')\nn_clicks = 1\n\n# Read list of racers and cars from CSV\n\nracers = pd.read_csv('Racers.csv')\nracers = racers.set_index('Number')\n\ncurrent_contestants = races.loc[n_clicks]\nspeed = lambda x: x\n\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\n\napp.layout = html.Div(children=[\n html.H1(children='Hello Dash', style={'width': '100%', 'textAlign': 'center'\n }),\n\n html.Div(children='''\n Dash: A web application framework for Python.\n '''),\n html.Div(children=[\n dcc.Graph(\n figure=go.Figure(\n data=[\n go.Bar(\n y=allresults[allresults.lane==1]['name'],\n x=allresults[allresults.lane==1]['time'].apply(speed),\n text=allresults[allresults.lane==1]['name'],\n name='Lane 1',\n orientation='h',\n marker=go.bar.Marker(\n color='red'\n )\n ),\n go.Bar(\n \n y=allresults[allresults.lane==2]['name'],\n x=allresults[allresults.lane==2]['time'].apply(speed),\n name='Lane 2',\n orientation='h',\n marker=go.bar.Marker(\n color='blue'\n )\n ),\n go.Bar(\n \n y=allresults[allresults.lane==3]['name'],\n x=allresults[allresults.lane==3]['time'].apply(speed),\n name='Lane 
3',\n orientation='h',\n marker=go.bar.Marker(\n color='green'\n )\n ),\n go.Bar(\n \n y=allresults[allresults.lane==4]['name'],\n x=allresults[allresults.lane==4]['time'].apply(speed),\n name='Lane 4',\n orientation='h',\n marker=go.bar.Marker(\n color='yellow'\n )\n )\n ],\n layout=go.Layout(\n title='Race Results',\n\n showlegend=True,\n barmode='stack',\n legend=go.layout.Legend(\n x=1.0,\n y=0.0\n ),\n margin=go.layout.Margin(l=40, r=0, t=40, b=30)\n )\n ),\n style={'width': '20%', 'display': 'inline-block'},\n id='my-graph'\n ),\n html.Div(children=[\n html.Div(daq.LEDDisplay(\n id='my-daq-leddisplay1',\n value='3.102',\n label=\"Lane 1\",\n color=\"#FF5E5E\",\n backgroundColor=\"#A9A9A9\"\n ),style={'width': '20%','display': 'inline-block'}),\n html.Div(daq.LEDDisplay(\n id='my-daq-leddisplay2',\n value='3.102',\n label=\"Lane 2\",\n color=\"#FF5E5E\",\n backgroundColor=\"#A9A9A9\"\n ),style={'width': '20%','display': 'inline-block'}),\n html.Div(daq.LEDDisplay(\n id='my-daq-leddisplay3',\n value='3.102',\n label=\"Lane 3\",\n color=\"#FF5E5E\",\n backgroundColor=\"#A9A9A9\"\n ),style={'width': '20%','display': 'inline-block'}),\n html.Div(daq.LEDDisplay(\n id='my-daq-leddisplay4',\n value='3.502',\n label=\"Lane 4\",\n color=\"#FF5E5E\",\n backgroundColor=\"#A9A9A9\"\n ),style={'width': '20%','display': 'inline-block'})], style={'width': '100%','display': 'inline-block'}\n )\n ], \n style={'width': '100%', 'display': 'inline-block', 'align': 'top'}),\n dash_table.DataTable(id='table',\n columns=[{\"name\": i, \"id\": i} for i in topresults.columns],\n data=allresults.to_dict(\"rows\"),\n ),\n])\n\n\n\nif __name__ == '__main__':\n app.run_server(host='0.0.0.0',debug=True,port=8050)\n\n\n\n\nfig = go.Figure(data=go.Scatter(x=test_df['race'],\n y=test_df['time'],\n mode='markers',\n marker_color=test_df['lane']\n text=test_df['name']))\n fig.show() \n ])\n\n\n \n "
},
{
"alpha_fraction": 0.45830202102661133,
"alphanum_fraction": 0.4773581027984619,
"avg_line_length": 33.61465835571289,
"blob_id": "c6354bfed42f252d8cd081ccbf25afd23e83d494",
"content_id": "f7d75d3d498cefb733532b6e248c0fb12d1d83c9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14641,
"license_type": "no_license",
"max_line_length": 158,
"num_lines": 423,
"path": "/derby_new.py",
"repo_name": "meh132/derbydash",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nimport base64\nimport datetime\nimport io\n\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_table\nimport dash_daq as daq\nfrom functions import *\nimport plotly.graph_objs as go\nimport serial\n\n# Input and upload libary\nfrom dash.dependencies import Input, Output, State\nimport pandas as pd\n\nser = serial.Serial('/dev/ttyUSB0', 9600, timeout = None)\n#resultsdf = pd.read_csv('resultsFile.csv')\nresultsdf = pd.DataFrame(columns=['race', 'lane', 'car', 'name', 'time', 'place', 'den', 'category'])\n\n# Read from CSV to load Races\nraces = pd.read_csv('Races.csv')\nraces = races.set_index('Race Number')\n\n\n# Read list of racers and cars from CSV\n\nracers = pd.read_csv('Racers.csv')\nracers = racers.set_index('Number')\n\n\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\n#app.config.suppress_callback_exceptions = True\napp.config['suppress_callback_exceptions'] = True\n\n# Set up Tabs\n\napp.layout = html.Div([\n dcc.Tabs(id=\"tabs\", value='tab-2', children=[\n dcc.Tab(label='Setup', value='tab-1'),\n dcc.Tab(label='Race', value='tab-2'),\n dcc.Tab(label='Results', value='tab-3'),\n ]),\n html.Div(id='tabs-content')\n])\n\n\ntab1markdown_text = '''\n ### Setup\n\n Need to set the lane time to have 4 places, \n place value to have Number instead of letter\n and \n\n '''\n\[email protected](Output('tabs-content', 'children'),\n [Input('tabs', 'value')])\ndef render_content(tab):\n if tab == 'tab-1':\n return html.Div([\n html.H2('Race Setup'),\n dcc.Upload(\n id='upload-data',\n children=html.Div([\n 'Drag and Drop or ',\n html.A('Select Files')\n ]),\n style={\n 'width': '80%',\n 'height': '60px',\n 'lineHeight': '60px',\n 'borderWidth': '1px',\n 'borderStyle': 'dashed',\n 'borderRadius': '5px',\n 'textAlign': 'center',\n 'margin': '10px'\n },\n multiple=True\n ),\n # connect usb\n 
html.Div(id='output-data-upload'),\n html.H2('Track Setup'),\n html.H3('Connect to Track'), \n daq.StopButton(id='connect-button', buttonText='Connect'),\n html.Div(id='connect-results'),\n # default settings\n dcc.Markdown(children=tab1markdown_text),\n html.Button(id='setup-button'),\n html.H3(id='laneset-results'), \n html.H3(id='placeset-results'),\n html.H3(id='decimal-results'),\n html.H3(id='timeout-results'),\n ])\n\n\n elif tab == 'tab-2':\n return html.Div(children=[\n dcc.Interval(\n id='interval-component',\n interval=10*1000, # in milliseconds\n n_intervals=0\n ),\n html.Div(id='main_cols',children=[\n html.Div(id='race', children=[\n daq.NumericInput(\n id='my-daq-numericinput',\n max=60,\n value=1,\n label='Race Number',\n labelPosition='top',\n min=1),\n daq.ToggleSwitch(\n id='edit',\n label='Editable',\n value=False),\n html.Div(id='lanes', children=[\n html.H3('Lanes'), \n\n html.Div(id='lane1', children=[\n html.H3('Lane 1'),\n html.Div([\n daq.LEDDisplay(\n id='time1',\n value=0,\n label='Time',\n labelPosition='bottom',\n size='40',\n color=\"#FF5E5E\",\n backgroundColor=\"#000000\"\n )\n ]),\n html.Div([\n html.H5(id='car1'),\n html.H6(id='name1'),\n ]) \n ], className = \"three columns\"),\n html.Div(id='lane2', children=[\n html.H3('Lane 2'),\n html.Div([\n daq.LEDDisplay(\n id='time2',\n value=0,\n label='Time',\n labelPosition='bottom',\n size='40',\n color=\"#FF5E5E\",\n backgroundColor=\"#000000\"\n ),\n html.Div([\n html.H5(id='car2'),\n html.H6(id='name2'),\n ]),\n ])\n ], className = \"two columns\"),\n html.Div(id='lane3', children=[\n html.H3('Lane 3'),\n html.Div([\n daq.LEDDisplay(\n id='time3',\n value=0,\n label='Time',\n labelPosition='bottom',\n size='40',\n color=\"#FF5E5E\",\n backgroundColor=\"#000000\"\n )\n ]),\n html.Div([\n html.H5(id='car3'),\n html.H6(id='name3'),\n ]), \n ], className = \"two columns\"),\n html.Div(id='lane4', children=[\n html.H3('Lane 4'),\n html.Div([\n daq.LEDDisplay(\n id='time4',\n value=0,\n 
label='Time',\n labelPosition='bottom',\n size='40',\n color=\"#FF5E5E\",\n backgroundColor=\"#000000\"\n ),\n html.Div([\n html.H5(id='car4'),\n html.H6(id='name4'),\n ]), \n ])\n ], className = \"two columns\"),\n ],className = \"twelve columns\"), \n html.Hr(id='line'),\n html.H4('Upcoming Races', className=\"eight column\"), \n html.Div(id='next-race')], \n style={'font-size': '200%', 'margin': '80px' , 'text-align': 'center'},\n className=\"eight columns\")]), \n # second column \n html.Div(children=[\n \n daq.StopButton(id='button-results', buttonText='Get Results'),\n html.Div(id='results-button-output'),\n daq.StopButton(id='button-save', buttonText='Save Results'),\n html.Div(id='save-button-output'),\n #daq.StopButton(buttonText='Refresh leaderboard', id='Refresh'),\n html.H4('Fastest Single Race'),\n #generate_table(results_df, max_rows=15)\n #dash_table.DataTable(\n # id='leadtable',\n # columns=[{\"name\": i, \"id\": i} for i in resultsdf.columns],\n #data=resultsdf.to_dict('records')\n # ), \n ])\n \n ])\n \n elif tab == 'tab-3':\n #global results_df\n rf = resultsdf.groupby(['den','car', 'name'], as_index=False).agg({\"time\": \"mean\"}).sort_values(['den','time'], ascending=True)\n #rf['time'] = rf['time'].map('{:,.3f}'.format)\n return dash_table.DataTable(\n id='final-board',\n columns=[{\"name\": i, \"id\": i} for i in rf.columns],\n #columns=[['name','car','time']],\n data=rf.to_dict('records'),\n style_data_conditional=[\n {'if': {'row_index': 'odd'},\n 'backgroundColor': 'rgb(248, 248, 248)'\n },\n {'if': {'column_id': 'time'},\n 'backgroundColor': '#3D9970',\n 'color': 'white',\n }\n ],\n style_as_list_view=True,\n )\n \n\n## CAllbacks for Race\n\n## update racer info\n## Lane 2\[email protected](\n [Output('car1', 'children'),\n Output('name1', 'children'),\n Output('car2', 'children'),\n Output('name2', 'children'),\n Output('car3', 'children'),\n Output('name3', 'children'),\n Output('car4', 'children'),\n Output('name4', 'children'),\n 
Output('edit','value') ],\n [Input('my-daq-numericinput', 'value')])\ndef update_racers(value):\n car1 = races.loc[value,'Lane 1']\n name1 = racers.loc[car1,'Name']\n car2 = races.loc[value,'Lane 2']\n name2 = racers.loc[car2,'Name']\n car3 = races.loc[value,'Lane 3']\n name3 = racers.loc[car3,'Name']\n car4 = races.loc[value,'Lane 4']\n name4 = racers.loc[car4,'Name']\n return car1,name1,car2,name2,car3,name3,car4,name4,False\n\n\n### update times\[email protected](\n [Output('time1', 'value'),\n Output('time2', 'value'),\n Output('time3', 'value'),\n Output('time4', 'value')],\n [Input('button-results', 'n_clicks'),\n Input('my-daq-numericinput', 'value')],\n [State('edit', 'value')])\ndef update_times(clicks,value,edit):\n if edit:\n #laneresults = ['1=1.1212a','2=1.2323b','3=1.1234c','4=1.4321d']\n \n laneresults = serialwrite('rg')\n #time1=1\n time1=laneresults[0][2:8]\n time2=laneresults[1][2:8]\n time3=laneresults[2][2:8]\n time4=laneresults[3][2:8]\n \n else:\n #time1=0\n time1=resultsdf[(resultsdf['race']==value) & (resultsdf['lane']==1)]['time']\n time2=resultsdf[(resultsdf['race']==value) & (resultsdf['lane']==2)]['time']\n time3=resultsdf[(resultsdf['race']==value) & (resultsdf['lane']==3)]['time']\n time4=resultsdf[(resultsdf['race']==value) & (resultsdf['lane']==4)]['time']\n return time1,time2,time3,time4\n\n## Save Results\[email protected](\n #[\n Output('save-button-output', 'children'),\n #Output('leadtable', 'data')],\n [Input(component_id='button-save', component_property='n_clicks'),\n ],\n [State('my-daq-numericinput', 'value'),\n State('time1', 'value'),\n State('time2', 'value'),\n State('time3', 'value'),\n State('time4', 'value'),\n State('edit', 'value')\n ])\ndef save_output(clicks,current_race,time1,time2,time3,time4,edit):\n #resultsdf = pd.DataFrame(columns=['race', 'lane', 'car', 'name', 'time', 'place', 'den', 'category'])\n global resultsdf\n times = [time1,time2,time3,time4]\n race = {}\n for lane in [1,2,3,4]:\n car = 
races.iloc[current_race-1,lane-1]\n name = racers.loc[car,'Name']\n den = racers.loc[car,'Den']\n category = racers.loc[car,'Category']\n time = times[lane-1]\n place = 1\n\n # Create results json\n details = {'race' : current_race , 'lane' : int(lane), 'car': str(car), 'name': name, 'time': time, 'place': place, 'den': den, 'category': category} \n trackName = 'lane' + str(lane)\n race[trackName] = details\n \n # append to dg\n resultsdf = resultsdf.append(details, ignore_index=True)\n if edit:\n # write results \n resultsdf.to_csv('newResults.csv')\n message = 'Race '+ str(current_race) +' Saved...' # write results \n #leader_df = results_df.groupby(['car', 'name'], as_index=False).agg({\"time\": \"mean\"}).sort_values('time', ascending=True)\n #leader_df['time'] = leader_df['time'].map('{:,.3f}'.format) \n #data = leader_df.to_dict('records')\n \n else:\n message = 'cannot save'\n\n #resultsdf.to_csv('resultsFile.csv',mode='a', header=False)\n return message#,data\n\n\n## Next race details\[email protected](\n Output('next-race', 'children'),\n [Input('my-daq-numericinput', 'value')])\ndef update_nextrace(value):\n df = races.loc[value+1:value+3]\n return dash_table.DataTable(\n id='table',\n columns=[{\"name\": i, \"id\": i} for i in df.columns],\n data=df.to_dict('records')\n ) \n\n\n\n\n## Callbacks for Track Setup tab\n\n# Callback after upload \[email protected](Output('output-data-upload', 'children'),\n [Input('upload-data', 'contents')],\n [State('upload-data', 'filename'),\n State('upload-data', 'last_modified')])\ndef update_uploads(list_of_contents, list_of_names, list_of_dates):\n if list_of_contents is not None:\n # for testing load results\n #Plotlyresultsdf = pd.read_csv('./resultsFile.csv')\n # Read from CSV to load Races\n #races = pd.read_csv('Races.csv')\n #races = races.set_index('Race Number')\n # Read list of racers and cars from CSV\n #racers = pd.read_csv('Racers.csv')\n #racers = racers.set_index('Number')\n children = [\n 
parse_contents(c, n, d) for c, n, d in\n zip(list_of_contents, list_of_names, list_of_dates)]\n return children\n\n\n# callback for track connect\[email protected](\n Output('connect-results', 'children'),\n [Input('connect-button', 'n_clicks')])\ndef connect__usb(value):\n ser = serial.Serial('/dev/ttyUSB0', 9600, timeout = None)\n\n return ser.name\n\n\n## CAll back for track set\[email protected](\n [Output('laneset-results', 'children'),\n Output('placeset-results', 'children'),\n Output('decimal-results', 'children'),\n Output('timeout-results', 'children')],\n [Input('setup-button', 'n_clicks')])\ndef setup_output(value):\n laneset = serialwrite('on4')\n placeset = serialwrite('ol1')\n decimalset = serialwrite('on4')\n timeoutset = serialwrite('or')\n return laneset,placeset,decimalset,timeoutset\n\n\ndef serialwrite(code):\n command = code +'\\r\\n'\n newcommand = command.encode('ascii')\n ser.write(newcommand)\n results = ser.readline()\n #laneresults = results.decode().split()\n #print(laneresults)\n return results.decode().split()\n\n\n\nif __name__ == '__main__':\n app.run_server(debug=True, host='0.0.0.0', port=8050)"
},
{
"alpha_fraction": 0.6436949968338013,
"alphanum_fraction": 0.6686217188835144,
"avg_line_length": 29.33333396911621,
"blob_id": "e398b8fe7fc7e2f50f6f196f2559d7a930550e90",
"content_id": "501a07a4cbe74637740626c0fa6b230403674217",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1364,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 45,
"path": "/grid.py",
"repo_name": "meh132/derbydash",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nimport base64\nimport datetime\nimport io\n\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_table\nimport plotly.graph_objs as go\n\nimport pandas as pd\n#from track import *\n#from Tab1 import *\n#from Tab2 import *\n#from Tab3 import *\n\n\n#external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css','./derby.css']\n\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\n\n\n# for dyanmic\n#app.config['suppress_callback_exceptions'] = True\n\n\n# Set up Tabs\n\napp.layout = html.Div(className=\"grid-container\", children = [\n html.Div(id='Header', className='Header', children = [html.H2('Header')] ),\n html.Div(id='Race-Number', className='Race-Number', children = [html.H2('Race Number')] ),\n html.Div(id='Lane-1', className='Lane-1', children = [html.H2('Lane 1')] ),\n html.Div(id='Lane-2', className='Lane-2', children = [html.H2('Lane 2')] ),\n html.Div(id='Lane-3', className='Lane-3', children = [html.H2('Lane 3')] ),\n html.Div(id='Lane-4', className='Lane-4', children = [html.H2('Lane 4')] ),\n html.H2('Leaders', className=\"Leaders\"),\n html.H2('Race Controls', className=\"Race-Controls\"),\n ])\n\n\nif __name__ == '__main__':\n app.run_server(debug=True, host='0.0.0.0', port=8050)"
},
{
"alpha_fraction": 0.78125,
"alphanum_fraction": 0.8080357313156128,
"avg_line_length": 23.77777862548828,
"blob_id": "ff23f8f748da677e664122c6e67e83b4062888b7",
"content_id": "a96feda9b5ff3398f43975ba871a6de2aad33787",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 224,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 9,
"path": "/README.md",
"repo_name": "meh132/derbydash",
"src_encoding": "UTF-8",
"text": "# derbydash\n\nTrack control app using python and Plotly Dash\n\nTrack interface is described here: \nhttps://www.etekgadget.com/Manuals/SmartLineCommandManual.PDF\n\n\nhttp://www.besttrack.com/Champ%20Timer%20Command%20Manual.pdf\n\n"
},
{
"alpha_fraction": 0.5266251564025879,
"alphanum_fraction": 0.5486399531364441,
"avg_line_length": 31.992395401000977,
"blob_id": "f5f721f43541a3a35c576c602fb5d529cd1bdf78",
"content_id": "638a918c5472e09f0d1eb6c58bbf7d8ed1b731c8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8676,
"license_type": "no_license",
"max_line_length": 271,
"num_lines": 263,
"path": "/ARchive/app1212.py",
"repo_name": "meh132/derbydash",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nimport base64\nimport datetime\nimport io\n\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_table\nimport plotly.graph_objs as go\n\nimport pandas as pd\n#from track import *\nfrom Tab1 import *\nfrom Tab2 import *\nfrom Tab3 import *\nfrom load import *\n\n# Input and upload libary\nfrom dash.dependencies import Input, Output, State\n\n\n## Colors for winners\ncolors = [\"#F0FFF0\",\"#ffd700\",\"#c0c0c0\",\"#cd7f32\",\"#F0FFF0\"]\n\n\n#leader_df = resultsdf\n#leader_df = resultsdf.groupby(['den','car','name']).agg({'car':'size', 'time':'mean'}).rename(columns={'car':'Races Completed'}).sort_values('time', ascending=True)\n#leader_df = resultsdf.groupby('car').apply(lambda x: x.drop([x['time'].idxmax()])).rename_axis(['time','time']).groupby('car').agg({'car':'size', 'time':'mean'}).rename(columns={'car':'Races Completed','time':'Average Time'}).sort_values('Average Time', ascending=True)\n#leader_df = resultsdf.groupby(['den','car','name']).agg({'car':'size', 'time':'mean'}).rename(columns={'car':'Races Completed'}).sort_values(['den','time'], ascending=False)\nleader_df = resultsdf.groupby(['car', 'name'], as_index=False).agg({\"time\": \"mean\"}).sort_values('time', ascending=True)\n#leader_df2 = leader_df.map('{:,.2f}'.format)\nleader_df['time'] = leader_df['time'].map('{:,.3f}'.format)\nleader1_df = resultsdf.groupby('car') \\\n .apply(lambda x: x.drop([x['time'].idxmax()]))\\\n .rename_axis(['time','time'])\\\n .groupby('car' )\\\n .agg({'car':'size', 'time':'mean'}) \\\n .rename(columns={'car':'Races Completed','time':'Average Time'}) \\\n .sort_values('Average Time', ascending=True)\n\n\n\n\n\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n\n\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\n#app.config.suppress_callback_exceptions = True\n\n# for dyanmic\napp.config['suppress_callback_exceptions'] = True\n\n\n# Set up 
Tabs\n\napp.layout = html.Div([\n dcc.Tabs(id=\"tabs\", value='tab-2', children=[\n dcc.Tab(label='Setup', value='tab-1'),\n dcc.Tab(label='Race', value='tab-2'),\n dcc.Tab(label='Results', value='tab-3'),\n ]),\n html.Div(id='tabs-content')\n])\n\n# Render Tabs\n\[email protected](Output('tabs-content', 'children'),\n [Input('tabs', 'value')])\ndef render_content(tab):\n if tab == 'tab-1':\n return html.Div([\n gen_tab1()\n\n ])\n elif tab == 'tab-2':\n return html.Div(children=[\n gen_tab2(leader_df)\n ])\n \n elif tab == 'tab-3':\n return html.Div(children=[\n gen_tab3(resultsdf)\n \n ])\n\n# Callback after upload\[email protected](Output('output-data-upload', 'children'),\n [Input('upload-data', 'contents')],\n [State('upload-data', 'filename'),\n State('upload-data', 'last_modified')])\ndef update_output(list_of_contents, list_of_names, list_of_dates):\n if list_of_contents is not None:\n children = [\n parse_contents(c, n, d) for c, n, d in\n zip(list_of_contents, list_of_names, list_of_dates)]\n return children\n\n# Connect Call back should read and set statuses of radio buttons\n\n\n# callback for race change\n# update cars for a lane, lanes will pick up their value\n### how do we update more values than children, can we call out specific values\n## can we update style to show winner??\[email protected](\n Output('lane1-name', 'children'),\n [Input('my-daq-numericinput', 'value')])\ndef update_output(value):\n carNum = races.loc[value,'Lane 1']\n return carNum\n\[email protected](\n Output('lane2-name', 'children'),\n [Input('my-daq-numericinput', 'value')])\ndef update_output(value):\n carNum = races.loc[value,'Lane 2']\n return carNum\n\[email protected](\n Output('lane1', 'children'),\n [Input('my-daq-numericinput', 'value')])\ndef update_output(value):\n carNum = races.loc[value,'Lane 1']\n name = racers.loc[carNum,'Name']\n\n ### Results if available\n time = resultsdf[(resultsdf['race']==value) & (resultsdf['lane']==1)]['time']\n place = 
int(resultsdf[(resultsdf['race']==value) & (resultsdf['lane']==1)]['place'])\n color = colors[place]\n #color = colors[0]\n return html.Div([\n html.H4('Lane 1'),\n html.H5(carNum ),\n html.H5(place),\n #html.H5(color),\n html.H4(name),\n daq.LEDDisplay(\n id='lane1-leddisplay',\n value=time,\n label='Time',\n labelPosition='bottom',\n size='20',\n color=\"#FF5E5E\",\n backgroundColor=\"#000000\"\n ), \n ], \n style={'border-radius': '25px', 'background-color': color,'border': 'solid', 'text-align': 'center'})\n\[email protected](\n Output('lane2', 'children'),\n [Input('my-daq-numericinput', 'value')])\ndef update_output(value):\n carNum = races.loc[value,'Lane 2']\n name = racers.loc[carNum,'Name']\n\n ### Results if available\n time = resultsdf[(resultsdf['race']==value) & (resultsdf['lane']==2)]['time']\n place = int(resultsdf[(resultsdf['race']==value) & (resultsdf['lane']==2)]['place'])\n #color = colors[place]\n color = colors[0]\n return html.Div([\n html.H4('Lane 2'),\n html.H5(carNum ),\n html.H5(place),\n #html.H5(color),\n html.H4(name),\n daq.LEDDisplay(\n id='lane2-leddisplay',\n value=time,\n label='Time',\n labelPosition='bottom',\n size='20',\n color=\"#FF5E5E\",\n backgroundColor=\"#000000\"\n ), \n ], \n style={'border-radius': '25px', 'background-color': color,'border': 'solid', 'text-align': 'center'})\n\n\[email protected](\n Output('lane3', 'children'),\n [Input('my-daq-numericinput', 'value')])\ndef update_output(value):\n carNum = races.loc[value,'Lane 3']\n name = racers.loc[carNum,'Name']\n\n ### Results if available\n time = resultsdf[(resultsdf['race']==value) & (resultsdf['lane']==3)]['time']\n place = int(resultsdf[(resultsdf['race']==value) & (resultsdf['lane']==3)]['place'])\n color = colors[place]\n #color = colors[0]\n return html.Div([\n html.H4('Lane 3'),\n html.H5(carNum ),\n html.H5(place),\n #html.H5(color),\n html.H4(name),\n daq.LEDDisplay(\n id='lane3-leddisplay',\n value=time,\n label='Time',\n labelPosition='bottom',\n 
size='20',\n color=\"#FF5E5E\",\n backgroundColor=\"#000000\"\n ), \n ], \n style={'border-radius': '25px', 'background-color': color,'border': 'solid', 'text-align': 'center'})\n\n @app.callback(\n Output('lane4', 'children'),\n [Input('my-daq-numericinput', 'value')])\ndef update_output(value):\n carNum = races.loc[value,'Lane 4']\n name = racers.loc[carNum,'Name']\n\n ### Results if available\n time = resultsdf[(resultsdf['race']==value) & (resultsdf['lane']==4)]['time']\n place = int(resultsdf[(resultsdf['race']==value) & (resultsdf['lane']==4)]['place'])\n color = colors[place]\n #color = colors[0]\n return html.Div([\n html.H4('Lane 4'),\n html.H5(carNum ),\n html.H5(place),\n #html.H5(color),\n html.H4(name),\n daq.LEDDisplay(\n id='lane4-leddisplay',\n value=time,\n label='Time',\n labelPosition='bottom',\n size='20',\n color=\"#FF5E5E\",\n backgroundColor=\"#000000\"\n ), \n ], \n style={'border-radius': '25px', 'background-color': color,'border': 'solid', 'text-align': 'center'}) \n\n\n# callback for race change\[email protected](\n Output('lane-character', 'value'),\n [Input('lane-number', 'value')])\ndef update_output(value):\n return 'ol1'\n\n## Get Race Results\n#laneresults = ['1=1.1704a', '2=5.4159b', '3=3.5462c', '4=3.7246d']\n#@app.callback(\n# Output('lane-character', 'value'),\n# [Input('lane-number', 'value')])\n#def update_output(value):\n# laneresults = ['1=1.1704a', '2=5.4159b', '3=3.5462c', '4=3.7246d']\n# return 'ol1'\n\n\n\nif __name__ == '__main__':\n app.run_server(debug=True, host='0.0.0.0', port=8050)"
},
{
"alpha_fraction": 0.491813600063324,
"alphanum_fraction": 0.514483630657196,
"avg_line_length": 27.872726440429688,
"blob_id": "a03ed4f2ffbb5f127896430e2650934501f3a981",
"content_id": "a4b87726fc31bdce139be85b8d4944962d3199eb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4765,
"license_type": "no_license",
"max_line_length": 147,
"num_lines": 165,
"path": "/ARchive/app315.py",
"repo_name": "meh132/derbydash",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_table\nimport pandas as pd\nimport dash_daq as daq\nimport plotly.graph_objs as go\n\n\n\n\n\nresults = pd.read_csv('./resultsFile.csv')\nresults['carstring'] = results['car'].astype('str')\n#df.sort_values(by=['col1', 'col2'])\nallresults = results.sort_values(by=['time'], ascending=False)\n\n#results = allresults.loc[['name','car','time']]\n\n#avg = allresults.groupby(['car','name','lane']).agg({'car':'size', 'time':'mean'}).rename(columns={'car':'Races Completed'}) .reset_index()\n\n#avgtimes = avg.to_dict(orient='records')\n\n# Read from CSV to load Races\nraces = pd.read_csv('Races.csv')\nraces = races.set_index('Race Number')\nn_clicks = 1\n\n# Read list of racers and cars from CSV\n\nracers = pd.read_csv('Racers.csv')\nracers = racers.set_index('Number')\n\ncurrent_contestants = races.loc[n_clicks]\nspeed = lambda x: x\n\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\n\napp.layout = html.Div(children=[\n html.H1(children='Hello Dash'),\n\n html.Div(children='''\n Dash: A web application framework for Python.\n '''),\n dcc.Graph(\n figure=go.Figure(\n data=[\n go.Bar(\n \n y=allresults['name'],\n x=allresults['time'].apply(speed),\n text=allresults['name'],\n name='Lane 1',\n orientation='h',\n marker=go.bar.Marker(\n color='red'\n ),\n\n )\n ],\n transforms=go.transforms(\n type='aggregate',\n groups = allresults['lane'],\n aggregations = go.aggregations(\n [target ='x', func = 'avg',\n [target ='y', func = 'sum'] \n )\n ),\n layout=go.Layout(\n title='Race Results',\n\n showlegend=True,\n barmode='stack',\n legend=go.layout.Legend(\n x=1.0,\n y=0.0\n ),\n margin=go.layout.Margin(l=40, r=0, t=40, b=30)\n )\n ),\n style={'height': 800, 'width':800},\n id='my-graph'\n ),\n dcc.Graph(\n id='example-graph',\n figure={\n 'data': [\n {'y': 
[1, 2, 3], 'x': [4, 1, 2], 'type': 'bar', 'name': 'SF','orientation':'h'},\n {'y': [2, 1, 3], 'x': [4, 2, 5], 'type': 'bar', 'name': u'Montréal','orientation':'h'},\n \n ],\n 'layout': {\n 'title': 'Dash Data Visualization', 'barmode':'stack'\n }\n }\n ),\n dash_table.DataTable(id='table',\n columns=[{\"name\": i, \"id\": i} for i in allresults.columns],\n data=allresults.to_dict(\"rows\"),\n ),\n html.Div(children=[\n html.Div([\n daq.Indicator(\n id='my-daq-indicator',\n value=True,\n label=\"Lane 1\",\n color=\"#00cc96\"),\n daq.LEDDisplay(\n id='my-daq-leddisplay1',\n value='3.102',\n color=\"#FF5E5E\",\n backgroundColor=\"#000000\"\n )],\n style={'width': '24%', 'display': 'inline-block', 'textAlign': 'center'\n }),\n html.Div(daq.LEDDisplay(\n id='my-daq-leddisplay2',\n value='3.102',\n label=\"Lane 2\",\n color=\"#FF5E5E\",\n backgroundColor=\"#A9A9A9\"\n ), style={'width': '24%', 'display': 'inline-block'}),\n html.Div(daq.LEDDisplay(\n id='my-daq-leddisplay3',\n value='3.102',\n label=\"Lane 3\",\n color=\"#FF5E5E\",\n backgroundColor=\"#A9A9A9\"\n ), style={'width': '24%', 'display': 'inline-block'}),\n html.Div(daq.LEDDisplay(\n id='my-daq-leddisplay4',\n value='3.502',\n label=\"Lane 4\",\n color=\"#FF5E5E\",\n backgroundColor=\"#A9A9A9\"\n ), style={'width': '10%', 'display': 'inline-block'})]),\n dcc.Slider(\n min=-5,\n max=10,\n step=0.5,\n value=-3,\n ),\n html.Div(dcc.Input(id='input-box', type='text')),\n html.Button('Submit', id='button'),\n html.Div(id='output-container-button', children='Enter a value and press submit')\n\n\n])\n\[email protected](\n dash.dependencies.Output('output-container-button', 'children'),\n [dash.dependencies.Input('button', 'n_clicks')],\n [dash.dependencies.State('input-box', 'value')])\ndef update_output(n_clicks, value):\n return 'The input value was \"{}\" and the button has been clicked {} times'.format(\n value,\n n_clicks\n )\n\n\nif __name__ == '__main__':\n 
app.run_server(host='0.0.0.0',debug=True,port=8050)\n"
}
] | 18 |
Karamawy/TFTP-Server
|
https://github.com/Karamawy/TFTP-Server
|
033c4c6cc8c8c38996ba8f12829d128ec96dd7e2
|
0af876ac9bc0879396e09defa6c6406e00f05264
|
8bc3ca92642247a35942a0f5bd6f469508bdebd5
|
refs/heads/master
| 2022-04-10T10:01:19.293105 | 2020-03-22T21:17:18 | 2020-03-22T21:17:18 | 247,338,286 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7570621371269226,
"alphanum_fraction": 0.7966101765632629,
"avg_line_length": 34.20000076293945,
"blob_id": "1be53e3decf7f2dedfaf0f4d2c031e2d3462e96d",
"content_id": "d1f3b3594ee7b1e4db0806f044ee126e040c6824",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 177,
"license_type": "no_license",
"max_line_length": 145,
"num_lines": 5,
"path": "/README.md",
"repo_name": "Karamawy/TFTP-Server",
"src_encoding": "UTF-8",
"text": "# TFTP Client Using Python\n\n\n\nYou can go back to the project document [here](https://docs.google.com/document/d/1vQJH0F5o-q8BFCIdrF1i1xWYHEXrPvaQRS7dgELmnpQ/edit?usp=sharing).\n\n"
},
{
"alpha_fraction": 0.609321117401123,
"alphanum_fraction": 0.6162183284759521,
"avg_line_length": 32.49504852294922,
"blob_id": "7a4a2a88ec509e836e4ced33148b5ecacaf5376b",
"content_id": "588cae0b022f08a8d2c4e13a2fd4afad2dc33ac1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10149,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 303,
"path": "/4724_4733_lab1.py",
"repo_name": "Karamawy/TFTP-Server",
"src_encoding": "UTF-8",
"text": "# Don't forget to change this file's name before submission.\nimport sys\nimport os\nimport enum\nimport socket\nimport struct\nimport random\nfrom struct import pack\n\nclass TftpProcessor(object):\n \"\"\"\n Implements logic for a TFTP client.\n The input to this object is a received UDP packet,\n the output is the packets to be written to the socket.\n\n This class MUST NOT know anything about the existing sockets\n its input and outputs are byte arrays ONLY.\n\n Store the output packets in a buffer (some list) in this class\n the function get_next_output_packet returns the first item in\n the packets to be sent.\n\n This class is also responsible for reading/writing files to the\n hard disk.\n\n Failing to comply with those requirements will invalidate\n your submission.\n\n Feel free to add more functions to this class as long as\n those functions don't interact with sockets nor inputs from\n user/sockets. For example, you can add functions that you\n think they are \"private\" only. 
Private functions in Python\n start with an \"_\", check the example below\n \"\"\"\n class TftpPacketType(enum.Enum):\n \"\"\"\n Represents a TFTP packet type add the missing types here and\n modify the existing values as necessary.\n \"\"\"\n RRQ = 1\n WRQ = 2\n DATA = 3\n ACK = 4\n ERROR = 5\n\n\n def __init__(self):\n \"\"\"\n Add and initialize the *internal* fields you need.\n Do NOT change the arguments passed to this function.\n\n Here's an example of what you can do inside this function.\n \"\"\"\n self.packet_buffer = []\n self._oldpacket= None\n self._file= None\n self._doneuploading = False\n pass\n \n\n def process_udp_packet(self, packet_data, packet_source):\n \"\"\"\n Parse the input packet, execute your logic according to that packet.\n packet data is a bytearray, packet source contains the address\n information of the sender.\n \"\"\"\n # Add your logic here, after your logic is done,\n # add the packet to be sent to self.packet_buffer\n # feel free to remove this line\n print(f\"Received a packet from {packet_source}\")\n in_packet = self._parse_udp_packet(packet_data)\n out_packet = self._do_some_logic(in_packet)\n self._oldpacket=out_packet\n # This shouldn't change.\n self.packet_buffer.append(out_packet)\n\n def _parse_udp_packet(self, packet_bytes):\n \"\"\"\n You'll use the struct module here to determine\n the type of the packet and extract other available\n information.\n \"\"\"\n in_packet=[]\n #print(packet_bytes)\n opcode = packet_bytes[0:2]\n opcode=int.from_bytes(opcode,'big')\n #print(opcode)\n in_packet.append(opcode)\n #length = packet_bytes.find(b'\\0', start=2)\n if opcode==self.TftpPacketType.ACK.value:\n blocknum=packet_bytes[2:4]\n blocknum=int.from_bytes(blocknum,'big')\n in_packet.append(blocknum)\n elif opcode==self.TftpPacketType.DATA.value:\n blocknum=packet_bytes[2:4]\n blocknum=int.from_bytes(blocknum,'big')\n in_packet.append(blocknum)\n data=packet_bytes[4:]\n in_packet.append(data)\n elif 
opcode==self.TftpPacketType.ERROR.value:\n errorcode=packet_bytes[2:4]\n errorcode=int.from_bytes(errorcode,'big')\n in_packet.append(errorcode)\n return in_packet\n \n\n def _do_some_logic(self, input_packet):\n \"\"\"\n Example of a private function that does some logic.\n \"\"\"\n opcode=input_packet[0]\n if opcode==self.TftpPacketType.ACK.value:\n return self._continue_sending(input_packet[1])\n elif opcode==self.TftpPacketType.DATA.value:\n return self._continue_reading(input_packet)\n elif opcode==self.TftpPacketType.ERROR.value:\n if(input_packet[1]==1):\n print(\"File not found in server\")\n exit(0)\n else:\n return self._oldpacket \n pass\n\n def get_next_output_packet(self):\n \"\"\"\n Returns the next packet that needs to be sent.\n This function returns a byetarray representing\n the next packet to be sent.\n\n For example;\n s_socket.send(tftp_processor.get_next_output_packet())\n\n Leave this function as is.\n \"\"\"\n return self.packet_buffer.pop(0)\n\n def has_pending_packets_to_be_sent(self):\n \"\"\"\n Returns if any packets to be sent are available.\n\n Leave this function as is.\n \"\"\"\n return len(self.packet_buffer) != 0\n\n def _send_ack(self,blocknum):\n request = '!HH'\n request = pack(request,self.TftpPacketType.ACK.value,blocknum)\n return request\n\n def _continue_reading(self,input_packet):\n self._file.write(input_packet[2])\n return self._send_ack(input_packet[1])\n\n def _continue_sending(self,blocknum):\n bytes_to_be_sent=self._file.read(512)\n if len(bytes_to_be_sent)<512:\n self._doneuploading=True\n #print(bytes_to_be_sent)\n request='!hh{}s'\n request = request.format(len(bytes_to_be_sent))\n request = pack(request,self.TftpPacketType.DATA.value,blocknum+1,bytes_to_be_sent)\n return request\n\n def request_file(self, file_path_on_server):\n \"\"\"\n This method is only valid if you're implementing\n a TFTP client, since the client requests or uploads\n a file to/from a server, one of the inputs the client\n accept is the 
file name. Remove this function if you're\n implementing a server.\n \"\"\"\n self._file=open(file_path_on_server,\"wb\")\n formatstring=\"!H{}sB8sB\" #\n formatstring = formatstring.format(len(file_path_on_server))\n opcode = self.TftpPacketType.RRQ.value\n RRQ = pack(formatstring,opcode,file_path_on_server.encode(),0,\"netascii\".encode(),0)\n return RRQ\n\n def upload_file(self, file_path_on_server):\n \"\"\"\n This method is only valid if you're implementing\n a TFTP client, since the client requests or uploads\n a file to/from a server, one of the inputs the client\n accept is the file name. Remove this function if you're\n implementing a server.\n \"\"\"\n \n self._file=open(file_path_on_server,'rb')\n formatstring=\"!H{}sB8sB\" #\n formatstring = formatstring.format(len(file_path_on_server))\n opcode = self.TftpPacketType.WRQ.value\n WRQ = pack(formatstring,opcode,file_path_on_server.encode(),0,\"netascii\".encode(),0)\n return WRQ\n\n def getDoneuploading(self):\n return self._doneuploading\n def closeFile(self):\n self._file.close()\n pass\n\n\ndef check_file_name():\n script_name = os.path.basename(__file__)\n import re\n matches = re.findall(r\"(\\d{4}_)+lab1\\.(py|rar|zip)\", script_name)\n if not matches:\n print(f\"[WARN] File name is invalid [{script_name}]\")\n pass\n\n\ndef setup_sockets(address):\n \"\"\"\n Socket logic MUST NOT be written in the TftpProcessor\n class. It knows nothing about the sockets.\n\n Feel free to delete this function.\n \"\"\"\n skt=socket.socket(socket.AF_INET,socket.SOCK_DGRAM)\n #skt.settimeout(1)\n return skt\n\n\ndef parse_user_input(address, operation, file_name=None):\n # Your socket logic can go here,\n # you can surely add new functions\n # to contain the socket code. 
\n # But don't add socket code in the TftpProcessor class.\n # Feel free to delete this code as long as the\n # functionality is preserved.\n skt=setup_sockets(address)\n processor = TftpProcessor()\n if operation == \"push\":\n print(f\"Attempting to upload [{file_name}]...\")\n processor.upload_file(file_name)\n WRQ=processor.upload_file(file_name)\n skt.sendto(WRQ,(address,69))\n while True:\n data,server = skt.recvfrom(516)\n processor.process_udp_packet(data,server)\n skt.sendto(processor.get_next_output_packet(),server)\n if processor.getDoneuploading() == True:\n break\n processor.closeFile()\n print(\"Done Uploading!\")\n elif operation == \"pull\":\n print(f\"Attempting to download [{file_name}]...\")\n RRQ=processor.request_file(file_name) \n skt.sendto(RRQ,(address,69)) #SENDING THE RRQ\n while True:\n data,server = skt.recvfrom(516)\n processor.process_udp_packet(data,server)\n skt.sendto(processor.get_next_output_packet(),server)\n if len(data[4:])<512:\n break\n processor.closeFile()\n print(\"Done Downloading!\")\n pass\n\n\ndef get_arg(param_index, default=None):\n \"\"\"\n Gets a command line argument by index (note: index starts from 1)\n If the argument is not supplies, it tries to use a default value.\n\n If a default value isn't supplied, an error message is printed\n and terminates the program.\n \"\"\"\n try:\n return sys.argv[param_index]\n except IndexError as e:\n if default:\n return default\n else:\n print(e)\n print(\n f\"[FATAL] The comamnd-line argument #[{param_index}] is missing\")\n exit(-1) # Program execution failed.\n\n\ndef main():\n \"\"\"\n Write your code above this function.\n if you need the command line arguments\n \"\"\"\n print(\"*\" * 50)\n print(\"[LOG] Printing command line arguments\\n\", \",\".join(sys.argv))\n check_file_name()\n print(\"*\" * 50)\n\n # This argument is required.\n # For a server, this means the IP that the server socket\n # will use.\n # The IP of the server, some default values\n # are provided. 
Feel free to modify them.\n ip_address = get_arg(1, \"127.0.0.1\")\n operation = get_arg(2, \"pull\")\n file_name = get_arg(3, \"kisho.txt\")\n # Modify this as needed.\n parse_user_input(ip_address, operation, file_name)\n\n\nif __name__ == \"__main__\":\n main()\n"
}
] | 2 |
tdegeus/batchcrop
|
https://github.com/tdegeus/batchcrop
|
e963863ab78cb14c091254ea471f2bd98254458f
|
5ff3c6b7213a87e22c3ed200f9984ca09bbda9f0
|
5e98995464fb901afc53d4e2a5969bee295430a9
|
refs/heads/master
| 2020-11-27T02:07:02.982672 | 2020-01-20T16:55:01 | 2020-01-20T16:55:01 | 229,266,742 | 0 | 0 |
MIT
| 2019-12-20T13:07:05 | 2019-12-20T13:22:40 | 2020-01-20T16:53:23 |
Python
|
[
{
"alpha_fraction": 0.6105263233184814,
"alphanum_fraction": 0.61654132604599,
"avg_line_length": 29.18181800842285,
"blob_id": "f19e342f1369a4fbe62695a8c3755908dc37a532",
"content_id": "aa03d5552b2aec48ce0595e877971d91626f63e1",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 665,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 22,
"path": "/setup.py",
"repo_name": "tdegeus/batchcrop",
"src_encoding": "UTF-8",
"text": "\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nimport re\n\nfilepath = 'batchcrop/__init__.py'\n__version__ = re.findall(r'__version__ = \\'(.*)\\'', open(filepath).read())[0]\n\nsetup(\n name = 'batchcrop',\n version = __version__,\n license = 'MIT',\n author = 'Tom de Geus',\n author_email = '[email protected]',\n description = 'Crop a batch of images.',\n long_description = 'Crop a batch of images.',\n keywords = 'convert',\n url = 'https://github.com/tdegeus/batchcrop',\n packages = find_packages(),\n install_requires = ['docopt>=0.6.2'],\n entry_points = {\n 'console_scripts': ['batchcrop = batchcrop.cli.batchcrop:main']})\n"
},
{
"alpha_fraction": 0.7035661339759827,
"alphanum_fraction": 0.7035661339759827,
"avg_line_length": 26.75257682800293,
"blob_id": "81901f59feec0790bf9b032967fe056676726c03",
"content_id": "8194fa9815f16cbd4cb5833d570e5c7c256cbec9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2692,
"license_type": "permissive",
"max_line_length": 468,
"num_lines": 97,
"path": "/README.md",
"repo_name": "tdegeus/batchcrop",
"src_encoding": "UTF-8",
"text": "[](https://travis-ci.org/tdegeus/batchcrop)\n[](https://anaconda.org/conda-forge/batchcrop)\n\n# batchcrop\n\nCrop a batch of images with the same margins. This script wraps the `convert` command provided by *ImageMagic*\n\n# Contents\n\n<!-- MarkdownTOC -->\n\n- [Disclaimer](#disclaimer)\n- [Getting batchcrop](#getting-batchcrop)\n - [Using conda](#using-conda)\n - [Using PyPi](#using-pypi)\n - [From source](#from-source)\n- [Usage](#usage)\n\n<!-- /MarkdownTOC -->\n\n# Disclaimer\n\nThis library is free to use under the [MIT license](https://github.com/tdegeus/batchcrop/blob/master/LICENSE). Any additions are very much appreciated, in terms of suggested functionality, code, documentation, testimonials, word-of-mouth advertisement, etc. Bug reports or feature requests can be filed on [GitHub](https://github.com/tdegeus/batchcrop). As always, the code comes with no guarantee. None of the developers can be held responsible for possible mistakes.\n\nDownload: [.zip file](https://github.com/tdegeus/batchcrop/zipball/master) | [.tar.gz file](https://github.com/tdegeus/batchcrop/tarball/master).\n\n(c - [MIT](https://github.com/tdegeus/batchcrop/blob/master/LICENSE)) T.W.J. 
de Geus (Tom) | [email protected] | www.geus.me | [github.com/tdegeus/batchcrop](https://github.com/tdegeus/batchcrop)\n\n# Getting batchcrop\n\n## Using conda\n\n```bash\nconda install -c conda-forge batchcrop\n```\n\nThis will also install all necessary dependencies.\n\n## Using PyPi\n\n```bash\npip install batchcrop\n```\n\nThis will also install the necessary Python modules, **but not ImageMagic**.\n\n## From source\n\n```bash\n# Download batchcrop\ngit checkout https://github.com/tdegeus/batchcrop.git\ncd batchcrop\n\n# Install\npython -m pip install .\n```\n\nThis will also install the necessary Python modules, **but not ImageMagic**.\n\n# Usage\n\nThe usage is as follows (see `batchcrop --help`):\n\n```none\nbatchcrop\n Crop a batch of images with the same margins.\n\nUsage:\n batchcrop [options] <image>...\n\nArguments:\n The images to crop.\n\nOptions:\n -a, --append=<str>\n Append filenames, if empty the input files are overwritten. [default: ]\n\n --background=<str>\n Apply a background color (e.g. \"none\" or \"white\").\n\n --flatten\n Flatten input images: required for transparent PNG-files.\n\n --temp-dir=<str>\n Output directory for temporary images (deleted if not specified).\n\n -v, --verbose\n Print all executed commands.\n\n -h, --help\n Show help.\n\n --version\n Show version.\n\n(c-MIT) T.W.J. de Geus | [email protected] | www.geus.me | github.com/tdegeus\n```\n"
}
] | 2 |
LeoKavanagh/telegram-bot
|
https://github.com/LeoKavanagh/telegram-bot
|
03ce04fc0e29dabdc6b6361cd3ff830d351c62a8
|
b5b1a0a6c5a28685b8d3f740b0cba3159d4dfda1
|
c410742f45d81b48158f0dd7f220437c0c76f288
|
refs/heads/master
| 2023-02-23T17:41:24.857598 | 2022-05-16T20:54:35 | 2022-05-16T20:54:35 | 121,312,385 | 0 | 0 | null | 2018-02-12T22:48:14 | 2022-05-16T20:54:49 | 2023-02-16T05:57:55 |
Python
|
[
{
"alpha_fraction": 0.6347150206565857,
"alphanum_fraction": 0.6355785727500916,
"avg_line_length": 30.29729652404785,
"blob_id": "d9442ffc51b1bca003b441e1233acc5c5f978fa5",
"content_id": "c0c469cc494ed0f2e945d04b1ea40294a4d50852",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1158,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 37,
"path": "/bot.py",
"repo_name": "LeoKavanagh/telegram-bot",
"src_encoding": "UTF-8",
"text": "import os\nimport requests\n\n# The main URL for the Telegram API with our bot's token\nBASE_URL = \"https://api.telegram.org/bot{}\".format(os.environ['THINGDONE_BOT_TOKEN'])\n\ndef receive_message(msg):\n \"\"\"Receive a raw message from Telegram\"\"\"\n try:\n message_text = str(msg[\"message\"][\"text\"])\n chat_id = msg[\"message\"][\"chat\"][\"id\"]\n return message_text, chat_id\n except Exception as e:\n print(e)\n return (None, None)\n\ndef handle_message(message_text):\n \"\"\"Calculate a response to the message\"\"\"\n return message_text\n\ndef send_message(message_text, chat_id):\n \"\"\"Send a message to the Telegram chat defined by chat_id\"\"\"\n data = {\"text\": message_text.encode(\"utf8\"), \"chat_id\": chat_id}\n url = BASE_URL + \"/sendMessage\"\n try:\n response = requests.post(url, data).content\n except Exception as e:\n print(e)\n\ndef run(message):\n \"\"\"Receive a message, handle it, and send a response\"\"\"\n try:\n message_text, chat_id = receive_message(message)\n response = handle_message(message_text)\n send_message(response, chat_id)\n except Exception as e:\n print(e)\n"
},
{
"alpha_fraction": 0.47804877161979675,
"alphanum_fraction": 0.6910569071769714,
"avg_line_length": 15.621622085571289,
"blob_id": "6cd8918e9fa72cda4051e839474b0dbd1fe3cb7d",
"content_id": "9d33a55def4ceba15c66a2b4a698b402dfd87555",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 615,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 37,
"path": "/requirements.txt",
"repo_name": "LeoKavanagh/telegram-bot",
"src_encoding": "UTF-8",
"text": "argcomplete==2.0.0\nboto3==1.23.1\nbotocore==1.26.1\ncertifi==2021.10.8\ncfn-flip==1.3.0\ncharset-normalizer==2.0.12\nclick==8.1.3\ndurationpy==0.5\nFlask==2.1.2\nfuture==0.18.2\nhjson==3.0.2\nidna==3.3\nimportlib-metadata==4.11.3\nitsdangerous==2.1.2\nJinja2==3.1.2\njmespath==1.0.0\nkappa==0.6.0\nMarkupSafe==2.1.1\npep517==0.12.0\npip-tools==6.6.1\nplacebo==0.10.0\npython-dateutil==2.8.2\npython-slugify==6.1.2\nPyYAML==6.0\nrequests==2.27.1\ns3transfer==0.5.2\nsix==1.16.0\ntext-unidecode==1.3\ntoml==0.10.2\ntomli==2.0.1\ntqdm==4.64.0\ntroposphere==4.0.2\nurllib3==1.26.9\nWerkzeug==2.1.2\nwsgi-request-logger==0.4.6\nzappa==0.54.1\nzipp==3.8.0\n"
},
{
"alpha_fraction": 0.5859788656234741,
"alphanum_fraction": 0.5859788656234741,
"avg_line_length": 22.59375,
"blob_id": "2b7c909a3766560cf767f465d0bc2db2be2877cc",
"content_id": "1727b3b2e2090306ed7508b3afe3834697e42787",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 756,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 32,
"path": "/server.py",
"repo_name": "LeoKavanagh/telegram-bot",
"src_encoding": "UTF-8",
"text": "import os\nfrom flask import Flask\nfrom flask import request\nfrom bot import run, send_message\n\napp = Flask(__name__)\n\[email protected](\"/\", methods=[\"GET\", \"POST\"])\ndef receive():\n try:\n run(request.json)\n return \"Hello\"\n except Exception as e:\n print(e)\n return \"error of some description\"\n\n\[email protected](\"/msg\", methods=[\"GET\", \"POST\"])\ndef msg():\n \"\"\"\n Send a message in headers\n \"\"\"\n\n text = request.headers.get('msg')\n\n try:\n send_message(text, os.environ['TELEGRAM_CHAT_ID'])\n except Exception as e:\n msg = \"I tried to send the message to you \" \\\n \"but it didn't work: {} - {}\".format(type(e), e)\n return msg\n return \"Sent message to you on Telegram: {}\".format(text)\n\n"
},
{
"alpha_fraction": 0.6467065811157227,
"alphanum_fraction": 0.6467065811157227,
"avg_line_length": 26.79166603088379,
"blob_id": "7742839d6e58239a33f85ac9c59d4159e6a6d1f7",
"content_id": "96bae56616980a54f9ea5ce7322c89b635990933",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 668,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 24,
"path": "/set_webhook.py",
"repo_name": "LeoKavanagh/telegram-bot",
"src_encoding": "UTF-8",
"text": "import os\nfrom pprint import import pprint\nimport requests\n\ndef main():\n # Telegram botfather will give you this\n bot_token = os.environ['THINGDONE_BOT_TOKEN']\n\n # The place where the code is running; ngrok or AWS Lambda or wherever\n deployed_url = os.environ['THINGDONE_BOT_DEPLOYED_URL']\n\n test_url = deployed_url + \"/{}\".format(bot_token)\n\n def get_url(method):\n return \"https://api.telegram.org/bot{}/{}\".format(bot_token,method)\n\n r = requests.get(get_url(\"setWebhook\"), data={\"url\": test_url})\n r = requests.get(get_url(\"getWebhookInfo\"))\n pprint(r.status_code)\n pprint(r.json())\n\n\nif __name__ == '__main__':\n main()\n\n"
}
] | 4 |
setseed42/rl-fighters
|
https://github.com/setseed42/rl-fighters
|
ac22d35823754916d4f9a6e845dcb152f62ad3df
|
001b6c994c0b42ffea49752a90ca1300141e0018
|
03babe6044d32226ad21e934bd740e4364904ef6
|
refs/heads/master
| 2023-08-09T14:32:33.916323 | 2023-07-11T16:04:00 | 2023-07-11T16:04:00 | 214,890,819 | 0 | 0 | null | 2019-10-13T20:41:31 | 2023-07-11T15:57:22 | 2023-07-25T23:37:23 |
Python
|
[
{
"alpha_fraction": 0.474744975566864,
"alphanum_fraction": 0.48357799649238586,
"avg_line_length": 36.91509246826172,
"blob_id": "188a1ab342c7205ae7e7fdd611e1ccded8cdf395",
"content_id": "b1c8c2daa6d29fdb79144869a8abfc60cdacf37b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8038,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 212,
"path": "/gym-fighters/gym_fighters/envs/character.py",
"repo_name": "setseed42/rl-fighters",
"src_encoding": "UTF-8",
"text": "from gym_fighters.envs.game_env import GameEnv\nimport numpy as np\nfrom pyglet.window import key\n\n\nclass Character(GameEnv):\n def __init__(self, char_index: int, game_env: GameEnv, other_chars: np.ndarray, n_rays: int):\n self.char_index = char_index\n self.state = game_env.state\n self.char_width = game_env.char_width\n self.char_height = game_env.char_height\n self.char_state = self._get_index_state(char_index)\n self.char_box = np.array([\n [0, 0],\n [0, self.char_height],\n [self.char_width, 0],\n [self.char_width, self.char_height],\n ]) - (self.char_width // 2)\n self.hitbox_rad = np.sqrt(\n (self.char_width // 2)**2 +\n (self.char_height // 2)**2\n )\n self.direction = np.array([10, 0])\n self.other_chars = other_chars\n self.n_rays = n_rays\n self.key_map = game_env.key_map\n self.anim_map = game_env.anim_map\n self.direct_map = game_env.direct_map\n self.window_height = game_env.window_height\n self.window_width = game_env.window_width\n\n def update_char_position(self, symbol, state):\n self.state = state\n self.char_state = self._get_index_state(self.char_index)\n if self.char_state['stance'] == 'dead':\n return self.state\n\n if symbol in self.key_map:\n vector = self.key_map[symbol]\n self.direction = vector\n new_coords, char_collided, map_collided, who_hit = self._propose_movement(\n vector)\n self._set_char_key('coords', new_coords)\n if map_collided:\n self._set_char_key('stance', 'dead')\n return self.state\n elif char_collided:\n self._set_char_key('stance', 'jumping_' +\n self.anim_map[symbol])\n else:\n self._set_char_key('stance', 'standing_' +\n self.anim_map[symbol])\n elif symbol == key.SPACE:\n self._set_char_key(\n 'stance',\n 'attack_' + self.direct_map[tuple(self.direction)]\n )\n new_coords, char_collided, map_collided, who_hit = self._propose_movement(\n self.direction)\n if char_collided:\n for who in who_hit:\n self.state['stance'][who] = 'dead'\n self._set_char_key(\n 'coords',\n new_coords\n )\n elif map_collided:\n 
self._set_char_key(\n 'coords',\n self.state['coords'][self.char_index] - self.direction\n )\n self._set_char_key('stance', 'dead')\n return self.state\n else:\n self._set_char_key(\n 'coords',\n new_coords\n )\n else:\n print('Invalid symbol')\n return self.state\n\n def get_vision(self):\n return self._send_rays(5, self.n_rays)\n\n def _propose_movement(self, vector):\n new_coords = self.state['coords'][self.char_index] + vector\n map_collided = self._map_collision(new_coords)\n char_collided, who_hit = self._char_collision(new_coords)\n has_collided = map_collided or char_collided\n if has_collided:\n return self.state['coords'][self.char_index], char_collided, map_collided, who_hit\n else:\n return new_coords, char_collided, map_collided, who_hit\n\n def _map_collision(self, new_coords):\n new_box = self.char_box + new_coords\n return np.any(new_box < 0) or \\\n np.any(new_box[:, 0] > self.window_width - 1) or \\\n np.any(new_box[:, 1] > self.window_height - 1)\n\n def _char_collision(self, new_coords):\n distances = np.sqrt(\n ((new_coords-self.state['coords']\n [self.other_chars])**2).sum(axis=1)\n )\n collisions = np.array(distances < self.hitbox_rad)\n return np.any(collisions), self.other_chars[collisions]\n\n def _get_char_key(self, key):\n return self.state[key][self.char_index]\n\n def _set_char_key(self, key, value):\n self.state[key][self.char_index] = value\n\n def _send_rays(self, ray_dist, n_rays):\n center = self.state['coords'][self.char_index]\n\n def handle_ray(ray_ix):\n ## Returns distance to collision and what it collides with.\n ## Wall = 0 and Character = 1\n angle = ray_ix * np.pi * 2 / n_rays\n #ray_dist = 0.1\n d = np.array([\n ray_dist*np.cos(angle),\n ray_dist*np.sin(angle)\n ])\n\n def handle_ray_char_collision(other_char):\n f = center - self.state['coords'][other_char]\n a = np.dot(d, d)\n b = 2 * np.dot(f, d)\n c = np.dot(f, f) - (self.hitbox_rad ** 2)\n det = b**2 - 4*a*c\n crosses = det >= 0\n if crosses:\n t_0 = (np.sqrt(det) - 
b) / (2*a)\n t_1 = -1 * (b + np.sqrt(det)) / (2*a)\n if np.sign(t_0) == 1 and np.sign(t_1) == -1:\n return t_0, crosses\n elif np.sign(t_0) == -1 and np.sign(t_1) == 1:\n return t_0, crosses\n elif np.sign(t_0) == 1 and np.sign(t_1) == 1:\n return min(t_0, t_1), crosses\n else:\n return None, False\n else:\n return None, crosses\n\n ray_char_collisions = [\n handle_ray_char_collision(other_char)\n for other_char in self.other_chars\n ]\n ray_char_collisions = [\n collision for collision in ray_char_collisions\n if collision[1]\n ]\n if len(ray_char_collisions) > 0:\n scaler = min(ray_char_collisions, key=lambda x: x[0])[0]\n return scaler * np.linalg.norm(d), 1\n\n def handle_wall(coll_coord, coll_coord_ix, max_lim):\n coll_other_ix = np.abs(coll_coord_ix - 1)\n t = (coll_coord - center[coll_coord_ix]) / d[coll_coord_ix]\n if t < 0:\n return None, False\n coll_other = center[coll_other_ix] + (t * d[coll_other_ix])\n if 0 <= coll_other <= max_lim:\n dist = np.linalg.norm(t*d)\n if coll_coord_ix == 0:\n dist_norm = dist / self.window_width\n else:\n dist_norm = dist / self.window_height\n return dist_norm, True\n return None, False\n\n collisions = {\n 'left': handle_wall(0, 0, self.window_height),\n 'right': handle_wall(self.window_width, 0, self.window_height),\n 'down': handle_wall(0, 1, self.window_width),\n 'up': handle_wall(self.window_height, 1, self.window_width),\n }\n collisions = [\n (value[0], key)\n for key, value\n in collisions.items()\n if value[1] == True\n ]\n\n try:\n return min(collisions, key=lambda x: x[0])[0], 0\n except:\n print(self.state['coords'][self.char_index])\n print(self.char_width)\n print(self.char_height)\n print(ray_ix)\n print(angle)\n raise\n\n return self._flatten([\n handle_ray(ray_ix)\n for ray_ix in range(n_rays)\n ])\n\n def _get_index_state(self, index):\n return {\n key: self.state[key][index]\n for key in self.state\n }\n\n def _flatten(self, l):\n return [item for sublist in l for item in sublist]\n"
},
{
"alpha_fraction": 0.5402105450630188,
"alphanum_fraction": 0.5532631874084473,
"avg_line_length": 29.44871711730957,
"blob_id": "86c1403e39c77619914f31bb3169ecedd46d042f",
"content_id": "f9c1de2448c33082c30b14b2331833f091acde2b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2375,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 78,
"path": "/rl-agent/train.py",
"repo_name": "setseed42/rl-fighters",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport gym\nfrom model import MLPModel, LSTMModel\nimport time\nimport gym_fighters\n\n\ndef replace_models_with_model(i, models):\n model = models[i]\n for (j, other_model) in enumerate(models):\n if j != i:\n other_model.replace_model(model.model)\n return models\n\n\ndef play_game(env, models, render=False):\n steps = 0\n n_chars = env.num_chars\n chars = range(n_chars)\n state = env.reset()\n done = False\n\n while not done:\n for char_ix in chars:\n other_char_ixes = [i for i in chars if i != char_ix]\n if render:\n env.render()\n time.sleep(.01)\n action = models[char_ix].predict(state[char_ix, :])\n new_state, reward, done, _ = env.step(action, char_ix)\n models[char_ix].add_to_memory(\n state[char_ix, :], action, reward[char_ix])\n state = new_state\n steps += 1\n if done:\n if steps == 1:\n return models\n reward = models[char_ix].train(last=True)\n models = replace_models_with_model(char_ix, models)\n for other_char_ix in other_char_ixes:\n models[other_char_ix].train(\n last=False, others_reward=reward)\n models = replace_models_with_model(other_char_ix, models)\n return models\n\n\ndef get_mean_cause_loss(models):\n return np.array([\n np.mean(model.cause_losses)\n for model in models\n ])\n\n\ndef main(model, n_chars=2, n_rays=16):\n env = gym.make('fighters-v0', num_chars=n_chars, n_rays=n_rays)\n episode_nb = 1\n models = [\n model(env.observations_dim, env.action_choices, n_chars, i)\n for i in range(n_chars)\n ]\n epochs_before_saving = 100\n while np.all(get_mean_cause_loss(models) > 0.01) or episode_nb < 100:\n if episode_nb % epochs_before_saving == 0:\n models = play_game(env, models, render=True)\n print(f'Game {episode_nb} done')\n models[0].save_model()\n else:\n models = play_game(env, models, render=False)\n episode_nb += 1\n\n print('Finished!')\n model[0].save_model()\n\n\nif __name__ == \"__main__\":\n np.random.seed(42)\n #main(MLPModel, n_chars=2, n_rays=16)\n main(LSTMModel, n_chars=2, n_rays=16)\n"
},
{
"alpha_fraction": 0.8545454740524292,
"alphanum_fraction": 0.8545454740524292,
"avg_line_length": 54,
"blob_id": "25de0a0179f9f7d97d7f2f90b11032e45b56fbf1",
"content_id": "4532e075129debb32cbf26ab1adb0b3953702994",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 55,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 1,
"path": "/gym-fighters/gym_fighters/envs/__init__.py",
"repo_name": "setseed42/rl-fighters",
"src_encoding": "UTF-8",
"text": "from gym_fighters.envs.fighters_env import FightersEnv\n"
},
{
"alpha_fraction": 0.5139665007591248,
"alphanum_fraction": 0.5337057709693909,
"avg_line_length": 35.283782958984375,
"blob_id": "e0e22c398741be19e37d180eb96a0714490e90d4",
"content_id": "4d3a0a0bee903dbd4c040099103cf4324cd4ab2e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2685,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 74,
"path": "/gym-fighters/gym_fighters/envs/game_env.py",
"repo_name": "setseed42/rl-fighters",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport os\nimport pyglet\nfrom pyglet.window import key\nimport pkg_resources\n\nclass GameEnv(object):\n def __init__(self, num_chars: int):\n self.num_chars: int = num_chars\n self.window_width: int = 300\n self.window_height: int = 400\n self.state = None\n self.image_map = self._get_art()\n self.char_width = self.image_map['standing_up'].width\n self.char_height = self.image_map['standing_up'].height\n self.action_matrix = [\n [key.RIGHT, 'right', np.array([10, 0]), [1, 0, 0, 0]],\n [key.LEFT, 'left', np.array([-10, 0]), [0, 1, 0, 0]],\n [key.UP, 'up', np.array([0, 10]), [0, 0, 1, 0]],\n [key.DOWN, 'down', np.array([0, -10]), [0, 0, 0, 1]],\n ]\n self.anim_map = {l[0]: l[1] for l in self.action_matrix}\n self.key_map = {l[0]: l[2] for l in self.action_matrix}\n self.direct_map = {tuple(l[2]): l[1] for l in self.action_matrix}\n self.reset_state()\n\n def reset_state(self):\n height_splits = np.linspace(\n self.char_height, self.window_width-self.char_height, num=self.num_chars+1)\n y_coords = np.array([\n np.random.randint(height_splits[i]+10, height_splits[i+1]-10)\n for i in range(self.num_chars)\n ]).reshape(self.num_chars, 1)\n np.random.shuffle(y_coords)\n self.state = {\n 'stance': np.resize('standing_right', self.num_chars).astype('<U16'),\n 'coords': np.hstack([\n np.random.randint(\n self.char_width, self.window_width-self.char_width, size=(self.num_chars, 1)),\n y_coords\n ])\n }\n return self\n\n def _get_art(self):\n DATA_PATH = pkg_resources.resource_filename(\n 'gym_fighters', 'envs/assets')\n print(DATA_PATH)\n imgs = os.listdir(DATA_PATH)\n loader = pyglet.resource.Loader([DATA_PATH])\n\n def get_animation_name(img):\n return img \\\n .replace('knight_', '') \\\n .replace('.png', '')\n\n def get_image_resource(img):\n path = f'{DATA_PATH}/{img}'\n #char_resource = loader.image(path)\n char_resource = pyglet.image.load(path)\n char_resource.anchor_x = char_resource.width // 2\n char_resource.anchor_y = 
char_resource.height // 2\n return char_resource\n\n return {\n get_animation_name(img): get_image_resource(img)\n for img in imgs\n if '2' not in img\n and '.png' in img\n }\n\n def set_state(self, state):\n self.state = state\n return self\n"
},
{
"alpha_fraction": 0.5965909361839294,
"alphanum_fraction": 0.625,
"avg_line_length": 34.20000076293945,
"blob_id": "eea19cc2b73f71e6841af1a5847223a2ef1689da",
"content_id": "6154964654d3f00eebd5ee8820ab9969d262dcef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 352,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 10,
"path": "/gym-fighters/setup.py",
"repo_name": "setseed42/rl-fighters",
"src_encoding": "UTF-8",
"text": "from setuptools import setup\n\nsetup(name='gym_fighters',\n version='0.0.1',\n include_package_data=True,\n #packages=['mypkg'],\n #package_dir={'mypkg': 'src/mypkg'},\n package_data={'gym_fighters': ['gym_fighters/envs/*.png']},\n install_requires=['gym==0.15.3', 'numpy', 'pyglet==1.3.2']#And any other dependencies required\n)\n"
},
{
"alpha_fraction": 0.510756254196167,
"alphanum_fraction": 0.5154136419296265,
"avg_line_length": 29.67346954345703,
"blob_id": "2bcd8f7f5bba8dd6b9fa49f252f8a00e04d63f3a",
"content_id": "54aef3a179e7b047b999db7f04f6b7c6fdb0cf32",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4509,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 147,
"path": "/gym-fighters/gym_fighters/envs/fighters_env.py",
"repo_name": "setseed42/rl-fighters",
"src_encoding": "UTF-8",
"text": "import gym\nimport numpy as np\nimport pyglet\nfrom pyglet.window import key\nfrom gym_fighters.envs.character import Character\nfrom gym_fighters.envs.game_env import GameEnv\n\nclass FightersEnv(gym.Env):\n metadata = {'render.modes': ['human']}\n\n def __init__(self, num_chars=2, n_rays=4):\n self.num_chars = num_chars\n self.n_rays = n_rays\n self.game_env = GameEnv(num_chars)\n self.chars = [\n Character(\n char_index=i,\n game_env=self.game_env,\n other_chars=self._get_other_chars(i),\n n_rays=n_rays,\n )\n for i in range(num_chars)\n ]\n self.vision = np.array([char.get_vision() for char in self.chars])\n self.action_choices = [\n key.RIGHT,\n key.LEFT,\n key.UP,\n key.DOWN,\n key.SPACE\n ]\n self.observations_dim = n_rays * 2\n self.window=None\n\n def _make_window(self):\n self.window = pyglet.window.Window(\n width=self.game_env.window_width,\n height=self.game_env.window_height,\n )\n\n def step(self, action, char_ix):\n new_state = self.chars[char_ix].update_char_position(action, self.game_env.state)\n self.game_env = self.game_env.set_state(new_state)\n if sum(self.game_env.state['stance'] == 'dead') == (self.num_chars - 1):\n done = True\n reward = (self.game_env.state['stance']!='dead').astype(float)\n reward = (reward * 2) - 1\n return None, reward, done, None\n else:\n reward = np.zeros(self.num_chars)\n done = False\n self.vision = np.array([\n char.get_vision()\n for char in self.chars\n ])\n return self.vision, reward, done, None\n\n def reset(self):\n if self.window is not None:\n self.window.close()\n self.window=None\n self.game_env = self.game_env.reset_state()\n self.chars = [\n Character(\n char_index=i,\n game_env=self.game_env,\n other_chars=self._get_other_chars(i),\n n_rays=self.n_rays,\n )\n for i in range(self.num_chars)\n ]\n return self.vision\n\n def render(self, mode='human', close=False):\n if self.window is None:\n self._make_window()\n def create_batch():\n batch = pyglet.graphics.Batch()\n 
redundant_but_necessary_for_batch_to_work = []\n return batch, redundant_but_necessary_for_batch_to_work\n\n pyglet.clock.tick()\n\n @self.window.event\n def on_draw():\n self.window.clear()\n batch = create_batch()\n self._sprite(\n batch,\n image_name=self.game_env.state['stance'],\n coords=self.game_env.state['coords'],\n )\n\n batch[0].draw()\n\n for window in pyglet.app.windows:\n window.switch_to()\n window.dispatch_events()\n window.dispatch_event('on_draw')\n window.flip()\n\n def _len_of_one(self, *objs):\n for obj in objs:\n if hasattr(obj, '__len__'):\n return len(obj)\n return 1\n\n def _sprite(self, batch, image_name, coords):\n length = self._len_of_one(image_name, coords)\n image_name = np.resize(image_name, length)\n x = np.resize(coords[:,0], length)\n y = np.resize(coords[:,1], length)\n\n for _image_name, _x, _y in zip(image_name, x, y):\n img = self.game_env.image_map[_image_name]\n sprite = pyglet.sprite.Sprite(img, x=_x, y=_y, batch=batch[0])\n batch[1].append(sprite)\n\n\n def _get_other_chars(self, index: int) -> np.ndarray:\n other_chars = np.arange(self.num_chars)\n return other_chars[other_chars != index]\n\n\n\n\n\nif __name__ == \"__main__\":\n import time\n n_chars=2\n env = FightersEnv(2, 4)\n run = 0\n while True:\n run += 1\n state = env.reset()\n state = state\n step = 0\n done = False\n while not done:\n step += 1\n env.render()\n for char_ix in range(n_chars):\n action = np.random.choice(env.action_choices, 1)[0]\n next_state, reward, done, info = env.step(action, char_ix)\n if done:\n print(f'game {run} done')\n break\n"
},
{
"alpha_fraction": 0.692307710647583,
"alphanum_fraction": 0.692307710647583,
"avg_line_length": 11,
"blob_id": "d6933dae05fe4ae7361542ee71bfbffe97739562",
"content_id": "a8e4a532318894b8470caa503ce7833fb2b0f2b0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 13,
"license_type": "no_license",
"max_line_length": 11,
"num_lines": 1,
"path": "/gym-fighters/README.md",
"repo_name": "setseed42/rl-fighters",
"src_encoding": "UTF-8",
"text": "#Game logic\n\n"
},
{
"alpha_fraction": 0.5010101199150085,
"alphanum_fraction": 0.5090909004211426,
"avg_line_length": 27.285715103149414,
"blob_id": "38884ca6cf051dde84c74931a16591d319f7227e",
"content_id": "fc5aeaeb5031a0776b9c7520c203afbe8533bc2a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 990,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 35,
"path": "/rl-agent/play.py",
"repo_name": "setseed42/rl-fighters",
"src_encoding": "UTF-8",
"text": "import gym\nimport gym_fighters\nfrom model import LSTMModel, MLPModel\nimport time\n\n\ndef main(model, chars_trained_on, n_chars=2, n_rays=16):\n chars = range(n_chars)\n env = gym.make('fighters-v0', num_chars=n_chars, n_rays=n_rays)\n models = [\n model(env.observations_dim, env.action_choices, chars_trained_on)\n for i in range(n_chars)\n ]\n while True:\n state = env.reset()\n done = False\n while not done:\n for char_ix in chars:\n env.render()\n time.sleep(.01)\n action = models[char_ix].predict(\n state[char_ix, :], explore=False)\n new_state, _, done, _ = env.step(action, char_ix)\n state = new_state\n if done:\n for model in models:\n model.end_game()\n break\n\n # state = new_state\n\n\nif __name__ == \"__main__\":\n main(MLPModel, 3)\n main(LSTMModel, 3)\n"
},
{
"alpha_fraction": 0.5706528425216675,
"alphanum_fraction": 0.5829939246177673,
"avg_line_length": 37.956729888916016,
"blob_id": "5b9f888a0b92c7b00ddf71b1d4dd5461ce849012",
"content_id": "120a830d0a502f0744e0c71fc37628559bb352eb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8103,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 208,
"path": "/rl-agent/model.py",
"repo_name": "setseed42/rl-fighters",
"src_encoding": "UTF-8",
"text": "import tensorflow as tf\nfrom tensorflow import keras\nimport os\nimport datetime\nimport numpy as np\nfrom collections import deque\nfrom abc import ABCMeta, abstractmethod\n# keras.backend.set_floatx('float64')\n\n\nclass AbstractModel(metaclass=ABCMeta):\n def __init__(self, observation_dim, actions, n_chars, name, batch_size, shuffle, char_ix):\n tf.random.set_seed(42)\n self.actions = actions\n self.batch_size = batch_size\n self.shuffle = shuffle\n self.model_path = f'./model/char_{name}'\n self.model_name = f'{n_chars}_model.hdf5'\n self.full_model_path = f'{self.model_path}/{self.model_name}'\n if not os.path.exists(self.model_path):\n os.makedirs(self.model_path)\n if os.path.exists(self.full_model_path):\n print(\"loading previous weights\")\n self.model = tf.keras.models.load_model(\n self.full_model_path,\n )\n else:\n print('Training from scratch')\n self.model = self._model_arch(observation_dim, len(self.actions))\n\n self.epoch = 0\n current_time = datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n train_log_dir = f'logs/gradient_tape/{current_time}/char_{name}_{n_chars}_{char_ix}'\n self.train_summary_writer = tf.summary.create_file_writer(\n train_log_dir)\n self.exploration = 1\n self.gamma = 0.99\n self.reset_memory()\n self.min_exploration = 0.01\n self.exploration_decay = 0.001\n self.cause_wins = deque(maxlen=100)\n self.cause_losses = deque(maxlen=100)\n self.running_reward = None\n\n @abstractmethod\n def _model_arch(self):\n pass\n\n @abstractmethod\n def end_game(self):\n pass\n\n def train_loop(self, sample_weights, cause_wins, cause_losses, steps):\n history = self.model.fit(\n np.array(self.memory['x']), np.array(self.memory['y']),\n sample_weight=np.array(sample_weights) if len(sample_weights)>1 else None,\n epochs=1,\n batch_size=self.batch_size,\n shuffle=self.shuffle,\n verbose=0\n )\n with self.train_summary_writer.as_default():\n for key, items in history.history.items():\n tf.summary.scalar(key, items[-1], 
step=self.epoch)\n tf.summary.scalar('running_reward',\n self.running_reward, step=self.epoch)\n tf.summary.scalar('exploration', self.exploration, step=self.epoch)\n tf.summary.scalar('cause_wins', cause_wins, step=self.epoch)\n tf.summary.scalar('cause_losses', cause_losses, step=self.epoch)\n tf.summary.scalar('steps', len(self.memory['x']), step=self.epoch)\n self.end_game()\n self.epoch += 1\n\n def save_model(self):\n self.model.save(self.full_model_path)\n\n def replace_model(self, model):\n self.model = model\n\n def get_running_reward(self):\n if self.running_reward is None:\n return self.memory['reward_sum']\n else:\n return self.running_reward * 0.99 + self.memory['reward_sum'] * 0.01\n\n def train(self, last, others_reward=None):\n if last:\n if self.memory['reward_sum'] == 1:\n # Means character killed other player\n self.memory['reward'][-1] *= 1\n # maybe scale up reward sum as well\n self.cause_wins.append(1)\n self.cause_losses.append(0)\n if self.memory['reward_sum'] == -1:\n # Means character hit wall\n self.cause_losses.append(1)\n self.cause_wins.append(0)\n reward = self.memory['reward'][-1]\n else:\n reward = others_reward * -1\n self.cause_wins.append(0)\n self.cause_losses.append(0)\n self.memory['reward'][-1] = reward\n self.memory['reward_sum'] += reward\n self.running_reward = self.get_running_reward()\n self.train_loop(\n sample_weights=self.discount_rewards(\n self.memory['reward'], self.gamma),\n cause_wins=np.mean(self.cause_wins),\n cause_losses=np.mean(self.cause_losses),\n steps=len(self.memory['x'])\n )\n self.exploration = max(\n self.min_exploration,\n self.exploration*(1-self.exploration_decay)\n )\n self.reset_memory()\n return reward\n\n def predict(self, x, explore=True):\n if explore:\n exploration = self.exploration\n else:\n exploration = 0\n x = np.expand_dims(x, axis=0)\n p_exp = np.random.uniform(0, 1, 1)[0]\n if p_exp < exploration:\n return np.random.choice(self.actions, 1)[0]\n else:\n action_proba = 
self.model.predict(x, verbose=0)[0]\n return np.random.choice(self.actions, 1, p=action_proba)[0]\n\n def add_to_memory(self, x, y, reward):\n self.memory['x'].append(x)\n y_vec = np.zeros(len(self.actions))\n y_vec[self.actions.index(y)] = 1\n self.memory['y'].append(y_vec)\n self.memory['reward'].append(reward)\n self.memory['reward_sum'] += reward\n\n def reset_memory(self):\n self.memory = {\n 'x': [],\n 'y': [],\n 'reward': [],\n 'reward_sum': 0\n }\n # Karpathy (cf. https://gist.github.com/karpathy/a4166c7fe253700972fcbc77e4ea32c5)\n\n def discount_rewards(self, r, gamma):\n r = np.array(r)\n discounted_r = np.zeros_like(r)\n running_add = 0\n # we go from last reward to first one so we don't have to do exponentiations\n for t in reversed(range(0, r.size)):\n if r[t] != 0:\n # if the game ended (in Pong), reset the reward sum\n running_add = 0\n # the point here is to use Horner's method to compute those rewards efficiently\n running_add = running_add * gamma + r[t]\n discounted_r[t] = running_add\n\n discounted_r -= np.mean(discounted_r) # normalizing the result\n discounted_r /= np.std(discounted_r) # idem\n return discounted_r\n\n\nclass MLPModel(AbstractModel):\n def __init__(self, observation_dim: int, actions: np.ndarray, n_chars: int, char_ix=0):\n super().__init__(observation_dim, actions, n_chars, 'mlp', 32, True, char_ix)\n\n def _model_arch(self, observation_dim: int, actions_dim: int):\n input_layer = keras.layers.Input(shape=(observation_dim,))\n # x = keras.layers.BatchNormalization()(input_layer)\n x = keras.layers.Dense(200, activation='relu',\n kernel_initializer='glorot_uniform')(input_layer)\n output_layer = keras.layers.Dense(\n actions_dim, activation='softmax', kernel_initializer='RandomNormal')(x)\n model = keras.models.Model(input_layer, output_layer)\n model.compile(optimizer='adam',\n loss='categorical_crossentropy', metrics=['accuracy'])\n return model\n\n def end_game(self):\n pass\n\n\nclass LSTMModel(AbstractModel):\n def 
__init__(self, observation_dim, actions, n_chars, char_ix=0):\n super().__init__(observation_dim, actions, n_chars, 'lstm', 1, False, char_ix)\n\n def _model_arch(self, observation_dim, actions_dim):\n input_layer = keras.layers.Input(\n shape=(observation_dim,), batch_size=1)\n x = keras.layers.BatchNormalization()(input_layer)\n x = keras.layers.RepeatVector(1)(x)\n x = keras.layers.LSTM(\n 200, kernel_initializer='glorot_uniform', stateful=True)(x)\n output_layer = keras.layers.Dense(\n actions_dim, activation='softmax', kernel_initializer='RandomNormal')(x)\n model = keras.models.Model(input_layer, output_layer)\n optimizer = keras.optimizers.RMSprop(clipvalue=5)\n model.compile(optimizer=optimizer,\n loss='categorical_crossentropy', metrics=['accuracy'])\n return model\n\n def end_game(self):\n self.model.reset_states()\n"
},
{
"alpha_fraction": 0.5517241358757019,
"alphanum_fraction": 0.5960590839385986,
"avg_line_length": 12.533333778381348,
"blob_id": "b6355fab92b9e931b3f07fe3e75634ebee97171d",
"content_id": "0f04a4d1a2e9703b0ff52318024de13a01c85068",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "TOML",
"length_bytes": 203,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 15,
"path": "/Pipfile",
"repo_name": "setseed42/rl-fighters",
"src_encoding": "UTF-8",
"text": "[[source]]\nurl = \"https://pypi.org/simple\"\nverify_ssl = true\nname = \"pypi\"\n\n[packages]\npyglet = \"==1.3.2\"\nnumpy = \"*\"\ngym = \"==0.15.3\"\ntensorflow = \"*\"\n\n[dev-packages]\n\n[requires]\npython_version = \"3.7\"\n"
},
{
"alpha_fraction": 0.7244094610214233,
"alphanum_fraction": 0.7322834730148315,
"avg_line_length": 20.16666603088379,
"blob_id": "084be0df6c18b9799c6581190bb37409c931a7dc",
"content_id": "fbe676a76091262abdf6b1f2a4615695a277b529",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 127,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 6,
"path": "/gym-fighters/gym_fighters/__init__.py",
"repo_name": "setseed42/rl-fighters",
"src_encoding": "UTF-8",
"text": "from gym.envs.registration import register\n\nregister(\n id='fighters-v0',\n entry_point='gym_fighters.envs:FightersEnv',\n)\n"
},
{
"alpha_fraction": 0.7492492198944092,
"alphanum_fraction": 0.7552552819252014,
"avg_line_length": 23.66666603088379,
"blob_id": "ee33d0131a6e8754afe8c09b25a30286881daf68",
"content_id": "5fab4b998016685f2bea874df4740a5d87586cc7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 666,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 27,
"path": "/README.md",
"repo_name": "setseed42/rl-fighters",
"src_encoding": "UTF-8",
"text": "# rl-fighters\n\nPrerequisites:\n- [pipenv](https://pipenv.pypa.io/en/latest/)\n\nRun to install python environment\n```bash\npipenv install\n```\nActivate the pipenv environment\n```bash\npipenv shell\n```\nRun to install gym environment (the game)\n```bash\npip install -e gym-fighters\n```\n\n`python rl-agent/train.py` trains agents with tensorboard logs at `rl-agent/logs/gradient_tape`\n\nEvery 100 games a window will pop up with the game running in real time to see how they develop.\nOnce trained, watch them play running `python rl-agent/play.py`\n\n\nUbuntu requirements:\n\nIf you get error message `ImportError: Library \"GLU\" not found.` run `sudo apt-get install freeglut3-dev`\n"
}
] | 12 |
rimas79/OFXGenerator
|
https://github.com/rimas79/OFXGenerator
|
631aa408a68d6538ade8911ff5515a5a337cb78d
|
a7f40be5b9b4b1bc207d5f1b1e2be5980f9cf70b
|
bd9d71c9df59d597a9ba126922a4b6ac7f1eae52
|
refs/heads/master
| 2020-05-26T23:54:11.856107 | 2012-10-17T13:28:11 | 2012-10-17T13:28:11 | 2,408,566 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5645805597305298,
"alphanum_fraction": 0.5699067711830139,
"avg_line_length": 23.21505355834961,
"blob_id": "60414b693bfcc79c6a638c724774194516181ebf",
"content_id": "d117d5b44209482fbda9732307112ffd0b7f7512",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2314,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 93,
"path": "/AccStatement.py",
"repo_name": "rimas79/OFXGenerator",
"src_encoding": "WINDOWS-1251",
"text": "#-*- coding: cp1251 -*-\n'''\nCreated on 28.09.2011\n\n@author: Dennis.Erokhin\n'''\n\nclass AccStatement():\n '''\n Statement holder\n '''\n __transList = None\n __acctID = None\n __FID = None\n __company = None\n __servDate = None\n __startDate = None\n __endDate = None\n\n def __init__(self):\n '''\n Constructor\n '''\n self.__transList = []\n\n def getTransHeader(self):\n return [\"OpDate\", \"CheckDate\", \"OpNum\", \"OpPayee\", \"OpCur\", \"OpSum\", \"AccSum\", \"TrnType\"]\n\n def get_trans_list(self):\n return self.__transList\n\n def insertTransaction(self, trans):\n#TODO: если дата списания выбивается из периода отчета делать предупреждение\n if (trans.checkDate < self.__startDate or trans.checkDate > self.__endDate):\n print(\"date errors\")\n print(\"start_date\", self.__startDate)\n print(\"end_date\", self.__endDate)\n print(\"op_date\", trans.opDate)\n print(\"check_date\", trans.checkDate)\n raise(Exception(\"Transaction check date error\"))\n self.__transList.append(trans.getTransRecord())\n# self.__transList.append(trans)\n \n def __iter__(self):\n return iter(self.__transList)\n# \n# def next(self):\n# return self.next().getTransRecord();\n \n def get_acct_id(self):\n return self.__acctID\n\n def get_fid(self):\n return self.__FID\n\n def get_company(self):\n return self.__company\n\n def get_serv_date(self):\n return self.__servDate\n\n def get_start_date(self):\n return self.__startDate\n\n def get_end_date(self):\n return self.__endDate\n\n def getPeriod(self):\n return (self.__startDate, self.__endDate)\n\n def set_acct_id(self, value):\n self.__acctID = value\n\n def set_fid(self, value):\n self.__FID = value\n\n def set_company(self, value):\n self.__company = value\n\n def set_serv_date(self, value):\n self.__servDate = value\n\n def set_start_date(self, value):\n self.__startDate = value\n\n def set_end_date(self, value):\n self.__endDate = value\n\n def setCompany(self, company):\n self.__company = company\n \n def setFID(self, 
FID):\n self.__FID = FID\n\n"
},
{
"alpha_fraction": 0.5201305747032166,
"alphanum_fraction": 0.5313746929168701,
"avg_line_length": 28.031578063964844,
"blob_id": "ae2cc647dc18608115b2eb49013e15926f4f464b",
"content_id": "0ddaa6095202c16dec5a15686f044d91bd5cfc53",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2781,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 95,
"path": "/VTBCSVParser.py",
"repo_name": "rimas79/OFXGenerator",
"src_encoding": "UTF-8",
"text": "#-*- coding: iso-8859-15 -*-\n'''\nCreated on 19.11.2010\n\n@author: Dennis.Erokhin\n'''\nfrom report_parser import ReportParser\nimport csv\nimport re\nfrom datetime import datetime\nfrom BankTrans import BankTrans\n\nclass VTBCSVParser(ReportParser):\n '''\n Parsing VTB csv files\n '''\n stCard = \"CARD\"\n stAcct = \"ACCT\"\n\n __file = None\n __writer = None\n __stmtReader = None\n __state = None\n __trans = None\n __prog = None\n\n def __init__(self, file, writer):\n '''\n Constructor\n '''\n self.__file = file\n self.__writer = writer\n self.__stmtReader = csv.reader(file, delimiter = \";\")\n self.__prog = re.compile(\"(.*?)\\s*(\\d{6})\")\n pass\n\n def parseReason(self, text):\n result = self.__prog.match(text)\n if result:\n return (result.group(1), result.group(2))\n\n def formatDate(self, text):\n dt = datetime.strptime(text, \"%d.%m.%Y\")\n return dt.strftime(\"%Y%m%d\")\n pass \n \n def formatDateEx(self, text):\n dt = datetime.strptime(text, \"%d.%m.%Y %H:%M:%S\")\n return dt.strftime(\"%Y%m%d\")\n pass \n \n def processCardRecord(self, row):\n self.__trans.opDate = self.formatDateEx(row[1])\n self.__trans.checkDate = self.formatDate(row[2])\n self.__trans.opSum = row[3]\n self.__trans.opCur = row[4]\n self.__trans.accSum = row[6]\n (payee, num) = self.parseReason(row[7])\n self.__trans.opPayee = payee\n self.__trans.opNum = num\n pass\n\n def processAccountRecord(self, row):\n print(\",\".join(row))\n self.__trans.opDate = self.formatDateEx(row[0])\n self.__trans.checkDate = self.__trans.opDate\n self.__trans.opNum = row[1]\n self.__trans.opSum = row[2]\n self.__trans.accSum = self.__trans.opSum\n self.__trans.opPayee = row[3] \n pass\n \n def processRow(self, row):\n if self.__state == self.stCard:\n self.__trans = BankTrans()\n self.__trans.trnType = \"CREDIT\"\n self.processCardRecord(row)\n self.__writer.write(self.__trans)\n elif self.__state == self.stAcct:\n self.__trans = BankTrans()\n self.__trans.trnType = \"DEP\"\n 
self.processAccountRecord(row)\n self.__writer.write(self.__trans)\n if row[0] == \"Номер счета\":\n self.__writer.setAcctID(row[1])\n elif row[0] == \"Номер карты\":\n self.__state = self.stCard\n elif row[0] == \"Дата\":\n self.__state = self.stAcct\n \n def feed(self, text): \n for row in self.__stmtReader:\n# print(\",\".join(row))\n self.processRow(row) \n pass"
},
{
"alpha_fraction": 0.5085784196853638,
"alphanum_fraction": 0.533088207244873,
"avg_line_length": 19.200000762939453,
"blob_id": "ae2e947ac4736889d80e1a86df92805431c853bf",
"content_id": "3ee8fdd3844a958fb3ae6a76ff85443e7474b417",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 816,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 40,
"path": "/report_parser.py",
"repo_name": "rimas79/OFXGenerator",
"src_encoding": "UTF-8",
"text": "#-*- coding: cp1251 -*-\n'''\nCreated on 19.11.2010\n\n@author: Dennis.Erokhin\n'''\nfrom datetime import datetime\n\ndef isCredit(sum):\n if sum.endswith(\"CR\"):\n sum = sum.replace(\"CR\",\"\")\n else:\n sum = \"-\"+sum\n sum = sum.replace(\".\", \",\")\n return str(sum)\n \ndef format_trn_date(date):\n assert len(date) == 5 or len(date) == 7\n date = date.lower()\n if len(date) == 5:\n format = \"%d%b\"\n elif len(date) == 7:\n format = \"%d%b%y\"\n# print(date)\n# print(format)\n dt = datetime.strptime(date.encode(\"cp1251\"), format)\n return dt\n pass\n\nclass ReportParser():\n '''\n Parent class for all parsers\n '''\n\n __statement = None\n __file = None\n \n def __init__(self, file, holder):\n self.__statement = holder\n self.__file = file\n "
},
{
"alpha_fraction": 0.5743142366409302,
"alphanum_fraction": 0.5807980298995972,
"avg_line_length": 28.610687255859375,
"blob_id": "0b013179bf4490b6196726a5029c013ea3b5e031",
"content_id": "b8125c0731030650dd2bb2b7caf673a4ea817f15",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4010,
"license_type": "no_license",
"max_line_length": 144,
"num_lines": 131,
"path": "/main.py",
"repo_name": "rimas79/OFXGenerator",
"src_encoding": "UTF-8",
"text": "#-*- coding: cp1251 -*-\r\n'''\r\nCreated on 13.11.2010\r\n\r\n@author: Dennis.Erokhin\r\n'''\r\nfrom SBRFTextParser import SBRFTextParser\r\nfrom sbrf_html_parser import SBRFHtmlParser\r\nfrom VTBCSVParser import VTBCSVParser\r\nfrom ofx import OFX\r\nimport csv\r\nfrom AccStatement import AccStatement\r\nimport locale\r\nimport codecs\r\nimport cStringIO\r\nfrom os.path import splitext\r\nimport argparse\r\n\r\nVTB = \"VTB\"\r\nSBRFT = \"SBRFT\"\r\nSBRFH = \"SBRFH\"\r\n\r\nlocale.setlocale(locale.LC_ALL, 'russian')\r\n\r\nfileType = None\r\n \r\nclass UnicodeWriter:\r\n \"\"\"\r\n A CSV writer which will write rows to CSV file \"f\",\r\n which is encoded in the given encoding.\r\n \"\"\"\r\n\r\n def __init__(self, f, dialect=csv.excel, encoding=\"utf-8\", **kwds):\r\n # Redirect output to a queue\r\n self.queue = cStringIO.StringIO()\r\n self.writer = csv.writer(self.queue, dialect=dialect, **kwds)\r\n self.stream = f\r\n self.encoder = codecs.getincrementalencoder(encoding)()\r\n\r\n def writerow(self, row):\r\n res = [];\r\n for s in row:\r\n if hasattr(s, 'encode'):\r\n res.append(s.encode(\"utf-8\"))\r\n else:\r\n res.append(s)\r\n# self.writer.writerow([s.encode(\"utf-8\") for s in row if type(s)!='datetime.datetime'])\r\n self.writer.writerow(res)\r\n # Fetch UTF-8 output from the queue ...\r\n data = self.queue.getvalue()\r\n data = data.decode(\"utf-8\")\r\n # ... 
and reencode it into the target encoding\r\n data = self.encoder.encode(data)\r\n # write to the target stream\r\n self.stream.write(data)\r\n # empty queue\r\n self.queue.truncate(0)\r\n\r\n def writerows(self, rows):\r\n for row in rows:\r\n self.writerow(row) \r\n\r\ndef fileProcess(inFileName, fileType, outFileName, outFileType):\r\n print(inFileName)\r\n \r\n fileName= splitext(inFileName)[0].lower()\r\n# \r\n# fileExt = splitext(name)[1].lower()\r\n# \r\n# if fileExt == \".csv\":\r\n# fileType = VTB\r\n# elif fileExt == \".txt\":\r\n# fileType = SBRFT\r\n# elif fileExt == \".html\":\r\n# fileType = SBRFH\r\n# else:\r\n# raise NotImplementedError()\r\n# \r\n rep = AccStatement()\r\n print(fileType)\r\n with open(inFileName, 'r') as f: \r\n if fileType == VTB:\r\n myParser = VTBCSVParser(f, rep)\r\n elif fileType == SBRFT:\r\n rep.set_acct_id(fileName[0:4])\r\n myParser = SBRFTextParser(f, rep)\r\n elif fileType == SBRFH:\r\n myParser = SBRFHtmlParser(f, rep) \r\n else:\r\n raise NotImplementedError() \r\n rep.setCompany(fileType)\r\n rep.setFID(\"1001\")\r\n myParser.feed()\r\n f.close()\r\n \r\n print(outFileType)\r\n print(rep)\r\n if outFileType == \"OFX\":\r\n with open(outFileName, \"w\") as file_object:\r\n myOFX = OFX(file_object)\r\n myOFX.saveFile(rep)\r\n elif outFileType == \"CSV\":\r\n myCsv = UnicodeWriter(open(outFileName, 'w'), dialect='excel', lineterminator='\\n', delimiter=',', quotechar='\"', quoting=csv.QUOTE_ALL)\r\n myCsv.writerow(rep.getTransHeader())\r\n myCsv.writerows(rep)\r\n else:\r\n raise NotImplementedError() \r\n file_object.closed\r\n rep = None \r\n\r\nif __name__ == '__main__':\r\n print(\"Hello, World!\")\r\n\r\n'''\r\nfor root, dirs, files in os.walk(\"../IN/\"):\r\n for name in files:\r\n fileName = join(root, name) \r\n fileProcess(root, name)\r\n'''\r\nparser = argparse.ArgumentParser(description='Convert bank reports')\r\nparser.add_argument('-i', '--input', help='input file type', choices=['VTB', 'SBRFT', 
'SBRFH'])\r\nparser.add_argument('-o', '--output', help='output file type', choices=['OFX', 'CSV'])\r\nparser.add_argument('input_file', help='input file name')\r\nparser.add_argument('output_file', help='output file name')\r\n\r\n\r\nargs = parser.parse_args()\r\nprint (args)\r\nfileProcess(args.input_file, args.input, args.output_file, args.output)\r\n \r\nexit()\r\n"
},
{
"alpha_fraction": 0.35170498490333557,
"alphanum_fraction": 0.3628073036670685,
"avg_line_length": 31.972972869873047,
"blob_id": "a6efea29287fa4c926dd0ebac4d5ee90ad4751b6",
"content_id": "a7c14b00afe1420bb0c4945583da195c6551bfe2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2522,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 74,
"path": "/ofx.py",
"repo_name": "rimas79/OFXGenerator",
"src_encoding": "UTF-8",
"text": "#-*- coding: cp1251 -*-\r\n'''\r\nCreated on 15.11.2010\r\n\r\n@author: Dennis.Erokhin\r\n'''\r\n\r\nfrom OFXWriter import OFXWriter\r\nfrom lxml import etree as ET\r\nfrom lxml.builder import E\r\n\r\nclass OFX(OFXWriter):\r\n '''\r\n Generate OFX document\r\n '''\r\n\r\n def __init__(self, file):\r\n OFXWriter.__init__(self, file)\r\n\r\n def saveFile(self, report):\r\n ofxDoc = (\r\n E.OFX(\r\n E.SIGNONMSGSRSV1(\r\n E.SONRS(\r\n E.STATUS(\r\n E.CODE(\"0\"),\r\n E.SEVERITY(\"INFO\")\r\n ),\r\n E.DTSERVER(report.get_serv_date().strftime(\"%Y%m%d\")),\r\n E.LANGUAGE(\"RUS\"),\r\n E.FI(\r\n E.ORG(report.get_company()),\r\n E.FID(report.get_fid())\r\n )\r\n )\r\n ),\r\n E.BANKMSGSRSV1(\r\n E.STMTTRNRS(\r\n E.TRNUID(\"1001\")\r\n ),\r\n E.STATUS(\r\n E.CODE(\"0\"),\r\n E.SEVERITY(\"INFO\")\r\n ),\r\n E.STMTRS(\r\n E.CURDEF(\"RUR\"),\r\n E.CCACCTFROM(\r\n E.ACCTID(report.get_acct_id())\r\n ),\r\n E.BANKTRANLIST(\r\n E.DTSTART(report.get_start_date().strftime(\"%Y%m%d\")),\r\n E.DTEND(report.get_end_date().strftime(\"%Y%m%d\")),\r\n )\r\n )\r\n )\r\n )\r\n )\r\n \r\n transList = ofxDoc.find(\".//BANKTRANLIST\")\r\n for trans in report.get_trans_list():\r\n trns = (\r\n E.STMTTRN(\r\n E.DTPOSTED(trans[0].strftime(\"%Y%m%d\")), #.opDates\r\n E.DTUSER(trans[1].strftime(\"%Y%m%d\")), #.checkDate\r\n E.TRNAMT(trans[6]), #.accSum\r\n E.FITID(str(trans[0].strftime(\"%Y%m%d\")) + str(trans[2])), #.opDate, .opNum\r\n E.CHECKNUM(trans[2]), #.opNum\r\n E.NAME(trans[3]), #.opPayee\r\n E.MEMO(trans[3]) #.opPayee\r\n )\r\n )\r\n transList.append(trns)\r\n# print(ET.tostring(ofxDoc, pretty_print=True))\r\n self.file_object.write(ET.tostring(ofxDoc, pretty_print=True))\r\n "
},
{
"alpha_fraction": 0.5153477787971497,
"alphanum_fraction": 0.5266122221946716,
"avg_line_length": 34.158416748046875,
"blob_id": "49424c4c09ef6ecfe6254ed45ef4dbe1ab65415a",
"content_id": "0a6e068bebbfe642221d505bbd8f0108cae7cb6e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3573,
"license_type": "no_license",
"max_line_length": 149,
"num_lines": 101,
"path": "/SBRFTextParser.py",
"repo_name": "rimas79/OFXGenerator",
"src_encoding": "WINDOWS-1252",
"text": "#-*- coding: cp1251 -*-\n'''\nCreated on 18.11.2010\n\n@author: Dennis.Erokhin\n'''\nfrom report_parser import ReportParser, isCredit, format_trn_date\nfrom BankTrans import BankTrans\nimport re\n\n#const\n_long_date_fmt = \"\\d\\d\\w{3}\\d\\d\"\n_head_rx = re.compile('ÎÒ×ÅÒ ÑÎÑÒÀÂËÅÍ ÇÀ ÏÅÐÈÎÄ.*')\n_delim_str = re.compile('-[+]-')\n_delim_rx = re.compile('[+]-*')\nprog = re.compile(\".*\\s(\\d\\d\\w{3})\\s+(\\d\\d\\w{3}\\d\\d)\\s+.*\\s+(\\d+[.]\\d{2}).*\", re.LOCALE)\ntrans = re.compile(\".*\\s(\\d\\d\\w{3})\\s(\\d\\d\\w{3}\\d\\d)\\s([\\dE+]{6})\\s+(.{22})\\s+(\\w{0,3})\\s+(\\d*[.]*\\d{0,2})\\s+(\\d+[.]\\d{2}[CR]*).*\", re.LOCALE) \n\nclass SBRFTextParser(ReportParser):\n '''\n Parsing SBRF text repors\n '''\n _state = None\n\n def __init__(self, file, holder):\n '''\n initialization\n '''\n self.__statement = holder\n self.__file = file\n\n def get_next_line(self):\n line = self.__file.readline()\n return line\n \n def is_header(self, str):\n return str != \"\" and _head_rx.match(str)\n \n def is_delim(self, str):\n return str != \"\" and _delim_str.search(str)\n \n def get_statement_params(self, str):\n rx = '.*\\s('+_long_date_fmt+')\\s*-\\s*('+_long_date_fmt+')\\s*('+_long_date_fmt+').*'\n params_rx = re.compile(rx, re.L)\n result = params_rx.match(str)\n if result:\n print(result.groups())\n self.__statement.set_start_date(format_trn_date(result.group(1)))\n self.__statement.set_end_date(format_trn_date(result.group(2)))\n self.__statement.set_serv_date(format_trn_date(result.group(3))) \n else:\n raise(Exception(\"non-header string in header position\")) \n \n def process_header(self):\n if self._state == 'begin':\n self._state = 'header'\n str = self.get_next_line()\n self.get_statement_params(str)\n \n def process_delim(self, str):\n delims = _delim_rx.finditer(str)\n if self._state == 'header':\n self._state = 'first_delim'\n elif self._state == 'first_delim':\n self._state = 'second_delim'\n self.pl_pos = [pl.start() for pl in delims]\n elif 
self._state == 'second_delim':\n self._state = 'third_delim'\n elif self._state == 'third_delim':\n self._state = 'first_delim'\n \n def process_trans(self, str):\n result = prog.match(str) \n if result:\n fields = trans.match(str)\n assert fields\n print(fields.groups())\n curTrans = BankTrans()\n curTrans.trnType = \"CREDIT\"\n curTrans.set_dates(opDate = format_trn_date(fields.group(1)), \\\n checkDate = format_trn_date(fields.group(2)), \\\n period = self.__statement.getPeriod())\n curTrans.opNum = fields.group(3)\n curTrans.opPayee = fields.group(4).decode('cp1251')\n curTrans.opCur = fields.group(5)\n curTrans.opSum = fields.group(6)\n curTrans.accSum = isCredit(fields.group(7))\n self.__statement.insertTransaction(curTrans)\n pass\n \n def feed(self):\n self._state = 'begin'\n str = self.get_next_line()\n while str:\n if self.is_header(str):\n self.process_header()\n elif self.is_delim(str):\n self.process_delim(str)\n elif self._state == 'second_delim':\n self.process_trans(str)\n str = self.get_next_line()\n"
},
{
"alpha_fraction": 0.524193525314331,
"alphanum_fraction": 0.551612913608551,
"avg_line_length": 32.44444274902344,
"blob_id": "7e43bd43b1bd707a7112c2650585fedf53446e54",
"content_id": "162949ad263347b810f898b67fa6fab0b691d03b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1860,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 54,
"path": "/sbrf_html_parser.py",
"repo_name": "rimas79/OFXGenerator",
"src_encoding": "UTF-8",
"text": "'''\r\nCreated on 13.11.2010\r\n\r\n@author: Dennis.Erokhin\r\n'''\r\nfrom report_parser import ReportParser, isCredit, format_trn_date\r\nfrom BankTrans import BankTrans\r\nimport lxml.html\r\n\r\nROW_COUNT = 8; \r\n\r\ndef debug_print(text):\r\n debug = 0;\r\n if debug > 0:\r\n print(text)\r\n \r\nclass SBRFHtmlParser(ReportParser):\r\n '''\r\n Parse SBRF CC report\r\n '''\r\n xpathAcctID=\"body/table[1]/tr[2]/td[1]/table[1]/tr[2]/td[1]\"\r\n xpathServDate = \"body/table/tr[2]/td[3]/table[1]/tr[2]/td\";\r\n xpathRepPeriod = \"body/table/tr[2]/td[3]/table[1]/tr[2]/td[2]/text()\";\r\n\r\n def __init__(self, file, holder):\r\n self.__statement = holder\r\n self.__file = file\r\n\r\n def feed(self):\r\n doc = lxml.html.parse(self.__file)\r\n root = doc.getroot()\r\n txt1 = root.xpath(self.xpathAcctID)\r\n self.__statement.set_acct_id(txt1[0].text)\r\n \r\n txt1 = root.xpath(self.xpathServDate)\r\n debug_print(txt1)\r\n self.__statement.set_serv_date(format_trn_date(txt1[0].text))\r\n self.__statement.set_start_date(format_trn_date(txt1[1].text[0:7]))\r\n self.__statement.set_end_date(format_trn_date(txt1[1].text[-7:]))\r\n \r\n txt2 = root.xpath('body/table[2]/tr[@class=\"rowTrnData\"]')\r\n for tr in txt2:\r\n curTrans = BankTrans()\r\n curTrans.trnType = \"CREDIT\"\r\n curTrans.set_dates(opDate = tr[1].text, \\\r\n checkDate = tr[2].text, \\\r\n period = self.__statement.getPeriod())\r\n curTrans.opNum = tr[3].text\r\n curTrans.opPayee = tr[4].text\r\n curTrans.opCur = tr[5].text\r\n if tr[6].text:\r\n curTrans.opSum = isCredit(tr[6].text)\r\n curTrans.accSum = isCredit(tr[7].text)\r\n self.__statement.insertTransaction(curTrans)\r\n"
},
{
"alpha_fraction": 0.44897958636283875,
"alphanum_fraction": 0.47230321168899536,
"avg_line_length": 14.428571701049805,
"blob_id": "b47680f5d93ce703650de3002bdef43df2ca6560",
"content_id": "a5fa48bc388dfb2d8388c799f2938c99b111806c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 343,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 21,
"path": "/OFXWriter.py",
"repo_name": "rimas79/OFXGenerator",
"src_encoding": "UTF-8",
"text": "'''\r\nCreated on 15.11.2010\r\n\r\n@author: Dennis.Erokhin\r\n'''\r\n\r\nclass OFXWriter():\r\n '''\r\n Write OFX to FILE\r\n '''\r\n file_object = None;\r\n\r\n def __init__(self, file):\r\n '''\r\n Constructor\r\n '''\r\n self.file_object = file;\r\n \r\n def write(self):\r\n raise NotImplementedError()\r\n pass"
},
{
"alpha_fraction": 0.4920828342437744,
"alphanum_fraction": 0.5042631030082703,
"avg_line_length": 23.59375,
"blob_id": "9b68a015b5b61b47dcf70ac4be309ee69d8333ba",
"content_id": "ffbd059c9475144c7cef2fd4b096b4f1d3b11526",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1642,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 64,
"path": "/BankTrans.py",
"repo_name": "rimas79/OFXGenerator",
"src_encoding": "UTF-8",
"text": "'''\r\nCreated on 15.11.2010\r\n\r\n@author: Dennis.Erokhin\r\n'''\r\n\r\nfrom datetime import datetime\r\n\r\nclass BankTrans(): \r\n '''\r\n Bank transaction record\r\n '''\r\n\r\n opDate = None\r\n checkDate = None\r\n opNum = None\r\n opPayee = None\r\n opCur = None\r\n opSum = None\r\n accSum = None\r\n trnType = None\r\n\r\n def __init__(self):\r\n '''\r\n Constructor\r\n '''\r\n\r\n\r\n def set_dates(self, opDate, checkDate, period):\r\n def replace_year(date):\r\n assert len(date) == 5 or len(date) == 7\r\n date = date.lower()\r\n if len(date) == 5:\r\n date = date+str(period[0].year)[-2:]\r\n format = \"%d%b%y\"\r\n# print(date)\r\n# print(format)\r\n dt = datetime.strptime(date.encode(\"cp1251\"), format)\r\n\r\n if dt.month == period[0].month:\r\n dt = dt.replace(period[0].year)\r\n else:\r\n dt = dt.replace(period[1].year)\r\n return dt\r\n \r\n if not checkDate:\r\n print(\"checkDate is None\")\r\n checkDate = opDate\r\n \r\n self.opDate = replace_year(opDate)\r\n self.checkDate = replace_year(checkDate)\r\n \r\n def getTransRecord(self):\r\n return [self.opDate, self.checkDate, self.opNum, self.opPayee, self.opCur, self.opSum, self.accSum, self.trnType]\r\n \r\n def print_trans(self):\r\n print(self.opDate);\r\n print(self.checkDate);\r\n print(self.opNum);\r\n print(self.opPayee);\r\n print(self.opCur);\r\n print(self.opSum);\r\n print(self.accSum);\r\n print(self.trnType);\r\n "
}
] | 9 |
sivabuddi/Hackathon20
|
https://github.com/sivabuddi/Hackathon20
|
34043d25c6e29bb09d0400c887e89731836aaed2
|
36dccecf591d5cde664597e19f0ffeaa6414c6a6
|
723db4a7c70fe326929d55af5946be53bbb16705
|
refs/heads/master
| 2022-10-12T02:39:19.401925 | 2020-06-05T20:24:10 | 2020-06-05T20:24:10 | 268,317,769 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7049180269241333,
"alphanum_fraction": 0.7148303389549255,
"avg_line_length": 35.9295768737793,
"blob_id": "605ab91198e46cbc3a6285cc9acd17073a37ef6f",
"content_id": "5710b44352f67c79b9d399b438f93e586c3c042f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2623,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 71,
"path": "/2_Transfer_Learning.py",
"repo_name": "sivabuddi/Hackathon20",
"src_encoding": "UTF-8",
"text": "from keras.models import load_model\nfrom keras_preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Embedding, LSTM, SpatialDropout1D\nfrom sklearn.model_selection import train_test_split\nfrom keras.utils.np_utils import to_categorical\nimport re\nfrom sklearn.preprocessing import LabelEncoder\nfrom nltk.corpus import stopwords\nstop = stopwords.words('english')\nimport numpy as np\nimport pickle\n\nmodel = load_model('lstm.h5')\nwith open('tokenizer.pickle', 'rb') as handle:\n loaded_tokenizer = pickle.load(handle)\n\n\n\ndata = pd.read_csv('Validation2.csv')\ndata = data[[\"target\",\"comment_text\",\"toxicity_annotator_count\"]]\ncolumns_titles = [\"comment_text\",\"toxicity_annotator_count\",\"target\"]\ndata=data.reindex(columns=columns_titles)\n\ndata['comment_text'] = data['comment_text'].apply(lambda x: x.lower())\ndata['comment_text'] = data['comment_text'].apply((lambda x: re.sub('[^a-zA-z\\s]', '', x)))\n\nprint(\"word count before removing stop words\")\ndata['word_count'] = data['comment_text'].apply(lambda x: len(x.split()))\n#print(data['comment_text'])\ndata['comment_text'] = data['comment_text'].apply(lambda x: ' '.join([item for item in x.split() if item not in stop]))\n\nprint(\"=========================================================================================\")\n\nprint(\"word count after stop words\")\n#print(data['comment_text'])\ndata['word_count_stopwords'] = data['comment_text'].str.split().str.len()\nprint(data[['word_count','word_count_stopwords']])\n\n#max_fatures = 4000\nloaded_tokenizer.fit_on_texts(data['comment_text'].values)\nX = 
loaded_tokenizer.texts_to_sequences(data['comment_text'].values)\nX = pad_sequences(X)\nprint(X.shape[1])\n\nembed_dim= 256\nbatch_size = 64\nlstm_out = 128\n\n\nsort_by_stop = data.sort_values('word_count_stopwords',ascending=False)\nprint(sort_by_stop)\n\n\nX_final=np.array(X)\ndata['target'] = data['target'].fillna((data['target'].mean()))\ny_final=np.array(data['target'])\nprint(X_final.shape, y_final.shape)\n\nX_train, X_test, Y_train, Y_test = train_test_split(X_final, y_final, test_size=0.33, random_state=42)\nmodel.fit(X_train, Y_train, epochs=5, batch_size=batch_size, verbose=2)\nmodel.save('lstm1.h5')\nscore, acc = model.evaluate(X_test, Y_test, verbose=2, batch_size=batch_size)\nprint(score)\nprint(acc)\n\n"
},
{
"alpha_fraction": 0.7001121044158936,
"alphanum_fraction": 0.7104820609092712,
"avg_line_length": 34.32673263549805,
"blob_id": "f364f95f3c7d33a492dbf14475b01ec6294a68fd",
"content_id": "132dc7a6958c58dc8717dcc5ece6e67b52587241",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3568,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 101,
"path": "/Filter_Validation.py",
"repo_name": "sivabuddi/Hackathon20",
"src_encoding": "UTF-8",
"text": "import numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Embedding, LSTM, SpatialDropout1D\nfrom sklearn.model_selection import train_test_split\nfrom keras.utils.np_utils import to_categorical\nimport re\nfrom sklearn.preprocessing import LabelEncoder\nfrom nltk.corpus import stopwords\nstop = stopwords.words('english')\nimport numpy as np\n\n\ndata = pd.read_csv('Validation1.csv')\ndata = data[[\"target\",\"comment_text\",\"toxicity_annotator_count\"]]\ncolumns_titles = [\"comment_text\",\"toxicity_annotator_count\",\"target\"]\ndata=data.reindex(columns=columns_titles)\n\ndata['comment_text'] = data['comment_text'].apply(lambda x: x.lower())\ndata['comment_text'] = data['comment_text'].apply((lambda x: re.sub('[^a-zA-z\\s]', '', x)))\n\nprint(\"word count before removing stop words\")\ndata['word_count'] = data['comment_text'].apply(lambda x: len(x.split()))\n#print(data['comment_text'])\ndata['comment_text'] = data['comment_text'].apply(lambda x: ' '.join([item for item in x.split() if item not in stop]))\n\nprint(\"=========================================================================================\")\n\nprint(\"word count after stop words\")\n#print(data['comment_text'])\ndata['word_count_stopwords'] = data['comment_text'].str.split().str.len()\nprint(data[['word_count','word_count_stopwords']])\n\n\nvoc_size = 80000\ntokenizer = Tokenizer(num_words=voc_size, split=' ')\ntokenizer.fit_on_texts(data['comment_text'].values)\nX = tokenizer.texts_to_sequences(data['comment_text'].values)\nX = pad_sequences(X)\nprint(X.shape[1])\nimport pickle\n# https://androidkt.com/saved-keras-model-to-predict-text-from-scratch/\n# save tokenizer\nwith open('tokenizer.pickle', 
'wb') as handle:\n pickle.dump(tokenizer, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n\nembed_dim= 256\nbatch_size = 64\nlstm_out = 128\n\n\ndef createmodel():\n model = Sequential()\n model.add(Embedding(voc_size, embed_dim, input_length=X.shape[1]))\n model.add(SpatialDropout1D(0.4))\n model.add(LSTM(lstm_out, dropout=0.2, recurrent_dropout=0.2))\n model.add(Dense(2, activation='sigmoid'))\n model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n model.save('lstm.h5')\n return model\n\nsort_by_stop = data.sort_values('word_count_stopwords',ascending=False)\nprint(sort_by_stop)\n\nX_final=np.array(X)\ndata['target'] = data['target'].fillna((data['target'].mean()))\ny_final=np.array(data['target'])\n\nprint(X_final.shape, y_final.shape)\nX_train, X_test, Y_train, Y_test = train_test_split(X_final, y_final, test_size=0.33, random_state=42)\n\nmodel = createmodel()\nhistory= model.fit(X_train, Y_train, validation_split=0.33, epochs=5, batch_size=batch_size, verbose=2)\nprint(history.history)\nscore, acc = model.evaluate(X_test, Y_test, verbose=2, batch_size=batch_size)\nprint(score)\nprint(acc)\n\nprint(history.history.keys())\n# \"Accuracy\"\nimport matplotlib.pyplot as plt\nplt.plot(history.history['accuracy'])\nplt.plot(history.history['val_accuracy'])\nplt.title('model accuracy')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.legend(['train', 'validation'], loc='upper left')\nplt.show()\n# \"Loss\"\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'validation'], loc='upper left')\nplt.show()\n"
}
] | 2 |
demyashev/python.machine.learning
|
https://github.com/demyashev/python.machine.learning
|
1aede6af98f28af35d9aeee30faf8fc302f90096
|
743efc9cd5b04e9ff6fc08cc081cd5e2f82fb91b
|
51e13a004e9186e2db1afa0ed32b66df4fd2388f
|
refs/heads/master
| 2020-04-23T07:49:48.510301 | 2019-02-16T18:01:14 | 2019-02-16T18:01:14 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.643539309501648,
"alphanum_fraction": 0.6536517143249512,
"avg_line_length": 26.031496047973633,
"blob_id": "0f200d9a7c30a0817f5f1ad6af9cf477491a63ec",
"content_id": "a2bb69dac5318e0de7a15bcee0d52cd7cf46ddcf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3770,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 127,
"path": "/main.py",
"repo_name": "demyashev/python.machine.learning",
"src_encoding": "UTF-8",
"text": "import numpy as np\r\nimport nltk\r\nimport os\r\nimport email_read_util\r\n\r\nfrom sklearn.naive_bayes import MultinomialNB\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.model_selection import cross_val_score\r\nfrom nltk.metrics import edit_distance\r\n\r\n\r\ndef read_email_files():\r\n X = []\r\n y = []\r\n for i in range(len(labels)):\r\n filename = 'inmail.' + str(i+1)\r\n email_str = email_read_util.extract_email_text(\r\n os.path.join(DATA_DIR, filename))\r\n X.append(email_str)\r\n y.append(labels[filename])\r\n return X, y\r\n\r\n\r\ndef read_email_files_load():\r\n X = []\r\n y = []\r\n for i in range(len(labels)):\r\n filename = 'inmail.' + str(i+1)\r\n email_str = email_read_util.load(\r\n os.path.join(DATA_DIR, filename))\r\n X.append(email_str)\r\n y.append(labels[filename])\r\n return X, y\r\n\r\n\r\ndef classify(X, y, clf, vectorizer):\r\n\r\n # Преобразование массива строк в структуру bag of words\r\n X_vector = vectorizer.fit_transform(X)\r\n\r\n # Обучение\r\n clf.fit(X_vector, y)\r\n\r\n # Оценка\r\n score = cross_val_score(clf, X_vector, y, cv=3)\r\n print('Accuracy: ', end='')\r\n print(score)\r\n print('Mean accuracy: ', end='')\r\n print(score.mean())\r\n\r\n\r\ndef compare(email_str0, email_str1, clf, vectorizer):\r\n\r\n # Классификация исходного письма\r\n Z = []\r\n Z.append(email_str0)\r\n Z_vector = vectorizer.transform(Z)\r\n label = clf.predict(Z_vector)[0]\r\n print('Predicted label: ', end='')\r\n print(label)\r\n\r\n # Классификация измененного письма\r\n Z = []\r\n Z.append(email_str1)\r\n Z_vector = vectorizer.transform(Z)\r\n label = clf.predict(Z_vector)[0]\r\n print('New label: ', end='')\r\n print(label)\r\n\r\n\r\nnltk.download('punkt')\r\nnltk.download('stopwords')\r\n\r\nDATA_DIR = 'datasets/trec07p/data/'\r\nLABELS_FILE = 'datasets/trec07p/full/index'\r\n\r\n# 
Получаем метки классов\r\nlabels = {}\r\nwith open(LABELS_FILE) as f:\r\n for line in f:\r\n line = line.strip()\r\n label, key = line.split()\r\n labels[key.split('/')[-1]] = 1 if label.lower() == 'ham' else 0\r\n\r\n\r\nprint('Наивный Байес')\r\nX, y = read_email_files()\r\nvectorizer = CountVectorizer()\r\nclf = MultinomialNB()\r\nclassify(X, y, clf, vectorizer)\r\n\r\n# Сравнение измененного письма (extract_email_text)\r\nprint('Отравление Байеса')\r\nfilename = 'inmail.4'\r\nemail_str0 = email_read_util.extract_email_text(os.path.join(DATA_DIR, filename))\r\nemail_str1 = email_read_util.extract_email_text(os.path.join(filename))\r\nind = X.index(email_str0)\r\nprint('First label: ', end='')\r\nprint(y[ind])\r\nprint('Edit distance: ', end='')\r\nprint(edit_distance(email_str0, email_str1))\r\n\r\ncompare(email_str0, email_str1, clf, vectorizer)\r\n\r\nprint('Замена extract_email_text на load')\r\nX, y = read_email_files_load()\r\nclassify(X, y, clf, vectorizer)\r\nemail_str0 = email_read_util.load(os.path.join(DATA_DIR, filename))\r\nemail_str1 = email_read_util.load(os.path.join(filename))\r\ncompare(email_str0, email_str1, clf, vectorizer)\r\n\r\nprint('Биграммы')\r\nvectorizer = CountVectorizer(ngram_range=(2, 2))\r\nclassify(X, y, clf, vectorizer)\r\ncompare(email_str0, email_str1, clf, vectorizer)\r\n\r\nprint('TF/IDF')\r\nvectorizer = TfidfVectorizer()\r\nclassify(X, y, clf, vectorizer)\r\ncompare(email_str0, email_str1, clf, vectorizer)\r\n\r\nprint('Случайный лес')\r\nclf = RandomForestClassifier()\r\nclassify(X, y, clf, vectorizer)\r\ncompare(email_str0, email_str1, clf, vectorizer)\r\n"
},
{
"alpha_fraction": 0.6439118385314941,
"alphanum_fraction": 0.7031885385513306,
"avg_line_length": 30.362415313720703,
"blob_id": "1152a97a3d900d830b7bc9812e3e6da81575a891",
"content_id": "a40905859c00b6d7e8230ae62b8da8c0f26477fc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 5930,
"license_type": "no_license",
"max_line_length": 170,
"num_lines": 149,
"path": "/README.md",
"repo_name": "demyashev/python.machine.learning",
"src_encoding": "UTF-8",
"text": "# Отчет по machine.learning\nТретье задание из [курса](https://github.com/Ba-Ski/AI-in-IS-course) по машинному обучению.\n\n## Код\nПовторяющийся код вынесен в отдельные функции. А именно:\n\n> def classify(X, y, clf, vectorizer)\n\n # Преобразование массива строк в структуру bag of words\n X_vector = vectorizer.fit_transform(X)\n\n # Обучение\n clf.fit(X_vector, y)\n\n # Оценка\n score = cross_val_score(clf, X_vector, y, cv=3)\n print('Accuracy: ', end='')\n print(score)\n print('Mean accuracy: ', end='')\n print(score.mean())\n\nПреобразовывает массив строк письма в структуру Bag of words, обучает классификатор, производит проверку результатов с использованием кросс-валидации, выводим результаты:\n* Accuracy - точность\n* Mean accuracy - усредненная точность\n\n> def compare(email_str0, email_str1, clf, vectorizer)\n\n # Классификация исходного письма\n Z = []\n Z.append(email_str0)\n Z_vector = vectorizer.transform(Z)\n label = clf.predict(Z_vector)[0]\n print('Predicted label: ', end='')\n print(label)\n\n # Классификация измененного письма\n Z = []\n Z.append(email_str1)\n Z_vector = vectorizer.transform(Z)\n label = clf.predict(Z_vector)[0]\n print('New label: ', end='')\n print(label)\n\nСравниваем письма, строим прогноз, выводим результаты.\n* Predicted label - было\n* New label - стало\n\n## Выполнение задания\n### Наивный Байес [пункт 0-2]\nЧитаем письма, с помощью наивного байесовского классификатора проводим обучение и оценку.\n \n X, y = read_email_files()\n vectorizer = CountVectorizer()\n clf = MultinomialNB()\n classify(X, y, clf, vectorizer)\n\nПолучаем результат:\n\n Accuracy: [0.97983294 0.96901352 0.95111182]\n Mean accuracy: 0.9666527593717619\n\n### Отравления Байеса [пункт 3]\nМеняем письмо:\n\n и измененым (справа)\")\n*Различие между оригинальным письмом (слева) и измененым (справа)*\n\n filename = 'inmail.4'\n email_str0 = email_read_util.extract_email_text(os.path.join(DATA_DIR, filename))\n email_str1 = 
email_read_util.extract_email_text(os.path.join(filename))\n ind = X.index(email_str0)\n print('First label: ', end='')\n print(y[ind])\n print('Edit distance: ', end='')\n print(edit_distance(email_str0, email_str1))\n compare(email_str0, email_str1, clf, vectorizer)\n\nНа первом (исходном) письме, обученный классификатор считает письмо спамом. Второе (измененное) тоже спам. Расстояние Левенштейна равно `82`\n\n First label: 0\n Edit distance: 82\n Predicted label: 0\n New label: 0\n\n### Замена extract_email_text на load [пункт 4]\n\nЗагружаем письма с помощью метода `load()`, а не `extract_email_text()`, проводим классификацию, сравниваем письма.\n\n X, y = read_email_files_load()\n classify(X, y, clf, vectorizer)\n email_str0 = email_read_util.load(os.path.join(DATA_DIR, filename))\n email_str1 = email_read_util.load(os.path.join(filename))\n compare(email_str0, email_str1, clf, vectorizer)\n\nКлассификатор, после замены функции, отнес модифицированное письмо к спаму.\n\n Accuracy: [0.97820207 0.97295147 0.95497036]\n Mean accuracy: 0.9687079683156293\n Predicted label: 0\n New label: 0\n\n### Биграммы [пункт 5]\n\n**CountVectorizer** с параметрами инициализации `ngram_range=(2, 2)`\n \n vectorizer = CountVectorizer(ngram_range=(2, 2))\n classify(X, y, clf, vectorizer)\n compare(email_str0, email_str1, clf, vectorizer)\n\nТочность повысилась, оба письма - спам:\n\n Accuracy: [0.98838504 0.98174224 0.97497912]\n Mean accuracy: 0.9817021344353768\n Predicted label: 0\n New label: 0\n\n**TfidfVectorizer**\n\n vectorizer = TfidfVectorizer()\n classify(X, y, clf, vectorizer)\n compare(email_str0, email_str1, clf, vectorizer)\n \nВ обоих случаях \"отравленное\" письмо отнесли к спаму:\n\n Accuracy: [0.97143994 0.97171838 0.9761327 ]\n Mean accuracy: 0.9730970052068706\n Predicted label: 0\n New label: 0\n\nТочность меньше, чем при `CountVectorizer(ngram_range=(2, 2))`, но выше, чем у `байесовского`.\n\n### Случайный лес [пункт 6-7]\n\n clf = 
RandomForestClassifier()\n classify(X, y, clf, vectorizer)\n compare(email_str0, email_str1, clf, vectorizer)\n\nУсредненная точность выше `байесовского`, `TfidfVectorizer`, но чуть меньше `CountVectorizer` с биграммами. Оба письма - спам:\n\n Accuracy: [0.98281623 0.98257757 0.97748518]\n Mean accuracy: 0.9809596590451125\n Predicted label: 0\n New label: 0\n\n\n## Ссылки\n\n* [Задание](https://github.com/Ba-Ski/AI-in-IS-course/tree/master/Homework%203)\n* [Dataset](https://plg.uwaterloo.ca/~gvcormac/treccorpus07/)\n"
}
] | 2 |
bcjohnnie/SSRNScrape
|
https://github.com/bcjohnnie/SSRNScrape
|
7b005abd3b74e60acdc9ec755401de20c5b28374
|
4d75263e748cc52623224e6ba43dcbae9b38332b
|
c35f189da70d0161e65a74f66851d18987e6a97a
|
refs/heads/master
| 2021-08-15T15:02:03.400196 | 2017-11-17T21:53:39 | 2017-11-17T21:53:39 | 111,153,842 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.8093587756156921,
"alphanum_fraction": 0.8093587756156921,
"avg_line_length": 71.125,
"blob_id": "4eb7e39161488faef045d55a15313c8015a77bb2",
"content_id": "7e7070d0b5b850017badebe8da575ed25c532985",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 577,
"license_type": "no_license",
"max_line_length": 269,
"num_lines": 8,
"path": "/README.md",
"repo_name": "bcjohnnie/SSRNScrape",
"src_encoding": "UTF-8",
"text": "# SSRNScrape\nA Python script to scrape recently posted articles to various SSRN ejournals\n\nCurrently dependent on the selenium package.\n\nTesting implementation visits a specific SSRN ejournal page, re-sorts to descending by date published (there may be a better way to do this using the requests library).\n\nThe script then takes the HTML of the first n entries, and pulls out relevant metadata. Currently just prints data to console, planning to add this data to a dictionary so that it can be output programmatically in HTML and turned into an email list of recent postings.\n"
},
{
"alpha_fraction": 0.607630729675293,
"alphanum_fraction": 0.6250588893890381,
"avg_line_length": 31.603174209594727,
"blob_id": "77f6afbe39b6eb44c3fecbfcf38dbff70ee1bfb7",
"content_id": "308b0cd65f72f394ba00464a05e3efd98ea07e0e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2123,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 63,
"path": "/SSRNscrape.py",
"repo_name": "bcjohnnie/SSRNScrape",
"src_encoding": "UTF-8",
"text": "import requests, re\r\nfrom selenium import webdriver\r\n\r\nbrowser = webdriver.Firefox()\r\n\r\nbrowser.get('https://papers.ssrn.com/sol3/JelJour_Results.cfm?Network=no&form_name=journalBrowse&journal_id=157488')\r\n\r\nsortElem = browser.find_element_by_css_selector('#sort-by')\r\nsortElem.click()\r\nclickElem = browser.find_element_by_css_selector('#sort-by > option:nth-child(6)')\r\nclickElem.click()\r\n\r\nbodyElem = browser.find_element_by_css_selector('.tbody')\r\nbody = bodyElem.get_attribute('innerHTML')\r\n\r\nstopInd = body.find('11.')\r\nhtml = body[:stopInd]\r\n\r\nregex = re.compile(r\"\\s*(<[^<>]+>)\\s*\")\r\nhtml2 = regex.sub(\"\\g<1>\", html)\r\nhtml2 = html2[33:]\r\n\r\n\r\nbrowser.close()\r\n\r\nprint(len(html2))\r\n\r\nfor i in range (10):\r\n print(\"Entry: \" + str(i + 1))\r\n endRange = html2.find('<div class=\"trow')\r\n range = html2[:endRange]\r\n absLinkStart = range.find('href')\r\n absLinkEnd = range.find('>', absLinkStart)\r\n absLink = range[absLinkStart + 6: absLinkEnd - 1]\r\n print(absLink)\r\n titleStart = absLinkEnd + 1\r\n titleEnd = range.find('</a>')\r\n title = range[titleStart:titleEnd]\r\n print(title)\r\n if (range.find('<i>', titleEnd) != -1):\r\n citStart = range.find('<i>', titleEnd)\r\n citEnd = range.find('</i>', citStart)\r\n citation = range[citStart + 3: citEnd]\r\n print(citation)\r\n dateStart = range.find('Posted:', titleEnd)\r\n dateEnd = range.find('</span>', dateStart)\r\n date = range[dateStart:dateEnd]\r\n authEnd = dateEnd\r\n while(range.find('AbsByAuth', authEnd) != -1):\r\n authLinkStart = range.find('href', authEnd)\r\n authLinkEnd = range.find('target', authLinkStart)\r\n authLink = range[authLinkStart + 6: authLinkEnd - 1]\r\n authStart = range.find('>', authLinkEnd)\r\n authEnd = range.find('</a>', authLinkStart)\r\n author = range[authStart + 1: authEnd]\r\n print(authLink)\r\n print(author)\r\n affStart = range.find('afiliations', authEnd)\r\n affEnd = range.find('</div>', affStart)\r\n 
affiliations = range[affStart + 13: affEnd]\r\n print(affiliations)\r\n \r\n html2 = html2[endRange + 33:]\r\n \r\n"
}
] | 2 |
ChahalSandeep/Genetic-Algorithm
|
https://github.com/ChahalSandeep/Genetic-Algorithm
|
bd49a2a4bcbe7689ce3c768f9ad692e2cdd81717
|
0d63e3affabd7dd3310b41a3654f9e0a37fb5be4
|
c8e51ad4e4478241f90aa805f8b29292c5a65727
|
refs/heads/master
| 2021-01-21T07:30:22.416473 | 2017-05-17T20:05:47 | 2017-05-17T20:05:47 | 91,615,530 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5610119104385376,
"alphanum_fraction": 0.586309552192688,
"avg_line_length": 23.923076629638672,
"blob_id": "4cc9a59407057446c12e3bdb7eba3fd1ba4f47f9",
"content_id": "b0879ecaf8eed875b2252199879a696cf09d5759",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 672,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 26,
"path": "/PA2.py",
"repo_name": "ChahalSandeep/Genetic-Algorithm",
"src_encoding": "UTF-8",
"text": "import pyevolve\r\nfrom pyevolve import G1DList\r\nfrom pyevolve import GSimpleGA\r\n\r\n\r\ndef eval_func(chromosome):\r\n score = 0.0\r\n sum = 0.0\r\n \r\n # iterate over the chromosome\r\n for value in chromosome:\r\n sum = value + sum\r\n score = 1/(1+abs(sum-x)) \r\n if sum == x:\r\n score+= 1\r\n return score\r\n\r\ngenome = G1DList.G1DList(20) #elements in list\r\nx = input(\"Enter the sum you want:\")\r\ngenome.evaluator.set(eval_func)\r\nga = GSimpleGA.GSimpleGA(genome)#engine\r\nga.setGenerations(200)\r\nga.evolve(freq_stats=10)\r\nprint ga.bestIndividual()\r\na = sum(ga.bestIndividual())\r\nprint a"
}
] | 1 |
yalotfi/AssortedAI
|
https://github.com/yalotfi/AssortedAI
|
6372f06bbaf5d35193a8053aad07636e70d33676
|
cc107229f15059aa6edaf3a620e5957fad351c93
|
cebcd62d6b3e314b051bd526a566494094c046c1
|
refs/heads/master
| 2021-09-15T06:21:43.064652 | 2018-05-27T20:12:01 | 2018-05-27T20:12:01 | 103,035,256 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5699558258056641,
"alphanum_fraction": 0.5832105875015259,
"avg_line_length": 26.15999984741211,
"blob_id": "3ccc40851ece946fc3e15219a94ee4f5aba10f53",
"content_id": "c52916e101c4d2edf888e54082ed0d5c502868ac",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 679,
"license_type": "permissive",
"max_line_length": 57,
"num_lines": 25,
"path": "/assort/preprocessing/sample.py",
"repo_name": "yalotfi/AssortedAI",
"src_encoding": "UTF-8",
"text": "import numpy as np\n\n\ndef train_test_split(X, y, seed, test_size=0.3):\n np.random.seed(seed)\n\n # Initial set up/helpers\n m_x, m_y = X.shape[0], y.shape[0]\n n_x, n_y = X.shape[1], y.shape[1]\n\n # 1) Join features and labels and shuffle their order\n dataset = np.column_stack([X, y])\n np.random.shuffle(dataset)\n\n # 2) Split the train/test sets\n n = round(m_x * test_size)\n test_set = dataset[:n]\n train_set = dataset[n:]\n\n # 3) Pull the train/test features and labels\n X_train = train_set[:, :n_x]\n y_train = train_set[:, -n_y:]\n X_test = test_set[:, :n_x]\n y_test = test_set[:, -n_y:]\n return (X_train, y_train), (X_test, y_test)\n"
},
{
"alpha_fraction": 0.5258832573890686,
"alphanum_fraction": 0.5312677025794983,
"avg_line_length": 27.162534713745117,
"blob_id": "4ecdc3253ff47a763d5542a046ce73f8ed90ae75",
"content_id": "f831bb5ba984ed7f2ecd4b2f306349f3c38bac2a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10586,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 363,
"path": "/assort/linear/logistic.py",
"repo_name": "yalotfi/AssortedAI",
"src_encoding": "UTF-8",
"text": "import numpy as np\r\n\r\nfrom assort import _INITIALIZER_CONFIG\r\nfrom assort.activations import sigmoid\r\nfrom assort.activations import softmax\r\nfrom assort.regularizers import l2_reg\r\n\r\n\r\nclass LinearClassifier(object):\r\n \"\"\"Base class for Linear Classifiers\"\"\"\r\n\r\n def __init__(self, epochs=1, lr=0.05, lmda=0.01):\r\n super(LinearClassifier, self).__init__()\r\n # Take in model hyperparameters at object instantiation\r\n self._epochs = epochs\r\n self._alpha = lr\r\n self._lmda = lmda\r\n\r\n # Define hyperparameter dictionary as attribute\r\n self.hyperparameters = {\r\n \"epochs\": self._epochs,\r\n \"learning_rate\": self._alpha,\r\n \"regularization_term\": self._lmda\r\n }\r\n\r\n # Training attributes\r\n self.cost_cache = []\r\n self.trained_params = {}\r\n self.trained_grads = {}\r\n\r\n def _init_zeros(self, n, k):\r\n \"\"\"\r\n Initialize model parameters with zero\r\n\r\n Arguments\r\n ---------\r\n n : int\r\n Define number of features, n\r\n k : int\r\n Define number of classes, k\r\n\r\n Returns\r\n -------\r\n ndarray\r\n Initialized weights, w, with shapes (n, k)\r\n float\r\n Initialized bias, b\r\n \"\"\"\r\n w = np.zeros((n, k))\r\n b = 0.\r\n return w, b\r\n\r\n def _binary_xent(self, Y_hat, Y):\r\n \"\"\"\r\n Binary Cross Entropy loss function\r\n\r\n Arguments\r\n ---------\r\n Y_hat : ndarray\r\n Probability of y given x\r\n Y : ndarray\r\n Actual value of y given x\r\n\r\n Returns\r\n -------\r\n float\r\n Cost (or loss) of predicted values\r\n \"\"\"\r\n m = Y.shape[0]\r\n case_0 = Y * np.log(Y_hat)\r\n case_1 = (1 - Y) * np.log(1 - Y_hat)\r\n return -(1 / m) * np.sum(case_0 + case_1)\r\n\r\n def _categorical_xent(self, Y_hat, Y):\r\n \"\"\"\r\n Categorical Cross Entropy loss function\r\n\r\n Arguments\r\n ---------\r\n Y_hat : ndarray\r\n Probability of y given x\r\n Y : ndarray\r\n Actual value of y given x\r\n\r\n Returns\r\n -------\r\n float\r\n Cost (or loss) of predicted values\r\n \"\"\"\r\n m = 
Y.shape[0]\r\n logprobs = np.log(Y_hat)\r\n return -(1 / m) * np.sum(Y * logprobs)\r\n\r\n def _batch_gradient_descent(self, propagate, X, y):\r\n \"\"\"\r\n Perform the batch gradient descent algorithm\r\n\r\n Arguments\r\n ---------\r\n propagate : callback\r\n Function that computes the cost and gradient of a single pass\r\n X : ndarray\r\n Training features\r\n y : ndarray\r\n Trainging labels\r\n \"\"\"\r\n # Helpers: dimensionality and classes\r\n m, n = X.shape[0], X.shape[1]\r\n k = y.shape[1]\r\n\r\n # Initialize model parameters (weights and bias)\r\n w, b = self._init_zeros(n, k)\r\n\r\n cost_cache = []\r\n for i in range(self._epochs):\r\n # Perform single pass of forward and backward propagation\r\n cost, grads = propagate(X, y, w, b)\r\n\r\n # Store the cost for each iteration\r\n cost_cache.append(cost)\r\n if i % 100 == 0:\r\n print(\"Cost after iteration {}: {}\".format(i, cost))\r\n\r\n # Update model parameters\r\n dw = grads[\"dw\"]\r\n db = grads[\"db\"]\r\n w = w - self._alpha * dw\r\n b = b - self._alpha * db\r\n\r\n # Store trained parameters and their gradients\r\n parameters = {\"w\": w, \"b\": b}\r\n gradients = {\"dw\": dw, \"db\": db}\r\n return parameters, gradients, cost_cache\r\n\r\n\r\nclass LogisticRegression(LinearClassifier):\r\n \"\"\"\r\n Logistic regression trained on Stochastic Gradient Descent\r\n\r\n Arguments\r\n ---------\r\n epochs : int\r\n Number of full passes to make over dataset when training\r\n lr : float\r\n Learning rate which determines size of parameter update when training\r\n lmda : float\r\n Degree to which training cost should be regularized\r\n\r\n Attributes\r\n ----------\r\n hyperparameters : dictionary\r\n Stored hyperparameters for logging\r\n trained_params : dictionary\r\n Trained parameters\r\n trained_grads : dictionary\r\n Trained parameter gradients\r\n\r\n Methods\r\n -------\r\n fit\r\n Train model with batch gradient descent\r\n predict\r\n Make predictions for input data\r\n evaluate\r\n 
Compute the mean accuracy measure for the model\r\n \"\"\"\r\n\r\n def __init__(self, epochs=1, lr=0.05, lmda=0.01):\r\n super().__init__(epochs, lr, lmda)\r\n\r\n def _hypothesis(self, X, w, b):\r\n Z = np.dot(X, w) + b\r\n return sigmoid(Z)\r\n\r\n def _propagate(self, X, Y, w, b):\r\n # Forward Pass\r\n A = self._hypothesis(X, w, b)\r\n cost = self._binary_xent(A, Y)\r\n\r\n # Backward Pass\r\n m = X.shape[0]\r\n dZ = A - Y\r\n dw = (1 / m) * np.dot(X.T, dZ)\r\n db = (1 / m) * np.sum(dZ)\r\n\r\n # Regularize the cost and gradient\r\n cost += l2_reg(w, self._lmda)\r\n dw += l2_reg(w, self._lmda, derivative=True)\r\n\r\n # Return the cost and gradients\r\n grads = {\"dw\": dw, \"db\": db}\r\n return cost, grads\r\n\r\n def fit(self, X, y):\r\n params, grads, cost_cache = self._batch_gradient_descent(\r\n self._propagate, X, y)\r\n\r\n # Store trained parameters, their gradients and cost history\r\n self.cost_cache = cost_cache\r\n self.trained_params = {\"w\": params[\"w\"], \"b\": params[\"b\"]}\r\n self.trained_grads = {\"dw\": grads[\"dw\"], \"db\": grads[\"db\"]}\r\n return self\r\n\r\n def predict(self, X, thresh=0.5):\r\n \"\"\"\r\n Predict classes given input data\r\n\r\n This method uses the trained parameters learned during training. Use\r\n after fitting the model! A boolean array is return comparing the\r\n predicted probability to the given threshold.\r\n\r\n Arguments\r\n ---------\r\n X : ndarray\r\n Input data, X, to predict\r\n\r\n Returns\r\n -------\r\n bool ndarray\r\n Predicted classes for each input feature, X\r\n \"\"\"\r\n # Make a prediction about each class\r\n w = self.trained_params[\"w\"]\r\n b = self.trained_params[\"b\"]\r\n y_pred = self._hypothesis(X, w, b) > thresh\r\n return y_pred.astype(int).reshape((y_pred.shape[0], 1))\r\n\r\n def evaluate(self, X_test, y_test):\r\n \"\"\"\r\n Compute the mean accuracy of the trained classifier\r\n\r\n This method uses the trained parameters learned during training. Use\r\n after training! 
Furthermore, y_test should not be one-hot encoded to\r\n match the predictions which collapse to a vector as well.\r\n\r\n Arguments\r\n ---------\r\n X_test : ndarray\r\n Test features to evaluate of shape (m, n)\r\n y_test : ndarray\r\n Test labels to evaluate of shape (m, 1)\r\n\r\n Returns\r\n -------\r\n float\r\n Ratio of correct labels to incorrect labels\r\n \"\"\"\r\n y_pred = self.predict(X_test)\r\n return np.mean(y_pred == y_test)\r\n\r\n\r\nclass SoftmaxRegression(LinearClassifier):\r\n \"\"\"\r\n Softmax regression trained with Batch Gradient Descent\r\n\r\n Arguments\r\n ---------\r\n epochs : int\r\n Number of full passes to make over dataset when training\r\n lr : float\r\n Learning rate which determines size of parameter update when training\r\n lmda : float\r\n Degree to which training cost should be regularized\r\n\r\n Attributes\r\n ----------\r\n hyperparameters : dictionary\r\n Stored hyperparameters for logging\r\n trained_params : dictionary\r\n Trained parameters\r\n trained_grads : dictionary\r\n Trained parameter gradients\r\n\r\n Methods\r\n -------\r\n fit\r\n Train model with batch gradient descent\r\n predict\r\n Make predictions for input data\r\n evaluate\r\n Compute the mean accuracy measure for the model\r\n \"\"\"\r\n\r\n def __init__(self, epochs=1, lr=0.05, lmda=0.01):\r\n super().__init__(epochs, lr, lmda)\r\n\r\n def _hypothesis(self, X, w, b):\r\n Z = np.dot(X, w) + b\r\n return softmax(Z, axis=1)\r\n\r\n def _propagate(self, X, Y, w, b):\r\n # Forward Pass\r\n A = self._hypothesis(X, w, b)\r\n cost = self._categorical_xent(A, Y)\r\n\r\n # Backward Pass\r\n dZ = A - Y\r\n dw = np.dot(X.T, dZ)\r\n db = np.sum(dZ)\r\n\r\n # Regularize the cost and gradient\r\n cost += l2_reg(w, self._lmda)\r\n dw += l2_reg(w, self._lmda, derivative=True)\r\n\r\n # Return the cost and gradients\r\n grads = {\"dw\": dw, \"db\": db}\r\n return cost, grads\r\n\r\n def fit(self, X, y):\r\n params, grads, cost_cache = 
self._batch_gradient_descent(\r\n self._propagate, X, y)\r\n\r\n # Store trained parameters, their gradients and cost history\r\n self.cost_cache = cost_cache\r\n self.trained_params = {\"w\": params[\"w\"], \"b\": params[\"b\"]}\r\n self.trained_grads = {\"dw\": grads[\"dw\"], \"db\": grads[\"db\"]}\r\n return self\r\n\r\n def predict(self, X):\r\n \"\"\"\r\n Predict classes given input data\r\n\r\n This method uses the trained parameters learned during training. Use\r\n after training!\r\n\r\n Arguments\r\n ---------\r\n X : ndarray\r\n Input data, X, to predict\r\n\r\n Returns\r\n -------\r\n ndarray\r\n Predicted classes for each input feature, X\r\n \"\"\"\r\n w = self.trained_params[\"w\"]\r\n b = self.trained_params[\"b\"]\r\n A = self._hypothesis(X, w, b)\r\n y_pred = np.argmax(A, axis=1)\r\n return y_pred.reshape((y_pred.shape[0], 1))\r\n\r\n def evaluate(self, X_test, y_test):\r\n \"\"\"\r\n Compute the mean accuracy of the trained classifier\r\n\r\n This method uses the trained parameters learned during training. Use\r\n after training! Furthermore, y_test should not be one-hot encoded to\r\n match the predictions which collapse to a vector as well.\r\n\r\n Arguments\r\n ---------\r\n X_test : ndarray\r\n Test features to evaluate of shape (m, n)\r\n y_test : ndarray\r\n Test labels to evaluate of shape (m, 1)\r\n\r\n Returns\r\n -------\r\n float\r\n Ratio of correct labels to incorrect labels\r\n \"\"\"\r\n y_pred = self.predict(X_test)\r\n return np.mean(y_pred == y_test)\r\n"
},
{
"alpha_fraction": 0.6583124399185181,
"alphanum_fraction": 0.6624895334243774,
"avg_line_length": 27.5,
"blob_id": "2c158c0b5280d2550e3c57c59590f4d78b381bb1",
"content_id": "190e79cf4725fac40acaec417418c6ad2c998149",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1197,
"license_type": "permissive",
"max_line_length": 68,
"num_lines": 42,
"path": "/setup.py",
"repo_name": "yalotfi/AssortedAI",
"src_encoding": "UTF-8",
"text": "from setuptools import setup, find_packages\nfrom codecs import open\nfrom os import path\n\n\nhere = path.abspath(path.dirname(__file__))\n\n\nwith open(path.join(here, 'README.md'), encoding='utf-8') as f:\n LONG_DESCRIPTION = f.read()\n\n\nsetup(\n # High level package information\n name='AssortedAI',\n version='0.1.0',\n description='An assortment of ML algorithms and tools',\n long_description=LONG_DESCRIPTION,\n long_description_content_type='text/markdown',\n license=\"MIT License\",\n\n # Maintainer information\n url='https://github.com/yalotfi/AssortedAI',\n author='Yaseen Lotfi',\n author_email='[email protected]',\n\n # Make the package searchable\n classifiers=[\n 'Development Status :: 2 - Pre-Alpha',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Intended Audience :: Science/Research',\n 'Natural Language :: English',\n 'Programming Language :: Python',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence'\n ],\n keywords='machine learning artificial intelligence',\n\n # Dependency Information\n packages=find_packages(),\n install_requires=['mkl', 'numpy', 'matplotlib']\n)\n"
},
{
"alpha_fraction": 0.7482883334159851,
"alphanum_fraction": 0.7672170996665955,
"avg_line_length": 97.23999786376953,
"blob_id": "720cc43891745033c83567e7d57ff98024428960",
"content_id": "f2dc39377c338235e00d35c21c691904c4f994ce",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2483,
"license_type": "permissive",
"max_line_length": 479,
"num_lines": 25,
"path": "/examples/README.md",
"repo_name": "yalotfi/AssortedAI",
"src_encoding": "UTF-8",
"text": "## Example: Kaggle's Digit Recognition\r\n\r\n### Task:\r\n\r\nClassify 28x28 resolution images of handwritten digits [0-9], doing so from scratch (NumPy)\r\n\r\n### Description:\r\n\r\nKaggle's training set is distributed as a csv wherein each row represents a single image. As such, each image has been flattened to a `(1, 784)` vector but can be represented as a `(28, 28)` matrix. The first column of both the train and test files are the labels. These are real values between 0 and 255.\r\n\r\nThe models will be multi-class classifiers and so the predicted label, `y_hat`, and the actual label, `y`, will be represented as vectors of length `k_classes`; with 10 possible digits, these label vectors will have shape: `(1, 10)`. These vectors are stacked into a matrix of shape `(m_examples, k_classes)`.\r\n\r\nConcretely, each label is a sparse (one-hot) vector meaning every element is zero except for the index of the actual class which is 1. Given an image of a 3, its one-hot encoded vector would be: `[0, 0, 0, 1, 0, 0, 0, 0, 0, 0]`. It is important to note that in this case, the 0th index corresponds to the class for 0 and the 10th index is the class for 9. It could be flipped, but I will assume this structure.\r\n\r\n### Models to Build:\r\n\r\n1. Softmax Regression (1-layer NN)\r\n2. Shallow Neural Network (2-layer NN)\r\n3. Deep Neural Network (L-layer NN)\r\n\r\nIntuition for neural networks begins with logistic regression wherein they behave as the most basic feed-forward network possible. Data is passed through a linear function and the logistic (sigmoid) function squeezing the prediction to a real value between 0 and 1 (the prediction).\r\n\r\nThe negative log-liklihood function, or categorical-cross entropy, computes the error of the model's paramters. In order to minimize this loss, we update the weights and biases using stochastic gradient descent. 
The gradient is simply the sum of partial derivative of the loss function with respect to each parameter. This value is scaled by a set learning rate and adjusts the weights and biases. This update is done iteratively over a number of training iterations or \"epochs.\"\r\n\r\nAll that really changes with deeper and wider feed-forward neural networks is their capability to represent more complex features. The fundamental concept remains the same wherein it makes a prediction given data and parameters, computes how far off it was given the \"correct answer,\" and then adjusts parameters based on each ones contribution to the overall loss.\r\n\r\n"
},
{
"alpha_fraction": 0.6071817278862,
"alphanum_fraction": 0.6115342974662781,
"avg_line_length": 26.71875,
"blob_id": "6d2fbe1c647b398b8b697cc634245d2a7cb8f7a9",
"content_id": "6c359a7d8a85ad8ce510fdad253eda967d7ade41",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 919,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 32,
"path": "/assort/initializers.py",
"repo_name": "yalotfi/AssortedAI",
"src_encoding": "UTF-8",
"text": "import numpy as np\r\n\r\n\r\nclass Initializer(object):\r\n \"\"\"Base initializer class is parent of all initializers.\"\"\"\r\n def __call__(self):\r\n raise NotImplementedError\r\n\r\n\r\nclass Zeros(Initializer):\r\n \"\"\"Initialize parameters as an array of zeros.\"\"\"\r\n def __call__(self, shape):\r\n return np.zeros(shape)\r\n\r\n\r\nclass Ones(Initializer):\r\n \"\"\"Initialize parameters as an array of ones.\"\"\"\r\n def __call__(self, shape):\r\n return np.ones(shape)\r\n\r\n\r\nclass RandomNormal(Initializer):\r\n \"\"\"Initialize parameters from a normal random distribution.\"\"\"\r\n def __init__(self, mean=0., stdv=1., seed=None):\r\n self.mean = mean\r\n self.stdv = stdv\r\n self.seed = seed\r\n\r\n def __call__(self, shape, scaling_factor=0.1):\r\n if self.seed is not None:\r\n np.random.seed(self.seed)\r\n return np.random.normal(self.mean, self.stdv, shape) * scaling_factor\r\n"
},
{
"alpha_fraction": 0.6407506465911865,
"alphanum_fraction": 0.6407506465911865,
"avg_line_length": 34.52381134033203,
"blob_id": "deff04c44a9943139f1adc104f90f74f8b17ec5c",
"content_id": "8f13d9532841f10e737b49e95ce5fb982c96d8ac",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 746,
"license_type": "permissive",
"max_line_length": 82,
"num_lines": 21,
"path": "/assort/datasets/download_util.py",
"repo_name": "yalotfi/AssortedAI",
"src_encoding": "UTF-8",
"text": "import os\n\nfrom urllib import request\n\n\ndef download(url, fname, directory):\n \"\"\"Download the file from the given url, filename, and directory.\"\"\"\n if not os.path.exists(directory):\n print(\"Creating directory %s\" % directory)\n os.mkdir(directory)\n else:\n print(\"Directory exists: %s\" % directory)\n filepath = os.path.join(directory, fname)\n if not os.path.exists(filepath):\n print(\"Downloading %s to %s\" % (fname, filepath))\n local_fname, _ = request.urlretrieve(url + fname, filepath)\n statinfo = os.stat(filepath)\n print(\"Successfully downloaded %s bytes %s\\n\" % (fname, statinfo.st_size))\n else:\n print(\"File %s exists in %s\\n\" % (fname, filepath))\n return filepath\n"
},
{
"alpha_fraction": 0.6321526169776917,
"alphanum_fraction": 0.6321526169776917,
"avg_line_length": 28.58333396911621,
"blob_id": "dd076957f2ebee6a46ba0c073dc365d4d831e066",
"content_id": "efbb4d855586315b38ba21eb9c8d5f0a6a5e028f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 367,
"license_type": "permissive",
"max_line_length": 51,
"num_lines": 12,
"path": "/assort/optimizers.py",
"repo_name": "yalotfi/AssortedAI",
"src_encoding": "UTF-8",
"text": "import numpy as np\r\n\r\n\r\nclass GradientDescent(object):\r\n \"\"\"docstring for GradientDescent.\"\"\"\r\n def __init__(self, learning_rate, epochs):\r\n super(GradientDescent, self).__init__()\r\n self.alpha = learning_rate\r\n self.epochs = epochs\r\n\r\n def _update_params(self, parameters, gradient):\r\n return parameters - self.alpha * gradient\r\n"
},
{
"alpha_fraction": 0.6465116143226624,
"alphanum_fraction": 0.6616278886795044,
"avg_line_length": 24.875,
"blob_id": "eec9815ce194f31d5103d39fced775079f557d2d",
"content_id": "d9eee9145adc2624c6a056c87e159a088c4f505c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 860,
"license_type": "permissive",
"max_line_length": 69,
"num_lines": 32,
"path": "/examples/spam.py",
"repo_name": "yalotfi/AssortedAI",
"src_encoding": "UTF-8",
"text": "import sys\r\nimport os\r\nimport numpy as np\r\n\r\n\r\nsys.path.insert(0, os.path.join(os.getcwd()))\r\nfrom assort.utils import load_datasets\r\nfrom assort.preprocessing import feature_scaling as norm\r\nfrom assort.linear.logistic import LogisticRegression\r\n\r\n\r\ndef main():\r\n # Load Spam Dataset\r\n (X_train, y_train), (X_test, y_test) = load_datasets.get_spam(99)\r\n print(X_train.shape)\r\n print(y_train.shape)\r\n print(X_test.shape)\r\n print(y_test.shape)\r\n\r\n # Normalization\r\n X_train_norm = norm.standardize(X_train)\r\n X_test_norm = norm.standardize(X_test)\r\n\r\n # Train and evaluate the model\r\n model = LogisticRegression(epochs=5000, lr=10e-5, lmda=10e-5)\r\n model.fit(X_train_norm, y_train)\r\n print(model.evaluate(X_train_norm, y_train))\r\n print(model.evaluate(X_test_norm, y_test))\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n"
},
{
"alpha_fraction": 0.5707730054855347,
"alphanum_fraction": 0.5778326988220215,
"avg_line_length": 34.412498474121094,
"blob_id": "9698d2b7931dc645c3be35448068d4dc42de7663",
"content_id": "6acbe8fa1a83f605f14da9c80af066f87f687efe",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2833,
"license_type": "permissive",
"max_line_length": 76,
"num_lines": 80,
"path": "/assort/datasets/mnist_util.py",
"repo_name": "yalotfi/AssortedAI",
"src_encoding": "UTF-8",
"text": "import sys\nimport os\nimport gzip\nimport struct\nimport numpy as np\n\nfrom . import download\n\n\nclass MNISTReader(object):\n \"\"\"docstring for MNISTReader.\"\"\"\n def __init__(self, directory, download_flag, flatten_flag):\n super(MNISTReader, self).__init__()\n self.directory = directory\n self.download_flag = download_flag\n self.flatten_flag = flatten_flag\n self.fnames = {\n \"train_set\": {\n \"features\": 'train-images-idx3-ubyte.gz',\n \"labels\": 'train-labels-idx1-ubyte.gz'\n },\n \"test_set\": {\n \"features\": 't10k-images-idx3-ubyte.gz',\n \"labels\": 't10k-labels-idx1-ubyte.gz'\n }\n }\n self.train_set = self._load_train_set\n self.test_set = self._load_test_set\n\n def _download_mnist(self, which_set):\n \"\"\"Download train or test MNIST sets from Yann LeCunn's public repo,\n dumping the gzip files in the given directory.\n \"\"\"\n url = 'http://yann.lecun.com/exdb/mnist/'\n for key, fname in which_set.items():\n filepath = download(url, fname, self.directory)\n\n def _read_labs(self, flab):\n \"\"\"Unpack label file bytes into NumPy Array\"\"\"\n with gzip.open(flab) as f:\n magic, m = struct.unpack(\">II\", f.read(8))\n labels = np.fromstring(f.read(), dtype=np.int8)\n assert(m == labels.shape[0])\n return labels, m\n\n def _read_imgs(self, fimg):\n \"\"\"Unpack image file bytes into NumPy Array\"\"\"\n with gzip.open(fimg, 'rb') as f:\n magic, m, rows, cols = struct.unpack(\">IIII\", f.read(16))\n images = np.fromstring(f.read(), dtype=np.uint8)\n assert(rows == cols) # 28 x 28\n return images, rows, cols\n\n def _pull_set(self, which_set):\n \"\"\"Try to download raw MNIST and return (feature, label) set.\"\"\"\n # Construct path to correct files\n img_path = os.path.join(self.directory, which_set[\"features\"])\n lab_path = os.path.join(self.directory, which_set[\"labels\"])\n\n # Decide to download files from public repo or not\n if self.download_flag:\n self._download_mnist(which_set)\n\n # Read in raw MNIST files\n labels, m 
= self._read_labs(lab_path)\n images, rows, cols = self._read_imgs(img_path)\n\n # Decide to return vectorized images or not\n if self.flatten_flag:\n return (images.reshape(m, rows * cols), labels.reshape(m, 1))\n else:\n return (images.reshape(m, rows, cols), labels.reshape(m, 1))\n\n @property\n def _load_train_set(self):\n return self._pull_set(which_set=self.fnames[\"train_set\"])\n\n @property\n def _load_test_set(self):\n return self._pull_set(which_set=self.fnames[\"test_set\"])\n"
},
{
"alpha_fraction": 0.557345986366272,
"alphanum_fraction": 0.5687204003334045,
"avg_line_length": 23.731706619262695,
"blob_id": "4f9939c226be9f5a97056e92f9584feddb466e19",
"content_id": "d830611ce62b86f69ae5003a48b019c65bad6046",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1055,
"license_type": "permissive",
"max_line_length": 65,
"num_lines": 41,
"path": "/assort/activations.py",
"repo_name": "yalotfi/AssortedAI",
"src_encoding": "UTF-8",
"text": "import numpy as np\r\n\r\n\r\ndef sigmoid(z):\r\n \"\"\"Sigmoid or logistic activation function\"\"\"\r\n return 1 / (1 + np.exp(-z))\r\n\r\n\r\n# def softmax(z):\r\n# \"\"\"Softmax activation function (generalized sigmoid)\"\"\"\r\n# z -= np.max(z)\r\n# return np.exp(z) / np.sum(np.exp(z), axis=0)\r\n\r\ndef softmax(arr, axis=None):\r\n X = np.atleast_2d(arr)\r\n if axis is None:\r\n axis = next(j[0] for j in enumerate(X.shape) if j[1] > 1)\r\n X = X - np.expand_dims(np.max(X, axis=axis), axis)\r\n X = np.exp(X)\r\n probs = X / np.expand_dims(np.sum(X, axis=axis), axis)\r\n if len(arr.shape) == 1:\r\n return probs.flatten()\r\n else:\r\n return probs\r\n\r\n\r\ndef tanh(z):\r\n \"\"\"Hyperbolic tangent activation function\"\"\"\r\n sinh = np.exp(z) - np.exp(-z)\r\n cosh = np.exp(z) + np.exp(-z)\r\n return sinh / cosh\r\n\r\n\r\ndef relu(z):\r\n \"\"\"Rectified Linear Unit activation function\"\"\"\r\n return np.maximum(z, 0, z)\r\n\r\n\r\ndef leaky_relu(z, a=0.01):\r\n \"\"\"Leaky Rectified Linear Unit activation function\"\"\"\r\n return np.maximum(z, a * z, z)\r\n"
},
{
"alpha_fraction": 0.5369059443473816,
"alphanum_fraction": 0.5411526560783386,
"avg_line_length": 30.69871711730957,
"blob_id": "8f7b2c142419c51960e6cc98b3866209471e60a4",
"content_id": "6919b16f532c9e8e24caaffd2f211d2d354f25b9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4945,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 156,
"path": "/assort/neural/dnn.py",
"repo_name": "yalotfi/AssortedAI",
"src_encoding": "UTF-8",
"text": "import numpy as np\n\nfrom assort.initializers import RandomNormal, Zeros\nfrom assort.activations import relu, sigmoid\n\n\nclass DeepNeuralNetwork(object):\n \"\"\"docstring for DeepNeuralNetwork.\"\"\"\n def __init__(self, layer_dims):\n super(DeepNeuralNetwork, self).__init__()\n self.layer_dims = layer_dims\n self.L = len(self.layer_dims)\n self.parameters = self._get_parameters()\n\n @property\n def _get_parameters(self):\n return self._initialize_parameters(self.layer_dims)\n\n def _initialize_parameters(self, layer_dims):\n \"\"\"Initialize model parameters with random normal distribution\n\n Arguments\n ---------\n layer_dims : list\n Each element is the input dimension of the corresponding layer\n\n Returns\n -------\n dict\n Python dictionary containing initialized weights and biases for\n each layer of the model\n \"\"\"\n rand_init = RandomNormal()\n zero_init = Zeros()\n parameters = {}\n for l in range(1, self.L):\n weight_shape = (layer_dims[l], layer_dims[l - 1])\n bias_shape = (layer_dims[l], 1)\n parameters['W' + str(l)] = rand_init(weight_shape)\n parameters['b' + str(l)] = zero_init(bias_shape)\n return parameters\n\n def _linear_forward(A_prev, W, b):\n \"\"\"Implement a single linear transformation during forward propagation\n\n Arguments\n ---------\n A_prev : ndarray\n The previous layer's activations are inputs to the current layer.\n - Size: (hh_prev, m_examples)\n W : ndarray\n The current layer's weights\n - Size: (hh_curr, hh_prev)\n b : ndarray\n The current layer's bias units\n - Size: (hh_curr, 1)\n\n Returns\n -------\n ndarray\n The input to an activation function of the current layer\n tuple\n Cache the previous activations and layer parameters which will be\n used to compute back propagation\n \"\"\"\n W, b = parameters['W'], parameters['b']\n Z = np.dot(W, A_prev) + b\n cache = (A_prev, W, b)\n return Z, cache\n\n def _forward_activation(A_prev, W, b, activation):\n \"\"\"Implement a single forward activation layer in 
a neural network.\n\n Arguments\n ---------\n A_prev : ndarray\n The previous layer's activations are inputs to the current layer.\n - Size: (hh_prev, m_examples)\n W : ndarray\n The current layer's weights\n - Size: (hh_curr, hh_prev)\n b : ndarray\n The current layer's bias units\n - Size: (hh_curr, 1)\n activation : str\n String defining which activation function to use\n\n Returns\n -------\n ndarray\n Input to the next layer's A_prev\n tuple\n Cache containing the linear cache plus the linear input to this\n layer's activation\n \"\"\"\n Z, linear_cache = self._linear_forward(A_prev, W, b)\n if activation == 'sigmoid':\n A = sigmoid(Z)\n elif activation == 'relu':\n A = relu(Z)\n cache = (linear_cache, Z)\n return A, cache\n\n def _feed_forward(self, X, parameters):\n \"\"\"Implement a full forward pass of the neural network\n\n Argument\n --------\n X : ndarray\n Input data with shape (n_features, m_examples)\n\n Returns\n -------\n ndarray\n Final layer activation output\n list\n List of caches from each layer\n \"\"\"\n # Define helper variables\n L = self.L # Number of layers in the DNN\n A = X # Input data, X, is just the 'first' layer activation\n\n # Implement [LINEAR -> RELU] for (L - 1) layers, storing each cache\n caches = []\n for l in range(1, L):\n A_prev = A\n W_l = parameters['W' + str(l)]\n b_l = parameters['W' + str(l)]\n A, cache = self._forward_activation(A_prev, W_l, b_l, 'relu')\n caches.append(cache)\n\n # Implement [LINEAR -> SOFTMAX] for final layer, L, and add final cache\n W_L = parameters['W' + str(L)]\n b_L = parameters['W' + str(L)]\n AL, cache = self._forward_activation(A, W_L, b_L, 'softmax')\n caches.append(cache)\n return AL, caches\n\n def _linear_backward(dZ, cache):\n A_prev, W, b = cache\n m = A_prev.shape[0]\n dW = (1 / m) * np.dot(dZ, A_prev.T)\n db = (1 / m) * np.sum(dZ, axis=1, keepdims=True)\n dA_prev = np.dot(W.T, dZ)\n return dA_prev, dW, db\n\n\ndef main():\n layers = [784, 20, 2, 20, 10]\n nn = 
DeepNeuralNetwork(layers)\n for key, value in nn.parameters.items():\n print(\"{} | {}\".format(key, value.shape))\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.558635413646698,
"alphanum_fraction": 0.5671641826629639,
"avg_line_length": 31.5,
"blob_id": "17851ed22dd86d14bf20992a13982c9cbd437d63",
"content_id": "c731dae7650e70eba5209fb25670a68b9eaa612e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1407,
"license_type": "permissive",
"max_line_length": 71,
"num_lines": 42,
"path": "/examples/mnist.py",
"repo_name": "yalotfi/AssortedAI",
"src_encoding": "UTF-8",
"text": "import sys\r\nimport os\r\nimport numpy as np\r\n\r\nsys.path.insert(0, os.path.join(os.getcwd()))\r\nfrom assort.utils.load_datasets import get_mnist\r\nfrom assort.preprocessing import feature_scaling as norm\r\nfrom assort.preprocessing import one_hot as ova\r\nfrom assort.linear.logistic import SoftmaxRegression\r\n\r\n\r\ndef main():\r\n # Load the MNIST dataset\r\n (X_train, y_train), (X_test, y_test) = get_mnist(download=False,\r\n serialize=False,\r\n binary=False,\r\n bin_digits=[0, 1],\r\n flatten=True)\r\n\r\n print(X_train.shape)\r\n print(y_train.shape)\r\n print(X_test.shape)\r\n print(y_test.shape)\r\n\r\n # Rescale pixel values\r\n X_train_norm = norm.rescale(X_train)\r\n X_test_norm = norm.rescale(X_test)\r\n\r\n # Create one-hot encoded labels:\r\n digit_classes = int(np.max(y_train))\r\n y_train_ova = ova.one_hot_encode(y_train, digit_classes)\r\n y_test_ova = ova.one_hot_encode(y_test, digit_classes)\r\n\r\n # Build the model and evaluate\r\n model = SoftmaxRegression(epochs=250, lr=10e-5, lmda=10e-5)\r\n model.fit(X_train_norm, y_train_ova)\r\n print(model.evaluate(X_train_norm, y_train))\r\n print(model.evaluate(X_test_norm, y_test))\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n"
},
{
"alpha_fraction": 0.8099173307418823,
"alphanum_fraction": 0.8099173307418823,
"avg_line_length": 39.33333206176758,
"blob_id": "c2f8b5b207deb1f42375e116c6ae47eaab1cbdf7",
"content_id": "806eefc38c8076dc3836a4183846d014d2d72685",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 121,
"license_type": "permissive",
"max_line_length": 55,
"num_lines": 3,
"path": "/assort/preprocessing/__init__.py",
"repo_name": "yalotfi/AssortedAI",
"src_encoding": "UTF-8",
"text": "from .encode import one_hot\nfrom .sample import train_test_split\nfrom .scale import rescale, mean_normalize, standardize\n"
},
{
"alpha_fraction": 0.5245901346206665,
"alphanum_fraction": 0.5901639461517334,
"avg_line_length": 28.5,
"blob_id": "d3edb5df3716852546d1bb96ad9c724513056221",
"content_id": "b15a0133476343bb40bd0ac09ffc8390930ed1a2",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 366,
"license_type": "permissive",
"max_line_length": 72,
"num_lines": 12,
"path": "/assort/tests/test_softmax.py",
"repo_name": "yalotfi/AssortedAI",
"src_encoding": "UTF-8",
"text": "import os\r\nimport sys\r\nimport numpy as np\r\n\r\nsys.path.insert(0, os.path.join(os.getcwd()))\r\nfrom assort.activations import softmax\r\n\r\n# Testing softmax function - correctly produces probability distribution\r\nprint(softmax([[1.2, 0.9, 0.4],\r\n [-3.4, 0, -10]]))\r\nprint(np.sum(softmax([[1.2, 0.9, 0.4],\r\n [-3.4, 0., -10]]), axis=0))\r\n"
},
{
"alpha_fraction": 0.8717948794364929,
"alphanum_fraction": 0.8717948794364929,
"avg_line_length": 38,
"blob_id": "968c0130c0d4840017439c9c7d367e3aac604aa0",
"content_id": "e1801430b53773d4471fc6d094a261fbe9203804",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 78,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 2,
"path": "/assort/linear/__init__.py",
"repo_name": "yalotfi/AssortedAI",
"src_encoding": "UTF-8",
"text": "from .linreg import LinearRegression\nfrom .logistic import LogisticRegression\n"
},
{
"alpha_fraction": 0.4430379867553711,
"alphanum_fraction": 0.6645569801330566,
"avg_line_length": 15.55555534362793,
"blob_id": "f76043b8c906e2d72c1153fb848e86fb08705f58",
"content_id": "21b03e653fc8b5ac19a2a179223c74d45b53301a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 158,
"license_type": "permissive",
"max_line_length": 22,
"num_lines": 9,
"path": "/requirements.txt",
"repo_name": "yalotfi/AssortedAI",
"src_encoding": "UTF-8",
"text": "certifi==2016.2.28\r\ncycler==0.10.0\r\nmatplotlib==2.0.2\r\nnumpy==1.13.1\r\npyparsing==2.2.0\r\npython-dateutil==2.6.1\r\npytz==2017.2\r\nsix==1.10.0\r\nwincertstore==0.2\r\n"
},
{
"alpha_fraction": 0.5153488516807556,
"alphanum_fraction": 0.5265116095542908,
"avg_line_length": 28.714284896850586,
"blob_id": "c652967728f867de31cf06e861403ab02f60c004",
"content_id": "ee56764478740b93d3bc4e4b56ab1bac3196d256",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1075,
"license_type": "permissive",
"max_line_length": 71,
"num_lines": 35,
"path": "/examples/mnist_bin.py",
"repo_name": "yalotfi/AssortedAI",
"src_encoding": "UTF-8",
"text": "import sys\r\nimport os\r\nimport numpy as np\r\n\r\nsys.path.insert(0, os.path.join(os.getcwd()))\r\nfrom assort.datasets import get_mnist\r\nfrom assort.preprocessing import rescale\r\nfrom assort.linear import LogisticRegression\r\n\r\n\r\ndef main():\r\n (X_train, y_train), (X_test, y_test) = get_mnist(download=False,\r\n serialize=False,\r\n binary=True,\r\n bin_digits=[0, 1],\r\n flatten=True)\r\n\r\n print(X_train.shape)\r\n print(y_train.shape)\r\n print(X_test.shape)\r\n print(y_test.shape)\r\n\r\n # Rescale pixel values\r\n X_train_norm = rescale(X_train)\r\n X_test_norm = rescale(X_test)\r\n\r\n # Build the model and evaluate\r\n model = LogisticRegression(epochs=400, lr=10e-5, lmda=10e-5)\r\n model.fit(X_train_norm, y_train)\r\n print(model.evaluate(X_train_norm, y_train))\r\n print(model.evaluate(X_test_norm, y_test))\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n"
},
{
"alpha_fraction": 0.6701030731201172,
"alphanum_fraction": 0.6793814301490784,
"avg_line_length": 25.714284896850586,
"blob_id": "d7262e1669a8ea2de3532529a8eab6f86132b88b",
"content_id": "cc5b96f73d95f7d38651818d83ab9cf7fdc43256",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 970,
"license_type": "permissive",
"max_line_length": 57,
"num_lines": 35,
"path": "/examples/housing.py",
"repo_name": "yalotfi/AssortedAI",
"src_encoding": "UTF-8",
"text": "import sys\r\nimport os\r\n\r\n# Add AssortedAI to system path for development\r\nsys.path.insert(0, os.path.join(os.getcwd()))\r\nfrom assort.datasets import get_housing\r\nfrom assort.preprocessing import standardize\r\nfrom assort.optimizers import GradientDescent\r\nfrom assort.linear import LinearRegression\r\n\r\ndef main():\r\n X_train, y_train, X_test = get_housing()\r\n print(X_train.shape)\r\n print(y_train.shape)\r\n print(X_test.shape)\r\n\r\n # Perform feature scaling on X_train and X_test\r\n X_norm = standardize(X_train)\r\n X_test = standardize(X_test)\r\n\r\n # Fit Linear Regression model\r\n model = LinearRegression(X_norm, y_train)\r\n sgd = GradientDescent(learning_rate=0.03, epochs=100)\r\n model = model.fit(sgd, print_cost_freq=10)\r\n\r\n # Visualize training error over each iteration\r\n model.plot_error()\r\n\r\n # Make predictions with trained model\r\n pred = model.predict(X_test)\r\n print(pred)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n"
},
{
"alpha_fraction": 0.5783186554908752,
"alphanum_fraction": 0.5816993713378906,
"avg_line_length": 34.36885070800781,
"blob_id": "42bdff093d298850664a0e77be89b63ec61bd0dd",
"content_id": "f2b92bdf9db6c5903f6612570ed4f2eb73b7002a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4437,
"license_type": "permissive",
"max_line_length": 74,
"num_lines": 122,
"path": "/assort/linear/linreg.py",
"repo_name": "yalotfi/AssortedAI",
"src_encoding": "UTF-8",
"text": "import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom assort import _INITIALIZER_CONFIG\r\nfrom assort import _COST_FUNC_CONFIG\r\nfrom assort.cost_functions import MeanSquaredError\r\n\r\n\r\nclass RegressionModel(object):\r\n \"\"\"LinearModel: Base class for all linear models.\r\n \"\"\"\r\n\r\n def __init__(self, X_train, y_train):\r\n super(RegressionModel, self).__init__()\r\n # Get training features and labels\r\n self.X_train = X_train\r\n self.y_train = y_train\r\n self.m, self.n = self.X_train.shape[0], self.X_train.shape[1]\r\n\r\n # Assigned when model is training\r\n self.cost_cache = []\r\n self.trained_params = {}\r\n self.trained_grads = {}\r\n\r\n def _init_weights(self, weight_initializer, shape):\r\n try:\r\n weight_initializer = _INITIALIZER_CONFIG[weight_initializer]()\r\n return weight_initializer(shape)\r\n except ValueError:\r\n print(\"Initializer not supported...\")\r\n\r\n def _init_bias(self, bias_initializer, shape):\r\n try:\r\n bias_initializer = _INITIALIZER_CONFIG[bias_initializer]()\r\n return bias_initializer(shape)\r\n except ValueError:\r\n print(\"Initializer not supported...\")\r\n\r\n def _init_cost_func(self, objective, y_train, y_hat, X_train):\r\n try:\r\n return _COST_FUNC_CONFIG[objective](y_train, y_hat, X_train)\r\n except ValueError:\r\n print(\"Objective not supported...\")\r\n\r\nclass LinearRegression(RegressionModel):\r\n \"\"\"OLS regression trained with Stochastic Gradient Descent\r\n\r\n Attributes:\r\n X_train -- training feature matrix with shape (m, n)\r\n y_train -- training label vector with shape (m, 1)\r\n weight_initializer -- how to initialize model parameters\r\n cost_cache -- numpy array of historical training error\r\n trained_params -- python dictionary storing:\r\n \"theta\" -- ndarray - optimized model parameters\r\n\r\n Methods:\r\n fit -- perform gradient descent\r\n predict -- make prediction after fitting linear regresion\r\n plot_error -- plot cost after each training 
iteration\r\n \"\"\"\r\n\r\n def __init__(self,\r\n X_train,\r\n y_train,\r\n objective='mean_squared_error',\r\n weight_initializer='zeros'):\r\n super().__init__(X_train, y_train)\r\n intercept = np.ones((self.m, 1))\r\n self.X_ = np.c_[intercept, self.X_train]\r\n self.theta_start = self._init_theta(weight_initializer)\r\n self.objective = objective\r\n\r\n def _init_theta(self, weight_initializer):\r\n shape = (self.n + 1, 1)\r\n return self._init_weights(weight_initializer, shape)\r\n\r\n def _propagate(self, theta):\r\n y_hat = np.dot(self.X_, theta)\r\n mse = self._init_cost_func(\r\n self.objective, self.y_train, y_hat, self.X_)\r\n return (mse.get_cost, mse.get_grads)\r\n\r\n def fit(self, optimizer, print_cost_freq=100):\r\n \"\"\"Fit OLS Regression with Stochastic Gradient Descent\r\n\r\n Arguments:\r\n optimizer -- optimizer class\r\n print_cost_freq -- print cost when train iter mod freq = 0\r\n\r\n Return:\r\n self\r\n \"\"\"\r\n print(\"Training model...\")\r\n w = self.theta_start\r\n for i in range(optimizer.epochs):\r\n cost, grad = self._propagate(w)\r\n # 3) Update parameters, theta, by learning rate and gradient\r\n w = optimizer._update_params(w, grad)\r\n # 4) Save and print cost after every training iteration\r\n self.cost_cache.append(cost)\r\n if i % print_cost_freq == 0:\r\n print(\"Error at iteration {}: {}\".format(i, cost))\r\n\r\n # Save optimized parameters\r\n self.trained_params = {\"theta\": w}\r\n self.trained_grads = {\"grads\": grad}\r\n print(\"Model is trained, optimized results stored...\\n\")\r\n return self\r\n\r\n def predict(self, X):\r\n \"\"\"Make a prediction with the trained model\"\"\"\r\n X_ = np.c_[np.ones((X.shape[0], 1)), X]\r\n theta = self.trained_params[\"theta\"]\r\n return np.dot(X_, theta)\r\n\r\n def plot_error(self):\r\n \"\"\"Simply plot the model error over training\"\"\"\r\n print(\"Plotting model error...\\n\")\r\n plt.plot(self.cost_cache)\r\n plt.ylabel('Training Cost')\r\n plt.xlabel('Training 
Iteration')\r\n plt.show()\r\n"
},
{
"alpha_fraction": 0.5931283831596375,
"alphanum_fraction": 0.5967450141906738,
"avg_line_length": 23.04347801208496,
"blob_id": "89cd808f3159d48729c23f5e1cc0be9f18538d57",
"content_id": "d9f41910cdd3a79a3ab943f85b00da0fd8f854a6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 553,
"license_type": "permissive",
"max_line_length": 66,
"num_lines": 23,
"path": "/assort/preprocessing/scale.py",
"repo_name": "yalotfi/AssortedAI",
"src_encoding": "UTF-8",
"text": "import numpy as np\n\n\ndef rescale(X):\n \"\"\"Rescale data between [0, 1]\"\"\"\n numer = X - np.min(X)\n denom = np.max(X) - np.min(X)\n return numer / denom\n\n\ndef mean_normalize(X):\n \"\"\"Normalize the mean of a given distribution\"\"\"\n numer = X - np.mean(X)\n denom = np.max(X) - np.min(X)\n return numer / denom\n\n\ndef standardize(X):\n \"\"\"Standardize features to have zero mean and unit-variance\"\"\"\n # X - X_bar / std\n x_bar = np.mean(X) # feature means\n sigma = np.std(X) # feature st deviations\n return (X - x_bar) / sigma\n"
},
{
"alpha_fraction": 0.7385892271995544,
"alphanum_fraction": 0.745643138885498,
"avg_line_length": 43.47169876098633,
"blob_id": "4bc439b1d99c58c1986b5543e00fc8b5e40fef9e",
"content_id": "5302a1eddb1c527fcd3de3760377e718a337df11",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2410,
"license_type": "permissive",
"max_line_length": 413,
"num_lines": 53,
"path": "/README.md",
"repo_name": "yalotfi/AssortedAI",
"src_encoding": "UTF-8",
"text": "## AssortedAI\r\n\r\nThis project is primarily a way to organize individual ML algorithms that I've implemented in disparate repositories. AssortedAI will become a package that has core functionality that makes machine learning simple and declarative. At a bare minimum, it will cover basic supervised learning algorithms, at least one clustering technique, variations of stochastic gradient descent, and some preprocessing utilities.\r\n\r\nBeyond the basic prerequisites, I would like the implement visualization tools and dimensionality reduction techniques like PCA and t-SNE. In terms of optimization, I haven't used second order, Netown methods so this could be another stretch goal for the project.\r\n\r\nFinally, there will be several detailed examples of machine learning tasks from digit recognition to flower-type prediction, and housing price predictions. It will provide a solid context on using the APIs. I aim for it to feel akin to a simplified sklearn work-flow.\r\n\r\n### Motivation:\r\n\r\n1. My GitHub account has become pretty *disorganized with many half-baked projects.* The structure and focus of building a dedicated package will help clean these repos.\r\n2. *Education!* I have found that for myself, implementing these algorithms from scratch provides a much stronger intuition for how they work.\r\n3. *Simplicity.* Many newcomers to ML, like myself, find a lot of the most popular packages to be very overwhelming. Given that this package is meant to be an educational experiment, hopefully it will prove the same for others.\r\n\r\n### Models:\r\n\r\n1. ~~Linear Regression~~\r\n2. Softmax Regression (generalized ~~logistic regression~~)\r\n3. Shallow (2-L) Feedforward Neural Networks\r\n4. Deep (L-layer) Feedforward Neural Networks\r\n5. K-Nearest Neighbors\r\n6. K-Means Clustering\r\n\r\n*NOTE: Just basic goals for now that touch on most ML fundamentals*\r\n\r\n### Other Components:\r\n\r\n1. 
Activation Functions\r\n * ~~Sigmoid~~\r\n * ~~Softmax~~\r\n * ~~Hyperbolic Tangent~~\r\n * ~~ReLU~~\r\n * ~~Leaky-ReLU~~\r\n\r\n2. Initializers\r\n\t* ~~Zeros~~\r\n\t* ~~Ones~~\r\n\t* ~~Random Normal~~ / Random Uniform\r\n\t* Xavier\r\n\t* He\r\n3. Optimizers\r\n * Stochastic Gradient Descent\r\n * Adam\r\n * RMSProp\r\n4. Regularizers\r\n\t* L1/L2 Norm\r\n\t* Dropout\r\n5. Preprocessing\r\n * One-hot encoding\r\n * ~~Normalization~~\r\n * Image transforms (TBD)\r\n\r\nA ~~strikethrough~~ indicates that feature has been successfully implemented.\r\n"
},
{
"alpha_fraction": 0.5468245148658752,
"alphanum_fraction": 0.5543595552444458,
"avg_line_length": 19.113636016845703,
"blob_id": "21095a93e9f1d4fe25fb832454aa72d8818c6289",
"content_id": "23c49568ec7ddf812b3cf3b8eed55236488b3b94",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 929,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 44,
"path": "/assort/regularizers.py",
"repo_name": "yalotfi/AssortedAI",
"src_encoding": "UTF-8",
"text": "import numpy as np\r\n\r\n\r\ndef l1_reg(w, lmda):\r\n \"\"\"\r\n L1 Regularization Term\r\n\r\n Arguments\r\n ---------\r\n w : ndarray\r\n Model parameters to decay by regularization term\r\n lmda : float\r\n Regularization constant, lambda\r\n\r\n Returns\r\n -------\r\n float\r\n Regularization term to add to the cost\r\n \"\"\"\r\n return lmda / 2 * np.sum(w)\r\n\r\n\r\ndef l2_reg(w, lmda, derivative=False):\r\n \"\"\"\r\n L2 Regularization Term\r\n\r\n Arguments\r\n ---------\r\n w : ndarray\r\n Model parameters to decay by regularization term\r\n lmda : float\r\n Regularization constant, lambda\r\n derivative : bool\r\n Choose to return derivative of L2 term for backprop. Default is False\r\n\r\n Returns\r\n -------\r\n float\r\n Regularization term to add to the cost\r\n \"\"\"\r\n if derivative:\r\n return lmda * w\r\n else:\r\n return lmda / 2 * np.sum(np.dot(w, w.T))\r\n"
},
{
"alpha_fraction": 0.545918345451355,
"alphanum_fraction": 0.557823121547699,
"avg_line_length": 22.520000457763672,
"blob_id": "664142ac3de495bc3822326d565843094cf0f601",
"content_id": "2685105765948edde612d34deac322e2f70e96e5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 588,
"license_type": "permissive",
"max_line_length": 58,
"num_lines": 25,
"path": "/assort/preprocessing/encode.py",
"repo_name": "yalotfi/AssortedAI",
"src_encoding": "UTF-8",
"text": "import numpy as np\n\n\ndef one_hot(labels, k_classes):\n \"\"\"Encode each training label into a one-hot vector\n\n Arguments\n ---------\n labels : ndarray\n Column vector with shape (m, 1)\n k_classes : int\n Number of classes\n\n Returns\n -------\n ndarray\n encoded_labels as matrix with shape (m, k_classes)\n \"\"\"\n m = labels.shape[0]\n k = k_classes + 1\n encoded_labels = np.zeros((m, k), dtype='float32')\n for i in range(m):\n labidx = int(labels[i][0])\n encoded_labels[i, labidx] = 1\n return encoded_labels\n"
},
{
"alpha_fraction": 0.5835234522819519,
"alphanum_fraction": 0.6007604598999023,
"avg_line_length": 33.911502838134766,
"blob_id": "f606f8c0d6b5f83652f22f537291c9e7010c48c0",
"content_id": "9c42486ef2bfd430e45377e207118cfc393fe6e1",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3945,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 113,
"path": "/assort/datasets/load_datasets.py",
"repo_name": "yalotfi/AssortedAI",
"src_encoding": "UTF-8",
"text": "import os\nimport csv\nimport numpy as np\n\nfrom . import download\nfrom . import MNISTReader\nfrom ..preprocessing import train_test_split\n\n\nDATADIR = os.path.abspath(os.path.dirname(__file__))\n\n\ndef get_profit():\n # Load data\n fname = 'chain_profits.txt'\n fpath = os.path.join('assort', 'datasets', fname)\n data = np.loadtxt(fpath, delimiter=',')\n\n # Prepare data\n m, n = data.shape[0], 1\n X_train = data[:, 0].reshape((m, n))\n y_train = data[:, 1].reshape((m, 1))\n\n # Make up some test points\n X_test = np.array([3.5, 7])\n\n return (X_train, y_train, X_test)\n\n\ndef get_housing():\n # Load data\n fname = 'house_prices.txt'\n fpath = os.path.join(DATADIR, 'housing', fname)\n data = np.loadtxt(fpath, delimiter=',')\n\n # Prepare data\n m, n = data.shape[0], data.shape[1] - 1\n X_train = data[:, 0:2].reshape((m, n))\n y_train = data[:, -1].reshape((m, 1))\n\n # Test, dummy data\n X_test = np.array([[2500, 3], [1000, 2], [1400, 3]])\n return (X_train, y_train, X_test)\n\n\ndef get_spam(seed, test_size=0.3):\n url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/spambase/'\n fname = \"spambase.data\"\n directory = os.path.join(DATADIR, 'spam')\n filepath = download(url, fname, directory)\n with open(filepath, 'r') as f:\n emails = np.asarray(\n [row for row in csv.reader(f, delimiter=',')], dtype=np.float32)\n np_X = emails[:, :-1]\n np_y = emails[:, -1].reshape(np_X.shape[0], 1)\n return train_test_split(np_X, np_y, seed=seed, test_size=test_size)\n\n\ndef get_iris(seed, test_size=0.3):\n url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/'\n fname = \"iris.data\"\n directory = os.path.join(DATADIR, 'iris')\n filepath = download(url, fname, directory)\n classes = ['Iris-setosa', 'Iris-versicolor', 'Iris-virginica']\n X = []\n y = []\n with open(filepath, 'r') as f:\n csvreader = csv.reader(f, delimiter=',')\n for row in csvreader:\n if len(row) == 5:\n X.append(row[:-1])\n y.append(classes.index(row[-1]))\n 
else: # Last empty row can just be ignored\n pass\n np_X = np.asarray(X, dtype=np.float32)\n np_y = np.asarray(y, dtype=np.float32).reshape((len(y), 1))\n return train_test_split(np_X, np_y, seed=seed, test_size=test_size)\n\n\ndef get_mnist(download=True, serialize=False,\n binary=False, bin_digits=[0, 1], flatten=False):\n \"\"\"Retrieve MNIST datasets and return tuples of train/test sets\"\"\"\n\n # Load MNIST data - supply download and flatten flags to MNIST Reader\n directory = os.path.join(DATADIR, 'mnist')\n reader = MNISTReader(directory, download, flatten)\n\n # Pull the train and test sets stored as attributes of the reader\n (X_train, y_train) = reader.train_set\n (X_test, y_test) = reader.test_set\n\n # Flag - serialize arrays to default directory: assort\\datasets\\mnist\\\n if serialize:\n fpath = os.path.join(directory, 'mnist.npz')\n np.savez(fpath,\n X_train=X_train, y_train=y_train,\n X_test=X_test, y_test=y_test)\n print(\"Saved MNIST arrays to disk here: {}\\n\".format(fpath))\n\n # Flag - subset for binary classification\n if binary:\n # Subset by logical indexing\n a, b = bin_digits[0], bin_digits[1]\n train_digits = np.where(np.logical_or(y_train == a, y_train == b))\n test_digits = np.where(np.logical_or(y_test == a, y_test == b))\n X_train_bin = X_train[train_digits[0]] # (m_bin_train, 28, 28)\n y_train_bin = y_train[train_digits[0]] # (m_bin_train, 1)\n X_test_bin = X_test[test_digits[0]] # (m_bin_test, 28, 28)\n y_test_bin = y_test[test_digits[0]] # (m_bin_test, 1)\n return (X_train_bin, y_train_bin), (X_test_bin, y_test_bin)\n # Otherwise return full dataset of all 10-classes\n else:\n return (X_train, y_train), (X_test, y_test)\n"
},
{
"alpha_fraction": 0.45724907517433167,
"alphanum_fraction": 0.45724907517433167,
"avg_line_length": 32.625,
"blob_id": "a62325d30ae0456017f0aaeb1765a95bfd6be5d4",
"content_id": "367c5b2e21280002a28302e4cd2719af92bc0e3a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 269,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 8,
"path": "/assort/datasets/__init__.py",
"repo_name": "yalotfi/AssortedAI",
"src_encoding": "UTF-8",
"text": "from .download_util import download\nfrom .mnist_util import MNISTReader\n\nfrom .load_datasets import (get_profit,\n get_housing,\n get_spam,\n get_iris,\n get_mnist)\n"
},
{
"alpha_fraction": 0.647912859916687,
"alphanum_fraction": 0.663339376449585,
"avg_line_length": 26.256410598754883,
"blob_id": "4e026815af8e04d0e528330ff83494efeca23eb2",
"content_id": "f88dbc7d88b53ea032de92e5d821fb59a3290cd0",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1102,
"license_type": "permissive",
"max_line_length": 69,
"num_lines": 39,
"path": "/examples/iris.py",
"repo_name": "yalotfi/AssortedAI",
"src_encoding": "UTF-8",
"text": "import sys\r\nimport os\r\nimport numpy as np\r\n\r\n\r\nsys.path.insert(0, os.path.join(os.getcwd()))\r\nfrom assort.utils import load_datasets\r\nfrom assort.preprocessing import feature_scaling as norm\r\nfrom assort.preprocessing import one_hot as ova\r\nfrom assort.linear.logistic import SoftmaxRegression\r\n\r\n\r\ndef main():\r\n # Load Iris Dataset\r\n (X_train, y_train), (X_test, y_test) = load_datasets.get_iris(99)\r\n\r\n print(X_train.shape)\r\n print(y_train.shape)\r\n print(X_test.shape)\r\n print(y_test.shape)\r\n\r\n # Normalization\r\n X_train_norm = norm.standardize(X_train)\r\n X_test_norm = norm.standardize(X_test)\r\n\r\n # One-Hot Encoding\r\n iris_classes = int(np.max(y_train))\r\n y_train_ova = ova.one_hot_encode(y_train, iris_classes)\r\n y_test_ova = ova.one_hot_encode(y_test, iris_classes)\r\n\r\n # Train and evaluate the model\r\n model = SoftmaxRegression(epochs=5000, lr=0.00005, lmda=0.001)\r\n model.fit(X_train_norm, y_train_ova)\r\n print(model.evaluate(X_train_norm, y_train))\r\n print(model.evaluate(X_test_norm, y_test))\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n"
},
{
"alpha_fraction": 0.5410526394844055,
"alphanum_fraction": 0.546842098236084,
"avg_line_length": 28.158729553222656,
"blob_id": "bc377d2ff72fc787c459c754d57952787eaf1a71",
"content_id": "f022329102777c56508fd6f91673e9691f9898fd",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3800,
"license_type": "permissive",
"max_line_length": 72,
"num_lines": 126,
"path": "/assort/cost_functions.py",
"repo_name": "yalotfi/AssortedAI",
"src_encoding": "UTF-8",
"text": "import numpy as np\r\n\r\n\r\nclass CostFunction(object):\r\n \"\"\"CostFunction: Base class for all cost functions\r\n\r\n Performs basic error handling and inits common attributes\r\n \"\"\"\r\n\r\n def __init__(self, y_train, y_hat, X_train):\r\n super(CostFunction, self).__init__()\r\n self._check_array_dims(y_train, y_hat, X_train)\r\n self.y_train = y_train\r\n self.y_hat = y_hat\r\n self.loss = self.y_hat - self.y_train\r\n self.X_train = X_train\r\n self.m = X_train.shape[0]\r\n\r\n def _check_array_dims(self, y_train, y_hat, X_train):\r\n try:\r\n assert(y_train.shape == y_hat.shape)\r\n assert(y_train.shape[0] == X_train.shape[0])\r\n except ValueError:\r\n print(\"Array dimensions must match!\\n\\\r\n y_train: (m, 1),\\n\\\r\n y_hat: (m, 1),\\n\\\r\n X_train: (m, n)\\n\")\r\n\r\n\r\nclass MeanSquaredError(CostFunction):\r\n \"\"\"MeanSquaredError\r\n\r\n Arguments:\r\n y_train -- actual labels - vector with shape (m, 1)\r\n y_hat -- predicted labels - vector with shape (m, 1)\r\n X -- feature matrix, X, with shape (m, n)\r\n\r\n Properties:\r\n get_cost: Returns error between predicted and actual labels\r\n get_grad: Returns gradient of cost with respect to parameters\r\n\r\n Raises:\r\n ValueError: Check dimensions of input arrays\r\n \"\"\"\r\n\r\n def __init__(self, y_train, y_hat, X_train):\r\n super().__init__(y_train, y_hat, X_train)\r\n\r\n @property\r\n def get_cost(self):\r\n return (1 / (2 * self.m)) * np.sum(np.square(self.loss))\r\n\r\n @property\r\n def get_grads(self):\r\n return (1 / self.m) * np.dot(self.X_train.T, self.loss)\r\n\r\n\r\nclass BinaryCrossEntropy(CostFunction):\r\n \"\"\"BinaryCrossEntropy\r\n\r\n Arguments:\r\n y_train -- actual labels - vector with shape (m, 1)\r\n y_hat -- predicted labels - vector with shape (m, 1)\r\n X -- feature matrix, X, with shape (m, n)\r\n\r\n Properties:\r\n get_cost: Returns error between predicted and actual labels\r\n get_grad: Returns gradient of cost with respect to 
parameters\r\n\r\n Raises:\r\n ValueError: Check dimensions of input arrays\r\n \"\"\"\r\n\r\n def __init__(self, y_train, y_hat, X_train):\r\n super().__init__(y_train, y_hat, X_train)\r\n\r\n @property\r\n def get_cost(self):\r\n case_true = self.y_train * np.log(self.y_hat)\r\n case_false = (1 - self.y_train) * np.log(1 - self.y_hat)\r\n return -(1 / self.m) * np.sum(case_true + case_false)\r\n\r\n @property\r\n def get_grads(self):\r\n dw = (1 / self.m) * np.dot(self.X_train.T, self.loss)\r\n db = (1 / self.m) * np.sum(self.loss)\r\n grads = {\r\n \"dw\": dw,\r\n \"db\": db\r\n }\r\n return grads\r\n\r\n\r\nclass CategoricalCrossEntropy(CostFunction):\r\n \"\"\"CategoricalCrossEntropy\r\n\r\n Arguments:\r\n y_train -- actual labels - vector with shape (m, 1)\r\n y_hat -- predicted labels - vector with shape (m, 1)\r\n X -- feature matrix, X, with shape (m, n)\r\n\r\n Properties:\r\n get_cost: Returns error between predicted and actual labels\r\n get_grad: Returns gradient of cost with respect to parameters\r\n\r\n Raises:\r\n ValueError: Check dimensions of input arrays\r\n \"\"\"\r\n\r\n def __init__(self, y_train, y_hat, X_train):\r\n super().__init__(y_train, y_hat, X_train)\r\n\r\n @property\r\n def get_cost(self):\r\n return -(1 / self.m) * np.sum(self.y_hat * np.log(self.y_label))\r\n\r\n @property\r\n def get_grads(self):\r\n dZ = self.y_label - self.y_hat\r\n dw = -(1 / self.m) * np.dot(self.X_train.T, dZ)\r\n db = -(1 / self.m) * np.sum(dZ)\r\n grads = {\r\n \"dw\": dw,\r\n \"db\": db\r\n }\r\n return grads\r\n"
},
{
"alpha_fraction": 0.6834734082221985,
"alphanum_fraction": 0.6834734082221985,
"avg_line_length": 23.5,
"blob_id": "fdea7ec7720098c74e8a2a447e2f1f84bf1f1cff",
"content_id": "d131ce5999be2bdf6d5ca7ebad83fa23119f77ad",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 357,
"license_type": "permissive",
"max_line_length": 56,
"num_lines": 14,
"path": "/assort/__init__.py",
"repo_name": "yalotfi/AssortedAI",
"src_encoding": "UTF-8",
"text": "from assort.initializers import *\r\nfrom assort.cost_functions import *\r\n\r\n_INITIALIZER_CONFIG = {\r\n 'zeros': Zeros,\r\n 'ones': Ones,\r\n 'random_normal': RandomNormal\r\n}\r\n\r\n_COST_FUNC_CONFIG = {\r\n 'mean_squared_error': MeanSquaredError,\r\n 'binary_cross_entropy': BinaryCrossEntropy,\r\n 'categorical_cross_entropy': CategoricalCrossEntropy\r\n}\r\n"
}
] | 28 |
jind11/NeuralTextSimplification-Pytorch
|
https://github.com/jind11/NeuralTextSimplification-Pytorch
|
6e880a7e5d2019f286d45bdc6274e1d4a269b0e8
|
541e5f3b0e9d9cd5495d7ca7913b5a42bd21b15e
|
963c53a8f6288d70d6e0fcde32d44a049d15563b
|
refs/heads/master
| 2020-08-05T19:00:10.593237 | 2019-10-04T19:40:56 | 2019-10-04T19:40:56 | 212,667,280 | 6 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6357661485671997,
"alphanum_fraction": 0.6786578893661499,
"avg_line_length": 43.476924896240234,
"blob_id": "6f0659960ddcd98a2d5bca054a3385fdf7b61059",
"content_id": "1c7804979356270c7b7bde27b435e409ca4b608b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2891,
"license_type": "no_license",
"max_line_length": 338,
"num_lines": 65,
"path": "/README.md",
"repo_name": "jind11/NeuralTextSimplification-Pytorch",
"src_encoding": "UTF-8",
"text": "# Exploring Neural Text Simplification--Pytorch Version\nThis is the reimplementation of the [NeuralTextSimplification](https://github.com/senisioi/NeuralTextSimplification) repository in Pytorch. The original repository is based on Lua Torch, which may not be able to be installed in some machines (at least in my machine), therefore I provide this pytorch version in case someone may need it. \n\nThe algorithm behind this code is from this paper: [Nisioi, Sergiu, et al. \"Exploring neural text simplification models.\" Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers). 2017.](https://www.aclweb.org/anthology/P17-2014/) \n\nIt is based on the standard LSTM based seq-to-seq translation model and OpenNMT is used as the code base. \n\n## How to use\n\n1. OpenNMT dependency: You first need to install the [OpenNMT](https://github.com/OpenNMT/OpenNMT-py) tool:\n\n```\npip install OpenNMT-py\n```\n\n2. Checkout this repository:\n\n```\ngit clone https://github.com/jind11/NeuralTextSimplification-Pytorch\n```\n\n3. Make a directory named \"models\", download the pre-trained released models [NTS](https://drive.google.com/file/d/1oRRWXTQ-JXSTxJ944X1JI-PZyovmtMxU/view?usp=sharing) and save into it. If you want to train your own model based on your data, you can use this command (remember to change the directory of the data and model save path):\n\n```\n./train.sh\n```\n\nWe also provide the EW-SEW dataset used to train the released pre-trained model in the \"data\" folder.\n\n4. Run translate.sh to get the translation results for your dataL\n\n```\nmkdir results\n./translate.sh\n```\n\n5. 
Run automatic evaluation metrics (nltk package is needed for this step):\n\n```\n./evaluate.sh\n```\n\n## Benchmark\n\nSince this is a reimplementation of an existing repository, we would like to compare the performance to the original one for quality checking based on two automatic metrics: SARI and BLEU.\n\n| Approach | Repository | SARI | BLEU |\n|------------------------------------|------------|------:|------:|\n| NTS default (beam 5, hypothesis 1) | Original | 30.65 | 84.51 |\n| NTS default (beam 5, hypothesis 1) | This one | 29.90 | 93.67 |\n| NTS SARI (beam 5, hypothesis 2) | Original | 37.25 | 80.69 |\n| NTS SARI (beam 5, hypothesis 2) | This one | 38.63 | 87.19 |\n| NTS BLEU (beam 12, hypothesis 1) | Original | 30.77 | 84.70 |\n| NTS BLEU (beam 12, hypothesis 1) | This one | 29.78 | 93.71 |\n\nFrom this table, we see that this reimplementation is comparable or even better than the original code. \n\nIn the end, we put the automatic metrics results for all four hypotheses for beam search of 5:\n\n| Hypothesis Number | SARI | BLEU |\n|-------------------|------:|------:|\n| 1 | 29.90 | 93.67 |\n| 2 | 38.63 | 87.19 |\n| 3 | 38.65 | 84.67 |\n| 4 | 37.92 | 84.19 |\n"
},
{
"alpha_fraction": 0.6411483287811279,
"alphanum_fraction": 0.7033492922782898,
"avg_line_length": 18.090909957885742,
"blob_id": "82b9f917fb90220830495e20daec357c448654db",
"content_id": "933676ed6bb0644d1c8f7024dd6db411df50fdb8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 209,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 11,
"path": "/translate.sh",
"repo_name": "jind11/NeuralTextSimplification-Pytorch",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nCUDA_VISIBLE_DEVICES=2 onmt_translate \\\n-model models/nts_step_20000.pt \\\n-src data/test_uni.en \\\n-replace_unk \\\n-verbose \\\n-beam_size 5 \\\n-n_best 4 \\\n-share_vocab \\\n-output results/nts_step_20000"
},
{
"alpha_fraction": 0.710659921169281,
"alphanum_fraction": 0.710659921169281,
"avg_line_length": 21,
"blob_id": "b039fb9c93d1cb9fc046400e8de3cc2425e1ec76",
"content_id": "7fffc1c7d756650e3bcf16c575a532bd1ae4bc29",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 197,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 9,
"path": "/preprocess.sh",
"repo_name": "jind11/NeuralTextSimplification-Pytorch",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nonmt_preprocess \\\n-train_src data/train.en \\\n-train_tgt data/train.sen \\\n-valid_src data/valid_uni.en \\\n-valid_tgt data/valid_uni.sen \\\n-share_vocab \\\n-save_data data/EW-SEW-NTS/ew-sew"
},
{
"alpha_fraction": 0.5925667881965637,
"alphanum_fraction": 0.6039488911628723,
"avg_line_length": 29.75,
"blob_id": "4a8e6ea25ff7fb7895db899ad42bba19b592b201",
"content_id": "e3012f1b17d00bd32c0535717a76081b8eb2094f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8610,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 280,
"path": "/evaluate.py",
"repo_name": "jind11/NeuralTextSimplification-Pytorch",
"src_encoding": "UTF-8",
"text": "import sys\nimport os\nimport numpy as np\nimport codecs\nimport logging\nfrom SARI import SARIsent\nfrom nltk.translate.bleu_score import *\nsmooth = SmoothingFunction()\nfrom nltk import word_tokenize\nfrom textstat.textstat import textstat\nimport Levenshtein\nimport nltk\nfrom nltk.tokenize import RegexpTokenizer\nimport syllables_en\n\nTOKENIZER = RegexpTokenizer('(?u)\\W+|\\$[\\d\\.]+|\\S+')\nSPECIAL_CHARS = ['.', ',', '!', '?']\nlogging.basicConfig(format = u'[LINE:%(lineno)d]# %(levelname)-8s [%(asctime)s] %(message)s', level = logging.NOTSET)\n\n\ndef get_words(text=''):\n words = TOKENIZER.tokenize(text)\n filtered_words = []\n for word in words:\n if word in SPECIAL_CHARS or word == \" \":\n pass\n else:\n new_word = word.replace(\",\",\"\").replace(\".\",\"\")\n new_word = new_word.replace(\"!\",\"\").replace(\"?\",\"\")\n filtered_words.append(new_word)\n return filtered_words\n\ndef get_sentences(text=''):\n tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')\n sentences = tokenizer.tokenize(text)\n return sentences\n\ndef count_syllables(words):\n syllableCount = 0\n for word in words:\n syllableCount += syllables_en.count(word)\n return syllableCount\n\ndef files_in_folder(mypath):\n return [ os.path.join(mypath,f) for f in os.listdir(mypath) if os.path.isfile(os.path.join(mypath,f)) ]\n\ndef folders_in_folder(mypath):\n return [ os.path.join(mypath,f) for f in os.listdir(mypath) if os.path.isdir(os.path.join(mypath,f)) ]\n\ndef files_in_folder_only(mypath):\n return [ f for f in os.listdir(mypath) if os.path.isfile(os.path.join(mypath,f)) ]\n\ndef remove_features(sent):\n tokens = sent.split(\" \")\n return \" \".join([token.split(\"|\")[0] for token in tokens])\n\ndef remove_underscores(sent):\n return sent.replace(\"_\", \" \")\n\ndef replace_parant(sent):\n sent = sent.replace(\"-lrb-\", \"(\").replace(\"-rrb-\", \")\")\n return sent.replace(\"(\", \"-lrb-\").replace(\")\", \"-rrb-\")\n\ndef lowstrip(sent):\n return 
sent.lower().strip()\n\ndef normalize(sent):\n return replace_parant(lowstrip(sent))\n\ndef as_is(sent):\n return sent\n\ndef get_hypothesis(filename):\n hypothesis = '-'\n if \"_h1\" in filename:\n hypothesis = '1'\n elif \"_h2\" in filename:\n hypothesis = '2'\n elif \"_h3\" in filename:\n hypothesis = '3'\n elif \"_h4\" in filename:\n hypothesis = '4'\n return hypothesis\n\ndef mean(numbers):\n return float(sum(numbers)) / max(len(numbers), 1)\n\ndef print_scores(pairs, whichone = ''):\n # replace filenames by hypothesis name for csv pretty print\n for k,v in pairs:\n hypothesis = get_hypothesis(k)\n print(\"\\t\".join( [whichone, \"{:10.2f}\".format(v), k, hypothesis] ))\n\ndef SARI_file(source, preds, refs, preprocess):\n files = [codecs.open(fis, \"r\", 'utf-8') for fis in [source, preds, refs]]\n scores = []\n for src, pred, ref in zip(*files):\n references = [preprocess(r) for r in ref.split('\\t')]\n scores.append(SARIsent(preprocess(src), preprocess(pred), references))\n for fis in files:\n fis.close()\n return mean(scores)\n\n\n# BLEU doesn't need the source\ndef BLEU_file(source, preds, refs, preprocess=as_is):\n files = [codecs.open(fis, \"r\", 'utf-8') for fis in [preds, refs]]\n scores = []\n references = []\n hypothese = []\n for pred, ref in zip(*files):\n references.append([word_tokenize(preprocess(r)) for r in ref.split('\\t')])\n hypothese.append(word_tokenize(preprocess(pred)))\n for fis in files:\n fis.close()\n # Smoothing method 3: NIST geometric sequence smoothing\n return corpus_bleu(references, hypothese, smoothing_function=smooth.method3)\n\n\ndef worddiff_file(source, preds, refs, preprocess):\n files = [codecs.open(fis, \"r\", 'utf-8') for fis in [source, preds]]\n worddiff = 0\n n = 0\n for src, pred in zip(*files):\n source = word_tokenize(preprocess(src))\n hypothese = word_tokenize(preprocess(pred))\n n += 1\n worddiff += len(source) - len(hypothese)\n\n worddiff /= float(n)\n for fis in files:\n fis.close()\n\n return worddiff / 
100.0\n\n\ndef IsSame_file(source, preds, refs, preprocess):\n files = [codecs.open(fis, \"r\", 'utf-8') for fis in [source, preds]]\n issame = 0\n n = 0.\n for src, pred in zip(*files):\n source = preprocess(src)\n hypothese = preprocess(pred)\n n += 1\n issame += source == hypothese\n\n issame /= n\n for fis in files:\n fis.close()\n\n return issame / 100.0\n\n\ndef FKGL_file(source, preds, refs, preprocess):\n files = [codecs.open(fis, \"r\", 'utf-8') for fis in [source, preds]]\n score = 0\n n = 0.\n for src, pred in zip(*files):\n hypothese = preprocess(pred)\n words = get_words(hypothese)\n word_count = float(len(words))\n sentence_count = float(len(get_sentences(hypothese)))\n syllable_count = float(count_syllables(words))\n score += 0.39 * (word_count / sentence_count) + 11.8 * (syllable_count / word_count) - 15.59\n n += 1\n\n score /= n\n for fis in files:\n fis.close()\n\n return round(score, 2) / 100\n\n\ndef FKdiff_file(source, preds, refs, preprocess):\n files = [codecs.open(fis, \"r\", 'utf-8') for fis in [source, preds]]\n fkdiff = 0\n n = 0.\n for src, pred in zip(*files):\n # hypothese = preprocess(pred)\n # source = preprocess(src)\n hypothese = (pred)\n source = (src)\n # print(source)\n # print(hypothese)\n\n fkdiff += (textstat.flesch_reading_ease(hypothese) - textstat.flesch_reading_ease(source))\n n += 1\n # fkdiff= 1/(1+np.exp(-fkdiff))\n\n fkdiff /= n\n for fis in files:\n fis.close()\n\n return fkdiff / 100.0\n\n\ndef LD_file(source, preds, refs, preprocess):\n files = [codecs.open(fis, \"r\", 'utf-8') for fis in [source, preds]]\n LD = 0\n n = 0.\n for src, pred in zip(*files):\n hypothese = preprocess(pred)\n source = preprocess(src)\n LD += Levenshtein.distance(hypothese, source)\n n += 1\n\n LD /= n\n for fis in files:\n fis.close()\n\n return LD / 100.0\n\n\ndef score(source, refs, fold, METRIC_file, preprocess=as_is):\n # new_files = files_in_folder(fold)\n data = []\n for fis in fold:\n # ignore log files\n if \".log\" in 
os.path.basename(fis):\n continue\n logging.info(\"Processing \"+os.path.basename(fis))\n val = 100*METRIC_file(source, fis, refs, preprocess)\n logging.info(\"Done \"+str(val))\n data.append((os.path.basename(fis), val))\n data.sort(key=lambda tup: tup[1])\n data.reverse()\n return data, None\n\n\ndef map_to_array(score_dict):\n def get_beam_order_from_filename(filename):\n filename = filename.split('_')\n beam = int(filename[2][1:])\n hyp_order = int(filename[3][1])\n return beam, hyp_order, filename[1]\n\n score_arr_dict = {}\n for filename, val in score_dict:\n try:\n beam, hyp_order, subset = get_beam_order_from_filename(filename)\n except:\n beam, hyp_order, subset = 5, 1, 'test'\n if subset in score_arr_dict:\n score_arr_dict[subset][beam-5, hyp_order-1] = round(val, 2)\n else:\n score_arr_dict[subset] = np.zeros((8, 5))\n score_arr_dict[subset][beam - 5, hyp_order - 1] = round(val, 2)\n return score_arr_dict\n\n\nif __name__ == '__main__':\n try:\n source = sys.argv[1]\n logging.info(\"Source: \" + source)\n refs = sys.argv[2]\n logging.info(\"References in tsv format: \" + refs)\n pred_path = sys.argv[3]\n logging.info(\"Path of predictions: \" + pred_path)\n except:\n logging.error(\"Input parameters must be: \" + sys.argv[0]\n + \" SOURCE_FILE REFS_TSV (paste -d \\\"\\t\\\" * > reference.tsv) DIRECTORY_OF_PREDICTIONS\")\n sys.exit(1)\n\n '''\n SARI can become very unstable to small changes in the data.\n The newsela turk references have all the parantheses replaced\n with -lrb- and -rrb-. 
Our output, however, contains the actual\n parantheses '(', ')', thus we prefer to apply a preprocessing\n step to normalize the text.\n '''\n preds = open(pred_path, 'r').readlines()\n fold = []\n for idx in range(4):\n preds_tmp = preds[idx::4]\n filename_tmp = pred_path+'_h{}'.format(idx+1)\n fold.append(filename_tmp)\n open(filename_tmp, 'w').write(''.join(preds_tmp))\n\n sari_test, sari_arr = score(source, refs, fold, SARI_file, normalize)\n bleu_test, bleu_arr = score(source, refs, fold, BLEU_file, lowstrip)\n"
},
{
"alpha_fraction": 0.6004015803337097,
"alphanum_fraction": 0.6586345434188843,
"avg_line_length": 19.70833396911621,
"blob_id": "1b9d9b9ebad9e8dee6a98a7724d04175e291e860",
"content_id": "57aa5cd7a604cd6028ff0691dac82f9c18109440",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 498,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 24,
"path": "/train.sh",
"repo_name": "jind11/NeuralTextSimplification-Pytorch",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nCUDA_VISIBLE_DEVICES=2 onmt_train \\\n--data data/EW-SEW-NTS/ew-sew \\\n--save_model models/nts \\\n--world_size 1 \\\n--gpu_ranks 0 \\\n--batch_size 64 \\\n--train_steps 60000 \\\n--valid_steps 5000 \\\n--early_stopping 2 \\\n--max_grad_norm 5 \\\n--dropout 0.3 \\\n--feat_vec_size 20 \\\n--learning_rate_decay 0.7 \\\n--word_vec_size 500 \\\n--share_embeddings \\\n--share_decoder_embeddings \\\n--model_type text \\\n--encoder_type rnn \\\n--rnn_type LSTM \\\n--layers 2 \\\n--rnn_size 500 \\\n--global_attention general \\\n\n"
},
{
"alpha_fraction": 0.7250000238418579,
"alphanum_fraction": 0.7666666507720947,
"avg_line_length": 19.16666603088379,
"blob_id": "a1fa7cba6c143486744422f46985924305d85653",
"content_id": "28fb366c687102028de7e6421d14788a9128a3ea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 120,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 6,
"path": "/evaluate.sh",
"repo_name": "jind11/NeuralTextSimplification-Pytorch",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nDATA=data\nPRED=results/nts_step_20000\n\npython evaluate.py $DATA/test_uni.en $DATA/test_references.tsv $PRED"
}
] | 6 |
vladyslm/wizards-clash
|
https://github.com/vladyslm/wizards-clash
|
9c9cf20996b2880c83bcfe7b49b7979ae68d89a9
|
4ff86b416db40ed99725930921b9b07eaf70fb05
|
a64d37fe78eef9c9e94fcc8d2729ae64301832c3
|
refs/heads/main
| 2023-02-13T06:59:27.254327 | 2020-11-13T12:57:46 | 2020-11-13T12:57:46 | 310,931,300 | 0 | 0 | null | 2020-11-07T21:02:00 | 2020-11-13T12:58:09 | 2021-01-08T16:31:49 |
Python
|
[
{
"alpha_fraction": 0.6430656909942627,
"alphanum_fraction": 0.6489050984382629,
"avg_line_length": 22.620689392089844,
"blob_id": "7744e09e15a3cd1e35ef4eaf13cc9e4d23ae918b",
"content_id": "7dc6552ead83d736f040cf311f464ebcd7eef7ab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1370,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 58,
"path": "/lib/input_controller/equation_generator.py",
"repo_name": "vladyslm/wizards-clash",
"src_encoding": "UTF-8",
"text": "import random\nimport math\nimport ast\nimport operator\nfrom configs.gameconf import GAME_DIFFICULTY\n# simple math problem generator\n# will generate an equations and return a tuple(equation, answer)\n\nOPER_LIST = [\"+\", \"-\"]\nOP_MAP = {\n ast.Add: operator.add,\n ast.Sub: operator.sub\n}\n\n\ndef tree_node(left, right, oper):\n string = f\"({left} {oper} {right})\"\n return string\n\n\ndef build_tree(num_nodes):\n if num_nodes == 1:\n return random.randrange(1, 10)\n\n num_left = math.floor(num_nodes / 2)\n left_subtree = build_tree(num_left)\n num_right = math.ceil(num_nodes / 2)\n right_subtree = build_tree(num_right)\n\n r_index = random.randrange(0, len(OPER_LIST))\n opr = OPER_LIST[r_index]\n return tree_node(left_subtree, right_subtree, opr)\n\n\nclass Calc(ast.NodeVisitor):\n\n def visit_BinOp(self, node):\n left = self.visit(node.left)\n right = self.visit(node.right)\n return OP_MAP[type(node.op)](left, right)\n\n def visit_Num(self, node):\n return node.n\n\n def visit_Expr(self, node):\n return self.visit(node.value)\n\n @classmethod\n def evaluate(cls, expression):\n tree = ast.parse(expression)\n calc = cls()\n return calc.visit(tree.body[0])\n\n\ndef generate_equation():\n problem = build_tree(GAME_DIFFICULTY)\n answer = Calc.evaluate(problem)\n return problem, answer\n"
},
{
"alpha_fraction": 0.5231788158416748,
"alphanum_fraction": 0.5298013091087341,
"avg_line_length": 28.140350341796875,
"blob_id": "5b7df892ba6020f5a10b85816a1aebd6ef34734e",
"content_id": "21a65ac14396d0279dd3675fe061d19cb9b663a6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1661,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 57,
"path": "/lib/wizard/abstract_wizard.py",
"repo_name": "vladyslm/wizards-clash",
"src_encoding": "UTF-8",
"text": "import time\nfrom configs.gameconf import *\nfrom lib.utils import flip_sprites\n\n\nclass AbstractWizard:\n def __init__(self, game_obj, screen, side=False):\n self.tmp = False\n self.cur_anim = None\n self.idle = None\n self.fight = None\n self.move_to = None\n self.screen = screen\n self.height = game_obj[\"height\"]\n self.width = game_obj[\"width\"]\n self.init_obj(game_obj, side)\n\n self.lt = 0\n self.cur_frame = 0\n\n def init_obj(self, game_obj, side):\n if side:\n self.idle = flip_sprites(game_obj[\"idle\"])\n self.fight = flip_sprites(game_obj[\"fight\"])\n self.move_to = (OFFSET_X * (-1)) + self.width / 2\n else:\n self.idle = game_obj[\"idle\"]\n self.fight = game_obj[\"fight\"]\n self.move_to = OFFSET_X + self.width / 2\n\n def get_pos(self):\n pos_x = SW / 2 - self.move_to\n pos_y = SH - OFFSET_Y - self.height\n return pos_x, pos_y\n\n def anim_controller(self, anim, delay=.3):\n if time.perf_counter() - self.lt >= delay:\n self.lt = time.perf_counter()\n self.cur_frame += 1\n if self.cur_frame > len(anim) - 1:\n self.cur_frame = 0\n self.screen.blit(anim[self.cur_frame], self.get_pos())\n\n def do_idle(self):\n self.anim_controller(self.idle)\n\n def do_fight(self):\n self.anim_controller(self.fight)\n\n def action(self, anim):\n if self.cur_anim == anim:\n self.cur_anim = anim\n anim()\n else:\n self.cur_frame = 0\n self.cur_anim = anim\n anim()\n"
},
{
"alpha_fraction": 0.6324483752250671,
"alphanum_fraction": 0.6336283087730408,
"avg_line_length": 23.214284896850586,
"blob_id": "f841922ba5ba318d336c922cb0cc19ef52eaf819",
"content_id": "d7e53c96898d6254e7b0900e0c975d8648940631",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1695,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 70,
"path": "/lib/utils.py",
"repo_name": "vladyslm/wizards-clash",
"src_encoding": "UTF-8",
"text": "from pathlib import Path\nimport pygame\nimport json\nfrom configs.gameconf import SBOARD_ROOT_KEY, SCOREBOARD_FILE_NAME\n\n\ndef flip_sprite(sprite):\n return pygame.transform.flip(sprite, True, False)\n\n\ndef flip_sprites(sprite_list):\n flipped = []\n for sprite in sprite_list:\n sprite = flip_sprite(sprite)\n flipped.append(sprite)\n return flipped\n\n\ndef get_project_root():\n return Path(__file__).parent.parent\n\n\ndef create_sboard_json_struc(data=0):\n i_data = [data] if data != 0 else []\n struc = {\n SBOARD_ROOT_KEY: i_data\n }\n return struc\n\n\ndef read_json_data(path):\n try:\n with open(path, 'r') as reader:\n json_data = json.load(reader)\n except json.decoder.JSONDecodeError:\n json_data = None\n return json_data\n\n\ndef write_json(data, path, mode=\"w\"):\n with open(path, mode) as write:\n json.dump(data, write, ensure_ascii=False)\n\n\ndef init_scoreboard():\n data = create_sboard_json_struc()\n root = get_project_root()\n path = f\"{root}/{SCOREBOARD_FILE_NAME}\"\n write_json(data, path)\n\n\ndef save_score(score_data):\n root = get_project_root()\n path = f\"{root}/{SCOREBOARD_FILE_NAME}\"\n try:\n json_data = read_json_data(path)\n if json_data:\n json_data[SBOARD_ROOT_KEY].append(score_data)\n write_json(json_data, path)\n else:\n write_json(create_sboard_json_struc(score_data), path)\n except FileNotFoundError:\n write_json(create_sboard_json_struc(score_data), path)\n\n\ndef scale(list_anim, a_width, a_height):\n al = []\n for frame in list_anim:\n al.append(pygame.transform.scale(frame, (a_width, a_height)))\n return al\n"
},
{
"alpha_fraction": 0.5794320702552795,
"alphanum_fraction": 0.5832693576812744,
"avg_line_length": 27.326086044311523,
"blob_id": "db618832071a9858a5764773342aba8df7a5c050",
"content_id": "099c870bd82bd558c11ddc847d889d7453832d7e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1303,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 46,
"path": "/game_screens/scoreboard.py",
"repo_name": "vladyslm/wizards-clash",
"src_encoding": "UTF-8",
"text": "from pygame.locals import *\nfrom configs.gameconf import *\nfrom lib.utils import read_json_data, get_project_root, init_scoreboard\n\nfrom setup import *\n\n\ndef get_scoreboard():\n root = get_project_root()\n path = f\"{root}/{SCOREBOARD_FILE_NAME}\"\n data = read_json_data(path)\n l_list = []\n try:\n for el in data[SBOARD_ROOT_KEY]:\n label = font.render(f\"{el['name']} - {el['score']}\", True, WHITE)\n l_list.append(label)\n except TypeError:\n init_scoreboard()\n return l_list\n\n\ndef display_scoreboard(labels_list, screen):\n space_between = 8\n offset_y = SBOARD_OFFSET_Y\n for i, label in enumerate(labels_list):\n pos_x = SW / 2 - label.get_width() // 2\n pos_y = offset_y + label.get_height() * i + space_between * i\n screen.blit(label, (pos_x, pos_y))\n\n\ndef scoreboard():\n running = True\n labels = get_scoreboard()\n while running:\n SCREEN.blit(SCOREBOARD_BG, (0, 0))\n display_scoreboard(labels, SCREEN)\n\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n running = False\n\n pygame.display.update()\n clock.tick(GAME_FPS)\n"
},
{
"alpha_fraction": 0.6175438761711121,
"alphanum_fraction": 0.6245614290237427,
"avg_line_length": 15.764705657958984,
"blob_id": "72f455e7363c6e642fd1269b6a494683b03afc62",
"content_id": "562a7851a081d6e9b405287970a60c9683f02b3c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 285,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 17,
"path": "/assets/gameassets/cast/gameobj.py",
"repo_name": "vladyslm/wizards-clash",
"src_encoding": "UTF-8",
"text": "import pygame\nimport os\n\ndir_path = os.path.dirname(os.path.abspath(__file__))\n\ncast = [\n pygame.image.load(f\"{dir_path}/cast-sprite/magic_cast.png\")\n]\n\nheight = cast[0].get_height()\nwidth = cast[0].get_width()\n\nCAST = {\n \"cast\": cast,\n \"height\": height,\n \"width\": width\n}\n"
},
{
"alpha_fraction": 0.8571428656578064,
"alphanum_fraction": 0.8571428656578064,
"avg_line_length": 36.33333206176758,
"blob_id": "1616da4b1a737a91a82c646f8827e08ae1dbd2e1",
"content_id": "187b3b68ef570d1db150ba9cb840c4a75ef5238a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 112,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 3,
"path": "/modules/problem_controller.py",
"repo_name": "vladyslm/wizards-clash",
"src_encoding": "UTF-8",
"text": "from lib.input_controller.problem_controller import ProblemController\n\nproblem_controller = ProblemController()\n"
},
{
"alpha_fraction": 0.6142484545707703,
"alphanum_fraction": 0.6177237033843994,
"avg_line_length": 29.289474487304688,
"blob_id": "0701aeb16b4f1e262ddcc7a06c59568315867a38",
"content_id": "b98567afaced8bb4c81d450e2da11cb4a824d61c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1151,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 38,
"path": "/lib/input_controller/problem_controller.py",
"repo_name": "vladyslm/wizards-clash",
"src_encoding": "UTF-8",
"text": "from lib.input_controller.equation_generator import generate_equation\n\n\nclass ProblemController:\n def __init__(self):\n self.problem = {\n \"problem\": None,\n \"answer\": None\n }\n self.input_controllers = []\n\n def add_input_controllers(self, input_c1, input_c2):\n self.input_controllers.append(input_c1)\n self.input_controllers.append(input_c2)\n\n def notify_input_controllers(self):\n for input_controller in self.input_controllers:\n input_controller.clean_input()\n if not input_controller.is_player:\n input_controller.set_countdown()\n\n def set_problem(self):\n self.notify_input_controllers()\n problem, answer = generate_equation()\n self.problem[\"problem\"] = problem\n self.problem[\"answer\"] = answer\n\n def get_problem(self):\n return self.problem[\"problem\"]\n\n def get_answer(self):\n print(self.problem[\"answer\"])\n return self.problem[\"answer\"]\n\n def clean_problem(self):\n self.problem[\"problem\"] = None\n self.problem[\"answer\"] = None\n self.notify_input_controllers()\n"
},
{
"alpha_fraction": 0.5338386297225952,
"alphanum_fraction": 0.5419599413871765,
"avg_line_length": 32.581817626953125,
"blob_id": "ef6d6990d5010314f5b765a9e69a4f8bf6a2e916",
"content_id": "dbec2121576bcebc8dd831a03cd8f9c5a3397a1f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1847,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 55,
"path": "/lib/magic_cast/cast.py",
"repo_name": "vladyslm/wizards-clash",
"src_encoding": "UTF-8",
"text": "# import pygame\nimport time\nfrom configs.gameconf import *\n\n\ndef get_frame_coef(anim):\n return GAME_FPS // len(anim)\n\n\nclass MagicCast:\n def __init__(self, flame_obj, cast_obj, screen):\n self.flame_height = flame_obj[\"height\"]\n self.flame_width = flame_obj[\"width\"]\n self.flame_anim = flame_obj[\"anim\"]\n self.screen = screen\n self.flame_offset_y = SH - FLAME_OFFSET_Y\n self.cast_offset_y = CAST_OFFSET_Y\n\n self.flame_frame_count = 0\n\n self.cast_height = cast_obj[\"height\"]\n self.cast_width = cast_obj[\"width\"]\n self.cast = cast_obj[\"cast\"]\n\n self.lt = 0\n self.cur_frame = 0\n\n def get_pos(self, pos_x, pos_y=None):\n y_pos = self.flame_offset_y if pos_y is None else pos_y\n x_pos = pos_x - self.flame_width // 2\n return x_pos, y_pos\n\n def draw_flame(self, anim, pos_x, pos_y=None):\n if time.perf_counter() - self.lt >= .1:\n self.lt = time.perf_counter()\n self.cur_frame += 1\n if self.cur_frame > len(anim) - 1:\n self.cur_frame = 0\n self.screen.blit(anim[self.cur_frame], self.get_pos(pos_x, pos_y))\n\n def draw_cast(self):\n c = (OFFSET_X - 40) // self.cast_width\n init_x_pos = (SW / 2) - self.cast_width // 2\n sprite_w = self.cast_width\n sprite_x_pos = sprite_w\n is_first = True\n for i in range(c):\n if is_first:\n self.screen.blit(self.cast[0], (init_x_pos, SH - CAST_OFFSET_Y))\n is_first = False\n else:\n self.screen.blit(self.cast[0], (init_x_pos + sprite_x_pos, SH - CAST_OFFSET_Y))\n self.screen.blit(self.cast[0], (init_x_pos - sprite_x_pos, SH - CAST_OFFSET_Y))\n # print(sprite_w)\n sprite_x_pos = sprite_w * i\n"
},
{
"alpha_fraction": 0.6308900713920593,
"alphanum_fraction": 0.6505235433578491,
"avg_line_length": 25.34482765197754,
"blob_id": "f861720dd870e4639a74daf74c1576c909eb1caa",
"content_id": "2f96e4dc488b7233a7984a8c27a8bff9325e4533",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 764,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 29,
"path": "/assets/gameassets/flame/gameobj.py",
"repo_name": "vladyslm/wizards-clash",
"src_encoding": "UTF-8",
"text": "import pygame\nimport os\nfrom lib.utils import scale\n\ndir_path = os.path.dirname(os.path.abspath(__file__))\n\nflame_anim = [\n pygame.image.load(f\"{dir_path}/flame-anim/f_1.png\"),\n pygame.image.load(f\"{dir_path}/flame-anim/f_2.png\"),\n pygame.image.load(f\"{dir_path}/flame-anim/f_3.png\"),\n pygame.image.load(f\"{dir_path}/flame-anim/f_4.png\"),\n pygame.image.load(f\"{dir_path}/flame-anim/f_5.png\"),\n pygame.image.load(f\"{dir_path}/flame-anim/f_6.png\"),\n pygame.image.load(f\"{dir_path}/flame-anim/f_7.png\"),\n pygame.image.load(f\"{dir_path}/flame-anim/f_8.png\")\n]\n\n\nrescaled = scale(flame_anim, 64, 140)\n\n\nheight = rescaled[0].get_height()\nwidth = rescaled[0].get_width()\n\nFLAME = {\n \"anim\": rescaled,\n \"height\": height,\n \"width\": width\n}\n"
},
{
"alpha_fraction": 0.577372133731842,
"alphanum_fraction": 0.59392911195755,
"avg_line_length": 33.639705657958984,
"blob_id": "5cb36c6cd3d58f5c615d99487e3a1854fe38775c",
"content_id": "58d402cb01db4d5f397014392c8f423e502aed3a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4711,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 136,
"path": "/lib/game_controller/controller.py",
"repo_name": "vladyslm/wizards-clash",
"src_encoding": "UTF-8",
"text": "import pygame\nfrom configs.gameconf import PROBLEM_SCREEN_OFFSET_Y, SW, SH, GAME_FPS\nfrom lib.ui.icon import player_ui, enemy_ui\nfrom lib.ui.info import lvl_info\nfrom modules.score_state import score_state\n\n\nFLAME_INIT_POS = SW / 2\nPOINTS_FOR_ANSWER = 1\nMAX_SCORE = 100\nLIVES_COUNT = 3\nFLAME_STEP = 30\nSCORE = 100\n\n\nclass Controller:\n def __init__(self, player_wizard, enemy_wizard, cast, problem_controller, screen):\n self.player = player_wizard\n self.enemy = enemy_wizard\n self.magic_cast = cast\n self.problem_controller = problem_controller\n self.screen = screen\n self.flame_x_pos = FLAME_INIT_POS\n\n self.player_score = 0\n self.players_power = 0\n self.enemy_power = 0\n self.cur_lvl = 1\n\n self.players_life_left = LIVES_COUNT\n self.enemys_life_left = LIVES_COUNT\n\n self.problem_controller.set_problem()\n self.game_font = pygame.font.SysFont(\"arial\", 50, True)\n\n self.player_input_controller = None\n\n self.delay = GAME_FPS * 2 # 2sec\n self.pause = 0\n\n self.gameover = False\n self.stopgame = False\n\n def add_player_controller(self, input_controller):\n self.player_input_controller = input_controller\n\n def validate_input(self, answer): # answer - tuple(is_player, is_answer_correct)\n if answer[0]:\n if answer[1]:\n self.player_score += SCORE\n self.players_power += POINTS_FOR_ANSWER\n self.flame_x_pos += FLAME_STEP\n else:\n self.players_life_left -= 1\n self.flame_x_pos -= FLAME_STEP\n else:\n if answer[1]:\n self.enemy_power += POINTS_FOR_ANSWER\n self.flame_x_pos -= FLAME_STEP\n else:\n self.enemys_life_left -= 1\n self.flame_x_pos += FLAME_STEP\n self.problem_controller.set_problem()\n\n def get_flame_pos(self):\n return self.flame_x_pos\n\n def update_game_state(self):\n self.state_controller()\n\n def playing(self):\n player_ui.display_ui(self.screen, self.players_life_left, self.players_power, 10)\n enemy_ui.display_ui(self.screen, self.enemys_life_left, self.enemy_power, 10)\n users_cur_input = 
self.player_input_controller.get_str_input()\n lvl_info.display_info(self.screen, self.player_score, self.cur_lvl)\n problem_label = self.game_font.render(\n f\"{self.problem_controller.get_problem()} = {users_cur_input}\", True, (238, 231, 231)\n )\n p_label_x_offset = problem_label.get_width() // 2\n self.screen.blit(problem_label, (SW / 2 - p_label_x_offset, PROBLEM_SCREEN_OFFSET_Y))\n self.magic_cast.draw_cast()\n self.player.action(self.player.do_fight)\n self.enemy.action(self.enemy.do_fight)\n self.magic_cast.draw_flame(self.magic_cast.flame_anim, self.get_flame_pos())\n\n def player_win(self):\n lvl_info.display_info(self.screen, self.player_score, self.cur_lvl)\n self.player.action(self.player.do_idle)\n player_ui.display_ui(self.screen, self.players_life_left, self.players_power, 10)\n if self.delay <= self.pause:\n self.reset()\n self.pause += 1\n\n def player_lose(self):\n self.enemy.action(self.enemy.do_idle)\n enemy_ui.display_ui(self.screen, self.enemys_life_left, self.enemy_power, 10)\n self.gameover = True\n\n def get_game_status(self):\n if self.gameover:\n return \"gameover\"\n if self.players_life_left <= 0 or self.enemy_power >= 10 or self.flame_x_pos <= SW // 2 - 350:\n return \"lose\"\n if self.enemys_life_left <= 0 or self.players_power >= 10 or self.flame_x_pos >= SW // 2 + 350:\n return \"win\"\n return \"playing\"\n\n def reset(self):\n self.flame_x_pos = FLAME_INIT_POS\n\n self.players_power = 0\n self.enemy_power = 0\n self.cur_lvl += 1\n\n self.players_life_left = LIVES_COUNT\n self.enemys_life_left = LIVES_COUNT\n\n self.pause = 0\n\n def state_controller(self):\n stages = {\n \"lose\": self.player_lose,\n \"win\": self.player_win,\n \"playing\": self.playing,\n \"gameover\": self.game_over\n }\n state = self.get_game_status()\n stages[state]()\n\n def game_over(self):\n label = self.game_font.render(f\"Game Over\", True, (238, 231, 231))\n self.screen.blit(label, (SW // 2 - label.get_width() // 2, SH // 2 - label.get_height() // 
2))\n if GAME_FPS / 4 <= self.pause:\n score_state.add_score(self.player_score)\n self.stopgame = True\n self.pause += 1\n"
},
{
"alpha_fraction": 0.7123494148254395,
"alphanum_fraction": 0.724397599697113,
"avg_line_length": 54.33333206176758,
"blob_id": "1ac81a79fd7c89c4828bec16f75d5fdaed97dec5",
"content_id": "2a95f1edde3082ea869ebaaf6cee11599a365ef6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 664,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 12,
"path": "/modules/menu_ui_el.py",
"repo_name": "vladyslm/wizards-clash",
"src_encoding": "UTF-8",
"text": "import pygame\nfrom configs.gameconf import SW, SH, SPACE_BETWEEN_BTN\n\nnew_game_sprite = pygame.image.load('assets/graphic/menu/newgame_btn.png').convert()\nnewgame_rect = new_game_sprite.get_rect()\nnewgame_rect.center = (SW / 2, SH / 2)\nscoreboard_sprite = pygame.image.load('assets/graphic/menu/scoreboard_btn.png').convert()\nsboard_rect = scoreboard_sprite.get_rect()\nsboard_rect.center = (SW / 2, SH / 2 + SPACE_BETWEEN_BTN + scoreboard_sprite.get_height())\nexit_sprite = pygame.image.load('assets/graphic/menu/exit_btn.png').convert()\nexit_rect = exit_sprite.get_rect()\nexit_rect.center = (SW / 2, SH / 2 + SPACE_BETWEEN_BTN * 2 + exit_sprite.get_height() * 2)\n"
},
{
"alpha_fraction": 0.5959596037864685,
"alphanum_fraction": 0.6060606241226196,
"avg_line_length": 25.33333396911621,
"blob_id": "b1b6e232944d1d61e695dec5fc6266f3b18023e9",
"content_id": "b6703d28e2ccebbe90f126df67509a82e6619111",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 396,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 15,
"path": "/lib/input_controller/input_validator.py",
"repo_name": "vladyslm/wizards-clash",
"src_encoding": "UTF-8",
"text": "\ndef validate(input_list, correct_answer):\n users_answer = \"\"\n for num in input_list:\n users_answer = users_answer + num\n try:\n users_answer = int(users_answer)\n users_answer = True if users_answer == correct_answer else False\n except ValueError:\n users_answer = False\n except:\n quit()\n return users_answer\n\n\n# print(validate([\"2\", \"3\"], 23))\n"
},
{
"alpha_fraction": 0.5352112650871277,
"alphanum_fraction": 0.5539906024932861,
"avg_line_length": 32.6315803527832,
"blob_id": "7b9b22340e4200966348afddf71c0e6762b2306b",
"content_id": "d408e2c94af71c44f0b842db838b8270552d6a04",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1278,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 38,
"path": "/game_screens/add_score.py",
"repo_name": "vladyslm/wizards-clash",
"src_encoding": "UTF-8",
"text": "from lib.text_input import TextInput\nfrom configs.gameconf import GAME_FPS\nfrom modules.score_state import score_state\nfrom lib.utils import save_score\nfrom setup import *\n\ntextinput = TextInput()\n\n\nrec_bg = pygame.Rect(SW // 2 - 150, SH // 2 - 75, 300, 150)\nlabel = FONT_SMALL.render(f\"Enter your name:\", True, BLACK)\n\n\ndef add_score():\n running = True\n while running:\n SCREEN.blit(GAME_BG, (0, 0))\n pygame.draw.rect(SCREEN, GREY, rec_bg)\n SCREEN.blit(label, (SW / 2 - label.get_width() // 2, SH / 2 - label.get_height() // 2 - 50))\n events = pygame.event.get()\n for event in events:\n if event.type == pygame.QUIT:\n exit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RETURN:\n data = {\n \"name\": textinput.get_text(),\n \"score\": score_state.get_score() # TODO: should separate get_score and reset methods\n }\n\n save_score(data)\n running = False\n\n textinput.update(events)\n SCREEN.blit(textinput.get_surface(), (SW // 2 - textinput.get_surface().get_width() // 2, SH // 2))\n\n pygame.display.update()\n clock.tick(GAME_FPS)\n"
},
{
"alpha_fraction": 0.6936089992523193,
"alphanum_fraction": 0.701127827167511,
"avg_line_length": 28.55555534362793,
"blob_id": "4195296d97145fe76a4fd4943431b8c25c38ee5a",
"content_id": "f63ebf515655db65951527576fedabc887c10ee0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 532,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 18,
"path": "/assets/gameassets/icon/gameobj.py",
"repo_name": "vladyslm/wizards-clash",
"src_encoding": "UTF-8",
"text": "import pygame\nimport os\n\ndir_path = os.path.dirname(os.path.abspath(__file__))\n\nicon = pygame.image.load(f\"{dir_path}/sprites/icon.png\")\npower_filled = pygame.image.load(f\"{dir_path}/sprites/power_filled.png\")\npower_unfilled = pygame.image.load(f\"{dir_path}/sprites/power_unfilled.png\")\nflask = pygame.image.load(f\"{dir_path}/sprites/flask.png\")\n\nresized_flask = pygame.transform.scale(flask, (30, 35))\n\nICON = {\n \"icon\": icon,\n \"power_unfilled\": power_unfilled,\n \"power_filled\": power_filled,\n \"flask\": resized_flask\n}\n"
},
{
"alpha_fraction": 0.6436170339584351,
"alphanum_fraction": 0.6670212745666504,
"avg_line_length": 27.484848022460938,
"blob_id": "12585747a145cf022e00a10fda9b3f73108c00e8",
"content_id": "60794bb6b35533bbc8a0dc64fb62ceeee4cfd9af",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 940,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 33,
"path": "/assets/gameassets/bluewizard/gameobj.py",
"repo_name": "vladyslm/wizards-clash",
"src_encoding": "UTF-8",
"text": "import pygame\nimport os\nfrom lib.utils import scale\n\ndir_path = os.path.dirname(os.path.abspath(__file__))\n\nidle_anim = [\n pygame.image.load(f\"{dir_path}/idle-anim/idle_1.png\"),\n pygame.image.load(f\"{dir_path}/idle-anim/idle_2.png\"),\n pygame.image.load(f\"{dir_path}/idle-anim/idle_3.png\"),\n pygame.image.load(f\"{dir_path}/idle-anim/idle_4.png\")\n]\nfight_anim = [\n # pygame.image.load(f\"{dir_path}/fight-anim/fight_1.png\"),\n # pygame.image.load(f\"{dir_path}/fight-anim/fight_2.png\"),\n pygame.image.load(f\"{dir_path}/fight-anim/fight_3.png\"),\n pygame.image.load(f\"{dir_path}/fight-anim/fight_4.png\"),\n]\n\n\nrescaled_fight_anim = scale(fight_anim, 218, 243)\nrescaled_idle_anim = scale(idle_anim, 218, 243)\n\nheight = rescaled_fight_anim[0].get_height()\nwidth = rescaled_fight_anim[0].get_width()\n\n\nBLUE_WIZARD = {\n \"idle\": rescaled_idle_anim,\n \"fight\": rescaled_fight_anim,\n \"height\": height,\n \"width\": width\n}\n"
},
{
"alpha_fraction": 0.5570470094680786,
"alphanum_fraction": 0.563758373260498,
"avg_line_length": 20.285715103149414,
"blob_id": "dfa009b1f57bfbcb6131c7a9699f3f44b508778f",
"content_id": "9200390c1736b52aeb3bb6bd2fc6a6098118a5be",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 298,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 14,
"path": "/lib/score_controller.py",
"repo_name": "vladyslm/wizards-clash",
"src_encoding": "UTF-8",
"text": "class ScoreController:\n def __init__(self):\n self.last_score = 0\n\n def add_score(self, score):\n self.last_score = score\n\n def get_score(self):\n tmp = self.last_score\n self.reset_score()\n return tmp\n\n def reset_score(self):\n self.last_score = 0\n"
},
{
"alpha_fraction": 0.6093906164169312,
"alphanum_fraction": 0.6193805932998657,
"avg_line_length": 30.28125,
"blob_id": "d685c45d8701b24a5623e7a8bf3cf95fd9fed80f",
"content_id": "7469a481545c3d1d31a6aa40f15c750c9b0739dd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1001,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 32,
"path": "/lib/input_controller/npc_input_controller.py",
"repo_name": "vladyslm/wizards-clash",
"src_encoding": "UTF-8",
"text": "from time import time, perf_counter\nfrom random import randrange\nfrom lib.input_controller.input_controller import InputController\nfrom configs.gameconf import G_DIFFICULTY, ERROR_CHANCE\n\ndifficulty = G_DIFFICULTY[1]\n\n\nclass NpcInputController(InputController):\n def __init__(self, game_controller, is_player=True):\n super().__init__(game_controller, is_player)\n self.count_down = None\n self.start = None\n\n def think(self):\n pass\n\n def set_countdown(self):\n self.count_down = randrange(difficulty[0], difficulty[1])\n self.start = time()\n\n def is_ready(self):\n cur_time = time()\n diff = cur_time - self.start\n if diff >= self.count_down:\n coef = randrange(0, 100)\n print(f\"rand coef: {coef}\")\n chance = 100 - ERROR_CHANCE\n print(f\"chance: {chance}\")\n self.isCorrect = True if coef <= chance else False\n self.notify_controller()\n self.set_countdown()\n"
},
{
"alpha_fraction": 0.544672429561615,
"alphanum_fraction": 0.5466578602790833,
"avg_line_length": 27.50943374633789,
"blob_id": "451b7886a82a6c2abebc7e9e906b655e58d6ab53",
"content_id": "ec04a050e5a33d4500671c62e193ceed517661a5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1511,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 53,
"path": "/game_screens/menu.py",
"repo_name": "vladyslm/wizards-clash",
"src_encoding": "UTF-8",
"text": "from pygame.locals import *\nfrom configs.gameconf import *\nfrom game_screens.game import game\nfrom game_screens.add_score import add_score\nfrom game_screens.scoreboard import scoreboard\nfrom modules.menu_ui_el import *\nfrom setup import *\n\n\ndef menu():\n click_event = False\n while True:\n SCREEN.blit(MENU_GB, (0, 0))\n\n new_game_btn = newgame_rect\n scoreboard_btn = sboard_rect\n exit_btn = exit_rect\n\n m_x, m_y = pygame.mouse.get_pos()\n if new_game_btn.collidepoint((m_x, m_y)):\n if click_event:\n print(\"new game clicked\")\n game()\n print(\"end game\")\n # print(f\"Your score: {score_state.get_score()}\")\n add_score()\n scoreboard()\n if scoreboard_btn.collidepoint((m_x, m_y)):\n if click_event:\n print(\"scoreboard clicked\")\n scoreboard()\n if exit_btn.collidepoint((m_x, m_y)):\n if click_event:\n pygame.quit()\n\n click_event = False\n\n SCREEN.blit(new_game_sprite, newgame_rect)\n SCREEN.blit(scoreboard_sprite, sboard_rect)\n SCREEN.blit(exit_sprite, exit_rect)\n\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n if event.type == MOUSEBUTTONDOWN:\n if event.button == 1:\n click_event = True\n\n pygame.display.update()\n clock.tick(GAME_FPS)\n\n\nmenu()\n"
},
{
"alpha_fraction": 0.5947712659835815,
"alphanum_fraction": 0.6705882549285889,
"avg_line_length": 15.255319595336914,
"blob_id": "5bbf064dc0329a6a611fd984b58cd5259c6e94e4",
"content_id": "1bfa9532d250207e431015417b03f4f06689ee4a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 765,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 47,
"path": "/configs/gameconf.py",
"repo_name": "vladyslm/wizards-clash",
"src_encoding": "UTF-8",
"text": "# screen resolution\nSW = 1280\nSH = 720\n\n# game FPS\nGAME_FPS = 60\n\n# lvl_1 conf\n# offsets for wizards\nOFFSET_X = 400\nOFFSET_Y = 25\n\n# offsets for spells\nCAST_OFFSET_Y = 185\nFLAME_OFFSET_Y = 280\n\n# offsets for the problem screen\nPROBLEM_SCREEN_OFFSET_Y = 200\n\nGAME_DIFFICULTY = 3\nG_DIFFICULTY = {\n 1: (7, 10) # 1 - easy\n}\n\nERROR_CHANCE = 20 # error chance is 20%\n\n# UI\nICON_OFFSET_X = 30\nICON_OFFSET_Y = 30\nPOWER_OFFSET_X = 30\nPOWER_OFFSET_Y = 150\nFLASK_OFFSET_X = 150\nFLASK_OFFSET_Y = 100\nSCORE_OFFSET_Y = 60\nLVL_OFFSET_Y = 30\n\n# UI menu\nSPACE_BETWEEN_BTN = 20\n\n# file name\nSCOREBOARD_FILE_NAME = \"scoreboard.json\"\n\n# Scoreboard structure (json) -> {\"players\": [{\"name\": \"str\", \"score\": int}]}\nSBOARD_ROOT_KEY = \"players\"\n\n# UI scoreboard\nSBOARD_OFFSET_Y = 200\n\n"
},
{
"alpha_fraction": 0.6737657189369202,
"alphanum_fraction": 0.6747337579727173,
"avg_line_length": 37.98113250732422,
"blob_id": "819a43c030f430a2987fd63cd879f17dfe0c0d08",
"content_id": "81060a8ab795dbfced70e2973ac25e5ce3ec0f97",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2066,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 53,
"path": "/game_screens/game.py",
"repo_name": "vladyslm/wizards-clash",
"src_encoding": "UTF-8",
"text": "from assets.gameassets.bluewizard.gameobj import BLUE_WIZARD\nfrom assets.gameassets.flame.gameobj import FLAME\nfrom assets.gameassets.cast.gameobj import CAST\n\nfrom lib.wizard.abstract_wizard import AbstractWizard\nfrom lib.magic_cast.cast import MagicCast\nfrom lib.game_controller.controller import Controller\nfrom lib.input_controller.input_controller import InputController\nfrom lib.input_controller.npc_input_controller import NpcInputController\n\nfrom modules.problem_controller import problem_controller\n\nfrom configs.gameconf import *\nfrom setup import *\n\n\ndef game():\n fps = GAME_FPS\n\n p = AbstractWizard(BLUE_WIZARD, SCREEN)\n e = AbstractWizard(BLUE_WIZARD, SCREEN, True)\n flame = MagicCast(FLAME, CAST, SCREEN)\n\n controller = Controller(p, e, flame, problem_controller, SCREEN)\n player_input_controller = InputController(controller)\n npc = NpcInputController(controller, False)\n problem_controller.add_input_controllers(player_input_controller, npc)\n controller.add_player_controller(player_input_controller)\n npc.set_countdown()\n\n def update_screen():\n SCREEN.blit(GAME_BG, (0, 0))\n controller.update_game_state()\n pygame.display.update()\n\n while not controller.stopgame:\n npc.is_ready()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n controller.stopgame = True\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE:\n player_input_controller.add_input(player_input_controller.raw_input)\n player_input_controller.get_keys_names()\n player_input_controller.validate_input(problem_controller.get_answer())\n elif event.key == pygame.K_BACKSPACE:\n player_input_controller.clean_input()\n elif event.key == pygame.K_q:\n controller.stopgame = True\n else:\n player_input_controller.add_raw_input(event.key)\n update_screen()\n clock.tick(fps)\n"
},
{
"alpha_fraction": 0.6802842020988464,
"alphanum_fraction": 0.7282415628433228,
"avg_line_length": 32.117645263671875,
"blob_id": "590f3536573b98644b35bef8480a6a0a1061c28a",
"content_id": "9a1ac4cdbadef8dc8148491e5fbd344fa84db223",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 563,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 17,
"path": "/setup.py",
"repo_name": "vladyslm/wizards-clash",
"src_encoding": "UTF-8",
"text": "import pygame\nfrom configs.gameconf import SW, SH\n\npygame.init()\nclock = pygame.time.Clock()\n\nSCREEN = pygame.display.set_mode((SW, SH))\npygame.display.set_caption(\"WizardsClash\")\nGREY = (211, 203, 202)\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nfont = pygame.font.SysFont(\"arial\", 50, True)\nFONT_SMALL = pygame.font.SysFont(\"arial\", 28, True)\n\nMENU_GB = pygame.image.load('assets/graphic/menu/wizards_clash.png')\nGAME_BG = pygame.image.load('./assets/graphic/background/BG1.png')\nSCOREBOARD_BG = pygame.image.load('assets/graphic/scoreboard/scoreboard_bgv2.png')\n"
},
{
"alpha_fraction": 0.5758895874023438,
"alphanum_fraction": 0.5766158103942871,
"avg_line_length": 27.102041244506836,
"blob_id": "1731d62e866f126adbe935a43a931a5b4df52ed6",
"content_id": "c717e82e3ba35e2cacfd2d40789fe06deda1d29e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1377,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 49,
"path": "/lib/input_controller/input_controller.py",
"repo_name": "vladyslm/wizards-clash",
"src_encoding": "UTF-8",
"text": "import pygame\nfrom lib.input_controller.input_validator import validate\n\n\nclass InputController:\n def __init__(self, game_controller, is_player=True):\n self.input = None\n self.keys = []\n self.game_controller = game_controller\n self.isCorrect = None\n\n self.is_player = is_player\n self.raw_input = []\n\n def get_str_input(self):\n input_str = \"?\"\n if len(self.raw_input) != 0:\n input_str = \"\"\n for key_code in self.raw_input:\n key_name = pygame.key.name(key_code)\n input_str += key_name\n return input_str\n\n def get_keys_names(self):\n keys = []\n for key_code in self.input:\n key_name = pygame.key.name(key_code)\n keys.append(key_name)\n self.keys = keys\n\n def add_input(self, input_key):\n # self.input.append(input_key)\n self.input = input_key\n\n def validate_input(self, correct_answer):\n self.isCorrect = validate(self.keys, correct_answer)\n self.notify_controller()\n self.clean_input()\n\n def notify_controller(self):\n self.game_controller.validate_input((self.is_player, self.isCorrect))\n\n def clean_input(self):\n self.input = []\n self.keys = []\n self.raw_input = []\n\n def add_raw_input(self, char):\n self.raw_input.append(char)\n"
},
{
"alpha_fraction": 0.5777521133422852,
"alphanum_fraction": 0.5800616145133972,
"avg_line_length": 36.11428451538086,
"blob_id": "1fe33defd6710003f4d55c1efd814c9d62c6f0bf",
"content_id": "4a85af7af3ffb8c2e94dc484b9854b2ad7a8d29e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2598,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 70,
"path": "/lib/ui/icon.py",
"repo_name": "vladyslm/wizards-clash",
"src_encoding": "UTF-8",
"text": "from configs.gameconf import *\nfrom lib.utils import flip_sprite\nfrom assets.gameassets.icon.gameobj import ICON\n\n\nclass IconController:\n def __init__(self, game_obj, is_player=True):\n self.icon = game_obj[\"icon\"]\n self.flask = game_obj[\"flask\"]\n self.filled_power = game_obj[\"power_filled\"]\n self.unfilled_power = game_obj[\"power_unfilled\"]\n self.player = is_player\n\n if not self.player:\n self.icon = flip_sprite(self.icon)\n\n def get_icon_pos(self):\n if self.player:\n return ICON_OFFSET_X, ICON_OFFSET_Y\n pos_x = SW - ICON_OFFSET_X - self.icon.get_width()\n return pos_x, ICON_OFFSET_Y\n\n def get_power_pos(self):\n if self.player:\n return POWER_OFFSET_X, POWER_OFFSET_Y\n pos_x = SW - POWER_OFFSET_X - self.filled_power.get_width()\n return pos_x, POWER_OFFSET_Y\n\n def get_flask_pos(self):\n if self.player:\n return FLASK_OFFSET_X, FLASK_OFFSET_Y\n pos_x = SW - FLASK_OFFSET_X - self.flask.get_width()\n return pos_x, FLASK_OFFSET_Y\n\n def draw_icon(self, screen):\n screen.blit(self.icon, self.get_icon_pos())\n\n def draw_power(self, screen, filled_cell, max_cell):\n offset_between_cells = 8\n filled_cell_count = 0\n init_pos_x, init_pos_y = self.get_power_pos()\n for i in range(1, max_cell + 1):\n if self.player:\n pos_x = init_pos_x + (offset_between_cells * i) + (self.unfilled_power.get_width() * i)\n else:\n pos_x = init_pos_x - (offset_between_cells * i) - (self.unfilled_power.get_width() * i)\n if filled_cell_count < filled_cell:\n screen.blit(self.filled_power, (pos_x, init_pos_y))\n else:\n screen.blit(self.unfilled_power, (pos_x, init_pos_y))\n filled_cell_count += 1\n\n def draw_flask(self, screen, life_left):\n offset_between = 8\n init_pos_x, init_pos_y = self.get_flask_pos()\n for i in range(life_left):\n if self.player:\n pos_x = init_pos_x + (offset_between * i) + (self.flask.get_width() * i)\n else:\n pos_x = init_pos_x - (offset_between * i) - (self.flask.get_width() * i)\n screen.blit(self.flask, (pos_x, 
init_pos_y))\n\n def display_ui(self, screen, left_life, filled_cell, max_cell):\n self.draw_icon(screen)\n self.draw_flask(screen, left_life)\n self.draw_power(screen, filled_cell, max_cell)\n\n\nplayer_ui = IconController(ICON)\nenemy_ui = IconController(ICON, False)\n"
},
{
"alpha_fraction": 0.8292682766914368,
"alphanum_fraction": 0.8292682766914368,
"avg_line_length": 26.33333396911621,
"blob_id": "7cbe33d1023e9bb607ea7959a991bbe7a256ba6f",
"content_id": "206d12dc5e3124670b438a01892382db87304fd6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 82,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 3,
"path": "/modules/score_state.py",
"repo_name": "vladyslm/wizards-clash",
"src_encoding": "UTF-8",
"text": "from lib.score_controller import ScoreController\n\nscore_state = ScoreController()\n"
},
{
"alpha_fraction": 0.5661691427230835,
"alphanum_fraction": 0.5910447835922241,
"avg_line_length": 29.454545974731445,
"blob_id": "afbb24ffe9a140ea950d4791638c01e73325e53f",
"content_id": "c1e19937449dd80257654729e480ae104a518bb8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1005,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 33,
"path": "/lib/ui/info.py",
"repo_name": "vladyslm/wizards-clash",
"src_encoding": "UTF-8",
"text": "import pygame\nfrom configs.gameconf import *\npygame.font.init()\n\n\nclass LevelInfo:\n def __init__(self):\n self.info_font = pygame.font.SysFont(\"arial\", 30, True, False)\n\n def get_lvl_pos(self, label):\n x_pos = SW // 2 - label.get_width() // 2\n return x_pos, LVL_OFFSET_Y\n\n def get_score_pos(self, label):\n x_pos = SW // 2 - label.get_width() // 2\n return x_pos, SCORE_OFFSET_Y\n\n def draw_lvl(self, screen, lvl):\n label = self.info_font.render(f\"Level: {lvl}\", True, (255, 255, 255))\n # pos_x, pos_y = self.get_lvl_pos()\n # pos_x = pos_x - label.get_width() // 2\n screen.blit(label, self.get_lvl_pos(label))\n\n def draw_score(self, screen, score):\n label = self.info_font.render(f\"Score: {score}\", True, (255, 255, 255))\n screen.blit(label, self.get_score_pos(label))\n\n def display_info(self, screen, score, lvl):\n self.draw_lvl(screen, lvl)\n self.draw_score(screen, score)\n\n\nlvl_info = LevelInfo()\n"
}
] | 25 |
harol-lml/soundProyect
|
https://github.com/harol-lml/soundProyect
|
5c1027f0acca592ba3d59ad8c4ec3373df774120
|
fb99e6ed2df8e3bf1750c94fc57cc98d2231148b
|
e8cf6f746cd0e0342b5d7d06634ffff9e35a2a4d
|
refs/heads/master
| 2023-07-04T18:49:48.074932 | 2021-08-23T00:30:17 | 2021-08-23T00:30:17 | 386,386,802 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5943262577056885,
"alphanum_fraction": 0.6241135001182556,
"avg_line_length": 21.879121780395508,
"blob_id": "05ced31aebe15805682198928f96fabee0ceb19b",
"content_id": "eb49ca4eeeb454410fa7832db97080e5b7d84208",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2115,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 91,
"path": "/AdruinoPrueba/Wifi/wifi/wifi.ino",
"repo_name": "harol-lml/soundProyect",
"src_encoding": "UTF-8",
"text": "\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n#include <WiFi.h>\n#include <WiFiClient.h>\n#include <WebServer.h>\n#include <ESPmDNS.h>\n\nconst char* ssid = \"PRUEBACLARO\";/*\"HOME-5B22\";*/\nconst char* password = \"12345678#\";/*\"5742634166\";*/\nconst int analogInPin = 34;\n\nint sensorValue = 0;\nint outputValue = 0; \n\nWebServer server(80);\n\nvoid handleRoot() {\n //digitalWrite(led, 1);\n sensorValue = analogRead(analogInPin);\n double lect=(sensorValue+83.2073)/11.003;\n String msn = String(lect);\n Serial.println(msn);\n server.send(200, \"text/plain\", msn);\n //digitalWrite(led, 0);\n}\n\nvoid handleNotFound() {\n //digitalWrite(led, 1);\n String message = \"File Not Found\\n\\n\";\n message += \"URI: \";\n message += server.uri();\n message += \"\\nMethod: \";\n message += (server.method() == HTTP_GET) ? \"GET\" : \"POST\";\n message += \"\\nArguments: \";\n message += server.args();\n message += \"\\n\";\n for (uint8_t i = 0; i < server.args(); i++) {\n message += \" \" + server.argName(i) + \": \" + server.arg(i) + \"\\n\";\n }\n server.send(404, \"text/plain\", message);\n //digitalWrite(led, 0);\n}\n\nvoid setup() {\n\n Serial.begin(115200);\n pinMode(analogInPin,INPUT);\n adcAttachPin(analogInPin);\n analogReadResolution(11);\n analogSetAttenuation(ADC_6db);\n WiFi.mode(WIFI_STA);\n WiFi.begin(ssid, password);\n Serial.println(\"\");\n\n while (WiFi.status() != WL_CONNECTED) {\n delay(500);\n Serial.print(\".\");\n }\n Serial.println(\"\");\n Serial.print(\"Connected to \");\n Serial.println(ssid);\n Serial.print(\"IP address: \");\n Serial.println(WiFi.localIP());\n\n if (MDNS.begin(\"esp32\")) {\n Serial.println(\"MDNS responder started\");\n }\n\n server.on(\"/\", handleRoot);\n\n server.on(\"/inline\", []() {\n server.send(200, \"text/plain\", \"this works as well\");\n });\n\n server.onNotFound(handleNotFound);\n\n server.begin();\n Serial.println(\"HTTP server started\");\n\n}\n\nvoid loop(){\n 
server.handleClient();\n sensorValue = analogRead(analogInPin);\n \n // change the analog out value:\n Serial.print(\"sensor = \");\n Serial.print(sensorValue);\n Serial.print(\"\\t out.db= \");\n Serial.println((sensorValue+83.2073)/11.003);\n delay(100);\n\n}\n"
},
{
"alpha_fraction": 0.6137894988059998,
"alphanum_fraction": 0.6459305286407471,
"avg_line_length": 20.6235294342041,
"blob_id": "200a39d51a22e21fffc0d220b36900cd6819e18d",
"content_id": "d3669ba51b5ab73d900f3c30890e83a5f03569dd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1929,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 85,
"path": "/proSond.py",
"repo_name": "harol-lml/soundProyect",
"src_encoding": "UTF-8",
"text": "from flask import Flask, request, render_template\r\nfrom manejador_db import manejador_db\r\nimport math\r\nimport random\r\nimport time\r\nimport requests\r\n\r\napp = Flask(__name__)\r\nman=manejador_db()\r\n\r\[email protected](\"/\")\r\ndef home():\r\n\ttit=\"Monitoreo de Sonido\"\r\n\treturn render_template(\"index2.html\",tit=tit)\r\n\t\r\n\t\r\[email protected](\"/aleatorio\")\r\ndef aleatorio():\r\n\tcons=0\r\n\tconstr=\"\"\r\n\tresponse = requests.get('Http://192.168.0.9/') \r\n\t#response = requests.get('http://192.168.1.112/') \r\n\testampa=time.time()\r\n\tresp=response.text\r\n\tresponse.close()\r\n\tses=str(request.args[\"userSesion\"])\r\n\tif float(resp) <=30:\r\n\t\tcons=1 #bajo\r\n\t\tconstr=\"Bajo\"\r\n\tif (float(resp)>30) and (float(resp)<=50):\r\n\t\tcons=2 #normal\r\n\t\tconstr=\"Normal\"\r\n\tif (float(resp)>50) and (float(resp)<=75):\r\n\t\tcons=3 #Considerable\r\n\t\tconstr=\"Cosiderable\"\r\n\tif (float(resp)>75) and (float(resp)<=100):\r\n\t\tcons=4 #Alto\r\n\t\tconstr=\"Alto\"\r\n\tif (float(resp)>100) and (float(resp)<=120.5):\r\n\t\tcons=5 #Muy alto\r\n\t\tconstr=\"Muy alto\"\r\n\tif (float(resp)>120.5):\r\n\t\tcons=6 #Umbral de dolor\r\n\t\tconstr=\"Umbral de dolor\"\r\n\tprint(response.text)\r\n\tprint(estampa)\r\n\tprint(ses)\r\n\tman.agregar_num(estampa, resp, ses, constr, cons)\r\n\treturn \"{\\\"num\\\":\"+resp+\",\\\"cadena\\\":\"+str(cons)+\"}\"\r\n\r\[email protected](\"/consultar\")\r\ndef consultar():\r\n\treturn man.consultarTodo()\r\n\r\[email protected](\"/buscar\")\r\ndef buscar():\r\n\tsesion=request.args['busqueda']\r\n\treturn man.consultarSesion(sesion)\r\n\t\r\[email protected](\"/sesion\")\r\ndef sesion():\r\n\treturn man.initSesion()\r\n\t\r\[email protected](\"/centroide\")\r\ndef centroide():\r\n\tsesion=request.args['busqueda']\r\n\treturn man.analisis(sesion)\r\n\t\r\[email protected](\"/valorxy\")\r\ndef valxy():\r\n\tsesion=request.args['busqueda']\r\n\treturn man.val(sesion)\r\n\t\r\[email 
protected](\"/c1\")\r\ndef c1():\r\n\tsesion=request.args['busqueda']\r\n\treturn man.centro1(sesion)\r\n\t\r\[email protected](\"/c2\")\r\ndef c2():\r\n\tsesion=request.args['busqueda']\r\n\treturn man.centro2(sesion)\r\n\r\nif __name__ == \"__main__\":\r\n\tapp.run(\"0.0.0.0\")\r\n\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.5696594715118408,
"alphanum_fraction": 0.5849571824073792,
"avg_line_length": 29.739885330200195,
"blob_id": "50ae0264ec66313ebd4093546e3494dd787a580b",
"content_id": "047ac37391df72f1ffe3562f08ddca8a248d05e1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5496,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 173,
"path": "/manejador_db.py",
"repo_name": "harol-lml/soundProyect",
"src_encoding": "UTF-8",
"text": "from tinydb import TinyDB, where\r\nfrom sklearn.cluster import KMeans\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n\r\nclass manejador_db: \r\n\t\r\n\tdef agregar_num(self, estampa, numero, sesion, valor, nivelNum):\r\n\t\tdb=TinyDB(\"numeros.json\")\r\n\t\tlista=db.search(where(\"estampa\")==estampa)\r\n\t\tif(len(lista)==0):\r\n\t\t\tdb.insert({\"estampa\":estampa, \"numero\":numero,\"nivel\": valor,\"nNum\": nivelNum,\"sesion\": sesion})\r\n\t\t\t\r\n\tdef consultarTodo(self):\r\n\t\tdb=TinyDB(\"numeros.json\")\r\n\t\tlista=db.all()\r\n\t\tcad='<table border=\"2\">'\r\n\t\tfor elem in lista:\r\n\t\t\tcad=cad+\"<tr><td>Estampa</td><td>\"+str(elem[\"estampa\"])+\"</td></tr>\"\r\n\t\t\tcad=cad+\"<tr><td>Valor dB</td><td>\"+str(elem[\"numero\"])+\"</td></tr>\"\r\n\t\t\tcad=cad+\"<tr><td>Nivel</td><td>\"+str(elem[\"nivel\"])+\"</td></tr>\"\r\n\t\t\tcad=cad+\"<tr><td>Sesion</td><td>\"+str(elem[\"sesion\"])+\"</td></tr>\"\r\n\t\t\tcad=cad+\"<tr><td bgcolor='black' colspan='2'></td></tr>\"\r\n\t\tcad=cad+\"</table>\"\r\n\t\treturn cad\r\n\t\r\n\tdef initSesion(self):\r\n\t\tdb=TinyDB(\"numeros.json\")\r\n\t\tlista=db.all()\r\n\t\tnum=0\r\n\t\tfor elem in lista:\r\n\t\t\tif int(elem['sesion']) > num:\r\n\t\t\t\tnum=int(elem['sesion'])\r\n\r\n\t\treturn str(num)\r\n\t\t\t\r\n\tdef consultarSesion(self, sesion):\r\n\t\tdb=TinyDB(\"numeros.json\")\r\n\t\tlista=db.search(where(\"sesion\")==sesion)\r\n\t\tcad=\"<table border='2'>\"\r\n\t\tfor elem in lista:\r\n\t\t\tcad=cad+\"<tr><td>Estampa</td><td>\"+str(elem[\"estampa\"])+\"</td></tr>\"\r\n\t\t\tcad=cad+\"<tr><td>Valor dB</td><td>\"+str(elem[\"numero\"])+\"</td></tr>\"\r\n\t\t\tcad=cad+\"<tr><td>Nivel</td><td>\"+str(elem[\"nivel\"])+\"</td></tr>\"\r\n\t\t\tcad=cad+\"<tr><td>Sesion</td><td>\"+str(elem[\"sesion\"])+\"</td></tr>\"\r\n\t\t\tcad=cad+\"<tr><td bgcolor='black' colspan='2'></td></tr>\"\t\t\t\r\n\t\tcad=cad+\"</table>\"\r\n\t\treturn cad\r\n\t\t\r\n\tdef centro1(self, 
sesion):\r\n\t\tdb=TinyDB(\"numeros.json\")\r\n\t\tlista=db.search(where(\"sesion\")==sesion)\r\n\t\taux=[]\r\n\t\tauxa=[]\r\n\t\tfor elem in lista:\r\n\t\t\taux.append([elem[\"numero\"],elem[\"nNum\"]])\r\n\t\t\t\r\n\t\tkmeans=KMeans(n_clusters=2)\r\n\t\tkmeans.fit(aux)\r\n\t\tcentroides= kmeans.cluster_centers_\r\n\t\tlista =centroides.tolist()\r\n\t\t\r\n\t\tfor elem in lista:\r\n\t\t\tauxa.append(elem[0])\r\n\t\t\t\r\n\t\tcad=str(auxa[0])+\";\"+str(auxa[1])\r\n\t\tcad=cad.strip()\r\n\t\treturn cad\r\n\t\t\r\n\tdef centro2(self, sesion):\r\n\t\tdb=TinyDB(\"numeros.json\")\r\n\t\tlista=db.search(where(\"sesion\")==sesion)\r\n\t\taux=[]\r\n\t\tauxa=[]\r\n\t\tfor elem in lista:\r\n\t\t\taux.append([elem[\"numero\"],elem[\"nNum\"]])\r\n\t\t\t\r\n\t\tkmeans=KMeans(n_clusters=2)\r\n\t\tkmeans.fit(aux)\r\n\t\tcentroides= kmeans.cluster_centers_\r\n\t\tlista =centroides.tolist()\r\n\t\t\r\n\t\tfor elem in lista:\r\n\t\t\tauxa.append(elem[1])\r\n\t\t\t\r\n\t\tcad=str(auxa[0])+\";\"+str(auxa[1])\r\n\t\tcad=cad.strip()\r\n\t\treturn cad\r\n\t\t\r\n\tdef val(self, sesion):\r\n\t\tdb=TinyDB(\"numeros.json\")\r\n\t\tlista=db.search(where(\"sesion\")==sesion)\r\n\t\taux=[]\r\n\t\tauxy=[]\r\n\t\tfor elem in lista:\r\n\t\t\taux.append(elem[\"nNum\"])\r\n\t\t\tauxy.append(elem[\"numero\"])\r\n\t\t\t\r\n\t\tcad=str(aux)+\";\"+str(auxy)\r\n\t\tcad=cad.replace(\"[\",\"\")\r\n\t\tcad=cad.replace(\"]\",\"\")\r\n\t\tcad=cad.strip()\r\n\t\treturn cad\r\n\t\r\n\tdef analisis(self, sesion):\r\n\t\tdb=TinyDB(\"numeros.json\")\r\n\t\tlista=db.search(where(\"sesion\")==sesion)\r\n\t\taux=[]\r\n\t\tauxnr=[]\r\n\t\tauxclas=[]\r\n\t\tcont=0\r\n\t\tnr=0\r\n\t\tclas=0\r\n\t\tn1=0\r\n\t\tn2=0\r\n\t\tn3=0\r\n\t\tn4=0\r\n\t\tn5=0\r\n\t\tn6=0\r\n\t\tfor elem in 
lista:\r\n\t\t\taux.append([elem[\"numero\"],elem[\"nNum\"]])\r\n\t\t\tauxnr.append([elem['numero']])\r\n\t\t\tauxclas.append([elem['nNum']])\r\n\t\t\tnr=nr+float(elem['numero'])\r\n\t\t\tclas=clas+elem[\"nNum\"]\r\n\t\t\tcont=cont+1\r\n\t\t\tif elem['nNum']==1:\r\n\t\t\t\tn1=n1+1\r\n\t\t\tif elem['nNum']==2:\r\n\t\t\t\tn2=n2+1\r\n\t\t\tif elem['nNum']==3:\r\n\t\t\t\tn3=n3+1\r\n\t\t\tif elem['nNum']==4:\r\n\t\t\t\tn4=n4+1\r\n\t\t\tif elem['nNum']==5:\r\n\t\t\t\tn5=n5+1\r\n\t\t\tif elem['nNum']==6:\r\n\t\t\t\tn6=n6+1\r\n\t\t\t\r\n\t\t\t\t\t\t\r\n\t\tkmeans=KMeans(n_clusters=2)\r\n\t\tkmeans.fit(aux)\r\n\t\tcentroides= kmeans.cluster_centers_\t\t\r\n\t\t\r\n\t\t\r\n\t\tcad =\"<center><table border='2'>\"\r\n\t\tcad=cad+\"<tr><td bgcolor=#F5B7B1 colspan='2'><center>Centroides</center></td></tr>\"\t\r\n\t\tcad= cad+\"<tr><td><center>Nivel de ruido </center></td><td><center>Clasificación</center></td></tr>\"\r\n\t\t\r\n\t\tfor elem in centroides:\r\n\t\t\tcad=cad+\"<tr><td>\"+str(elem[0])+\"</td><td>\"+str(elem[1])+\"</td></tr>\"\r\n\t\tn=np.array(auxnr)\r\n\t\tn = np.array(n, dtype=np.float64)\r\n\t\tcad= cad+\"<tr><td bgcolor=#F5B7B1 colspan='2'><center>Desviación Estandar</center></td></tr>\"\r\n\t\tcad= cad+\"<tr><td><center>Nivel de ruido </center></td><td><center>Clasificación</center></td></tr>\"\r\n\t\tcad= cad+\"<tr><td><center>\"+str(np.std(n))+\"</center></td><td><center>\"+str(np.std(auxclas))+\"</center></td></tr>\"\r\n\r\n\t\tcad= cad+\"<tr><td bgcolor=#F5B7B1 colspan='2'><center>Promedio</center></td></tr>\"\r\n\t\tcad= cad+\"<tr><td><center>Nivel de ruido </center></td><td><center>Clasificación</center></td></tr>\"\r\n\t\tcad= cad+\"<tr><td><center>\"+str(nr/cont)+\"</center></td><td><center>\"+str(clas/cont)+\"</center></td></tr>\"\r\n\t\t\r\n\t\tcad= cad+\"<tr><td bgcolor=#F5B7B1 colspan='2'><center>Clasificación</center></td></tr>\"\r\n\t\tcad= cad+\"<tr><td><center>Bajo</center></td><td><center>\"+ str(n1)+\"</center></td></tr>\"\r\n\t\tcad= 
cad+\"<tr><td><center>Normal</center></td><td><center>\"+ str(n2)+\"</center></td></tr>\"\r\n\t\tcad= cad+\"<tr><td><center>Cosiderable</center></td><td><center>\"+ str(n3)+\"</center></td></tr>\"\r\n\t\tcad= cad+\"<tr><td><center>Alto</center></td><td><center>\"+ str(n4)+\"</center></td></tr>\"\r\n\t\tcad= cad+\"<tr><td><center>Muy Alto</center></td><td><center>\"+ str(n5)+\"</center></td></tr>\"\r\n\t\tcad= cad+\"<tr><td><center>Umbral de dolor</center></td><td><center>\"+ str(n6)+\"</center></td></tr>\"\r\n\t\tcad= cad+\"<tr><td><center>Total de datos</center></td><td><center>\"+ str(cont)+\"</center></td></tr>\"\r\n\t\t\t\r\n\t\tcad= cad+\"</tabla></center>\"\r\n\t\treturn cad\r\n"
}
] | 3 |
IshjotSingh97/Django_Placement_Drive_App
|
https://github.com/IshjotSingh97/Django_Placement_Drive_App
|
d475c0c8031def4d97ab24ef3d42bd097372b222
|
7aa6340121ad84010db1ddc8d3410d43385e7606
|
f129aab62ffc504ca6c2c59d17a4e598e395cb44
|
refs/heads/master
| 2023-03-13T08:30:27.690706 | 2021-02-13T14:16:25 | 2021-02-13T14:16:25 | 243,995,231 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7222222089767456,
"alphanum_fraction": 0.7222222089767456,
"avg_line_length": 35.88888931274414,
"blob_id": "638b7f212e93a4673d44733855032584af42a694",
"content_id": "1c33c162b7cef31326d6f8762ae96868c0c74684",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 666,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 18,
"path": "/myapp/urls.py",
"repo_name": "IshjotSingh97/Django_Placement_Drive_App",
"src_encoding": "UTF-8",
"text": "\nfrom django.contrib import admin\nfrom django.urls import path\nfrom . import views\n\nurlpatterns = [\n\n path('',views.index,name='index'),\n path('server',views.server,name='server'),\n path('index',views.index,name='index'),\n path('userfeedbackform',views.userfeedbackform,name='userfeedbackform'),\n path('submituserfeedbackform',views.submituserfeedbackform,name='submituserfeedbackform'),\n path('search',views.search,name='search'),\n path('about',views.about,name='about'),\n path('addtofavourite/<int:uid>/<int:pid>',views.addtofavourite,name='addtofavourite'),\n path('getfavourite/<int:uid>/',views.getfavourite,name='getfavourite'),\n\n\n]\n\n"
},
{
"alpha_fraction": 0.6600189805030823,
"alphanum_fraction": 0.6628680229187012,
"avg_line_length": 28.26388931274414,
"blob_id": "c07541ccb5606184b20567da8a9468e7f3d38ee4",
"content_id": "47773d34bcda9ee586f21e0dc814ba318c88a19f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2106,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 72,
"path": "/myapp/views.py",
"repo_name": "IshjotSingh97/Django_Placement_Drive_App",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\nfrom .models import *\nfrom .forms import *\nfrom django.http import HttpResponse,JsonResponse\nfrom django.db.models import Q\nfrom django.contrib.auth.decorators import login_required\n\n# Create your views here.\n\ndef error_404_view(request, exception):\n return render(request,'404.html')\n\ndef server(request):\n return HttpResponse(\"Server has started\")\n\ndef index(request):\n mydictionary = {\n \"posts\" : Post.objects.all(),\n \"message\" : \"Hey check out this amazing post at Placement Drive\"\n }\n return render(request,'index.html',context=mydictionary)\n\ndef search(request):\n query = request.GET['query']\n mydictionary = {\n \"posts\" : Post.objects.all().filter(Q(title__icontains=query)\n ).order_by('-date'),\n \"message\" : \"Hey check out this amazing post at Placement Drive\"\n }\n return render(request,'index.html',context=mydictionary)\n\ndef about(request):\n return render(request,'about.html')\n\ndef userfeedbackform(request):\n return render(request,'userfeedbackform.html')\n \n\n@login_required\ndef addtofavourite(request,uid,pid):\n try:\n obj = Favourite()\n obj.uid = uid\n obj.pid = pid\n obj.posttitle = Post.objects.get(id=pid).title\n obj.postlink = Post.objects.get(id=pid).link\n obj.save()\n mydictionary ={\n \"add\" : True,\n \"favourites\" : Favourite.objects.filter(uid=uid)\n }\n return render(request,'favourite.html',context=mydictionary)\n except:\n return redirect('account_login')\n\n@login_required\ndef getfavourite(request,uid):\n mydictionary ={\n \"favourites\" : Favourite.objects.filter(uid=uid)\n }\n return render(request,'favourite.html',context=mydictionary)\n\ndef submituserfeedbackform(request):\n if request.method == 'GET':\n obj = UserFeedback()\n obj.subject = request.GET['title']\n obj.feedback = request.GET['subject']\n obj.save()\n mydictionary = {\n \"feedback\" : True\n }\n return render(request,'userfeedbackform.html',context=mydictionary)"
},
{
"alpha_fraction": 0.6820027232170105,
"alphanum_fraction": 0.7198917269706726,
"avg_line_length": 31.130434036254883,
"blob_id": "71587c3ca87c200b8202439250ccf6fec50b66d1",
"content_id": "0311769d314890f572e738026c23692b21a4286e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 739,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 23,
"path": "/myapp/models.py",
"repo_name": "IshjotSingh97/Django_Placement_Drive_App",
"src_encoding": "UTF-8",
"text": "from django.db import models\n\n# Create your models here.\nclass Post(models.Model):\n title = models.CharField(max_length=100)\n description = models.CharField(max_length=1000)\n date = models.DateField(auto_now_add=True)\n link = models.URLField(max_length=1000)\n image = models.URLField(max_length=1000)\n \n\nclass UserFeedback(models.Model):\n subject = models.CharField(max_length=100)\n feedback = models.CharField(max_length=1000)\n date = models.DateField(auto_now_add=True)\n\n\nclass Favourite(models.Model):\n uid = models.IntegerField()\n pid = models.IntegerField()\n posttitle = models.CharField(max_length=100)\n postlink = models.URLField(max_length=100)\n date = models.DateField(auto_now_add=True)\n"
},
{
"alpha_fraction": 0.7569444179534912,
"alphanum_fraction": 0.7881944179534912,
"avg_line_length": 32.882354736328125,
"blob_id": "e6b941998ebea7940ec83d8a058ee44809a9fc72",
"content_id": "809c6f9173149e677b379d9ff171138a2c5b0857",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 576,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 17,
"path": "/README.md",
"repo_name": "IshjotSingh97/Django_Placement_Drive_App",
"src_encoding": "UTF-8",
"text": "# Django_Placement_Drive_App\nDjango_Placement_Drive_App - Students can look for jobs and internships.\n\n# How to run the project on localhost\n```\n1.Download this project as zip or clone this repository\n2.Move inside the current project directory\n3.run the command pip install -r requirements.txt\n4.run the command python manage.py makemigrations\n5.run the command python manage.py migrate\n6.run the command python manage.py runserver\n7.Move to http://127.0.0.1:8000/\n8.Please make sure it is http for localhost\n```\n\n# App Link\nhttps://djangoplacementdriveishjot.herokuapp.com/\n"
},
{
"alpha_fraction": 0.5066105723381042,
"alphanum_fraction": 0.53125,
"avg_line_length": 35.977779388427734,
"blob_id": "a77a4c53d0db1df29cc2be95b44cf6a78206b7e7",
"content_id": "fdf552761b8cf77dcdb8bf76d557a546e82e0fb4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1664,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 45,
"path": "/myapp/migrations/0001_initial.py",
"repo_name": "IshjotSingh97/Django_Placement_Drive_App",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.0.3 on 2020-03-11 22:39\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Favourite',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('uid', models.IntegerField()),\n ('pid', models.IntegerField()),\n ('posttitle', models.CharField(max_length=100)),\n ('postlink', models.URLField(max_length=100)),\n ('date', models.DateField(auto_now_add=True)),\n ],\n ),\n migrations.CreateModel(\n name='Post',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(max_length=100)),\n ('description', models.CharField(max_length=1000)),\n ('date', models.DateField(auto_now_add=True)),\n ('link', models.URLField(max_length=100)),\n ('image', models.URLField(max_length=100)),\n ],\n ),\n migrations.CreateModel(\n name='UserFeedback',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('subject', models.CharField(max_length=100)),\n ('feedback', models.CharField(max_length=1000)),\n ('date', models.DateField(auto_now_add=True)),\n ],\n ),\n ]\n"
}
] | 5 |
PathomphongPromsit/GoogleCodejam2019
|
https://github.com/PathomphongPromsit/GoogleCodejam2019
|
9f1dd19adec406434e0dcf12fffa13d0169c5b36
|
4d99c52ac7fefd33edccc165f81171a76edeafcd
|
6c1f1a4e10a08b231e1d9c89708fc1e8490bb8c9
|
refs/heads/master
| 2020-05-05T03:10:22.760314 | 2019-04-11T10:23:31 | 2019-04-11T10:23:31 | 179,663,083 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.44146767258644104,
"alphanum_fraction": 0.45602795481681824,
"avg_line_length": 26.70967674255371,
"blob_id": "5687e862e3817842a3c9dd83410ebb59fb875b1e",
"content_id": "dd43c9dca5c4564143d97b4a4fad17f78467ec1e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1717,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 62,
"path": "/qualificationRound/3.py",
"repo_name": "PathomphongPromsit/GoogleCodejam2019",
"src_encoding": "UTF-8",
"text": "import math\n\n\ndef isprime(data):\n prime = 0\n if data == 1 or data == 0:\n prime = 1\n else:\n for x in range(2, int(math.sqrt(data) + 1)):\n if (data % x == 0):\n prime = 1\n break\n if prime == 1:\n return False\n else:\n return True\n\n\nif __name__ == '__main__':\n for case in range(int(input())):\n n, l = input().split()\n list_prime = []\n for i in reversed(range(2, int(n) + 1)):\n if isprime(i) and len(list_prime) < 26:\n list_prime.insert(0, i)\n if len(list_prime) == 26:\n break\n # print(list_prime)\n text = [int(x) for x in input().split()]\n # print(text)\n ans = ''\n first = text[0]\n secound = text[1]\n for prime in list_prime:\n if first % prime == 0 and int(first / prime) in list_prime:\n a = prime\n b = int(first / prime)\n\n if secound % a\n for prime_two in list_prime:\n if secound % prime_two == 0 and int(secound / prime_two) in list_prime:\n c = prime_two\n d = int(secound / prime_two)\n # print(a, b, c, d)\n\n if a == c:\n first_cha_prime = b\n elif a == d:\n first_cha_prime = b\n else:\n first_cha_prime = a\n # print(first_cha_prime)\n\n prime_cha_old = first_cha_prime\n ans += chr(list_prime.index(first_cha_prime) + 65)\n\n for cha in text:\n next_cha = cha / prime_cha_old\n ans += chr(list_prime.index(next_cha) + 65)\n prime_cha_old = next_cha\n\n print('Case #{}: {}'.format(case + 1, ans))"
},
{
"alpha_fraction": 0.35380834341049194,
"alphanum_fraction": 0.3562653660774231,
"avg_line_length": 20.421052932739258,
"blob_id": "56ac5ffff2c554902f281b2c7525c3902e637713",
"content_id": "056806de704a06863187cbbd257f9e9d5374e864",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 407,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 19,
"path": "/qualificationRound/2.py",
"repo_name": "PathomphongPromsit/GoogleCodejam2019",
"src_encoding": "UTF-8",
"text": "def swap(data):\n new = ''\n for i in data:\n if i == 'N':\n new += 'W'\n elif i == 'W':\n new += 'N'\n elif i == 'E':\n new += 'S'\n else:\n new += 'E'\n return new\n\n\nif __name__ == '__main__':\n for case in range(int(input())):\n n = int(input())\n old = input()\n print('Case #{}: {}'.format(case + 1, swap(old)))\n"
},
{
"alpha_fraction": 0.4517133831977844,
"alphanum_fraction": 0.46417444944381714,
"avg_line_length": 20.46666717529297,
"blob_id": "33ca2e35bd94dc554456dc8bf81ea0a83c03cc82",
"content_id": "f59baf3a04f5d28835f8aefe30ae533402e06591",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 321,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 15,
"path": "/qualificationRound/1.py",
"repo_name": "PathomphongPromsit/GoogleCodejam2019",
"src_encoding": "UTF-8",
"text": "def find_four(data):\n secound = ''\n for i in data:\n if i == '4':\n secound += '1'\n else:\n secound += '0'\n return int(secound)\n\n\nif __name__ == '__main__':\n for case in range(int(input())):\n n = int(input())\n s = find_four(str(n)) \n print('Case #{}: {} {}'.format(case+1, s, n-s))"
}
] | 3 |
jordanbabe/Library-Management-System
|
https://github.com/jordanbabe/Library-Management-System
|
6fe6a9c80737ed0ce187ec154ae1604aa73666f7
|
c316f48e59452dc5ae50ca9760c100119afc9330
|
b737d9d428de88db5ff8e62626df99ada93224ce
|
refs/heads/master
| 2022-12-15T02:56:44.387381 | 2020-08-27T07:14:56 | 2020-08-27T07:14:56 | 290,704,249 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.47003504633903503,
"alphanum_fraction": 0.472247838973999,
"avg_line_length": 34.58940505981445,
"blob_id": "2cc546b788c1563a7c505843494e90dee26a2b80",
"content_id": "cc05fe8308eeef9f4f758397e989bf0e1e934a51",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5423,
"license_type": "no_license",
"max_line_length": 141,
"num_lines": 151,
"path": "/Main.py",
"repo_name": "jordanbabe/Library-Management-System",
"src_encoding": "UTF-8",
"text": "#Main python File\n\n#Other three modules are imported for execution along with datetime from Python Standard Library.\nimport Borrow\nimport Display\nimport Return\nimport datetime\n\n\n#Opening the intial books file in read mode and conveting it into a 2D List for further actions.\n\ndate_time = datetime.datetime.now()\n\ncatalouge = open('Books.txt','r')\ncontents = catalouge.read()\n\nlist_one = contents.split('\\n')\nprint('\\n')\n\nlist_two = []\n\nfor each in list_one:\n list_two.append(each.split(','))\n\ncatalouge.close()\n\n####################################################################################################\n\n#Declaring a new list as return_books.\nreturn_books = []\n\n#Declaring a new list as borrower_cart.\nborrower_cart = []\n\n\nenter_name = str(input(\"Please enter your full name:\"))\nprint('\\n')\nprint(\"Hello\" + '\\t' + enter_name)\nprint('\\n')\nprint('\\t\\t' + \"Welcome to our Library.\")\nprint('\\n')\n\n\n\n\nquestion_one = str(input(\"Do you wish to access our Library menu?, please enter 'Yes' or 'No':\"))\n\n#Creating the main outer loop for library menu.\n \nif question_one == \"Yes\":\n done = True #Using done as a flag variable\n while done == True:\n Display.menu_display()1\n\n #Using exceptional handling to handle any errors in the user input.\n error_handler1 = True\n while error_handler1 == True:\n try:\n choice = int(input(\"Enter your desired choice:\"))\n \n\n \n if choice == 1:\n #Calling defined function to display all the available books in the Library.\n Display.display_data(list_two)\n \n elif choice == 2:\n print(\"The books available for borrowing are as follows:\")\n Display.display_data(list_two)\n print('\\n')\n \n \n \n #Asking user input for number of books desired and their Book ID.\n input_id = str(input(\"Please enter the BookID of your desired book.\"))\n\n \n #Calling the function that operates borrow action.\n borrower_books = Borrow.borrow_action(list_two,input_id)\n\n if 
len(borrower_books) > 0:\n #Appending the books borrowed into the borrower cart as a list.\n borrower_cart.append(borrower_books)\n\n #Calling previously defined function to display Borrower Cart.\n Display.display_borrow(borrower_cart)\n else:\n print('\\n')\n print('Please enter another valid Book ID.')\n \n \n\n \n \n elif choice == 3:\n Display.display_data(list_two)\n print('\\n')\n\n \n #User input for number of books they want to return, Book id and their borrow duration.\n input_return_id = str(input(\"Please enter the Book ID:\"))\n days_return = int(input(\"Enter the number of days since you borrowed the books:\"))\n print('\\n')\n \n\n #Calling the function that operates book return action.\n returned_items = Return.return_action(list_two,days_return,input_return_id)\n\n\n #Appending the returned items to the previously created list(return_books), if the returned_items list is not empty.\n if len(returned_items) > 0:\n return_books.append(returned_items)\n Display.display_return(return_books)\n \n \n else:\n print('\\n')\n print(\"Invalid Book ID!Please enter a valid Book ID.\")\n \n\n \n \n \n \n elif choice == 4:\n #Calling functions to write the 2D list back to inventory file.\n Display.write_list_to_file(list_two)\n\n #Calling functions to generate borrow and return notes.\n Return.return_note(return_books,enter_name,date_time)\n Borrow.borrow_note(borrower_cart, enter_name, date_time)\n\n #Exiting the program when the user chooses to.\n exit()\n\n else:\n print('\\n')\n print(\"Please Enter the correct option from the menu.\")\n error_handler1 = False\n\n except:\n print('\\n')\n print(\"Invlid Input! Please enter a valid input.\")\n\n \n \n#Else statement when the user doesn't want to access the Library Menu and exit the program.\nelse:\n print('\\n')\n print(\"Thank You. Please visit again when required.\")\n exit()\n \n\n \n \n\n \n\n \n"
},
{
"alpha_fraction": 0.41210582852363586,
"alphanum_fraction": 0.4193548262119293,
"avg_line_length": 51.03773498535156,
"blob_id": "bcdcdcf73e8ed674d64b035d5f00300c5d27e061",
"content_id": "81232a6b18e3bc4ea146ede1e06c257f97d969fd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2759,
"license_type": "no_license",
"max_line_length": 131,
"num_lines": 53,
"path": "/Borrow.py",
"repo_name": "jordanbabe/Library-Management-System",
"src_encoding": "UTF-8",
"text": "\n#Defining a function to carry out all the borrow actions and procedures. \n\ndef borrow_action(list_two,input_id):\n temp_borrower_cart = []\n \n for each_value in list_two:\n if each_value[0] == input_id:\n if int(each_value[3]) > 0:\n #Decreasing the quantity of the requested book in the inventory.\n quantity_book = int(each_value[3]) - 1\n each_value[3] = str(quantity_book)\n for each_num in each_value:\n #Appending the requested book details to the borrower's cart.\n temp_borrower_cart.append(each_num)\n \n\n \n elif int(each_value[3]) < 0 or int(each_value[3]) == 0:\n print('\\n')\n print(\"Sorry for the inconvenience. We do not have the requested book at the moment.\") \n \n return temp_borrower_cart\n \n#Defining a function to generate a borrower receipt note for borrow transaction. \n\ndef borrow_note(list_two, enter_name, date_time):\n file_two = open(\"Borrownote.txt\", \"w\")\n file_two.write('=============================================================')\n file_two.write('\\n\\n')\n file_two.write('\\t\\t\\t' + \" Borrower Receipt:\")\n file_two.write('\\n\\n')\n file_two.write(\"Current Date and Time of transcation:\" + '\\t\\t' + str(date_time))\n file_two.write('\\n\\n')\n file_two.write(\"Name:\" + enter_name)\n file_two.write('\\n\\n')\n file_two.write(\"Book ID \\t Book Name \\t\\t Price\")\n file_two.write('\\n\\n')\n sum_ = 0\n for value in list_two:\n sign_remove = value[-1].replace('$',\"\")\n value[-1] = float(sign_remove)\n sum_ = value[-1] + sum_\n sum_ = round(sum_,2)\n file_two.write(str(value[0]) + '\\t\\t' + str(value[1]) + '\\t\\t' +'$' + str(value[-1]) + '\\t' + '\\n')\n file_two.write('\\n\\n')\n file_two.write('------------------------------------------------------------')\n file_two.write('\\n')\n file_two.write(\"Grand Total:\" + '\\t\\t\\t\\t\\t' + '$' + str(sum_))\n file_two.write('\\n\\n')\n file_two.write(\"Note: All books must be returned within 10 days.\")\n file_two.write('\\n\\n')\n 
file_two.write('===========================================================')\n file_two.close()\n"
},
{
"alpha_fraction": 0.4173566401004791,
"alphanum_fraction": 0.4268629252910614,
"avg_line_length": 48.907691955566406,
"blob_id": "a81baaa9000687313877f0c98bd406da17608325",
"content_id": "98a7987ffd644902ba61b808cbb9635e83522e2f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3261,
"license_type": "no_license",
"max_line_length": 170,
"num_lines": 65,
"path": "/Return.py",
"repo_name": "jordanbabe/Library-Management-System",
"src_encoding": "UTF-8",
"text": "#Defining a function to operate on returnig actions and procedures.\n\ndef return_action(list_two,days_return,input_return_id):\n temp_returned_items = []\n grand_sum = 0\n for each_num in list_two:\n if each_num[0] == input_return_id:\n #Adding the returned books into our initial inentory.\n book_quantity = int(each_num[3]) + 1\n each_num[3] = str(book_quantity)\n \n for each_value in each_num:\n temp_returned_items.append(each_value)\n\n if days_return < 10:\n convert_ = each_num[-1].replace('$',\"\")\n fl_num = float(convert_)\n return_fine = 0\n \n\n elif days_return > 10:\n convert_one = each_num[-1].replace('$',\"\")\n\n #Calculating total fine accured on the basis of days and book price.\n fine_duration = days_return - 10\n fl_num1 = float(convert_one)\n return_fine = (0.25*(fl_num1)) * fine_duration\n return_fine = round(return_fine,2)\n \n temp_returned_items.append(days_return)\n temp_returned_items.append(return_fine)\n\n \n \n return temp_returned_items\n\n\n#Defining a function to generate a books returned receipt/note for the returning transaction.\n\ndef return_note(list_two,input_name,date_time):\n file_three = open(\"Returnnote.txt\",\"w\")\n file_three.write('==========================================================================================')\n file_three.write('\\n\\n')\n file_three.write('\\t\\t\\t' + \" Return Receipt:\")\n file_three.write('\\n\\n')\n file_three.write('Current Date and Time of Transaction:' + '\\t\\t\\t' + str(date_time))\n file_three.write('\\n\\n')\n file_three.write(\"Name:\" + input_name)\n file_three.write('\\n\\n')\n file_three.write(\"BOOK ID \\t Book Name \\t\\t Price \\t\\t Days Borrowed\\t\\t Fine\")\n file_three.write('\\n\\n')\n grand_sum = 0\n for value in list_two:\n sign_remove = value[4].replace('$',\"\")\n value[4] = float(sign_remove)\n grand_sum = value[4] + value[-1] + grand_sum\n grand_sum = round(grand_sum,2)\n file_three.write(str(value[0]) + '\\t\\t' + str(value[1]) + '\\t\\t' + '$' + 
str(value[4]) + '\\t\\t\\t' + str(value[5])+ '\\t\\t\\t' + '$' + str(value[-1])+'\\n') \n file_three.write('\\n\\n')\n file_three.write('-----------------------------------------------------------------------------------------------')\n file_three.write('\\n')\n file_three.write(\"Grand Total:\" + '\\t\\t\\t\\t\\t\\t' + '$' + str(grand_sum))\n file_three.write('\\n\\n')\n file_three.write('================================================================================================')\n file_three.close()\n \n"
},
{
"alpha_fraction": 0.5594610571861267,
"alphanum_fraction": 0.5694200396537781,
"avg_line_length": 35.10638427734375,
"blob_id": "da4fb8b50bd143e247cc3535f524766deeb8be36",
"content_id": "42c3fa6e728bd44379c636c8cfd8aaaa803db32b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1707,
"license_type": "no_license",
"max_line_length": 130,
"num_lines": 47,
"path": "/Display.py",
"repo_name": "jordanbabe/Library-Management-System",
"src_encoding": "UTF-8",
"text": "#Display.py\n\n#Defining a function to display the main library menu.\ndef menu_display():\n print('\\n')\n print(\"\"\"========================LIBRARY MENU=======================\n 1. Display all available books.\n 2. Request a book to borrow.\n 3. Return a borrowed book.\n 4. Exit\n \"\"\")\n\n#Defining a function to display all the available books in the Library Catalouge.\ndef display_data(list_two):\n print('\\n')\n print(\"BOOK ID \\t Book Name \\t\\t Author Name \\t\\t Price\")\n for every_item in list_two:\n print(every_item[0] + '\\t\\t' + every_item[1] + '\\t\\t' + every_item[2] + '\\t\\t' + every_item[4])\n print('\\n')\n\n\n#Defining a function to display the borrower's cart as per user's input.\ndef display_borrow(list_two):\n print('\\n')\n print(\"BOOK ID \\t Book Name \\t\\t Author Name \\t\\t Price\")\n for every_item in list_two:\n print(str(every_item[0]) + '\\t\\t' + str(every_item[1]) + '\\t\\t' + str(every_item[2]) + '\\t\\t' + str(every_item[-1]))\n print('\\n\\n')\n\n#Defining a function to display the returned items as per user's input.\ndef display_return(list_two):\n print('\\n')\n print(\"BOOK ID \\t Book Name \\t\\t Price \\t\\t Fine\")\n for every_item in list_two:\n print(str(every_item[0]) + '\\t\\t' + str(every_item[1]) + '\\t\\t' + str(every_item[4]) + '\\t\\t' + '$' + str(every_item[-1]))\n print('\\n\\n')\n\n\n# Defining function for writing the 2d list back to a inventory file. \n\ndef write_list_to_file(list_two):\n file_one =open(\"Inventory.txt\",\"w\")\n for each_item in list_two:\n line = ','.join(each_item) + ('\\n')\n file_one.write(line)\n file_one.close()\nprint('\\n') \n\n \n"
}
] | 4 |
arjamizo/bazy_project
|
https://github.com/arjamizo/bazy_project
|
058817122eed6cee086364b036b696a7fbb09c5b
|
dcdba53c6c55f394f555aa7f242db786d23c58a4
|
f80bbca07914b0e4dc8b50082c1e22a6f2487dca
|
refs/heads/master
| 2020-12-26T22:29:13.028352 | 2013-11-16T12:35:39 | 2013-11-16T12:35:39 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5729166865348816,
"alphanum_fraction": 0.71875,
"avg_line_length": 18.200000762939453,
"blob_id": "416b4a36e1f27d9e00e08d7287b79c19b39a799e",
"content_id": "a068553df66ce9755af98377b71311bad42e7410",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 96,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 5,
"path": "/backend/requirements.txt",
"repo_name": "arjamizo/bazy_project",
"src_encoding": "UTF-8",
"text": "Django==1.4.3\ndjango-bootstrap-form==0.2\ndjango-grappelli==2.4.7\nopenpyxl==1.6.2\nwsgiref==0.1.2\n"
},
{
"alpha_fraction": 0.5748662948608398,
"alphanum_fraction": 0.5828877091407776,
"avg_line_length": 26.846153259277344,
"blob_id": "7e925b0115fcb4fde41dec5f7da630548f3e73e3",
"content_id": "5883d65c95106256aea83aaec4f47014ef1012c8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 748,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 26,
"path": "/backend/bazy/templatetags/base_extras.py",
"repo_name": "arjamizo/bazy_project",
"src_encoding": "UTF-8",
"text": "from django import template\r\nfrom django.core.urlresolvers import reverse, NoReverseMatch\r\n\r\nregister = template.Library()\r\n\r\[email protected]_tag\r\ndef navactive(request, urls):\r\n for url in urls.split():\r\n if url[0] == \"_\":\r\n if any(url[1:] in s for s in request.path.split('/')):\r\n return \"active\"\r\n elif request.path in reverse(url):\r\n return \"active\"\r\n return \"\"\r\n\r\[email protected]_tag\r\ndef navactive_contains(request, title):\r\n for t in title.split():\r\n if any(t in s for s in request.path.split('/')):\r\n return \"active\"\r\n return \"\"\r\n\r\[email protected]\r\ndef pln(pln):\r\n dollars = round(float(pln), 2)\r\n return \"%s%s PLN\" % (int(pln), (\"%0.2f\" % pln)[-3:])"
},
{
"alpha_fraction": 0.6404680013656616,
"alphanum_fraction": 0.6481503248214722,
"avg_line_length": 33.11618423461914,
"blob_id": "13fc9aa350666dc4ca6391017a794f2b214facca",
"content_id": "34fd4d872a619f7267eb9c13237a23724a2816a8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8472,
"license_type": "no_license",
"max_line_length": 152,
"num_lines": 241,
"path": "/backend/bazy/views.py",
"repo_name": "arjamizo/bazy_project",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n# django\r\nfrom django.contrib.auth.models import User, Group\r\nfrom django.shortcuts import get_list_or_404, render, redirect\r\nfrom django.contrib.auth import logout as auth_logout\r\nfrom django.contrib.auth.decorators import login_required\r\nfrom django.contrib import messages\r\nfrom django.core.paginator import Paginator, PageNotAnInteger, EmptyPage\r\nfrom django.http import Http404, HttpResponse\r\nfrom django.conf import settings\r\nfrom django.db.models import Sum, Avg, Max, Min\r\nfrom django.utils.encoding import smart_str\r\n\r\n# internal\r\nfrom models import Newsy, Oplaty, Oplaty_type, Wplaty\r\nfrom decorators import login_mieszkaniec_required\r\n\r\n# python\r\nfrom datetime import date\r\nimport itertools\r\n\r\n# export\r\nfrom openpyxl import Workbook\r\n\r\ndef home(request):\r\n return render(request, 'home.html', {'title': 'Strona główna'})\r\n\r\n@login_mieszkaniec_required\r\ndef panel_rozliczenia(request):\r\n mieszkaniec = request.user.get_profile()\r\n\r\n oplaty_suma = Oplaty.objects.exclude(oplaty_type__name='nadpłata')\\\r\n .filter(mieszkanie=mieszkaniec.mieszkanie).aggregate(sum=Sum('saldo'))\r\n wplaty_suma = Wplaty.objects.filter(mieszkanie=mieszkaniec.mieszkanie).aggregate(sum=Sum('saldo'))\r\n\r\n saldo = oplaty_suma['sum']-wplaty_suma['sum']\r\n nadplata = 0\r\n\r\n ostatnia_oplata = Oplaty.objects.filter(mieszkanie=mieszkaniec.mieszkanie).latest('pk')\r\n try:\r\n ostatnia_nadplata = Oplaty.objects.filter(mieszkanie=mieszkaniec.mieszkanie, oplaty_type__name='nadpłata').latest('pk')\r\n if ostatnia_oplata.data_platnosci <= ostatnia_nadplata.data_platnosci:\r\n nadplata = abs(ostatnia_nadplata.saldo)\r\n except Oplaty.DoesNotExist:\r\n nadplata = 0\r\n\r\n return render(request, 'panel/rozliczenia.html', {'title': 'Rozliczenia', 'saldo': saldo, 'nadplata': nadplata, 'ostatnia_oplata': ostatnia_oplata})\r\n\r\n@login_mieszkaniec_required\r\ndef panel_oplaty_chart_1(request):\r\n 
mieszkaniec = request.user.get_profile()\r\n oplaty_all = get_list_or_404(Oplaty, mieszkanie=mieszkaniec.mieszkanie)\r\n oplata_suma = Oplaty.objects.exclude(oplaty_type__name='nadpłata')\\\r\n .filter(mieszkanie=mieszkaniec.mieszkanie).values('data_platnosci')\\\r\n .annotate(sum=Sum('saldo'))\r\n wplaty_suma = Wplaty.objects.filter(mieszkanie=mieszkaniec.mieszkanie)\\\r\n .values('data_wplaty').annotate(sum=Sum('saldo'))\r\n\r\n oplaty_by_type = {}\r\n for o in oplaty_all:\r\n if not o.oplaty_type.pk in oplaty_by_type:\r\n oplaty_by_type[o.oplaty_type.pk] = []\r\n oplaty_by_type[o.oplaty_type.pk].append(o)\r\n\r\n return render(request, 'panel/oplaty_chart_1.html',\r\n {'title': 'Oplaty (wykres)', 'oplaty': oplaty_by_type.items(),'oplata_suma':oplata_suma, 'wplaty_suma':wplaty_suma})\r\n\r\n@login_mieszkaniec_required\r\ndef panel_oplaty_chart_2(request):\r\n mieszkaniec = request.user.get_profile()\r\n\r\n oplaty = Oplaty.objects.exclude(oplaty_type__name='nadpłata')\\\r\n .filter(mieszkanie=mieszkaniec.mieszkanie).values('oplaty_type__name')\\\r\n .annotate(avg=Avg('saldo'), max=Max('saldo'), min=Min('saldo'), sum=Sum('saldo'))\r\n\r\n return render(request, 'panel/oplaty_chart_2.html', {'title': 'Oplaty (wykres)', 'oplaty': oplaty})\r\n\r\n@login_mieszkaniec_required\r\ndef panel_oplaty(request):\r\n today = date.today()\r\n\r\n # date validation\r\n try:\r\n month = int(request.GET.get('month', today.month))\r\n year = int(request.GET.get('year', today.year))\r\n except ValueError:\r\n raise Http404(\"invalid parms\")\r\n\r\n if not month in range(1, 13):\r\n raise Http404(\"invalid month\")\r\n if not year in range(2000, today.year+1):\r\n raise Http404(\"invalid year\")\r\n\r\n mieszkaniec = request.user.get_profile()\r\n\r\n # if does not exists -> 404\r\n oplaty = get_list_or_404(Oplaty,\r\n mieszkanie=mieszkaniec.mieszkanie,\r\n data_platnosci__month=month,\r\n data_platnosci__year=year,\r\n )\r\n\r\n pln_sum = sum([oplata.saldo for oplata in 
oplaty])\r\n\r\n # wyznaczanie nastpnych\r\n max_date = max([oplata.data_platnosci for oplata in oplaty])\r\n min_date = min([oplata.data_platnosci for oplata in oplaty])\r\n\r\n next = Oplaty.objects.filter(mieszkanie=mieszkaniec.mieszkanie, data_platnosci__gt=max_date)\r\n if len(next) >= 1:\r\n next = next[0]\r\n\r\n prev = Oplaty.objects.filter(mieszkanie=mieszkaniec.mieszkanie, data_platnosci__lt=min_date)\r\n if len(prev) >= 1:\r\n prev = prev[len(prev)-1]\r\n\r\n return render(request, 'panel/oplaty.html',\r\n {'title': 'Oplaty', 'oplaty': oplaty, 'pln_sum': pln_sum, 'next':next, 'prev':prev})\r\n\r\n@login_mieszkaniec_required\r\ndef panel_komunikaty(request):\r\n mieszkaniec = request.user.get_profile()\r\n newsy = Newsy.objects.filter(mieszkancy=mieszkaniec)\r\n\r\n paginator = Paginator(newsy, settings.KOMUNIKATY_PER_PAGE)\r\n\r\n page = request.GET.get('page')\r\n try:\r\n newsy_paginate = paginator.page(page)\r\n except PageNotAnInteger:\r\n newsy_paginate = paginator.page(1)\r\n except EmptyPage:\r\n newsy_paginate = paginator.page(paginator.num_pages)\r\n\r\n return render(request, 'panel/komunikaty.html', {'title': 'Komunikaty', 'newsy': newsy_paginate, 'pages': paginator.page_range})\r\n\r\n@login_mieszkaniec_required\r\ndef panel_komunikat(request, news_pk):\r\n mieszkaniec = request.user.get_profile()\r\n news = Newsy.objects.get(mieszkancy=mieszkaniec, pk=news_pk)\r\n\r\n # wyznaczanie nastpnych\r\n next = Newsy.objects.filter(mieszkancy=mieszkaniec, pk__gt=news_pk)\r\n if len(next) >= 1:\r\n next = next[len(next)-1]\r\n\r\n prev = Newsy.objects.filter(mieszkancy=mieszkaniec, pk__lt=news_pk)\r\n if len(prev) >= 1:\r\n prev = prev[0]\r\n\r\n return render(request, 'panel/komunikat.html', {'title': 'Komunikaty', 'news': news, 'next':next, 'prev':prev})\r\n\r\n@login_mieszkaniec_required\r\ndef panel_export_main(request):\r\n mieszkaniec = request.user.get_profile()\r\n\r\n years = []\r\n for o in 
Oplaty.objects.filter(mieszkanie=mieszkaniec.mieszkanie).values('data_platnosci').annotate():\r\n if not o['data_platnosci'].year in years:\r\n years.append(o['data_platnosci'].year)\r\n\r\n return render(request, 'panel/export.html', {'title': 'Eksport', 'years':years})\r\n\r\n@login_mieszkaniec_required\r\ndef panel_export(request, year):\r\n mieszkaniec = request.user.get_profile()\r\n\r\n oplaty_all = Oplaty.objects.filter(\r\n mieszkanie=mieszkaniec.mieszkanie,\r\n data_platnosci__year=year\r\n ).order_by('data_platnosci')\r\n\r\n oplaty_type = Oplaty_type.objects.all()\r\n\r\n grouped = itertools.groupby(oplaty_all, lambda record: record.data_platnosci.strftime(\"%m-%Y\"))\r\n\r\n # generate\r\n wb = Workbook(encoding='utf-8')\r\n ws = wb.get_active_sheet()\r\n ws.title = u\"Opłaty\"\r\n\r\n # dict oplaty_type\r\n i = 0\r\n oplaty_type_to_id = {}\r\n for ot in oplaty_type:\r\n oplaty_type_to_id[ot.pk] = i\r\n i += 1\r\n\r\n # set data\r\n oplaty_list = []\r\n dates = []\r\n for date, items in grouped:\r\n dates.append(date)\r\n append_list = [0]*len(oplaty_type)\r\n for o in list(items):\r\n try:\r\n append_list[oplaty_type_to_id[o.oplaty_type.pk]] = o\r\n except:\r\n pass\r\n oplaty_list.append(append_list)\r\n\r\n for y in range(0, len(oplaty_list)):\r\n oplaty = oplaty_list[y]\r\n for x in range(0, len(oplaty_type)):\r\n d = ws.cell(row = x+1, column = y+1)\r\n try:\r\n d.value = float(oplaty[x].saldo)\r\n except:\r\n d.value = 0\r\n\r\n # set titles\r\n i = 1\r\n for v in [o.name for o in oplaty_type]:\r\n ws.cell(row=i, column=0).value = v\r\n i += 1\r\n i = 1\r\n for v in dates:\r\n ws.cell(row=0, column=i).value = v\r\n i += 1\r\n\r\n # set size\r\n ws.column_dimensions['A'].width = 25\r\n\r\n wb.save('export.xlsx')\r\n\r\n response = HttpResponse(mimetype='application/xlsx')\r\n response['Content-Disposition'] = 'attachment; filename=%s' % smart_str('export.xlsx')\r\n response.write(open('export.xlsx', 'rb').read())\r\n\r\n return 
response\r\n\r\n@login_mieszkaniec_required\r\ndef password_change_done(request):\r\n messages.success(request, 'Twoje nowe hasło zostało ustawione.')\r\n return redirect('panel')\r\n\r\n@login_required\r\ndef logout(request):\r\n messages.info(request, 'Zostałeś wylogowany.')\r\n auth_logout(request)\r\n return redirect('home')"
},
{
"alpha_fraction": 0.5196352601051331,
"alphanum_fraction": 0.5290483832359314,
"avg_line_length": 37.54069900512695,
"blob_id": "08aa8631ce7a7a41cf0590352a333cb166124fa9",
"content_id": "6862904232986b45750db748a50a78bb3b467f64",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6810,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 172,
"path": "/backend/bazy/management/commands/populate.py",
"repo_name": "arjamizo/bazy_project",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\nfrom django.core.management.base import BaseCommand, CommandError\r\nfrom django.contrib.auth.models import User\r\nfrom django.db.models import Sum\r\nfrom optparse import make_option\r\nimport sqlite3, random, os.path\r\nfrom bazy.models import *\r\nimport datetime\r\n\r\nDATABASE_PATH = os.path.join(\"..\", \"populate.db\")\r\n\r\nclass Populate:\r\n\r\n def __init__(self, database, population_size, population_per_home):\r\n self.population_size = population_size\r\n self.population_per_home = population_per_home\r\n\r\n # clear\r\n self.names = []\r\n self.addresses = []\r\n\r\n self.database = database\r\n self.c = self.database.cursor()\r\n\r\n def __delete__(self, instance):\r\n self.database.commit()\r\n self.database.close()\r\n\r\n def get_names(self):\r\n for row in self.c.execute('SELECT imie,nazwisko FROM names ORDER BY RANDOM() LIMIT %d'\r\n % (self.population_size,)):\r\n self.names.append({\r\n 'imie': row[0],\r\n 'nazwisko': row[1]\r\n })\r\n\r\n def get_addressese(self):\r\n for row in self.c.execute('SELECT street,city,code FROM addresses ORDER BY RANDOM() LIMIT %d'\r\n % (self.population_size/self.population_per_home,)):\r\n self.addresses.append({\r\n 'ulica': row[0],\r\n 'miejscowosc': row[1],\r\n\r\n 'kod': row[2]\r\n })\r\n\r\n def start(self):\r\n self.get_names()\r\n self.get_addressese()\r\n\r\n # populate Oplaty_type\r\n oplaty_types = ['czynsz', 'kredyt', 'rozlicznienie wody',\\\r\n 'rozlicznie ciepła', 'garaż', 'wykup gruntu', 'fundusz remontowy', 'inne', 'nadpłata']\r\n for oplaty_type in oplaty_types:\r\n Oplaty_type(name=oplaty_type).save()\r\n\r\n oplaty_types = Oplaty_type.objects.exclude(name='nadpłata').all()\r\n\r\n homes_count = -1\r\n for i in range(len(self.names)):\r\n if i % self.population_per_home == 0:\r\n homes_count+=1\r\n brama = Brama(\r\n numer_bramy=random.randrange(1, 100),\r\n ulica=self.addresses[homes_count]['ulica'],\r\n 
miejscowosc=self.addresses[homes_count]['miejscowosc']\r\n )\r\n brama.save()\r\n\r\n user = User.objects.create_user('user%d' % (i,), 'user%[email protected]' % (i,), 'user%d' % (i,))\r\n\r\n mieszkanie = Mieszkanie(\r\n brama = brama,\r\n numer_mieszkania = (i % self.population_per_home)+1\r\n )\r\n mieszkanie.save()\r\n\r\n mieszkaniec = Mieszkaniec(\r\n user=user,\r\n imie=self.names[i]['imie'],\r\n nazwisko=self.names[i]['nazwisko'],\r\n mieszkanie=mieszkanie)\r\n mieszkaniec.save()\r\n\r\n # populate Oplaty\r\n for mieszkaniec in Mieszkaniec.objects.all():\r\n for miesiac in range(1,13):\r\n for oplaty_type in oplaty_types:\r\n if random.randrange(0,10) != 0:\r\n Oplaty(\r\n mieszkanie=mieszkaniec.mieszkanie,\r\n oplaty_type=oplaty_type,\r\n data_platnosci=datetime.date(2013, miesiac, 1),\r\n saldo=float(random.randrange(10,100)),\r\n ).save()\r\n # populate news for each user\r\n for i in range(0, 20):\r\n n = Newsy(tytul=\"To jest wiadomosc numer %d proszę się z nią zapoznać\" % (i,),tresc=\"testowa tresc\",)\r\n n.save()\r\n n.mieszkancy.add(*[m.id for m in Mieszkaniec.objects.exclude(pk=1).all()]),\r\n n.save()\r\n\r\n # populate Wplaty\r\n for mieszkaniec in Mieszkaniec.objects.all():\r\n oplata_suma = Oplaty.objects.exclude(oplaty_type__name='nadpłata')\\\r\n .filter(mieszkanie=mieszkaniec.mieszkanie).values('data_platnosci')\\\r\n .annotate(sum=Sum('saldo'))\r\n for o in oplata_suma:\r\n Wplaty(\r\n mieszkanie=mieszkaniec.mieszkanie,\r\n data_wplaty=o['data_platnosci'],\r\n saldo=o['sum'],\r\n ).save()\r\n # dodaj nadpłade dla acc user1\r\n mieszkaniec = Mieszkaniec.objects.get(pk=1)\r\n nadplata = Oplaty_type.objects.get(name='nadpłata')\r\n for miesiac in range(1,13):\r\n Oplaty(\r\n mieszkanie=mieszkaniec.mieszkanie,\r\n oplaty_type=nadplata,\r\n data_platnosci=datetime.date(2013, miesiac, 1),\r\n saldo=float(random.randrange(-100,-10)),\r\n ).save()\r\n\r\nclass Command(BaseCommand):\r\n args = 'population_size population_per_home'\r\n help = 
'Populate database'\r\n option_list = BaseCommand.option_list + (\r\n make_option('--clear',\r\n action='store_true',\r\n dest='clear',\r\n default=False,\r\n help='Clear database before populating'),\r\n )\r\n\r\n def handle(self, *args, **options):\r\n\r\n if options['clear']:\r\n self.stdout.write('Clearing database...\\n')\r\n Oplaty.objects.all().delete()\r\n self.stdout.write('Flushed table \"oplaty\"\\n')\r\n Oplaty_type.objects.all().delete()\r\n self.stdout.write('Flushed table \"oplaty_type\"\\n')\r\n Mieszkaniec.objects.all().delete()\r\n self.stdout.write('Flushed table \"mieszkaniec\"\\n')\r\n Mieszkanie.objects.all().delete()\r\n self.stdout.write('Flushed table \"mieszkanie\"\\n')\r\n Brama.objects.all().delete()\r\n self.stdout.write('Flushed table \"brama\"\\n')\r\n Newsy.objects.all().delete()\r\n self.stdout.write('Flushed table \"newsy\"\\n')\r\n User.objects.filter(is_staff=False).delete()\r\n self.stdout.write('Flushed table \"user (except root account)\\n')\r\n\r\n if len(Mieszkaniec.objects.all()) == 0 and len(Brama.objects.all()) == 0 and len(Mieszkanie.objects.all()) == 0:\r\n self.stdout.write('Status: OK\\n')\r\n else:\r\n self.stdout.write('Status: ERROR\\n')\r\n\r\n if len(args) >= 1 and args[0].isdigit():\r\n population_size = int(args[0])\r\n else:\r\n population_size = input(\"Enter population size (default: 20): \")\r\n\r\n if len(args) >= 2 and args[1].isdigit():\r\n population_per_home = int(args[1])\r\n else:\r\n population_per_home = input(\"Enter population per home (default: 5): \")\r\n\r\n p = Populate(sqlite3.connect(DATABASE_PATH), population_size, population_per_home)\r\n p.start()\r\n self.stdout.write('Successfully populated\\n')"
},
{
"alpha_fraction": 0.7580174803733826,
"alphanum_fraction": 0.7842565774917603,
"avg_line_length": 67.5999984741211,
"blob_id": "0628fa0e1d0afa96ee489f3c090b997352bf3fdc",
"content_id": "58625122431013751317609139046f903dbfcb24",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 343,
"license_type": "no_license",
"max_line_length": 159,
"num_lines": 5,
"path": "/database.sql",
"repo_name": "arjamizo/bazy_project",
"src_encoding": "UTF-8",
"text": "CREATE USER 'bazy_user'@'localhost' IDENTIFIED BY 'bazy_password';\nCREATE DATABASE bazy_projekt CHARACTER SET utf8; #thanks to giedek because of http://stackoverflow.com/questions/6681831/setting-django-mysql-site-to-use-utf-8\nGRANT INDEX, ALTER,CREATE,SELECT,INSERT,UPDATE,DELETE ON bazy_projekt.* TO 'bazy_user'@'localhost';\n\nUSE bazy_projekt;\n"
},
{
"alpha_fraction": 0.6255442500114441,
"alphanum_fraction": 0.6444122195243835,
"avg_line_length": 22.36440658569336,
"blob_id": "9e4847c077a3c47e2945f5015dd216c0a9499d7b",
"content_id": "24e81458ce465cb2b16926c819da5c3ac0e77b8f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 2756,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 118,
"path": "/code.html",
"repo_name": "arjamizo/bazy_project",
"src_encoding": "UTF-8",
"text": "<style>\n\tli {\n\t\tmargin-top: 20px;\n\t}\n\t#context {\n\t\twidth: 600px;\n\t\tmargin: 0 auto;\n\t\tmargin-top: 10px;\t\n\t}\n\ta:visited {\n\t\tcolor: blue;\n\t}\n</style>\n\n\n<div id=\"context\">\n\t<h2>Code highlights</h2>\n\n\n\t<ul>\n\t\t<li>\n\t\t\t<strong>custom manage commands </strong></br>\n\t\t\t<a \n\t\t\t\ttarget=\"_blank\"\n\t\t\t\thref=\"https://github.com/piotrgiedziun/bazy_project/blob/master/backend/bazy/management/commands/populate.py\">\n\t\t\t\tpython manage.py populate [--clear] [populate_size] [populate_per_home]\n\t\t\t</a>\n\t\t</li>\n\n\t\t<li>\n\t\t\t<strong>custom template tags</strong></br>\n\t\t\t<a\n\t\t\t\ttarget=\"_blank\"\n\t\t\t\thref=\"https://github.com/piotrgiedziun/bazy_project/blob/master/backend/bazy/templatetags/base_extras.py\">\n\t\t\t\tnavactive, navactive_contains, pln\n\t\t\t</a><br/>\n\t\t\t<a\n\t\t\t\ttarget=\"_blank\"\n\t\t\t\thref=\"https://github.com/piotrgiedziun/bazy_project/blob/master/backend/templates/panel/oplaty.html#L52\">\n\t\t\t\tusage pln\n\t\t\t</a><br/>\n\t\t\t<a\n\t\t\t\ttarget=\"_blank\"\n\t\t\t\thref=\"https://github.com/piotrgiedziun/bazy_project/blob/master/backend/templates/panel/base.html#L8-L13\">\n\t\t\t\tusage navactive, navactive_contains\n\t\t\t</a>\n\t\t</li>\n\n\t\t<li>\n\t\t\t<strong>custom decorators</strong></br>\n\t\t\t<a\n\t\t\t\ttarget=\"_blank\"\n\t\t\t\thref=\"https://github.com/piotrgiedziun/bazy_project/blob/master/backend/bazy/decorators.py\">\n\t\t\t\tlogin_mieszkaniec_required\n\t\t\t</a>\n\t\t\t<a\n\t\t\t\ttarget=\"_blank\"\n\t\t\t\thref=\"https://github.com/piotrgiedziun/bazy_project/blob/master/backend/bazy/views.py#L28\">\n\t\t\t\tusage login_mieszkaniec_required\n\t\t\t</a>\n\t\t</li>\n\n\t\t<li>\n\t\t\t<strong>flash 
massages</strong></br>\n\t\t\t<a\n\t\t\t\ttarget=\"_blank\"\n\t\t\t\thref=\"https://github.com/piotrgiedziun/bazy_project/blob/master/backend/backend/settings.py#L125\">\n\t\t\t\tMESSAGE_STORAGE\n\t\t\t</a><br/>\n\t\t\t<a\n\t\t\t\ttarget=\"_blank\"\n\t\t\t\thref=\"https://github.com/piotrgiedziun/bazy_project/blob/master/backend/bazy/views.py#L239\">\n\t\t\t\tusage messages\n\t\t\t</a>\n\t\t</li>\n\n\t\t<li>\n\t\t\t<strong>exporting xls</strong></br>\n\t\t\t<a\n\t\t\t\ttarget=\"_blank\"\n\t\t\t\thref=\"https://github.com/piotrgiedziun/bazy_project/blob/master/backend/bazy/views.py#L165\">\n\t\t\t\tfunction\n\t\t\t</a>\n\n\n\t\t</li>\n\n\t\t<li>\n\t\t\t<strong>admin - grappelli</strong></br>\n\t\t\t<a\n\t\t\t\ttarget=\"_blank\"\n\t\t\t\thref=\"https://github.com/piotrgiedziun/bazy_project/blob/master/backend/bazy/admin.py\">\n\t\t\t\tadmin.py\n\t\t\t</a>\n\t\t</li>\n\n\t\t<li>\n\t\t\t<strong>urls (names, regexp)</strong></br>\n\t\t\t<a\n\t\t\t\ttarget=\"_blank\"\n\t\t\t\thref=\"https://github.com/piotrgiedziun/bazy_project/blob/master/backend/bazy/urls.py\">\n\t\t\t\turls.py\n\t\t\t</a>\n\t\t</li>\n\n\t\t<li>\n\t\t\t<strong>model</strong></br>\n\t\t\t<a\n\t\t\t\ttarget=\"_blank\"\n\t\t\t\thref=\"https://github.com/piotrgiedziun/bazy_project/blob/master/backend/bazy/models.py\">\n\t\t\t\tmodels.py\n\t\t\t</a>\n\t\t</li>\n\t</ul>\n\n\t<i>test server <a href=\"http://156.17.234.23:8080/auth/login/\">http://156.17.234.23:8080/auth/login/</a></i>\n\n</div>"
},
{
"alpha_fraction": 0.800000011920929,
"alphanum_fraction": 0.800000011920929,
"avg_line_length": 28.5,
"blob_id": "93615cc5c3c61fb26701dff65a15c78f2a2548e1",
"content_id": "9c09fad83fb544876f9a9338db70acb55adca68e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 60,
"license_type": "no_license",
"max_line_length": 30,
"num_lines": 2,
"path": "/dropall.sql",
"repo_name": "arjamizo/bazy_project",
"src_encoding": "UTF-8",
"text": "drop database bazy_projekt;\ndrop user bazy_user@localhost;\n\n"
},
{
"alpha_fraction": 0.6547945141792297,
"alphanum_fraction": 0.6593607068061829,
"avg_line_length": 55.68421173095703,
"blob_id": "575df1dd178fa1fd8f46a1020e3e0b2e3dcf7116",
"content_id": "ca6068d7422768b1911491542411021bbf522bb7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2190,
"license_type": "no_license",
"max_line_length": 136,
"num_lines": 38,
"path": "/backend/bazy/urls.py",
"repo_name": "arjamizo/bazy_project",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import patterns, url\r\nfrom django.contrib.auth.views import login as auth_login, password_reset,\\\r\n password_reset_done, password_reset_confirm, password_reset_complete,\\\r\n password_change_done, password_change\r\n\r\nimport views\r\n\r\nurlpatterns = patterns('bazy.views',\r\n url(r'^$', views.home, name='home'),\r\n\r\n # panels\r\n url(r'^panel/main$', views.panel_komunikaty, name='panel'),\r\n url(r'^panel/rozliczenia$', views.panel_rozliczenia, name='panel_rozliczenia'),\r\n url(r'^panel/komunikaty$', views.panel_komunikaty, name='panel_komunikaty'),\r\n url(r'^panel/komunikat/(?P<news_pk>\\d+)$', views.panel_komunikat, name='panel_komunikat'),\r\n url(r'^panel/oplaty/main$', views.panel_oplaty, name='panel_oplaty'),\r\n url(r'^panel/oplaty/chart/1$', views.panel_oplaty_chart_1, name='panel_oplaty_chart_1'),\r\n url(r'^panel/oplaty/chart/2$', views.panel_oplaty_chart_2, name='panel_oplaty_chart_2'),\r\n url(r'^panel/export$', views.panel_export_main, name='panel_export_main'),\r\n url(r'^panel/export/(?P<year>\\d+)$', views.panel_export, name='panel_export'),\r\n\r\n # auth\r\n url(r'^auth/logout$', views.logout, name='logout'),\r\n url(r'^auth/login/$', auth_login, {'template_name': 'auth/login.html', 'extra_context': {'title': 'Zaloguj'}}, name='login'),\r\n\r\n # pass change\r\n url(r'^panel/password/change/$', password_change, \r\n {'template_name': 'auth/password_change.html', 'post_change_redirect': '/panel/password/change/done/'}, name='password_change'),\r\n url(r'^panel/password/change/done/$', views.password_change_done, name='password_change_done'),\r\n # pass reset\r\n url(r'^auth/password/reset/$', password_reset,\r\n {'template_name': 'auth/password_reset.html', 'email_template_name': 'auth/password_reset_email.html'}, name='password_reset'),\r\n url(r'^auth/password/reset/done/$', password_reset_done),\r\n url(r'^auth/password/reset/(?P<uidb36>[0-9A-Za-z]+)-(?P<token>.+)/$', password_reset_confirm,\r\n 
{'post_reset_redirect' : '/auth/password/done/'}),\r\n url(r'^auth/password/done/$', password_reset_complete,\r\n {'template_name': 'auth/password_reset_complete.html'}),\r\n)"
},
{
"alpha_fraction": 0.6219656467437744,
"alphanum_fraction": 0.6302545666694641,
"avg_line_length": 30.5,
"blob_id": "2c2aa91b78f1d4e95f6e334e28aaed7e62127b13",
"content_id": "727874f70ec4c633e1ff5fe6a5ecfeac5995fd53",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3386,
"license_type": "no_license",
"max_line_length": 154,
"num_lines": 104,
"path": "/backend/bazy/models.py",
"repo_name": "arjamizo/bazy_project",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\nfrom django.db import models\r\nfrom django.contrib.auth.models import User\r\n\r\nclass Mieszkaniec(models.Model):\r\n mieszkanie = models.OneToOneField('Mieszkanie')\r\n user = models.ForeignKey(User, unique=True)\r\n imie = models.CharField(max_length=60)\r\n nazwisko = models.CharField(max_length=60)\r\n telefon = models.CharField(max_length=11)\r\n\r\n class Meta:\r\n db_table = \"mieszkaniec\"\r\n verbose_name = u\"Mieszkaniec\"\r\n verbose_name_plural = u\"Mieszkańcy\"\r\n\r\n def __unicode__(self):\r\n return u\"%s %s\" % (self.imie, self.nazwisko)\r\n\r\n @staticmethod\r\n def autocomplete_search_fields():\r\n return ('id__iexact', 'nazwisko__icontains','imie__icontains', 'mieszkanie__brama__ulica__icontains', 'mieszkanie__brama__miejscowosc__icontains')\r\n\r\nclass Brama(models.Model):\r\n numer_bramy = models.CharField(max_length=45)\r\n ulica = models.CharField(max_length=45)\r\n miejscowosc = models.CharField(max_length=45)\r\n kod_pocztowy = models.CharField(max_length=45)\r\n saldo = models.FloatField(default=0)\r\n\r\n @staticmethod\r\n def autocomplete_search_fields():\r\n return ('numer_bramy__iexact', 'miejscowosc__icontains', 'ulica__icontains', 'kod_pocztowy__icontains',)\r\n\r\n class Meta:\r\n db_table = \"brama\"\r\n verbose_name = u\"Brame\"\r\n verbose_name_plural = u\"Bramy\"\r\n\r\n def __unicode__(self):\r\n return u\"ul.%s brama %s \" % (self.ulica, self.numer_bramy)\r\n\r\nclass Mieszkanie(models.Model):\r\n brama = models.ForeignKey(Brama)\r\n numer_mieszkania = models.CharField(max_length=10)\r\n\r\n class Meta:\r\n db_table = \"mieszkanie\"\r\n verbose_name = u\"Mieszkanie\"\r\n verbose_name_plural = u\"Mieszkania\"\r\n\r\n def __unicode__(self):\r\n return \"%s %s mieszkanie %s\" % (self.brama.ulica, self.brama.numer_bramy, self.numer_mieszkania)\r\n\r\nclass Newsy(models.Model):\r\n mieszkancy = models.ManyToManyField(Mieszkaniec)\r\n tytul = models.CharField(max_length=60)\r\n tresc = 
models.TextField()\r\n data = models.DateTimeField(auto_now_add=True)\r\n\r\n class Meta:\r\n db_table = \"newsy\"\r\n ordering = ['-data', '-pk']\r\n verbose_name = u\"Newsa\"\r\n verbose_name_plural = u\"Newsy\"\r\n\r\n def __unicode__(self):\r\n return self.tytul\r\n\r\nclass Oplaty_type(models.Model):\r\n name = models.CharField(max_length=60)\r\n global_saldo = models.BooleanField(default=False)\r\n\r\n class Meta:\r\n db_table = \"oplaty_type\"\r\n verbose_name = u\"Typ opłate\"\r\n verbose_name_plural = u\"Typy opłat\"\r\n\r\n def __unicode__(self):\r\n return u\"%s\" % (self.name,)\r\n\r\nclass Oplaty(models.Model):\r\n mieszkanie = models.ForeignKey(Mieszkanie)\r\n oplaty_type = models.ForeignKey(Oplaty_type)\r\n data_platnosci = models.DateField()\r\n saldo = models.DecimalField(decimal_places=2, max_digits=10)\r\n\r\n class Meta:\r\n db_table = \"oplaty\"\r\n verbose_name = u\"Opłate\"\r\n verbose_name_plural = u\"Opłaty\"\r\n\r\n def __unicode__(self):\r\n return u\"Opłata #%d\" % (self.pk,)\r\n\r\nclass Wplaty(models.Model):\r\n data_wplaty = models.DateField()\r\n mieszkanie = models.ForeignKey(Mieszkanie)\r\n saldo = models.DecimalField(decimal_places=2, max_digits=10)\r\n\r\n class Meta:\r\n db_table = \"wplaty\"\r\n verbose_name = u\"Wpłate\"\r\n verbose_name_plural = u\"Wpłaty\""
},
{
"alpha_fraction": 0.4562813937664032,
"alphanum_fraction": 0.4603015184402466,
"avg_line_length": 27.323530197143555,
"blob_id": "c98c619b6db976c56c69a19cdcd373a9aad972fb",
"content_id": "89e255620f2b60e4fc8e60757c1c153c4aa9c855",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 995,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 34,
"path": "/backend/bazy/static/js/report.js",
"repo_name": "arjamizo/bazy_project",
"src_encoding": "UTF-8",
"text": "$(function() {\r\n // arrows handler\r\n $(\".report\").click(function(e) {\r\n var clicked = $(this);\r\n\r\n $(\".alert-success\").hide();\r\n $(\".modal-body, .modal-footer\").show();\r\n\r\n $(\"#message\").removeAttr('readonly');\r\n\r\n $('.date').text(clicked.attr('data-date'));\r\n $('.type').text(clicked.attr('data-type'));\r\n $('.price').text(clicked.attr('data-price'));\r\n $('#message').val('').focus();\r\n\r\n $('#reportModal').modal('show');\r\n e.preventDefault();\r\n return false;\r\n });\r\n\r\n $(\".send\").click(function(e) {\r\n var btn = $(this);\r\n btn.button('loading');\r\n $(\"#message\").attr('readonly', true);\r\n setTimeout(function () {\r\n btn.button('reset');\r\n $(\"#message\").removeAttr('readonly');\r\n $(\".modal-body, .modal-footer\").hide();\r\n $(\".alert-success\").show();\r\n }, 2000);\r\n e.preventDefault();\r\n return false;\r\n });\r\n});"
},
{
"alpha_fraction": 0.824999988079071,
"alphanum_fraction": 0.824999988079071,
"avg_line_length": 39,
"blob_id": "9c9cb3dccfc1d7a6a1244da23c09d4a6ac1ca175",
"content_id": "d5fc72b68d88dda64b546aeee382608988066ae9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 40,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 1,
"path": "/backend/dumpsql.sh",
"repo_name": "arjamizo/bazy_project",
"src_encoding": "UTF-8",
"text": "python manage.py sql bazy>sqlscheme.sql\n"
},
{
"alpha_fraction": 0.5381903648376465,
"alphanum_fraction": 0.5452408790588379,
"avg_line_length": 40.650001525878906,
"blob_id": "65ac18dd12ddb7ead745df4bda6fc77894c65598",
"content_id": "8f08e8da34d66b72f96e94f48fc4461bb92420fd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 854,
"license_type": "no_license",
"max_line_length": 149,
"num_lines": 20,
"path": "/backend/templates/panel/oplaty_base.html",
"repo_name": "arjamizo/bazy_project",
"src_encoding": "UTF-8",
"text": "{% extends \"panel/base.html\" %}\r\n{% load base_extras %}\r\n{% load static from staticfiles %}\r\n\r\n{% block panel_content %}\r\n <div class=\"row-fluid\">\r\n <div class=\"span12 centered-pills\">\r\n <ul class=\"nav nav-pills\">\r\n <li class=\"{% navactive request 'panel_oplaty' %}\"><a href=\"{% url panel_oplaty %}\">Tabela</a></li>\r\n <li class=\"{% navactive request 'panel_oplaty_chart_1' %}\"><a href=\"{% url panel_oplaty_chart_1 %}\">Wykres opłat od czasie</a></li>\r\n <li class=\"{% navactive request 'panel_oplaty_chart_2' %}\"><a href=\"{% url panel_oplaty_chart_2 %}\">Wykres rozkłady wydatków</a></li>\r\n </ul>\r\n </div>\r\n </div>\r\n {% block oplaty_content %}{% endblock %}\r\n{% endblock %}\r\n\r\n{% block js %}\r\n <script src=\"{% static \"js/report.js\" %}\"></script>\r\n{% endblock %}"
},
{
"alpha_fraction": 0.5792108178138733,
"alphanum_fraction": 0.5871613621711731,
"avg_line_length": 21.420690536499023,
"blob_id": "6e93b9c6f82e27611e45963c8b92fe238dc221a0",
"content_id": "d83a07ea25cffea2017a2556eb0a315ab8df8c1b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3398,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 145,
"path": "/backend/backend/settings.py",
"repo_name": "arjamizo/bazy_project",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\r\n# set of dirs\r\nimport os.path\r\nPROJECT_DIR = os.path.dirname(__file__)\r\nROOT_DIR = os.path.join(PROJECT_DIR, '..')\r\n\r\nDEBUG = True\r\nTEMPLATE_DEBUG = DEBUG\r\n\r\nADMINS = (\r\n # ('Your Name', '[email protected]'),\r\n)\r\n\r\nMANAGERS = ADMINS\r\n\r\nDATABASES = {\r\n 'default': {\r\n 'ENGINE': 'django.db.backends.sqlite3',\r\n 'NAME': os.path.join(ROOT_DIR, 'database.db'),\r\n 'USER': '',\r\n 'PASSWORD': '',\r\n 'HOST': '',\r\n 'PORT': '',\r\n },\r\n # 'default2': {\r\n # 'ENGINE': 'django.db.backends.mysql',\r\n # 'OPTIONS': {\r\n # 'read_default_file': os.path.join(ROOT_DIR,'my.cnf'),\r\n # },\r\n # }\r\n}\r\n\r\nTIME_ZONE = 'Poland'\r\nLANGUAGE_CODE = 'pl'\r\nSITE_ID = 1\r\nUSE_I18N = True\r\nUSE_L10N = True\r\nUSE_TZ = True\r\n\r\n# media\r\nMEDIA_ROOT = ''\r\nMEDIA_URL = ''\r\n\r\n# static\r\nSTATIC_ROOT = os.path.join(ROOT_DIR, 'static')\r\nSTATIC_URL = '/static/'\r\n\r\nSTATICFILES_DIRS = (\r\n os.path.join(ROOT_DIR, 'bazy', 'static'),\r\n)\r\n\r\nSTATICFILES_FINDERS = (\r\n 'django.contrib.staticfiles.finders.FileSystemFinder',\r\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\r\n)\r\n\r\nSECRET_KEY = 'l)_$(epj-6mf*(7v-c9@m$18%sxp#eg8kbsc0+v6wjs#ms^2=o'\r\n\r\nTEMPLATE_LOADERS = (\r\n 'django.template.loaders.filesystem.Loader',\r\n 'django.template.loaders.app_directories.Loader',\r\n)\r\n\r\nTEMPLATE_CONTEXT_PROCESSORS = (\r\n 'django.contrib.auth.context_processors.auth',\r\n 'django.core.context_processors.request',\r\n 'django.contrib.messages.context_processors.messages',\r\n)\r\n\r\nMIDDLEWARE_CLASSES = (\r\n 'django.middleware.common.CommonMiddleware',\r\n 'django.contrib.sessions.middleware.SessionMiddleware',\r\n 'django.middleware.csrf.CsrfViewMiddleware',\r\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\r\n 'django.contrib.messages.middleware.MessageMiddleware',\r\n)\r\n\r\nROOT_URLCONF = 'backend.urls'\r\n\r\n\r\nWSGI_APPLICATION = 
'backend.wsgi.application'\r\n\r\nTEMPLATE_DIRS = (\r\n os.path.join(ROOT_DIR, \"templates\"),\r\n)\r\n\r\nINSTALLED_APPS = (\r\n 'django.contrib.auth',\r\n 'django.contrib.contenttypes',\r\n 'django.contrib.sessions',\r\n 'django.contrib.sites',\r\n 'django.contrib.messages',\r\n 'django.contrib.staticfiles',\r\n 'grappelli',\r\n 'django.contrib.admin',\r\n 'bazy',\r\n 'bootstrapform',\r\n)\r\n\r\nLOGGING = {\r\n 'version': 1,\r\n 'disable_existing_loggers': False,\r\n 'filters': {\r\n 'require_debug_false': {\r\n '()': 'django.utils.log.RequireDebugFalse'\r\n }\r\n },\r\n 'handlers': {\r\n 'mail_admins': {\r\n 'level': 'ERROR',\r\n 'filters': ['require_debug_false'],\r\n 'class': 'django.utils.log.AdminEmailHandler'\r\n }\r\n },\r\n 'loggers': {\r\n 'django.request': {\r\n 'handlers': ['mail_admins'],\r\n 'level': 'ERROR',\r\n 'propagate': True,\r\n },\r\n }\r\n}\r\n\r\n# django message\r\nMESSAGE_STORAGE = 'django.contrib.messages.storage.cookie.CookieStorage'\r\n\r\n# mail configuration\r\nif DEBUG:\r\n EMAIL_HOST = 'localhost'\r\n EMAIL_PORT = 1025\r\n EMAIL_HOST_USER = ''\r\n EMAIL_HOST_PASSWORD = ''\r\n EMAIL_USE_TLS = False\r\n DEFAULT_FROM_EMAIL = 'noreply@localhost'\r\n\r\nAUTH_PROFILE_MODULE = 'bazy.mieszkaniec'\r\n\r\nKOMUNIKATY_PER_PAGE = 5\r\nLOGIN_URL = \"/auth/login\"\r\nLOGIN_REDIRECT_URL = \"/panel/main\"\r\nGRAPPELLI_ADMIN_TITLE = \"<a href=\\\"http://156.17.234.23:8080/\\\">Spółdzielnia ABC</a>\"\r\n\r\n#fix\r\nDECIMAL_SEPARATOR = '.'\r\n"
},
{
"alpha_fraction": 0.6352128982543945,
"alphanum_fraction": 0.6363636255264282,
"avg_line_length": 32.84000015258789,
"blob_id": "c74d2f230c20c05ed511ee6961da45859cc73202",
"content_id": "df327e141d3f284fc90a9779ed71a351803563a0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 871,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 25,
"path": "/backend/bazy/decorators.py",
"repo_name": "arjamizo/bazy_project",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\nfrom django.shortcuts import redirect\r\nfrom django.contrib import messages\r\nfrom django.contrib.auth import logout\r\nfrom functools import wraps\r\nfrom models import Mieszkaniec\r\n\r\ndef login_mieszkaniec_required(view_func):\r\n @wraps(view_func)\r\n def _checklogin(request, *args, **kwargs):\r\n if not request.user.is_authenticated():\r\n return redirect('login')\r\n\r\n if not request.user.is_active:\r\n messages.error(request, \"Brak uprawnień\")\r\n return redirect('login')\r\n\r\n try:\r\n request.user.get_profile()\r\n return view_func(request, *args, **kwargs)\r\n except Mieszkaniec.DoesNotExist:\r\n messages.error(request, \"To konto nie posiada przypisanego mieszkańca\")\r\n logout(request)\r\n return redirect('login')\r\n return _checklogin"
},
{
"alpha_fraction": 0.6970954537391663,
"alphanum_fraction": 0.7136929631233215,
"avg_line_length": 29.125,
"blob_id": "e96b3c6e98935e0be2fae3175e8bc43b9e581403",
"content_id": "f0f4feba051002cbc8427a014d7c14491e1cd860",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 241,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 8,
"path": "/README.md",
"repo_name": "arjamizo/bazy_project",
"src_encoding": "UTF-8",
"text": "Bazy - Projekt (PWr)\n============\n\nInstalacja opisana w [bazy-projekt.pdf](https://github.com/piotrgiedziun/bazy_project/blob/master/bazy-projekt.pdf?raw=true)\n\n**start smtp server**\n\n python -m smtpd -n -c DebuggingServer localhost:1025\n"
},
{
"alpha_fraction": 0.5092592835426331,
"alphanum_fraction": 0.7037037014961243,
"avg_line_length": 34.66666793823242,
"blob_id": "6f91631e789cae34f589ac732d34a96021d991ee",
"content_id": "8763b8ecb0d7822c40a3c247efbde3db22585159",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 108,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 3,
"path": "/runserver.sh",
"repo_name": "arjamizo/bazy_project",
"src_encoding": "UTF-8",
"text": "#sensible-browser http://156.17.234.23:8080\n#sudo \\\npython `find . -iname manage.py` runserver 0.0.0.0:8080\n\n"
},
{
"alpha_fraction": 0.6545378565788269,
"alphanum_fraction": 0.6570472717285156,
"avg_line_length": 37.849998474121094,
"blob_id": "1db2d1341f1862c89ed2d7ca989f4ca80756e60d",
"content_id": "1f2db2c99dec61eb23772e77924a9d0e3aa0f490",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2391,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 60,
"path": "/backend/bazy/admin.py",
"repo_name": "arjamizo/bazy_project",
"src_encoding": "UTF-8",
"text": "from bazy.models import *\r\nfrom django.contrib import admin\r\n\r\nclass MieszkanieAdmin(admin.ModelAdmin):\r\n raw_id_fields = ('brama',)\r\n autocomplete_lookup_fields = {\r\n 'fk': ['brama'],\r\n }\r\n def brama__ulica(self, o):\r\n return o.brama.ulica\r\n\r\n def brama__miejscowosc(self, o):\r\n return o.brama.miejscowosc\r\n\r\n list_display = ('numer_mieszkania', 'mieszkaniec', 'brama')\r\n list_filter = ['brama__ulica', 'brama__miejscowosc']\r\n search_fields = ['brama__ulica', 'brama__miejscowosc']\r\n\r\nclass OplatyAdmin(admin.ModelAdmin):\r\n\r\n def mieszkanie__mieszkaniec(self, o):\r\n return o.mieszkanie.mieszkaniec\r\n\r\n list_display = ('mieszkanie', 'mieszkanie__mieszkaniec', 'data_platnosci', 'oplaty_type', 'saldo')\r\n list_filter = ['mieszkanie__brama', 'mieszkanie', 'mieszkanie__mieszkaniec', 'data_platnosci', 'oplaty_type']\r\n search_fields = ['mieszkanie__brama__ulica', 'mieszkanie__brama__miejscowosc', 'mieszkanie__mieszkaniec__nazwisko']\r\n\r\nclass NewsyAdmin(admin.ModelAdmin):\r\n raw_id_fields = ('mieszkancy',)\r\n autocomplete_lookup_fields = {\r\n 'm2m': ['mieszkancy'],\r\n }\r\n\r\n def tresc__trim(self, o):\r\n return o.tresc[:50]+(len(o.tresc)>50 and [\"...\"] or [\"\"])[0]\r\n\r\n list_display = ('tytul', 'tresc__trim', 'data')\r\n list_filter = ['data', 'mieszkancy']\r\n search_fields = ('tytul', 'tresc__trim')\r\n\r\nclass MieszkaniecAdmin(admin.ModelAdmin):\r\n def imie__nazwisko(self, o):\r\n return \"%s %s\" % (o.imie, o.nazwisko)\r\n\r\n list_display = ('imie__nazwisko', 'imie', 'nazwisko', 'telefon', 'mieszkanie')\r\n list_filter = ['mieszkanie__brama__ulica', 'mieszkanie__brama__miejscowosc', 'mieszkanie__brama']\r\n search_fields = ['mieszkanie__brama__ulica', 'mieszkanie__brama__miejscowosc']\r\n\r\nclass WplatyAdmin(admin.ModelAdmin):\r\n list_display = ('data_wplaty', 'saldo')\r\n list_filter = ['mieszkanie__brama', 'mieszkanie', 'mieszkanie__mieszkaniec', 'data_wplaty']\r\n search_fields = 
['mieszkanie__brama__ulica', 'mieszkanie__brama__miejscowosc', 'mieszkanie__mieszkaniec__nazwisko']\r\n\r\nadmin.site.register(Mieszkanie, MieszkanieAdmin)\r\nadmin.site.register(Newsy, NewsyAdmin)\r\nadmin.site.register(Mieszkaniec, MieszkaniecAdmin)\r\nadmin.site.register(Brama)\r\nadmin.site.register(Oplaty, OplatyAdmin)\r\nadmin.site.register(Oplaty_type)\r\nadmin.site.register(Wplaty, WplatyAdmin)\r\n"
},
{
"alpha_fraction": 0.37154150009155273,
"alphanum_fraction": 0.3754940629005432,
"avg_line_length": 37.98245620727539,
"blob_id": "7f2d4108a8ae6183a3448a2442f083950f74e407",
"content_id": "e9143b542a30f08280786011748a5a95d887af3e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 2279,
"license_type": "no_license",
"max_line_length": 149,
"num_lines": 57,
"path": "/backend/templates/panel/komunikaty.html",
"repo_name": "arjamizo/bazy_project",
"src_encoding": "UTF-8",
"text": "{% extends \"panel/base.html\" %}\r\n{% load i18n %}\r\n{% load static from staticfiles %}\r\n\r\n{% block panel_content %}\r\n {% if newsy %}\r\n <table class=\"table table-bordered\">\r\n <thead>\r\n <tr>\r\n <th width=\"10%\">#</th>\r\n <th width=\"65%\">Tytuł</th>\r\n <th>Data</th>\r\n </tr>\r\n </thead>\r\n <tbody>\r\n {% for news in newsy %}\r\n <tr>\r\n <td>{{ forloop.counter|add:newsy.start_index|add:-1 }}</td>\r\n <td><a href=\"{% url panel_komunikat news.pk %}\">{{ news.tytul|truncatechars:50 }}</a></td>\r\n <td>{{ news.data|date:\"D d M Y\" }}</td>\r\n </tr>\r\n {% endfor %}\r\n </tbody>\r\n </table>\r\n <div class=\"pagination\">\r\n <ul>\r\n {% if newsy.has_previous %}\r\n <li><a id=\"prev_page\" href=\"?page={{ newsy.previous_page_number }}\" class=\"prev\">‹‹ {% trans \"previous\" %}</a></li>\r\n {% else %}\r\n <li class=\"disabled prev\"><a href=\"#\">‹‹ {% trans \"previous\" %}</a></li>\r\n {% endif %}\r\n {% for page in pages %}\r\n {% if page %}\r\n {% ifequal page newsy.number %}\r\n <li class=\"current page active\"><a href=\"#\">{{ page }}</a></li>\r\n {% else %}\r\n <li><a href=\"?page={{ page }}\" class=\"page\">{{ page }}</a></li>\r\n {% endifequal %}\r\n {% else %}\r\n ...\r\n {% endif %}\r\n {% endfor %}\r\n {% if newsy.has_next %}\r\n <li><a id=\"next_page\" href=\"?page={{ newsy.next_page_number }}\" class=\"next\">{% trans \"next\" %} ››</a></li>\r\n {% else %}\r\n <li class=\"disabled next\"><a href=\"#\">{% trans \"next\" %} ››</a></li>\r\n {% endif %}\r\n </ul>\r\n </div>\r\n {% else %}\r\n Brak komunikatów\r\n {% endif %}\r\n{% endblock %}\r\n\r\n{% block js %}\r\n <script src=\"{% static \"js/arrows.js\" %}\"></script>\r\n{% endblock %}"
}
] | 18 |
msrocketgal/web-caesar
|
https://github.com/msrocketgal/web-caesar
|
d78f1599deb1206320d448fd4429a75d2d5391f1
|
e83f2e3fc5bd74942efd39b3d3b6dd8f32179f52
|
f8a36d58eed7574ff06757cd0a8d2b6cdac49881
|
refs/heads/master
| 2021-01-23T05:09:43.999335 | 2017-03-27T02:21:32 | 2017-03-27T02:21:32 | 86,282,008 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6758053302764893,
"alphanum_fraction": 0.6806031465530396,
"avg_line_length": 35.474998474121094,
"blob_id": "cb0cceb4f7e4c1d864b50ec7673dac598f2c55d7",
"content_id": "277946d6a3bf115422878c766d96fd24c9518241",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1459,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 40,
"path": "/caesar.py",
"repo_name": "msrocketgal/web-caesar",
"src_encoding": "UTF-8",
"text": "def encrypt(text, rot):\n \"\"\" Write one more function called encrypt(text, rot), which receives as input\n a string and an integer & will return the result of rotating each letter in\n the text by rot places to the right. \"\"\"\n resultText = \"\"\n for ltr in text:\n resultText += (rotate_character(ltr,rot))\n return resultText\n\n\ndef alphabet_position(letter):\n \"\"\" write a function alphabet_position(letter), which receives a letter (that is,\n a string with only one alphabetic character) and returns the 0-based numerical\n position of that letter within the alphabet. \"\"\"\n\n alphabet = (\"abcdefghijklmnopqrstuvwxyz\")\n return int(alphabet.find(letter))\n\n\ndef rotate_character(char, rot):\n \"\"\" write helper function rotate_character(char, rot) which receives\n a character char (that is, a string of length 1), and an integer rot.\n Your function should return a new string of length 1, the result of rotating\n char by rot number of places to the right.\"\"\"\n import string\n alphabet = (\"abcdefghijklmnopqrstuvwxyz\")\n if char.lower() not in string.ascii_lowercase:\n return char\n if char == char.upper():\n uCase = True\n else:\n uCase = False\n lChar = char.lower()\n newCharPos = alphabet_position(lChar) + int(rot)\n while newCharPos > 25:\n newCharPos = newCharPos - 26\n newChar = alphabet[newCharPos]\n if uCase:\n newChar = newChar.upper()\n return newChar\n"
}
] | 1 |
romgvili/SpotiShare
|
https://github.com/romgvili/SpotiShare
|
584e8c5cd4589671977e0a86b074547bb83982c4
|
be68421aa6c18eb0cc38c09fddada6a8e6c64148
|
f5cd65d61c08284402b9c8bd985c1f0d40bddb46
|
refs/heads/master
| 2022-11-15T02:50:39.312590 | 2020-07-06T00:01:17 | 2020-07-06T00:01:17 | 265,402,200 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5625745058059692,
"alphanum_fraction": 0.5772745609283447,
"avg_line_length": 31.269229888916016,
"blob_id": "1aa994bc7641632218ad37a4f45b7fcb82f15fa3",
"content_id": "07b8ee4d68a70b4f9e428605fffb2301f85aeb4b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5034,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 156,
"path": "/client 1.2.py",
"repo_name": "romgvili/SpotiShare",
"src_encoding": "UTF-8",
"text": "import socket\n\n__author__ = 'rom gvili'\n\nimport webbrowser\nimport spotipy\nfrom os import path\nimport spotipy.util as util\nimport sys\nimport keyboard\nimport time\n\n\ndef current_milli_time():\n return int(round(time.time() * 1000))\n\n\ndef isPlaying():\n try:\n if spoyifyObj.currently_playing()[\"is_playing\"]:\n return True\n return False\n except:\n return False\n\n\ntrackLists = []\naddress = '127.0.0.1'\nport = 8000\nbsize = 1024\nscope = 'playlist-modify-public streaming user-modify-playback-state user-read-playback-state user-read-currently-playing'\nname = input(\"enter username \")\ntry:\n try:\n if path.exists(f\".cache-{name}\"):\n file = open(f\".cache-{name}\")\n text = file.read()\n text= text[270:401]\n token = spotipy.SpotifyOAuth.refresh_access_token(text)\n\n except:\n token = util.prompt_for_user_token(name, scope,\n client_id='221e9a5fac5c4f40bb2de9c33ce7a863',\n client_secret='8c5e85b7165840bbb605b43924952889',\n redirect_uri='http://google.com/')\nexcept ConnectionError:\n webbrowser.open('https://imgur.com/a/zZ3OW0f')\n sys.exit()\nspoyifyObj = spotipy.Spotify(auth=token)\nuser = spoyifyObj.current_user()\n\n\ndef ChooseDevice():\n global device\n devices = spoyifyObj.devices()\n devices = devices['devices']\n try:\n devices[0]\n except:\n return False\n print(\"your devices:\\n\")\n for device in devices:\n print(device['name'])\n name = input(\"enter device name \")\n for devic in devices:\n if devic['name'] == name:\n device = devic\n return True\n return False\nif ChooseDevice()==False:\n print(\"please open spotify on the desired device and connect\")\n time.sleep(5)\n while ChooseDevice()==False:\n time.sleep(5)\ndisplayName = user['display_name']\nFollowers = user['followers']['total']\nprint(\"welcome to spotipy \" + displayName)\nprint(\"you have \" + str(Followers) + \" followers\")\n\nclientSocket = socket.socket()\nclientSocket.connect((address, port))\nprint(clientSocket.recv(bsize).decode('UTF8'))\nx = 
clientSocket.recv(34).decode('UTF8')\n\nprint(x)\nif \"T\" in x:\n track = clientSocket.recv(69).decode('UTF8')\n pos = clientSocket.recv(bsize).decode('UTF8')\n print(track)\n print(pos)\n spoyifyObj.start_playback(device['id'], None, [track], None,current_milli_time()-int(pos)+200)\n print(\"welcome to the room \" + displayName)\nelse:\n print(\"welcome to the room,youre the first visitor\")\n clientSocket.send(\"song is done\".encode())\n trackurl = []\n trackurl.append(clientSocket.recv(bsize).decode('UTF8'))\n print(trackurl)\n spoyifyObj.start_playback(device['id'], None, trackurl)\n while(isPlaying()==False):\n pass\n print(\"x\")\nwhile (True):\n time.sleep(5)\n print(\"if you'd like to add a song press esc!\")\n while isPlaying()!=False:\n if keyboard.is_pressed(\"esc\"):\n artistName = input(\"enter an artist name \")\n results = spoyifyObj.search(artistName, 1, 0, \"artist\")\n artist = results['artists']['items'][0]\n print(artist['name'] + \" has \" + str(artist['followers']['total']) + \" followers , and his genre is \" +\n artist['genres'][0])\n webbrowser.open(artist['images'][0]['url'])\n artistId = artist[\"id\"]\n trackUri = []\n trackArt = []\n z = 1\n albums = spoyifyObj.artist_albums(artistId)\n albums = albums['items']\n for item in albums:\n print(\"album : \" + item['name'])\n albumId = item['id']\n albumArt = item['images'][0]['url']\n trackResults = spoyifyObj.album_tracks(albumId)\n trackResults = trackResults['items']\n for item in trackResults:\n print(str(z) + \": \" + item['name'])\n trackUri.append(item['uri'])\n trackArt.append(albumArt)\n z = z + 1\n print()\n songSelection = input(\"enter song to add to Queue\")\n try:\n clientSocket.send(trackUri[int(songSelection) - 1].encode())\n print(clientSocket.recv(bsize).decode())\n print(\"if you'd like to add a song press esc!\")\n except:\n print(\"Ivalid song number\")\n print(\"if you'd like to retry press esc!\")\n print(\"a\")\n clientSocket.send(\"song is done\".encode())\n track 
= clientSocket.recv(bsize).decode()\n if \"!\" in track:\n track = track[0:36]\n pos = track[38:]\n print(pos)\n spoyifyObj.start_playback(device['id'], None, [track],None,pos)\n else:\n print(track)\n spoyifyObj.start_playback(device['id'], None, [track])\n while(isPlaying()==False):\n pass\n\n# clientSocket.send(str(i).encode())\n# data = clientSocket.recv(bsize)\n# data=data.decode('utf8')\n"
},
{
"alpha_fraction": 0.8260869383811951,
"alphanum_fraction": 0.8260869383811951,
"avg_line_length": 33.5,
"blob_id": "b1085870986c8549b992a3e5ef8972e99c39dcad",
"content_id": "c93ea130ee9cbca65826bb7e739f534d97493075",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 69,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 2,
"path": "/README.md",
"repo_name": "romgvili/SpotiShare",
"src_encoding": "UTF-8",
"text": "# SpotiShare\na spotify listening room built using spotipy and pygame\n"
},
{
"alpha_fraction": 0.4370584487915039,
"alphanum_fraction": 0.44765573740005493,
"avg_line_length": 28.233009338378906,
"blob_id": "eb19938fede8292cb7e899b1841f87dc83d113c6",
"content_id": "8ca8ebff77cd8846c315e70bc5018c927ddf91af",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3114,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 103,
"path": "/server 1.1.py",
"repo_name": "romgvili/SpotiShare",
"src_encoding": "UTF-8",
"text": "import socket\r\nimport threading\r\nimport queue\r\nimport time\r\nimport spotipy\r\n\r\nglobal songslist\r\nsongslist = []\r\nuntil = -5\r\ndef current_milli_time():\r\n return (round(time.time() * 1000))\r\n\r\n\r\ndaJoker = \"spotify:track:4uLU6hMCjMI75M1A2tKUQC\"\r\n\r\n\r\nclass ThreadedServer(threading.Thread):\r\n def __init__(self, host, port):\r\n super(ThreadedServer, self).__init__()\r\n self.plist = []\r\n self.host = host\r\n self.port = port\r\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\n self.sock.bind((self.host, self.port))\r\n\r\n def run(self):\r\n self.sock.listen(5)\r\n while True:\r\n client, address = self.sock.accept()\r\n p = listenToClient(client, address)\r\n p.start()\r\n self.plist.append(p)\r\n\r\n\r\nclass listenToClient(threading.Thread):\r\n def __init__(self, client, addres):\r\n super(listenToClient, self).__init__()\r\n self.client = client\r\n self.adress = addres\r\n self.size = 1024\r\n\r\n def run(self):\r\n global songslist, t, cur,x,until\r\n print('runnung ', self.adress)\r\n self.client.send((\"connected successfully\").encode())\r\n self.client.sendall(x.encode())\r\n if x == \"T\":\r\n time.sleep(0.5)\r\n self.client.sendall(t.encode())\r\n time.sleep(0.5)\r\n self.client.sendall(str(cur).encode())\r\n x = \"T\"\r\n while True:\r\n try:\r\n data = self.client.recv(self.size)\r\n data = data.decode('UTF8')\r\n if data:\r\n if data == \"song is done\":\r\n print(\"X\")\r\n if until !=-5:\r\n if time.perf_counter()-until<5:\r\n self.client.send((t+\"!\").encode())\r\n else:\r\n t = songslist[0]\r\n until = time.perf_counter()\r\n until = time.perf_counter()\r\n self.client.send(t.encode())\r\n cur = current_milli_time()\r\n songslist.remove(t)\r\n else:\r\n t = daJoker\r\n self.client.send(daJoker.encode())\r\n cur = current_milli_time()\r\n until = time.perf_counter()\r\n else:\r\n print(data)\r\n songslist.append(data)\r\n 
self.client.send(\"added\".encode())\r\n\r\n\r\n\r\n\r\n except:\r\n return False\r\n\r\n\r\nif __name__ == \"__main__\":\r\n global x\r\n global t\r\n t = daJoker\r\n x = \"F\"\r\n while True:\r\n port_num = 8000\r\n try:\r\n port_num = int(port_num)\r\n break\r\n except ValueError:\r\n pass\r\n\r\n Ts = ThreadedServer('0.0.0.0', port_num)\r\n Ts.start()\r\n Ts.join()\r\n"
},
{
"alpha_fraction": 0.4119464159011841,
"alphanum_fraction": 0.4343262314796448,
"avg_line_length": 44.04789733886719,
"blob_id": "33352db027085b4024afe83ef8ee0fef3527a054",
"content_id": "5e1d27ad510fd6151217850111b5719eade79474",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 31750,
"license_type": "no_license",
"max_line_length": 229,
"num_lines": 689,
"path": "/client1.8.py",
"repo_name": "romgvili/SpotiShare",
"src_encoding": "UTF-8",
"text": "__author__ = 'rom gvili'\r\nimport win32clipboard\r\nimport pygame as pg\r\nimport socket\r\nimport spotipy\r\nfrom os import path\r\nimport os\r\nimport spotipy.util as util\r\nimport sys\r\nimport numpy as np\r\nimport time\r\nimport io\r\nimport urllib.request\r\nfrom PIL import Image\r\nfrom resizeimage import resizeimage\r\ndef current_milli_time():\r\n return int(round(time.time() * 1000))\r\ndef resource_path(relative_path):\r\n try:\r\n # PyInstaller creates a temp folder and stores path in _MEIPASS\r\n base_path = sys._MEIPASS\r\n except Exception:\r\n base_path = os.path.abspath(\".\")\r\n\r\n return os.path.join(base_path, relative_path)\r\ndef pygameanticrasher():\r\n for event in pg.event.get():\r\n if event.type == pg.QUIT:\r\n return False\r\n return True\r\ndef levenshtein(seq1, seq2):\r\n size_x = len(seq1) + 1\r\n size_y = len(seq2) + 1\r\n matrix = np.zeros ((size_x, size_y))\r\n for x in range(size_x):\r\n matrix [x, 0] = x\r\n for y in range(size_y):\r\n matrix [0, y] = y\r\n\r\n for x in range(1, size_x):\r\n for y in range(1, size_y):\r\n if seq1[x-1] == seq2[y-1]:\r\n matrix [x,y] = min(\r\n matrix[x-1, y] + 1,\r\n matrix[x-1, y-1],\r\n matrix[x, y-1] + 1\r\n )\r\n else:\r\n matrix [x,y] = min(\r\n matrix[x-1,y] + 1,\r\n matrix[x-1,y-1] + 1,\r\n matrix[x,y-1] + 1\r\n )\r\n return (matrix[size_x - 1, size_y - 1])\r\n\r\n\r\ndef main():\r\n while pygameanticrasher():\r\n smslist = []\r\n heb = 'אבגדהוזחטיכלמנסעפצקרשתץףך'\r\n address = '127.0.0.1'\r\n port = 8000\r\n bsize = 1024\r\n scope = 'playlist-modify-public streaming user-modify-playback-state user-read-playback-state user-read-currently-playing'\r\n screen = pg.display.set_mode((640, 480))\r\n pg.display.set_caption('SPOTISHARE by Rom Gvili')\r\n gameIcon = pg.image.load('C:/Users/rom/PycharmProjects/top5/graphics/icon.png')\r\n pg.display.set_icon(gameIcon)\r\n startscreen = pg.image.load(\"C:/Users/rom/PycharmProjects/top5/graphics/start.png\")\r\n font = pg.font.Font(None, 
32)\r\n clock = pg.time.Clock()\r\n input_box = pg.Rect(224, 150, 140, 32)\r\n color_inactive = pg.Color('lightskyblue3')\r\n color_active = pg.Color('dodgerblue2')\r\n color = color_inactive\r\n active = False\r\n text = 'enter username'\r\n done = False\r\n b = False\r\n while not done:\r\n for event in pg.event.get():\r\n if event.type == pg.QUIT:\r\n pg.quit()\r\n if event.type == pg.MOUSEBUTTONDOWN:\r\n # If the user clicked on the input_box rect.\r\n if input_box.collidepoint(event.pos):\r\n # Toggle the active variable.\r\n active = not active\r\n else:\r\n active = False\r\n # Change the current color of the input box.\r\n color = color_active if active else color_inactive\r\n if event.type == pg.KEYDOWN:\r\n if active:\r\n if event.key == pg.K_RETURN:\r\n b = True\r\n elif event.key == pg.K_BACKSPACE:\r\n text = text[:-1]\r\n elif event.key == pg.K_v and pg.key.get_mods() & pg.KMOD_CTRL:\r\n win32clipboard.OpenClipboard()\r\n text+= win32clipboard.GetClipboardData()\r\n\r\n else:\r\n text += event.unicode\r\n if b == True:\r\n break\r\n\r\n\r\n\r\n screen.blit(startscreen,(0,0))\r\n # Render the current text.\r\n txt_surface = font.render(text, True, color)\r\n # Resize the box if the text is too long.\r\n width = max(200, txt_surface.get_width()+10)\r\n input_box.w = width\r\n # Blit the text.\r\n screen.blit(txt_surface, (input_box.x+5, input_box.y+5))\r\n # Blit the input_box rect.\r\n pg.draw.rect(screen, color, input_box, 2)\r\n\r\n pg.display.flip()\r\n clock.tick(30)\r\n name = text\r\n pg.display.quit()\r\n try:\r\n if path.exists(f\".cache-{name}\"):\r\n file = open(f\".cache-{name}\")\r\n text = file.read()\r\n text = text[270:401]\r\n token = spotipy.SpotifyOAuth.refresh_access_token(text)\r\n else:\r\n token = util.prompt_for_user_token(name, scope,\r\n client_id='221e9a5fac5c4f40bb2de9c33ce7a863',\r\n client_secret='8c5e85b7165840bbb605b43924952889',\r\n redirect_uri='http://google.com/')\r\n\r\n except:\r\n token = 
util.prompt_for_user_token(name, scope,\r\n client_id='221e9a5fac5c4f40bb2de9c33ce7a863',\r\n client_secret='8c5e85b7165840bbb605b43924952889',\r\n redirect_uri='http://google.com/')\r\n spoyifyObj = spotipy.Spotify(auth=token)\r\n user = spoyifyObj.current_user()\r\n pg.init()\r\n screen = pg.display.set_mode((640, 480))\r\n pg.display.set_caption('SPOTISHARE by Rom Gvili')\r\n devicesimg = pg.image.load(\"C:/Users/rom/PycharmProjects/top5/graphics/devices.png\")\r\n font = pg.font.SysFont(None, 34)\r\n font2 = pg.font.SysFont(None, 20)\r\n fontheb = pg.font.SysFont('arial',20)\r\n clock = pg.time.Clock()\r\n input_box = pg.Rect(224, 150, 140, 32)\r\n color_inactive = pg.Color('lightskyblue3')\r\n color_active = pg.Color('dodgerblue2')\r\n color = color_inactive\r\n active = False\r\n text = 'enter device'\r\n done = False\r\n global device\r\n devices = spoyifyObj.devices()\r\n devices = devices['devices']\r\n lis = \"your devices: \"\r\n for device in devices:\r\n lis += device['name'] + \", \"\r\n LENG = len(devices)\r\n text = \"enter the desired device\"\r\n text2 = font2.render(lis, True,(0,255,0), (0,0,128))\r\n textRect2 = text2.get_rect()\r\n textRect2.center = (640 // 2, 480 // 2)\r\n screen.blit(text2, textRect2)\r\n b = False\r\n while not done:\r\n for event in pg.event.get():\r\n if event.type == pg.QUIT:\r\n pg.quit()\r\n if event.type == pg.MOUSEBUTTONDOWN:\r\n # If the user clicked on the input_box rect.\r\n if input_box.collidepoint(event.pos):\r\n # Toggle the active variable.\r\n active = not active\r\n else:\r\n active = False\r\n # Change the current color of the input box.\r\n color = color_active if active else color_inactive\r\n if event.type == pg.KEYDOWN:\r\n if active:\r\n if event.key == pg.K_RETURN:\r\n b = True\r\n elif event.key == pg.K_BACKSPACE:\r\n text = text[:-1]\r\n elif event.key == pg.K_v and pg.key.get_mods() & pg.KMOD_CTRL:\r\n win32clipboard.OpenClipboard()\r\n text += win32clipboard.GetClipboardData()\r\n else:\r\n text 
+= event.unicode\r\n if b == True:\r\n break\r\n devices = spoyifyObj.devices()['devices']\r\n if len(devices)!=LENG:\r\n lis = \"your devices: \"\r\n for device in devices:\r\n lis += device['name'] + \", \"\r\n LENG = len(devices)\r\n screen.blit(devicesimg, (0, 0))\r\n # Render the current text.\r\n txt_surface = font.render(text, True, color)\r\n for c in text:\r\n if c not in heb:\r\n txt_surface = font.render(text, True, color)\r\n else:\r\n txt_surface = fontheb.render(text[::-1], True, color)\r\n break\r\n # Resize the box if the text is too long.\r\n width = max(200, txt_surface.get_width() + 10)\r\n input_box.w = width\r\n # Blit the text.\r\n screen.blit(txt_surface, (input_box.x + 5, input_box.y + 5))\r\n # Blit the input_box rect.\r\n pg.draw.rect(screen, color, input_box, 2)\r\n text2 = font2.render(lis, True, (0, 255, 0), (0, 0, 128))\r\n textRect2 = text2.get_rect()\r\n textRect2.center = (640 // 2, 480 // 2)\r\n screen.blit(text2, textRect2)\r\n pg.display.flip()\r\n clock.tick(30)\r\n for d in devices:\r\n print(d['name'])\r\n if text == d['name']:\r\n device= d\r\n break\r\n displayName = user['display_name']\r\n Followers = user['followers']['total']\r\n bg = pg.image.load(\"C:/Users/rom/PycharmProjects/top5/graphics/playing.png\")\r\n screen.blit(bg,(0,0))\r\n text2 = font2.render(\"welcome to SpotiShare \" + displayName+ \"!\", True, (0, 255, 0), (0, 0, 128))\r\n textRect2 = text2.get_rect()\r\n textRect2.center = (640 // 2, 480 // 2)\r\n screen.blit(text2, textRect2)\r\n text3 = font2.render(\"you have \" + str(Followers)+ \"followers!\", True, (0, 255, 0), (0, 0, 128))\r\n textRect3 = text3.get_rect()\r\n textRect3.center = (640 // 2, (480 // 2)+20)\r\n screen.blit(text3, textRect3)\r\n pg.display.flip()\r\n time.sleep(2)\r\n clientSocket = socket.socket()\r\n clientSocket.connect((address, port))\r\n clientSocket.settimeout(2)\r\n print(clientSocket.recv(22).decode('UTF8'))\r\n x = clientSocket.recv(34).decode('UTF8')\r\n\r\n def 
play():\r\n if \"T\" in x:\r\n try:\r\n spoyifyObj.start_playback(device['id'], None, [track], None, pos)\r\n except:\r\n print(\"plase check you're connected to the internet and spotify is open on the device\")\r\n time.sleep(5)\r\n play()\r\n else:\r\n try:\r\n spoyifyObj.start_playback(device['id'], None, trackurl)\r\n except:\r\n print(\"plase check you're connected to the internet and spotify is open on the device\")\r\n time.sleep(5)\r\n play()\r\n def isPlaying():\r\n try:\r\n if spoyifyObj.currently_playing()[\"is_playing\"]:\r\n return True\r\n return False\r\n except:\r\n return False\r\n if \"T\" in x:\r\n yo = pg.image.load(\"C:/Users/rom/PycharmProjects/top5/graphics/welcome.png\")\r\n screen.blit(yo,(0,0))\r\n pg.display.flip()\r\n track = clientSocket.recv(69).decode('UTF8')\r\n pos = clientSocket.recv(bsize).decode('UTF8')\r\n play()\r\n while (isPlaying() == False):\r\n pass\r\n else:\r\n yo = pg.image.load(\"C:/Users/rom/PycharmProjects/top5/graphics/first.png\")\r\n screen.blit(yo,(0,0))\r\n pg.display.flip()\r\n clientSocket.send(\"song is done\".encode())\r\n trackurl = []\r\n while True:\r\n try:\r\n trackurl.append(clientSocket.recv(bsize).decode('UTF8'))\r\n break\r\n except:\r\n pass\r\n\r\n play()\r\n while (isPlaying() == False):\r\n pass\r\n time.sleep(5)\r\n yo = pg.image.load(\"C:/Users/rom/PycharmProjects/top5/graphics/last.png\")\r\n screen.blit(yo,(0,0))\r\n addbutton = pg.Rect(500, 40, 100, 100)\r\n nextbutton = pg.Rect(500, 200, 100, 100)\r\n textbutton = pg.Rect(500, 360, 100, 100)\r\n nextbuttontext = font2.render(\"play next song\", True, (0, 255, 0), (0, 0, 128))\r\n nextbuttonRect = nextbuttontext.get_rect()\r\n nextbuttonRect.center = (550, 250)\r\n screen.blit(nextbuttontext, nextbuttonRect)\r\n addbuttontext = font2.render(\"add a song\", True, (0, 255, 0), (0, 0, 128))\r\n addbuttonRect = addbuttontext.get_rect()\r\n addbuttonRect.center = (550, 90)\r\n screen.blit(addbuttontext, addbuttonRect)\r\n smsbuttontext = 
font2.render(\"send a message\", True, (0, 255, 0), (0, 0, 128))\r\n smsbuttonRect = smsbuttontext.get_rect()\r\n smsbuttonRect.center = (550, 410)\r\n screen.blit(smsbuttontext, smsbuttonRect)\r\n def sendsms():\r\n color = color_inactive\r\n screen.blit(bg, (0, 0))\r\n text = \"enter a message to send\"\r\n txt_surface = font.render(text, True, color)\r\n width = max(200, txt_surface.get_width() + 10)\r\n input_box.w = width\r\n screen.blit(txt_surface, (input_box.x + 5, input_box.y + 5))\r\n pg.draw.rect(screen, color, input_box, 2)\r\n b = False\r\n active = False\r\n while not done:\r\n for event in pg.event.get():\r\n if event.type == pg.QUIT:\r\n pg.quit()\r\n if event.type == pg.MOUSEBUTTONDOWN:\r\n # If the user clicked on the input_box rect.\r\n if input_box.collidepoint(event.pos):\r\n # Toggle the active variable.\r\n active = not active\r\n else:\r\n active = False\r\n # Change the current color of the input box.\r\n color = color_active if active else color_inactive\r\n if event.type == pg.KEYDOWN:\r\n if active:\r\n if event.key == pg.K_RETURN:\r\n b = True\r\n elif event.key == pg.K_BACKSPACE:\r\n text = text[:-1]\r\n elif event.key == pg.K_v and pg.key.get_mods() & pg.KMOD_CTRL:\r\n win32clipboard.OpenClipboard()\r\n text += win32clipboard.GetClipboardData()\r\n else:\r\n text += event.unicode\r\n screen.blit(bg, (0, 0))\r\n txt_surface = font.render(text, True, color)\r\n for c in text:\r\n if c not in heb:\r\n txt_surface = font.render(text, True, color)\r\n else:\r\n txt_surface = fontheb.render(text[::-1], True, color)\r\n break\r\n width = max(200, txt_surface.get_width() + 10)\r\n input_box.w = width\r\n screen.blit(txt_surface, (input_box.x + 5, input_box.y + 5))\r\n pg.draw.rect(screen, color, input_box, 2)\r\n pg.display.flip()\r\n if b == True:\r\n break\r\n clientSocket.send((\"sms \" +name+\": \"+text).encode())\r\n screen.blit(yo, (0, 0))\r\n return\r\n def nextsongpressed():\r\n clientSocket.send(\"play next song\".encode())\r\n def 
addsongpressed():\r\n color = color_inactive\r\n screen.blit(bg,(0,0))\r\n text = \"enter artist name\"\r\n txt_surface = font.render(text, True,color )\r\n width = max(200, txt_surface.get_width()+10)\r\n input_box.w = width\r\n screen.blit(txt_surface, (input_box.x+5, input_box.y+5))\r\n pg.draw.rect(screen, color, input_box, 2)\r\n b=False\r\n active = False\r\n while not done:\r\n for event in pg.event.get():\r\n if event.type == pg.QUIT:\r\n pg.quit()\r\n if event.type == pg.MOUSEBUTTONDOWN:\r\n # If the user clicked on the input_box rect.\r\n if input_box.collidepoint(event.pos):\r\n # Toggle the active variable.\r\n active = not active\r\n else:\r\n active = False\r\n # Change the current color of the input box.\r\n color = color_active if active else color_inactive\r\n if event.type == pg.KEYDOWN:\r\n if active:\r\n if event.key == pg.K_RETURN:\r\n b = True\r\n elif event.key == pg.K_BACKSPACE:\r\n text = text[:-1]\r\n elif event.key == pg.K_v and pg.key.get_mods() & pg.KMOD_CTRL:\r\n win32clipboard.OpenClipboard()\r\n text += win32clipboard.GetClipboardData()\r\n else:\r\n text += event.unicode\r\n screen.blit(bg,(0,0))\r\n txt_surface = font.render(text, True, color)\r\n for c in text:\r\n if c not in heb:\r\n txt_surface = font.render(text, True, color)\r\n else:\r\n txt_surface = fontheb.render(text[::-1], True, color)\r\n break\r\n width = max(200, txt_surface.get_width() + 10)\r\n input_box.w = width\r\n screen.blit(txt_surface, (input_box.x + 5, input_box.y + 5))\r\n pg.draw.rect(screen, color, input_box, 2)\r\n pg.display.flip()\r\n if b == True:\r\n break\r\n artistName = text\r\n results = spoyifyObj.search(artistName, 1, 0, \"artist\")\r\n try:\r\n artist = results['artists']['items'][0]\r\n except:\r\n screen.blit(bg,(0,0))\r\n text = \"artist wasnt found\"\r\n txt_surface = font.render(text, True, color)\r\n width = max(200, txt_surface.get_width() + 10)\r\n input_box.w = width\r\n screen.blit(txt_surface, (input_box.x + 5, input_box.y + 
5))\r\n pg.draw.rect(screen, color, input_box, 2)\r\n pg.display.flip()\r\n time.sleep(3)\r\n screen.blit(yo, (0, 0))\r\n return\r\n #print(artist['name'] + \" has \" + str(artist['followers']['total']) + \" followers , and his genre is \" + artist['genres'][0])\r\n urllib.request.urlretrieve(artist['images'][0]['url'],'pic.png')\r\n with open('pic.png', 'r+b') as f:\r\n with Image.open(f) as image:\r\n artistpic = resizeimage.resize_cover(image, [640, 480])\r\n artistpic.save('pic.png', image.format)\r\n artistpic = pg.image.load('pic.png')\r\n screen.blit(artistpic,(0,0))\r\n pg.display.flip()\r\n text = \"enter song name\"\r\n txt_surface = font.render(text, True, color)\r\n width = max(200, txt_surface.get_width()+10)\r\n input_box.w = width\r\n screen.blit(txt_surface, (input_box.x+5, input_box.y+5))\r\n pg.draw.rect(screen, color, input_box, 2)\r\n b=False\r\n active = False\r\n while not done:\r\n for event in pg.event.get():\r\n if event.type == pg.QUIT:\r\n pg.quit()\r\n if event.type == pg.MOUSEBUTTONDOWN:\r\n # If the user clicked on the input_box rect.\r\n if input_box.collidepoint(event.pos):\r\n # Toggle the active variable.\r\n active = not active\r\n else:\r\n active = False\r\n # Change the current color of the input box.\r\n color = color_active if active else color_inactive\r\n if event.type == pg.KEYDOWN:\r\n if active:\r\n if event.key == pg.K_RETURN:\r\n b = True\r\n elif event.key == pg.K_BACKSPACE:\r\n text = text[:-1]\r\n elif event.key == pg.K_v and pg.key.get_mods() & pg.KMOD_CTRL:\r\n win32clipboard.OpenClipboard()\r\n text += win32clipboard.GetClipboardData()\r\n else:\r\n text += event.unicode\r\n screen.blit(artistpic, (0, 0))\r\n txt_surface = font.render(text, True, color)\r\n for c in text:\r\n if c not in heb:\r\n txt_surface = font.render(text, True, color)\r\n else:\r\n txt_surface = fontheb.render(text[::-1], True, color)\r\n break\r\n width = max(200, txt_surface.get_width() + 10)\r\n input_box.w = width\r\n 
screen.blit(txt_surface, (input_box.x + 5, input_box.y + 5))\r\n pg.draw.rect(screen, color, input_box, 2)\r\n pg.display.flip()\r\n if b == True:\r\n break\r\n songname = text.upper()\r\n\r\n artistId = artist[\"id\"]\r\n albums = {}\r\n count = 0\r\n while len(albums) % 10 == 0:\r\n albums.update(spoyifyObj.artist_albums(artistId, None, None, 20,count))\r\n count += 20\r\n albums = spoyifyObj.artist_albums(artistId,None,None,50)\r\n albums = albums['items']\r\n close = []\r\n closeuri=[]\r\n albumsi = []\r\n names ={}\r\n def checker(na , di, al):\r\n if na in di:\r\n if di[na] == al:\r\n return False\r\n return True\r\n for album in albums:\r\n albumId = album['id']\r\n trackResults = spoyifyObj.album_tracks(albumId)\r\n trackResults = trackResults['items']\r\n for item in trackResults:\r\n if (songname in item['name'].upper() or songname == item['name'].upper()or levenshtein(songname,item['name'].upper())<4.0) and item['uri'] not in closeuri and checker(item['name'].upper(),names,album['name']):\r\n close.append(item)\r\n closeuri.append(item['uri'])\r\n albumsi.append(album['name'])\r\n names[item['name'].upper()] = album['name']\r\n if len(close)!=0:\r\n closest = \"\"\r\n mini = 5\r\n if len(close)>1:\r\n count = 1\r\n for ite in close:\r\n text4 = font2.render(str(count) + \". 
\" + ite['name'] + \" from \" + albumsi[count-1], True, (0, 255, 0),(0, 0, 128))\r\n textRect4 = text4.get_rect()\r\n textRect4.center = (640 // 2, (480 // 2)+count*15)\r\n screen.blit(text4, textRect4)\r\n count+=1\r\n pg.display.flip()\r\n text = \"enter song number\"\r\n txt_surface = font.render(text, True, color)\r\n width = max(200, txt_surface.get_width() + 10)\r\n input_box.w = width\r\n screen.blit(txt_surface, (input_box.x + 5, input_box.y + 5))\r\n pg.draw.rect(screen, color, input_box, 2)\r\n b = False\r\n active = False\r\n while not done:\r\n for event in pg.event.get():\r\n if event.type == pg.QUIT:\r\n pg.quit()\r\n if event.type == pg.MOUSEBUTTONDOWN:\r\n # If the user clicked on the input_box rect.\r\n if input_box.collidepoint(event.pos):\r\n # Toggle the active variable.\r\n active = not active\r\n else:\r\n active = False\r\n # Change the current color of the input box.\r\n color = color_active if active else color_inactive\r\n if event.type == pg.KEYDOWN:\r\n if event.key == pg.K_RETURN:\r\n b = True\r\n if active:\r\n if event.key == pg.K_RETURN:\r\n b = True\r\n elif event.key == pg.K_BACKSPACE:\r\n text = text[:-1]\r\n elif event.key == pg.K_v and pg.key.get_mods() & pg.KMOD_CTRL:\r\n win32clipboard.OpenClipboard()\r\n text += win32clipboard.GetClipboardData()\r\n else:\r\n text += event.unicode\r\n screen.blit(artistpic, (0, 0))\r\n txt_surface = font.render(text, True, color)\r\n for c in text:\r\n if c not in heb:\r\n txt_surface = font.render(text, True, color)\r\n else:\r\n txt_surface = fontheb.render(text[::-1], True, color)\r\n break\r\n width = max(200, txt_surface.get_width() + 10)\r\n input_box.w = width\r\n screen.blit(txt_surface, (input_box.x + 5, input_box.y + 5))\r\n pg.draw.rect(screen, color, input_box, 2)\r\n count=1\r\n for ite in close:\r\n for c in ite['name']:\r\n if c in heb:\r\n text4 = fontheb.render(str(count) + \". 
\" + ite['name'][::-1] + \" from \" + albumsi[count - 1][::-1], True,(0, 255, 0), (0, 0, 128))\r\n break\r\n text4 = font2.render(str(count) + \". \" + ite['name'] + \" from \" + albumsi[count-1],True, (0, 255, 0), (0, 0, 128))\r\n textRect4 = text4.get_rect()\r\n textRect4.center = (640 // 2, (480 // 2) + count * 15)\r\n screen.blit(text4, textRect4)\r\n count += 1\r\n pg.display.flip()\r\n if b == True:\r\n break\r\n closest = close[int(text)-1]['uri']\r\n else:\r\n closest= close[0]['uri']\r\n clientSocket.send(closest.encode())\r\n print(clientSocket.recv(55).decode())\r\n screen.blit(yo, (0, 0))\r\n return\r\n screen.blit(yo, (0, 0))\r\n screen.blit(artistpic, (0, 0))\r\n text = \"song wasnt found\"\r\n txt_surface = font.render(text, True, color)\r\n width = max(200, txt_surface.get_width() + 10)\r\n input_box.w = width\r\n screen.blit(txt_surface, (input_box.x + 5, input_box.y + 5))\r\n pg.draw.rect(screen, color, input_box, 2)\r\n pg.display.flip()\r\n time.sleep(3)\r\n screen.blit(yo, (0, 0))\r\n return\r\n while True:\r\n while isPlaying() != False:\r\n try:\r\n data = \"\"\r\n data = clientSocket.recv(bsize)\r\n data = data.decode('UTF8')\r\n if data:\r\n if data[0:4] == \"next\":\r\n track = data[5:41]\r\n pos = data[42:]\r\n if pos == \"\":\r\n spoyifyObj.start_playback(device['id'], None, [track])\r\n else:\r\n spoyifyObj.start_playback(device['id'], None, [track], None, pos)\r\n while (isPlaying() == False):\r\n pass\r\n elif data[0:4] == \"sms \":\r\n if len(smslist) == 10:\r\n smslist.remove(smslist[9])\r\n smslist.insert(0,data[4:])\r\n print(smslist)\r\n\r\n except:\r\n pass\r\n for event in pg.event.get():\r\n if event.type == pg.QUIT:\r\n pg.quit()\r\n if event.type == pg.MOUSEBUTTONDOWN:\r\n mouse_pos = event.pos # gets mouse position\r\n if nextbutton.collidepoint(mouse_pos):\r\n nextsongpressed()\r\n if addbutton.collidepoint(mouse_pos):\r\n addsongpressed()\r\n if textbutton.collidepoint(mouse_pos):\r\n sendsms()\r\n 
screen.blit(yo,(0,0))\r\n count = 0\r\n for sms in smslist:\r\n for letter in sms:\r\n if letter in heb:\r\n text5 = fontheb.render(sms[::-1], True,(0, 255, 0), (0, 0, 128))\r\n break\r\n text5 = font2.render(sms, True,(0, 255, 0), (0, 0, 128))\r\n textRect5 = text5.get_rect()\r\n textRect5.center = (100, (11) + count * 25)\r\n textRect5.x = 40\r\n screen.blit(text5, textRect5)\r\n count += 1\r\n pg.display.flip()\r\n pg.draw.rect(screen, [255, 0, 0], addbutton) # draw button\r\n pg.draw.rect(screen, [255, 0, 0], nextbutton) # draw button\r\n pg.draw.rect(screen, [255, 0, 0], textbutton) # draw button\r\n screen.blit(nextbuttontext, nextbuttonRect)\r\n screen.blit(addbuttontext, addbuttonRect)\r\n screen.blit(smsbuttontext, smsbuttonRect)\r\n\r\n pg.display.update()\r\n if spoyifyObj.currently_playing()[\"progress_ms\"]==0:\r\n clientSocket.send(\"song is done\".encode())\r\n track = clientSocket.recv(bsize).decode()\r\n if \"!\" in track:\r\n track = track[0:36]\r\n pos = track[38:]\r\n spoyifyObj.start_playback(device['id'], None, [track],None,pos)\r\n else:\r\n spoyifyObj.start_playback(device['id'], None, [track])\r\n while(isPlaying()==False):\r\n pass\r\n else:\r\n spoyifyObj.start_playback(device['id'], None, [track], None,spoyifyObj.currently_playing()[\"progress_ms\"]+1000 )\r\n pg.QUIT()\r\n\r\n\r\nif __name__ == '__main__':\r\n pg.init()\r\n main()\r\n pg.quit()"
}
] | 4 |
ahmaddoulat/updated_story_gen_api
|
https://github.com/ahmaddoulat/updated_story_gen_api
|
6e5e1159000e66546d7028bc1b4ae16affe4f4e7
|
8cb42d2c919436a6dd25a3f21c2bbfff7822ac17
|
95fe19aab8e16493e36d761f770e65415ec30cde
|
refs/heads/master
| 2023-03-19T23:13:32.631610 | 2020-08-04T03:18:39 | 2020-08-04T03:18:39 | 284,858,840 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.49438202381134033,
"alphanum_fraction": 0.6910112500190735,
"avg_line_length": 15.181818008422852,
"blob_id": "7cdcdfa49c7186aa37c1140e9200a8dbf5780016",
"content_id": "62b29cb83681090283d555f25fcc860634209b7c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 178,
"license_type": "no_license",
"max_line_length": 25,
"num_lines": 11,
"path": "/requirements.txt",
"repo_name": "ahmaddoulat/updated_story_gen_api",
"src_encoding": "UTF-8",
"text": "click==7.1.2\nFlask==1.1.1\nimportlib-metadata==1.7.0\ninflect==4.1.0\nitsdangerous==1.1.0\nJinja2==2.11.2\nMarkupSafe==1.1.1\npymongo==3.11.0\nstorygen==0.5\nWerkzeug==1.0.1\nzipp==3.1.0\n"
},
{
"alpha_fraction": 0.620127260684967,
"alphanum_fraction": 0.6408114433288574,
"avg_line_length": 37.09090805053711,
"blob_id": "fade5bf296d3f4964afa1745d9fdb2fda189572b",
"content_id": "22310de426a2cbc944404925b8b1bb681a47b36d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2514,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 66,
"path": "/app.py",
"repo_name": "ahmaddoulat/updated_story_gen_api",
"src_encoding": "UTF-8",
"text": "from flask import Flask, jsonify, request\nfrom pymongo import MongoClient\nfrom storygen.storygen import Story\nimport json\n\napp = Flask(__name__)\n\n\[email protected](\"/<string:student_id>/<string:structure>\", methods=['POST'])\ndef student_story_api(student_id, structure):\n features = json.loads(request.data)\n\n citizenship_type = \"1\" if features['citizenship_type'] == \"Yes\" else \"0\"\n nation_of_citizenship_desc = \"1\" if features['nation_of_citizenship_desc'] == \"Yes\" else \"0\"\n current_age = \"1\" if features['current_age'] == \"Yes\" else \"0\"\n primary_ethnicity = \"1\" if features['primary_ethnicity'] == \"Yes\" else \"0\"\n student_population_desc = \"1\" if features['student_population_desc'] == \"Yes\" else \"0\"\n student_population = \"1\" if features['student_population'] == \"Yes\" else \"0\"\n admissions_population_desc = \"1\" if features['admissions_population_desc'] == \"Yes\" else \"0\"\n advisor_count = \"1\" if features['advisor_count'] == \"Yes\" else \"0\"\n gpa = \"1\" if features['gpa'] == \"Yes\" else \"0\"\n credits_attempted = \"1\" if features['credits_attempted'] == \"Yes\" else \"0\"\n credits_passed = \"1\" if features['credits_passed'] == \"Yes\" else \"0\"\n academic_standing_desc = \"1\" if features['academic_standing_desc'] == \"Yes\" else \"0\"\n\n features_list = [\n citizenship_type,\n nation_of_citizenship_desc,\n current_age,\n primary_ethnicity,\n student_population_desc,\n student_population,\n admissions_population_desc,\n advisor_count,\n gpa,\n credits_attempted,\n credits_passed,\n academic_standing_desc\n ]\n\n selected_features = ''.join(features_list)\n\n if student_id is None:\n return {'message': 'There is no student ID', 'data': {}}, 404\n if structure is None:\n return {'message': 'There is no structure selected', 'data': {}}, 404\n\n client = MongoClient('localhost', 27017)\n db = client.eager_la_db\n collection = db.students_data_cleaned\n curr_student = 
collection.find({'student_id': int(student_id)})\n\n\n student_story = Story(curr_student[0], selected_features)\n\n if structure == \"temporal\":\n return jsonify(student_story.temporal_story), 201\n elif structure == \"default\":\n return jsonify(student_story.default_story), 201\n elif structure == \"outcome\":\n return jsonify(student_story.outcome_story), 201\n elif structure == \"test\":\n return jsonify(selected_features), 201\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0')\n"
}
] | 2 |
d8ahazard/doo_dad
|
https://github.com/d8ahazard/doo_dad
|
d56b93faad22ca40ca746f0d5364490edfc1a1de
|
6c3787ec073cae086d5f5941af37ed3f3e87211a
|
71dfbf20cce82fef6296446323d52b566273f8e6
|
refs/heads/master
| 2021-01-19T05:27:28.463542 | 2013-04-17T03:15:19 | 2013-04-17T03:15:19 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6639534831047058,
"alphanum_fraction": 0.6686046719551086,
"avg_line_length": 28.65517234802246,
"blob_id": "6f395c1d44892e5672c97e05ff39f22167650344",
"content_id": "11b7500d960a821cf9c993a4406a845000757508",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1720,
"license_type": "no_license",
"max_line_length": 156,
"num_lines": 58,
"path": "/compile.py",
"repo_name": "d8ahazard/doo_dad",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/python\n\nimport os, subprocess, logging\n\ndef prep()\n subprocess.check_output(\"java -jar apktool.jar if .\\source\\system\\framework\\framework-res.apk\", stderr=subprocess.STDOUT, shell=True ) \n subprocess.check_output(\"java -jar apktool.jar if .\\source\\system\\framework\\framework-miui-res.apk\" , stderr=subprocess.STDOUT, shell=True) \n\ndef decompile( work )\n prep()\n try:\n subprocess.check_output(\"java -jar apktool.jar d -b .\\source\\system\\%s .\\source\\system\\%s.out\" , stderr=subprocess.STDOUT, shell=True % ( work, work ) )\n except:\n return work\n continue\n\ndef compile( work )\n errors=list()\n try:\n subprocess.check_output(\"java -jar apktool.jar b .\\source\\system\\%s.out\" , stderr=subprocess.STDOUT, shell=True % ( work ) )\n except:\n return work\n continue\n try:\n os.remove('.\\source\\system\\%s.out\\build\\apk\\AndroidManifest.xml' % ( work ) )\n subprocess.check_output('winrar a -afzip -ep1 -df -r -m5 .\\source\\system\\%s .\\source\\system\\%s.out\\build\\apk\\*' % ( work, work ) )\n except:\n print(\"Failed to append to rar file.\") # need actual logging here\n return work\n\n\n#decompile\n#w=open('files.txt', r)\n#for files in w.readlines():\n# decompile(files)\n\n# loop through the working file list\nw.seek(0,0)\nretry=list()\nfor files in w.readlines():\n retry.append(compile(files))\n\nw.close()\n\n# retry \nwhile retry:\n attempt=1\n while attempt:\n attempt=raw_input(\"You have some failed file attempts. Would you like to retry? Y/N\" )\n if attempt.uppercase() == 'Y':\n attempt = 0\n for files in retry:\n retry.remove(files)\n retry.append(compile(files))\n else if attempt.uppercase() == 'N':\n attempt = 0\n retry = list()\n else: attempt = 1\n"
}
] | 1 |
msaghaei/ventilatorgraphics
|
https://github.com/msaghaei/ventilatorgraphics
|
d11438ffa9b86971154df9b3bec8c39597951389
|
139b26c5f6f40b5b085f98be525f26191293c6e9
|
91d5583b40e94801fe9dccf20e6101cc34415e52
|
refs/heads/master
| 2020-03-17T14:14:09.836574 | 2018-05-25T12:42:49 | 2018-05-25T12:42:49 | 133,651,652 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5204244256019592,
"alphanum_fraction": 0.5389920473098755,
"avg_line_length": 43.880950927734375,
"blob_id": "43aca896139ff8b98294a27f136e5081e2eb251e",
"content_id": "0f6590407f3b25bde73ceac3d97946f477d4d132",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 2224,
"license_type": "no_license",
"max_line_length": 264,
"num_lines": 42,
"path": "/platforms/android/app/src/main/assets/www/vc/peak_pressure.html",
"repo_name": "msaghaei/ventilatorgraphics",
"src_encoding": "UTF-8",
"text": "<!DOCTYPE html>\n<html xmlns=\"http://www.w3.org/1999/xhtml\">\n \n <head>\n <title>فشار حداکثر</title>\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\" />\n <meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\">\n <meta http-equiv=\"Content-Language\" content=\"fa\">\n <script src=\"../js/jquery-1.10.2.min.js\"></script>\n <script src=\"../js/rtl.jquery.mobile-1.4.0.js\"></script>\n <link href=\"../css/themes/default/rtl.jquery.mobile-1.4.0.css\" rel=\"stylesheet\" />\n </head>\n \n <body>\n <div data-role=\"page\" id=\"peak_pressure\">\n <div data-role=\"header\">\n <a rel=\"external\" href=\"index.html\" data-icon=\"back\" data-iconpos=\"left\">بازگشت</a>\n <h1>فشار حداکثر</h1>\n </div>\n <div data-role=\"content\">\n <h1>فشار حداکثر</h1>\n <p>\n فشار حداکثر (P<sub>PEAK</sub>)، حداکثر فشاری است که در طول دم ایجاد میشود. بر طبق معادله حرکت ریه P<sub>PEAK</sub> بستگی به PEEP<sub>TOT</sub>، شدت جریان، مقاومت دمی، حجم جاری، و کمپلیانس سیستم تنفسی بستگی دارد (بشرطی که P<sub>MUS</sub> = 0 باشد).<br/>\n بنابراین هرگونه اختلال بیشتر در مکانیک ریه همراه است با افزایش فشار حداکثر.\n </p>\n <img src=\"2-8.jpg\" width=\"100%\" />\n <p>\n هنگام کنترل حجمی P<sub>PEAK</sub> تحت تاثیر کدام یک قرار میگیرد؟<br/>\n ۱ - PEEP<br/>\n ۲ - کمپلیانس ریه<br/>\n ۳ - تعداد تنفس<br/>\n ۴ - مقاومت<br/>\n ۵ - تمام موارد فوق\n </p>\n </div>\n <div data-role=\"footer\"><button id=\"toggleanswer\" data-icon=\"check\">پاسخ</button><script>$( \"#toggleanswer\" ).click(function() {$(\"#toggleanswer\").text(\"۵ = پاسخ\");});</script>\n <h1>دکتر محمود سقائی</h1>\n </div>\n </div>\n </body>\n\n</html>\n"
},
{
"alpha_fraction": 0.519556999206543,
"alphanum_fraction": 0.542648434638977,
"avg_line_length": 53.410255432128906,
"blob_id": "5d065a48a26bbf3ff23b74e02ffa2d27716258fe",
"content_id": "1a86a97c67633b21c358adea53212c980eb67149",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 5124,
"license_type": "no_license",
"max_line_length": 292,
"num_lines": 78,
"path": "/platforms/browser/www/esophageal/transalveolar_pressure_at_end_expiration.html",
"repo_name": "msaghaei/ventilatorgraphics",
"src_encoding": "UTF-8",
"text": "<!DOCTYPE html>\n<html xmlns=\"http://www.w3.org/1999/xhtml\">\n \n <head>\n <title>فشار ترانس آلوئولار پایان بازدمی</title>\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\" />\n <meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\">\n <meta http-equiv=\"Content-Language\" content=\"fa\">\n <script src=\"../js/jquery-1.10.2.min.js\"></script>\n <script src=\"../js/rtl.jquery.mobile-1.4.0.js\"></script>\n <link href=\"../css/themes/default/rtl.jquery.mobile-1.4.0.css\" rel=\"stylesheet\" />\n </head>\n \n <body>\n <div data-role=\"page\" id=\"transalveolar_pressure_at_end_expiration\">\n <div data-role=\"header\">\n <a rel=\"external\" href=\"index.html\" data-icon=\"back\" data-iconpos=\"left\">بازگشت</a>\n <h1>فشار ترانس آلوئولار پایان بازدمی</h1>\n </div>\n <div data-role=\"content\">\n <h1>فشار ترانس آلوئولار پایان بازدمی</h1>\n <p>\n P<sub>TA</sub>\n در پایان بازدم بصورت PEEP<sub>TOT</sub> منهای P<sub>ES</sub> که متعاقب یک انسداد پایان بدست آمده اند محاسبه میشود.\n P<sub>TA</sub>\n در پایان بازدم ممکن است گاهی در بیماران ARDS منفی شود. معنی آن این است که فشار اطراف حبابچه در ناحیه میانی توراسیک از فشار حبابچه بیشتر شده است. در نتیجه ممکن است حبابچه ها در پایان بازدم کولاپس پیدا کنند و باعث آتلکتروما شوند. 
بنابراین مقدار PEEP باید طوری تنظیم شود که مقدار\n P<sub>TA</sub>\n در پایان بازدم هموراه مثبت باشد تا از آتلکترما جلوگیری شود.\n </p>\n <p>\n <a href=\"#popupVideo0\" data-rel=\"popup\" data-position-to=\"window\" class=\"ui-btn ui-corner-all ui-shadow ui-btn-inline ui-btn-icon-left ui-icon-video\">تنظیم پیپ متناسب با فشار ترانس آلوئولار پایان بازدمی</a>\n <div data-role=\"popup\" id=\"popupVideo0\" class=\"ui-content\">\n <video controls=\"true\" width=\"100%\" height=\"100%\">\n <source src=\"https://download-tls-cdn.edge-cdn.net/videodb/5501/videodb_5501_53500_10733096_hp.mp4\" type=\"video/mp4\">\n Your browser does not support HTML5 video.\n </video>\n <p>\n تنظیم پیپ متناسب با فشار ترانس آلوئولار پایان بازدمی\n </p>\n </div>\n </p>\n <p>\n <a href=\"#popupVideo1\" data-rel=\"popup\" data-position-to=\"window\" class=\"ui-btn ui-corner-all ui-shadow ui-btn-inline ui-btn-icon-left ui-icon-video\">کاهش گام به گام پیپ متناسب با فشار ترانس آلوئولار پایان بازدمی</a>\n <div data-role=\"popup\" id=\"popupVideo1\" class=\"ui-content\">\n <video controls=\"true\" width=\"100%\" height=\"100%\">\n <source src=\"https://download-tls-cdn.edge-cdn.net/videodb/5501/videodb_5501_53500_10733238_hp.mp4\" type=\"video/mp4\">\n Your browser does not support HTML5 video.\n </video>\n <p>\n کاهش گام به گام پیپ متناسب با فشار ترانس آلوئولار پایان بازدمی\n </p>\n </div>\n </p>\n <img src=\"8-7.jpg\" width=\"100%\" />\n <p>\n در مورد\n P<sub>TA</sub>\n در پایان بازدم کدام مورد زیر غلط است<br>\n ۱ -\n P<sub>TA</sub>\n در پایان بازدم برای ارزیابی ریسک آتلکتروما بکار میرود<br>\n ۲ - P<sub>TA</sub>\n در پایان بازدم برای تنظیم PEEP در ARDS با تنفس پاسیو بکار میرود<br>\n ۳ - P<sub>TA</sub>\n در پایان بازدم برای ارزیابی ناحیه میانی توراسیک بکار میرود<br>\n ۴ - P<sub>TA</sub>\n در پایان بازدم برای ارزیابی کرنش (strain) وارد به ریه بکار میرود<br>\n ۵ - P<sub>TA</sub>\n در پایان بازدم باید مختصری مثبت باشد\n </p>\n </div>\n <div data-role=\"footer\"><button id=\"toggleanswer\" 
data-icon=\"check\">پاسخ</button><script>$( \"#toggleanswer\" ).click(function() {$(\"#toggleanswer\").text(\"۴ = پاسخ\");});</script>\n <h1>دکتر محمود سقائی</h1>\n </div>\n </div>\n </body>\n\n</html>\n"
},
{
"alpha_fraction": 0.380972683429718,
"alphanum_fraction": 0.3854522109031677,
"avg_line_length": 40.122806549072266,
"blob_id": "74c7740967ccbd730abecafa67b426b65d84dadc",
"content_id": "07b460f717dfd1f60fc8b3de0e937ab8553d106f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 5104,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 114,
"path": "/platforms/browser/www/vc/index.html",
"repo_name": "msaghaei/ventilatorgraphics",
"src_encoding": "UTF-8",
"text": "<!DOCTYPE html>\n<html xmlns=\"http://www.w3.org/1999/xhtml\">\n \n <head>\n <title>کنترل حجمی</title>\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\" />\n <meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\">\n <meta http-equiv=\"Content-Language\" content=\"fa\">\n <script src=\"../js/jquery-1.10.2.min.js\"></script>\n <script src=\"../js/rtl.jquery.mobile-1.4.0.js\"></script>\n <link href=\"../css/themes/default/rtl.jquery.mobile-1.4.0.css\" rel=\"stylesheet\" />\n </head>\n \n <body>\n <div data-role=\"page\" id=\"toc\">\n <div data-role=\"header\"><a rel=\"external\" href=\"../toc.html\" data-icon=\"home\" data-iconpos=\"left\">خانه</a>\n <h1>کنترل حجمی</h1>\n </div>\n <div data-role=\"content\">\n <ul data-role=\"listview\" data-inset=\"true\">\n <li>\n <a rel=\"external\" href=\"shape_of_the_pressure_curve_vc.html\">\n الگوی منحنی فشار در کنترل حجمی\n </a>\n </li>\n <li>\n <a rel=\"external\" href=\"flow_pattern_vc.html\">\n الگوی منحنی شدت جریان در کنترل حجمی\n </a>\n </li>\n <li>\n <a rel=\"external\" href=\"resistive_component_of_the_pressure_curve_vc.html\">\n سهم مقاومتی منحنی فشار در کنترل حجمی\n </a>\n </li>\n <li>\n <a rel=\"external\" href=\"elastic_component_of_the_pressure_curve_vc.html\">\n سهم الاستیکی منحنی فشار در کنترل حجمی\n </a>\n </li>\n <li>\n <a rel=\"external\" href=\"the_pressure_curve_for_the_rc_model_vc.html\">\n منحنی فشار در مدل RC\n </a>\n </li>\n <li>\n <a rel=\"external\" href=\"single_breath_analysis_of_overdistension_and_recruitment.html\">\n تحلیل تک نفسی اتساع مفرط و بازگشائی\n </a>\n </li>\n <li>\n <a rel=\"external\" href=\"stress_index.html\">\n اندکس استرس\n </a>\n </li>\n <li>\n <a rel=\"external\" href=\"peak_pressure.html\">\n فشار حداکثر\n </a>\n </li>\n <li>\n <a rel=\"external\" href=\"plateau_pressure.html\">\n فشار کفه ای\n </a>\n </li>\n <li>\n <a rel=\"external\" href=\"end_inspiratory_occlusion.html\">\n تکنیک انسداد پایان دمی\n </a>\n </li>\n <li>\n <a 
rel=\"external\" href=\"end_inspiratory_occlusion_with_leakage.html\">\n تکنیک انسداد پایان دمی همراه با نشت\n </a>\n </li>\n <li>\n <a rel=\"external\" href=\"end_inspiratory_occlusion_with_active_effort.html\">\n تکنیک انسداد پایان دمی همراه با فعالیت تنفسی بیمار\n </a>\n </li>\n <li>\n <a rel=\"external\" href=\"ascending_pressure_during_an_end_inspiratory_occlusion.html\">\n وقوع فشار رو به ازدیاد در تکنیک انسداد پایان دمی\n </a>\n </li>\n <li>\n <a rel=\"external\" href=\"additional_resistance.html\">\n جزء اضافی مقاومت\n </a>\n </li>\n <li>\n <a rel=\"external\" href=\"increased_peak_pressure.html\">\n فشار حداکثر بالا\n </a>\n </li>\n <li>\n <a rel=\"external\" href=\"mean_airway_pressure.html\">\n فشار متوسط راه هوائی\n </a>\n </li>\n <li>\n <a rel=\"external\" href=\"driving_pressure_vc.html\">\n فشار رانش در کنترل حجمی\n </a>\n </li>\n </ul>\n </div>\n <div data-role=\"footer\">\n <h1>دکتر محمود سقائی</h1>\n </div>\n </div>\n </body>\n\n</html>\n"
},
{
"alpha_fraction": 0.5664172172546387,
"alphanum_fraction": 0.5827876329421997,
"avg_line_length": 51.14634323120117,
"blob_id": "bc1511db0d09c431ce9fc2f8916699bbec3f89df",
"content_id": "2c4252c8fd24e091be7c749203e12fadef58186a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 2736,
"license_type": "no_license",
"max_line_length": 403,
"num_lines": 41,
"path": "/platforms/browser/www/niv/delayed_cycling_and_patient_inspiratory_effort.html",
"repo_name": "msaghaei/ventilatorgraphics",
"src_encoding": "UTF-8",
"text": "<!DOCTYPE html>\n<html xmlns=\"http://www.w3.org/1999/xhtml\">\n \n <head>\n <title>سایکل دیررس و تلاش دمی بیمار</title>\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\" />\n <meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\">\n <meta http-equiv=\"Content-Language\" content=\"fa\">\n <script src=\"../js/jquery-1.10.2.min.js\"></script>\n <script src=\"../js/rtl.jquery.mobile-1.4.0.js\"></script>\n <link href=\"../css/themes/default/rtl.jquery.mobile-1.4.0.css\" rel=\"stylesheet\" />\n </head>\n \n <body>\n <div data-role=\"page\" id=\"delayed_cycling_and_patient_inspiratory_effort\">\n <div data-role=\"header\">\n <a rel=\"external\" href=\"index.html\" data-icon=\"back\" data-iconpos=\"left\">بازگشت</a>\n <h1>سایکل دیررس و تلاش دمی بیمار</h1>\n </div>\n <div data-role=\"content\">\n <h1>سایکل دیررس و تلاش دمی بیمار</h1>\n <p>\n سایکل دیررسی و توقف تلاش نیرومند دمی بیمار هر دو بصورت افزایش فشار در در انتهای دم تظاهر میکند. از روی الگوی شدت جریان دمی می توان این دو را از هم افتراق نمود. توقف تلاش نیرومند دمی بصورت الگوی شدت جریان گرد نمایش می یابد. در سایکل دیررس، شدت جریان دمی شکل مثلثی نرمال خود را دارا می باشد. در صورتی که علت سایکل تاخیری نشت تصادفی باشدیک شدت جریان باثبات در انتهای هواگیری ریه ها خواهیم داشت.\n </p>\n <img src=\"6-14.jpg\" width=\"100%\" />\n <p>\n با کدام یک می توان سایکل دیررس را از توقف تلاش نیرومند دمی افتراق داد؟<br/>\n ۱ - مشاهده منحنی فشار هنگام هواگیری ریه ها<br/>\n ۲ - مشاهده منحنی فشار هنگام بازدمی<br/>\n ۳ - از روی شکل منحنی شدت جریان دمی<br/>\n ۴ - از روی شکل منحنی شدت جریان بازدمی<br/>\n ۵ - با تغییر حساسیت ترایگر دمی\n </p>\n </div>\n <div data-role=\"footer\"><button id=\"toggleanswer\" data-icon=\"check\">پاسخ</button><script>$( \"#toggleanswer\" ).click(function() {$(\"#toggleanswer\").text(\"۳ = پاسخ\");});</script>\n <h1>دکتر محمود سقائی</h1>\n </div>\n </div>\n </body>\n\n</html>\n"
},
{
"alpha_fraction": 0.5351916551589966,
"alphanum_fraction": 0.5588850378990173,
"avg_line_length": 53.150943756103516,
"blob_id": "f89859392f7bdd385c6f428f994903442da49cfe",
"content_id": "4dc233658ae7e99121bc0d03d8122d9e89d91738",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 3509,
"license_type": "no_license",
"max_line_length": 439,
"num_lines": 53,
"path": "/platforms/android/app/src/main/assets/www/pv/quasi_static_pressure_volume_loop.html",
"repo_name": "msaghaei/ventilatorgraphics",
"src_encoding": "UTF-8",
"text": "<!DOCTYPE html>\n<html xmlns=\"http://www.w3.org/1999/xhtml\">\n \n <head>\n <title>لوپ فشار حجم نیمه استاتیک</title>\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\" />\n <meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\">\n <meta http-equiv=\"Content-Language\" content=\"fa\">\n <script src=\"../js/jquery-1.10.2.min.js\"></script>\n <script src=\"../js/rtl.jquery.mobile-1.4.0.js\"></script>\n <link href=\"../css/themes/default/rtl.jquery.mobile-1.4.0.css\" rel=\"stylesheet\" />\n </head>\n \n <body>\n <div data-role=\"page\" id=\"quasi_static_pressure_volume_loop\">\n <div data-role=\"header\">\n <a rel=\"external\" href=\"index.html\" data-icon=\"back\" data-iconpos=\"left\">بازگشت</a>\n <h1>لوپ فشار حجم نیمه استاتیک</h1>\n </div>\n <div data-role=\"content\">\n <h1>لوپ فشار حجم نیمه استاتیک</h1>\n <p>\n لوپ فشار-حجم نیمه استاتیک یک مانور تشخیصی برای ارزیابی ویژگی های الاستیک سیستم تنفس در هر سطح فشار در طول انبساط و و تخلیه ریه است. بیمار باید کاملا شل باشد. با استفاده از یک شدت جریان ثابت و بسیار کم و یا با استفاده از یک صعود فشاری آهسته (ramp) ریه ها را از فشار صفر تا ۴۰ سانتی متر آب متسع می کنیم و سپس از فشار ۴۰ تا صفر تخلیه میکنیم. 
شیب لوپ فشار-حجم بدست آمده در هر نقطه نشان دهنده کمپلیانس سیستم تنفس در آن لحظه می باشد.\n </p>\n <p>\n <a href=\"#popupVideo\" data-rel=\"popup\" data-position-to=\"window\" class=\"ui-btn ui-corner-all ui-shadow ui-btn-inline ui-btn-icon-left ui-icon-video\">لوپ فشار-حجم نیمه استاتیک</a>\n <div data-role=\"popup\" id=\"popupVideo\" class=\"ui-content\">\n <video controls=\"true\" width=\"100%\" height=\"100%\">\n <source src=\"https://download-tls-cdn.edge-cdn.net/videodb/5501/videodb_5501_53500_10732256_hp.mp4\" type=\"video/mp4\">\n Your browser does not support HTML5 video.\n </video>\n <p>\n لوپ فشار-حجم نیمه استاتیک\n </p>\n </div>\n </p>\n <img src=\"7-1.jpg\" width=\"100%\" />\n <p>\n در مورد لوپ فشار-حجم نیمه استاتیک کدام مورد زیر غلط است؟<br/>\n ۱ - در هر سطحی ار فشار قابل انجام است<br/>\n ۲ - نیاز به یک شدت جریان کم دارد<br/>\n ۳ - یک مانور تشخیصی است<br/>\n ۴ - اطلاعاتی در مورد مقاومت بدست میدهد<br/>\n ۵ - اطلاعاتی در مورد کمپلیانس در هر سطح فشار میدهد\n </p>\n </div>\n <div data-role=\"footer\"><button id=\"toggleanswer\" data-icon=\"check\">پاسخ</button><script>$( \"#toggleanswer\" ).click(function() {$(\"#toggleanswer\").text(\"۴ = پاسخ\");});</script>\n <h1>دکتر محمود سقائی</h1>\n </div>\n </div>\n </body>\n\n</html>\n"
},
{
"alpha_fraction": 0.525006890296936,
"alphanum_fraction": 0.5518099069595337,
"avg_line_length": 54.67692184448242,
"blob_id": "673b71ced9f58be57a0cf49b4089d60c8c962dbf",
"content_id": "5557c0fc39a6d5fbc17e8b95f53de5db172173b2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 4265,
"license_type": "no_license",
"max_line_length": 409,
"num_lines": 65,
"path": "/www/expiration/end_expiratory_occlusion.html",
"repo_name": "msaghaei/ventilatorgraphics",
"src_encoding": "UTF-8",
"text": "<!DOCTYPE html>\n<html xmlns=\"http://www.w3.org/1999/xhtml\">\n \n <head>\n <title>تکنیک انسداد پایان بازدمی</title>\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\" />\n <meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\">\n <meta http-equiv=\"Content-Language\" content=\"fa\">\n <script src=\"../js/jquery-1.10.2.min.js\"></script>\n <script src=\"../js/rtl.jquery.mobile-1.4.0.js\"></script>\n <link href=\"../css/themes/default/rtl.jquery.mobile-1.4.0.css\" rel=\"stylesheet\" />\n </head>\n \n <body>\n <div data-role=\"page\" id=\"end_expiratory_occlusion\">\n <div data-role=\"header\">\n <a rel=\"external\" href=\"index.html\" data-icon=\"back\" data-iconpos=\"left\">بازگشت</a>\n <h1>تکنیک انسداد پایان بازدمی</h1>\n </div>\n <div data-role=\"content\">\n <h1>تکنیک انسداد پایان بازدمی</h1>\n <p>\n هدف از انجام مانور انسداد پایان بازدمی اندازه گیری autoPEEP است. در طی این مانور دریچه های ونتیلاتور بسته می شوند و فشار داخل مدار ونتیلاتور به حالت تعادل با فشار داخل ریه بیمار میشود. تحت این شرایط فشاری که در پروکسیمال راه هوائی اندازه گرفته میشود، برابر است با فشار پایان بازدمی جبابچه (PEEP<sub>TOT</sub>). 
autoPEEP برابر است با حاصل تفریق PEEP<sub>TOT</sub> و PEEP تنظیم شده بر روی ونتیلاتور.\n </p>\n <p>\n <a href=\"#popupVideo0\" data-rel=\"popup\" data-position-to=\"window\" class=\"ui-btn ui-corner-all ui-shadow ui-btn-inline ui-btn-icon-left ui-icon-video\">انسداد پایان بازدمی در کنترل حجمی</a>\n <div data-role=\"popup\" id=\"popupVideo0\" class=\"ui-content\">\n <video controls=\"true\" width=\"100%\" height=\"100%\">\n <source src=\"https://download-tls-cdn.edge-cdn.net/videodb/5501/videodb_5501_53500_10731720_hp.mp4\" type=\"video/mp4\">\n Your browser does not support HTML5 video.\n </video>\n <p>\n انسداد پایان بازدمی در کنترل حجمی\n </p>\n </div>\n </p>\n <p>\n <a href=\"#popupVideo\" data-rel=\"popup\" data-position-to=\"window\" class=\"ui-btn ui-corner-all ui-shadow ui-btn-inline ui-btn-icon-left ui-icon-video\">انسداد پایان بازدمی در کنترل فشاری</a>\n <div data-role=\"popup\" id=\"popupVideo\" class=\"ui-content\">\n <video controls=\"true\" width=\"100%\" height=\"100%\">\n <source src=\"https://download-tls-cdn.edge-cdn.net/videodb/5501/videodb_5501_53500_10731764_hp.mp4\" type=\"video/mp4\">\n Your browser does not support HTML5 video.\n </video>\n <p>\n انسداد پایان بازدمی در کنترل فشاری\n </p>\n </div>\n </p>\n <img src=\"4-13.jpg\" width=\"100%\" />\n <p>\n autoPEEP:<br/>\n ۱ - اگر شدت جریان پایان بازدمی به صفر نرسد، موجود است<br/>\n ۲ - در طول مانور انسداد پایان بازدمی، از روی منحنی فشار می توان مقدار آن را حدس زد<br/>\n ۳ - از روی PEEP<sub>TOT</sub> و PEEP قابل محاسبه است<br/>\n ۴ - همراه است با ثابت زمانی بسیا کند بازدمی<br/>\n ۵ - تمام موارد فوق\n </p>\n </div>\n <div data-role=\"footer\"><button id=\"toggleanswer\" data-icon=\"check\">پاسخ</button><script>$( \"#toggleanswer\" ).click(function() {$(\"#toggleanswer\").text(\"۵ = پاسخ\");});</script>\n <h1>دکتر محمود سقائی</h1>\n </div>\n </div>\n </body>\n\n</html>\n"
},
{
"alpha_fraction": 0.6039493083953857,
"alphanum_fraction": 0.61661696434021,
"avg_line_length": 62.904762268066406,
"blob_id": "1abf45f4e235eb8f8ca1e968c33fbc60367ad15a",
"content_id": "d4dd5f61106b1346fee7f9da98448d1a2415415a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 3680,
"license_type": "no_license",
"max_line_length": 799,
"num_lines": 42,
"path": "/platforms/android/app/src/main/assets/www/vc/single_breath_analysis_of_overdistension_and_recruitment.html",
"repo_name": "msaghaei/ventilatorgraphics",
"src_encoding": "UTF-8",
"text": "<!DOCTYPE html>\n<html xmlns=\"http://www.w3.org/1999/xhtml\">\n \n <head>\n <title>تحلیل تک نفسی اتساع مفرط و بازگشائی</title>\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\" />\n <meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\">\n <meta http-equiv=\"Content-Language\" content=\"fa\">\n <script src=\"../js/jquery-1.10.2.min.js\"></script>\n <script src=\"../js/rtl.jquery.mobile-1.4.0.js\"></script>\n <link href=\"../css/themes/default/rtl.jquery.mobile-1.4.0.css\" rel=\"stylesheet\" />\n </head>\n \n <body>\n <div data-role=\"page\" id=\"single_breath_analysis_of_overdistension_and_recruitment\">\n <div data-role=\"header\">\n <a rel=\"external\" href=\"index.html\" data-icon=\"back\" data-iconpos=\"left\">بازگشت</a>\n <h1>تحلیل تک نفسی اتساع مفرط و بازگشائی</h1>\n </div>\n <div data-role=\"content\">\n <h1>تحلیل تک نفسی اتساع مفرط و بازگشائی</h1>\n <p>\n در کنترل حجمی با شدت جریان ثابت، شیب منحنی فشار نسبت مستقیم با الاستانس (و یا نسبت معکوس با کمپلیانس) دارد. می توانیم چنین نتیجه بگیریم که ثابت بودن سیب منحنی فشار در طول درم، نشان دهنده ثابت بودن کمپلیانس در طول دم می باشد. ثابت ماندن کمپلیانس در طول دم در واقع پیش فرض ما در مدل تک قسمتی سیستم تنفس بود. اما در سیستم تنفسی واقعی، مقدار کمپلیانس بندرت در طول دم ثابت باقی می ماند. در نتیجه تغییر در کمپلیانس در طول دم سبب تغییر در شیب منحنی فشار در طول دم میشود. یعنی بتدریج شیب کاهش می یابد ( و یا افزایش می یابد). انحراف تدریجی منحنی فشار به سوی پائین به معنی افزایش تدریجی کمپلیانس در طول دم است که نشان دهنده بازگشائی ریه در طول دم است. 
برعکس انحراف فزاینده منحنی فشار بسوی بالا به معنی کاهش پیشرونده کمپلیانس در طول دم است که نشان دهنده اتساع مفرط ریه است و خطر ولوتروما دارد.<br/>\n این تفسیر فقط در کنترل حجمی با شدت جریان ثابت و در حضور تنفس غیر فعال (اجباری) صادق است.\n </p>\n <img src=\"2-6.jpg\" width=\"100%\" />\n <p>\n در کنترل حجمی، میزان تغییرات فشار راه هوائی:<br/>\n ۱ - برای ریه طبیعی کاهش یابنده است<br/>\n ۲ - انحراف به سوی بالا نشان دهنده اتساع مفرط است<br/>\n ۳ - ممکن است نوسان داشته باشد<br/>\n ۴ - اگر سمت محدب منحنی به سوی بالا باشد، نشان دهنده بازگشائی ریه در طول دم است<br/>\n ۵ - با افزایش PEEP تغییری نمی کند\n </p>\n </div>\n <div data-role=\"footer\"><button id=\"toggleanswer\" data-icon=\"check\">پاسخ</button><script>$( \"#toggleanswer\" ).click(function() {$(\"#toggleanswer\").text(\"۲ = پاسخ\");});</script>\n <h1>دکتر محمود سقائی</h1>\n </div>\n </div>\n </body>\n\n</html>\n"
},
{
"alpha_fraction": 0.5654399991035461,
"alphanum_fraction": 0.58815997838974,
"avg_line_length": 57.96226501464844,
"blob_id": "2c405087593dbdb7e3a9d56b5848e82ea1aa0baa",
"content_id": "2cd9c0861ea02f8dcc5b0c551fc39ce4efb22691",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 3979,
"license_type": "no_license",
"max_line_length": 484,
"num_lines": 53,
"path": "/platforms/android/app/src/main/assets/www/esophageal/transpulmonary_pressure_during_recruitment_maneuvers.html",
"repo_name": "msaghaei/ventilatorgraphics",
"src_encoding": "UTF-8",
"text": "<!DOCTYPE html>\n<html xmlns=\"http://www.w3.org/1999/xhtml\">\n \n <head>\n <title>فشار ترانس پولموناری هنگام مانور بازگشائی ریه</title>\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\" />\n <meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\">\n <meta http-equiv=\"Content-Language\" content=\"fa\">\n <script src=\"../js/jquery-1.10.2.min.js\"></script>\n <script src=\"../js/rtl.jquery.mobile-1.4.0.js\"></script>\n <link href=\"../css/themes/default/rtl.jquery.mobile-1.4.0.css\" rel=\"stylesheet\" />\n </head>\n \n <body>\n <div data-role=\"page\" id=\"transpulmonary_pressure_during_recruitment_maneuvers\">\n <div data-role=\"header\">\n <a rel=\"external\" href=\"index.html\" data-icon=\"back\" data-iconpos=\"left\">بازگشت</a>\n <h1>فشار ترانس پولموناری هنگام مانور بازگشائی ریه</h1>\n </div>\n <div data-role=\"content\">\n <h1>فشار ترانس پولموناری هنگام مانور بازگشائی ریه</h1>\n <p>\n هنگام اجرای مانور بازگشائی با روش فشار متسع کننده مداوم، چنانچه فشار مروی را اندازه گیری کنیم، می توانیم فشار ترانس پولموناری را مانیتورینگ نمائیم. برای بازگشائی قسمت های کولاپس شده و جلوگیری از آسیب ریوی، فشار ترانس پولموناری هنگام اجرای مانور نباید از 20 - 25 سانتی متر آب بیشتر شود. 
در بعشی موارد که فشار مروی بسیار بالا است، گاهی لازم میشود که فشار راه هوائی به مقادیر بسیار بالائی (مثلا 60 سانتی متر آب) برسد تا فشار ترانس پولموناری به حد مورد نظر برای بازگشائی برسد.\n </p>\n <p>\n <a href=\"#popupVideo\" data-rel=\"popup\" data-position-to=\"window\" class=\"ui-btn ui-corner-all ui-shadow ui-btn-inline ui-btn-icon-left ui-icon-video\">فشار ترانس پولموناری هنگام مانور بازگشائی ریه</a>\n <div data-role=\"popup\" id=\"popupVideo\" class=\"ui-content\">\n <video controls=\"true\" width=\"100%\" height=\"100%\">\n <source src=\"https://download-tls-cdn.edge-cdn.net/videodb/5501/videodb_5501_53500_10733428_hp.mp4\" type=\"video/mp4\">\n Your browser does not support HTML5 video.\n </video>\n <p>\n فشار ترانس پولموناری هنگام مانور بازگشائی ریه\n </p>\n </div>\n </p>\n <img src=\"8-12.jpg\" width=\"100%\" />\n <p>\n تمام موارد زیر صحیح است بجز یک مورد. فشار ترانس پولموناری هنگام اجرای مانور بازگشائی:<br/>\n ۱ - فشار مسئول بازگشائی حبابچه های کولاپس شده است<br/>\n ۲ - باید از مجموع کشش سطحی و فشار کمپرسیو بیشتر باشد<br/>\n ۳ - بستگی به مکانیک جدار سینه دارد<br/>\n ۴ - در لوپ فشار-حجم ترانس پولموناری دیده میشود<br/>\n ۵ - در تیتراسیون فشار راه هوائی لازم برای بازگشائی کمک میکند\n </p>\n </div>\n <div data-role=\"footer\"><button id=\"toggleanswer\" data-icon=\"check\">پاسخ</button><script>$( \"#toggleanswer\" ).click(function() {$(\"#toggleanswer\").text(\"۳ = پاسخ\");});</script>\n <h1>دکتر محمود سقائی</h1>\n </div>\n </div>\n </body>\n\n</html>\n"
},
{
"alpha_fraction": 0.5874155163764954,
"alphanum_fraction": 0.6017736196517944,
"avg_line_length": 56.75609588623047,
"blob_id": "9f497924b7d3780ac74b46ef3e5baa2dca7bdfa7",
"content_id": "d29b69e62d2d33a3e4727a3df8444c369fc65353",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 3169,
"license_type": "no_license",
"max_line_length": 663,
"num_lines": 41,
"path": "/www/ps/inspiratory_trigger.html",
"repo_name": "msaghaei/ventilatorgraphics",
"src_encoding": "UTF-8",
"text": "<!DOCTYPE html>\n<html xmlns=\"http://www.w3.org/1999/xhtml\">\n \n <head>\n <title>ترایگر دمی</title>\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\" />\n <meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\">\n <meta http-equiv=\"Content-Language\" content=\"fa\">\n <script src=\"../js/jquery-1.10.2.min.js\"></script>\n <script src=\"../js/rtl.jquery.mobile-1.4.0.js\"></script>\n <link href=\"../css/themes/default/rtl.jquery.mobile-1.4.0.css\" rel=\"stylesheet\" />\n </head>\n \n <body>\n <div data-role=\"page\" id=\"inspiratory_trigger\">\n <div data-role=\"header\">\n <a rel=\"external\" href=\"index.html\" data-icon=\"back\" data-iconpos=\"left\">بازگشت</a>\n <h1>ترایگر دمی</h1>\n </div>\n <div data-role=\"content\">\n <h1>ترایگر دمی</h1>\n <p>\n تلاش دمی بیمار بصورت تغییری در منحنی فشار یا شدت جریان نمایش می یابد. هنگام ترایگر فشاری، تلاش دمی بیمار باید بتواند فشار راه هوائی را از حد PEEP به میزان آستانه ای تنظیم شده کاهش دهد. در این شرایط، قبل از ترایگر فشار راه هوائی کاهش می یابد در حالیکه مقدار شدت جریان صفر است. هنگام ترایگر شدت جریانی، یک شدت جریان بازدمی پایه در تمام مدار ونتیلاتور وجود دارد و وقتی بیمار تلاش دمی انجام میدهد، کسری از این شدت جریان بسوی بیمار منحرف میشود و به همان میزان از شدت جریان بازدمی کاسته میشود. وقتی که این کاهش به میزان آستانه ای تنظیم شده برسد ترایگر روی میدهد. 
تحت این شرایط در زمان قبل از ترایگر فشار راه هوائی کم میشود و شدت جریان مختضری افزایش می یابد.\n </p>\n <img src=\"5-2.jpg\" width=\"100%\" />\n <p>\n ترایگر دمی<br/>\n ۱ - بصورت افزایش فشار قبل از شروع تنفس مکانیکی بروز میکند<br/>\n ۲ - در صورت ترایگر شدت جریانی، در منحنی فشار قابل رؤیت نیست<br/>\n ۳ - شدت جریان همیشه با شروع تلاش دمی بیمار افزایش می یابد<br/>\n ۴ - بر حسب نوع ترایگر، منحنی شدت جریان در زمان قبل ترایگر متفاوت است<br/>\n ۵ - با سیستم ترایگر فشاری و شدت جریانی یکسان است\n </p>\n </div>\n <div data-role=\"footer\"><button id=\"toggleanswer\" data-icon=\"check\">پاسخ</button><script>$( \"#toggleanswer\" ).click(function() {$(\"#toggleanswer\").text(\"۴ = پاسخ\");});</script>\n <h1>دکتر محمود سقائی</h1>\n </div>\n </div>\n </body>\n\n</html>\n"
},
{
"alpha_fraction": 0.5227025151252747,
"alphanum_fraction": 0.5481293201446533,
"avg_line_length": 50.943397521972656,
"blob_id": "b4267f4193d711aad6c348cb1392076c2cb1108e",
"content_id": "f33153fbe6bfb3c315fcd4d3e7e6a35905f8b001",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 3272,
"license_type": "no_license",
"max_line_length": 309,
"num_lines": 53,
"path": "/www/vc/end_inspiratory_occlusion.html",
"repo_name": "msaghaei/ventilatorgraphics",
"src_encoding": "UTF-8",
"text": "<!DOCTYPE html>\n<html xmlns=\"http://www.w3.org/1999/xhtml\">\n \n <head>\n <title>تکنیک انسداد پایان دمی</title>\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\" />\n <meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\">\n <meta http-equiv=\"Content-Language\" content=\"fa\">\n <script src=\"../js/jquery-1.10.2.min.js\"></script>\n <script src=\"../js/rtl.jquery.mobile-1.4.0.js\"></script>\n <link href=\"../css/themes/default/rtl.jquery.mobile-1.4.0.css\" rel=\"stylesheet\" />\n </head>\n \n <body>\n <div data-role=\"page\" id=\"end_inspiratory_occlusion\">\n <div data-role=\"header\">\n <a rel=\"external\" href=\"index.html\" data-icon=\"back\" data-iconpos=\"left\">بازگشت</a>\n <h1>تکنیک انسداد پایان دمی</h1>\n </div>\n <div data-role=\"content\">\n <h1>تکنیک انسداد پایان دمی</h1>\n <p>\n در بیمارانی که ریه نرمال دارند، با یک انسداد پایان دمی حداقل 0.5 ثانیه می توان P<sub>PLAT</sub> را بطور دقیق تعیین کرد. اما در بیماران ریوی دارای ریه غیر غیر هموژن مدت انسداد طولانی تر تا 5 ثانیه ممکن است لازم شود تا فشار به حد پلاتو برسد. 
این انسداد طولانی پایان دمی را باید بصورت دستی فراهم کرد.\n </p>\n <p>\n <a href=\"#popupVideo\" data-rel=\"popup\" data-position-to=\"window\" class=\"ui-btn ui-corner-all ui-shadow ui-btn-inline ui-btn-icon-left ui-icon-video\">انسداد پایان دمی در کنترل حجمی</a>\n <div data-role=\"popup\" id=\"popupVideo\" class=\"ui-content\">\n <video controls=\"true\" width=\"100%\" height=\"100%\">\n <source src=\"https://download-tls-cdn.edge-cdn.net/videodb/5501/videodb_5501_53500_10731484_hp.mp4\" type=\"video/mp4\">\n Your browser does not support HTML5 video.\n </video>\n <p>\n انسداد پایان دمی در کنترل حجمی\n </p>\n </div>\n </p>\n <img src=\"2-10.jpg\" width=\"100%\" />\n <p>\n در کنترل حجمی P<sub>PLAT</sub> با کدام روش اندازه گیری میشود؟<br/>\n ۱ - در صورت نرمال بودن ریه، یک مکث پایان دمی کوتاه<br/>\n ۲ - در بیمار COPD یک انسداد 5 ثانیه ای به روش دستی<br/>\n ۳ - در COPD، یک مکث پایان دمی کوتاه<br/>\n ۴ - در ARDS، یک مکث پایان دمی کوتاه<br/>\n ۵ - تمام موارد فوق بجز ۳\n </p>\n </div>\n <div data-role=\"footer\"><button id=\"toggleanswer\" data-icon=\"check\">پاسخ</button><script>$( \"#toggleanswer\" ).click(function() {$(\"#toggleanswer\").text(\"۵ = پاسخ\");});</script>\n <h1>دکتر محمود سقائی</h1>\n </div>\n </div>\n </body>\n\n</html>\n"
},
{
"alpha_fraction": 0.5481407642364502,
"alphanum_fraction": 0.5693891048431396,
"avg_line_length": 55.8301887512207,
"blob_id": "3deeaf937c0fe554c9f65902abb6d482ddb5f5e2",
"content_id": "699c5fe09808302e70f0cffbc1a2cac297fef60c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 3783,
"license_type": "no_license",
"max_line_length": 657,
"num_lines": 53,
"path": "/platforms/android/app/src/main/assets/www/pc/flow_curve_pc.html",
"repo_name": "msaghaei/ventilatorgraphics",
"src_encoding": "UTF-8",
"text": "<!DOCTYPE html>\n<html xmlns=\"http://www.w3.org/1999/xhtml\">\n \n <head>\n <title>منحنی شدت جریان در کنترل فشاری</title>\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\" />\n <meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\">\n <meta http-equiv=\"Content-Language\" content=\"fa\">\n <script src=\"../js/jquery-1.10.2.min.js\"></script>\n <script src=\"../js/rtl.jquery.mobile-1.4.0.js\"></script>\n <link href=\"../css/themes/default/rtl.jquery.mobile-1.4.0.css\" rel=\"stylesheet\" />\n </head>\n \n <body>\n <div data-role=\"page\" id=\"flow_curve_pc\">\n <div data-role=\"header\">\n <a rel=\"external\" href=\"index.html\" data-icon=\"back\" data-iconpos=\"left\">بازگشت</a>\n <h1>منحنی شدت جریان در کنترل فشاری</h1>\n </div>\n <div data-role=\"content\">\n <h1>منحنی شدت جریان در کنترل فشاری</h1>\n <p>\n در مدهای کنترل مقدار و الگوی فشاری شدت جریان دمی در واقع حاصل کار ونتیلاتور است برای رسیدو و حفظ یک مقدار و الگوی فشار از قبل تعیین شده. شدت جریان الگوی خاصی دارد، بدین صورت که در ابتدا به سرعت افزایش می یابد و سپی با الگوی نمائی کاهش پیدا میکند. شدت جریان دمی در اثر گرادیان فشاری بین ابتدای راه هوائی و حبابچه ها بوجود می آید. این گرادیان در ابتدای دم یعنی زمانی که فشار آلوئول برابر PEEP توتال است، در خداکثر خود می باشد. سپس در طول دم فشار حبابچه ها شروع به افزایش می کند و در نتیجه آن گرادیان فشاری مذکور کاهش می یابد. 
اگر زمان دم به اندازه کافی ادامه یابد، وقتی که فشار حبابچه های به فشاری ابتدای راه هوائی میرسد، شدت جریان دمی صفر می شود.\n </p>\n <p>\n <a href=\"#popupVideo\" data-rel=\"popup\" data-position-to=\"window\" class=\"ui-btn ui-corner-all ui-shadow ui-btn-inline ui-btn-icon-left ui-icon-video\">منحنی شدت جریان دمی</a>\n <div data-role=\"popup\" id=\"popupVideo\" class=\"ui-content\">\n <video controls=\"true\" width=\"100%\" height=\"100%\">\n <source src=\"https://download-tls-cdn.edge-cdn.net/videodb/5501/videodb_5501_53500_10731532_hp.mp4\" type=\"video/mp4\">\n Your browser does not support HTML5 video.\n </video>\n <p>\n منحنی شدت جریان دمی\n </p>\n </div>\n </p>\n <img src=\"3-1.jpg\" width=\"100%\" />\n <p>\n شدت جریان دمی در کنترل فشاری:<br/>\n ۱ - مربعی شکل است<br/>\n ۲ - شکل آن قابل انتخاب توسط کاربر است<br/>\n ۳ - به علت گرادیان فشاری ایجاد میشود<br/>\n ۴ - در انتهای دم به حداکثر خود میرسد<br/>\n ۵ - هرگز به صفر نمیرسد\n </p>\n </div>\n <div data-role=\"footer\"><button id=\"toggleanswer\" data-icon=\"check\">پاسخ</button><script>$( \"#toggleanswer\" ).click(function() {$(\"#toggleanswer\").text(\"۳ = پاسخ\");});</script>\n <h1>دکتر محمود سقائی</h1>\n </div>\n </div>\n </body>\n\n</html>\n"
},
{
"alpha_fraction": 0.573674738407135,
"alphanum_fraction": 0.5893980264663696,
"avg_line_length": 53.29268264770508,
"blob_id": "5228fcd28bcbdc23bda3a4fa4cc5d0145eaa4145",
"content_id": "bf72fbe6ac87fc68957d79943fba4e535bce1fed",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 2872,
"license_type": "no_license",
"max_line_length": 327,
"num_lines": 41,
"path": "/www/vc/end_inspiratory_occlusion_with_active_effort.html",
"repo_name": "msaghaei/ventilatorgraphics",
"src_encoding": "UTF-8",
"text": "<!DOCTYPE html>\n<html xmlns=\"http://www.w3.org/1999/xhtml\">\n \n <head>\n <title>تکنیک انسداد پایان دمی همراه با فعالیت تنفسی بیمار</title>\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\" />\n <meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\">\n <meta http-equiv=\"Content-Language\" content=\"fa\">\n <script src=\"../js/jquery-1.10.2.min.js\"></script>\n <script src=\"../js/rtl.jquery.mobile-1.4.0.js\"></script>\n <link href=\"../css/themes/default/rtl.jquery.mobile-1.4.0.css\" rel=\"stylesheet\" />\n </head>\n \n <body>\n <div data-role=\"page\" id=\"end_inspiratory_occlusion_with_active_effort\">\n <div data-role=\"header\">\n <a rel=\"external\" href=\"index.html\" data-icon=\"back\" data-iconpos=\"left\">بازگشت</a>\n <h1>تکنیک انسداد پایان دمی همراه با فعالیت تنفسی بیمار</h1>\n </div>\n <div data-role=\"content\">\n <h1>تکنیک انسداد پایان دمی همراه با فعالیت تنفسی بیمار</h1>\n <p>\n هنگام اندازه گیری P<sub>PLAT</sub> چنانچه بیمار تلاش فعال دمی انجام دهد و در نتیجه آن یک موج فشار منفی در ریه بیمار ایجاد گردد، و یا تلاش فعال بازدمی انجام دهد و در نتیجه یک موج فشار مثبت در ریه ایجاد کند، تحت این شرایط مقدار P<sub>PLAT</sub> فاقد ثبات است و اندازه گیری آن توصیه نمی شود چرا که عضلات بیمار شل نیست.\n </p>\n <img src=\"2-12.jpg\" width=\"100%\" />\n <p>\n هنگام مانور انسداد پایان دمی کدام مورد زیر صحیح نیست؟<br/>\n ۱ - اگر عضلات بیمار کاملا شل باشد و نشتی وجود نداشته باشد، فشار پلاتو دارای ثبات می گردد<br/>\n ۲ - در صورت تلاش دمی یک موج فشار منفی ایجاد میشود<br/>\n ۳ - بین دو موج فشار غیرطبیعی می توان فشار پلاتو را اندازه گرفت<br/>\n ۴ - در صورت تلاش بازدمی یک موج فشار مثبت ایجاد میشود<br/>\n ۵ - افت تدریجی فشار پلاتو دلیل وجود نشت است\n </p>\n </div>\n <div data-role=\"footer\"><button id=\"toggleanswer\" data-icon=\"check\">پاسخ</button><script>$( \"#toggleanswer\" ).click(function() {$(\"#toggleanswer\").text(\"۳ = پاسخ\");});</script>\n <h1>دکتر محمود سقائی</h1>\n </div>\n </div>\n </body>\n\n</html>\n"
},
{
"alpha_fraction": 0.553553581237793,
"alphanum_fraction": 0.5710710883140564,
"avg_line_length": 47.73170852661133,
"blob_id": "9b0284cae74b3f5fbe8ed3d0c3a7040680956bca",
"content_id": "34b50c35e71b9ccd1935e74cdde7003fd3e3ecd9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 2522,
"license_type": "no_license",
"max_line_length": 282,
"num_lines": 41,
"path": "/platforms/android/app/src/main/assets/www/esophageal/hysteresis.html",
"repo_name": "msaghaei/ventilatorgraphics",
"src_encoding": "UTF-8",
"text": "<!DOCTYPE html>\n<html xmlns=\"http://www.w3.org/1999/xhtml\">\n \n <head>\n <title>هستره زیس</title>\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\" />\n <meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\">\n <meta http-equiv=\"Content-Language\" content=\"fa\">\n <script src=\"../js/jquery-1.10.2.min.js\"></script>\n <script src=\"../js/rtl.jquery.mobile-1.4.0.js\"></script>\n <link href=\"../css/themes/default/rtl.jquery.mobile-1.4.0.css\" rel=\"stylesheet\" />\n </head>\n \n <body>\n <div data-role=\"page\" id=\"hysteresis\">\n <div data-role=\"header\">\n <a rel=\"external\" href=\"index.html\" data-icon=\"back\" data-iconpos=\"left\">بازگشت</a>\n <h1>هستره زیس</h1>\n </div>\n <div data-role=\"content\">\n <h1>هستره زیس</h1>\n <p>\n هیستره زیس موجود در لوپ فشار-حجم تنفسی اساسا بخاطر بازگشائی هنگام هواگیری و بازبسته شدن هنگام تخلیه می باشد. نتیجه مهم اینکه لوپ فشار-حجم جدار سینه فاقد هیستره زیس می باشد. بنابراین تمام هیستره زیس موجود در لوپ فشار-حجم ترانس پولموناری همانند لوپ فشار-حجم تنفسی می باشد.\n </p>\n <img src=\"8-11.jpg\" width=\"100%\" />\n <p>\n در مورد هیستره زیس کدام مورد زیر غلط است؟<br/>\n ۱ - هیستره زیس در لوپ فشار-حجم تنفسی دیده میشود<br/>\n ۲ - هیستره زیس بکمک مکانیک جدار سینه توجیه میشود<br/>\n ۳ - هیستره زیس اساسا بخاطر بازگشائی و بازبسته شدن ریه می باشد<br/>\n ۴ - هیستره زیس در لوپ فشار-حجم ترانس پولموناری دیده میشود<br/>\n ۵ - در بیماران دارای استعداد برای بازگشائی ریه، هیستره زیس زیاد است\n </p>\n </div>\n <div data-role=\"footer\"><button id=\"toggleanswer\" data-icon=\"check\">پاسخ</button><script>$( \"#toggleanswer\" ).click(function() {$(\"#toggleanswer\").text(\"۲ = پاسخ\");});</script>\n <h1>دکتر محمود سقائی</h1>\n </div>\n </div>\n </body>\n\n</html>\n"
},
{
"alpha_fraction": 0.549461305141449,
"alphanum_fraction": 0.56611168384552,
"avg_line_length": 48.80487823486328,
"blob_id": "85fba54cccd7d2a80eb0d7f28fe19a19ec1ec268",
"content_id": "58f8aec82d1a37f0d7e9348c4729006507f5d341",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 2560,
"license_type": "no_license",
"max_line_length": 459,
"num_lines": 41,
"path": "/www/vc/the_pressure_curve_for_the_rc_model_vc.html",
"repo_name": "msaghaei/ventilatorgraphics",
"src_encoding": "UTF-8",
"text": "<!DOCTYPE html>\n<html xmlns=\"http://www.w3.org/1999/xhtml\">\n \n <head>\n <title>منحنی فشار در مدل RC</title>\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\" />\n <meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\">\n <meta http-equiv=\"Content-Language\" content=\"fa\">\n <script src=\"../js/jquery-1.10.2.min.js\"></script>\n <script src=\"../js/rtl.jquery.mobile-1.4.0.js\"></script>\n <link href=\"../css/themes/default/rtl.jquery.mobile-1.4.0.css\" rel=\"stylesheet\" />\n </head>\n \n <body>\n <div data-role=\"page\" id=\"the_pressure_curve_for_the_rc_model_vc\">\n <div data-role=\"header\">\n <a rel=\"external\" href=\"index.html\" data-icon=\"back\" data-iconpos=\"left\">بازگشت</a>\n <h1>منحنی فشار در مدل RC</h1>\n </div>\n <div data-role=\"content\">\n <h1>منحنی فشار در مدل RC</h1>\n <p>\n در کنترل حجمی با شدت جریان مربع شکل، چنانچه تنها هر دو جزء مدل یعنی مقاومت و الاستانس وجود داشته باشد (به عنوان مثال لوله تراشه و بالون هر دو باشد)، در این صورت منحنی فشار یک افزایش سریع اولیه ناشی از مقاومت دارد (فشار مقاومتی)، و بدنبال آن افزایش تدریجی و خطی در فشار در طول دم روی میدهد که شیب آن بستگی به الاستانس سیستم تنفسی دارد (فشار الاستیکی). همچنانکه در معادله فشار مشهود است، فشار تابعی از کمپلیانس، حجم جاری، مقاومت و شدت جریان می باشد.\n </p>\n <img src=\"2-5.jpg\" width=\"100%\" />\n <p>\n در کنترل حجمی، منحنی فشار در طول دم تحت تاثیر کدام قرار میگیرد:<br/>\n ۱ - حجم جاری<br/>\n ۲ - شدت جریان<br/>\n ۳ - مقاومت<br/>\n ۴ - کمپلیانس<br/>\n ۵ - تمام موارد فوق\n </p>\n </div>\n <div data-role=\"footer\"><button id=\"toggleanswer\" data-icon=\"check\">پاسخ</button><script>$( \"#toggleanswer\" ).click(function() {$(\"#toggleanswer\").text(\"۵ = پاسخ\");});</script>\n <h1>دکتر محمود سقائی</h1>\n </div>\n </div>\n </body>\n\n</html>\n"
},
{
"alpha_fraction": 0.5116741061210632,
"alphanum_fraction": 0.5285643339157104,
"avg_line_length": 41.82978820800781,
"blob_id": "0e3e8049ba4aeb1fba06db5f478a9eb60ca3d259",
"content_id": "d6ddcd1168127f039255fcf185e1f354364fcef5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 2353,
"license_type": "no_license",
"max_line_length": 188,
"num_lines": 47,
"path": "/platforms/android/app/src/main/assets/www/pc/peak_inspiratory_flow_pc.html",
"repo_name": "msaghaei/ventilatorgraphics",
"src_encoding": "UTF-8",
"text": "<!DOCTYPE html>\n<html xmlns=\"http://www.w3.org/1999/xhtml\">\n \n <head>\n <title>شدت جریان حداکثر دمی در کنترل فشاری</title>\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\" />\n <meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\">\n <meta http-equiv=\"Content-Language\" content=\"fa\">\n <script src=\"../js/jquery-1.10.2.min.js\"></script>\n <script src=\"../js/rtl.jquery.mobile-1.4.0.js\"></script>\n <link href=\"../css/themes/default/rtl.jquery.mobile-1.4.0.css\" rel=\"stylesheet\" />\n </head>\n \n <body>\n <div data-role=\"page\" id=\"peak_inspiratory_flow_pc\">\n <div data-role=\"header\">\n <a rel=\"external\" href=\"index.html\" data-icon=\"back\" data-iconpos=\"left\">بازگشت</a>\n <h1>شدت جریان حداکثر دمی در کنترل فشاری</h1>\n </div>\n <div data-role=\"content\">\n <h1>شدت جریان حداکثر دمی در کنترل فشاری</h1>\n <p>\n حداکثر شدت جریان دمی به دو عامل بستگی دارد: شدت گرادیان فشاری و مقاومت دمی:\n </p>\n <p style=\"direction: ltr\" align=\"center\">\n Peak Flow = (setP<sub>PEAK</sub> - PEEP<sub>TOT</sub>) / R<sub>INSP</sub>\n </p>\n <p>\n در این فرمول setP<sub>PEAK</sub> مقدار فشار دمی تنظیم شده توسط کاربر می باشد.\n </p>\n <img src=\"3-2.jpg\" width=\"100%\" />\n <p>\n در کنترل فشاری شدت جریان حداکثر دمی چه موقع کاهش می یابد؟<br/>\n ۱ - افزایش PEEP داخلی<br/>\n ۲ - افزایش فشار دمی توسط کاربر<br/>\n ۳ - کاهش الاستانس<br/>\n ۴ - کاهش مقاومت<br/>\n ۵ - افزایش فشار رانش\n </p>\n </div>\n <div data-role=\"footer\"><button id=\"toggleanswer\" data-icon=\"check\">پاسخ</button><script>$( \"#toggleanswer\" ).click(function() {$(\"#toggleanswer\").text(\"۱ = پاسخ\");});</script>\n <h1>دکتر محمود سقائی</h1>\n </div>\n </div>\n </body>\n\n</html>\n"
},
{
"alpha_fraction": 0.5751398801803589,
"alphanum_fraction": 0.5891286730766296,
"avg_line_length": 55.8636360168457,
"blob_id": "ff0425139aadfd9649545f727a4097946e493496",
"content_id": "9c58d937995acda97508c8ba347ebe2b9e5075e2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 3307,
"license_type": "no_license",
"max_line_length": 535,
"num_lines": 44,
"path": "/www/niv/leak_rate.html",
"repo_name": "msaghaei/ventilatorgraphics",
"src_encoding": "UTF-8",
"text": "<!DOCTYPE html>\n<html xmlns=\"http://www.w3.org/1999/xhtml\">\n \n <head>\n <title>میزان نشت</title>\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\" />\n <meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\">\n <meta http-equiv=\"Content-Language\" content=\"fa\">\n <script src=\"../js/jquery-1.10.2.min.js\"></script>\n <script src=\"../js/rtl.jquery.mobile-1.4.0.js\"></script>\n <link href=\"../css/themes/default/rtl.jquery.mobile-1.4.0.css\" rel=\"stylesheet\" />\n </head>\n \n <body>\n <div data-role=\"page\" id=\"leak_rate\">\n <div data-role=\"header\">\n <a rel=\"external\" href=\"index.html\" data-icon=\"back\" data-iconpos=\"left\">بازگشت</a>\n <h1>میزان نشت</h1>\n </div>\n <div data-role=\"content\">\n <h1>میزان نشت</h1>\n <p>\n وقتی که ترایگر شدت جریانی برای دم بکار میرود، اگر علاوه بر متفاوت بودن سطح زیر منحنی شدت جریان بین دم و بازدم، در پایان بازدم (قبل از ترایگر دمی)، شدت جریان بالاتر از خط پایه (مثبت) باشد، ما به وجود یک نشت مداوم مشکوک می شویم. سپس می توان مقدار نشت را به ازای یک مقدار خاص PEEP بدست آورد. در نشت مداوم، میزان نشت با افزایش فشار افزایش می یابد. 
اما اگر فقط سطح زیر منحنی شدت جریان، بین دم و بازدم تفاوت داشت ولی شدت جریان در پایان بازدم صفر بود، این پدیده علامت این است که نشت مداوم نیست و اساسا هنگام هواگیری ریه روی میدهد.\n </p>\n <img src=\"6-3.jpg\" width=\"100%\" />\n <p>\n در مورد وجود و نوع نشت کدام یک صحیح است؟<br/>\n ۱ - وجود شدت جریان مثبت در پایان بازدم علامت نشت مداوم است<br/>\n ۲ - وجود یک شدت جریان منفی در پایان بازدم علامت فقدان نشت تصادفی است<br/>\n ۳ - قرار داشتن منحنی شدت جریان بر روی خط پایه در پایان بازدم نشانه نشت مداوم است<br/>\n ۴ - قرار داشتن منحنی شدت جریان بر روی خط پایه در پایان بازدم نشانه نشت منقطع است<br/>\n ۵ - در صورت وجود نشت تصادفی از ماسک تهویه غیرتهاجمی، فشار بازدمی کمتر از PEEP خواهد بود\n </p>\n <p style=\"display: none;\" id=\"detailedinfo\">\n مورد ۵ - این یعنی وجود نشت از دریچه بازدمی\n </p>\n </div>\n <div data-role=\"footer\"><button id=\"toggleanswer\" data-icon=\"check\">پاسخ</button><script>$( \"#toggleanswer\" ).click(function() {$(\"#toggleanswer\").text(\"۱ = پاسخ\");$(\"#detailedinfo\").toggle(true);});</script>\n <h1>دکتر محمود سقائی</h1>\n </div>\n </div>\n </body>\n\n</html>\n"
},
{
"alpha_fraction": 0.8181818127632141,
"alphanum_fraction": 0.8181818127632141,
"avg_line_length": 21,
"blob_id": "90dba4155637f281f44e664582ee83f0f6a89afb",
"content_id": "7fe2c0ba67b754d2cc815fa958c230cb529ff9e0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 22,
"license_type": "no_license",
"max_line_length": 21,
"num_lines": 1,
"path": "/README.md",
"repo_name": "msaghaei/ventilatorgraphics",
"src_encoding": "UTF-8",
"text": "# Ventilator Graphics\n"
},
{
"alpha_fraction": 0.6055743098258972,
"alphanum_fraction": 0.6199324131011963,
"avg_line_length": 55.380950927734375,
"blob_id": "f617adb4017dd218ed20ed3da9669080fc579f8d",
"content_id": "b774476c3fc82868f265098638077500efe5cd66",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 3211,
"license_type": "no_license",
"max_line_length": 554,
"num_lines": 42,
"path": "/platforms/android/app/src/main/assets/www/basics/mandatory_and_triggered_breaths.html",
"repo_name": "msaghaei/ventilatorgraphics",
"src_encoding": "UTF-8",
"text": "<!DOCTYPE html>\n<html xmlns=\"http://www.w3.org/1999/xhtml\">\n \n <head>\n <title>تنفس اجباری و تنفس برانگیخته</title>\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\" />\n <meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\">\n <meta http-equiv=\"Content-Language\" content=\"fa\">\n <script src=\"../js/jquery-1.10.2.min.js\"></script>\n <script src=\"../js/rtl.jquery.mobile-1.4.0.js\"></script>\n <link href=\"../css/themes/default/rtl.jquery.mobile-1.4.0.css\" rel=\"stylesheet\" />\n </head>\n \n <body>\n <div data-role=\"page\" id=\"mandatory_and_triggered_breaths\">\n <div data-role=\"header\">\n <a rel=\"external\" href=\"index.html\" data-icon=\"back\" data-iconpos=\"left\">بازگشت</a>\n <h1>تنفس اجباری و تنفس برانگیخته</h1>\n </div>\n <div data-role=\"content\">\n <h1>تنفس اجباری و تنفس برانگیخته</h1>\n <p>\n تنفس خودبخود وقتی است که دم توسط بیمار برانگیخته (ترایگر یا شروع) شود و توسط بیمار نیز سایکل (تمام) شود. اگر دم توسط ونتیلاتور برانگیخته شود و یا توسط ونتیلاتور سایکل شود (و یا هر دو) در این صورت تنفس حاصله اجباری خواهد بود. درک این نکته از نظر نامگذاری و تقسیمبندی مدهای مختلف اهمیت اساسی دارد. وقتی دم توسط بیمار برانگیخته شود، یک دندانه بسوی پایین در منحنی فشار (و یا انحراف به سمت بالا در منحنی شدت جریان) قبل از صعود فشار دمی روی میدهد. 
منحنی سمت راست در شکل زیر لحظه برانگیخته شدن دم توسط بیمار را با یک علامت مثلث کوچک نشان داده است.\n </p>\n <img src=\"1-8.jpg\" width=\"100%\" />\n <p>\n وقتی بیمار دم را بر می انگیزد\n<br/>\n۱ – همیشه بین تلاش دمی بیمار و انتقال جریان گاز تاخیری وجود دارد<br/>\n۲ – افزایش مختصری در منحنی شدت جریان قبل از برانگیخته شدن دم نشان دهنده انگیزش شدت جریانی است<br/>\n۳ – وقوع یک جریان مختصر گازی در لحظه صفر قبل از برانگیخته شدن دم نشان دهنده انگیزش فشاری است<br/>\n۴ – اگر رانش (drive) تنفسی بیمار شدید باشد، دندانه فشاری ایجاد شده در لحظه انگیزش دم عمیقتر است<br/>\n۵ – تمام موارد فوق صحیح است\n </p>\n </div>\n <div data-role=\"footer\"><button id=\"toggleanswer\" data-icon=\"check\">پاسخ</button><script>$( \"#toggleanswer\" ).click(function() {$(\"#toggleanswer\").text(\"۵ = پاسخ\");});</script>\n <h1>دکتر محمود سقائی</h1>\n </div>\n </div>\n </body>\n\n</html>\n"
},
{
"alpha_fraction": 0.5279526114463806,
"alphanum_fraction": 0.5520177483558655,
"avg_line_length": 49.96226501464844,
"blob_id": "04d83786ccaced6dc13bca96b004e62b116ef415",
"content_id": "a35440b584128b3b7b07ba25a780bd451700f33c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 3206,
"license_type": "no_license",
"max_line_length": 327,
"num_lines": 53,
"path": "/platforms/browser/www/expiration/autopeep_without_dynamic_hyperinflation.html",
"repo_name": "msaghaei/ventilatorgraphics",
"src_encoding": "UTF-8",
"text": "<!DOCTYPE html>\n<html xmlns=\"http://www.w3.org/1999/xhtml\">\n \n <head>\n <title>اتوپیپ بدون اتساع مفرط دینامیک</title>\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\" />\n <meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\">\n <meta http-equiv=\"Content-Language\" content=\"fa\">\n <script src=\"../js/jquery-1.10.2.min.js\"></script>\n <script src=\"../js/rtl.jquery.mobile-1.4.0.js\"></script>\n <link href=\"../css/themes/default/rtl.jquery.mobile-1.4.0.css\" rel=\"stylesheet\" />\n </head>\n \n <body>\n <div data-role=\"page\" id=\"autopeep_without_dynamic_hyperinflation\">\n <div data-role=\"header\">\n <a rel=\"external\" href=\"index.html\" data-icon=\"back\" data-iconpos=\"left\">بازگشت</a>\n <h1>اتوپیپ بدون اتساع مفرط دینامیک</h1>\n </div>\n <div data-role=\"content\">\n <h1>اتوپیپ بدون اتساع مفرط دینامیک</h1>\n <p>\n اتوپیپ عموما با اتساع مفرط دینامیک همراه است. یعنی ریه ها نمی توانند بطور کامل تخلیه شوند. اما گاهی دیده میشود که در بعضی بیماران PEEP<sub>TOT</sub> ممکن است از PEEP تنظیم شده بر روی ونتیلاتور بیشتر باشد ولی بازدم بطور کامل انجام میشود. 
علت این پدیده وجود باری بر روی قفسه سینه است مانند هیپرتانسیون شدید داخل شکمی.\n </p>\n <p>\n <a href=\"#popupVideo\" data-rel=\"popup\" data-position-to=\"window\" class=\"ui-btn ui-corner-all ui-shadow ui-btn-inline ui-btn-icon-left ui-icon-video\">اتوپیپ بدون اتساع مفرط دینامیک</a>\n <div data-role=\"popup\" id=\"popupVideo\" class=\"ui-content\">\n <video controls=\"true\" width=\"100%\" height=\"100%\">\n <source src=\"https://download-tls-cdn.edge-cdn.net/videodb/5501/videodb_5501_53500_10731852_hp.mp4\" type=\"video/mp4\">\n Your browser does not support HTML5 video.\n </video>\n <p>\n اتوپیپ بدون اتساع مفرط دینامیک\n </p>\n </div>\n </p>\n <img src=\"4-14.jpg\" width=\"100%\" />\n <p>\n اتوپیپ بدون اتساع مفرط دینامیک در کدام مورد روی میدهد؟<br/>\n ۱ - افزایش فشار داخل شکم<br/>\n ۲ - آمفیزم ریوی<br/>\n ۳ - آسم شدید حاد<br/>\n ۴ - ترایگر معکوس<br/>\n ۵ - پلورال افیوژن وسیع\n </p>\n </div>\n <div data-role=\"footer\"><button id=\"toggleanswer\" data-icon=\"check\">پاسخ</button><script>$( \"#toggleanswer\" ).click(function() {$(\"#toggleanswer\").text(\"۱ = پاسخ\");});</script>\n <h1>دکتر محمود سقائی</h1>\n </div>\n </div>\n </body>\n\n</html>\n"
},
{
"alpha_fraction": 0.5644850730895996,
"alphanum_fraction": 0.5813282132148743,
"avg_line_length": 49.682926177978516,
"blob_id": "63e8289050797c0c0059a916a56dfbb1521fa57d",
"content_id": "bfc5dab5294bb9c9a22084d0f7a28ab22dd6075e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 2650,
"license_type": "no_license",
"max_line_length": 352,
"num_lines": 41,
"path": "/platforms/android/app/src/main/assets/www/expiration/bi_compartmental_expiration.html",
"repo_name": "msaghaei/ventilatorgraphics",
"src_encoding": "UTF-8",
"text": "<!DOCTYPE html>\n<html xmlns=\"http://www.w3.org/1999/xhtml\">\n \n <head>\n <title>بازدم دو کمپارتمانی</title>\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\" />\n <meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\">\n <meta http-equiv=\"Content-Language\" content=\"fa\">\n <script src=\"../js/jquery-1.10.2.min.js\"></script>\n <script src=\"../js/rtl.jquery.mobile-1.4.0.js\"></script>\n <link href=\"../css/themes/default/rtl.jquery.mobile-1.4.0.css\" rel=\"stylesheet\" />\n </head>\n \n <body>\n <div data-role=\"page\" id=\"bi_compartmental_expiration\">\n <div data-role=\"header\">\n <a rel=\"external\" href=\"index.html\" data-icon=\"back\" data-iconpos=\"left\">بازگشت</a>\n <h1>بازدم دو کمپارتمانی</h1>\n </div>\n <div data-role=\"content\">\n <h1>بازدم دو کمپارتمانی</h1>\n <p>\n بازدم دو کمپارتمانی هنگامی روی میدهد که مکانیک تنفسی دو ریه با هم متفاوت باشند. یک کمپارتمان سریع است (کمپلیانس کم و مقاومت طبیعی) و دیگری آهسته (کمپلیانس و مقاومت بالا). هنگام بازدم ابتدا کمپارتمان سریع تخلیه میشود و سپس آهسته. این حالت در بیماران COPD که یک ریه آنها پیوندی باشد روی میدهد. ریه پیوندی کمپارتمان سریع و ریه بیمار آهسته است.\n </p>\n <img src=\"4-10.jpg\" width=\"100%\" />\n <p>\n در مورد ریه های غیر قرینه، کدام یک صحیح است؟<br/>\n ۱ - منحنی شدت جریان بازدمی تغییری نمی کند<br/>\n ۲ - کمپارتمان دارای ثابت زمانی بلندتر سریعتر تخلیه میشود<br/>\n ۳ - کمپارتمان دارای ثابت زمانی کوتاه تر سریعتر تخلیه میشود<br/>\n ۴ - توزیع تهویه تغییری نمی کند<br/>\n ۵ - فقط کمپارتمان دارای ثابت زمانی کوتاه تر تهویه میشود\n </p>\n </div>\n <div data-role=\"footer\"><button id=\"toggleanswer\" data-icon=\"check\">پاسخ</button><script>$( \"#toggleanswer\" ).click(function() {$(\"#toggleanswer\").text(\"۳ = پاسخ\");});</script>\n <h1>دکتر محمود سقائی</h1>\n </div>\n </div>\n </body>\n\n</html>\n"
},
{
"alpha_fraction": 0.5453540086746216,
"alphanum_fraction": 0.5464601516723633,
"avg_line_length": 31.285715103149414,
"blob_id": "59e74787b51dc0883d7564fbf9cf35d993908718",
"content_id": "687e9cbb1bfcca6c9de756beb08eee1259620ba2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 904,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 28,
"path": "/platforms/android/app/src/main/assets/www/esophageal/build_file.py",
"repo_name": "msaghaei/ventilatorgraphics",
"src_encoding": "UTF-8",
"text": "from pathlib import Path\n\nfile_name = input(\"File name: \")\nfile_name = file_name.replace(\" \", \"_\").lower()\npath = Path(file_name + \".html\")\nif path.exists():\n print(file_name + \" already exists!\")\n exit()\npage_title = input(\"title: \")\ndata = open(\"page_template.html\").read()\ndata = data.replace(\"$$page_id$$\", file_name).replace(\"$$page_title$$\", page_title)\nout_file = open(file_name + \".html\", \"w\")\nout_file.write(data)\nout_file.flush()\nout_file.close()\nlink_str = \"\"\"<li>\n <a href=\"$$page_id$$.html\">\n $$page_title$$\n </a>\n </li>\n $$LI$$\"\"\"\nindex_data = open(\"index.html\").read()\nnew_li = link_str.replace(\"$$page_id$$\", file_name).replace(\"$$page_title$$\", page_title)\nindex_data = index_data.replace(\"$$LI$$\", new_li, 1)\nf = open(\"index.html\", \"w\")\nf.write(index_data)\nf.flush()\nf.close()\n"
},
{
"alpha_fraction": 0.5924132466316223,
"alphanum_fraction": 0.6065375208854675,
"avg_line_length": 59.43902587890625,
"blob_id": "4613e4554867ba2fa28c313caeb5c8766a913305",
"content_id": "395055c1ed4da467dd989ba9cfe85adec460e37a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 3362,
"license_type": "no_license",
"max_line_length": 897,
"num_lines": 41,
"path": "/platforms/browser/www/ps/early_cycling.html",
"repo_name": "msaghaei/ventilatorgraphics",
"src_encoding": "UTF-8",
"text": "<!DOCTYPE html>\n<html xmlns=\"http://www.w3.org/1999/xhtml\">\n \n <head>\n <title>سایکل نارس</title>\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\" />\n <meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\">\n <meta http-equiv=\"Content-Language\" content=\"fa\">\n <script src=\"../js/jquery-1.10.2.min.js\"></script>\n <script src=\"../js/rtl.jquery.mobile-1.4.0.js\"></script>\n <link href=\"../css/themes/default/rtl.jquery.mobile-1.4.0.css\" rel=\"stylesheet\" />\n </head>\n \n <body>\n <div data-role=\"page\" id=\"early_cycling\">\n <div data-role=\"header\">\n <a rel=\"external\" href=\"index.html\" data-icon=\"back\" data-iconpos=\"left\">بازگشت</a>\n <h1>سایکل نارس</h1>\n </div>\n <div data-role=\"content\">\n <h1>سایکل نارس</h1>\n <p>\n سایکل هنگامی نارس است که شدت جریان ونتیلاتور خاتمه یافته ولی بیمار هنوز تلاش دمی نشان میدهد. در اینجا شکل منحنی شدت جریان و فشار هردو در ابتدای بازدم دستخوش تغییر میشوند. در منحنی شدت جریان، مقدار شدت جریان حداکثر بازدمی کاهش می یابد و سپس بلافاصله به دنباله اوج شدت جریان بازدمی، تغییری در جهت منحنی شدت جریان ممکند است روی دهد و حتی ممکن است جهت آن معکوس شود (گفتیم که معکوس شدن جهت منحنی نشان دهنده تغییر جهت جریان از بیرون به داخل یا بالعکس است). بنابراین در منحنی شدت جریان دندانه ای بسوی صفر (خط پایه) پیدا میشود که دلالت بر طولانی شدن مدت تقلای دمی بیمار دارد. در منحنی فشار، در ابتدای بازدم افت سریعی در فشار بسوی صفر ممکن است روی دهد که حتی ار PEEP نیز ممکن است کمتر شود. بنابریان در منحنی فشار، در ابتدای بازدم یک موج با تحدب رو به سمت بالا پیدا میشود که نشان دهنده ادامه تلاش دمی بیمار است. 
در صورتی که تلاش بیمار خیلی شدید باشد، ادامه این تلاش ممکن است سبب ترایگر دوبل شود.\n </p>\n <img src=\"5-18.jpg\" width=\"100%\" />\n <p>\n سایکل نارس کدام مورد را ایجاد میکند؟<br/>\n ۱ - تعداد تنفس را کم میکند<br/>\n ۲ - حجم جاری را زیاد میکند<br/>\n ۳ - کار تنفس را کم میکند<br/>\n ۴ - مدت هواگیری ریه ها را کاهش میدهد<br/>\n ۵ - سبب تلاش فعال بازدمی می گردد\n </p>\n </div>\n <div data-role=\"footer\"><button id=\"toggleanswer\" data-icon=\"check\">پاسخ</button><script>$( \"#toggleanswer\" ).click(function() {$(\"#toggleanswer\").text(\"۴ = پاسخ\");});</script>\n <h1>دکتر محمود سقائی</h1>\n </div>\n </div>\n </body>\n\n</html>\n"
}
] | 22 |
pparas007/TwitterGender
|
https://github.com/pparas007/TwitterGender
|
3f94f6cfa7b6c2163b8e96c8df09f53d1e5741e4
|
7ff8283db81672db2ccfc1695351ba4a0bff297e
|
4b9cf14dfd85ce6d68d751e72edbdab0f4d8a6b4
|
refs/heads/master
| 2020-04-13T00:07:41.560739 | 2018-12-22T19:44:17 | 2018-12-22T19:44:17 | 162,838,279 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.58219975233078,
"alphanum_fraction": 0.5977458357810974,
"avg_line_length": 45.685184478759766,
"blob_id": "493647e5ce6afd7bb5e8dc03b836b59ad2552364",
"content_id": "6d00a65d23ca974090ab0f366b1a7d8260882317",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2573,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 54,
"path": "/MLProject/stats.py",
"repo_name": "pparas007/TwitterGender",
"src_encoding": "UTF-8",
"text": "import pandas as pd\r\n\r\n#produces descriptive stats about a pandas.core.series.Series\r\n#flag = 0, categorical series, by element of a Series\r\n#flag = 1, categorical series, by element length of a Series\r\n#flag = 2, categorical series, no extra info\r\ndef descriptive_stats(series, file, flag = 0):\r\n #<class 'pandas.core.frame.DataFrame'>\r\n #<class 'pandas.core.series.Series'>\r\n #print(type(series))\r\n #if type(series) == 'pandas.core.series.Series':\r\n # print(\"Series\")\r\n \r\n file.write(\"\\n\")\r\n file.write(\"Stats for series: \" + series.name + \"\\n\")\r\n file.write(\"-------------------------------------------\\n\")\r\n file.write(\"Length:\" + str(len(series)) + \"\\n\")\r\n file.write(\"# of NaN or Nulls \" + str(series.isna().sum()) + \"\\n\")\r\n file.write(\"Describe output:\" + \"\\n\")\r\n file.write(str(series.describe(include = 'all')) + \"\\n\")\r\n file.write(\"categorical:\" + str(pd.api.types.is_categorical_dtype(series)) + \"\\n\")\r\n if pd.api.types.is_categorical_dtype(series):\r\n if flag == 1:\r\n seriesDF = series.str.len().value_counts().to_frame(name = 'count')\r\n seriesDF.rename_axis(\"color_len\", inplace = True)\r\n total = seriesDF ['count'].sum()\r\n seriesDF ['perc'] = seriesDF['count'] * 100 / total\r\n file.write(str(seriesDF) + \"\\n\")\r\n elif flag == 0:\r\n file.write(\"Unique values:\\n\")\r\n file.write(str(series.unique()) + \"\\n\")\r\n file.write(\"Value counts output:\\n\")\r\n file.write(str(series.value_counts(dropna = False)) + \"\\n\")\r\n else:\r\n file.write(\"Value counts output in bins:\\n\")\r\n file.write(str(series.value_counts(bins = 10)) + \"\\n\")\r\n file.write(\"\\n\")\r\n\r\n#6:gender_confidence, 8:confidence in profile, 10:description, 11:no of favourited tweets,\r\n#13:link color, 17:retweet count, 18:sidebar color, 19:tweet text, 21:tweet count\r\ndef stats(X, y):\r\n #creates stats_dataset.txt file with stats of the dataset\r\n file = open(\"stats_dataset.txt\", 
\"w\")\r\n descriptive_stats(y.astype('category'), file)\r\n descriptive_stats(X.iloc[:, 0], file)\r\n descriptive_stats(X.iloc[:, 1], file)\r\n descriptive_stats(X.iloc[:, 2].astype('category'), file, 2)\r\n descriptive_stats(X.iloc[:, 3], file)\r\n descriptive_stats(X.iloc[:, 4].astype('category'), file, 1)\r\n descriptive_stats(X.iloc[:, 5], file)\r\n descriptive_stats(X.iloc[:, 6].astype('category'), file, 1)\r\n descriptive_stats(X.iloc[:, 7].astype('category'), file, 2)\r\n descriptive_stats(X.iloc[:, 8], file)\r\n file.close()"
},
{
"alpha_fraction": 0.5654382705688477,
"alphanum_fraction": 0.5987420082092285,
"avg_line_length": 35.0059700012207,
"blob_id": "2c97c35efccc5760116756ebb4d158139accdc3c",
"content_id": "6756ec0404913f213035e5fb1f53c90977b0d297",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12401,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 335,
"path": "/MLProject/svmGPV2.py",
"repo_name": "pparas007/TwitterGender",
"src_encoding": "UTF-8",
"text": "import preprocessing\r\nimport utils\r\nimport stats\r\nimport plotting\r\n\r\nimport numpy as np\r\nprint(\"numpy version:\", np.__version__)\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nprint(\"pandasversion:\", pd.__version__)\r\nimport sklearn\r\nprint(\"sklearn version:\", sklearn.__version__)\r\nfrom sklearn.preprocessing import LabelEncoder, Imputer, OneHotEncoder, StandardScaler\r\nimport colorsys\r\nfrom collections import Counter\r\nfrom colors import rgb, hsv, hex\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom sklearn import metrics #, grid_search\r\nfrom sklearn.model_selection import GridSearchCV\r\n\r\ndef grid_search(X,y):\r\n C_array = [0.001, 0.01, 0.1, 1, 10]\r\n gamma_array = [0.001, 0.01, 0.1, 1]\r\n hyperparameters = {'C': C_array, 'gamma' : gamma_array}\r\n grid_search = GridSearchCV(SVC(kernel='rbf'), hyperparameters, cv=10)\r\n grid_search.fit(X, y)\r\n return grid_search.best_params_.get('C'),grid_search.best_params_.get('gamma')\r\n\r\n\"\"\"\r\ndef processColor2(color,most_fequent_color):\r\n #this function first processes different length hex values to make them 6-length\r\n \r\n #this function need to be improved to properly understand & handle 2,5,length values\r\n # currently all doubtful values are replaced with most frequently occuring color \r\n return_color=''\r\n if(len(color) == 6):\r\n return_color=color\r\n elif(len(color) == 2):\r\n return_color=color[0]+color[0]+color[0]+color[1]+color[1]+color[1]\r\n elif(len(color) == 3 or len(color) == 4):\r\n return_color=color[0]+color[0]+color[1]+color[1]+color[2]+color[2]\r\n else:\r\n return_color=most_fequent_color\r\n \r\n #separate r,g,b and convert them to integer from hex\r\n r,g,b=int(return_color[0:2],16),int(return_color[2:4],16),int(return_color[4:6],16)\r\n \r\n #convert rgb to hsv: copied from internet\r\n h, s, v = colorsys.rgb_to_hsv(r/255., g/255., 
b/255.)\r\n #h, s, v = colorsys.rgb_to_hls(r/255., g/255., b/255.)\r\n h, s, v = 360 * h, 100 * s, 100 * v\r\n \r\n return h, s, v\r\n \r\n\r\ndef processColor(color,most_fequent_color):\r\n #this function first processes different length hex values to make them 6-length\r\n\r\n #separate r,g,b and convert them to integer from hex\r\n r,g,b=int(return_color[0:2],16),int(return_color[2:4],16),int(return_color[4:6],16)\r\n \r\n #convert rgb to hsv: copied from internet\r\n h, l, s = colorsys.rgb_to_hls(r/255., g/255., b/255.)\r\n h, l, s = 360 * h, 100 * l, 100 * s\r\n \r\n return h, l, s\r\n\r\ndef colorCode2(column):\r\n # where there is no color value in column, replace it with the most common color value\r\n #print(column)\r\n #print('column.shape:', column.shape)\r\n #print('column.type:', type(column))\r\n #print('column[0].shape:', column[0].shape)\r\n #print('column[0] type:',type(column[0]))\r\n #print('column[0][0] type:',type(column[0][0]))\r\n # commenting it out, since elements are already str. 
If used, elements become numpy.str_\r\n #column=column.astype(str)\r\n ##print('after column.shape:', column.shape)\r\n #print('after column.type:', type(column))\r\n #print('after column[0].shape:', column[0].shape)\r\n #print('after column[0] type:',type(column[0]))\r\n #print('after column[0][0] type:',type(column[0][0]))\r\n #print(column)\r\n \r\n for i in range(0,(len(column))):\r\n h,l, s = processColor2(column[i])\r\n #hue value ranges from 0-360\r\n #divide it into 3 parts and put 1, 2 or 3 in color column \r\n if(h<=120):\r\n column[i]=1.\r\n elif(h<=240):\r\n column[i]=2.\r\n elif(h<=360):\r\n column[i]=3.\r\n #else:\r\n # column[i]=4.\r\n column=np.reshape(column,(len(column),1))\r\n return (column)\r\n\"\"\"\r\n\r\n\"\"\"\r\ndef colorCode(column):\r\n # where there is no color value in column, replace it with the most common color value\r\n #print(column)\r\n #print('column.shape:', column.shape)\r\n #print('column.type:', type(column))\r\n #print('column[0].shape:', column[0].shape)\r\n #print('column[0] type:',type(column[0]))\r\n #print('column[0][0] type:',type(column[0][0]))\r\n # commenting it out, since elements are already str. 
If used, elements become numpy.str_\r\n #column=column.astype(str)\r\n ##print('after column.shape:', column.shape)\r\n #print('after column.type:', type(column))\r\n #print('after column[0].shape:', column[0].shape)\r\n #print('after column[0] type:',type(column[0]))\r\n #print('after column[0][0] type:',type(column[0][0]))\r\n #print(column)\r\n\r\n map(str.strip,column)\r\n column=column[:,0]\r\n most_fequent_color=Counter(column).most_common(1)\r\n most_fequent_color=most_fequent_color[0][0]\r\n \r\n for i in range(0,(len(column))):\r\n h,s,v=processColor(column[i],most_fequent_color)\r\n #hue value ranges from 0-360\r\n #divide it into 3 parts and put 1, 2 or 3 in color column \r\n if(h<=120):\r\n column[i]=1.\r\n elif(h<=240):\r\n column[i]=2.\r\n elif(h<=360):\r\n column[i]=3.\r\n #else:\r\n # column[i]=4.\r\n column=np.reshape(column,(len(column),1))\r\n return (column)\r\n\r\ndef plotDataSimple(X,y):\r\n Xtemp = X.copy()\r\n ytemp = y.copy()\r\n #Xtemp = X.copy().values\r\n #ytemp = y.copy().values\r\n print(\"yplotdata\", y)\r\n #index=np.argwhere(np.logical_and(ytemp!='male',ytemp!='female'))\r\n #ytemp=np.delete(ytemp,index,axis=0)\r\n #Xtemp=np.delete(Xtemp,index,axis=0)\r\n #ytemp=LabelEncoder().fit_transform(ytemp)\r\n\r\n # plots the data points with o for the positive examples and x for the negative examples. 
output is saved to file graph.png\r\n fig, ax = plt.subplots(figsize=(12,8))\r\n\r\n ## Using conditions\r\n male = ytemp>0\r\n female = ytemp<=0\r\n #ax.scatter(Xtemp[male,4], Xtemp[male,6], c='b', marker='o', label='Male')\r\n #ax.scatter(Xtemp[female,4], Xtemp[female,6], c='r', marker='x', label='Female')\r\n ax.scatter(Xtemp[:,2], ytemp, c='r', marker='x', label='Data')\r\n\r\n ax.set_xlabel('#tweets')\r\n ax.set_ylabel('gender')\r\n plt.show()\r\n \r\n fig, ax = plt.subplots(figsize=(12,8))\r\n #ax.scatter(Xtemp[male,4], Xtemp[male,6], c='b', marker='o', label='Male')\r\n #ax.scatter(Xtemp[female,4], Xtemp[female,6], c='r', marker='x', label='Female')\r\n ax.scatter(Xtemp[:,4], ytemp, c='b', marker='o', label='Data')\r\n\r\n ax.set_xlabel('#tweets')\r\n ax.set_ylabel('gender')\r\n plt.show()\r\n\r\ndef plotData(X,y):\r\n Xtemp = X.copy()\r\n ytemp = y.copy()\r\n #Xtemp = X.copy().values\r\n #ytemp = y.copy().values\r\n print(\"yplotdata\", y)\r\n #index=np.argwhere(np.logical_and(ytemp!='male',ytemp!='female'))\r\n #ytemp=np.delete(ytemp,index,axis=0)\r\n #Xtemp=np.delete(Xtemp,index,axis=0)\r\n #ytemp=LabelEncoder().fit_transform(ytemp)\r\n\r\n # plots the data points with o for the positive examples and x for the negative examples. 
output is saved to file graph.png\r\n fig, ax = plt.subplots(figsize=(12,8))\r\n\r\n ## Using conditions\r\n male = ytemp>0\r\n female = ytemp<=0\r\n #ax.scatter(Xtemp[male,4], Xtemp[male,6], c='b', marker='o', label='Male')\r\n #ax.scatter(Xtemp[female,4], Xtemp[female,6], c='r', marker='x', label='Female')\r\n \r\n ax.scatter(Xtemp[male,2], Xtemp[male,4], c='b', marker='o', label='Male')\r\n ax.scatter(Xtemp[female,2], Xtemp[female,4], c='r', marker='x', label='Female')\r\n\r\n ax.set_xlabel('Test 1')\r\n ax.set_ylabel('Test 2') \r\n plt.show()\r\n #fig.savefig('graph.png') \r\n\r\ndef utilsData(X, y):\r\n Xtemp = X\r\n ytemp = y\r\n #dataset = pd.read_csv('dataset.csv',encoding = \"latin1\", usecols=(5,6,8,11,13,17,18,21))\r\n #divide into dependent and independent variables \r\n #X = dataset.iloc[:, 1:].values\r\n #y = dataset.iloc[:, 0].values\r\n \r\n Xtemp = X.copy()\r\n ytemp = y.copy()\r\n\r\n #There are 20050 records\r\n print(\"Stats for y: \")\r\n print(ytemp.describe(include='all'))\r\n print(ytemp.value_counts(dropna=False))\r\n print(\" \")\r\n print(\"Stats for X: \")\r\n print(Xtemp.describe(include='all'))\r\n #print(Xtemp.unique())\r\n for i in range(0,(len(Xtemp.columns))):\r\n print(\"column\", Xtemp.iloc[:,i].name, \", NaN count:\", Xtemp.iloc[:,i].isna().sum())\r\n if i == 3 or i == 5:\r\n #print(Xtemp.iloc[:,i].unique())\r\n print(\"skipping\")\r\n else:\r\n print(Xtemp.iloc[:,i].value_counts(bins=10))\r\n #if pd.api.types.is_categorical_dtype(Xtemp.iloc[:,i]):\r\n # print(\"is categorical\")\r\n # print(Xtemp.iloc[:,i].unique())\r\n #else:\r\n # print(\"is NOT categorical\")\r\n # print(Xtemp.iloc[:,i].value_counts(bins=10))\r\n #print(Xtemp.iloc[:,i].unique())\r\n #print(Xtemp.iloc[:,i].value_counts(dropna=False))\r\n #num retweets\r\n #gpv_count, gpv_division = np.histogram(Xtemp.iloc[:,i], bins = [0,1,2,3,4,5,6,7,8,9])\r\n #Xtemp.iloc[:,i].hist(bins=gpv_division)\r\n #print(gpv_division)\r\n\r\n #print(type(Xtemp.iloc[:,3].hist(bins=[0, 
1, 2, 3, 4, 5, 6, 7, 8, 9])))\r\n #print(type(Xtemp.iloc[:,i].hist(bins=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]).figure))\r\n #Xtemp.iloc[:,3].hist(bins=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]).figure\r\n \r\n cuts = pd.cut(Xtemp.iloc[:,4], [-1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\r\n print(cuts.value_counts())\r\n print(\"cuts end\")\r\n #print(type(cuts.value_counts()))\r\n #print(type(cuts.value_counts().plot(kind='bar')))\r\n\r\n #.figure.show()\r\n #Xtemp.iloc[:,4].plot.hist(grid=True, bins= [0, 10], rwidth=0.9, color='#607c8e')\r\n Xtemp.iloc[:,4].plot.hist(grid=True, bins=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], rwidth=0.9, color='#607c8e')\r\n plt.title('Title')\r\n plt.xlabel('#retweets')\r\n plt.ylabel('count')\r\n plt.grid(axis='y', alpha=1)\r\n #plt.show()\r\n \r\n\r\n #column = column.astype('str')\r\n #mask = (column.str.len() != 6)\r\n\r\n #lengths = column.str.len()\r\n #print(lengths.value_counts())\r\n column = Xtemp.iloc[:,3]\r\n linkColorStats = column.str.len().value_counts().to_frame(name='count')\r\n linkColorStats.rename_axis(\"link_color_len\", inplace=True)\r\n total = linkColorStats ['count'].sum()\r\n linkColorStats ['perc'] = linkColorStats ['count'] * 100 / total\r\n print(linkColorStats)\r\n\r\n column = Xtemp.iloc[:,5]\r\n sidebarColorStats = column.str.len().value_counts().to_frame(name='count')\r\n sidebarColorStats.rename_axis(\"sidebar_color_len\", inplace=True)\r\n total = sidebarColorStats['count'].sum()\r\n sidebarColorStats ['perc'] = sidebarColorStats ['count'] * 100 / total\r\n print(sidebarColorStats)\r\n #plotData(Xtemp, ytemp)\r\n\"\"\"\r\n\r\ndef main():\r\n #5:gender, 6:gender_confidence, 8:confidence in profile, 10:description, 11:no of favourited tweets,\r\n #13:link color, 14:name, 17:retweet count, 18:sidebar color, 21:tweet count\r\n # (5, 6, 8, 11, 13, 17, 18, 21))\r\n dataset = pd.read_csv('dataset.csv',encoding = \"latin1\", usecols = (5, 6, 8, 10, 11, 13, 17, 18, 21))\r\n #divide into dependent and independent variables \r\n X = 
dataset.iloc[:, 1:]\r\n y = dataset.iloc[:, 0]\r\n\r\n stats.stats(X, y)\r\n\r\n #print(\"help\")\r\n #print(type(y.values))\r\n #print(type(X.values))\r\n print(\"printing\")\r\n\r\n X, y = preprocessing.preprocessData(X.values, y.values)\r\n\r\n plotting.plot(X, y)\r\n\r\n \"\"\"\r\n utilsData(X, y)\r\n \r\n #turning it into numpy ndarrays\r\n X = X.values\r\n y = y.values\r\n\r\n # data preprocessing to clean and arrange data.\r\n X,y=preprocessData(X,y)\r\n #plotData(X, y)\r\n\r\n plotDataSimple(X, y)\r\n \"\"\"\r\n #after preprocessing: split data into training and test set\r\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 0)\r\n\r\n #fit the data to the classifier\r\n svc = SVC(kernel = 'rbf',gamma=0.5, random_state = 0)\r\n svc.fit(X_train, y_train)\r\n\r\n # predict the test data using the model\r\n y_pred = svc.predict(X_test)\r\n cm = confusion_matrix(y_test, y_pred)\r\n \r\n #find accuracy percentage\r\n print(\"Accuracy: \", metrics.accuracy_score(y_test, y_pred))\r\n\r\n #hyperparameter tuning through grid search \r\n #best_C, best_gamma = grid_search(X, y)\r\n #print('Predicted best hyperparameters through hyperparameter-tuning', best_C, best_gamma)\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.5604539513587952,
"alphanum_fraction": 0.5800960063934326,
"avg_line_length": 28.171052932739258,
"blob_id": "058b62b4cd9aaa6d780c6a8851df68997be2bbfa",
"content_id": "ce9c6fdaf6bbc7011eb8315430e644cc7b0177c9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2291,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 76,
"path": "/MLProject/analysis.py",
"repo_name": "pparas007/TwitterGender",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Dec 16 14:28:41 2018\r\n\r\n@author: 007Paras\r\n\"\"\"\r\n\r\nimport preprocessing\r\nimport stats\r\nimport plotting\r\nimport tuning\r\nimport numpy as np\r\nprint(\"numpy version:\", np.__version__)\r\nimport pandas as pd\r\nprint(\"pandasversion:\", pd.__version__)\r\nimport sklearn\r\nprint(\"sklearn version:\", sklearn.__version__)\r\nfrom sklearn.svm import SVC, LinearSVC\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom sklearn import metrics #, grid_search\r\nfrom sklearn.metrics import classification_report\r\nfrom sklearn.svm.libsvm import predict_proba\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom nltk.stem import PorterStemmer\r\nfrom nltk.tokenize import sent_tokenize, word_tokenize\r\nfrom nltk.corpus import stopwords\r\nimport nltk\r\nnltk.download('stopwords')\r\nnltk.download('punkt')\r\nimport re\r\n\r\ndef process(x1):\r\n porter = PorterStemmer()\r\n stop_words = set(stopwords.words('english'))\r\n map={}\r\n max_freq=0\r\n f= open(\"stats.txt\",\"w\")\r\n for i in range(0,len(x1)):\r\n x1[i]=x1[i].lower()\r\n regex = re.compile('[^a-zA-Z]')\r\n x1[i]=regex.sub(' ', x1[i])\r\n token_words=word_tokenize(x1[i])\r\n\r\n stem_sentence=\"\"\r\n for word in token_words:\r\n word=porter.stem(word)\r\n if word not in stop_words: \r\n if(len(word)>2):\r\n if(map.get(word)==None):\r\n map[word]=1\r\n else:\r\n map[word]=(map.get(word)+1)\r\n if(max_freq<map[word]):\r\n max_freq=map[word]\r\n \r\n print(max_freq)\r\n for i in range(3,max_freq+1):\r\n for key in map:\r\n if(i==map[key]):\r\n print(key+' '+str(map[key])+'\\n')\r\n f.write(key+' '+str(map[key])+'\\n')\r\n f.close() \r\ndef main():\r\n dataset = pd.read_csv('dataset.csv', encoding = \"latin1\", usecols = (5,10,19))\r\n dataset = dataset.replace(np.nan, '', regex=True)\r\n x1 = dataset.iloc[:, 
1].values\r\n x2 = dataset.iloc[:, 2].values\r\n y = dataset.iloc[:, 0].values\r\n x1=x1+' '+x2\r\n process(x1)\r\n \r\n \r\nif __name__ == '__main__':\r\n main()"
},
{
"alpha_fraction": 0.6054971814155579,
"alphanum_fraction": 0.6443006992340088,
"avg_line_length": 29.769229888916016,
"blob_id": "4fb64af1753f3557682ac6500bcab53a96444910",
"content_id": "b08868e9f2f4a5588ca41fb640ec53a835ff63a4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1237,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 39,
"path": "/MLProject/tuning.py",
"repo_name": "pparas007/TwitterGender",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Oct 26 19:08:13 2018\r\n\r\n@author: 007Paras\r\n\"\"\"\r\n\r\nfrom sklearn.model_selection import cross_val_score\r\nfrom sklearn.feature_selection import RFE\r\nimport numpy as np\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.model_selection import GridSearchCV\r\n\r\ndef grid_search(X,y):\r\n C_array = [0.001, 0.01, 0.1, 1, 10]\r\n gamma_array = [0.001, 0.01, 0.1, 1]\r\n hyperparameters = {'C': C_array, 'gamma' : gamma_array}\r\n grid_search = GridSearchCV(SVC(kernel = 'rbf'), hyperparameters, cv=10)\r\n grid_search.fit(X, y)\r\n return grid_search.best_params_.get('C'),grid_search.best_params_.get('gamma')\r\n\r\ndef postModelStats(X_train,y_train):\r\n #linear_model = SVC(kernel = 'linear', gamma = 0.5, random_state = 0)\r\n linear_model = SVC(kernel = 'linear')\r\n linear_model.fit(X_train, y_train)\r\n \r\n #print(linear_model.coef_)\r\n selector = RFE(linear_model, 30, step=1)\r\n selector = selector.fit(X_train, y_train)\r\n \r\n top_features=np.where(selector.support_)[0]\r\n print(top_features)\r\n #print(selector.ranking_)\r\n return top_features\r\n\r\ndef crossValidate(X, y, model):\r\n k_folds = 5\r\n scores = cross_val_score(model, X, y, cv = k_folds)\r\n return scores"
},
{
"alpha_fraction": 0.6271105408668518,
"alphanum_fraction": 0.6562599539756775,
"avg_line_length": 39.01960754394531,
"blob_id": "5dadb4b56656a5a892de6dc5103a55749ab0a2f7",
"content_id": "b72f2ab86d3d2c7883b62ee5038fd591ac7b011d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6278,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 153,
"path": "/MLProject/svm.py",
"repo_name": "pparas007/TwitterGender",
"src_encoding": "UTF-8",
"text": "import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nfrom sklearn.preprocessing import LabelEncoder, Imputer, OneHotEncoder, StandardScaler\r\nimport colorsys\r\nfrom collections import Counter\r\nfrom colors import rgb, hsv, hex\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.cross_validation import train_test_split\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom sklearn import metrics, grid_search\r\nfrom sklearn.grid_search import GridSearchCV\r\n\r\ndef grid_search(X,y):\r\n C_array = [0.001, 0.01, 0.1, 1, 10]\r\n gamma_array = [0.001, 0.01, 0.1, 1]\r\n hyperparameters = {'C': C_array, 'gamma' : gamma_array}\r\n grid_search = GridSearchCV(SVC(kernel='rbf'), hyperparameters, cv=10)\r\n grid_search.fit(X, y)\r\n return grid_search.best_params_.get('C'),grid_search.best_params_.get('gamma')\r\n\r\ndef processColor(color,most_fequent_color):\r\n #this function first processes different length hex values to make them 6-length\r\n \r\n #this function need to be improved to properly understand & handle 2,5,length values\r\n # currently all doubtful values are replaced with most frequently occuring color \r\n return_color=''\r\n if(len(color) == 6):\r\n return_color=color\r\n elif(len(color) == 2):\r\n return_color=color[0]+color[0]+color[0]+color[1]+color[1]+color[1]\r\n elif(len(color) == 3 or len(color) == 4):\r\n return_color=color[0]+color[0]+color[1]+color[1]+color[2]+color[2]\r\n else:\r\n return_color=most_fequent_color\r\n \r\n #separate r,g,b and convert them to integer from hex\r\n r,g,b=int(return_color[0:2],16),int(return_color[2:4],16),int(return_color[4:6],16)\r\n \r\n #convert rgb to hsv: copied from internet\r\n h, s, v = colorsys.rgb_to_hsv(r/255., g/255., b/255.)\r\n h, s, v = 360 * h, 100 * s, 100 * v\r\n \r\n return h, s, v\r\n \r\ndef colorCode(column):\r\n # where there is no color value in column, replace it with the most common color value\r\n column=column.astype(str)\r\n map(str.strip,column)\r\n 
column=column[:,0]\r\n most_fequent_color=Counter(column).most_common(1)\r\n most_fequent_color=most_fequent_color[0][0]\r\n \r\n for i in range(0,(len(column))):\r\n h,s,v=processColor(column[i],most_fequent_color)\r\n #hue value ranges from 0-360\r\n #divide it into 3 parts and put 1, 2 or 3 in color column \r\n if(h<=120):\r\n column[i]=1.\r\n elif(h<=240):\r\n column[i]=2.\r\n elif(h<=360):\r\n column[i]=3.\r\n #else:\r\n # column[i]=4.\r\n column=np.reshape(column,(len(column),1))\r\n return (column)\r\n\r\ndef preprocessData(X,y):\r\n #remove rows from both X and y, where gender is not specified \r\n #index=np.argwhere(np.logical_and(np.logical_and(y!='male',y!='female'),y!='brand'))\r\n index=np.argwhere(np.logical_and(y!='male',y!='female'))\r\n y=np.delete(y,index,axis=0)\r\n X=np.delete(X,index,axis=0)\r\n \r\n\r\n #LabelEncoder is used to transform categorical variable(i.e 'male','female' & 'brand'), to \r\n # numerical variable (0, 1 & 2)\r\n y=LabelEncoder().fit_transform(y)\r\n \r\n #remove rows from X & y with gender confidence less than some threshold\r\n #Here the assumption is that such rows might affect the model badly\r\n #need to look into this ...\r\n gender_confidence_threshold=0.6\r\n index=np.argwhere(X[:,0]<gender_confidence_threshold) \r\n y=np.delete(y,index,axis=0)\r\n X=np.delete(X,index,axis=0)\r\n\r\n #remove rows from X and y with confidence in profile less than some threshold\r\n #Here the assumption is that such rows might affect the model badly\r\n #need to look into this ...\r\n profile_confidence_threshold=0.6\r\n index=np.argwhere(X[:,1]<profile_confidence_threshold) \r\n y=np.delete(y,index,axis=0)\r\n X=np.delete(X,index,axis=0)\r\n \r\n # reomove unusefull column like profile confidence and gender confidence now whcih \r\n # do not add any value as model features \r\n X=np.delete(X,0,axis=1) #gender confidence column removed\r\n X=np.delete(X,0,axis=1) #profile confidence column removed\r\n \r\n #color coding\r\n #colorCode 
function assigns values between 1,2,3,4 according to its hue value\r\n #colorCode function needs to be improved in future\r\n X[:,1:2]= colorCode(X[:,1:2])\r\n X[:,3:4]= colorCode(X[:,3:4])\r\n #at this point we have color columns' values in category 1,2,3,4 according to their hue values\r\n #such categorical data needs to be handled by adding dummy columns to represent each category\r\n #OneHotEncoder is he class which converts single column into 3(or 4) different columns for each category\r\n #.. containing value 0 or 1.\r\n encoder_color=OneHotEncoder(categorical_features=[1,3])\r\n X=encoder_color.fit_transform(X).toarray()\r\n \r\n #Scaling\r\n #fit all the features between range of -1 & 1, to avoid overemphasize on a particular feature\r\n standardScalar=StandardScaler()\r\n X=standardScalar.fit_transform(X)\r\n \r\n #preprocessing completed\r\n return X,y\r\n\r\n \r\ndef main():\r\n #5:gender, 6:gender_confidence, 8:confidence in profile, 10:description, 11:no of favourited tweets,\r\n #13:link color, 14:name, 17:retweet count, 18:sidebar color, 21:tweet count\r\n \r\n dataset = pd.read_csv('dataset.csv',encoding = \"latin1\", usecols=(5,6,8,11,13,17,18,21))\r\n #divide into dependent and independent variables \r\n X = dataset.iloc[:, 1:].values\r\n y = dataset.iloc[:, 0].values\r\n # data preprocessing to clean and arrange data.\r\n X,y=preprocessData(X,y)\r\n \r\n #after preprocessing: split data into training and test set\r\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 0)\r\n \r\n #fit the data to the classifier\r\n svc = SVC(kernel = 'rbf',gamma=0.5, random_state = 0)\r\n svc.fit(X_train, y_train)\r\n \r\n # predict the test data using the model\r\n y_pred = svc.predict(X_test)\r\n cm = confusion_matrix(y_test, y_pred)\r\n \r\n #find accuracy percentage\r\n print(\"Accuracy: \",metrics.accuracy_score(y_test,y_pred))\r\n \r\n #hyperparameter tuning through grid search \r\n 
best_C,best_gamma=grid_search(X,y)\r\n print('Predicted best hyperparameters through hyperparameter-tuning',best_C,best_gamma)\r\n \r\nif __name__ == '__main__':\r\n main()\r\n\r\n"
},
{
"alpha_fraction": 0.5766562223434448,
"alphanum_fraction": 0.614145040512085,
"avg_line_length": 46.313514709472656,
"blob_id": "3ae15148e391aa0e416ef4ba7ad142231af86ddd",
"content_id": "7a75c6ae9c643c1125aa303b75c46ad1d8ebe663",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8936,
"license_type": "no_license",
"max_line_length": 165,
"num_lines": 185,
"path": "/MLProject/svmGPV.py",
"repo_name": "pparas007/TwitterGender",
"src_encoding": "UTF-8",
"text": "import preprocessing\r\nimport stats\r\nimport plotting\r\nimport tuning\r\nimport numpy as np\r\nprint(\"numpy version:\", np.__version__)\r\nimport pandas as pd\r\nprint(\"pandasversion:\", pd.__version__)\r\nimport scikitplot as skplt\r\nimport matplotlib.pyplot as plt\r\n\r\nimport sklearn\r\nprint(\"sklearn version:\", sklearn.__version__)\r\nfrom sklearn.svm import SVC, LinearSVC\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom sklearn import metrics #, grid_search\r\nfrom sklearn.metrics import classification_report\r\nfrom sklearn.svm.libsvm import predict_proba\r\nfrom sklearn.linear_model import LogisticRegression\r\n\r\ndef process(name, method, X, y, X_train, y_train, X_test, y_test):\r\n print('\\n\\n#################### ' + name + ' - Report ####################\\n')\r\n #print('\\n\\n#################### Support Vector Machines - Report ####################\\n')\r\n clf = None\r\n \r\n if method == 'lr':\r\n ### Logistic Regression\r\n clf = LogisticRegression(solver = 'lbfgs')\r\n elif method == 'svc_linear':\r\n #fit the data to the classifier\r\n clf = LinearSVC(C = 1, max_iter=5000)\r\n elif method == 'svc_rbf':\r\n #fit the data to the classifier\r\n clf = SVC(kernel = 'rbf', gamma = 0.01, C = 1, probability = True)\r\n elif method == 'knc':\r\n #fit the data to the classifier\r\n clf = KNeighborsClassifier(n_neighbors = 3)\r\n\r\n scores = tuning.crossValidate(X_train, y_train, clf)\r\n print(\"K-fold Cross-Validated Accuracy: %0.2f (+/- %0.2f)\" % (scores.mean(), scores.std() * 2))\r\n clf.fit(X_train, y_train)\r\n\r\n # predict the test data using the model\r\n y_pred = clf.predict(X_test)\r\n cm = confusion_matrix(y_test, y_pred)\r\n\r\n #find accuracy percentage\r\n print(\"Accuracy: \", metrics.accuracy_score(y_test, y_pred))\r\n \r\n #find f1 score\r\n print(\"F1 score: \",metrics.f1_score(y_test, y_pred, 
average=\"macro\"))\r\n \r\n skplt.metrics.plot_roc_curve(y_test, y_pred)\r\n plt.show()\r\n \r\n print('\\nAverage confidence in prediction: ')\r\n\r\n if method == 'knc':\r\n conf = clf.score(X_test, y_test)\r\n print(conf)\r\n else:\r\n conf = clf.decision_function(X_test)\r\n print(np.sum(abs(conf)) / len(conf))\r\n print('\\nConfusion Matrix:')\r\n print('\\tMale\\tFemale\\nMale\\t',cm[0, 0],'\\t',cm[0, 1],'\\t\\nFemale\\t',cm[1, 0],'\\t',cm[1, 1],'\\n')\r\n \r\n target_names = ['male','female']\r\n print(classification_report(y_test, y_pred, target_names = target_names))\r\n\r\n if method == 'lr':\r\n plotting.trainingVsAccuracyLogReg(X, y)\r\n elif method == 'svc_linear' or method == 'svc_rbf':\r\n plotting.trainingVsAccuracySVC(X, y, method)\r\n #elif method == 'knc':\r\n #plotting.trainingVsAccuracy???(X, y)\r\n\r\n #hyperparameter tuning through grid search \r\n #best_C, best_gamma = tuning.grid_search(X, y)\r\n #print('Predicted best hyperparameters through hyperparameter-tuning', best_C, best_gamma)\r\n\r\ndef main():\r\n #14:name\r\n names_dataset = dataset = pd.read_csv('dataset.csv', encoding = \"latin1\", usecols = (14,))\r\n #5:gender, 6:gender_confidence, 8:confidence in profile, 10:description, 11:no of favourited tweets,\r\n #13:link color, 17:retweet count, 18:sidebar color, 19:tweet text, 21:tweet count\r\n dataset = pd.read_csv('dataset.csv', encoding = \"latin1\", usecols = (5, 6, 8, 10, 11, 13, 17, 18, 19, 21))\r\n \r\n #words = pd.read_csv('manually_filtered_stats.csv', encoding = \"latin1\", usecols = (0,))\r\n #divide into dependent and independent variables \r\n #6:gender_confidence, 8:confidence in profile, 10:description, 11:no of favourited tweets,\r\n #13:link color, 17:retweet count, 18:sidebar color, 19:tweet text, 21:tweet count\r\n X = dataset.iloc[:, 1:]\r\n #5:gender\r\n y = dataset.iloc[:, 0]\r\n \r\n #10:description, 19:tweet text\r\n description_and_tweet = pd.read_csv('dataset.csv', encoding = \"latin1\", usecols = (10, 
19))\r\n description_and_tweet = description_and_tweet.replace(np.nan, '', regex=True)\r\n x1 = description_and_tweet.iloc[:, 0].values\r\n x2 = description_and_tweet.iloc[:, 1].values\r\n description_and_tweet_combined = x1+' '+x2\r\n \r\n #swap # of favorite tweets and link_color column\r\n #link_color_col = numpy.copy(X[:, 1])\r\n #X[:, 1] = X[:, 0]\r\n #X[:, 0] = link_color_col\r\n\r\n #swap # of favorite tweets and sidebar_color column\r\n #sidebar_color_col = numpy.copy(X[:, 3])\r\n #X[:, 3] = X[:, 1]\r\n #X[:, 1] = sidebar_color_col\r\n\r\n #Might need to be updated/reviewed because of change of columns\r\n stats.stats(X, y)\r\n\r\n X, y = preprocessing.preprocessData(X.values, y.values, names_dataset.values, description_and_tweet_combined)\r\n \r\n \"\"\"\r\n #84 columns\r\n X_feature_names = [\r\n 16 dummy features extracted from link color 0-15\r\n 16 dummy features extracted from sidebar color 16-31\r\n '# of favorite tweets', 32 \r\n '# of retweets', 33\r\n '# of tweets', 34\r\n 5 features extracted from description, 35-39\r\n 5 features extracted from tweet text, 40-44\r\n 3 features extracted from name (dummy), 45-47\r\n 36 features extracted from tweet text 48-83\r\n ]\r\n \"\"\"\r\n \r\n #Might need to be updated/reviewed because of change of columns\r\n #It doesn't seem it is affected by order of columns, but with dummy variables, it might generate too many plots\r\n \r\n feature_names = ['lk_red', 'lk_red-orange', 'lk_orange-brown', 'lk_orange-yellow', 'lk_yellow', 'lk_yellow-green', 'lk_green', \r\n 'lk_green-cyan', 'lk_cyan', 'lk_cyan-blue', 'lk_blue', 'lk_blue-magenta', 'lk_magenta', 'lk_magenta-pink', 'lk_pink', 'lk_pink-red',\r\n 'sb_red', 'sb_red-orange', 'sb_orange-brown', 'sb_orange-yellow', 'sb_yellow', 'sb_yellow-green', 'sb_green', \r\n 'sb_green-cyan', 'sb_cyan', 'sb_cyan-blue', 'sb_blue', 'sb_blue-magenta', 'sb_magenta', 'sb_magenta-pink', 'sb_pink', 'sb_pink-red',\r\n '# of favorite tweets', '# of retweets', '# of tweets',\r\n '# of 
hashtags in description', 'URLs present in description', '# of emoticons used in description', 'length of description', '# of mentions in description',\r\n '# of hashtags in tweet text', 'URLs present in tweet text', '# of emoticons used in tweet text', 'length of tweet text', '# of mentions in tweet text',\r\n 'feature 1 from name', 'feature 2 from name', 'feature 3 from name',\r\n 'women word_freq', 'bitch word_freq', 'nation word_freq', 'tec word_freq', 'season word_freq',\r\n 'hair word_freq', 'dad word_freq', 'player word_freq', 'cat word_freq', 'polit word_freq',\r\n 'blogger word_freq', 'radio word_freq', 'pushawardslizquen word_freq', 'boy word_freq', 'author word_freq',\r\n 'footbal word_freq', 'kid word_freq', 'travel word_freq', 'social word_freq', 'heart word_freq',\r\n 'vote word_freq', 'food word_freq', 'guy word_freq', 'beauti word_freq', 'lover word_freq',\r\n 'via word_freq', 'writer word_freq', 'artist word_freq', 'man word_freq', 'sport word_freq',\r\n 'fuck word_freq', 'girl word_freq', 'fan word_freq', 'game word_freq', 'love word_freq',\r\n 'weather word_freq'\r\n ]\r\n\r\n #[ 0 8 11 12 14 15 16 22 24 25 32 34 35 38 44 46 47 48 53 60 63 69 71 72 76 77 79 80 81 82]\r\n index_temp = [0, 8, 11, 12, 14, 15, 16, 22, 24, 25, 32, 34, 35, 38, 44, 46, 47, 48, 53, 60, 63, 69, 71, 72, 76, 77, 79, 80, 81, 82]\r\n print(\"first line: \", X[0, :])\r\n plotting.plot(X, y, feature_names, index_temp)\r\n\r\n #Might need to be updated/reviewed because of change of columns\r\n #This is happening over the entire dataset and should only happen on the continuous variables\r\n X = preprocessing.scale(X)\r\n \r\n #select top features using Reverse Feature Elimination\r\n #not affected by order of columns\r\n top_features = tuning.postModelStats(X, y)\r\n\r\n #print(\"top_features:\", top_features)\r\n #[ 0 8 11 12 14 15 16 22 24 25 32 34 35 38 44 46 47 48 53 60 63 69 71 72 76 77 79 80 81 82]\r\n \r\n X = X[:, top_features]\r\n \r\n #after preprocessing: split data 
into training and test set\r\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20)\r\n \r\n #Each run will take approximately 10 to 15 minutes given the number of features used in the model\r\n process('Support Vector Classifier - RBF Kernel', 'svc_rbf', X, y, X_train, y_train, X_test, y_test)\r\n process('Logistic Regression', 'lr', X, y, X_train, y_train, X_test, y_test)\r\n process('Support Vector Classifier - Linear Kernel', 'svc_linear', X, y, X_train, y_train, X_test, y_test)\r\n process('K nearest Classifier', 'knc', X, y, X_train, y_train, X_test, y_test)\r\n \r\n\r\nif __name__ == '__main__':\r\n main()"
},
{
"alpha_fraction": 0.5602177381515503,
"alphanum_fraction": 0.5856203436851501,
"avg_line_length": 34.45454406738281,
"blob_id": "96acf45e3776053b3f5978ba2fd3ff1a3dad72fe",
"content_id": "222b2ed97ee3691b0d54ba21274bc476842bd8ea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4409,
"license_type": "no_license",
"max_line_length": 132,
"num_lines": 121,
"path": "/MLProject/plotting.py",
"repo_name": "pparas007/TwitterGender",
"src_encoding": "UTF-8",
"text": "import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport random\r\nimport plotly.plotly as py\r\nimport plotly.tools as tls\r\nimport matplotlib.mlab as mlab\r\nfrom sklearn import metrics\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.linear_model import LogisticRegression\r\n\r\ndef plot(X, y, feature_names, index_list):\r\n #for i in range(0, len(X[0, :])):\r\n \r\n #Not very useful\r\n for i in index_list:\r\n print(\"Plotting X[:,\", i, \"] vs y\")\r\n print(\"Skipping plotting X vs Y\")\r\n #plotXY(X, y, i, feature_names)\r\n \r\n\r\n #for i in range(0, len(X[0, :])):\r\n for i in index_list:\r\n #for j in range(i + 1, len(X[0, :])):\r\n for j in index_list:\r\n if j <= i:\r\n continue\r\n print(\"Plotting X[:,\", i, \"] vs X[:,\", j , \"] vs y\")\r\n print(\"Skipping plotting X vs X vs Y\")\r\n #plotXXY(X, y, i, j, feature_names)\r\n\r\ndef plotXY(X, y, indexX, feature_names):\r\n # plots the data points with o for the positive examples and x for the negative examples. output is saved to file graph.png\r\n x = X[:, indexX]\r\n fig, ax = plt.subplots(figsize = (12, 8))\r\n\r\n index = random.sample(range(0, len(y)), 100)\r\n yTemp = y[index]\r\n xTemp = x[index]\r\n\r\n ax.scatter(xTemp, yTemp, c = 'r', marker = 'x', label = 'Data')\r\n ax.set_title(feature_names[indexX] + ' vs. Gender')\r\n ax.set_xlabel(feature_names[indexX])\r\n ax.set_ylabel('Gender')\r\n plt.show()\r\n\r\ndef plotXXY(X, y, indexX, indexY, feature_names):\r\n x1 = X[:, indexX]\r\n x2 = X[:, indexY]\r\n # plots the data points with o for the positive examples and x for the negative examples. 
output is saved to file graph.png\r\n fig, ax = plt.subplots(figsize = (12, 8))\r\n \r\n index = random.sample(range(0, len(y)), 100)\r\n yTemp = y[index]\r\n x1Temp = x1[index]\r\n x2Temp = x2[index]\r\n\r\n indexPos = yTemp > 0\r\n indexNeg = yTemp == 0\r\n\r\n ax.scatter(x1Temp[indexPos], x2Temp[indexPos], c = 'b', marker = '+', label = 'Male')\r\n ax.scatter(x1Temp[indexNeg], x2Temp[indexNeg], c = 'r', marker = 'x', label = 'Female')\r\n ax.set_title(feature_names[indexX] + ' vs. ' + feature_names[indexY])\r\n ax.set_xlabel(feature_names[indexX])\r\n ax.set_ylabel(feature_names[indexY])\r\n plt.show()\r\n\r\ndef trainingVsAccuracySVC(X, y, method):\r\n i=0\r\n accuracy=np.zeros((20, 2))\r\n for trainingSize in range (500,len(X),500):\r\n testSize = len(X) - trainingSize\r\n X_train, X_test, y_train, y_test = train_test_split(X, y, train_size = trainingSize, test_size = testSize, random_state=0)\r\n if method == 'svc_linear':\r\n svc = SVC(kernel = 'linear')\r\n elif method == 'svc_rbf':\r\n svc = SVC(kernel = 'rbf', gamma = 'auto')\r\n svc.fit(X_train, y_train)\r\n y_pred = svc.predict(X_test)\r\n \r\n accuracy[i][0]=trainingSize\r\n accuracy[i][1]=metrics.accuracy_score(y_test, y_pred)\r\n i+=1\r\n \r\n index = np.argwhere(accuracy[:,0]<=1)\r\n accuracy = np.delete(accuracy, index, axis = 0)\r\n \r\n fig, ax = plt.subplots(figsize = (12, 8))\r\n #x1,x2,y1,y2=fig.axis()\r\n plt.axis((500,10000,0.10,1.00))\r\n plt.plot(accuracy[:,0], accuracy[:,1])\r\n ax.set_title('Training size vs. 
Accuracy')\r\n ax.set_xlabel('Training Size')\r\n ax.set_ylabel('Accuracy')\r\n plt.show()\r\n\r\ndef trainingVsAccuracyLogReg(X, y):\r\n i = 0\r\n accuracy = np.zeros((20, 2))\r\n for trainingSize in range (500, len(X), 500):\r\n testSize = len(X) - trainingSize\r\n X_train, X_test, y_train, y_test = train_test_split(X, y, train_size = trainingSize, test_size = testSize, random_state = 0)\r\n logisticRegr = LogisticRegression(solver = 'lbfgs')\r\n logisticRegr.fit(X_train, y_train)\r\n y_pred = logisticRegr.predict(X_test)\r\n \r\n accuracy[i][0] = trainingSize\r\n accuracy[i][1] = metrics.accuracy_score(y_test, y_pred)\r\n i += 1\r\n index = np.argwhere(accuracy[:, 0] <= 1)\r\n accuracy = np.delete(accuracy, index, axis = 0)\r\n \r\n fig, ax = plt.subplots(figsize = (12, 8))\r\n #x1,x2,y1,y2=fig.axis()\r\n plt.axis((500, 10000, 0.10, 1.00))\r\n plt.plot(accuracy[:, 0], accuracy[:, 1])\r\n ax.set_title('Training size vs. Accuracy')\r\n ax.set_xlabel('Training Size')\r\n ax.set_ylabel('Accuracy')\r\n plt.show()"
},
{
"alpha_fraction": 0.5631729364395142,
"alphanum_fraction": 0.5863326787948608,
"avg_line_length": 42.709232330322266,
"blob_id": "696f0304c3fe761382173c1f33dce66344e84268",
"content_id": "90c4e3e9731ee957f3f99640a7a33d798b0dea56",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 22755,
"license_type": "no_license",
"max_line_length": 223,
"num_lines": 509,
"path": "/MLProject/preprocessing.py",
"repo_name": "pparas007/TwitterGender",
"src_encoding": "UTF-8",
"text": "import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport sklearn\r\nfrom sklearn.preprocessing import LabelEncoder, Imputer, OneHotEncoder, StandardScaler\r\nimport colorsys\r\nfrom collections import Counter\r\nfrom colors import rgb, hsv, hex\r\nfrom sklearn.svm import SVC\r\n#from sklearn.cross_validation import train_test_split\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom sklearn import metrics\r\nimport re\r\n#for outlier detection, box plot\r\nimport seaborn as sns\r\nimport gender_guesser.detector as gender\r\n\r\nfrom nltk.stem import PorterStemmer\r\nfrom nltk.tokenize import sent_tokenize, word_tokenize\r\nfrom nltk.corpus import stopwords\r\nimport nltk\r\nnltk.download('stopwords')\r\nnltk.download('punkt')\r\n \r\ndef correctColorFormat(X, col_index):\r\n #this function first processes different length hex values to make them 6-length\r\n lenVectorized = np.vectorize(len)\r\n\r\n #this function need to be improved to properly understand & handle 2,5,length values\r\n # currently all doubtful values are replaced with most frequently occuring color \r\n index = np.argwhere(np.logical_or(lenVectorized(X[:, col_index]) == 3, lenVectorized(X[:, col_index]) == 4))\r\n if index.size > 0:\r\n for i in np.nditer(index):\r\n X[i, col_index] = X.item((i, col_index))[0] * 2 + X.item((i, col_index))[1] * 2 + X.item((i, col_index))[2] * 2\r\n \r\n index = np.argwhere(lenVectorized(X[:, col_index]) == 2)\r\n if index.size > 0:\r\n for i in np.nditer(index):\r\n X[i, col_index] = X.item((i, col_index))[0] * 3 + X.item((i, col_index))[1] * 3\r\n \r\n index = np.argwhere(lenVectorized(X[:, col_index]) == 1)\r\n if index.size > 0:\r\n for i in np.nditer(index):\r\n X[i, col_index] = X.item((i, col_index))[0] * 6\r\n \r\n return X\r\n\r\ndef convertToHLS(color):\r\n #this function first processes different length hex values to make them 6-length\r\n \r\n #separate 
r,g,b and convert them to integer from hex\r\n r, g, b = int(color[0:2], 16), int(color[2:4], 16), int(color[4:6], 16)\r\n \r\n \"\"\"\r\n #convert rgb to hsv: copied from internet\r\n h, s, v = colorsys.rgb_to_hsv(r/255., g/255., b/255.)\r\n h, s, v = 360 * h, 100 * s, 100 * v\r\n \"\"\"\r\n\r\n #convert to hls, a different color space model than hsv\r\n h, l, s = colorsys.rgb_to_hls(r / 255., g / 255., b / 255.)\r\n h, l, s = 360 * h, 100 * l, 100 * s\r\n return h, l, s\r\n\r\ndef colorCode(column):\r\n #Simple wheel\r\n #color_borders = [60, 180, 300]\r\n #red, green, blue\r\n\r\n #Complex wheel\r\n #http://www.workwithcolor.com/red-color-hue-range-01.htm\r\n color_borders = [10, 20, 40, 50, 60, 80, 140, 169, 200, 220, 240, 280, 320, 330, 345, 355]\r\n #red, red-orange, orange-brown, orange-yellow, yellow,\r\n #yellow-green, green, green-cyan, cyan, cyan-blue\r\n #blue, blue-magenta, magenta, magenta-pink, pink,\r\n #pink-red\r\n\r\n for i in range(0, (len(column))):\r\n h, l, s = convertToHLS(column[i])\r\n h = int(h)\r\n column[i] = h\r\n #hue value ranges from 0-360\r\n #divide it into 3 parts and put 1, 2 or 3 in color column \r\n \r\n #General logic for any color wheel; replaces code commented below\r\n if h > color_borders[-1]:\r\n column[i] = 0\r\n else:\r\n j = 0\r\n for border in color_borders:\r\n if h <= border:\r\n column[i] = j\r\n break\r\n j += 1\r\n \r\n \"\"\"\r\n #Simple\r\n if (h <= 60 or h > 300):\r\n column[i] = 0\r\n elif (h <= 180):\r\n column[i] = 1\r\n elif (h <= 300):\r\n column[i] = 2\r\n \"\"\"\r\n #column=np.reshape(column,(len(column),1))\r\n return (column)\r\n\r\n#dealing with inconsistent data\r\ndef cleanData(X, y, names, description_and_tweet_combined):\r\n #remove rows from both X and y, where gender is not specified\r\n total = len(y)\r\n index = np.argwhere(np.logical_and(y != 'male', y != 'female'))\r\n print(\"# rows removed because of gender is not male nor female:\", len(index), \", percentage removed: \", 
round(len(index) * 100 / total, 2), \"%\")\r\n y = np.delete(y, index, axis = 0)\r\n X = np.delete(X, index, axis = 0)\r\n #remove rows from the other two feature groups too\r\n names = np.delete(names, index, axis = 0)\r\n description_and_tweet_combined = np.delete(description_and_tweet_combined, index, axis = 0)\r\n print(\"# rows remaining:\", len(y))\r\n \r\n lenVectorized = np.vectorize(len)\r\n #remove rows from X and y with link color that have invalid values\r\n index = np.argwhere(np.logical_or(lenVectorized(X[:, 4]) < 6, lenVectorized(X[:, 4]) > 6))\r\n print(\"# rows that could be removed because their link color length is different than 6:\", len(index), \", percentage potentially removed: \", round(len(index) * 100 / total, 2), \"%\")\r\n \r\n #salvaging some records by correcting the hex RGB color format\r\n correctColorFormat(X, 4)\r\n\r\n lenVectorized = np.vectorize(len)\r\n #remove rows from X and y with link color that have invalid values\r\n index = np.argwhere(np.logical_or(lenVectorized(X[:, 4]) < 6, lenVectorized(X[:, 4]) > 6))\r\n print(\"# rows removed because their link color length is different than 6:\", len(index), \", percentage removed: \", round(len(index) * 100 / total, 2), \"%\")\r\n y = np.delete(y, index, axis = 0)\r\n X = np.delete(X, index, axis = 0)\r\n #remove rows from the other two feature groups too\r\n names = np.delete(names, index, axis = 0)\r\n description_and_tweet_combined = np.delete(description_and_tweet_combined, index, axis = 0)\r\n print(\"# rows remaining:\", len(y))\r\n\r\n index = np.argwhere(np.logical_or(lenVectorized(X[:, 6]) < 6, lenVectorized(X[:, 6]) > 6))\r\n print(\"# rows that could be removed because their sidebar color length is different than 6:\", len(index), \", percentage potentially removed: \", round(len(index) * 100 / total, 2), \"%\")\r\n \r\n #salvaging some records by correcting the hex RGB color format\r\n correctColorFormat(X, 6)\r\n\r\n index = 
np.argwhere(np.logical_or(lenVectorized(X[:, 6]) < 6, lenVectorized(X[:, 6]) > 6))\r\n print(\"# rows removed because their sidebar color length is different than 6:\", len(index), \", percentage removed: \", round(len(index) * 100 / total, 2), \"%\")\r\n y = np.delete(y, index, axis = 0)\r\n X = np.delete(X, index, axis = 0)\r\n #remove rows from the other two feature groups too\r\n names = np.delete(names, index, axis = 0)\r\n description_and_tweet_combined = np.delete(description_and_tweet_combined, index, axis = 0)\r\n print(\"# rows remaining:\", len(y))\r\n\r\n return X, y, names, description_and_tweet_combined\r\n\r\n#filter data\r\ndef filterData(X, y, names, description_and_tweet_combined):\r\n total = len(y)\r\n \r\n #remove rows from X & y with gender confidence less than some threshold\r\n #Here the assumption is that such rows might affect the model badly\r\n #need to look into this ...\r\n gender_confidence_threshold = 0.6\r\n index = np.argwhere(X[:, 0] < gender_confidence_threshold)\r\n print(\"# rows removed because their gender confidence is below threshold of\", gender_confidence_threshold, \": \", len(index), \", percentage removed: \", round(len(index) * 100 / total, 2), \"%\")\r\n y = np.delete(y, index, axis = 0)\r\n X = np.delete(X, index, axis = 0)\r\n #remove rows from the other two feature groups too\r\n names = np.delete(names, index, axis = 0)\r\n description_and_tweet_combined = np.delete(description_and_tweet_combined, index, axis = 0)\r\n print(\"# rows remaining:\", len(y))\r\n\r\n #remove rows from X and y with confidence in profile less than some threshold\r\n #Here the assumption is that such rows might affect the model badly\r\n #need to look into this ...\r\n profile_confidence_threshold = 0.6\r\n index = np.argwhere(X[:, 1] < profile_confidence_threshold) \r\n print(\"# rows removed because their profile confidence is below threshold of:\", profile_confidence_threshold, \": \", len(index), \", percentage removed: \", 
round(len(index) * 100 / total, 2), \"%\")\r\n y = np.delete(y, index, axis = 0)\r\n X = np.delete(X, index, axis = 0)\r\n #remove rows from the other two feature groups too\r\n names = np.delete(names, index, axis = 0)\r\n description_and_tweet_combined = np.delete(description_and_tweet_combined, index, axis = 0)\r\n print(\"# rows remaining:\", len(y))\r\n \r\n #remove irrelevant columns like profile confidence and gender confidence which \r\n #do not add any value as model features \r\n X = np.delete(X, 0, axis = 1) #gender confidence column removed\r\n X = np.delete(X, 0, axis = 1) #profile confidence column removed\r\n\r\n #process description and tweet columns\r\n new_columns1 = processTextColumn(X[:, 0])\r\n new_columns2 = processTextColumn(X[:, 5])\r\n #adding 5 colums, such as count of hashtag used, as features extracted from description\r\n X = np.concatenate((X, new_columns1), axis = 1)\r\n #adding 5 colums, such as count of hashtag used, as features extracted from tweet text\r\n X = np.concatenate((X, new_columns2), axis = 1)\r\n X = np.delete(X, 0, axis = 1) #description column removed\r\n X = np.delete(X, 4, axis = 1) #tweet text column removed\r\n \r\n \"\"\"\r\n #15 columns\r\n X_feature_names = [\r\n '# of favorite tweets', \r\n 'Link color hex value',\r\n '# of retweets', \r\n 'Sidebar color hex value',\r\n '# of tweets', \r\n '# of hashtags in description', \r\n 'URLs present in description', \r\n '# of emoticons used in description', \r\n 'length of description', \r\n '# of mentions in description' \r\n '# of hashtags in tweet text', \r\n 'URLs present in tweet text', \r\n '# of emoticons used in tweet text', \r\n 'length of tweet text', \r\n '# of mentions in tweet text' \r\n ]\r\n \"\"\"\r\n return X, y, names, description_and_tweet_combined\r\n\r\n\r\ndef processTextColumn(column):\r\n emoticons = [line.rstrip('\\n') for line in open('emoticons.txt')]\r\n \r\n #0:count of hashtag used, 1:are urls used, 2:count of emoticons used, 3:length of 
profile description,\r\n #4:count of @ used\r\n new_columns = np.ndarray(shape = (len(column), 5), dtype = int)\r\n \r\n for i in range(0, (len(column))):\r\n description_string = str(column[i])\r\n if (len(description_string) != 0):\r\n hashTags = description_string.count('#')\r\n new_columns[i, 0] = hashTags\r\n urls = re.findall('https?://(?:[-\\w.]|(?:%[\\da-fA-F]{2}))+', description_string)\r\n if (len(urls) != 0):\r\n new_columns[i, 1] = 1\r\n else:\r\n new_columns[i, 1] = 0\r\n emoticons_count = 0\r\n for emoticon in emoticons:\r\n if(emoticon in description_string):\r\n emoticons_count += 1\r\n new_columns[i, 2] = emoticons_count\r\n new_columns[i, 3] = len(description_string)\r\n at = description_string.count('@')\r\n new_columns[i, 4] = at\r\n return new_columns\r\n\r\ndef processNamesColumn(column):\r\n new_columns = np.ndarray(shape = (len(column), 1), dtype = int)\r\n gender_detector = gender.Detector()\r\n \r\n for i in range(0, (len(column))):\r\n a_name = str(column[i])\r\n started=False;\r\n a_word=\"\"\r\n new_columns[i, 0] = 0\r\n \r\n regex = re.compile('[^a-zA-Z]')\r\n a_name=regex.sub(' ', a_name)\r\n a_name=re.sub(r'([A-Z])', r' \\1', a_name)\r\n words=a_name.split()\r\n for word in words:\r\n prediction=gender_detector.get_gender(word)\r\n #print(word,' ',prediction)\r\n if(prediction=='mostly_female'):\r\n new_columns[i, 0] = 2\r\n continue\r\n elif(prediction=='mostly_male'):\r\n new_columns[i, 0] = 1\r\n continue\r\n elif(prediction=='female'):\r\n new_columns[i, 0] = 2\r\n break\r\n elif(prediction=='male'):\r\n new_columns[i, 0] = 1\r\n break\r\n #print(words,' ',new_columns[i, 0])\r\n \r\n return new_columns\r\n\r\ndef processDescriptionAndTweetCombined(description_and_tweet_combined):\r\n porter = PorterStemmer()\r\n stop_words = set(stopwords.words('english'))\r\n map={}\r\n max_freq = 0\r\n dict={}\r\n #with open('manually_filtered_stats.txt') as f:\r\n #lines = f.readlines()\r\n with open('manually_filtered_stats_advanced.txt') as 
f:\r\n c=0\r\n for line in f:\r\n line=line.strip().split(' ')[0]\r\n dict[line]=c\r\n c=c+1\r\n\r\n new_columns = np.ndarray(shape = (len(description_and_tweet_combined), len(dict)), dtype = int)\r\n \r\n for i in range(0,len(description_and_tweet_combined)):\r\n description_and_tweet_combined[i]=description_and_tweet_combined[i].lower()\r\n regex = re.compile('[^a-zA-Z]')\r\n description_and_tweet_combined[i]=regex.sub(' ', description_and_tweet_combined[i])\r\n token_words=word_tokenize(description_and_tweet_combined[i])\r\n\r\n for word in token_words:\r\n word=porter.stem(word)\r\n if(dict.get(word)!=None):\r\n new_columns[i][dict.get(word)]=new_columns[i][dict.get(word)]+1\r\n \r\n \r\n #for i in range(0,len(description_and_tweet_combined)):\r\n # for j in range(0,len(dict)):\r\n # print(new_columns[i][j], end = '')\r\n #print()\r\n return new_columns\r\n\r\ndef transformData(X, y, names, description_and_tweet_combined):\r\n print('Converting link color and sidebar color to categories of hue')\r\n #color coding\r\n #colorCode transforms a RGB hex value into a color index, given a color wheel\r\n #1:link color, 3:sidebar color\r\n X[:, 1] = colorCode(X[:, 1])\r\n X[:, 3] = colorCode(X[:, 3])\r\n\r\n \"\"\"\r\n #15 columns\r\n X_feature_names = [\r\n '# of favorite tweets', \r\n 'Link color index',\r\n '# of retweets', \r\n 'Sidebar color index',\r\n '# of tweets', \r\n '# of hashtags in description', \r\n 'URLs present in description', \r\n '# of emoticons used in description', \r\n 'length of description', \r\n '# of mentions in description' \r\n '# of hashtags in tweet text', \r\n 'URLs present in tweet text', \r\n '# of emoticons used in tweet text', \r\n 'length of tweet text', \r\n '# of mentions in tweet text' \r\n ]\r\n \"\"\"\r\n\r\n #names\r\n gender_Columns_deduced_from_names = processNamesColumn(names[:, 0])\r\n encoder_gender = OneHotEncoder(categorical_features = [0])\r\n #Becomes a 3 columns\r\n gender_Columns_deduced_from_names = 
encoder_gender.fit_transform(gender_Columns_deduced_from_names).toarray()\r\n #print(\"shape: gender_Columns_deduced_from_names: \", gender_Columns_deduced_from_names.shape)\r\n X = np.concatenate((X, gender_Columns_deduced_from_names), axis = 1)\r\n \r\n #description_and_tweet_combined\r\n #Becomes a 36 columns, representing the word frequency/word count for each of the words listed \r\n #on manually_filtered_stats_advanced.txt from the description and tweet text combined\r\n words_columns = processDescriptionAndTweetCombined(description_and_tweet_combined)\r\n #print(\"shape: words_columns: \", words_columns.shape)\r\n X = np.concatenate((X, words_columns), axis = 1)\r\n \r\n \"\"\"\r\n #54 columns\r\n X_feature_names = [\r\n '# of favorite tweets', 0 \r\n 'Link color hex value', 1\r\n '# of retweets', 2\r\n 'Sidebar color hex value', 3\r\n '# of tweets', 4\r\n 5 features extracted from description, 5-9\r\n 5 features extracted from tweet text, 10-14\r\n 3 features extracted from name (dummy), 15-17\r\n 36 features extracted from tweet text 18-53\r\n ]\r\n \"\"\"\r\n return X, y\r\n\r\ndef boxplot_metrics(X, col_index):\r\n Q1 = np.quantile(X[:, col_index], 0.25)\r\n Q3 = np.quantile(X[:, col_index], 0.75)\r\n IQR = Q3 - Q1\r\n lower_bound = Q1 - 1.5 * IQR\r\n upper_bound = Q3 + 1.5 * IQR\r\n return lower_bound, upper_bound, IQR, Q1, Q3\r\n\r\ndef removeOutliers(X, y, cols, feature_names):\r\n total = len(y)\r\n for col_index in cols:\r\n sns.boxplot(x=X[:, col_index].astype(float))\r\n lower_bound, upper_bound, IQR, Q1, Q3 = boxplot_metrics(X, col_index)\r\n print(feature_names[col_index])\r\n print(\"lower_bound: %f, upper_bound: %f, IQR: %f, Q1: %f, Q3: %f\" % (lower_bound, upper_bound, IQR, Q1, Q3))\r\n index = np.argwhere((X[:, col_index] < (Q1 - 1.5 * IQR)) | (X[:, col_index] > (Q3 + 1.5 * IQR)))\r\n print(\"# rows removed because the\", feature_names[col_index], \"is outside the boxplot whiskers [\", lower_bound, \": \", upper_bound, \"]:\", len(index), 
\", percentage removed: \", round(len(index) * 100 / total, 2), \"%\")\r\n y = np.delete(y, index, axis = 0)\r\n X = np.delete(X, index, axis = 0)\r\n print(\"# rows remaining:\", len(y))\r\n plt.show()\r\n return X, y\r\n\r\n#Convert categorical features to dummy variables\r\n#This operation affects the order of the numpy ndarray columns\r\ndef encodeData(X, y):\r\n #Receives X with 54 columns\r\n \r\n y = LabelEncoder().fit_transform(y)\r\n\r\n #Depending on the colorCode() method, at this point we could have hue color values in columns 1 and 3, or categorical values that represent different colors\r\n #If we have categorical values we need to encode them with dummy variables\r\n #OneHotEncoder converts a categorical feature/column into n binary variables, where n is the number of categories\r\n #OneHotEncoder doesn't generate n-1 variables, it generates n variables\r\n encoder_color = OneHotEncoder(categorical_features = [1, 3])\r\n \r\n X = encoder_color.fit_transform(X).toarray()\r\n\r\n #new order of columns?\r\n #Might be possible to avoid by moving dummy variable columns to the end\r\n \"\"\"\r\n #84 columns\r\n X_feature_names = [\r\n 16 dummy features extracted from link color 0-15\r\n 16 dummy features extracted from sidebar color 16-31\r\n '# of favorite tweets', 32 \r\n '# of retweets', 33\r\n '# of tweets', 34\r\n 5 features extracted from description, 35-39\r\n 5 features extracted from tweet text, 40-44\r\n 3 features extracted from name (dummy), 45-47\r\n 36 features extracted from tweet text 48-83\r\n ]\r\n \"\"\"\r\n return X, y\r\n\r\n# Receives a ndarray, not 'pandas.core.series.Series' object\r\ndef preprocessData(X, y, names, description_and_tweet_combined):\r\n if not isinstance(y, np.ndarray):\r\n print(\"y argument needs to be of type ndarray, but it currently is\", type(y))\r\n if not isinstance(X, np.ndarray):\r\n print(\"X argument needs to be of type ndarray, but it currently is\", type(X))\r\n\r\n print(\"Cleaning data\")\r\n X, y, 
names, description_and_tweet_combined = cleanData(X, y, names, description_and_tweet_combined)\r\n print(\"Filtering data\")\r\n X, y, names, description_and_tweet_combined = filterData(X, y, names, description_and_tweet_combined)\r\n print(\"Transforming data\")\r\n X, y = transformData(X, y, names, description_and_tweet_combined)\r\n \r\n #54 columns\r\n feature_names = [\r\n '# of favorite tweets', #0\r\n 'Link color hue', #1\r\n '# of retweets', #2\r\n 'Sidebar color hue', #3\r\n '# of tweets', #4\r\n '# of hashtags in description', #5\r\n 'URLs present in description', #6\r\n '# of emoticons used in description', #7\r\n 'length of description', #8\r\n '# of mentions in description' #9\r\n #5 features extracted from tweet text, 10-14\r\n #3 features extracted from name (dummy), 15-17\r\n #36 features extracted from tweet text 18-53\r\n ]\r\n \r\n #Outlier identification and removal\r\n #Link color (col 1): One of the link colors (0084B4) is so common (50% of entire dataset) than the IQR becomes 0, and thus only one value would remain after outlier removal. 
Hence, avoiding removal\r\n #No retweet or retweet count (col 2): It does vary but 0 is the most frequent occurrence, making IQR equal to 0 and the only value that will remain after outlier removal would be 0\r\n #Sidebar color (col 3): Should we do it?\r\n #No of hashtags (col 5): IQR becomes 0, and # of hashtags after outlier removal becomes 0 for all rows\r\n #No of emoticons (col 7): IQR becomes 0, and # of hashtags after outlier removal becomes 0 for all rows\r\n #Length of description (col 8): don't think we should remove\r\n #No of @, or ats count (col 9): IQR becomes 0\r\n \r\n #X, y = removeOutliers(X, y, [0, 1, 2, 3, 4, 5, 7, 8, 9], feature_names)\r\n #X, y = removeOutliers(X, y, [0, 2, 3, 4, 8], feature_names)\r\n X, y = removeOutliers(X, y, [0, 2, 4, 8], feature_names)\r\n #X, y = removeOutliers(X, y, [0, 2, 4], feature_names)\r\n\r\n #after this method, feature_names variable will no longer be accurate if dummy encoding occurred in the method below\r\n X, y = encodeData(X, y)\r\n \r\n \"\"\"\r\n #84 columns\r\n X_feature_names = [\r\n 16 dummy features extracted from link color 0-15\r\n 16 dummy features extracted from sidebar color 16-31\r\n '# of favorite tweets', 32 \r\n '# of retweets', 33\r\n '# of tweets', 34\r\n 5 features extracted from description, 35-39\r\n 5 features extracted from tweet text, 40-44\r\n 3 features extracted from name (dummy), 45-47\r\n 36 features extracted from tweet text 48-83\r\n ]\r\n \"\"\"\r\n #preprocessing completed\r\n return X, y\r\n\r\n\r\ndef scale(X):\r\n #Scaling\r\n #fit all the features between range of -1 & 1, to avoid overemphasize on a particular feature\r\n standardScalar = StandardScaler()\r\n #need to scale to speed up recursive feature elimination\r\n X = standardScalar.fit_transform(X)\r\n return X"
}
] | 8 |
abaltsen/project
|
https://github.com/abaltsen/project
|
f57d1b805a596f4058d192823c4ec76633c1c1a7
|
9b702071a24f4f7d94d0cc396db8acb3309e9edc
|
c36cbdc69e81c72c917acf2440ee93c9fce6dbed
|
refs/heads/master
| 2021-05-09T00:29:07.794799 | 2018-01-31T21:27:09 | 2018-01-31T21:27:09 | 119,744,703 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5551813244819641,
"alphanum_fraction": 0.5712435245513916,
"avg_line_length": 39.6315803527832,
"blob_id": "6d3d4123612b83622d283b0ac3f5ef71871411b2",
"content_id": "2e11652cb0de81f87d53c7499646c00990e29e5c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3860,
"license_type": "no_license",
"max_line_length": 184,
"num_lines": 95,
"path": "/vcf_ann.py",
"repo_name": "abaltsen/project",
"src_encoding": "UTF-8",
"text": "import argparse\nimport urllib2\n\nparser = argparse.ArgumentParser(prog='VCF Annotation Tool', description='A program that parses a given VCF file and outputs a variant annotations table.')\nparser.add_argument('input_file', help = 'Name of the vcf input file to be parsed')\nparser.add_argument('output_file', help = 'Name of the output file created. VCF file suggested')\nargs = parser.parse_args()\n\nf = open(args.output_file, \"w\")\n\nvar_type = \"TYPE\"\ndepth_of_seq = \"DP\"\nreads_supp_var = \"AO\"\nreads_supp_ref = \"RO\"\nallele_freq = 'allele_freq\"'\nadd_info = '\"Consequence\"'\ngene = '\"Gene\"'\n\ndef find_info(field_name):\n indices = [i for i, j in enumerate(info_subfields) if field_name in j]\n #in case where RO is found in PRO use second index\n if field_name.startswith('RO'):\n i = indices[1]\n else:\n i = indices[0]\n x = info_subfields[i][len(field_name)+1:]\n #if multiple counts of variant reads are present, use first count\n #unless field is variant type\n if x.count(',') >= 1 and field_name != 'TYPE':\n reads = x.split(',')\n x = reads[0]\n return x\n \ndef percent_calc(x, y):\n var_reads = (int(x)/(int(y)+float(x)))*100\n ref_reads = (int(y)/(int(y)+float(x)))*100\n return str(round(var_reads, 2)) + ':' + str(round(ref_reads, 2))\n \ndef exac_file_search(keyword):\n u = urllib2.urlopen(\"http://exac.hms.harvard.edu/rest/variant/variant/\" + chr + \"-\" + pos + \"-\" + ref + \"-\" + alt + \"\").read() \n #searches through website to find first instance of field keyword,\n #pulls keyword between comma indeces\n if u.find(keyword) > 0:\n index = u.find(keyword)\n end = u.find(\",\", index, index+100)\n x = u[index+14:end]\n x = x.replace('_', ' ').replace('\"', '')\n #if no keyword is determined by ExAC . 
placeholder is written\n else:\n x = \".\"\n return x\n \nwith open(args.input_file, \"r\") as file:\n print \"Annotating files...\"\n for line in file:\n if line[1] == \"#\": \n #parses through meta-information of the input file\n continue\n if line[1] == \"C\" and line[0] == \"#\": \n #once header line of input file is reached, print new header to output file\n f.write('{:10s} {:15s} {:10s} {:20s} {:25s} {:20s} {:20s}'.format('VAR TYPE', 'DEPTH SEQ COV', '# READS', '% READS (VAR VS ALT)', 'ALLELE FREQ', 'GENE', 'ADDITIONAL INFO'))\n continue\n else: \n linelist = line.strip().split() \n #defines each variable split in data line fields from line of input file\n (chr, pos, id, ref, alt, qual, filter, info) = linelist[0:8] \n info_subfields = info.split(';')\n \n #1.writes type of variation to table in output file\n #type can be complex, del, ins, mnp, or snp ranked most to least serious\n vt = find_info(var_type)\n if vt.count(',') >= 1:\n types = vt.split(',')\n vt = types[0]\n \n #2.writes depth of sequence coverage to output file\n ds = find_info(depth_of_seq)\n \n #3.writes number of reads supporting variant to output file\n rsv = find_info(reads_supp_var)\n \n #4.writes % of supporting reads, variant vs reference to output file\n rsr = find_info(reads_supp_ref)\n pr = percent_calc(rsv, rsr) \n \n #5.writes ExAC browser allele frequency to output file\n af = exac_file_search(allele_freq)\n \n #6.additional info, pull out 'Consequence' and 'Gene' of variant from exac file\n ai = exac_file_search(add_info)\n ge = exac_file_search(gene)\n \n f.write('{:10s} {:15s} {:10s} {:20s} {:25s} {:20s} {:20s}'.format(vt, ds, rsv, pr, af, ge, ai))\n \nf.close()\n"
},
{
"alpha_fraction": 0.7755101919174194,
"alphanum_fraction": 0.7795918583869934,
"avg_line_length": 60,
"blob_id": "acb8c1b29c504cd679f5c6420bd0fdf79d72420c",
"content_id": "c27f21421e3af0f35b015916f9212a698fcfa6c2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 245,
"license_type": "no_license",
"max_line_length": 146,
"num_lines": 4,
"path": "/README.md",
"repo_name": "abaltsen/project",
"src_encoding": "UTF-8",
"text": "# VCF File Parser and annotation tool\n\nThis python file is run given 2 arguments. The first argument is for the input VCF file, and the second is for the output (VCF recommended) file. \nExample: python vcf-beta.py Challenge_data.vcf output.vcf \n"
}
] | 2 |
Pro-Machina/Soft-Computing
|
https://github.com/Pro-Machina/Soft-Computing
|
ee6861f249c530e17068c01671564ae064af0dc5
|
15d8184fd7ecdc417c472100bf7d376ca7c36dbd
|
8cca2a5702af11ce604c0b96e7469c31e652574d
|
refs/heads/master
| 2020-12-14T02:18:04.982202 | 2020-01-23T21:50:57 | 2020-01-23T21:50:57 | 234,602,950 | 0 | 0 |
MIT
| 2020-01-17T17:40:33 | 2020-01-23T20:23:42 | 2020-01-23T21:50:57 |
Python
|
[
{
"alpha_fraction": 0.7791095972061157,
"alphanum_fraction": 0.789383590221405,
"avg_line_length": 72,
"blob_id": "1b3aa682e5358ae9ccbd0d765602f01e2b159e3d",
"content_id": "a214d7b04b1c32c717dcf1af9c8d563319d1c9c3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 584,
"license_type": "permissive",
"max_line_length": 224,
"num_lines": 8,
"path": "/ParticleSwarmOptimisation/README.md",
"repo_name": "Pro-Machina/Soft-Computing",
"src_encoding": "UTF-8",
"text": "# Particle Swarm Optimisation\nParticle swarm optimisation is a method that replicates swarm behaviour to find the optimum required solution. The position of each swarm particle is updated besed on the personal best and global best solutions found so far.\n\n## Code style\nPython is used to implement the algorithm. Only two libraries, namely, numpy and random are used.\n\n## Parameters\nChanging these parameters alters the accuracy and speed of the algorithm: (1) Inertia weight: w, (2) Acceleration coefficient: c1 and c2, (3) Number of swarm particles: n, (4) Number of iterations: w\n"
},
{
"alpha_fraction": 0.781215488910675,
"alphanum_fraction": 0.7878453135490417,
"avg_line_length": 74.41666412353516,
"blob_id": "fd0d5a122179b4358b258635fad8e9e4d8b85b05",
"content_id": "2a449770ef93a92cdd8ed79bc02b9d165245adee",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 905,
"license_type": "permissive",
"max_line_length": 233,
"num_lines": 12,
"path": "/AntColonyOptimisation/README.md",
"repo_name": "Pro-Machina/Soft-Computing",
"src_encoding": "UTF-8",
"text": "# Ant Colony Optimisation\nAnt colony optimisation is an evolutionary algorithm that draws it's analogy from the behaviour of ants and how they could reach to their food using the shortest path possible\n\n## Code style\nThe code has been written in python with the use of two libraries, namely, numpy and random\n\n## Parameters\nChanging the parameters would effect the accuracy and computation time of the algorithm, these parameters are:\n(1) Initial pheromone value: tau, (2) Number of ants: ants, (3) Evaporation coefficient: rho, (4) alpha and beta vales, (5) Pheromone count for each new trail: q, (6) Distance to cost conversion factor: k\n\n## Traveling Salesman Problem\nThe algorithm solves a TSP problem which states that: Given a list of cities and the distances between each pair of cities, what is the shortest possible route that visits each city and returns to the origin city? (Source: Wikipedia)\n"
},
{
"alpha_fraction": 0.8260869383811951,
"alphanum_fraction": 0.8260869383811951,
"avg_line_length": 63.400001525878906,
"blob_id": "7485724c72827ac28a729ff2a1546ec88fef25e5",
"content_id": "5d31e9c2f3d49056700eb51780f0ff6a23b6a516",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 322,
"license_type": "permissive",
"max_line_length": 147,
"num_lines": 5,
"path": "/README.md",
"repo_name": "Pro-Machina/Soft-Computing",
"src_encoding": "UTF-8",
"text": "# Soft-Computing\nSoft Computing algorithms are nature inspired algorithms that find a near optimal solution in a relatively less computation and time.\n\n## Popular algorithms\nGenetic Algorithm, Fuzzy Logic, Simulated Annealing, Neural Networks, Ant Colony Optimisation, Artificial Immune System and a combination of these.\n"
},
{
"alpha_fraction": 0.6230658292770386,
"alphanum_fraction": 0.6351205706596375,
"avg_line_length": 44.557376861572266,
"blob_id": "fea97fb3195a51636c34264e2abdce173e6fb43a",
"content_id": "06524c7c2c32d41274d08ccf13d90ba1cd08a3f8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5558,
"license_type": "permissive",
"max_line_length": 143,
"num_lines": 122,
"path": "/AntColonyOptimisation/aco.py",
"repo_name": "Pro-Machina/Soft-Computing",
"src_encoding": "UTF-8",
"text": "# Created by Pro-Machina\n# Ant Colony Optimization in Traveling Salesman Problem\n# Assuming that the salesman has to start from a given city (input) and has to return to the same city at the end of the trip ..\n# and all the costs are symmetric, i.e, cost for traveling from city i to j is same as that of traveling from city j to i ..\n# each city can be visited only once\n\nimport numpy as np # Major use: Arrays and Matrices\nimport random # Major use: Random number generation\n\ntau = 100 # Initial pheromone value of arc between cities\nants = 9000 # Number of ants (iterations), this becomes the termination criteria\nrho = 0 # Evaporation coefficient (if taken 0, its for simplicity)\nalpha = 1 # Alpha value, used in probability calculation\nbeta = 1 # Beta value, used in probability calculation\nq = 1000 # Pheromone count (used while updating the phromone of the arcs after an iteration)\nk = 0.2 # Distance to cost conversion parameter C = k*D (Assumed linear behaviour)\n\ndef prob (no_city, dist_array, tau_array, city_trav):\n \"\"\" Returns a matrix that has values of probability of an ant travelling between two cities \"\"\"\n\n # We alter the distance and pheromone array such that values corresponding to cities already traveled are 0\n # The formula for probability of traveling between city i and j is given by:\n # (Tij(t)^a)/(dij(t)^b)\n # Pij(t) = ---------------------------------- Where T: tau, d: distance or cost, a: alpha, b: beta, t: t-th iteration of the algorithm \n # sum-j( (Tij(t)^a)/(dij(t)^b) ) \n \n temp_dist = dist_array.copy() # Note: .copy() is necessary to avoid changes in the original matrix\n temp_tau = tau_array.copy()\n last_city = 0\n index = 0\n for i in range(0, no_city):\n if (city_trav[i] != 0):\n index = city_trav[i] - 1\n temp_dist[:, index] = 0\n temp_tau[:, index] = 0\n \n sum_prob = 0\n prob_array = np.zeros(no_city) # The array contains probability of travelling between two cities\n for c in range(0, no_city):\n if 
(temp_dist[index][c] != 0):\n prob_array[c] = ((temp_tau[index][c])**alpha)/((temp_dist[index][c])**beta)\n sum_prob = sum_prob + prob_array[c]\n if (sum_prob != 0):\n prob_array = prob_array/sum_prob\n\n return prob_array\n\ndef city_selection (prob_array):\n \"\"\" Returns the next city number based on roulette wheel selection \"\"\"\n\n col = int(np.shape(prob_array)[0]) # Number of columns in prob_array\n rand = random.uniform(0, 1) # Random number generation\n roulette = 0\n for c in range(0, col):\n if (prob_array[c] != 0):\n if ( (rand > roulette) & (rand < (roulette + prob_array[c])) ):\n return (c+1) # (c+1) is the required city number\n roulette = roulette + prob_array[c] \n\ndef total_cost (city_trav, dist_array):\n \"\"\" Calculates the total cost of travel, based on total distance travelled \"\"\"\n\n col = int(np.shape(city_trav)[0]) \n total_dist = 0\n end_city = 0\n for c in range(0, (col-1)):\n total_dist = total_dist + dist_array[city_trav[c] - 1][city_trav[c+1] - 1] \n end_city = city_trav[col-1] - 1\n total_dist = total_dist + dist_array[city_trav[0]][end_city] # Accounts for returning to the base city\n\n return (k*total_dist)\n\ndef pheromone_update (city_trav, dist_array, tau_array):\n \"\"\" Updates the pheromone for the arcs between the cities \"\"\"\n\n new_tau = q/total_cost(city_trav, dist_array) # Updates the value based on total cost of the route\n col = int(np.shape(city_trav)[0])\n r = 0\n k = 0\n for c in range(0, (col-1)):\n r = (city_trav[c] - 1)\n k = (city_trav[c+1] - 1)\n tau_array[r][k] = ((1-rho)*tau_array[r][k]) + new_tau\n tau_array[k][r] = tau_array[r][k] # To preserve symmetry in the matrix\n\n return tau_array\n\n\n# Main program starts\nno_city = int(input('Enter the number of cities: ')) # Input the number of cities in the problem\nstart_city = int(input('Enter the starting city: ')) # Salesman starts from tis city\ndist_array = np.zeros((no_city, no_city)) # The array is the matrix of distance between the 
cities\ntau_array = np.zeros((no_city, no_city)) # The array contains feromone values of arcs between the cities\ncity_trav = np.zeros(no_city)\ncity_trav = city_trav.astype(int) # Necessary to make the values integer, so that the city numbers could be used as an index for other matrices\n\n# The following loop takes input to fill up the distance matrix and sets initial values for other matrices\n# Initialising segment\nfor r in range(0, no_city):\n for c in range(0, no_city):\n if (dist_array[r][c] == 0):\n if (r != c):\n tau_array[r][c] = tau # Initialise the pheromone values\n tau_array[c][r] = tau_array[r][c] # To preserve symmetry\n dist_array[r][c] = float(input('Enter distance for city ' + str(r+1) + ', city ' + str(c+1) + ' : '))\n dist_array[c][r] = dist_array[r][c] # Distance matrix is a symmetric matrix\n\nwhile(ants > 0):\n city_trav = np.zeros(no_city)\n city_trav = city_trav.astype(int)\n city_trav[0] = start_city\n\n for c in range(0, no_city):\n if ( city_trav[c] == 0 ):\n city_trav[c] = city_selection(prob(no_city, dist_array, tau_array, city_trav))\n\n tau_array = pheromone_update(city_trav, dist_array, tau_array)\n ants = ants - 1\n\ncity_trav = np.append(city_trav, [start_city])\nprint ('The final path generated by Ant Colony Optimization is: ')\nprint (city_trav)\n"
},
{
"alpha_fraction": 0.6070842742919922,
"alphanum_fraction": 0.6231551170349121,
"avg_line_length": 39.38410568237305,
"blob_id": "b4f2ff7d281dd26e5ad3f49d0c71082dfdd4deb1",
"content_id": "d9305388415abf7c8e029980946d90fcb2256d45",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6098,
"license_type": "permissive",
"max_line_length": 161,
"num_lines": 151,
"path": "/ParticleSwarmOptimisation/pso.py",
"repo_name": "Pro-Machina/Soft-Computing",
"src_encoding": "UTF-8",
"text": "# Created by Pro-Machina\n# This is an implementation of Particle Swarm Optimisation algorithm for the function:\n# Maximize: f(x) = 1 - (x^2) + 2x\n# Matrices are classified into position and fitness matrices, majorly only position matrices are used\n\nimport numpy as np\nimport random\n\n# Paramenters are taken as\nw = 0.7 # Inertia weight (larger -> greater global search, smaller -> greater local search)\nc1 = 0.2 # Acceleratin coefficient 1\nc2 = 0.6 # Acceleration coefficient 2\niterations = 100 # Number of iterations to go through\n# (c1 > c2 : greater local search ability)\n# (c2 > c1 : greater global search ability)\n\ndef find_fitness (swarm_pos):\n \"\"\" Finds the fitness of the swarm with respect to their positions \"\"\"\n # This function needs to be updated after changing the fitness function\n\n swarm_size = int(np.shape(swarm_pos)[0])\n if (np.ndim(swarm_pos) > 1):\n # Since global best is also an input in this function and it's a 1D array, the below line of code would give an error if the condition is not implemented\n n_var = int(np.shape(swarm_pos)[1])\n\n swarm_fit = np.zeros((swarm_size, 1))\n for r in range(0, swarm_size):\n swarm_fit[r] = 1 - ((swarm_pos[r])**2) + (2*(swarm_pos[r])) # Make changes here if there is any change in fitness function\n if (np.ndim(swarm_pos) > 1):\n # Seperately adding the column index for array with more than 1 dimensions\n swarm_fit[r] = 1 - ((swarm_pos[r][0])**2) + (2*(swarm_pos[r][0])) # Make changes here if there is any change in fitness function\n\n # Swarm fitness is a (swarm_size X 1) dimensional fitness matrix\n return swarm_fit\n\ndef find_global_best (swarm_pos, global_best, max_min = 'max'):\n \"\"\" Finds the global best and returns the corresponding position, enter 'min' if its a minimisation problem, 'max' otherwise \"\"\"\n\n swarm_fit = find_fitness(swarm_pos)\n swarm_size = int(np.shape(swarm_pos)[0])\n n_var = int(np.shape(swarm_pos)[1])\n\n\n if (max_min == 'min'):\n for r in range(0, 
swarm_size):\n if (float(swarm_fit[r][0]) < float(find_fitness(global_best)[0])):\n global_best = (swarm_pos[r][:]).copy()\n else:\n for r in range(0, swarm_size):\n if (float(swarm_fit[r][0]) > float(find_fitness(global_best)[0])):\n global_best = (swarm_pos[r][:]).copy()\n\n # Global best is a (1 X n_var) dimensional position matrix\n return global_best\n\ndef find_local_best (swarm_pos, local_best, max_min = 'max'):\n \"\"\" Keeps a track of the personal best of a swarm and returns the same, enter 'min' if its a minimisation problem, 'max' otherwise \"\"\"\n\n swarm_fit = find_fitness(swarm_pos)\n swarm_size = int(np.shape(swarm_pos)[0])\n n_var = int(np.shape(swarm_pos)[1])\n\n if (max_min == 'min'):\n for r in range(0, swarm_size):\n for c in range(0, n_var):\n if (float(swarm_fit[r][0]) < float(find_fitness(local_best[r][:])[0])):\n local_best[r][:] = (swarm_pos[r][:]).copy()\n else:\n for r in range(0, swarm_size):\n for c in range(0, n_var):\n if (float(swarm_fit[r][0]) > float(find_fitness(local_best[r][:])[0])):\n local_best[r][:] = (swarm_pos[r][:]).copy()\n\n # Local besst is a (swarm_size X n_var) dimensional position matrix\n return local_best\n\ndef update_vel (swarm_vel, swarm_pos, global_best, local_best ):\n \"\"\" Returns the updated velocity vector for each swarm particle \"\"\"\n\n r1 = random.uniform(0, 1)\n r2 = random.uniform(0, 1)\n new_vel = swarm_vel.copy()\n swarm_size = int(np.shape(swarm_pos)[0])\n n_var = int(np.shape(swarm_pos)[1])\n\n for r in range(0, swarm_size):\n for c in range(0, n_var):\n new_vel[r][c] = (w*swarm_vel[r][c]) + ( c1*( r1*(local_best[r][c] - swarm_pos[r][c]) ) ) + ( c2*( r2*(global_best[0] - swarm_pos[r][c]) ) )\n if (n_var > 1):\n new_vel[r][c] = (w*swarm_vel[r][c]) + ( c1*( r1*(local_best[r][c] - swarm_pos[r][c]) ) ) + ( c2*( r2*(global_best[0][c] - swarm_pos[r][c]) ) )\n\n # New velocity is a (swarm_size X n_var) dimensional position type matrix\n return new_vel\n\ndef update_position (swarm_pos, swarm_vel):\n 
\"\"\" Returns the updated position of the swarm particles \"\"\"\n\n swarm_size = int(np.shape(swarm_pos)[0])\n n_var = int(np.shape(swarm_pos)[1])\n new_pos = swarm_pos.copy()\n\n for r in range(0, swarm_size):\n for c in range(0, n_var):\n new_pos[r][c] = swarm_pos[r][c] + swarm_vel[r][c]\n\n # New position is a (swarm_size X n_var) dimensional position matrix\n return new_pos\n\n\n# Main program starts\nswarm_size = int(input('Enter the swarm size: '))\nn_var = int(input('Enter the number of variables: '))\n\nvar_range = np.zeros((n_var, 2))\nfor r in range(0, n_var):\n var_range[r][0] = float(input('Enter min value for variable %d: ' % (r+1)))\n var_range[r][1] = float(input('Enter max value for variable %d: ' % (r+1)))\n\n# Initialize the swarm particles' positions\nswarm_pos = np.zeros((swarm_size, n_var))\n#print(swarm_pos)\nfor r in range(0, swarm_size):\n for c in range(0, n_var):\n swarm_pos[r][c] = random.uniform(var_range[c][0], var_range[c][1])\n\n# Initialize the swarm particles' velocity\nswarm_vel = np.zeros((swarm_size, n_var))\nfor r in range(0, swarm_size):\n for c in range(0, n_var):\n swarm_vel[r][c] = random.uniform(-1, 1)\n\n# Start the iterations\nglobal_best = np.zeros((1, n_var))\nlocal_best = np.zeros((swarm_size, n_var))\n\nwhile (iterations > 0):\n\n global_best = find_global_best(swarm_pos, global_best, max_min = 'max')\n local_best = find_local_best(swarm_pos, local_best, max_min = 'max')\n swarm_vel = update_vel(swarm_vel, swarm_pos, global_best, local_best)\n swarm_pos = update_position(swarm_pos, swarm_vel)\n\n iterations = iterations - 1 \n\nprint('')\nprint('Converging through Particle Swarm Optimization') \nprint('')\nprint('The Final Solution is: %f' % global_best)\nprint('')\nprint('The value of thee function at this position is: %f' % find_fitness(global_best))\nprint('')\n"
},
{
"alpha_fraction": 0.7667887806892395,
"alphanum_fraction": 0.7789987921714783,
"avg_line_length": 67.25,
"blob_id": "276069d35eb9c0f20239ee78a40bb311bc3c0a3f",
"content_id": "94224fbeaa22eaf72f784aa050d065f33feb834e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 819,
"license_type": "permissive",
"max_line_length": 249,
"num_lines": 12,
"path": "/GeneticAlgorithm/README.md",
"repo_name": "Pro-Machina/Soft-Computing",
"src_encoding": "UTF-8",
        "text": "# Genetic Algorithm (basic)\nGenetic Algorithm is a heuristic algorithm that draws analogy from gene evolution. It is a method of soft computing to reach to an approximate optimal solution. \n\n## Motivation\nA near optimal solution can be reached using the algorithm with a lot less computation relative to an algorithm implementing hard computing (gives exact solution).\n\n## Code style\nPython is used to implement the algorithm. Only two libraries, namely, math and random are used.\n\n## Parameters\nChanging these parameters alters the accuracy and speed of the algorithm:\n(1) Precision level: epsi, (2) String/Chromosome length: l, (3) Mutation probability: pm, (4) Number of iterations: iteration, (5) Maximum and minimum values assigned to the variables: x1_max, x2_max, x1_min, x2_min, (6) Pool size: pool_size (input)\n"
},
{
"alpha_fraction": 0.5331746935844421,
"alphanum_fraction": 0.5587905645370483,
"avg_line_length": 30.33333396911621,
"blob_id": "2708e6ded172017b7b6bb0b16141f3b5fd72ed27",
"content_id": "786975c5b8d7a1087dd9f10855f39ea7a926372c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7144,
"license_type": "permissive",
"max_line_length": 191,
"num_lines": 228,
"path": "/GeneticAlgorithm/ga.py",
"repo_name": "Pro-Machina/Soft-Computing",
"src_encoding": "UTF-8",
"text": "# Created by Pro-Machina\n# This algorithm finds the minimum for the function (x1 - 2)^2 + (x2 - 3)^2\n# Answer should be x1 = 2 and x2 = 3\n# This code is the simplest form of Genetic Algorithm\n\nimport math\nimport random\n\nepsi = 2 # Precision level \n\nl = 30 # String length of x1 and/or x2\npm = 0.2 # Probability of mutaion to occur on a gene\niteration = 500 # Total iterations that the algorithm should run for\n\n# x1 and x2 range from -4 to 4\n# This range can be changed for each x1 and x2 separately depending on the problem\nx1_min = -4\nx2_min = -4\nx1_max = 4\nx2_max = 4\n\ndef calc_f (string, l, pre_post):\n \"\"\" To return the function value, input pre_post = 0 to iterate first half of the chromosome, 1 otherwise \"\"\"\n\n if (pre_post == 0):\n var = string[:l]\n x_min = x1_min\n x_max = x1_max\n elif (pre_post == 1):\n var = string[l:]\n x_min = x2_min\n x_max = x2_max\n\n # d is the decoded value of the chromosome\n d = 0\n # x is the value of variable which depends on it's min and max value along with the length of the chromosome and the decoded value\n x = 0\n count = l - 1\n\n for bits in var:\n # This loop calculates the decimal value of the binary chromosome in iterative manner\n d = d + ( bits*(2**count) )\n count = count - 1\n #print(d)\n x = x_min + ( ( (x_max - x_min)/((2**l) - 1) )*d )\n \n if (pre_post == 0):\n #print ((x - 2)**2)\n return ( (x - 2)**2 )\n elif (pre_post == 1):\n #print ((x - 3)**2)\n return ( (x - 3)**2 ) \n\ndef calc_x (string, l, pre_post):\n \"\"\" Calculate x value or decoded value of the chromosome \"\"\"\n \n # The function is similar to calc_f function\n if (pre_post == 0):\n var = string[:l]\n x_min = x1_min\n x_max = x1_max\n elif (pre_post == 1):\n var = string[l:]\n x_min = x2_min\n x_max = x2_max\n\n d = 0\n x = 0\n count = l - 1\n\n for bits in var:\n d = d + ( bits*(2**count) )\n count = count - 1\n\n x = x_min + ( ( (x_max - x_min)/((2**l) - 1) )*d )\n return x\n\ndef rand_string (l):\n \"\"\" 
Assigns 0 or 1 with equal probability to a gene of a chromosome of length l \"\"\"\n\n string = [0]*l\n for i in range(0, l):\n if random.uniform(0,1) < 0.5:\n string[i] = 0\n else:\n string[i] = 1\n\n return string\n\ndef single_point_crossover (mating_pool, mating_pool_size, l):\n \"\"\" Crossover from the mid-point of the string of the mating pool \"\"\"\n\n i = 0\n temp_spc1 = [0] # Stores later half of the 1st chromosome\n temp_spc2 = [0] # Stores later half of the next chromosome\n return_pool = mating_pool\n while ( i < (mating_pool_size - 1) ):\n temp_spc1 = mating_pool[i][:l] \n temp_spc2 = mating_pool[i+1][:l]\n\n return_pool[i][l:] = mating_pool[i][l:]\n return_pool[i][:l] = temp_spc2\n return_pool[i+1][l:] = mating_pool[i+1][l:]\n return_pool[i+1][:l] = temp_spc1\n\n i = i + 1\n #print (return_pool)\n return return_pool\n\ndef mutation (pool, pool_size, l, pm):\n \"\"\" Mutation occurs with a probability of pm on each gene of each chromosome in the pool \"\"\"\n\n for chromes in pool:\n for gene in chromes:\n if random.uniform(0, 1) < pm:\n if ( gene == 1 ):\n gene = 0\n else:\n gene = 1\n return pool\n\ndef find_best_ans (pool, pool_size):\n \"\"\" From all the solutions, finds the index of the best solution in the pool \"\"\"\n\n count = 0\n index = 0\n f_array_min = 10000000 # A large number so that every other number is smaller by default\n f_array = [0]*pool_size # f_array stores all the function values of the chromosomes\n for chromes in pool:\n # This loop finds the index of the chromosome with the best function value in the pool\n f_array[count] = calc_f(chromes, l, 0) + calc_f(chromes, l, 1)\n if ( f_array[count] < f_array_min ): \n f_array_min = f_array[count]\n index = count\n\n count = count + 1\n\n return index\n\ndef check_entry (mating_pool, mating_pool_size):\n \"\"\" Function returns a true value if any chromosome in the pool is 0 \"\"\"\n\n value = 0\n for i in range(0, mating_pool_size):\n if (mating_pool[i] == 0):\n value = 1\n 
return value\n\ndef stop_rep (pool, pool_size, l):\n \"\"\" If any repetation is present in the pool, the function repairs it \"\"\"\n\n # This function just adds a redundancy to avoid a redundant solution \n i = 0\n j = 0\n while (i < pool_size):\n while (j < pool_size):\n if (i != j):\n if (pool[i] == pool[j]):\n pool[i] = rand_string(2*l)\n j = j + 1\n i = i + 1\n return pool\n# End of function definitions\n\"\"\"\"\"\"\n# Main program starts\npool_size = int(input('Enter the pool size: ')) # To get better accuracy, keep the pool size more than 100\nmating_pool_size = pool_size # Mating pool is kept as big as the original pool, can be changed here\nmating_pool = [0]*mating_pool_size \npool = [0]*pool_size\nf_array = [0]*pool_size\nrank_count = 0\niter_algo = 0\n\nfor i in range(0, pool_size):\n # The loop adds an initial random population in the pool\n pool[i] = rand_string(2*l)\n rank_count = rank_count + 1\n\nwhile(iter_algo < iteration):\n pool = stop_rep(pool, pool_size, l)\n count = 0\n for chrom in pool:\n f_array[count] = calc_f(chrom, l, 0) + calc_f(chrom, l, 1)\n count = count + 1\n\n # Sort the pool along with it's f values from best to worst\n temp1 = 0\n temp2 = [0]*2*l\n swap = 1\n while(swap >= 0):\n for i in range( 0, (pool_size-1) ):\n if (f_array[i + 1] < f_array[i]):\n temp1 = f_array[i + 1]\n f_array[i + 1] = f_array[i]\n f_array[i] = temp1\n\n temp2 = pool[i + 1]\n pool[i + 1] = pool[i]\n pool[i] = temp2\n\n swap = swap + 1\n else:\n swap = swap - 1\n\n # We go by ranking selection to select for mating\n val = True\n j = 0\n while (val):\n if (check_entry(mating_pool, mating_pool_size) == 1):\n for i in range(0, pool_size):\n if (random.uniform(0, 1) < i/rank_count):\n if (j < mating_pool_size):\n mating_pool[j] = pool[i] # Mating pool is filled with best solution at the top (redundancy to sorting)\n j = j + 1\n else:\n val = False \n\n pool = single_point_crossover(mating_pool, mating_pool_size, l) # Returned pool has two best solutions 
crossover at the top and follows similar pattern to the worst solution at the bottom\n pool = mutation (pool, pool_size, l, pm)\n\n iter_algo = iter_algo + 1\n\nindex = find_best_ans (pool, pool_size) # To find the best solution in the pool generated\nprint (\"From the Genetic Algorithm, the best solution is:\") \nx1 = calc_x (pool[index], l, 0)\nx2 = calc_x (pool[index], l, 1)\nprint (\"x1: %f, x2: %f\" % (x1, x2))\nprint ('Given function was: (x1-2)^2 + (x2-3)^2')\n"
}
] | 7 |
PoliteCat420/KaraKKoffee
|
https://github.com/PoliteCat420/KaraKKoffee
|
90b408ad040da88035d61a15574a867add4b8cd7
|
93c73a8b36589c39a3e165cd685cb47afb81934a
|
3ef0cdad30409391de3918dc1ac8de5b078a8adf
|
refs/heads/master
| 2020-04-08T11:48:51.085612 | 2019-12-31T11:40:18 | 2019-12-31T11:40:18 | 159,321,064 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7661691308021545,
"alphanum_fraction": 0.7810945510864258,
"avg_line_length": 32.5,
"blob_id": "f1145140089448c8b95992d0fd35356b0a4b56d8",
"content_id": "a28636cffeeae008ed733b6953d7c53a851ed810",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 201,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 6,
"path": "/README.md",
"repo_name": "PoliteCat420/KaraKKoffee",
"src_encoding": "UTF-8",
"text": "# KaraKKoffee\nCS 005 Final Project\n\nWelcome to the humble page of my Freshman Year CS Project!\nI hope you find this as enjoyable as we did (but obviously without the caffeine-induced all-nighters!)\n:)\n"
},
{
"alpha_fraction": 0.46807801723480225,
"alphanum_fraction": 0.522720456123352,
"avg_line_length": 34.750892639160156,
"blob_id": "9c9c340cfd25dcf4a6b9cea8267a8905c829430e",
"content_id": "3036eb03ff83ba58514abccc7da263ef0ed65184",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 29995,
"license_type": "no_license",
"max_line_length": 246,
"num_lines": 839,
"path": "/Hope.pyde",
"repo_name": "PoliteCat420/KaraKKoffee",
"src_encoding": "UTF-8",
"text": "\"\"\"Changes Made:\n 1. [REVERTED] Removed offset for displaying images (since we already took those into account when we were positioning objects)\n 2. Changed the function of gravity() to be NOTHING if grav==0 (instead of setting vy=0)\n 3. [REVERTED] INERTIA?\n 4. Made poof animation unaffected by gravity\n 5. Removed self.d for door class\n 6. Removed self.r (radius for collision) for gate class\n 7. Added a self.pressed boolean for button class (which would be linked to a specific gate triggering its display)\n 8. Changed the door and gate to being a SINGLE object and not a list (since we only need 1 of each)\n \n \n Media:\n 1. GPA and CM music (in update of player)\n 2. \n\n TO DO:\n 1. little push at top of moving platform (gravity)\n \n PROBLEMS:\n 1. Does not bring back gate when button isn't pressed\n 2. Blocking function for gate (player movement keys) will block player unless BOTH buttons are pressed at the same time\n 3. Need to add time delay between obstacles\n\n\"\"\"\n\nadd_library('minim')\nimport os\nimport time\npath=os.getcwd()\nplayer = Minim(this)\n\nclass Creature:\n def __init__(self,x,y,r,g,img,w,h,F):\n self.x=x\n self.y=y\n self.r=r #Radius of creature\n self.g=g #Gravity\n self.vx=0\n self.vy=0\n self.w=w #Width of image\n self.h=h #Height of image\n self.F=F #Total no of frames\n self.f=0 #Which frame exactly\n self.img = loadImage(path+\"/images/\"+img)\n self.dir = 1\n self.wall=0\n self.grav=0\n \n def gravity(self):\n if self.grav==1:\n if self.y+self.r < self.g: #If character is above ground. 
used to be self.y + self.r\n self.vy += 0.25 #Give character velocity in positive y-axis (falling down) \n \n if self.vy > self.g - (self.y+self.r):\n self.vy = self.g - (self.y+self.r) #To ensure character stops at ground EXACTLY (and not below)\n # else:\n # pass\n #self.vy = 0 #-10 (else we keep velocity in y as 0) #CHANGED THE GRAVITY WHEN grav==0 TO STAYING THE EXACT SAME\n \n def update(self):\n self.gravity()\n self.x += self.vx\n self.y += self.vy\n \n def display(self):\n self.update()\n \n if isinstance (self, rGPA or rCM or Fail or Trip):\n self.f = 1\n \n elif isinstance (self, Poof): \n self.f = (self.f+0.1)%self.F\n elif self.vx != 0: #If the character is moving, then we cycle through the frames\n self.f = (self.f+0.3)%self.F\n else:\n self.f = 4 #Else we keep the same (stationary) frame (ONLY WORKS FOR MARIO TEMPLATE SINCE f=3 IS STATIONARY THERE)\n \n if self.dir >0 and self.F>1:\n image(self.img,self.x-self.w//2,self.y-self.h//2,self.w,self.h,int(self.f)*self.w,0,int(self.f+1)*self.w,self.h) #int(self.f)*self.w, means to choose the x,y coords of the entire image (all frames) corresponding to which frame we need\n elif self.dir < 0 and self.F>1:\n image(self.img,self.x-self.w//2,self.y-self.h//2,self.w,self.h,int(self.f+1)*self.w,0,int(self.f)*self.w,self.h) #switching x1 with x2 and y1 with y2, to flip the image horizontally\n elif self.dir >0 and self.F==1:\n image(self.img,self.x-self.w//2,self.y-self.h//2, self.w,self.h)#, 0,0, self.w,self.h) FLIPPING IMAGES!!!\n elif self.dir < 0 and self.F==1:\n image(self.img,self.x-self.w//2,self.y-self.h//2, self.w,self.h)#, self.w,0, 0,self.h)\n \n \nclass Player(Creature):\n def __init__(self,x,y,r,g,img,w,h,F, k):\n Creature.__init__(self,x,y,r,g,img,w,h,F)\n self.keyHandler=k\n print(self.keyHandler)\n self.kill = player.loadFile(path+\"/sounds/kill.mp3\")\n self.gameover = player.loadFile(path+\"/sounds/gameover.wav\")\n self.star = player.loadFile(path+\"/sounds/coin.mp3\")\n self.money=0 #Total CM of 
player\n self.grade=0 #Total GPA of player\n self.fail=0 #Flag for fail object of P1\n self.trip=0 #Flag of trip object of P1\n self.quiz=0 #Flag of quiz object of P1\n self.GPA=0 #Flag of gpa object of P1\n self.coffee=0 #Flag of coffee object of P1\n self.CM=0 #Flag of money object of P1\n self.ox=x\n self.oy=y\n self.dead = False\n\n \n def update(self):\n \n for p in game.platforms:\n if self.isAbove(p):\n self.g=p.y\n self.grav=1\n break\n else:\n self.grav=1\n self.g = game.g #Else we stick to original ground\n \n for p in game.mplatforms:\n if self.isAbove(p): #(if character is above the platform AND within the width of platform) and not self.isInside(p) #and not self.keyHandler['UP']:\n self.g=p.y\n \n self.gravity()\n \n if self.vy==0:\n self.wall=0\n \n if self.x-self.r<0: #Left Boundary wall condition\n self.x=self.r\n \n if self.x+self.r>1440: #Right Boundary wall condition\n self.x=1440-self.r\n \n if self.y<0: #Roof condition\n self.y=self.r\n self.vy=5\n self.gravity()\n \n if self.keyHandler['LEFT'] and self.wall==0:\n for b in game.buttons:\n if b.pressed==0:\n if game.gate.x-2+game.gate.w<=self.x-self.w//2<=game.gate.x+2+game.gate.w and game.gate.y<=self.y<=game.gate.y+game.gate.h:\n return\n else:\n self.vx = -5\n self.dir = -1\n \n elif self.keyHandler['RIGHT'] and self.wall==0:\n for b in game.buttons:\n if b.pressed==0:\n if game.gate.x-2<=self.x+self.w//2<=game.gate.x+2 and game.gate.y<=self.y<=game.gate.y+game.gate.h:\n return\n else:\n self.vx = 5\n self.dir = 1\n else:\n self.vx=0 \n # ATTEMPT AT INERTIA\n # if self.vx>0:\n # self.vx= -0.5\n # elif self.vx<0:\n # self.vx= 0.5\n \n if self.keyHandler['UP'] and self.g <= self.y+20: #and self.grav==0: #AND statement ensures that we cannot double jump\n self.vy = -10\n \n self.x += self.vx\n self.y += self.vy\n \n for p in game.platforms:\n #if (p.x-20)<=self.x<=(p.x+p.w+20) and (p.y-10)<=self.y<=(p.y+p.h+40):\n if self.isInside(p):\n self.vy=7\n self.vx=0\n self.wall=1\n \n for b in 
game.buttons:\n #if b.x-2<=self.x+self.w//2<=b.x+2 and b.y<=self.y<=b.y+b.h:\n if self.isAbove(b):\n self.g=b.y\n b.pressed=1\n print(b.pressed)\n else:\n #b.pressed=0 NEED TO DISPLAY BARRIER ONCE BUTTON IS NOT PUSHED\n self.g=game.g\n \n \n for t in game.oTrip:\n if self.cdistance(t) <= self.r + t.r and self.trip==0:\n game.poof.append(Poof(self.x,self.y,50,0,\"Poof.png\",128,128,10))\n bags=self.money #Amount of money player had before touching \n grav=self.g\n self.money=0\n num=(bags//50) #Calculating the number of money objects corresponding to money lost\n for i in range (num-(num//2)): #Making half of money objects fall on right\n game.CM.append(rCM(self.x+(70*(i+1)),self.y+20,20,grav,\"Money.png\",40,40,1,self.y,self.y,0)) #Appending money objects to display at point of character\n for i in range (num//2): #Making half of money objects fall on left\n game.CM.append(rCM(self.x-(70*i),self.y+20,20,grav,\"Money.png\",40,40,1,self.y,self.y,0)) #Appending money objects to display at point of character\n self.x=self.ox\n self.y=self.oy\n \n for f in game.oFail:\n if self.cdistance(f) <= self.r + f.r and self.fail==0:\n game.poof.append(Poof(self.x,self.y,50,0,\"Poof.png\",128,128,10))\n if self.grade>=0.5:\n self.grade-=0.5\n else:\n self.grade=0\n self.x=self.ox\n self.y=self.oy\n #self.fail=1\n \n for q in game.oQuiz:\n if self.cdistance(q) <= self.r + q.r and self.quiz==0:\n if self.grade>=0.2:\n self.grade-=0.2\n else:\n self.grade=0\n self.quiz=1\n if self.cdistance(q) > self.r + q.r:\n self.quiz=0\n \n for s in game.oStarbucks:\n if self.cdistance(s) <= self.r + s.r and self.coffee==0:\n self.money-=150\n # if self.money>=150:\n # self.money-=150\n # else:\n # self.money=0\n self.coffee=1\n # break\n \n if self.cdistance(s) > 70: #self.w//2 + s.w//2:\n self.coffee=0\n \n for m in game.CM:\n if self.cdistance(m) <= self.r + m.r:\n self.money+=50\n game.CM.remove(m)\n del m\n self.star.rewind()\n self.star.play()\n \n for g in game.GPA:\n if self.cdistance(g) 
<= self.r + g.r:\n self.grade+=0.2\n game.GPA.remove(g)\n del g\n self.star.rewind()\n self.star.play()\n \n #if self.x>=(game.door.x-20) and self.x<=(game.door.x+game.door.w+20) and self.y>=(game.door.y-10) and self.y<=(game.door.y+game.door.h+40):\n if self.isInside(game.door):\n self.dead = True\n game.alive_players-=1\n \n def cdistance(self,e): #COLLISION DETECTION\n return (((self.x+self.w//2)-(e.x+e.w//2))**2+((self.y+self.h//2)-(e.y+e.h//2))**2)**0.5\n\n def isAbove(self,platform):\n return self.x in range(platform.x, platform.x+platform.w) and self.y <= platform.y\n \n def isInside(self,platform):\n return self.x in range(platform.x, platform.x+platform.w) and self.y in range(platform.y,platform.y+platform.h)\n \n def closestWall(self,platform):\n if abs(self.x-platform.x)<abs((self.x-(platform.x+platform.w))):\n x = platform.x\n xdist = abs(self.x-platform.x)\n else:\n x = platform.x+platform.w\n xdist = abs((self.x-(platform.x+platform.w)))\n if abs(self.y-platform.y)<abs((self.y-(platform.y+platform.h))):\n y = platform.y\n ydist = abs(self.y-platform.y)\n else:\n y = platform.y+platform.h\n ydist = abs((self.y-(platform.y+platform.h)))\n \n if xdist < ydist:\n return x, self.y\n else:\n return self.x, y\n \nclass rGPA(Creature):\n def __init__(self,x,y,r,g,img,w,h,F):\n Creature.__init__(self,x,y,r,g,img,w,h,F)\n self.cx = x\n self.cy = y #NO MOVEMENT\n \n def gravity(self):\n return\n\nclass rCM(Creature): #Creature unaffected by gravity (FLYING)\n def __init__(self,x,y,r,g,img,w,h,F,y1,y2,move): #y1 and y2 are the vertical endpoints\n Creature.__init__(self,x,y,r,g,img,w,h,F)\n self.y1=y1\n self.y2=y2\n self.vy=3\n self.dir = -1\n self.move=move\n \n def update(self): #Giving endpoints for the creature to move between (VERTICAL MOVEMENT)\n \n if self.move==1:\n if self.y < self.y1:\n self.vy = 3\n elif self.y > self.y2:\n self.vy = -3\n \n elif self.move==0:\n self.vy=0\n \n self.y += self.vy\n \nclass Fail(Creature):\n def 
__init__(self,x,y,r,g,img,w,h,F,x1,x2): #x1 and x2 are the horizontal endpoints\n Creature.__init__(self,x,y,r,g,img,w,h,F)\n self.x1=x1\n self.x2=x2\n self.vx = 3\n \n def update(self):\n self.gravity()\n \n if self.x > self.x2:\n self.vx = -3\n self.dir = -1\n elif self.x < self.x1:\n self.vx = 3\n self.dir = 1\n \n self.x += self.vx\n self.y += self.vy\n \nclass Trip(Creature):\n def __init__(self,x,y,r,g,img,w,h,F,t,r1,move):\n Creature.__init__(self,x,y,r,g,img,w,h,F)\n self.cx = x\n self.cy = y\n self.t = t\n self.r1 = r1\n self.move=move\n \n def update(self):\n \n if self.move==1:\n self.t = self.t+1\n \n self.x = self.cx + self.r1 * cos (self.t * PI/180)\n self.y = self.cy + self.r1 * sin (self.t * PI/180)\n \nclass Quiz(Creature):\n def __init__(self,x,y,r,g,img,w,h,F,x1,x2): #x1 and x2 are the horizontal endpoints\n Creature.__init__(self,x,y,r,g,img,w,h,F)\n self.x1=x1\n self.x2=x2\n self.vx = 2\n \n def update(self):\n self.gravity()\n \n if self.x > self.x2:\n self.vx = -2\n self.dir = -1\n elif self.x < self.x1:\n self.vx = 2\n self.dir = 1\n \n self.x += self.vx\n self.y += self.vy\n \nclass Coffee(Creature):\n def __init__(self,x,y,r,g,img,w,h,F,x1,x2): #x1 and x2 are the horizontal endpoints\n Creature.__init__(self,x,y,r,g,img,w,h,F)\n self.x1=x1\n self.x2=x2\n self.vx = 3\n \n def update(self):\n self.gravity()\n \n if self.x > self.x2:\n self.vx = -3\n self.dir = -1\n elif self.x < self.x1:\n self.vx = 3\n self.dir = 1\n \n self.x += self.vx\n self.y += self.vy\n \nclass Poof(Creature):\n def __init__(self,x,y,r,g,img,w,h,F):\n Creature.__init__(self,x,y,r,g,img,w,h,F)\n \n def gravity(self):\n return \n \n def update(self):\n if int(self.f) == 9:\n game.poof.remove(self)\n del self\n return\n \nclass Platform: #STATIONARY\n def __init__(self,x,y,w,h):\n self.x=x\n self.y=y\n self.w=w\n self.h=h\n self.img = loadImage(path+\"/images/platform1.png\")\n \n def display(self):\n #rect(self.x,self.y,self.w,self.h)\n 
image(self.img,self.x,self.y,self.w,self.h)\n \nclass mPlatform:\n def __init__(self,x,y,w,h,m=0,y1=0,y2=0):\n self.x=x\n self.y=y\n self.y1=y1\n self.y2=y2\n self.vy=0\n self.w=w\n self.h=h\n self.m=m #Flag to check whether it's a stationary platform (m=0) or a moving one (m=1)\n self.img = loadImage(path+\"/images/platform.png\")\n \n def display(self):\n self.update()\n rect(self.x,self.y,self.w,self.h)\n #image(self.img,self.x,self.y,self.w,self.h)\n \n def update(self): #Making the platform move between vertical points\n if self.m==1:\n if self.y < self.y1:\n self.vy = 3\n elif self.y > self.y2:\n self.vy = -3\n \n \"\"\"if self.vy > self.g - (self.y+self.r):\n self.vy = self.g - (self.y+self.r)\"\"\"\n \n elif self.m==0:\n self.vy=0\n \n self.y += self.vy\n \nclass Door:\n def __init__(self,x,y,w,h):\n self.x=x\n self.y=y\n self.w=w\n self.h=h\n self.img = loadImage(path+\"/images/door.png\")\n #self.d=1 #WHY DID I NEED THIS? FOR GATE?\n \n def display(self):\n #rect(self.x,self.y,self.w,self.h)\n image(self.img,self.x,self.y,self.w,self.h)\n \nclass Gate(Door):\n def __init__(self,x,y,w,h):\n Door.__init__(self,x,y,w,h)\n self.opened=1\n #self.r=20 NO NEED?\n self.img = loadImage(path+\"/images/Gate.png\")\n \n def display(self):\n print(\"display\")\n for g in game.buttons:\n print(g.pressed)\n if g.pressed==1:\n return\n rect(self.x,self.y,self.w,self.h)\n #image(self.img,self.x,self.y,self.w,self.h)\n \nclass Button(Platform):\n def __init__(self,x,y,w,h):\n Platform.__init__(self,x,y,w,h)\n self.pressed=0\n \n\nclass Game:\n def __init__ (self,w,h,g,l): #width, height and ground of the game board\n self.w=w\n self.h=h\n self.g=g\n self.l=l\n \n self.CMImg=loadImage(path+\"/images/Money.png\")\n self.GPAImg=loadImage(path+\"/images/GPA.png\")\n self.instImg=loadImage(path+\"/images/Inst.png\") #INSERT THE SCREENSHOT HERE\n self.ENDImg=loadImage(path+\"/images/Congratulations.jpg\")\n \n self.state = \"menu\"\n self.pause = False\n self.pauseSound = 
player.loadFile(path+\"/sounds/pause.mp3\")\n self.winSound = player.loadFile(path+\"/sounds/Congratulations!.mp3\")\n \n self.music = player.loadFile(path+\"/sounds/game.mp3\")\n if self.l==1:\n self.music.play()\n \n self.BGImg=loadImage(path+\"/images/BG\"+str(self.l)+\".jpg\")\n \n \n self.oFail = [] #Fail that makes you restart position as well as reduces GPA by 0.5\n self.oTrip = [] #Trip that makes you restart position as well as reduces CM to 0\n self.oStarbucks = [] #Coffee cup that reduces your CM by 150\n self.oQuiz = [] #Quiz paper that makes you lose your GPA by 0.4\n self.CM = [] #Campus Money \n self.GPA = [] #GPA\n self.platforms=[]\n self.mplatforms=[]\n # self.door=[]\n # self.gate=[]\n self.buttons=[]\n self.players=[]\n self.poof=[]\n self.alive_players = 2\n #for i in range(3):\n \n if self.l==1:\n self.lvl1()\n elif self.l==2:\n self.lvl2()\n elif self.l==3:\n self.lvl3()\n elif self.l==4:\n self.lvl4()\n \n def lvl1(self):\n self.p1 = Player(50,668,20,self.g,\"Player1.png\",52,70,8,{'LEFT':False, 'RIGHT':False, 'UP':False}) #Player-1\n self.p2 = Player(100,668,20,self.g,\"Player1.png\",52,70,8,{'LEFT':False, 'RIGHT':False, 'UP':False}) #Player-2\n \n self.players.append(self.p1)\n self.players.append(self.p2)\n #print(len(self.players))\n \n \n self.platforms.append(Platform(0,150,1216,50)) #Platform-3\n self.platforms.append(Platform(400,350,1100,50)) #Platform-2\n self.platforms.append(Platform(0,550,1216,50)) #Platform-1\n \n self.platforms.append(Platform(-299,0,300,768)) #WALLS\n self.platforms.append(Platform(1439,0,300,768))\n \n #BUTTON FOR BARRIER\n self.buttons.append(Button(0,540,50,10)) #Bottom left\n self.buttons.append(Button(900,340,50,10)) #Top right\n \n #self.platforms.append(Platform(0,718,1440,50)) #Ground\n \n self.door=Door(200,80,70,70) #Door\n self.gate=Gate(600,170,50,180) #Gate\n \n for i in range(5):\n self.GPA.append(rGPA(50+i*60,100,20,self.g,\"GPA.png\",50,50,1)) #GPA hat objects (NO MOVEMENT)\n for i in 
range(5):\n self.GPA.append(rGPA(250+i*60,270,20,self.g,\"GPA.png\",50,50,1))\n \n for i in range(5):\n self.CM.append(rCM(300,275+i*50,20,self.g,\"Money.png\",40,40,1,250,500,1)) #Campus money objects (y1=top height limit, y2=bottom height limit, last argument is move: =1 for vertical movement, =0 for no vertical movement) \n \n # self.CM.append(rCM(300,275,20,self.g,\"Money.png\",40,40,1,300,500,1)) \n # self.CM.append(rCM(300,250,20,self.g,\"Money.png\",40,40,1,300,500,1))\n # self.CM.append(rCM(300,225,20,self.g,\"Money.png\",40,40,1,300,500,1))\n \n self.oStarbucks.append(Coffee(100,105,20,self.g,\"Coffee.png\",30,40,1,100,500))\n \n #self.oQuiz.append(Quiz(200,80,20,self.g,\"Quiz.png\",50,50,1,100,300)) #Creating quiz objects\n \n #self.oTrip.append(Trip(50,100,42,self.g,\"Trip.png\",75,75,1,50,850,0)) #Creating Trip objects\n #self.oFail.append(Fail(50,100,42,self.g,\"Fail.png\",50,50,1,50,450)) #Creating Fail objects \n \n def lvl2(self):\n self.p1 = Player(50,668,20,self.g,\"Player1.png\",52,70,8,{'LEFT':False, 'RIGHT':False, 'UP':False}) #Player-1\n self.p2 = Player(1350,668,20,self.g,\"Player1.png\",52,70,8,{'LEFT':False, 'RIGHT':False, 'UP':False}) #Player-2\n \n self.players.append(self.p1)\n self.players.append(self.p2)\n #print(len(self.players))\n \n \n #Top platforms \n \n #Left \n self.platforms.append(Platform(0,150,500,50)) \n self.oFail.append(Fail(50,125,20,self.g,\"Fail.png\",50,50,1,50,450))\n #self.oStarbucks.append(Coffee(40,130,10,self.g,\"Coffee.png\",30,40,1,40,470))\n \n for i in range(3):\n self.GPA.append(rGPA(50+i*60,125,20,self.g,\"GPA.png\",50,50,1)) #GPA hat objects (NO MOVEMENT)\n \n #Right\n self.platforms.append(Platform(940,150,500,50))\n self.oStarbucks.append(Coffee(1410,130,10,self.g,\"Coffee.png\",30,40,1,980,1410))\n \n for i in range(7):\n self.CM.append(rCM(1020+i*60,130,20,self.g,\"Money.png\",40,40,1,300,500,0))\n \n #self.platforms.append(Platform(500,300,440,50)) #Middle tiny platform\n \n #Bonus platforms\n \n 
#Left\n self.platforms.append(Platform(75,300,270,50)) \n \n self.oTrip.append(Trip(220,355,20,self.g,\"Trip.png\",75,75,1,180,130,1))\n \n self.CM.append(rCM(100,280,20,self.g,\"Money.png\",40,40,1,300,500,0))\n self.GPA.append(rGPA(150,275,20,self.g,\"GPA.png\",50,50,1))\n self.CM.append(rCM(210,280,20,self.g,\"Money.png\",40,40,1,300,500,0))\n self.GPA.append(rGPA(260,275,20,self.g,\"GPA.png\",50,50,1))\n self.CM.append(rCM(320,280,20,self.g,\"Money.png\",40,40,1,300,500,0))\n \n #Right\n self.platforms.append(Platform(1070,300,270,50)) \n \n self.oTrip.append(Trip(1215,355,20,self.g,\"Trip.png\",75,75,1,0,130,1))\n \n self.CM.append(rCM(1095,280,20,self.g,\"Money.png\",40,40,1,300,500,0))\n self.GPA.append(rGPA(1145,275,20,self.g,\"GPA.png\",50,50,1))\n self.CM.append(rCM(1205,280,20,self.g,\"Money.png\",40,40,1,300,500,0))\n self.GPA.append(rGPA(1255,275,20,self.g,\"GPA.png\",50,50,1))\n self.CM.append(rCM(1305,280,20,self.g,\"Money.png\",40,40,1,300,500,0))\n \n #Moving Platforms\n self.mplatforms.append(mPlatform(500,651,150,50,1,330,600)) #Left\n self.mplatforms.append(mPlatform(790,651,150,50,1,330,600)) #Right\n \n for i in range(8):\n self.CM.append(rCM(720,100+i*60,20,self.g,\"Money.png\",40,40,1,100,700,1))\n \n self.platforms.append(Platform(-299,0,300,768)) #WALLS\n self.platforms.append(Platform(1439,0,300,768))\n \n #self.platforms.append(Platform(0,718,1440,50)) #Ground\n self.oStarbucks.append(Coffee(200,698,10,self.g,\"Coffee.png\",30,40,1,50,1350))\n self.oStarbucks.append(Coffee(650,698,10,self.g,\"Coffee.png\",30,40,1,50,1350))\n self.oStarbucks.append(Coffee(1150,698,10,self.g,\"Coffee.png\",30,40,1,50,1350))\n \n self.door=Door(200,80,70,70) #Door\n \n \n def lvl3(self):\n self.p1 = Player(50,668,20,self.g,\"Player1.png\",52,70,8,{'LEFT':False, 'RIGHT':False, 'UP':False}) #Player-1\n self.p2 = Player(100,668,20,self.g,\"Player1.png\",52,70,8,{'LEFT':False, 'RIGHT':False, 'UP':False}) #Player-2\n \n self.players.append(self.p1)\n 
self.players.append(self.p2)\n \n \n self.platforms.append(Platform(0,150,500,50))\n self.platforms.append(Platform(940,150,500,50))\n \n def instDisp(self):\n background(255)\n image(self.instImg,0,-20,1440,768) #INSERT THE SCREENSHOT\n \n def update(self): #LEVEL CHANGE CONDITIONS\n if self.l==1 and self.alive_players==0:\n game.__init__(1440,768,718,2)\n game.state=\"play\"\n self.lvl2()\n elif self.l==2 and self.alive_players==0:\n game.__init__(1440,768,718,3)\n game.state=\"play\"\n elif self.l==3 and self.alive_players==0:\n game.music.pause()\n time.sleep(0.3)\n game.state=\"win\"\n \n def display(self):\n \n background(0)\n self.update()\n \n stroke(255)\n #line(0,self.g,self.w,self.g)\n \n #BACKGROUND IMAGES\n image(self.BGImg,0,0,self.w,self.h)\n \n for p in self.platforms: #Displaying Platforms\n p.display()\n \n for m in self.mplatforms: #Displaying Moving Platforms\n m.display()\n \n for f in self.oFail: #Displaying Fail restarts\n f.display()\n \n for t in self.oTrip: #Displaying Holiday Trip restarts\n t.display()\n \n for s in self.oStarbucks: #Displaying Starbucks cups\n s.display()\n \n for q in self.oQuiz: #Displaying Quiz papers\n q.display()\n \n for g in self.GPA: #Displaying GPA hats\n g.display() \n \n for m in self.CM: #Displaying Campus Money\n m.display()\n \n for p in self.poof: #Displaying smoke when player restarts\n p.display()\n \n for b in self.buttons:\n b.display()\n \n if self.l==1: #GATES ONLY IN LEVEL 1\n for g in game.buttons:\n if g.pressed==0:\n self.gate.display() \n \n self.door.display()\n \n if self.p1.dead == False:\n self.p1.display()\n if self.p2.dead == False:\n self.p2.display()\n \n \n textSize(20) #Counters\n fill(255)\n image(self.CMImg,game.w-35,23, 20,20) #Player-1 Money counter\n text(\"P1:\"+str(self.p1.money),game.w-150,40)\n image(self.GPAImg,game.w-35,55, 20,20) #Player-1 GPA counter\n text(str(self.p1.grade),game.w-120,75)\n image(self.CMImg,game.w-35,93, 20,20) #Player-2 Money counter\n 
text(\"P2:\"+str(self.p2.money),game.w-150,110)\n image(self.GPAImg,game.w-35,127, 20,20) #Player-2 GPA counter\n text(str(self.p2.grade),game.w-120,145)\n\n \ngame = Game(1440,768,718,3) #Window dimensions, ground value and the level of the game\n\ndef setup():\n size(game.w, game.h)\n background(0)\n \n\ndef draw():\n \n if game.state == \"menu\":\n background(0)\n textSize(20)\n \n if game.w//2.5 < mouseX < game.w//2.5 + 200 and game.h-170 < mouseY < game.h-150:\n fill(160,160,160)\n else:\n fill(224,224,224)\n text(\"Play Game\", game.w//2.5, game.h-150)\n if game.w//2.5 < mouseX < game.w//2.5 + 200 and game.h-120 < mouseY < game.h-90:\n fill(160,160,160)\n else:\n fill(224,224,224)\n text(\"Instructions\", game.w//2.5, game.h-100)\n \n elif game.state == \"play\":\n if game.pause == True:\n textSize(70)\n fill(255)\n rect(game.w//3,100,50,50)\n fill(0,255,255)\n text(\"Paused\",game.w//3,100)\n \n else:\n background(0)\n game.display()\n \n elif game.state == \"inst\":\n if not game.pause:\n game.instDisp()\n if game.w//2.5+70 < mouseX < game.w//2.5 + 260 and game.h-50 < mouseY < game.h-10: #Use same ratios, but diff positions according to image used\n fill(224,224,224)\n else:\n fill(160,160,160)\n text(\"Play Game!\", game.w//2.5+80, game.h-20)\n \n elif game.state==\"win\":\n background(255)\n image(game.ENDImg,0,0)\n game.winSound.play()\n game.music.pause()\n \ndef mouseClicked():\n if game.state==\"menu\" and game.w//2.5 < mouseX < game.w//2.5 + 200 and game.h-170 < mouseY < game.h-150:\n game.state=\"play\"\n game.music.play()\n elif game.state==\"menu\" and game.w//2.5 < mouseX < game.w//2.5 + 200 and game.h-120 < mouseY < game.h-90:\n game.state=\"inst\"\n elif game.state==\"inst\" and game.w//2.5+70 < mouseX < game.w//2.5 + 260 and game.h-50 < mouseY < game.h-10:\n game.state=\"play\"\n game.music.play()\n \ndef keyPressed():\n if keyCode == LEFT:\n game.p1.keyHandler['LEFT']=True\n elif keyCode == RIGHT:\n game.p1.keyHandler['RIGHT']=True\n elif 
keyCode == UP:\n game.p1.keyHandler['UP']=True\n elif key in ['a','A']:\n game.p2.keyHandler['LEFT']=True\n elif key in ['d','D']:\n game.p2.keyHandler['RIGHT']=True\n elif key in ['w','W']:\n game.p2.keyHandler['UP']=True\n elif key in ['p','P']: #Pause functions\n game.pause = not game.pause\n game.pauseSound.rewind()\n game.pauseSound.play()\n \n if game.pause == True:\n game.music.pause()\n else:\n game.music.play()\n \ndef keyReleased():\n if keyCode == LEFT:\n game.p1.keyHandler['LEFT']=False\n elif keyCode == RIGHT:\n game.p1.keyHandler['RIGHT']=False \n elif keyCode == UP:\n game.p1.keyHandler['UP']=False\n elif key in ['a','A']:\n game.p2.keyHandler['LEFT']=False\n elif key in ['d','D']:\n game.p2.keyHandler['RIGHT']=False\n elif key in ['w','W']:\n game.p2.keyHandler['UP']=False\n"
}
] | 2 |
TatuArvela/boards
|
https://github.com/TatuArvela/boards
|
e82e50c06bb1db2173fb0d0d963b387d658ebf5e
|
8e53b642fc85ae294f86387c6bcef85eb671dae0
|
74ac06a3158a1dc5b0efa18a473e859d324eafb6
|
refs/heads/master
| 2021-12-21T02:18:55.549043 | 2020-07-12T13:07:55 | 2020-07-12T13:07:55 | 243,713,530 | 0 | 0 |
MIT
| 2020-02-28T08:26:51 | 2020-02-28T08:33:55 | 2020-07-12T13:07:55 |
Python
|
[
{
"alpha_fraction": 0.5874999761581421,
"alphanum_fraction": 0.5916666388511658,
"avg_line_length": 25.72222137451172,
"blob_id": "0d2142f15db41fae5b95ed8940fbcce4fd2e47c7",
"content_id": "3831ece59ddb0c59353ecc6ad473b7eb17a84bb7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 480,
"license_type": "permissive",
"max_line_length": 67,
"num_lines": 18,
"path": "/django/templates/base_card.html",
"repo_name": "TatuArvela/boards",
"src_encoding": "UTF-8",
"text": "{% extends 'base.html' %}\n\n{% block content %}\n{% block card-before %}{% endblock %}\n<div class=\"card overflow-hidden bg-light my-4\">\n {% block card-title--hide %}\n <div class=\"card-header bg-dark text-light font-weight-bold p-2\">\n {% block card-title %}{% endblock %}\n </div>\n {% endblock %}\n {% block card-body--hide %}\n <div class=\"card-body\">\n {% block card-body %}{% endblock %}\n </div>\n {% endblock %}\n</div>\n{% block card-after %}{% endblock %}\n{% endblock %}"
},
{
"alpha_fraction": 0.7416918277740479,
"alphanum_fraction": 0.7462235689163208,
"avg_line_length": 18.5,
"blob_id": "94690473a2a0d420f39b0c8149601d9d43474004",
"content_id": "676676a9c018e14503ab71fc3bfa305bef38bbbb",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 662,
"license_type": "permissive",
"max_line_length": 188,
"num_lines": 34,
"path": "/README.md",
"repo_name": "TatuArvela/boards",
"src_encoding": "UTF-8",
"text": "# Boards\n\nBoards is a sample message board web application. This repository contains multiple implementations with identical features built with different frameworks. The UI is built with Bootstrap.\n\n## Features\n\n* List of boards\n* List of topics in a board\n* List of replies in a topic\n* Adding topics\n* Adding replies\n* Login / Logout\n\n## Versions\n\n### Django / Python\n\nReference implementation, which is based on the tutorial [A Complete Beginner's Guide to Django](https://simpleisbetterthancomplex.com/series/beginners-guide/1.11/).\n\n### Rails / Ruby\n\nWork in progress.\n\n### Laravel / PHP\n\nPlanned.\n\n### Spring / Kotlin\n\nPlanned.\n\n### ASP.NET Core\n\nPlanned."
},
{
"alpha_fraction": 0.6844512224197388,
"alphanum_fraction": 0.6844512224197388,
"avg_line_length": 35.44444274902344,
"blob_id": "16d144c5ca1928ab3033449a14728019cacade1b",
"content_id": "bbda06c33a9faa461ed97d734639229752daab3f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 656,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 18,
"path": "/django/project/urls.py",
"repo_name": "TatuArvela/boards",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import include\nfrom django.urls import path, re_path\nfrom django.contrib import admin\nfrom boards import views\n\nurlpatterns = [\n path(\"\", views.root, name=\"root\"),\n re_path(r\"^boards/$\", views.boards, name=\"boards\"),\n path(\"boards/<int:pk>/\", views.board_topics, name=\"board_topics\"),\n path(\"boards/<int:pk>/new/\", views.new_topic, name=\"new_topic\"),\n path(\"topics/<int:pk>/\", views.topic_replies, name=\"topic_replies\"),\n path(\"admin/\", admin.site.urls),\n]\n\n# Add Django site authentication urls (for login, logout, password management)\nurlpatterns += [\n path(\"accounts/\", include(\"django.contrib.auth.urls\")),\n]\n"
},
{
"alpha_fraction": 0.5766128897666931,
"alphanum_fraction": 0.5860214829444885,
"avg_line_length": 33.338462829589844,
"blob_id": "054a6ba188c5460590de3d57a67abbdb04cdeafb",
"content_id": "4beebee7c5e50190f588c2c3ef30db66a5af0e0e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2232,
"license_type": "permissive",
"max_line_length": 150,
"num_lines": 65,
"path": "/django/boards/migrations/0002_auto_20200225_1407.py",
"repo_name": "TatuArvela/boards",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.0.3 on 2020-02-25 14:07\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('boards', '0001_initial'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='topic',\n old_name='starter',\n new_name='created_by',\n ),\n migrations.RemoveField(\n model_name='post',\n name='updated_by',\n ),\n migrations.RemoveField(\n model_name='topic',\n name='last_updated',\n ),\n migrations.AddField(\n model_name='board',\n name='created_at',\n field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='board',\n name='created_by',\n field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='post',\n name='edited_by',\n field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='edited_posts', to=settings.AUTH_USER_MODEL),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='topic',\n name='created_at',\n field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='post',\n name='created_by',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='created_posts', to=settings.AUTH_USER_MODEL),\n ),\n migrations.AlterField(\n model_name='post',\n name='updated_at',\n field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),\n preserve_default=False,\n ),\n ]\n"
},
{
"alpha_fraction": 0.6046974658966064,
"alphanum_fraction": 0.6094745397567749,
"avg_line_length": 33.41095733642578,
"blob_id": "acac7c82b33d41f32e23573c1500395c809a95b9",
"content_id": "e6cc00e2d69470ab3b0d3ec96f9c306e5d313e33",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2512,
"license_type": "permissive",
"max_line_length": 88,
"num_lines": 73,
"path": "/django/boards/views.py",
"repo_name": "TatuArvela/boards",
"src_encoding": "UTF-8",
"text": "from django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom django.db.models import Count, Max\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom .forms import NewPostForm, NewTopicForm\nfrom .models import Board, Post, Topic\n\n\ndef root(request):\n response = redirect(boards)\n return response\n\n\ndef boards(request):\n logged_out = request.GET.get(\"loggedOut\")\n boards = []\n for board in Board.objects.all().prefetch_related(\"topics\"):\n display_board = {}\n display_board[\"pk\"] = board.pk\n display_board[\"name\"] = board.name\n display_board[\"topic_count\"] = board.topics.count\n display_board[\"post_count\"] = board.topics.aggregate(Count(\"posts\"))[\n \"posts__count\"\n ]\n display_board[\"last_update\"] = board.topics.aggregate(Max(\"posts__updated_at\"))[\n \"posts__updated_at__max\"\n ]\n boards.append(display_board)\n return render(request, \"boards.html\", {\"boards\": boards, \"logged_out\": logged_out})\n\n\ndef board_topics(request, pk):\n board = get_object_or_404(Board, pk=pk)\n return render(request, \"topics.html\", {\"board\": board})\n\n\n@login_required\ndef new_topic(request, pk):\n user = request.user\n board = get_object_or_404(Board, pk=pk)\n if user.is_authenticated and request.method == \"POST\":\n form = NewTopicForm(request.POST)\n if form.is_valid():\n topic = form.save(commit=False)\n topic.board = board\n topic.created_by = user\n topic.save()\n post = Post.objects.create(\n message=form.cleaned_data.get(\"message\"), topic=topic, created_by=user\n )\n return redirect(\"topic_replies\", pk=topic.pk)\n else:\n form = NewTopicForm()\n return render(request, \"new_topic.html\", {\"board\": board, \"form\": form})\n\n\ndef topic_replies(request, pk):\n user = request.user\n topic = get_object_or_404(Topic, pk=pk)\n if request.method == \"POST\":\n if user.is_authenticated:\n form = NewPostForm(request.POST)\n if form.is_valid():\n post = 
form.save(commit=False)\n post.topic = topic\n post.created_by = user\n post.save()\n return redirect(\"topic_replies\", pk=topic.pk)\n else:\n return redirect(\"login\")\n else:\n form = NewPostForm()\n return render(request, \"replies.html\", {\"topic\": topic, \"form\": form})\n"
}
] | 5 |
H4mid2019/insta-comment
|
https://github.com/H4mid2019/insta-comment
|
735d08f5175013cc1a4932247a6f676dd3f132f1
|
e2b717c46d83f72bdd4738788fdf76b2350e11a3
|
76b59efcc8fab21f2bdccd474921364529c28aac
|
refs/heads/main
| 2023-04-11T06:06:04.192068 | 2021-04-23T10:49:12 | 2021-04-23T10:49:12 | 360,839,526 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7662337422370911,
"alphanum_fraction": 0.7792207598686218,
"avg_line_length": 50.33333206176758,
"blob_id": "d09babbeedbfdf8e4fe7b4cbb7a6b640de3d1bf3",
"content_id": "5e459162e8b0937e3928f3208e2bbd28f350f6ef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 154,
"license_type": "no_license",
"max_line_length": 136,
"num_lines": 3,
"path": "/README.md",
"repo_name": "H4mid2019/insta-comment",
"src_encoding": "UTF-8",
"text": "# insta-comment\n\nit search along the comments of one post and base on the count of comments, it shows you the top 10 username who commented on your post.\n"
},
{
"alpha_fraction": 0.5200262665748596,
"alphanum_fraction": 0.5338148474693298,
"avg_line_length": 20.77142906188965,
"blob_id": "883e2d76ea7de9856d5a8ebdadae35687960e7b6",
"content_id": "57a2657f1ee72ae1852349aab219159791acd03f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1523,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 70,
"path": "/insta.py",
"repo_name": "H4mid2019/insta-comment",
"src_encoding": "UTF-8",
"text": "import sys\nimport time\n\nimport requests as req\nfrom config import POST_ID, TOKEN\n\nurl = f\"https://graph.facebook.com/v10.0/{POST_ID}?fields=comments.limit(4000)%7Buser%2Cusername%7D&access_token={TOKEN}\"\n\nDATA = []\n\n\ndef get(next_url: str):\n global DATA\n r = req.get(next_url)\n r_json = r.json()\n data = r_json.get('data')\n if data:\n DATA = [*DATA, *data]\n try:\n f_next = r_json.get('paging').get('next')\n if f_next:\n get(f_next)\n except AttributeError:\n print(\"there is n\\'t any other page\")\n print(\"comments_count\", len(DATA))\n counter()\n return\n return\n\n\ndef main():\n global DATA\n r_first = req.get(url)\n res_json = r_first.json()\n time.sleep(1)\n try:\n error = res_json.get('error').get('message')\n if error:\n print(error)\n sys.exit(0)\n except AttributeError:\n print('No error')\n pass\n data_first = r_first.json().get('comments').get('data')\n DATA = [*DATA, *data_first]\n get(res_json.get('comments').get('paging').get('next'))\n return\n\n\ndef counter():\n global DATA\n out = {}\n for i in DATA:\n if out.get(i.get('username')):\n out[i.get('username')] += 1\n else:\n out[i.get('username')] = 1\n\n sort_out = sorted(out.items(), key=lambda x: x[1], reverse=True)\n\n c = 0\n for i in sort_out:\n if c <= 10:\n print(i[0], i[1])\n c += 1\n return\n\n\nif __name__ == '__main__':\n main()"
}
] | 2 |
Balsrepo/main_folder
|
https://github.com/Balsrepo/main_folder
|
fde7e5761523f69fb6d43eade83a77810c3ad37b
|
06f1b2275fb84837bdf1afe15a6544843e9ce832
|
ea945330f451b58a10bfc04187a894f44dd8b3bb
|
refs/heads/master
| 2020-11-30T05:18:23.664278 | 2020-11-19T10:29:57 | 2020-11-19T10:29:57 | 230,314,394 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7267206311225891,
"alphanum_fraction": 0.7287449240684509,
"avg_line_length": 40.20833206176758,
"blob_id": "73407b6dac7deaef1bc1c0b7a3e25769ef1f4278",
"content_id": "1e5655894bf0eb45644a0e3fa9ac04cf9691b6c3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 988,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 24,
"path": "/python3_challenges/medium.py",
"repo_name": "Balsrepo/main_folder",
"src_encoding": "UTF-8",
"text": "#FizzBuzz Interview Question\n\n#List of Multiples\ndef list_of_multiples(num, no_of_multiples):\n return [num*i for i in range(1,no_of_multiples+1)]\n\n#Calculate the Profit\n# You work for a manufacturer and have been asked to calculate the total profit made on the sales of a product. \n# You are given a dictionary containing the cost price per unit (in dollars), \n# sell price per unit (in dollars), and the starting inventory.\n# Return the total profit made, rounded to the nearest dollar. \n# Assume all of the inventory has been sold.\ndef calculate_profit(inp_dict):\n return inp_dict[\"inventory\"]*(inp_dict[\"sell_price\"] - inp_dict[\"cost_price\"])\n\n\n#How Many Solutions Does This Quadriatic Have?\n\n#All Occurrences of an Element in a List\n# Create a function that returns the indices of all occurrences of an item in the list.\ndef get_indices(inp_list, search_item):\n return [lambda search_item: if search_item in inp_list ]\n\nprint(get_indices([\"a\", \"a\", \"b\", \"a\", \"b\", \"a\"], \"a\"))"
},
{
"alpha_fraction": 0.4912891983985901,
"alphanum_fraction": 0.5505226254463196,
"avg_line_length": 16.717391967773438,
"blob_id": "150f1ad5f12f9f950c7735d00613d8da95d9b5b4",
"content_id": "76df996bf06c562e7976431a666227aedfb06abc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 861,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 46,
"path": "/algebraic_structure/README.md",
"repo_name": "Balsrepo/main_folder",
"src_encoding": "UTF-8",
"text": "-------\r\nAlgebraic Structure\r\n-------\r\n\r\nAlgebraic Structure is based on the properties of binary homogenous operations\r\n \r\n \r\nDefintion source: \r\nhttps://www.geeksforgeeks.org/groups-discrete-mathematics/\r\n\r\n-----------\r\nHow to Run:\r\n-----------\r\n\r\nInput: An integer 'x' \r\nOutput: A matrix showing (Zx,+) and (Zx,*).\r\n\r\n>python math.py\r\n\r\n \r\nEnter the number: 4 \r\n(Z4 ,+) \r\n +(0, 1, 2, 3) \r\n0 [0, 1, 2, 3] \r\n1 [1, 2, 3, 0] \r\n2 [2, 3, 0, 1] \r\n3 [3, 0, 1, 2]\r\n\r\n(Z4 ,*) \r\n *(0, 1, 2, 3) \r\n0 [0, 0, 0, 0] \r\n1 [0, 1, 2, 3] \r\n2 [0, 2, 0, 2] \r\n3 [0, 3, 2, 1] \r\n\r\n\r\n-------\r\nNote: \r\n-------\r\nThis code is made for fun to understand the concepts algebraic structure.\r\nSometimes small example can make us understand things clearly.\r\n\r\n-----\r\nTags:\r\n-----\r\n#Rings #Groups #Mathematics #Mathematics #Ring #AlgebraicStructure #Algebraic #structure\r\n"
},
{
"alpha_fraction": 0.6918008923530579,
"alphanum_fraction": 0.7108345627784729,
"avg_line_length": 27.45833396911621,
"blob_id": "652f60b3ad48d79113a7c6d3a10eba824af3d044",
"content_id": "2a5d19f202be51c73f2cce13752d099ddcbc37b2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2732,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 96,
"path": "/python3_challenges/very_easy.py",
"repo_name": "Balsrepo/main_folder",
"src_encoding": "UTF-8",
"text": "# All titles of the challenges are commented on top of the functions\n# Functions can be called and return statements can be replace with print statement \n\n\n#Return the sum of two numbers\ndef sum_of_2_nums(a,b):\n return a+b \n\n#Return the Next Number from the Integer Passed\ndef return_next_number(num):\n return num+1\n\n#Convert Minutes into Seconds\ndef mins_to_secs(mins):\n return mins*60\n\n#Area of Triangle #where b: base h: height\ndef area_of_traingle(b,h):\n return (b*h)/2\n\n#Convert Hours into Seconds\ndef hours_into_secs(hours):\n return 3600*hours # i.e. 1 hour is equal to 3600 seconds\n\n#Maximum Edge of a Triangle\ndef max_edge_of_traingle(a,b):\n return (a+b)-1\n\n#Return the Remainder from Two Numbers\ndef remainder_from_two_numbers(num1,num2):\n return num1%num2\n\n#Return the First Element in a List\n#Duplicate\n##Return the Last Element in a List ##just replace return statement input_list[0] with input_list[-1]\ndef first_element_in_a_list(input_list):\n return input_list[0] #\n\n#To the Power of ____\ndef power_of_(num,power):\n return num**power\n\n#The Farm Problem\n#chicken= 2 legs, cows= 4 legs, pigs= 4 legs\ndef farm_problem(no_of_chicken,no_of_cows,no_of_pigs):\n return (no_of_chicken*2) + (no_of_cows*4) + (no_of_pigs*4)\n\n#String to Integer and Vice versa\ndef string_to_integer(inp):\n return int(inp) # or str(inpt) #for integer to string\n\n#Is the Number Less than or Equal to Zero\ndef less_than_or_equal_to_zero(num):\n return True if num<=0 else False\n\n#Find the Largest Number in a List\ndef largest_number_in_a_list(input_list):\n ## can also be used for minimum number in a list if return and if statements are replaced\n return max(input_list) ## min(input_list)\n ## using for loop \n # maximum = 0\n # for i in input_list:\n # if i > maximum: ## if i < maximum:\n # maximum = i\n # return maximum\n\n#Concatenating two Integer Lists\ndef concatenation(list1,list2):\n return list1+list2\n\n#Convert Hours and Minutes into 
Seconds\ndef hours_and_minutes_to_seconds(hours,minutes):\n return (hours*3600) + (minutes*60)\n\n#Profitable Gamble\ndef profitable_gamble(prob,prize,pay):\n return True if (prob*prize)>pay else False\n\n#Check if an Integer is Divisible by Five\n#Duplicate\n##Multiple of 100\n##replace 5 with 100 to check if given number is multiple by 100\ndef check_if_divisible_by_five(num):\n return num%5==0 #return statement used without if else statement\n\n#Difference of Max and Min Numbers in a List\n#Duplicate\n#Maximum Difference\ndef difference_of_max_and_min(input_list):\n return max(input_list) - min(input_list)\n\n#Testing K ^ K == N ## ^ exponentiation symbol \ndef ktok(k,n):\n return (k**k)==n\n\n#Compare Strings by Count of Characters\n"
},
{
"alpha_fraction": 0.5918854475021362,
"alphanum_fraction": 0.6085919141769409,
"avg_line_length": 16.41666603088379,
"blob_id": "430024c16c7eb4aa6a3234b34d377c7a356a3de6",
"content_id": "b96694222a49cf932f5a3144aea56c441d0ff428",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 419,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 24,
"path": "/python3_challenges/README.md",
"repo_name": "Balsrepo/main_folder",
"src_encoding": "UTF-8",
"text": "# Python Challenges from Edabit - URL Link: https://edabit.com/challenges/python3\n\nAll levels are in seperate files and it will be updated periodically !\n\n---------------\nList of Levels:\n---------------\n\nVery easy - Level 0\n\nEasy - Level 1\n\nMedium - Level 2\n\nHard - Level 3\n\nVery Hard - Level 4\n\nExpert - Level 5\n\n-----\nNote: \n-----\nThere are multiple ways/solutions to solve one problem. \n"
},
{
"alpha_fraction": 0.5969811081886292,
"alphanum_fraction": 0.6332075595855713,
"avg_line_length": 28.450000762939453,
"blob_id": "838c097d869af991cd7f1aebc992e002951da8ce",
"content_id": "4349c0bda055960e4cbce166f61b4f25ccd7dbc9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5300,
"license_type": "no_license",
"max_line_length": 153,
"num_lines": 180,
"path": "/python3_challenges/easy.py",
"repo_name": "Balsrepo/main_folder",
"src_encoding": "UTF-8",
"text": "#Get the Century\n## All dates will be between 1000 and 2010\ndef get_century(year):\n # can be achieved without using nested if statements \n # use math.ceil inbuilt function\n from math import ceil\n return str(ceil(year/100)) +\" century\"\n # if year == 1000:\n # return \"10th Century\"\n # elif year in range(1000,1100):\n # return \"11th Century\"\n # elif year in range(1100,1200):\n # return \"12th Century\"\n # elif year in range(1200,1300):\n # return \"13th Century\"\n # elif year in range(1300,1400):\n # return \"14th Century\"\n # elif year in range(1400,1500):\n # return \"15th Century\"\n # elif year in range(1500,1600):\n # return \"16th Century\"\n # elif year in range(1600,1700):\n # return \"17th Century\"\n # elif year in range(1600,1700):\n # return \"17th Century\"\n # elif year in range(1700,1800):\n # return \"18th Century\"\n # elif year in range(1800,1900):\n # return \"19th Century\"\n # elif year in range(1900,2000):\n # return \"20th Century\"\n # elif year in range(2000,2010):\n # return \"21st Century\"\n # else:\n # return \"Invalid Year\"\n\n\n#Is the Number Symmetrical?\ndef check_symmetrical(num):\n temp1 = list(str(num))[::-1] #reverses the given number and converts\n temp2 = list(str(num)) #converts the given number into a list of strings\n return temp1==temp2 #can be acheived in one line : return list(str(num))[::-1] == list(str(num))\n\n#Equality of 3 Values\ndef equality_of_3_values(a,b,c):\n eq,inp_list=0,[a,b,c]\n for i in inp_list:\n eq=inp_list.count(i)\n return eq\n \n#Is the Word an Isogram?\ndef isogram(input_string):\n pass\n\n#! Return the List of Sublists\n# Write a function that takes three arguments (x, y, z) and returns a list containing x sublists (e.g. 
[[], [], []]), each containing y number of item z.\n# x Number of sublists contained within the main list.\n# y Number of items contained within each sublist.\n# z Item contained within each sublist.\ndef list_of_sublists(x,y,z):\n import numpy as np\n arr = np.zeros([x,y])\n arr = arr.tolist()\n for i in range(x):\n for j in range(y):\n arr[i][j] = z\n return arr\n\n#Remove Every Vowel from a String\ndef remove_vowel_from_string(input_string):\n vowels = \"\"\"aeiouAEIOU\"\"\" # works also with capitalized letters\n removed_vowels = \"\"\n for i in input_string:\n if i not in vowels:\n removed_vowels = removed_vowels + i\n return removed_vowels\n\n#Return the Index of All Capital Letters\ndef index_of_all_capital_letters(input_letters):\n indexes=[]\n for i in range(len(input_letters)):\n if input_letters[i].isupper():\n indexes.append(i)\n return indexes\n\n#Snail Race\n\n\n#Count Letters in a Word Search\ndef count_letters_in_word_search(input_list,word_to_count):\n import itertools\n return list(itertools.chain(*input_list)).count(word_to_count)\n #or we can use loops to create a flat list from all sublists and get the count from flat list\n\n\n#ATM PIN Code Validation\ndef isValidPIN(input_pin):\n length = len(input_pin)\n if length==4 or length==6 and input_pin.isdigit():\n return True\n else:\n return False #can be written in one line return statement\n\n#!Total Volume\n# Given a list of boxes, create a function that returns the total volume of all those boxes combined together. 
\n# A box is represented by a list with three elements: length, width and height.\n# For instance, total_volume([2, 3, 2], [6, 6, 7], [1, 2, 1]) \n# should return 266 since (2 x 3 x 2) + (6 x 6 x 7) + (1 x 2 x 1) = 12 + 252 + 2 = 266.\ndef total_volume(*lists):\n\n def test(listss):\n test=1\n for i in listss:\n test = test * i\n return test\n\n volume = 0\n for i in lists:\n volume = volume + test(i)\n return volume\n\n\n#Calculate the Median\ndef median(input_list):\n from math import ceil\n in_list=sorted(input_list)\n list_length = len(in_list) \n middle = list_length/2\n if list_length%2==0:\n print(list_length%2)\n return (in_list[int(middle)] + in_list[int(middle)-1])/2\n else:\n return (in_list[ceil(middle)])\n \n\n#Positive Count / Negative Sum\ndef post_count_and_negative_sum(input_list):\n pos_count , negative_sum = 0 , 0\n for i in input_list:\n if i > 0:\n pos_count = pos_count + 1\n else:\n negative_sum = negative_sum + i\n return pos_count, negative_sum\n \n\n#Remove Duplicates from a List\ndef remove_dups(input_list):\n # return list(set(input_list)) #one line way to remove duplicates\n for i in input_list:\n if input_list.count(i) >= 2:\n input_list.remove(i)\n return input_list\n\n\n#Count the Arguments\ndef count_arguments(*inps):\n return len(inps)\n\n#Narcissistic Numbers\ndef narcissistic_numbers(inp):\n digits = str(inp)\n temp = 0 \n for i in digits:\n temp = int(i)**len(digits) + temp \n return True if temp == inp else False\n\n#!First and Last Index\n# def first_last_index(inp_list,letter):\n# try:\n# return list(inp_list).index(letter)\n# except ValueError:\n# return None\n\n# print(first_last_index(\"hello\",\"d\"))\n\n\n#Sort Numbers in Descending Order\ndef sort_descending(numbers):\n pass"
}
] | 5 |
sujin-philip/VTKmUsersGuide
|
https://github.com/sujin-philip/VTKmUsersGuide
|
f592966c6ffc0e4895dbfe5d693d90c31ce6ab18
|
504597be6043e9c2bfff79a2843c3565012ec1d3
|
f7a01e957c7d51b98932f5e778ef9ef781324029
|
refs/heads/master
| 2020-06-02T16:46:37.264064 | 2017-05-30T22:12:36 | 2017-05-30T22:12:36 | 94,098,743 | 0 | 0 | null | 2017-06-12T13:22:45 | 2016-01-15T00:33:48 | 2017-05-30T22:13:01 | null |
[
{
"alpha_fraction": 0.6408643126487732,
"alphanum_fraction": 0.6717724204063416,
"avg_line_length": 30.24786376953125,
"blob_id": "c3535612c02dbf73abcd5bceafb466cfcba45717",
"content_id": "e5c1e9b0d47d3a714fe7d439cab8a042dd2c0396",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 3656,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 117,
"path": "/examples/ArrayHandlePermutation.cxx",
"repo_name": "sujin-philip/VTKmUsersGuide",
"src_encoding": "UTF-8",
"text": "#include <vtkm/cont/ArrayHandlePermutation.h>\n\n#include <vtkm/cont/testing/Testing.h>\n\nnamespace {\n\ntemplate<typename ArrayHandleType>\nvoid CheckArray1(const ArrayHandleType array)\n{\n VTKM_TEST_ASSERT(array.GetNumberOfValues() == 3,\n \"Permuted array has wrong size.\");\n\n typename ArrayHandleType::PortalConstControl portal =\n array.GetPortalConstControl();\n VTKM_TEST_ASSERT(portal.GetNumberOfValues() == 3,\n \"Permuted portal has wrong size.\");\n\n VTKM_TEST_ASSERT(test_equal(portal.Get(0), 0.3),\n \"Permuted array has wrong value.\");\n VTKM_TEST_ASSERT(test_equal(portal.Get(1), 0.0),\n \"Permuted array has wrong value.\");\n VTKM_TEST_ASSERT(test_equal(portal.Get(2), 0.1),\n \"Permuted array has wrong value.\");\n}\n\ntemplate<typename ArrayHandleType>\nvoid CheckArray2(const ArrayHandleType array)\n{\n VTKM_TEST_ASSERT(array.GetNumberOfValues() == 5,\n \"Permuted array has wrong size.\");\n\n typename ArrayHandleType::PortalConstControl portal =\n array.GetPortalConstControl();\n VTKM_TEST_ASSERT(portal.GetNumberOfValues() == 5,\n \"Permuted portal has wrong size.\");\n\n VTKM_TEST_ASSERT(test_equal(portal.Get(0), 0.1),\n \"Permuted array has wrong value.\");\n VTKM_TEST_ASSERT(test_equal(portal.Get(1), 0.2),\n \"Permuted array has wrong value.\");\n VTKM_TEST_ASSERT(test_equal(portal.Get(2), 0.2),\n \"Permuted array has wrong value.\");\n VTKM_TEST_ASSERT(test_equal(portal.Get(3), 0.3),\n \"Permuted array has wrong value.\");\n VTKM_TEST_ASSERT(test_equal(portal.Get(4), 0.0),\n \"Permuted array has wrong value.\");\n}\n\nvoid Test()\n{\n ////\n //// BEGIN-EXAMPLE ArrayHandlePermutation.cxx\n ////\n typedef vtkm::cont::ArrayHandle<vtkm::Id> IdArrayType;\n typedef IdArrayType::PortalControl IdPortalType;\n\n typedef vtkm::cont::ArrayHandle<vtkm::Float64> ValueArrayType;\n typedef ValueArrayType::PortalControl ValuePortalType;\n\n // Create array with values [0.0, 0.1, 0.2, 0.3]\n ValueArrayType valueArray;\n valueArray.Allocate(4);\n 
ValuePortalType valuePortal = valueArray.GetPortalControl();\n valuePortal.Set(0, 0.0);\n valuePortal.Set(1, 0.1);\n valuePortal.Set(2, 0.2);\n valuePortal.Set(3, 0.3);\n\n // Use ArrayHandlePermutation to make an array = [0.3, 0.0, 0.1].\n IdArrayType idArray1;\n idArray1.Allocate(3);\n IdPortalType idPortal1 = idArray1.GetPortalControl();\n idPortal1.Set(0, 3);\n idPortal1.Set(1, 0);\n idPortal1.Set(2, 1);\n vtkm::cont::ArrayHandlePermutation<IdArrayType,ValueArrayType>\n permutedArray1(idArray1, valueArray);\n //// PAUSE-EXAMPLE\n CheckArray1(permutedArray1);\n //// RESUME-EXAMPLE\n\n // Use ArrayHandlePermutation to make an array = [0.1, 0.2, 0.2, 0.3, 0.0]\n IdArrayType idArray2;\n idArray2.Allocate(5);\n IdPortalType idPortal2 = idArray2.GetPortalControl();\n idPortal2.Set(0, 1);\n idPortal2.Set(1, 2);\n idPortal2.Set(2, 2);\n idPortal2.Set(3, 3);\n idPortal2.Set(4, 0);\n vtkm::cont::ArrayHandlePermutation<IdArrayType,ValueArrayType>\n permutedArray2(idArray2, valueArray);\n //// PAUSE-EXAMPLE\n CheckArray2(permutedArray2);\n //// RESUME-EXAMPLE\n ////\n //// END-EXAMPLE ArrayHandlePermutation.cxx\n ////\n\n IdArrayType idArray = idArray2;\n CheckArray2(\n ////\n //// BEGIN-EXAMPLE MakeArrayHandlePermutation.cxx\n ////\n vtkm::cont::make_ArrayHandlePermutation(idArray,valueArray)\n ////\n //// END-EXAMPLE MakeArrayHandlePermutation.cxx\n ////\n );\n}\n\n} // anonymous namespace\n\nint ArrayHandlePermutation(int, char *[])\n{\n return vtkm::cont::testing::Testing::Run(Test);\n}\n"
},
{
"alpha_fraction": 0.6360419988632202,
"alphanum_fraction": 0.6521835327148438,
"avg_line_length": 25.83976173400879,
"blob_id": "6badac0a9c6221f2e4e45a146c3c208dead327db",
"content_id": "932dcd4d0c516f520c36cb38b6275ecf53b82fdc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 9045,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 337,
"path": "/examples/OtherGlut.cxx",
"repo_name": "sujin-philip/VTKmUsersGuide",
"src_encoding": "UTF-8",
"text": "#ifdef __APPLE__\n// Glut is depricated on apple, but is sticking around for now. Hopefully\n// someone will step up and make FreeGlut or OpenGlut compatible. Or perhaps\n// we should move to GLFW. For now, just disable the warnings.\n#pragma GCC diagnostic ignored \"-Wdeprecated-declarations\"\n#endif\n\n#include <vtkm/io/reader/VTKDataSetReader.h>\n\n#include <vtkm/rendering/Actor.h>\n#include <vtkm/rendering/Camera.h>\n#include <vtkm/rendering/CanvasGL.h>\n#include <vtkm/rendering/MapperGL.h>\n#include <vtkm/rendering/View3D.h>\n\n#ifdef __APPLE__\n#include <GLUT/glut.h>\n#else\n#include <GL/glut.h>\n#endif\n\nnamespace OtherGlutExample {\n\nvtkm::rendering::View3D *gViewPointer = NULL;\n\nint gButtonState[3] = { GLUT_UP, GLUT_UP, GLUT_UP };\nint gMousePositionX;\nint gMousePositionY;\nbool gNoInteraction;\n\nvoid DisplayCallback()\n{\n gViewPointer->Paint();\n glutSwapBuffers();\n if (gNoInteraction)\n {\n delete gViewPointer;\n gViewPointer = NULL;\n exit(0);\n }\n}\n\nvoid WindowReshapeCallback(int width, int height)\n{\n gViewPointer->GetCanvas().ResizeBuffers(width, height);\n}\n\nvoid MouseButtonCallback(int buttonIndex, int state, int x, int y)\n{\n gButtonState[buttonIndex] = state;\n gMousePositionX = x;\n gMousePositionY = y;\n}\n\n////\n//// BEGIN-EXAMPLE MouseRotate.cxx\n////\nvoid DoMouseRotate(vtkm::rendering::View &view,\n vtkm::Id mouseStartX,\n vtkm::Id mouseStartY,\n vtkm::Id mouseEndX,\n vtkm::Id mouseEndY)\n{\n vtkm::Id screenWidth = view.GetCanvas().GetWidth();\n vtkm::Id screenHeight = view.GetCanvas().GetHeight();\n\n // Convert the mouse position coordinates, given in pixels from 0 to\n // width/height, to normalized screen coordinates from -1 to 1. 
Note that y\n // screen coordinates are usually given from the top down whereas our\n // geometry transforms are given from bottom up, so you have to reverse the y\n // coordiantes.\n vtkm::Float32 startX = (2.0f*mouseStartX)/screenWidth - 1.0f;\n vtkm::Float32 startY = -((2.0f*mouseStartY)/screenHeight - 1.0f);\n vtkm::Float32 endX = (2.0f*mouseEndX)/screenWidth - 1.0f;\n vtkm::Float32 endY = -((2.0f*mouseEndY)/screenHeight - 1.0f);\n\n view.GetCamera().TrackballRotate(startX, startY, endX, endY);\n}\n////\n//// END-EXAMPLE MouseRotate.cxx\n////\n\n////\n//// BEGIN-EXAMPLE MousePan.cxx\n////\nvoid DoMousePan(vtkm::rendering::View &view,\n vtkm::Id mouseStartX,\n vtkm::Id mouseStartY,\n vtkm::Id mouseEndX,\n vtkm::Id mouseEndY)\n{\n vtkm::Id screenWidth = view.GetCanvas().GetWidth();\n vtkm::Id screenHeight = view.GetCanvas().GetHeight();\n\n // Convert the mouse position coordinates, given in pixels from 0 to\n // width/height, to normalized screen coordinates from -1 to 1. Note that y\n // screen coordinates are usually given from the top down whereas our\n // geometry transforms are given from bottom up, so you have to reverse the y\n // coordiantes.\n vtkm::Float32 startX = (2.0f*mouseStartX)/screenWidth - 1.0f;\n vtkm::Float32 startY = -((2.0f*mouseStartY)/screenHeight - 1.0f);\n vtkm::Float32 endX = (2.0f*mouseEndX)/screenWidth - 1.0f;\n vtkm::Float32 endY = -((2.0f*mouseEndY)/screenHeight - 1.0f);\n\n view.GetCamera().Pan(endX-startX, endY-startY);\n}\n////\n//// END-EXAMPLE MousePan.cxx\n////\n\n////\n//// BEGIN-EXAMPLE MouseZoom.cxx\n////\nvoid DoMouseZoom(vtkm::rendering::View &view,\n vtkm::Id mouseStartY,\n vtkm::Id mouseEndY)\n{\n vtkm::Id screenHeight = view.GetCanvas().GetHeight();\n\n // Convert the mouse position coordinates, given in pixels from 0 to height,\n // to normalized screen coordinates from -1 to 1. 
Note that y screen\n // coordinates are usually given from the top down whereas our geometry\n // transforms are given from bottom up, so you have to reverse the y\n // coordiantes.\n vtkm::Float32 startY = -((2.0f*mouseStartY)/screenHeight - 1.0f);\n vtkm::Float32 endY = -((2.0f*mouseEndY)/screenHeight - 1.0f);\n\n view.GetCamera().Zoom(endY-startY);\n}\n////\n//// END-EXAMPLE MouseZoom.cxx\n////\n\nvoid MouseMoveCallback(int x, int y)\n{\n if (gButtonState[0] == GLUT_DOWN)\n {\n DoMouseRotate(*gViewPointer, gMousePositionX, gMousePositionY, x, y);\n }\n else if (gButtonState[1] == GLUT_DOWN)\n {\n DoMousePan(*gViewPointer, gMousePositionX, gMousePositionY, x, y);\n }\n else if (gButtonState[2] == GLUT_DOWN)\n {\n DoMouseZoom(*gViewPointer, gMousePositionY, y);\n }\n\n gMousePositionX = x;\n gMousePositionY = y;\n\n glutPostRedisplay();\n}\n\nvoid SaveImage()\n{\n std::cout << \"Saving image.\" << std:: endl;\n\n vtkm::rendering::Canvas &canvas = gViewPointer->GetCanvas();\n\n ////\n //// BEGIN-EXAMPLE SaveCanvasImage.cxx\n ////\n canvas.SaveAs(\"MyVis.ppm\");\n ////\n //// END-EXAMPLE SaveCanvasImage.cxx\n ////\n}\n\n////\n//// BEGIN-EXAMPLE ResetCamera.cxx\n////\nvoid ResetCamera(vtkm::rendering::View &view)\n{\n vtkm::Bounds bounds = view.GetScene().GetSpatialBounds();\n view.GetCamera().ResetToBounds(bounds);\n //// PAUSE-EXAMPLE\n std::cout << \"Position: \" << view.GetCamera().GetPosition() << std::endl;\n std::cout << \"LookAt: \" << view.GetCamera().GetLookAt() << std::endl;\n std::cout << \"ViewUp: \" << view.GetCamera().GetViewUp() << std::endl;\n std::cout << \"FOV: \" << view.GetCamera().GetFieldOfView() << std::endl;\n std::cout << \"ClipRange: \" << view.GetCamera().GetClippingRange() << std::endl;\n //// RESUME-EXAMPLE\n}\n////\n//// END-EXAMPLE ResetCamera.cxx\n////\n\nvoid ChangeCamera(vtkm::rendering::Camera &camera)\n{\n // Just set some camera parameters for demonstration purposes.\n ////\n //// BEGIN-EXAMPLE CameraPositionOrientation.cxx\n 
////\n camera.SetPosition(vtkm::make_Vec(10.0, 6.0, 6.0));\n camera.SetLookAt(vtkm::make_Vec(0.0, 0.0, 0.0));\n camera.SetViewUp(vtkm::make_Vec(0.0, 1.0, 0.0));\n camera.SetFieldOfView(60.0);\n camera.SetClippingRange(0.1, 100.0);\n ////\n //// END-EXAMPLE CameraPositionOrientation.cxx\n ////\n}\n\nvoid ObliqueCamera(vtkm::rendering::View &view)\n{\n ////\n //// BEGIN-EXAMPLE AxisAlignedCamera.cxx\n ////\n view.GetCamera().SetPosition(vtkm::make_Vec(0.0, 0.0, 0.0));\n view.GetCamera().SetLookAt(vtkm::make_Vec(0.0, 0.0, -1.0));\n view.GetCamera().SetViewUp(vtkm::make_Vec(0.0, 1.0, 0.0));\n vtkm::Bounds bounds = view.GetScene().GetSpatialBounds();\n view.GetCamera().ResetToBounds(bounds);\n ////\n //// END-EXAMPLE AxisAlignedCamera.cxx\n ////\n ////\n //// BEGIN-EXAMPLE CameraMovement.cxx\n ////\n view.GetCamera().Azimuth(45.0);\n view.GetCamera().Elevation(45.0);\n ////\n //// END-EXAMPLE CameraMovement.cxx\n ////\n}\n\nvoid KeyPressCallback(unsigned char key, int x, int y)\n{\n switch (key)\n {\n case 'q':\n case 'Q':\n delete gViewPointer;\n gViewPointer = NULL;\n exit(0);\n break;\n case 's':\n case 'S':\n SaveImage();\n break;\n case 'r':\n case 'R':\n ResetCamera(*gViewPointer);\n break;\n case 'c':\n case 'C':\n ChangeCamera(gViewPointer->GetCamera());\n break;\n case 'o':\n case 'O':\n ObliqueCamera(*gViewPointer);\n }\n glutPostRedisplay();\n (void)x; (void)y;\n}\n\nint main(int argc, char *argv[])\n{\n // Initialize GLUT window and callbacks\n glutInit(&argc, argv);\n glutInitWindowSize(960, 600);\n glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_ALPHA | GLUT_DEPTH);\n glutCreateWindow(\"VTK-m Example\");\n\n glutDisplayFunc(DisplayCallback);\n glutReshapeFunc(WindowReshapeCallback);\n glutMouseFunc(MouseButtonCallback);\n glutMotionFunc(MouseMoveCallback);\n glutKeyboardFunc(KeyPressCallback);\n\n // Initialize VTK-m rendering classes\n vtkm::cont::DataSet surfaceData;\n try\n {\n vtkm::io::reader::VTKDataSetReader reader(\"data/cow.vtk\");\n 
surfaceData = reader.ReadDataSet();\n }\n catch (vtkm::io::ErrorIO &error)\n {\n std::cout << \"Could not read file:\" << std::endl\n << error.GetMessage() << std::endl;\n }\n catch (...)\n {\n throw;\n }\n\n ////\n //// BEGIN-EXAMPLE SpecifyColorTable.cxx\n ////\n vtkm::rendering::Actor actor(surfaceData.GetCellSet(),\n surfaceData.GetCoordinateSystem(),\n surfaceData.GetField(\"RandomPointScalars\"),\n vtkm::rendering::ColorTable(\"thermal\"));\n ////\n //// END-EXAMPLE SpecifyColorTable.cxx\n ////\n\n vtkm::rendering::Scene scene;\n scene.AddActor(actor);\n\n vtkm::rendering::MapperGL mapper;\n vtkm::rendering::CanvasGL canvas;\n\n gViewPointer =\n ////\n //// BEGIN-EXAMPLE ViewBackgroundColor.cxx\n ////\n new vtkm::rendering::View3D(\n scene, mapper, canvas, vtkm::rendering::Color(1.0f, 1.0f, 1.0f));\n ////\n //// END-EXAMPLE ViewBackgroundColor.cxx\n ////\n gViewPointer->Initialize();\n\n if ((argc > 1) && (strcmp(argv[1], \"--no-interaction\") == 0))\n {\n gNoInteraction = true;\n }\n else\n {\n gNoInteraction = false;\n }\n\n // Start the GLUT rendering system. This function typically does not return.\n glutMainLoop();\n\n return 0;\n}\n\n} // namespace OtherGlutExample\n\nint OtherGlut(int argc, char *argv[])\n{\n return OtherGlutExample::main(argc, argv);\n}\n"
},
{
"alpha_fraction": 0.7346428632736206,
"alphanum_fraction": 0.737500011920929,
"avg_line_length": 26.989999771118164,
"blob_id": "3d6b9e30f2d157e364005dbca5431d67fe216dd9",
"content_id": "1afe8e5f396fb5fcaf545fb6f6b2e3a842e7a41c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 2800,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 100,
"path": "/examples/CMakeLists.txt",
"repo_name": "sujin-philip/VTKmUsersGuide",
"src_encoding": "UTF-8",
"text": "\nset(example_src\n ArrayHandle.cxx\n ArrayHandleAdapt.cxx\n ArrayHandleCast.cxx\n ArrayHandleCompositeVector.cxx\n ArrayHandleConstant.cxx\n ArrayHandleCoordinateSystems.cxx\n ArrayHandleCounting.cxx\n ArrayHandleDerived.cxx\n ArrayHandleDiscard.cxx\n ArrayHandleGroupVec.cxx\n ArrayHandleImplicit.cxx\n ArrayHandlePermutation.cxx\n ArrayHandleTransform.cxx\n ArrayHandleZip.cxx\n BasicGlut.cxx\n CellEdgesFaces.cxx\n CellOperations.cxx\n CellShapes.cxx\n ColorTables.cxx\n CoreDataTypes.cxx\n CustomDeviceAdapter.cxx\n DataSetCreation.cxx\n DeviceAdapterTag.cxx\n DeviceAdapterAlgorithms.cxx\n DynamicArrayHandle.cxx\n EnvironmentModifierMacros.cxx\n ErrorHandling.cxx\n FractalWorklets.cxx\n FunctionInterface.cxx\n IO.cxx\n ListTags.cxx\n Matrix.cxx\n NewtonsMethod.cxx\n OtherGlut.cxx\n ProvidedFilters.cxx\n ScatterCounting.cxx\n ScatterUniform.cxx\n SumOfAngles.cxx\n SimpleHistogram.cxx\n Timer.cxx\n Traits.cxx\n TransferringArguments.cxx\n TriangleQuality.cxx\n UsePointElevationWorklet.cxx\n UseWorkletMapCellToPoint.cxx\n UseWorkletMapField.cxx\n UseWorkletMapPointToCell.cxx\n )\n\n# Set up compiling and testing of examples.\nif (BUILD_EXAMPLES)\n find_package(VTKm REQUIRED\n COMPONENTS Serial TBB Rendering OpenGL GLUT\n )\n include_directories(${VTKm_INCLUDE_DIRS})\n\n if(VTKm_VERSION_FULL MATCHES \"[0-9]+.[0-9]+.[0-9]+-.*\")\n # If we are in between VTK-m versions, change the version to give the\n # commit we are currently documenting.\n set(VTKm_GUIDE_VERSION ${VTKm_VERSION_FULL} PARENT_SCOPE)\n else()\n # If we are at an actual release (as far as we can tell), make sure the\n # version matches up.\n if(NOT VTKm_GUIDE_VERSION STREQUAL VTKm_VERSION)\n message(SEND_ERROR \"VTKm_GUIDE_VERSION seems to be set wrong. 
Change in CMakeLists.txt\")\n endif()\n endif()\n\n set(test_prog ExampleTests)\n create_test_sourcelist(test_src ${test_prog}.cxx ${example_src})\n add_executable(${test_prog} ${test_src})\n target_include_directories(${test_prog} PRIVATE ${VTKm_INCLUDE_DIRS})\n target_link_libraries(${test_prog} ${VTKm_LIBRARIES})\n target_compile_options(${test_prog} PRIVATE ${VTKm_COMPILE_OPTIONS})\n\n foreach (test ${example_src})\n get_filename_component(tname ${test} NAME_WE)\n add_test(NAME ${tname}\n COMMAND ${test_prog} ${tname} --no-interaction\n )\n endforeach()\n\n if(NOT WIN32)\n execute_process(\n COMMAND ${CMAKE_COMMAND} -E\n create_symlink ${CMAKE_SOURCE_DIR}/data ${CMAKE_CURRENT_BINARY_DIR}/data\n )\n else()\n execute_process(\n COMMAND ${CMAKE_COMMAND} -E\n copy_directory ${CMAKE_SOURCE_DIR}/data ${CMAKE_CURRENT_BINARY_DIR}/data\n )\n endif()\nendif()\n\ninclude(ExtractExample.cmake)\n\nextract_examples(created_files ${example_src})\nadd_custom_target(example-listings DEPENDS ${created_files})\n"
},
{
"alpha_fraction": 0.657982349395752,
"alphanum_fraction": 0.6750245094299316,
"avg_line_length": 24.913705825805664,
"blob_id": "5dc38d341e7d819cdc40f357ae5afc0a31b9d5da",
"content_id": "90994d1ce34c94dd18ed8033190700a20492fb37",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 5105,
"license_type": "no_license",
"max_line_length": 225,
"num_lines": 197,
"path": "/examples/ListTags.cxx",
"repo_name": "sujin-philip/VTKmUsersGuide",
"src_encoding": "UTF-8",
"text": "////\n//// BEGIN-EXAMPLE CustomTypeLists.cxx\n////\n#define VTKM_DEFAULT_TYPE_LIST_TAG MyCommonTypes\n\n#include <vtkm/ListTag.h>\n#include <vtkm/TypeListTag.h>\n//// PAUSE-EXAMPLE\nnamespace {\n//// RESUME-EXAMPLE\n\n// A list of 2D vector types.\nstruct Vec2List\n : vtkm::ListTagBase<vtkm::Id2,\n vtkm::Vec<vtkm::Float32,2>,\n vtkm::Vec<vtkm::Float64,2> > { };\n\n// An application that uses 2D geometry might commonly encounter this list of\n// types.\nstruct MyCommonTypes : vtkm::ListTagJoin<Vec2List,vtkm::TypeListTagCommon> { };\n////\n//// END-EXAMPLE CustomTypeLists.cxx\n////\n\n} // anonymous namespace\n\n#include <vtkm/VecTraits.h>\n\n#include <vtkm/testing/Testing.h>\n\n#include <algorithm>\n#include <string>\n#include <vector>\n\n////\n//// BEGIN-EXAMPLE BaseListTags.cxx\n////\n#include <vtkm/ListTag.h>\n//// PAUSE-EXAMPLE\nnamespace {\n//// RESUME-EXAMPLE\n\n// Placeholder classes representing things that might be in a template\n// metaprogram list.\nclass Foo;\nclass Bar;\nclass Baz;\nclass Qux;\nclass Xyzzy;\n\n// The names of the following tags are indicative of the lists they contain.\n\nstruct FooList : vtkm::ListTagBase<Foo> { };\n\nstruct FooBarList : vtkm::ListTagBase<Foo,Bar> { };\n\nstruct BazQuxXyzzyList : vtkm::ListTagBase<Baz,Qux,Xyzzy> { };\n\nstruct QuxBazBarFooList : vtkm::ListTagBase<Qux,Baz,Bar,Foo> { };\n\nstruct FooBarBazQuxXyzzyList\n : vtkm::ListTagJoin<FooBarList, BazQuxXyzzyList> { };\n////\n//// END-EXAMPLE BaseListTags.cxx\n////\n\nclass Foo {};\nclass Bar {};\nclass Baz {};\nclass Qux {};\nclass Xyzzy {};\n\nstruct ListTagsFunctor\n{\n std::string FoundTags;\n\n template<typename T>\n void operator()(T) {\n this->FoundTags.append(vtkm::testing::TypeName<T>::Name());\n }\n\n void operator()(Foo) { this->FoundTags.append(\"Foo\"); }\n void operator()(Bar) { this->FoundTags.append(\"Bar\"); }\n void operator()(Baz) { this->FoundTags.append(\"Baz\"); }\n void operator()(Qux) { this->FoundTags.append(\"Qux\"); }\n void 
operator()(Xyzzy) { this->FoundTags.append(\"Xyzzy\"); }\n};\n\ntemplate<typename ListTag>\nvoid TryListTag(ListTag, const char *expectedString)\n{\n ListTagsFunctor checkFunctor;\n vtkm::ListForEach(checkFunctor, ListTag());\n std::cout << std::endl\n << \"Expected \" << expectedString << std::endl\n << \"Found \" << checkFunctor.FoundTags << std::endl;\n VTKM_TEST_ASSERT(checkFunctor.FoundTags == expectedString, \"List wrong\");\n}\n\nvoid TestBaseListTags()\n{\n TryListTag(FooList(), \"Foo\");\n TryListTag(FooBarList(), \"FooBar\");\n TryListTag(BazQuxXyzzyList(), \"BazQuxXyzzy\");\n TryListTag(QuxBazBarFooList(), \"QuxBazBarFoo\");\n TryListTag(FooBarBazQuxXyzzyList(), \"FooBarBazQuxXyzzy\");\n}\n\nvoid TestCustomTypeLists()\n{\n#ifdef VTKM_USE_64BIT_IDS\n TryListTag(Vec2List(), \"vtkm::Vec< vtkm::Int64, 2 >vtkm::Vec< vtkm::Float32, 2 >vtkm::Vec< vtkm::Float64, 2 >\");\n TryListTag(MyCommonTypes(), \"vtkm::Vec< vtkm::Int64, 2 >vtkm::Vec< vtkm::Float32, 2 >vtkm::Vec< vtkm::Float64, 2 >vtkm::Int32vtkm::Int64vtkm::Float32vtkm::Float64vtkm::Vec< vtkm::Float32, 3 >vtkm::Vec< vtkm::Float64, 3 >\");\n#else\n TryListTag(Vec2List(), \"vtkm::Vec< vtkm::Int32, 2 >vtkm::Vec< vtkm::Float32, 2 >vtkm::Vec< vtkm::Float64, 2 >\");\n TryListTag(MyCommonTypes(), \"vtkm::Vec< vtkm::Int32, 2 >vtkm::Vec< vtkm::Float32, 2 >vtkm::Vec< vtkm::Float64, 2 >vtkm::Int32vtkm::Int64vtkm::Float32vtkm::Float64vtkm::Vec< vtkm::Float32, 3 >vtkm::Vec< vtkm::Float64, 3 >\");\n#endif\n}\n\n////\n//// BEGIN-EXAMPLE ListForEach.cxx\n////\nstruct MyArrayBase {\n // A virtual destructor makes sure C++ RTTI will be generated. 
It also helps\n // ensure subclass destructors are called.\n virtual ~MyArrayBase() { }\n};\n\ntemplate<typename T>\nstruct MyArrayImpl : public MyArrayBase {\n std::vector<T> Array;\n};\n\ntemplate<typename T>\nvoid PrefixSum(std::vector<T> &array)\n{\n T sum(typename vtkm::VecTraits<T>::ComponentType(0));\n for (typename std::vector<T>::iterator iter = array.begin();\n iter != array.end();\n iter++)\n {\n sum = sum + *iter;\n *iter = sum;\n }\n}\n\nstruct PrefixSumFunctor {\n MyArrayBase *ArrayPointer;\n\n PrefixSumFunctor(MyArrayBase *arrayPointer) : ArrayPointer(arrayPointer) { }\n\n template<typename T>\n void operator()(T) {\n typedef MyArrayImpl<T> ConcreteArrayType;\n ConcreteArrayType *concreteArray =\n dynamic_cast<ConcreteArrayType *>(this->ArrayPointer);\n if (concreteArray != NULL)\n {\n PrefixSum(concreteArray->Array);\n }\n }\n};\n\nvoid DoPrefixSum(MyArrayBase *array)\n{\n PrefixSumFunctor functor = PrefixSumFunctor(array);\n vtkm::ListForEach(functor, vtkm::TypeListTagCommon());\n}\n////\n//// END-EXAMPLE ListForEach.cxx\n////\n\nvoid TestPrefixSum()\n{\n MyArrayImpl<vtkm::Id> array;\n array.Array.resize(10);\n std::fill(array.Array.begin(), array.Array.end(), 1);\n DoPrefixSum(&array);\n for (vtkm::Id index = 0; index < 10; index++)\n {\n VTKM_TEST_ASSERT(array.Array[index] == index+1, \"Got bad prefix sum.\");\n }\n}\n\nvoid Test()\n{\n TestBaseListTags();\n TestCustomTypeLists();\n TestPrefixSum();\n}\n\n} // anonymous namespace\n\nint ListTags(int, char *[])\n{\n return vtkm::testing::Testing::Run(Test);\n}\n"
},
{
"alpha_fraction": 0.6367146372795105,
"alphanum_fraction": 0.6640926599502563,
"avg_line_length": 33.16067123413086,
"blob_id": "9a372e80f9426a471d7295167edd3bafbddce70b",
"content_id": "64e31439428c15fcd461b8babe5801463e34033d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 14245,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 417,
"path": "/examples/TriangleQuality.cxx",
"repo_name": "sujin-philip/VTKmUsersGuide",
"src_encoding": "UTF-8",
"text": "////\n//// BEGIN-EXAMPLE TriangleQualityWholeArray.cxx\n////\n#include <vtkm/cont/ArrayHandle.h>\n#include <vtkm/cont/DataSet.h>\n\n#include <vtkm/worklet/DispatcherMapTopology.h>\n#include <vtkm/worklet/WorkletMapTopology.h>\n\n#include <vtkm/CellShape.h>\n#include <vtkm/Math.h>\n#include <vtkm/VectorAnalysis.h>\n//// PAUSE-EXAMPLE\nnamespace TriangleQualityNamespace {\n//// RESUME-EXAMPLE\n\nstatic const vtkm::Id TRIANGLE_QUALITY_TABLE_DIMENSION = 8;\nstatic const vtkm::Id TRIANGLE_QUALITY_TABLE_SIZE =\n TRIANGLE_QUALITY_TABLE_DIMENSION*TRIANGLE_QUALITY_TABLE_DIMENSION;\n\nVTKM_CONT\nvtkm::cont::ArrayHandle<vtkm::Float32> GetTriangleQualityTable()\n{\n // Use these precomputed values for the array. A real application would\n // probably use a larger array, but we are keeping it small for demonstration\n // purposes.\n static vtkm::Float32 triangleQualityBuffer[TRIANGLE_QUALITY_TABLE_SIZE] = {\n 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0.24431f,\n 0, 0, 0, 0, 0, 0, 0.43298f, 0.47059f,\n 0, 0, 0, 0, 0, 0.54217f, 0.65923f, 0.66408f,\n 0, 0, 0, 0, 0.57972f, 0.75425f, 0.82154f, 0.81536f,\n 0, 0, 0, 0.54217f, 0.75425f, 0.87460f, 0.92567f, 0.92071f,\n 0, 0, 0.43298f, 0.65923f, 0.82154f, 0.92567f, 0.97664f, 0.98100f,\n 0, 0.24431f, 0.47059f, 0.66408f, 0.81536f, 0.92071f, 0.98100f, 1\n };\n\n return vtkm::cont::make_ArrayHandle(triangleQualityBuffer,\n TRIANGLE_QUALITY_TABLE_SIZE);\n}\n\ntemplate<typename T>\nVTKM_EXEC_CONT\nvtkm::Vec<T,3> TriangleEdgeLengths(const vtkm::Vec<T,3> &point1,\n const vtkm::Vec<T,3> &point2,\n const vtkm::Vec<T,3> &point3)\n{\n return vtkm::make_Vec(vtkm::Magnitude(point1-point2),\n vtkm::Magnitude(point2-point3),\n vtkm::Magnitude(point3-point1));\n}\n\nVTKM_SUPPRESS_EXEC_WARNINGS\ntemplate<typename PortalType, typename T>\nVTKM_EXEC_CONT\nvtkm::Float32 LookupTriangleQuality(const PortalType &triangleQualityPortal,\n const vtkm::Vec<T,3> &point1,\n const vtkm::Vec<T,3> &point2,\n const vtkm::Vec<T,3> &point3)\n{\n 
vtkm::Vec<T,3> edgeLengths = TriangleEdgeLengths(point1, point2, point3);\n\n // To reduce the size of the table, we just store the quality of triangles\n // with the longest edge of size 1. The table is 2D indexed by the length\n // of the other two edges. Thus, to use the table we have to identify the\n // longest edge and scale appropriately.\n T smallEdge1 = vtkm::Min(edgeLengths[0], edgeLengths[1]);\n T tmpEdge = vtkm::Max(edgeLengths[0], edgeLengths[1]);\n T smallEdge2 = vtkm::Min(edgeLengths[2], tmpEdge);\n T largeEdge = vtkm::Max(edgeLengths[2], tmpEdge);\n\n smallEdge1 /= largeEdge;\n smallEdge2 /= largeEdge;\n\n // Find index into array.\n vtkm::Id index1 = static_cast<vtkm::Id>(\n vtkm::Floor(smallEdge1*(TRIANGLE_QUALITY_TABLE_DIMENSION-1)+0.5));\n vtkm::Id index2 = static_cast<vtkm::Id>(\n vtkm::Floor(smallEdge2*(TRIANGLE_QUALITY_TABLE_DIMENSION-1)+0.5));\n vtkm::Id totalIndex = index1 + index2*TRIANGLE_QUALITY_TABLE_DIMENSION;\n\n return triangleQualityPortal.Get(totalIndex);\n}\n\nstruct TriangleQualityWorklet : vtkm::worklet::WorkletMapPointToCell\n{\n typedef void ControlSignature(CellSetIn cells,\n FieldInPoint<Vec3> pointCoordinates,\n WholeArrayIn<Scalar> triangleQualityTable,\n FieldOutCell<Scalar> triangleQuality);\n typedef _4 ExecutionSignature(CellShape, _2, _3);\n typedef _1 InputDomain;\n\n template<typename CellShape,\n typename PointCoordinatesType,\n typename TriangleQualityTablePortalType>\n VTKM_EXEC\n vtkm::Float32 operator()(\n CellShape shape,\n const PointCoordinatesType &pointCoordinates,\n const TriangleQualityTablePortalType &triangleQualityTable) const\n {\n if (shape.Id != vtkm::CELL_SHAPE_TRIANGLE)\n {\n this->RaiseError(\"Only triangles are supported for triangle quality.\");\n return vtkm::Nan32();\n }\n\n return LookupTriangleQuality(triangleQualityTable,\n pointCoordinates[0],\n pointCoordinates[1],\n pointCoordinates[2]);\n }\n};\n\n// Normally we would encapsulate this call in a filter, but for demonstrative\n// 
purposes we are just calling the worklet directly.\ntemplate<typename DeviceAdapterTag>\nVTKM_CONT\nvtkm::cont::ArrayHandle<vtkm::Float32>\nRunTriangleQuality(vtkm::cont::DataSet dataSet,\n DeviceAdapterTag)\n{\n vtkm::cont::ArrayHandle<vtkm::Float32> triangleQualityTable =\n GetTriangleQualityTable();\n\n vtkm::cont::ArrayHandle<vtkm::Float32> triangleQualities;\n\n vtkm::worklet::DispatcherMapTopology<TriangleQualityWorklet,DeviceAdapterTag>\n dispatcher;\n dispatcher.Invoke(dataSet.GetCellSet(),\n dataSet.GetCoordinateSystem().GetData(),\n triangleQualityTable,\n triangleQualities);\n\n return triangleQualities;\n}\n////\n//// END-EXAMPLE TriangleQualityWholeArray.cxx\n////\n\n////\n//// BEGIN-EXAMPLE TriangleQualityExecObject.cxx\n////\ntemplate<typename DeviceAdapterTag>\nclass TriangleQualityTable : public vtkm::exec::ExecutionObjectBase\n{\npublic:\n VTKM_CONT\n TriangleQualityTable()\n {\n this->TablePortal =\n GetTriangleQualityTable().PrepareForInput(DeviceAdapterTag());\n }\n\n template<typename T>\n VTKM_EXEC\n vtkm::Float32 GetQuality(const vtkm::Vec<T,3> &point1,\n const vtkm::Vec<T,3> &point2,\n const vtkm::Vec<T,3> &point3) const\n {\n return LookupTriangleQuality(this->TablePortal, point1, point2, point3);\n }\n\nprivate:\n typedef vtkm::cont::ArrayHandle<vtkm::Float32> TableArrayType;\n typedef typename TableArrayType::ExecutionTypes<DeviceAdapterTag>::PortalConst\n TableArrayPortalType;\n TableArrayPortalType TablePortal;\n};\n\nstruct TriangleQualityWorklet2 : vtkm::worklet::WorkletMapPointToCell\n{\n typedef void ControlSignature(CellSetIn cells,\n FieldInPoint<Vec3> pointCoordinates,\n ExecObject triangleQualityTable,\n FieldOutCell<Scalar> triangleQuality);\n typedef _4 ExecutionSignature(CellShape, _2, _3);\n typedef _1 InputDomain;\n\n template<typename CellShape,\n typename PointCoordinatesType,\n typename TriangleQualityTableType>\n VTKM_EXEC\n vtkm::Float32 operator()(\n CellShape shape,\n const PointCoordinatesType &pointCoordinates,\n 
const TriangleQualityTableType &triangleQualityTable) const\n {\n if (shape.Id != vtkm::CELL_SHAPE_TRIANGLE)\n {\n this->RaiseError(\"Only triangles are supported for triangle quality.\");\n return vtkm::Nan32();\n }\n\n return triangleQualityTable.GetQuality(pointCoordinates[0],\n pointCoordinates[1],\n pointCoordinates[2]);\n }\n};\n\n// Normally we would encapsulate this call in a filter, but for demonstrative\n// purposes we are just calling the worklet directly.\ntemplate<typename DeviceAdapterTag>\nVTKM_CONT\nvtkm::cont::ArrayHandle<vtkm::Float32>\nRunTriangleQuality2(vtkm::cont::DataSet dataSet,\n DeviceAdapterTag)\n{\n TriangleQualityTable<DeviceAdapterTag> triangleQualityTable;\n\n vtkm::cont::ArrayHandle<vtkm::Float32> triangleQualities;\n\n vtkm::worklet::DispatcherMapTopology<TriangleQualityWorklet2,DeviceAdapterTag>\n dispatcher;\n dispatcher.Invoke(dataSet.GetCellSet(),\n dataSet.GetCoordinateSystem().GetData(),\n triangleQualityTable,\n triangleQualities);\n\n return triangleQualities;\n}\n////\n//// END-EXAMPLE TriangleQualityExecObject.cxx\n////\n\n} // namespace TriangleQualityNamespace\n\n#include <vtkm/cont/ArrayHandleUniformPointCoordinates.h>\n#include <vtkm/cont/DataSetBuilderExplicit.h>\n\n#include <vtkm/worklet/DispatcherMapField.h>\n#include <vtkm/worklet/WorkletMapField.h>\n\n#include <vtkm/cont/testing/Testing.h>\n\nnamespace TriangleQualityNamespace {\n\ntemplate<typename T>\nVTKM_EXEC\nT TriangleQuality(const vtkm::Vec<T,3> &edgeLengths)\n{\n // Heron's formula for triangle area.\n T semiperimeter = (edgeLengths[0]+edgeLengths[1]+edgeLengths[2])/2;\n T area = vtkm::Sqrt(semiperimeter*\n (semiperimeter - edgeLengths[0])*\n (semiperimeter - edgeLengths[1])*\n (semiperimeter - edgeLengths[2]));\n\n if (!vtkm::IsFinite(area))\n {\n // If the edge lengths do not make a valid triangle (i.e. 
the sum of the\n // two smaller lengths is smaller than the larger length), then Heron's\n // formula gives an imaginary number, which we expect to result in a NaN.\n // If that happens, just return a quality of 0 for the degenerate triangle.\n return 0;\n }\n\n // Formula for triangle quality.\n return 4*area*vtkm::Sqrt(T(3))/vtkm::MagnitudeSquared(edgeLengths);\n}\n\nstruct ComputeTriangleQualityValues : vtkm::worklet::WorkletMapField\n{\n typedef void ControlSignature(FieldIn<Vec3>, FieldOut<Scalar>);\n typedef _2 ExecutionSignature(_1);\n\n template<typename T>\n VTKM_EXEC\n T operator()(const vtkm::Vec<T,3> &edgeLengths) const\n {\n return TriangleQuality(edgeLengths);\n }\n};\n\nVTKM_CONT\nvtkm::cont::ArrayHandle<vtkm::Float32>\nBuildTriangleQualityTable()\n{\n // Repurpose uniform point coordinates to compute triange edge lengths.\n vtkm::cont::ArrayHandleUniformPointCoordinates edgeLengths(\n vtkm::Id3(TRIANGLE_QUALITY_TABLE_DIMENSION,\n TRIANGLE_QUALITY_TABLE_DIMENSION,\n 1),\n vtkm::Vec<vtkm::FloatDefault,3>(0, 0, 1),\n vtkm::Vec<vtkm::FloatDefault,3>(1.0f/(TRIANGLE_QUALITY_TABLE_DIMENSION-1),\n 1.0f/(TRIANGLE_QUALITY_TABLE_DIMENSION-1),\n 1.0f));\n\n vtkm::cont::ArrayHandle<vtkm::Float32> triQualityArray;\n\n vtkm::worklet::DispatcherMapField<ComputeTriangleQualityValues> dispatcher;\n dispatcher.Invoke(edgeLengths, triQualityArray);\n\n return triQualityArray;\n}\n\ntemplate<typename PortalType>\nVTKM_CONT\nvoid PrintTriangleQualityTable(const PortalType &portal)\n{\n for (vtkm::Id index = 0; index < portal.GetNumberOfValues(); index++)\n {\n if (index%TRIANGLE_QUALITY_TABLE_DIMENSION == 0)\n {\n std::cout << std::endl;\n }\n std::cout << portal.Get(index) << \", \";\n }\n std::cout << std::endl << std::endl;\n}\n\nVTKM_CONT\nvtkm::cont::DataSet BuildDataSet()\n{\n static const vtkm::Id NUM_ROWS = 5;\n\n vtkm::cont::DataSetBuilderExplicitIterative dataSetBuilder;\n dataSetBuilder.Begin();\n\n for (vtkm::Id row = 0; row < NUM_ROWS; row++)\n {\n 
dataSetBuilder.AddPoint(0, static_cast<vtkm::Float32>(row*row), 0);\n dataSetBuilder.AddPoint(1, static_cast<vtkm::Float32>(row*row), 0);\n }\n\n for (vtkm::Id row = 0; row < NUM_ROWS-1; row++)\n {\n vtkm::Id firstPoint = 2*row;\n\n dataSetBuilder.AddCell(vtkm::CELL_SHAPE_TRIANGLE);\n dataSetBuilder.AddCellPoint(firstPoint+0);\n dataSetBuilder.AddCellPoint(firstPoint+1);\n dataSetBuilder.AddCellPoint(firstPoint+2);\n\n dataSetBuilder.AddCell(vtkm::CELL_SHAPE_TRIANGLE);\n dataSetBuilder.AddCellPoint(firstPoint+1);\n dataSetBuilder.AddCellPoint(firstPoint+3);\n dataSetBuilder.AddCellPoint(firstPoint+2);\n }\n\n return dataSetBuilder.Create();\n}\n\nVTKM_CONT\nvoid CheckQualityArray(vtkm::cont::ArrayHandle<vtkm::Float32> qualities)\n{\n vtkm::cont::printSummary_ArrayHandle(qualities, std::cout);\n std::cout << std::endl;\n\n vtkm::cont::ArrayHandle<vtkm::Float32>::PortalConstControl qualityPortal =\n qualities.GetPortalConstControl();\n\n // Pairwise triangles should have the same quality.\n for (vtkm::Id pairIndex = 0;\n pairIndex < qualities.GetNumberOfValues()/2;\n pairIndex++)\n {\n vtkm::Float32 q1 = qualityPortal.Get(2*pairIndex);\n vtkm::Float32 q2 = qualityPortal.Get(2*pairIndex+1);\n VTKM_TEST_ASSERT(test_equal(q1,q2),\n \"Isometric triangles have different quality.\");\n }\n\n // Triangle qualities should be monotonically decreasing.\n vtkm::Float32 lastQuality = 1;\n for (vtkm::Id triIndex = 0;\n triIndex < qualities.GetNumberOfValues();\n triIndex++)\n {\n vtkm::Float32 quality = qualityPortal.Get(triIndex);\n VTKM_TEST_ASSERT(test_equal(quality,lastQuality)\n || (quality <= lastQuality),\n \"Triangle quality not monotonically decreasing.\");\n lastQuality = quality;\n }\n\n // The first quality should definitely be better than the last.\n vtkm::Float32 firstQuality = qualityPortal.Get(0);\n VTKM_TEST_ASSERT(firstQuality > lastQuality,\n \"First quality not better than last.\");\n}\n\nVTKM_CONT\nvoid TestTriangleQuality()\n{\n std::cout << \"Building 
triangle quality array.\" << std::endl;\n vtkm::cont::ArrayHandle<vtkm::Float32> triQualityTable =\n BuildTriangleQualityTable();\n VTKM_TEST_ASSERT(\n triQualityTable.GetNumberOfValues() ==\n TRIANGLE_QUALITY_TABLE_DIMENSION*TRIANGLE_QUALITY_TABLE_DIMENSION,\n \"Bad size for triangle quality array.\");\n PrintTriangleQualityTable(triQualityTable.GetPortalConstControl());\n\n std::cout << \"Creating a data set.\" << std::endl;\n vtkm::cont::DataSet dataSet = BuildDataSet();\n\n std::cout << \"Getting triangle quality using whole array argument.\"\n << std::endl;\n vtkm::cont::ArrayHandle<vtkm::Float32> qualities =\n RunTriangleQuality(dataSet, VTKM_DEFAULT_DEVICE_ADAPTER_TAG());\n CheckQualityArray(qualities);\n\n std::cout << \"Getting triangle quality using execution object argument.\"\n << std::endl;\n qualities = RunTriangleQuality2(dataSet, VTKM_DEFAULT_DEVICE_ADAPTER_TAG());\n CheckQualityArray(qualities);\n}\n\n} // namespace TriangleQualityNamespace\n\nint TriangleQuality(int, char*[])\n{\n return vtkm::cont::testing::Testing::Run(\n TriangleQualityNamespace::TestTriangleQuality);\n}\n"
},
{
"alpha_fraction": 0.6605769395828247,
"alphanum_fraction": 0.6705127954483032,
"avg_line_length": 28.714284896850586,
"blob_id": "8343faa8e2d8d9bb75b5f66be5bd68547038d25e",
"content_id": "20ffa3b870ff88d29761eba990dfa4e7f63338d1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 3120,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 105,
"path": "/examples/SimpleHistogram.cxx",
"repo_name": "sujin-philip/VTKmUsersGuide",
"src_encoding": "UTF-8",
"text": "#include <vtkm/Math.h>\n#include <vtkm/Range.h>\n\n#include <vtkm/cont/ArrayHandle.h>\n#include <vtkm/cont/ArrayHandleConstant.h>\n#include <vtkm/cont/ArrayRangeCompute.h>\n#include <vtkm/cont/DeviceAdapter.h>\n\n#include <vtkm/worklet/DispatcherMapField.h>\n#include <vtkm/worklet/WorkletMapField.h>\n\n#include <vtkm/cont/testing/Testing.h>\n\nstruct SimpleHistogram\n{\n ////\n //// BEGIN-EXAMPLE SimpleHistogram.cxx\n ////\n struct CountBins : vtkm::worklet::WorkletMapField\n {\n typedef void ControlSignature(FieldIn<Scalar> data,\n AtomicArrayInOut<> histogramBins);\n typedef void ExecutionSignature(_1, _2);\n using InputDomain = _1;\n\n vtkm::Range HistogramRange;\n vtkm::Id NumberOfBins;\n\n VTKM_CONT\n CountBins(const vtkm::Range &histogramRange, vtkm::Id &numBins)\n : HistogramRange(histogramRange), NumberOfBins(numBins)\n { }\n\n template<typename T, typename AtomicArrayType>\n void operator()(T value, const AtomicArrayType &histogramBins) const\n {\n vtkm::Float64 interp =\n (value - this->HistogramRange.Min)/this->HistogramRange.Length();\n vtkm::Id bin = static_cast<vtkm::Id>(interp*this->NumberOfBins);\n if (bin < 0) { bin = 0; }\n if (bin >= this->NumberOfBins) { bin = this->NumberOfBins - 1; }\n\n histogramBins.Add(bin, 1);\n }\n };\n ////\n //// END-EXAMPLE SimpleHistogram.cxx\n ////\n\n template<typename InputArray, typename Device>\n VTKM_CONT\n static vtkm::cont::ArrayHandle<vtkm::Int32>\n Run(const InputArray &input, vtkm::Id numberOfBins, Device)\n {\n VTKM_IS_ARRAY_HANDLE(InputArray);\n\n // TODO: Should check that input type has only one component.\n\n vtkm::Range range =\n vtkm::cont::ArrayRangeCompute(input).GetPortalConstControl().Get(0);\n\n // Initialize histogram to 0\n vtkm::cont::ArrayHandle<vtkm::Int32> histogram;\n vtkm::cont::DeviceAdapterAlgorithm<Device>::Copy(\n vtkm::cont::ArrayHandleConstant<vtkm::Int32>(0, numberOfBins),\n histogram);\n\n CountBins histogramWorklet(range, numberOfBins);\n\n 
vtkm::worklet::DispatcherMapField<CountBins, Device>\n dispatcher(histogramWorklet);\n dispatcher.Invoke(input, histogram);\n\n return histogram;\n }\n};\n\nVTKM_CONT\nstatic inline void TrySimpleHistogram()\n{\n std::cout << \"Try Simple Histogram\" << std::endl;\n\n static const vtkm::Id ARRAY_SIZE = 100;\n vtkm::cont::ArrayHandle<vtkm::Float32> inputArray;\n inputArray.Allocate(ARRAY_SIZE);\n SetPortal(inputArray.GetPortalControl());\n\n vtkm::cont::ArrayHandle<vtkm::Int32> histogram =\n SimpleHistogram::Run(inputArray,\n ARRAY_SIZE/2,\n VTKM_DEFAULT_DEVICE_ADAPTER_TAG());\n\n VTKM_TEST_ASSERT(histogram.GetNumberOfValues() == ARRAY_SIZE/2,\n \"Bad array size\");\n for (vtkm::Id index = 0; index < histogram.GetNumberOfValues(); ++index)\n {\n vtkm::Int32 binSize = histogram.GetPortalConstControl().Get(index);\n VTKM_TEST_ASSERT(binSize == 2, \"Bad bin size.\");\n }\n}\n\nint SimpleHistogram(int, char *[])\n{\n return vtkm::cont::testing::Testing::Run(TrySimpleHistogram);\n}\n"
},
{
"alpha_fraction": 0.6132305860519409,
"alphanum_fraction": 0.6466829776763916,
"avg_line_length": 32.256248474121094,
"blob_id": "4c86613058b34cdba730b74bc8ea78c44741faf7",
"content_id": "efb2f1bfccd427f3c74b8cac351de303bf5be130",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 5321,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 160,
"path": "/examples/ArrayHandleCompositeVector.cxx",
"repo_name": "sujin-philip/VTKmUsersGuide",
"src_encoding": "UTF-8",
"text": "#include <vtkm/cont/ArrayHandleCompositeVector.h>\n#include <vtkm/cont/ArrayHandleConstant.h>\n#include <vtkm/cont/ArrayHandleCounting.h>\n#include <vtkm/cont/ArrayHandleIndex.h>\n#include <vtkm/cont/ArrayHandleUniformPointCoordinates.h>\n\n#include <vtkm/cont/testing/Testing.h>\n\nnamespace {\n\ntemplate<typename ArrayHandleType>\nvoid CheckArray(ArrayHandleType array)\n{\n vtkm::cont::printSummary_ArrayHandle(array, std::cout);\n std::cout << std::endl;\n typename ArrayHandleType::PortalConstControl portal =\n array.GetPortalConstControl();\n\n // [(0,3,2,0), (1,1,7,0), (2,4,1,0), (3,1,8,0), (4,5,2,0)].\n VTKM_TEST_ASSERT(test_equal(portal.Get(0), vtkm::make_Vec(0,3,2,0)),\n \"Bad value in array.\");\n VTKM_TEST_ASSERT(test_equal(portal.Get(1), vtkm::make_Vec(1,1,7,0)),\n \"Bad value in array.\");\n VTKM_TEST_ASSERT(test_equal(portal.Get(2), vtkm::make_Vec(2,4,1,0)),\n \"Bad value in array.\");\n VTKM_TEST_ASSERT(test_equal(portal.Get(3), vtkm::make_Vec(3,1,8,0)),\n \"Bad value in array.\");\n}\n\nvoid ArrayHandleCompositeVectorBasic()\n{\n ////\n //// BEGIN-EXAMPLE ArrayHandleCompositeVectorBasic.cxx\n ////\n // Create an array with [0, 1, 2, 3, 4]\n typedef vtkm::cont::ArrayHandleIndex ArrayType1;\n ArrayType1 array1(5);\n\n // Create an array with [3, 1, 4, 1, 5]\n typedef vtkm::cont::ArrayHandle<vtkm::Id> ArrayType2;\n ArrayType2 array2;\n array2.Allocate(5);\n ArrayType2::PortalControl arrayPortal2 = array2.GetPortalControl();\n arrayPortal2.Set(0, 3);\n arrayPortal2.Set(1, 1);\n arrayPortal2.Set(2, 4);\n arrayPortal2.Set(3, 1);\n arrayPortal2.Set(4, 5);\n\n // Create an array with [2, 7, 1, 8, 2]\n typedef vtkm::cont::ArrayHandle<vtkm::Id> ArrayType3;\n ArrayType3 array3;\n array3.Allocate(5);\n ArrayType2::PortalControl arrayPortal3 = array3.GetPortalControl();\n arrayPortal3.Set(0, 2);\n arrayPortal3.Set(1, 7);\n arrayPortal3.Set(2, 1);\n arrayPortal3.Set(3, 8);\n arrayPortal3.Set(4, 2);\n\n // Create an array with [0, 0, 0, 0]\n typedef 
vtkm::cont::ArrayHandleConstant<vtkm::Id> ArrayType4;\n ArrayType4 array4(0, 5);\n\n // Use ArrayhandleCompositeVector to create the array\n // [(0,3,2,0), (1,1,7,0), (2,4,1,0), (3,1,8,0), (4,5,2,0)].\n typedef vtkm::cont::ArrayHandleCompositeVectorType<\n ArrayType1, ArrayType2, ArrayType3, ArrayType4>::type CompositeArrayType;\n CompositeArrayType compositeArray(array1, 0,\n array2, 0,\n array3, 0,\n array4, 0);\n ////\n //// END-EXAMPLE ArrayHandleCompositeVectorBasic.cxx\n ////\n CheckArray(compositeArray);\n\n CheckArray(\n ////\n //// BEGIN-EXAMPLE MakeArrayHandleCompositeVector.cxx\n ////\n vtkm::cont::make_ArrayHandleCompositeVector(array1, 0,\n array2, 0,\n array3, 0,\n array4, 0)\n ////\n //// END-EXAMPLE MakeArrayHandleCompositeVector.cxx\n ////\n );\n}\n\n////\n//// BEGIN-EXAMPLE ArrayHandleCompositeVectorComponents.cxx\n////\ntemplate<typename CoordinateArrayType, typename ElevationArrayType>\nVTKM_CONT\ntypename vtkm::cont::ArrayHandleCompositeVectorType<\n CoordinateArrayType, CoordinateArrayType, ElevationArrayType>::type\nElevateCoordianteArray(const CoordinateArrayType &coordinateArray,\n const ElevationArrayType &elevationArray)\n{\n VTKM_IS_ARRAY_HANDLE(CoordinateArrayType);\n VTKM_IS_ARRAY_HANDLE(ElevationArrayType);\n\n return vtkm::cont::make_ArrayHandleCompositeVector(coordinateArray, 0,\n coordinateArray, 1,\n elevationArray, 0);\n}\n////\n//// END-EXAMPLE ArrayHandleCompositeVectorComponents.cxx\n////\n\nvoid TryElevateCoordinateArray()\n{\n typedef vtkm::cont::ArrayHandleUniformPointCoordinates CoordinateArrayType;\n CoordinateArrayType coordinateArray(vtkm::Id3(5, 5, 3));\n\n typedef vtkm::cont::ArrayHandleCounting<vtkm::FloatDefault>\n ElevationArrayType;\n ElevationArrayType elevationArray(0.0f, 1.0f, 5*5*3);\n\n typedef vtkm::cont::ArrayHandleCompositeVectorType<\n CoordinateArrayType, CoordinateArrayType, ElevationArrayType>::type\n ElevatedCoordsType;\n ElevatedCoordsType elevatedCoords =\n ElevateCoordianteArray(coordinateArray, 
elevationArray);\n\n vtkm::cont::printSummary_ArrayHandle(elevatedCoords, std::cout);\n std::cout << std::endl;\n ElevatedCoordsType::PortalConstControl elevatedPortal =\n elevatedCoords.GetPortalConstControl();\n VTKM_TEST_ASSERT(elevatedPortal.GetNumberOfValues() == 5*5*3, \"Wrong size.\");\n vtkm::Id flatIndex = 0;\n for (vtkm::Id kIndex = 0; kIndex < 3; kIndex++)\n {\n for (vtkm::Id jIndex = 0; jIndex < 5; jIndex++)\n {\n for (vtkm::Id iIndex = 0; iIndex < 5; iIndex++)\n {\n VTKM_TEST_ASSERT(test_equal(elevatedPortal.Get(flatIndex),\n vtkm::make_Vec(iIndex, jIndex, flatIndex)),\n \"Bad value.\");\n flatIndex++;\n }\n }\n }\n}\n\nvoid Test()\n{\n ArrayHandleCompositeVectorBasic();\n TryElevateCoordinateArray();\n}\n\n} // anonymous namespace\n\nint ArrayHandleCompositeVector(int, char *[])\n{\n return vtkm::cont::testing::Testing::Run(Test);\n}\n"
},
{
"alpha_fraction": 0.6492066979408264,
"alphanum_fraction": 0.6585524678230286,
"avg_line_length": 21.44390296936035,
"blob_id": "473e627420fd7ae979b961f85ffb793af680202c",
"content_id": "e57ac87ad6f3ed9ea4a73a3053667f40005107d1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 4601,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 205,
"path": "/examples/BasicGlut.cxx",
"repo_name": "sujin-philip/VTKmUsersGuide",
"src_encoding": "UTF-8",
"text": "#ifdef __APPLE__\n// Glut is depricated on apple, but is sticking around for now. Hopefully\n// someone will step up and make FreeGlut or OpenGlut compatible. Or perhaps\n// we should move to GLFW. For now, just disable the warnings.\n#pragma GCC diagnostic ignored \"-Wdeprecated-declarations\"\n#endif\n\n////\n//// BEGIN-EXAMPLE BasicGlut.cxx\n////\n#include <vtkm/io/reader/VTKDataSetReader.h>\n\n#include <vtkm/rendering/Actor.h>\n#include <vtkm/rendering/Camera.h>\n#include <vtkm/rendering/CanvasGL.h>\n#include <vtkm/rendering/MapperGL.h>\n#include <vtkm/rendering/View3D.h>\n\n#ifdef __APPLE__\n#include <GLUT/glut.h>\n#else\n#include <GL/glut.h>\n#endif\n\nnamespace BasicGlutExample {\n\nvtkm::rendering::View3D *gViewPointer = NULL;\n\nint gButtonState[3] = { GLUT_UP, GLUT_UP, GLUT_UP };\nint gMousePositionX;\nint gMousePositionY;\n//// PAUSE-EXAMPLE\nbool gNoInteraction;\n//// RESUME-EXAMPLE\n\n////\n//// BEGIN-EXAMPLE PaintView.cxx\n////\nvoid DisplayCallback()\n{\n gViewPointer->Paint();\n glutSwapBuffers();\n //// PAUSE-EXAMPLE\n if (gNoInteraction)\n {\n delete gViewPointer;\n gViewPointer = NULL;\n exit(0);\n }\n //// RESUME-EXAMPLE\n}\n////\n//// END-EXAMPLE PaintView.cxx\n////\n\nvoid WindowReshapeCallback(int width, int height)\n{\n gViewPointer->GetCanvas().ResizeBuffers(width, height);\n}\n\nvoid MouseButtonCallback(int buttonIndex, int state, int x, int y)\n{\n gButtonState[buttonIndex] = state;\n gMousePositionX = x;\n gMousePositionY = y;\n}\n\nvoid MouseMoveCallback(int x, int y)\n{\n vtkm::Id width = gViewPointer->GetCanvas().GetWidth();\n vtkm::Id height = gViewPointer->GetCanvas().GetHeight();\n\n vtkm::Float32 lastX = (2.0f*gMousePositionX)/width - 1.0f;\n vtkm::Float32 lastY = 1.0f - (2.0f*gMousePositionY)/height;\n vtkm::Float32 nextX = (2.0f*x)/width - 1.0f;\n vtkm::Float32 nextY = 1.0f - (2.0f*y)/height;\n\n if (gButtonState[0] == GLUT_DOWN)\n {\n gViewPointer->GetCamera().TrackballRotate(lastX, lastY, nextX, nextY);\n }\n else if 
(gButtonState[1] == GLUT_DOWN)\n {\n gViewPointer->GetCamera().Pan(nextX-lastX, nextY-lastY);\n }\n else if (gButtonState[2] == GLUT_DOWN)\n {\n gViewPointer->GetCamera().Zoom(nextY-lastY);\n }\n\n gMousePositionX = x;\n gMousePositionY = y;\n\n glutPostRedisplay();\n}\n\nvoid KeyPressCallback(unsigned char key, int x, int y)\n{\n switch (key)\n {\n case 'q':\n case 'Q':\n delete gViewPointer;\n gViewPointer = NULL;\n exit(0);\n break;\n }\n //// PAUSE-EXAMPLE\n (void)x; (void)y;\n //// RESUME-EXAMPLE\n}\n\nint main(int argc, char *argv[])\n{\n // Initialize GLUT window and callbacks\n ////\n //// BEGIN-EXAMPLE InitializeGlut.cxx\n ////\n glutInit(&argc, argv);\n glutInitWindowSize(960, 600);\n glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_ALPHA | GLUT_DEPTH);\n glutCreateWindow(\"VTK-m Example\");\n ////\n //// END-EXAMPLE InitializeGlut.cxx\n ////\n\n ////\n //// BEGIN-EXAMPLE InitializeGlutCallbacks.cxx\n ////\n glutDisplayFunc(DisplayCallback);\n glutReshapeFunc(WindowReshapeCallback);\n glutMouseFunc(MouseButtonCallback);\n glutMotionFunc(MouseMoveCallback);\n glutKeyboardFunc(KeyPressCallback);\n ////\n //// END-EXAMPLE InitializeGlutCallbacks.cxx\n ////\n\n // Initialize VTK-m rendering classes\n vtkm::cont::DataSet surfaceData;\n try\n {\n vtkm::io::reader::VTKDataSetReader reader(\"data/cow.vtk\");\n surfaceData = reader.ReadDataSet();\n }\n catch (vtkm::io::ErrorIO &error)\n {\n std::cout << \"Could not read file:\" << std::endl\n << error.GetMessage() << std::endl;\n }\n catch (...)\n {\n throw;\n }\n\n ////\n //// BEGIN-EXAMPLE ConstructView.cxx\n ////\n ////\n //// BEGIN-EXAMPLE ActorScene.cxx\n ////\n vtkm::rendering::Actor actor(surfaceData.GetCellSet(),\n surfaceData.GetCoordinateSystem(),\n surfaceData.GetField(\"RandomPointScalars\"));\n\n vtkm::rendering::Scene scene;\n scene.AddActor(actor);\n ////\n //// END-EXAMPLE ActorScene.cxx\n ////\n\n vtkm::rendering::MapperGL mapper;\n vtkm::rendering::CanvasGL canvas;\n\n gViewPointer = new 
vtkm::rendering::View3D(scene, mapper, canvas);\n gViewPointer->Initialize();\n ////\n //// END-EXAMPLE ConstructView.cxx\n ////\n\n //// PAUSE-EXAMPLE\n if ((argc > 1) && (strcmp(argv[1], \"--no-interaction\") == 0))\n {\n gNoInteraction = true;\n }\n else\n {\n gNoInteraction = false;\n }\n //// RESUME-EXAMPLE\n // Start the GLUT rendering system. This function typically does not return.\n glutMainLoop();\n\n return 0;\n}\n////\n//// END-EXAMPLE BasicGlut.cxx\n////\n\n} // namespace BasicGlutExample\n\nint BasicGlut(int argc, char *argv[])\n{\n return BasicGlutExample::main(argc, argv);\n}\n"
},
{
"alpha_fraction": 0.609277069568634,
"alphanum_fraction": 0.6158239245414734,
"avg_line_length": 34.19117736816406,
"blob_id": "86c07a20603d7925b939867bedda6db3a29148b2",
"content_id": "259e499d4e588a3abaefdce16e5ff214d0027e61",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 7179,
"license_type": "no_license",
"max_line_length": 142,
"num_lines": 204,
"path": "/examples/SumOfAngles.cxx",
"repo_name": "sujin-philip/VTKmUsersGuide",
"src_encoding": "UTF-8",
"text": "#include <vtkm/worklet/DispatcherMapTopology.h>\n#include <vtkm/worklet/WorkletMapTopology.h>\n\n#include <vtkm/cont/ArrayHandle.h>\n#include <vtkm/cont/CellSetExplicit.h>\n#include <vtkm/cont/DataSet.h>\n#include <vtkm/cont/DataSetFieldAdd.h>\n#include <vtkm/cont/DeviceAdapter.h>\n\n#include <vtkm/exec/CellEdge.h>\n\n#include <vtkm/Math.h>\n#include <vtkm/VectorAnalysis.h>\n\n#include <vtkm/io/reader/VTKDataSetReader.h>\n#include <vtkm/io/writer/VTKDataSetWriter.h>\n\n#include <vtkm/cont/testing/Testing.h>\n\nstruct GaussianCurvature\n{\n // This worklet computes the sum of the angles of all polygons connected\n // to each point. This sum is related (but not equal to) the Gaussian\n // curvature of the surface. A flat mesh will have a sum equal to 2 pi.\n // A concave or convex surface will have a sum less than 2 pi. A saddle\n // will have a sum greater than 2 pi. The actual Gaussian curvature is\n // equal to (2 pi - angle sum)/A where A is an area of influence (which\n // we are not calculating here). See\n // http://computergraphics.stackexchange.com/questions/1718/what-is-the-simplest-way-to-compute-principal-curvature-for-a-mesh-triangle#1721\n // or the publication \"Discrete Differential-Geometry Operators for\n // Triangulated 2-Manifolds\" by Mayer et al. 
(Equation 9).\n ////\n //// BEGIN-EXAMPLE SumOfAngles.cxx\n ////\n struct SumOfAngles : vtkm::worklet::WorkletMapCellToPoint\n {\n typedef void ControlSignature(CellSetIn inputCells,\n WholeCellSetIn<>, // Same as inputCells\n WholeArrayIn<> pointCoords,\n FieldOutPoint<Scalar> angleSum);\n typedef void ExecutionSignature(CellIndices incidentCells,\n InputIndex pointIndex,\n _2 cellSet,\n _3 pointCoordsPortal,\n _4 outSum);\n using InputDomain = _1;\n\n template<typename IncidentCellVecType,\n typename CellSetType,\n typename PointCoordsPortalType,\n typename SumType>\n VTKM_EXEC\n void operator()(const IncidentCellVecType &incidentCells,\n vtkm::Id pointIndex,\n const CellSetType &cellSet,\n const PointCoordsPortalType &pointCoordsPortal,\n SumType &outSum) const\n {\n using CoordType = typename PointCoordsPortalType::ValueType;\n\n CoordType thisPoint = pointCoordsPortal.Get(pointIndex);\n\n outSum = 0;\n for (vtkm::IdComponent incidentCellIndex = 0;\n incidentCellIndex < incidentCells.GetNumberOfComponents();\n ++incidentCellIndex)\n {\n // Get information about incident cell.\n vtkm::Id cellIndex = incidentCells[incidentCellIndex];\n typename CellSetType::CellShapeTag cellTag =\n cellSet.GetCellShape(cellIndex);\n typename CellSetType::IndicesType cellConnections =\n cellSet.GetIndices(cellIndex);\n vtkm::IdComponent numConnections =\n cellSet.GetNumberOfIndices(cellIndex);\n vtkm::IdComponent numEdges =\n vtkm::exec::CellEdgeNumberOfEdges(numConnections, cellTag, *this);\n\n // Iterate over all edges and find the first one with pointIndex.\n // Use that to find the first vector.\n vtkm::IdComponent edgeIndex = -1;\n CoordType vec1;\n while (true)\n {\n ++edgeIndex;\n if (edgeIndex >= numEdges)\n {\n this->RaiseError(\"Bad cell. 
Could not find two incident edges.\");\n return;\n }\n vtkm::Vec<vtkm::IdComponent,2> edge =\n vtkm::exec::CellEdgeLocalIndices(\n numConnections, edgeIndex, cellTag, *this);\n if (cellConnections[edge[0]] == pointIndex)\n {\n vec1 = pointCoordsPortal.Get(cellConnections[edge[1]]) - thisPoint;\n break;\n }\n else if (cellConnections[edge[1]] == pointIndex)\n {\n vec1 = pointCoordsPortal.Get(cellConnections[edge[0]]) - thisPoint;\n break;\n }\n else\n {\n // Continue to next iteration of loop.\n }\n }\n\n // Continue iteration over remaining edges and find the second one with\n // pointIndex. Use that to find the second vector.\n CoordType vec2;\n while (true)\n {\n ++edgeIndex;\n if (edgeIndex >= numEdges)\n {\n this->RaiseError(\"Bad cell. Could not find two incident edges.\");\n return;\n }\n vtkm::Vec<vtkm::IdComponent,2> edge =\n vtkm::exec::CellEdgeLocalIndices(\n numConnections, edgeIndex, cellTag, *this);\n if (cellConnections[edge[0]] == pointIndex)\n {\n vec2 = pointCoordsPortal.Get(cellConnections[edge[1]]) - thisPoint;\n break;\n }\n else if (cellConnections[edge[1]] == pointIndex)\n {\n vec2 = pointCoordsPortal.Get(cellConnections[edge[0]]) - thisPoint;\n break;\n }\n else\n {\n // Continue to next iteration of loop.\n }\n }\n\n // The dot product of two unit vectors is equal to the cosine of the\n // angle between them.\n vtkm::Normalize(vec1);\n vtkm::Normalize(vec2);\n SumType cosine = static_cast<SumType>(vtkm::dot(vec1, vec2));\n\n outSum += vtkm::ACos(cosine);\n }\n }\n };\n ////\n //// END-EXAMPLE SumOfAngles.cxx\n ////\n\n template<typename CellSetType,\n typename T,\n typename CoordStorage,\n typename Device>\n static vtkm::cont::ArrayHandle<T>\n Run(const CellSetType &cellSet,\n const vtkm::cont::ArrayHandle<vtkm::Vec<T,3>, CoordStorage> &pointCoords,\n Device)\n {\n VTKM_IS_CELL_SET(CellSetType);\n\n vtkm::cont::ArrayHandle<T> angleSums;\n\n vtkm::worklet::DispatcherMapTopology<SumOfAngles, Device> dispatcher;\n dispatcher.Invoke(cellSet, 
cellSet, pointCoords, angleSums);\n\n return angleSums;\n }\n};\n\nVTKM_CONT\nstatic void TrySumOfAngles()\n{\n std::cout << \"Read input data\" << std::endl;\n vtkm::io::reader::VTKDataSetReader reader(\"data/cow.vtk\");\n vtkm::cont::DataSet dataSet = reader.ReadDataSet();\n\n std::cout << \"Get information out of data\" << std::endl;\n vtkm::cont::CellSetExplicit<> cellSet;\n dataSet.GetCellSet().CopyTo(cellSet);\n\n vtkm::cont::ArrayHandle<vtkm::Vec<vtkm::Float32,3> > pointCoordinates;\n dataSet.GetCoordinateSystem().GetData().CopyTo(pointCoordinates);\n\n std::cout << \"Run algorithm\" << std::endl;\n vtkm::cont::ArrayHandle<vtkm::Float32> angleSums =\n GaussianCurvature::Run(\n cellSet, pointCoordinates, VTKM_DEFAULT_DEVICE_ADAPTER_TAG());\n\n std::cout << \"Add field to data set\" << std::endl;\n vtkm::cont::DataSetFieldAdd::AddPointField(dataSet, \"angle-sum\", angleSums);\n\n std::cout << \"Write result\" << std::endl;\n vtkm::io::writer::VTKDataSetWriter writer(\"cow-curvature.vtk\");\n writer.WriteDataSet(dataSet);\n}\n\nint SumOfAngles(int, char *[])\n{\n return vtkm::cont::testing::Testing::Run(TrySumOfAngles);\n}\n"
},
{
"alpha_fraction": 0.6306344866752625,
"alphanum_fraction": 0.6488074660301208,
"avg_line_length": 26.885440826416016,
"blob_id": "d9cbc2936adc4016ffa6747295bf175f9ff06c48",
"content_id": "e91fa6589bc70aa5f1c8c661870eddeadb7577d9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 35052,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 1257,
"path": "/examples/FractalWorklets.cxx",
"repo_name": "sujin-philip/VTKmUsersGuide",
"src_encoding": "UTF-8",
"text": "#include <vtkm/TypeListTag.h>\n\n#include <vtkm/cont/ArrayHandleGroupVec.h>\n#include <vtkm/cont/ArrayRangeCompute.h>\n\n#include <vtkm/cont/arg/Transport.h>\n#include <vtkm/cont/arg/TypeCheck.h>\n#include <vtkm/cont/arg/TypeCheckTagArray.h>\n\n#include <vtkm/exec/arg/AspectTagDefault.h>\n#include <vtkm/exec/arg/Fetch.h>\n#include <vtkm/exec/arg/ThreadIndicesBasic.h>\n\n#include <vtkm/worklet/DispatcherMapField.h>\n#include <vtkm/worklet/ScatterCounting.h>\n#include <vtkm/worklet/WorkletMapField.h>\n\n#include <vtkm/cont/testing/Testing.h>\n\n#include <fstream>\n#include <type_traits>\n\ntemplate<typename T>\nstatic vtkm::Vec<T,2> TransformSVGPoint(const vtkm::Vec<T,2> &point,\n const vtkm::Range xRange,\n const vtkm::Range yRange,\n float padding)\n{\n return vtkm::Vec<T,2>(static_cast<T>(point[0] - xRange.Min + padding),\n static_cast<T>(yRange.Max - point[1] + padding));\n}\n\ntemplate<typename T>\nstatic void WriteSVG(const std::string &filename,\n const vtkm::cont::ArrayHandle<vtkm::Vec<T,2> > &data,\n float width = 2.0,\n const std::string &color = \"black\")\n{\n static const float PADDING = 0.05f;\n\n vtkm::cont::ArrayHandle<vtkm::Range> ranges =\n vtkm::cont::ArrayRangeCompute(data);\n vtkm::Range xRange = ranges.GetPortalConstControl().Get(0);\n vtkm::Range yRange = ranges.GetPortalConstControl().Get(1);\n\n std::ofstream file(filename.c_str());\n\n file << \"<?xml version=\\\"1.0\\\" encoding=\\\"UTF-8\\\" ?>\\n\";\n file << \"<svg xmlns=\\\"http://www.w3.org/2000/svg\\\" version=\\\"1.1\\\" \"\n << \"width=\\\"\" << xRange.Length() + 2*PADDING << \"in\\\" \"\n << \"height=\\\"\" << yRange.Length() + 2*PADDING << \"in\\\" \"\n << \">\\n\";\n\n typename vtkm::cont::ArrayHandle<vtkm::Vec<T,2> >::PortalConstControl portal =\n data.GetPortalConstControl();\n for (vtkm::Id lineIndex = 0;\n lineIndex < portal.GetNumberOfValues()/2;\n ++lineIndex)\n {\n vtkm::Vec<T,2> p1 =\n TransformSVGPoint(portal.Get(lineIndex*2+0), xRange, yRange, PADDING);\n 
vtkm::Vec<T,2> p2 =\n TransformSVGPoint(portal.Get(lineIndex*2+1), xRange, yRange, PADDING);\n\n file << \" <line x1=\\\"\" << p1[0]\n << \"in\\\" y1=\\\"\" << p1[1]\n << \"in\\\" x2=\\\"\" << p2[0]\n << \"in\\\" y2=\\\"\" << p2[1]\n << \"in\\\" stroke=\\\"\" << color\n << \"\\\" stroke-width=\\\"\" << width\n << \"\\\" stroke-linecap=\\\"round\\\" />\\n\";\n }\n\n file << \"</svg>\\n\";\n file.close();\n}\n\n////\n//// BEGIN-EXAMPLE TypeCheckImpl.h\n////\nnamespace vtkm {\nnamespace cont {\nnamespace arg {\n\nstruct TypeCheckTag2DCoordinates { };\n\ntemplate<typename ArrayType>\nstruct TypeCheck<TypeCheckTag2DCoordinates, ArrayType>\n{\n static const bool value = vtkm::cont::arg::TypeCheck<\n vtkm::cont::arg::TypeCheckTagArray<vtkm::TypeListTagFieldVec2>,ArrayType\n >::value;\n};\n\n}\n}\n} // namespace vtkm::cont::arg\n////\n//// END-EXAMPLE TypeCheckImpl.h\n////\n\n////\n//// BEGIN-EXAMPLE TransportImpl.h\n////\nnamespace vtkm {\nnamespace cont {\nnamespace arg {\n\nstruct TransportTag2DLineSegmentsIn { };\n\ntemplate<typename ContObjectType, typename Device>\nstruct Transport<\n vtkm::cont::arg::TransportTag2DLineSegmentsIn, ContObjectType, Device>\n{\n VTKM_IS_ARRAY_HANDLE(ContObjectType);\n\n using GroupedArrayType = vtkm::cont::ArrayHandleGroupVec<ContObjectType,2>;\n\n using ExecObjectType =\n typename GroupedArrayType::template ExecutionTypes<Device>::PortalConst;\n\n template<typename InputDomainType>\n VTKM_CONT\n ExecObjectType operator()(const ContObjectType &object,\n const InputDomainType &,\n vtkm::Id inputRange,\n vtkm::Id) const\n {\n if (object.GetNumberOfValues() != inputRange*2)\n {\n throw vtkm::cont::ErrorBadValue(\n \"2D line segment array size does not agree with input size.\");\n }\n\n GroupedArrayType groupedArray(object);\n return groupedArray.PrepareForInput(Device());\n }\n};\n\n}\n}\n} // namespace vtkm::cont::arg\n////\n//// END-EXAMPLE TransportImpl.h\n////\n\n////\n//// BEGIN-EXAMPLE FetchImplBasic.h\n////\nnamespace vtkm 
{\nnamespace exec {\nnamespace arg {\n\nstruct FetchTag2DLineSegmentsIn { };\n\ntemplate<typename ThreadIndicesType, typename ExecObjectType>\nstruct Fetch<\n vtkm::exec::arg::FetchTag2DLineSegmentsIn,\n vtkm::exec::arg::AspectTagDefault,\n ThreadIndicesType,\n ExecObjectType>\n{\n using ValueType = typename ExecObjectType::ValueType;\n\n VTKM_SUPPRESS_EXEC_WARNINGS\n VTKM_EXEC\n ValueType Load(const ThreadIndicesType &indices,\n const ExecObjectType &arrayPortal) const\n {\n return arrayPortal.Get(indices.GetInputIndex());\n }\n\n VTKM_EXEC\n void Store(const ThreadIndicesType &,\n const ExecObjectType &,\n const ValueType &) const\n {\n // Store is a no-op for this fetch.\n }\n};\n\n}\n}\n} // namespace vtkm::exec::arg\n////\n//// END-EXAMPLE FetchImplBasic.h\n////\n\n////\n//// BEGIN-EXAMPLE AspectImpl.h\n////\nnamespace vtkm {\nnamespace exec {\nnamespace arg {\n\nstruct AspectTagFirstPoint { };\n\ntemplate<typename ThreadIndicesType, typename ExecObjectType>\nstruct Fetch<\n vtkm::exec::arg::FetchTag2DLineSegmentsIn,\n vtkm::exec::arg::AspectTagFirstPoint,\n ThreadIndicesType,\n ExecObjectType>\n{\n using ValueType = typename ExecObjectType::ValueType::ComponentType;\n\n VTKM_SUPPRESS_EXEC_WARNINGS\n VTKM_EXEC\n ValueType Load(const ThreadIndicesType &indices,\n const ExecObjectType &arrayPortal) const\n {\n return arrayPortal.Get(indices.GetInputIndex())[0];\n }\n\n VTKM_EXEC\n void Store(const ThreadIndicesType &,\n const ExecObjectType &,\n const ValueType &) const\n {\n // Store is a no-op for this fetch.\n }\n};\n\n//// PAUSE-EXAMPLE\nstruct AspectTagSecondPoint { };\n\ntemplate<typename ThreadIndicesType, typename ExecObjectType>\nstruct Fetch<\n vtkm::exec::arg::FetchTag2DLineSegmentsIn,\n vtkm::exec::arg::AspectTagSecondPoint,\n ThreadIndicesType,\n ExecObjectType>\n{\n using ValueType = typename ExecObjectType::ValueType::ComponentType;\n\n VTKM_SUPPRESS_EXEC_WARNINGS\n VTKM_EXEC\n ValueType Load(const ThreadIndicesType &indices,\n const 
ExecObjectType &arrayPortal) const\n {\n return arrayPortal.Get(indices.GetInputIndex())[1];\n }\n\n VTKM_EXEC\n void Store(const ThreadIndicesType &,\n const ExecObjectType &,\n const ValueType &) const\n {\n // Store is a no-op for this fetch.\n }\n};\n\n//// RESUME-EXAMPLE\n}\n}\n} // namespace vtkm::exec::arg\n////\n//// END-EXAMPLE AspectImpl.h\n////\n\nstruct VecLineSegments : vtkm::worklet::WorkletMapField\n{\n ////\n //// BEGIN-EXAMPLE CustomControlSignatureTag.cxx\n ////\n struct LineSegment2DCoordinatesIn : vtkm::cont::arg::ControlSignatureTagBase\n {\n using TypeCheckTag = vtkm::cont::arg::TypeCheckTag2DCoordinates;\n using TransportTag = vtkm::cont::arg::TransportTag2DLineSegmentsIn;\n using FetchTag = vtkm::exec::arg::FetchTag2DLineSegmentsIn;\n };\n ////\n //// END-EXAMPLE CustomControlSignatureTag.cxx\n ////\n\n ////\n //// BEGIN-EXAMPLE CustomExecutionSignatureTag.cxx\n ////\n template<typename ArgTag>\n struct FirstPoint : vtkm::exec::arg::ExecutionSignatureTagBase\n {\n static const vtkm::IdComponent INDEX = ArgTag::INDEX;\n using AspectTag = vtkm::exec::arg::AspectTagFirstPoint;\n };\n ////\n //// END-EXAMPLE CustomExecutionSignatureTag.cxx\n ////\n\n template<typename ArgTag>\n struct SecondPoint : vtkm::exec::arg::ExecutionSignatureTagBase\n {\n static const vtkm::IdComponent INDEX = ArgTag::INDEX;\n using AspectTag = vtkm::exec::arg::AspectTagSecondPoint;\n };\n\n ////\n //// BEGIN-EXAMPLE UseCustomControlSignatureTag.cxx\n ////\n ////\n //// BEGIN-EXAMPLE UseCustomExecutionSignatureTag.cxx\n ////\n typedef void ControlSignature(LineSegment2DCoordinatesIn coordsIn,\n FieldOut<Vec2> vecOut,\n FieldIn<Index> index);\n ////\n //// END-EXAMPLE UseCustomControlSignatureTag.cxx\n ////\n typedef void ExecutionSignature(FirstPoint<_1>, SecondPoint<_1>, _2);\n ////\n //// END-EXAMPLE UseCustomExecutionSignatureTag.cxx\n ////\n using InputDomain = _3;\n\n template<typename T>\n VTKM_EXEC\n void operator()(const vtkm::Vec<T,2> &firstPoint,\n const 
vtkm::Vec<T,2> &secondPoint,\n vtkm::Vec<T,2> &vecOut) const\n {\n vecOut = secondPoint - firstPoint;\n }\n};\n\nvoid TryVecLineSegments()\n{\n using VecType = vtkm::Vec<vtkm::FloatDefault,2>;\n static const vtkm::Id ARRAY_SIZE = 10;\n\n vtkm::cont::ArrayHandle<VecType> inputArray;\n inputArray.Allocate(ARRAY_SIZE*2);\n SetPortal(inputArray.GetPortalControl());\n\n vtkm::cont::ArrayHandle<VecType> outputArray;\n\n vtkm::worklet::DispatcherMapField<VecLineSegments> dispatcher;\n dispatcher.Invoke(\n inputArray, outputArray, vtkm::cont::ArrayHandleIndex(ARRAY_SIZE));\n\n VTKM_TEST_ASSERT(outputArray.GetNumberOfValues() == ARRAY_SIZE,\n \"Output wrong size.\");\n\n for (vtkm::Id index = 0; index < ARRAY_SIZE; ++index)\n {\n VecType expectedVec =\n TestValue(index*2+1,VecType()) - TestValue(index*2,VecType());\n VecType computedVec = outputArray.GetPortalConstControl().Get(index);\n VTKM_TEST_ASSERT(test_equal(expectedVec, computedVec), \"Bad value.\");\n }\n}\n\n////\n//// BEGIN-EXAMPLE TransportImpl2.h\n////\nnamespace vtkm {\nnamespace cont {\nnamespace arg {\n\ntemplate<vtkm::IdComponent NumOutputPerInput>\nstruct TransportTag2DLineSegmentsOut { };\n\ntemplate<vtkm::IdComponent NumOutputPerInput,\n typename ContObjectType,\n typename Device>\nstruct Transport<\n vtkm::cont::arg::TransportTag2DLineSegmentsOut<NumOutputPerInput>,\n ContObjectType,\n Device>\n{\n VTKM_IS_ARRAY_HANDLE(ContObjectType);\n\n using GroupedArrayType =\n vtkm::cont::ArrayHandleGroupVec<\n vtkm::cont::ArrayHandleGroupVec<ContObjectType,2>, NumOutputPerInput>;\n\n using ExecObjectType =\n typename GroupedArrayType::template ExecutionTypes<Device>::Portal;\n\n template<typename InputDomainType>\n VTKM_CONT\n ExecObjectType operator()(const ContObjectType &object,\n const InputDomainType &,\n vtkm::Id,\n vtkm::Id outputRange) const\n {\n GroupedArrayType groupedArray(\n vtkm::cont::make_ArrayHandleGroupVec<2>(object));\n return groupedArray.PrepareForOutput(outputRange, Device());\n 
}\n};\n\n}\n}\n} // namespace vtkm::cont::arg\n////\n//// END-EXAMPLE TransportImpl2.h\n////\n\n////\n//// BEGIN-EXAMPLE ThreadIndicesLineFractal.h\n////\nnamespace vtkm {\nnamespace exec {\nnamespace arg {\n\nclass ThreadIndicesLineFractal : public vtkm::exec::arg::ThreadIndicesBasic\n{\n using Superclass = vtkm::exec::arg::ThreadIndicesBasic;\n\npublic:\n using CoordinateType = vtkm::Vec<vtkm::FloatDefault,2>;\n\n VTKM_SUPPRESS_EXEC_WARNINGS\n template<typename OutToInPortalType,\n typename VisitPortalType,\n typename InputPointPortal>\n VTKM_EXEC\n ThreadIndicesLineFractal(vtkm::Id threadIndex,\n const OutToInPortalType &outToIn,\n const VisitPortalType &visit,\n const InputPointPortal &inputPoints,\n vtkm::Id globalThreadIndexOffset=0)\n : Superclass(threadIndex,\n outToIn.Get(threadIndex),\n visit.Get(threadIndex),\n globalThreadIndexOffset)\n {\n this->Point0 = inputPoints.Get(this->GetInputIndex())[0];\n this->Point1 = inputPoints.Get(this->GetInputIndex())[1];\n }\n\n VTKM_EXEC\n const CoordinateType &GetPoint0() const\n {\n return this->Point0;\n }\n\n VTKM_EXEC\n const CoordinateType &GetPoint1() const\n {\n return this->Point1;\n }\n\nprivate:\n CoordinateType Point0;\n CoordinateType Point1;\n};\n\n}\n}\n} // namespace vtkm::exec::arg\n////\n//// END-EXAMPLE ThreadIndicesLineFractal.h\n////\n\n////\n//// BEGIN-EXAMPLE LineFractalTransform.h\n////\nnamespace vtkm {\nnamespace exec {\n\nclass LineFractalTransform\n{\n using VecType = vtkm::Vec<vtkm::FloatDefault,2>;\n\npublic:\n template<typename T>\n VTKM_EXEC\n LineFractalTransform(const vtkm::Vec<T,2> &point0,\n const vtkm::Vec<T,2> &point1)\n {\n this->Offset = point0;\n this->UAxis = point1 - point0;\n this->VAxis = vtkm::make_Vec(-this->UAxis[1], this->UAxis[0]);\n }\n\n template<typename T>\n VTKM_EXEC\n vtkm::Vec<T,2> operator()(const vtkm::Vec<T,2> &ppoint) const\n {\n VecType ppointCast(ppoint);\n VecType transform =\n ppointCast[0]*this->UAxis + ppointCast[1]*this->VAxis + this->Offset;\n 
return vtkm::Vec<T,2>(transform);\n }\n\n template<typename T>\n VTKM_EXEC\n vtkm::Vec<T,2> operator()(T x, T y) const\n {\n return (*this)(vtkm::Vec<T,2>(x,y));\n }\n\nprivate:\n VecType Offset;\n VecType UAxis;\n VecType VAxis;\n};\n\n}\n} // namespace vtkm::exec\n////\n//// END-EXAMPLE LineFractalTransform.h\n////\n\n////\n//// BEGIN-EXAMPLE InputDomainFetch.h\n////\nnamespace vtkm {\nnamespace exec {\nnamespace arg {\n\nstruct AspectTagLineFractalTransform { };\n\ntemplate<typename FetchTag, typename ExecObjectType>\nstruct Fetch<\n FetchTag,\n vtkm::exec::arg::AspectTagLineFractalTransform,\n vtkm::exec::arg::ThreadIndicesLineFractal,\n ExecObjectType>\n{\n using ValueType = LineFractalTransform;\n\n VTKM_SUPPRESS_EXEC_WARNINGS\n VTKM_EXEC\n ValueType Load(const vtkm::exec::arg::ThreadIndicesLineFractal &indices,\n const ExecObjectType &) const\n {\n return ValueType(indices.GetPoint0(), indices.GetPoint1());\n }\n\n VTKM_EXEC\n void Store(const vtkm::exec::arg::ThreadIndicesLineFractal &,\n const ExecObjectType &,\n const ValueType &) const\n {\n // Store is a no-op for this fetch.\n }\n};\n\n}\n}\n} // namespace vtkm::exec::arg\n////\n//// END-EXAMPLE InputDomainFetch.h\n////\n\n////\n//// BEGIN-EXAMPLE WorkletLineFractal.h\n////\nnamespace vtkm {\nnamespace worklet {\n\nclass WorkletLineFractal : public vtkm::worklet::internal::WorkletBase\n{\npublic:\n /// Control signature tag for line segments in the plane. Used as the input\n /// domain.\n ///\n ////\n //// BEGIN-EXAMPLE WorkletLineFractalInputDomainTag.cxx\n ////\n struct SegmentsIn : vtkm::cont::arg::ControlSignatureTagBase\n {\n using TypeCheckTag = vtkm::cont::arg::TypeCheckTag2DCoordinates;\n using TransportTag = vtkm::cont::arg::TransportTag2DLineSegmentsIn;\n using FetchTag = vtkm::exec::arg::FetchTag2DLineSegmentsIn;\n };\n ////\n //// END-EXAMPLE WorkletLineFractalInputDomainTag.cxx\n ////\n\n /// Control signature tag for a group of output line segments. 
The template\n /// argument specifies how many line segments are outputted for each input.\n /// The type is a Vec-like (of size NumSegments) of Vec-2's.\n ///\n ////\n //// BEGIN-EXAMPLE WorkletLineFractalOutputTag.cxx\n ////\n template<vtkm::IdComponent NumSegments>\n struct SegmentsOut : vtkm::cont::arg::ControlSignatureTagBase\n {\n using TypeCheckTag = vtkm::cont::arg::TypeCheckTag2DCoordinates;\n using TransportTag =\n vtkm::cont::arg::TransportTag2DLineSegmentsOut<NumSegments>;\n using FetchTag = vtkm::exec::arg::FetchTagArrayDirectOut;\n };\n ////\n //// END-EXAMPLE WorkletLineFractalOutputTag.cxx\n ////\n\n /// Control signature tag for input fields. There is one entry per input line\n /// segment. This tag takes a template argument that is a type list tag that\n /// limits the possible value types in the array.\n ///\n ////\n //// BEGIN-EXAMPLE WorkletLineFractalFieldInTag.cxx\n ////\n template<typename TypeList = AllTypes>\n struct FieldIn : vtkm::cont::arg::ControlSignatureTagBase {\n using TypeCheckTag = vtkm::cont::arg::TypeCheckTagArray<TypeList>;\n using TransportTag = vtkm::cont::arg::TransportTagArrayIn;\n using FetchTag = vtkm::exec::arg::FetchTagArrayDirectIn;\n };\n ////\n //// END-EXAMPLE WorkletLineFractalFieldInTag.cxx\n ////\n\n /// Control signature tag for input fields. There is one entry per input line\n /// segment. 
This tag takes a template argument that is a type list tag that\n /// limits the possible value types in the array.\n ///\n template<typename TypeList = AllTypes>\n struct FieldOut : vtkm::cont::arg::ControlSignatureTagBase {\n using TypeCheckTag = vtkm::cont::arg::TypeCheckTagArray<TypeList>;\n using TransportTag = vtkm::cont::arg::TransportTagArrayOut;\n using FetchTag = vtkm::exec::arg::FetchTagArrayDirectOut;\n };\n\n /// Execution signature tag for a LineFractalTransform from the input.\n ///\n ////\n //// BEGIN-EXAMPLE WorkletLineFractalTransformTag.cxx\n ////\n struct Transform : vtkm::exec::arg::ExecutionSignatureTagBase\n {\n static const vtkm::IdComponent INDEX = 1;\n using AspectTag = vtkm::exec::arg::AspectTagLineFractalTransform;\n };\n ////\n //// END-EXAMPLE WorkletLineFractalTransformTag.cxx\n ////\n\n ////\n //// BEGIN-EXAMPLE GetThreadIndices.cxx\n ////\n VTKM_SUPPRESS_EXEC_WARNINGS\n template<typename OutToInPortalType,\n typename VisitPortalType,\n typename InputDomainType>\n VTKM_EXEC\n vtkm::exec::arg::ThreadIndicesLineFractal\n GetThreadIndices(vtkm::Id threadIndex,\n const OutToInPortalType &outToIn,\n const VisitPortalType &visit,\n const InputDomainType &inputPoints,\n vtkm::Id globalThreadIndexOffset) const\n {\n return vtkm::exec::arg::ThreadIndicesLineFractal(\n threadIndex,\n outToIn,\n visit,\n inputPoints,\n globalThreadIndexOffset);\n }\n ////\n //// END-EXAMPLE GetThreadIndices.cxx\n ////\n};\n\n}\n} // namespace vtkm::worklet\n////\n//// END-EXAMPLE WorkletLineFractal.h\n////\n\n////\n//// BEGIN-EXAMPLE DispatcherLineFractal.h\n////\nnamespace vtkm {\nnamespace worklet {\n\n////\n//// BEGIN-EXAMPLE DispatcherSuperclass.cxx\n////\n////\n//// BEGIN-EXAMPLE DispatcherTemplate.cxx\n////\ntemplate<typename WorkletType,\n typename Device = VTKM_DEFAULT_DEVICE_ADAPTER_TAG>\nclass DispatcherLineFractal\n////\n//// END-EXAMPLE DispatcherTemplate.cxx\n////\n : public vtkm::worklet::internal::DispatcherBase<\n 
DispatcherLineFractal<WorkletType, Device>,\n WorkletType,\n vtkm::worklet::WorkletLineFractal\n >\n////\n//// END-EXAMPLE DispatcherSuperclass.cxx\n////\n{\n using Superclass =\n vtkm::worklet::internal::DispatcherBase<\n DispatcherLineFractal<WorkletType, Device>,\n WorkletType,\n vtkm::worklet::WorkletLineFractal\n >;\n\npublic:\n ////\n //// BEGIN-EXAMPLE DispatcherConstructor.cxx\n ////\n VTKM_CONT\n DispatcherLineFractal(const WorkletType &worklet = WorkletType())\n : Superclass(worklet)\n { }\n ////\n //// END-EXAMPLE DispatcherConstructor.cxx\n ////\n\n ////\n //// BEGIN-EXAMPLE DispatcherDoInvokePrototype.cxx\n ////\n template<typename Invocation>\n VTKM_CONT\n void DoInvoke(const Invocation &invocation) const\n ////\n //// END-EXAMPLE DispatcherDoInvokePrototype.cxx\n ////\n {\n ////\n //// BEGIN-EXAMPLE CheckInputDomainType.cxx\n ////\n // Get the control signature tag for the input domain.\n using InputDomainTag = typename Invocation::InputDomainTag;\n\n // If you get a compile error on this line, then you have set the input\n // domain to something that is not a SegmentsIn parameter, which is not\n // valid.\n VTKM_STATIC_ASSERT((std::is_same<\n InputDomainTag,\n vtkm::worklet::WorkletLineFractal::SegmentsIn\n >::value));\n\n // This is the type for the input domain\n using InputDomainType = typename Invocation::InputDomainType;\n\n // If you get a compile error on this line, then you have tried to use\n // something that is not a vtkm::cont::ArrayHandle as the input domain to a\n // topology operation (that operates on a cell set connection domain).\n VTKM_IS_ARRAY_HANDLE(InputDomainType);\n ////\n //// END-EXAMPLE CheckInputDomainType.cxx\n ////\n\n ////\n //// BEGIN-EXAMPLE CallBasicInvoke.cxx\n ////\n // We can pull the input domain parameter (the data specifying the input\n // domain) from the invocation object.\n const InputDomainType &inputDomain = invocation.GetInputDomain();\n\n // Now that we have the input domain, we can extract the range of 
the\n // scheduling and call BasicInvoke.\n this->BasicInvoke(invocation,\n inputDomain.GetNumberOfValues()/2,\n Device());\n ////\n //// END-EXAMPLE CallBasicInvoke.cxx\n ////\n }\n};\n\n}\n} // namespace vtkm::worklet\n////\n//// END-EXAMPLE DispatcherLineFractal.h\n////\n\n////\n//// BEGIN-EXAMPLE KochSnowflake.cxx\n////\nstruct KochSnowflake\n{\n struct FractalWorklet : vtkm::worklet::WorkletLineFractal\n {\n typedef void ControlSignature(SegmentsIn, SegmentsOut<4>);\n typedef void ExecutionSignature(Transform, _2);\n using InputDomain = _1;\n\n template<typename SegmentsOutVecType>\n void operator()(const vtkm::exec::LineFractalTransform &transform,\n SegmentsOutVecType &segmentsOutVec) const\n {\n segmentsOutVec[0][0] = transform(0.00f, 0.00f);\n segmentsOutVec[0][1] = transform(0.33f, 0.00f);\n\n segmentsOutVec[1][0] = transform(0.33f, 0.00f);\n segmentsOutVec[1][1] = transform(0.50f, 0.29f);\n\n segmentsOutVec[2][0] = transform(0.50f, 0.29f);\n segmentsOutVec[2][1] = transform(0.67f, 0.00f);\n\n segmentsOutVec[3][0] = transform(0.67f, 0.00f);\n segmentsOutVec[3][1] = transform(1.00f, 0.00f);\n }\n };\n\n template<typename Device>\n VTKM_CONT\n static vtkm::cont::ArrayHandle<vtkm::Vec<vtkm::FloatDefault,2> >\n Run(vtkm::IdComponent numIterations, Device)\n {\n using VecType = vtkm::Vec<vtkm::Float32,2>;\n\n vtkm::cont::ArrayHandle<VecType> points;\n\n // Initialize points array with a single line\n points.Allocate(2);\n points.GetPortalControl().Set(0, VecType(0.0f, 0.0f));\n points.GetPortalControl().Set(1, VecType(1.0f, 0.0f));\n\n vtkm::worklet::DispatcherLineFractal<KochSnowflake::FractalWorklet, Device>\n dispatcher;\n\n for (vtkm::IdComponent i = 0; i < numIterations; ++i)\n {\n vtkm::cont::ArrayHandle<VecType> outPoints;\n dispatcher.Invoke(points, outPoints);\n points = outPoints;\n }\n\n return points;\n }\n};\n////\n//// END-EXAMPLE KochSnowflake.cxx\n////\n\nstatic void TryKoch()\n{\n // Demonstrate a single line.\n using VecType = 
vtkm::Vec<vtkm::Float32,2>;\n vtkm::cont::ArrayHandle<VecType> points;\n\n points = KochSnowflake::Run(1, VTKM_DEFAULT_DEVICE_ADAPTER_TAG());\n WriteSVG(\"Koch1.svg\", points);\n\n for (vtkm::Id index = 0; index < points.GetNumberOfValues()/2; ++index)\n {\n std::cout << index << \": \"\n << points.GetPortalConstControl().Get(index*2+0) << \" \"\n << points.GetPortalConstControl().Get(index*2+1) << std::endl;\n }\n\n points = KochSnowflake::Run(2, VTKM_DEFAULT_DEVICE_ADAPTER_TAG());\n WriteSVG(\"Koch2.svg\", points);\n\n for (vtkm::Id index = 0; index < points.GetNumberOfValues()/2; ++index)\n {\n std::cout << index << \": \"\n << points.GetPortalConstControl().Get(index*2+0) << \" \"\n << points.GetPortalConstControl().Get(index*2+1) << std::endl;\n }\n\n points = KochSnowflake::Run(5, VTKM_DEFAULT_DEVICE_ADAPTER_TAG());\n WriteSVG(\"Koch5.svg\", points, 0.1f);\n}\n\n////\n//// BEGIN-EXAMPLE QuadraticType2.cxx\n////\nstruct QuadraticType2\n{\n struct FractalWorklet : vtkm::worklet::WorkletLineFractal\n {\n typedef void ControlSignature(SegmentsIn, SegmentsOut<8>);\n typedef void ExecutionSignature(Transform, _2);\n using InputDomain = _1;\n\n template<typename SegmentsOutVecType>\n void operator()(const vtkm::exec::LineFractalTransform &transform,\n SegmentsOutVecType &segmentsOutVec) const\n {\n segmentsOutVec[0][0] = transform(0.00f, 0.00f);\n segmentsOutVec[0][1] = transform(0.25f, 0.00f);\n\n segmentsOutVec[1][0] = transform(0.25f, 0.00f);\n segmentsOutVec[1][1] = transform(0.25f, 0.25f);\n\n segmentsOutVec[2][0] = transform(0.25f, 0.25f);\n segmentsOutVec[2][1] = transform(0.50f, 0.25f);\n\n segmentsOutVec[3][0] = transform(0.50f, 0.25f);\n segmentsOutVec[3][1] = transform(0.50f, 0.00f);\n\n segmentsOutVec[4][0] = transform(0.50f, 0.00f);\n segmentsOutVec[4][1] = transform(0.50f, -0.25f);\n\n segmentsOutVec[5][0] = transform(0.50f, -0.25f);\n segmentsOutVec[5][1] = transform(0.75f, -0.25f);\n\n segmentsOutVec[6][0] = transform(0.75f, -0.25f);\n 
segmentsOutVec[6][1] = transform(0.75f, 0.00f);\n\n segmentsOutVec[7][0] = transform(0.75f, 0.00f);\n segmentsOutVec[7][1] = transform(1.00f, 0.00f);\n }\n };\n\n template<typename Device>\n VTKM_CONT\n static vtkm::cont::ArrayHandle<vtkm::Vec<vtkm::FloatDefault,2> >\n Run(vtkm::IdComponent numIterations, Device)\n {\n using VecType = vtkm::Vec<vtkm::Float32,2>;\n\n vtkm::cont::ArrayHandle<VecType> points;\n\n // Initialize points array with a single line\n points.Allocate(2);\n points.GetPortalControl().Set(0, VecType(0.0f, 0.0f));\n points.GetPortalControl().Set(1, VecType(1.0f, 0.0f));\n\n vtkm::worklet::DispatcherLineFractal<QuadraticType2::FractalWorklet, Device>\n dispatcher;\n\n for (vtkm::IdComponent i = 0; i < numIterations; ++i)\n {\n vtkm::cont::ArrayHandle<VecType> outPoints;\n dispatcher.Invoke(points, outPoints);\n points = outPoints;\n }\n\n return points;\n }\n};\n////\n//// END-EXAMPLE QuadraticType2.cxx\n////\n\nstatic void TryQuadraticType2()\n{\n // Demonstrate a single line.\n using VecType = vtkm::Vec<vtkm::Float32,2>;\n vtkm::cont::ArrayHandle<VecType> points;\n\n points = QuadraticType2::Run(1, VTKM_DEFAULT_DEVICE_ADAPTER_TAG());\n WriteSVG(\"QuadraticType2_1.svg\", points);\n\n for (vtkm::Id index = 0; index < points.GetNumberOfValues()/2; ++index)\n {\n std::cout << index << \": \"\n << points.GetPortalConstControl().Get(index*2+0) << \" \"\n << points.GetPortalConstControl().Get(index*2+1) << std::endl;\n }\n\n points = QuadraticType2::Run(2, VTKM_DEFAULT_DEVICE_ADAPTER_TAG());\n WriteSVG(\"QuadraticType2_2.svg\", points);\n\n for (vtkm::Id index = 0; index < points.GetNumberOfValues()/2; ++index)\n {\n std::cout << index << \": \"\n << points.GetPortalConstControl().Get(index*2+0) << \" \"\n << points.GetPortalConstControl().Get(index*2+1) << std::endl;\n }\n\n points = QuadraticType2::Run(4, VTKM_DEFAULT_DEVICE_ADAPTER_TAG());\n WriteSVG(\"QuadraticType2_4.svg\", points, 0.1f);\n}\n\n////\n//// BEGIN-EXAMPLE 
DragonFractal.cxx\n////\nstruct DragonFractal\n{\n struct FractalWorklet : vtkm::worklet::WorkletLineFractal\n {\n typedef void ControlSignature(SegmentsIn, SegmentsOut<2>);\n typedef void ExecutionSignature(Transform, _2);\n using InputDomain = _1;\n\n template<typename SegmentsOutVecType>\n void operator()(const vtkm::exec::LineFractalTransform &transform,\n SegmentsOutVecType &segmentsOutVec) const\n {\n segmentsOutVec[0][0] = transform(0.5f, 0.5f);\n segmentsOutVec[0][1] = transform(0.0f, 0.0f);\n\n segmentsOutVec[1][0] = transform(0.5f, 0.5f);\n segmentsOutVec[1][1] = transform(1.0f, 0.0f);\n }\n };\n\n template<typename Device>\n VTKM_CONT\n static vtkm::cont::ArrayHandle<vtkm::Vec<vtkm::FloatDefault,2> >\n Run(vtkm::IdComponent numIterations, Device)\n {\n using VecType = vtkm::Vec<vtkm::Float32,2>;\n\n vtkm::cont::ArrayHandle<VecType> points;\n\n // Initialize points array with a single line\n points.Allocate(2);\n points.GetPortalControl().Set(0, VecType(0.0f, 0.0f));\n points.GetPortalControl().Set(1, VecType(1.0f, 0.0f));\n\n vtkm::worklet::DispatcherLineFractal<DragonFractal::FractalWorklet, Device>\n dispatcher;\n\n for (vtkm::IdComponent i = 0; i < numIterations; ++i)\n {\n vtkm::cont::ArrayHandle<VecType> outPoints;\n dispatcher.Invoke(points, outPoints);\n points = outPoints;\n }\n\n return points;\n }\n};\n////\n//// END-EXAMPLE DragonFractal.cxx\n////\n\nstatic void TryDragon()\n{\n // Demonstrate a single line.\n using VecType = vtkm::Vec<vtkm::Float32,2>;\n vtkm::cont::ArrayHandle<VecType> points;\n\n for (vtkm::IdComponent numIterations = 1;\n numIterations <= 13;\n ++numIterations)\n {\n points = DragonFractal::Run(numIterations,\n VTKM_DEFAULT_DEVICE_ADAPTER_TAG());\n char filename[FILENAME_MAX];\n sprintf(filename, \"Dragon%02d.svg\", numIterations);\n WriteSVG(filename, points, 2.0f/numIterations);\n }\n}\n\n////\n//// BEGIN-EXAMPLE HilbertCurve.cxx\n////\nstruct HilbertCurve\n{\n struct FractalWorklet : vtkm::worklet::WorkletLineFractal\n 
{\n typedef void ControlSignature(SegmentsIn,\n FieldIn<> directionIn,\n SegmentsOut<4>,\n FieldOut<> directionOut);\n typedef void ExecutionSignature(Transform, _2, _3, _4);\n using InputDomain = _1;\n\n template<typename SegmentsOutVecType>\n void operator()(const vtkm::exec::LineFractalTransform &transform,\n vtkm::Int8 directionIn,\n SegmentsOutVecType &segmentsOutVec,\n vtkm::Vec<vtkm::Int8,4> &directionOut) const\n {\n segmentsOutVec[0][0] = transform(0.0f, directionIn*0.0f);\n segmentsOutVec[0][1] = transform(0.0f, directionIn*0.5f);\n directionOut[0] = -directionIn;\n\n segmentsOutVec[1][0] = transform(0.0f, directionIn*0.5f);\n segmentsOutVec[1][1] = transform(0.5f, directionIn*0.5f);\n directionOut[1] = directionIn;\n\n segmentsOutVec[2][0] = transform(0.5f, directionIn*0.5f);\n segmentsOutVec[2][1] = transform(1.0f, directionIn*0.5f);\n directionOut[2] = directionIn;\n\n segmentsOutVec[3][0] = transform(1.0f, directionIn*0.5f);\n segmentsOutVec[3][1] = transform(1.0f, directionIn*0.0f);\n directionOut[3] = -directionIn;\n }\n };\n\n template<typename Device>\n VTKM_CONT\n static vtkm::cont::ArrayHandle<vtkm::Vec<vtkm::FloatDefault,2> >\n Run(vtkm::IdComponent numIterations, Device)\n {\n using VecType = vtkm::Vec<vtkm::Float32,2>;\n\n vtkm::cont::ArrayHandle<VecType> points;\n\n // Initialize points array with a single line\n points.Allocate(2);\n points.GetPortalControl().Set(0, VecType(0.0f, 0.0f));\n points.GetPortalControl().Set(1, VecType(1.0f, 0.0f));\n\n vtkm::cont::ArrayHandle<vtkm::Int8> directions;\n\n // Initialize direction with positive.\n directions.Allocate(1);\n directions.GetPortalControl().Set(0, 1);\n\n vtkm::worklet::DispatcherLineFractal<HilbertCurve::FractalWorklet, Device>\n dispatcher;\n\n for (vtkm::IdComponent i = 0; i < numIterations; ++i)\n {\n vtkm::cont::ArrayHandle<VecType> outPoints;\n vtkm::cont::ArrayHandle<vtkm::Int8> outDirections;\n dispatcher.Invoke(points,\n directions,\n outPoints,\n 
vtkm::cont::make_ArrayHandleGroupVec<4>(outDirections));\n points = outPoints;\n directions = outDirections;\n }\n\n return points;\n }\n};\n////\n//// END-EXAMPLE HilbertCurve.cxx\n////\n\nstatic void TryHilbert()\n{\n // Demonstrate a single line.\n using VecType = vtkm::Vec<vtkm::Float32,2>;\n vtkm::cont::ArrayHandle<VecType> points;\n\n for (vtkm::IdComponent numIterations = 1;\n numIterations <= 6;\n ++numIterations)\n {\n points = HilbertCurve::Run(numIterations,\n VTKM_DEFAULT_DEVICE_ADAPTER_TAG());\n char filename[FILENAME_MAX];\n sprintf(filename, \"Hilbert%02d.svg\", numIterations);\n WriteSVG(filename, points, 2.0f/numIterations);\n }\n}\n\n////\n//// BEGIN-EXAMPLE TreeFractal.cxx\n////\nstruct TreeFractal\n{\n struct FractalWorklet : vtkm::worklet::WorkletLineFractal\n {\n typedef void ControlSignature(SegmentsIn,\n SegmentsOut<1>,\n FieldOut<> countNextIteration);\n typedef void ExecutionSignature(Transform, VisitIndex, _2, _3);\n using InputDomain = _1;\n\n using ScatterType = vtkm::worklet::ScatterCounting;\n VTKM_CONT\n ScatterType GetScatter() const { return this->Scatter; }\n\n template<typename Storage, typename Device>\n VTKM_CONT\n FractalWorklet(\n const vtkm::cont::ArrayHandle<vtkm::IdComponent,Storage> &count,\n Device)\n : Scatter(count, Device())\n { }\n\n template<typename SegmentsOutVecType>\n void operator()(const vtkm::exec::LineFractalTransform &transform,\n vtkm::IdComponent visitIndex,\n SegmentsOutVecType &segmentsOutVec,\n vtkm::IdComponent &countNextIteration) const\n {\n switch (visitIndex)\n {\n case 0:\n segmentsOutVec[0][0] = transform(0.0f, 0.0f);\n segmentsOutVec[0][1] = transform(1.0f, 0.0f);\n countNextIteration = 1;\n break;\n case 1:\n segmentsOutVec[0][0] = transform(1.0f, 0.0f);\n segmentsOutVec[0][1] = transform(1.5f, -0.25f);\n countNextIteration = 3;\n break;\n case 2:\n segmentsOutVec[0][0] = transform(1.0f, 0.0f);\n segmentsOutVec[0][1] = transform(1.5f, 0.35f);\n countNextIteration = 3;\n break;\n default:\n 
this->RaiseError(\"Unexpected visit index.\");\n }\n }\n\n private:\n ScatterType Scatter;\n };\n\n template<typename Device>\n VTKM_CONT\n static vtkm::cont::ArrayHandle<vtkm::Vec<vtkm::FloatDefault,2> >\n Run(vtkm::IdComponent numIterations, Device)\n {\n using VecType = vtkm::Vec<vtkm::Float32,2>;\n\n vtkm::cont::ArrayHandle<VecType> points;\n\n // Initialize points array with a single line\n points.Allocate(2);\n points.GetPortalControl().Set(0, VecType(0.0f, 0.0f));\n points.GetPortalControl().Set(1, VecType(0.0f, 1.0f));\n\n vtkm::cont::ArrayHandle<vtkm::IdComponent> count;\n\n // Initialize count array with 3 (meaning iterate)\n count.Allocate(1);\n count.GetPortalControl().Set(0, 3);\n\n for (vtkm::IdComponent i = 0; i < numIterations; ++i)\n {\n vtkm::worklet::DispatcherLineFractal<TreeFractal::FractalWorklet, Device>\n dispatcher(FractalWorklet(count, Device()));\n\n vtkm::cont::ArrayHandle<VecType> outPoints;\n dispatcher.Invoke(points, outPoints, count);\n points = outPoints;\n }\n\n return points;\n }\n};\n////\n//// END-EXAMPLE TreeFractal.cxx\n////\n\nstatic void TryTree()\n{\n // Demonstrate a single line.\n using VecType = vtkm::Vec<vtkm::Float32,2>;\n vtkm::cont::ArrayHandle<VecType> points;\n\n for (vtkm::IdComponent numIterations = 1;\n numIterations <= 8;\n ++numIterations)\n {\n points = TreeFractal::Run(numIterations, VTKM_DEFAULT_DEVICE_ADAPTER_TAG());\n char filename[FILENAME_MAX];\n sprintf(filename, \"Tree%02d.svg\", numIterations);\n WriteSVG(filename, points, 2.0f/numIterations);\n }\n}\n\nstatic void RunTests()\n{\n TryVecLineSegments();\n TryKoch();\n TryQuadraticType2();\n TryDragon();\n TryHilbert();\n TryTree();\n}\n\nint FractalWorklets(int, char *[])\n{\n return vtkm::cont::testing::Testing::Run(RunTests);\n}\n"
},
{
"alpha_fraction": 0.6876470446586609,
"alphanum_fraction": 0.7041176557540894,
"avg_line_length": 24,
"blob_id": "5021191a6594d9acd0c4895caa43a0f43a55bdd9",
"content_id": "e96c70079e316f611aad3bdaf083b9ad31a03ea5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 3400,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 136,
"path": "/examples/ProvidedFilters.cxx",
"repo_name": "sujin-philip/VTKmUsersGuide",
"src_encoding": "UTF-8",
"text": "#include <vtkm/filter/MarchingCubes.h>\n#include <vtkm/filter/PointElevation.h>\n#include <vtkm/filter/VertexClustering.h>\n\n#include <vtkm/cont/testing/MakeTestDataSet.h>\n#include <vtkm/cont/testing/Testing.h>\n\nnamespace {\n\n////\n//// BEGIN-EXAMPLE PointElevation.cxx\n////\nVTKM_CONT\nvtkm::cont::DataSet ComputeAirPressure(vtkm::cont::DataSet dataSet)\n{\n vtkm::filter::PointElevation elevationFilter;\n\n // Use the elevation filter to estimate atmospheric pressure based on the\n // height of the point coordinates. Atmospheric pressure is 101325 Pa at\n // sea level and drops about 12 Pa per meter.\n elevationFilter.SetOutputFieldName(\"pressure\");\n elevationFilter.SetLowPoint(0.0, 0.0, 0.0);\n elevationFilter.SetHighPoint(0.0, 0.0, 2000.0);\n elevationFilter.SetRange(101325.0, 77325.0);\n\n vtkm::filter::ResultField result =\n elevationFilter.Execute(dataSet, dataSet.GetCoordinateSystem());\n\n if (!result.IsValid())\n {\n throw vtkm::cont::ErrorBadValue(\"Failed to run elevation filter.\");\n }\n\n return result.GetDataSet();\n}\n////\n//// END-EXAMPLE PointElevation.cxx\n////\n\nvoid DoPointElevation()\n{\n vtkm::cont::testing::MakeTestDataSet makeData;\n vtkm::cont::DataSet inData = makeData.Make3DRegularDataSet0();\n\n vtkm::cont::DataSet pressureData = ComputeAirPressure(inData);\n\n pressureData.GetField(\"pressure\").PrintSummary(std::cout);\n std::cout << std::endl;\n}\n\nvoid DoVertexClustering()\n{\n vtkm::cont::testing::MakeTestDataSet makeData;\n vtkm::cont::DataSet originalSurface = makeData.Make3DExplicitDataSetCowNose();\n\n ////\n //// BEGIN-EXAMPLE VertexClustering.cxx\n ////\n vtkm::filter::VertexClustering vertexClustering;\n\n vertexClustering.SetNumberOfDivisions(vtkm::Id3(128,128,128));\n\n vtkm::filter::ResultDataSet result =\n vertexClustering.Execute(originalSurface);\n\n if (!result.IsValid())\n {\n throw vtkm::cont::ErrorBadValue(\"Failed to run vertex clustering.\");\n }\n\n for (vtkm::IdComponent fieldIndex = 0;\n 
fieldIndex < originalSurface.GetNumberOfFields();\n fieldIndex++)\n {\n vertexClustering.MapFieldOntoOutput(result,\n originalSurface.GetField(fieldIndex));\n }\n\n vtkm::cont::DataSet simplifiedSurface = result.GetDataSet();\n ////\n //// END-EXAMPLE VertexClustering.cxx\n ////\n\n simplifiedSurface.PrintSummary(std::cout);\n std::cout << std::endl;\n}\n\nvoid DoMarchingCubes()\n{\n vtkm::cont::testing::MakeTestDataSet makeData;\n vtkm::cont::DataSet inData = makeData.Make3DRectilinearDataSet0();\n\n ////\n //// BEGIN-EXAMPLE MarchingCubes.cxx\n ////\n vtkm::filter::MarchingCubes marchingCubes;\n\n marchingCubes.SetIsoValue(10.0);\n\n vtkm::filter::ResultDataSet result =\n marchingCubes.Execute(inData, \"pointvar\");\n\n if (!result.IsValid())\n {\n throw vtkm::cont::ErrorBadValue(\"Failed to run Marching Cubes.\");\n }\n\n for (vtkm::IdComponent fieldIndex = 0;\n fieldIndex < inData.GetNumberOfFields();\n fieldIndex++)\n {\n marchingCubes.MapFieldOntoOutput(result, inData.GetField(fieldIndex));\n }\n\n vtkm::cont::DataSet isosurface = result.GetDataSet();\n ////\n //// END-EXAMPLE MarchingCubes.cxx\n ////\n\n isosurface.PrintSummary(std::cout);\n std::cout << std::endl;\n}\n\nvoid Test()\n{\n DoPointElevation();\n DoVertexClustering();\n DoMarchingCubes();\n}\n\n} // anonymous namespace\n\nint ProvidedFilters(int, char *[])\n{\n return vtkm::cont::testing::Testing::Run(Test);\n}\n"
},
{
"alpha_fraction": 0.6369606256484985,
"alphanum_fraction": 0.6519699692726135,
"avg_line_length": 25.987340927124023,
"blob_id": "1c2aef5c76d52315f5e20a60e7b0421cb405a6b1",
"content_id": "abd4ffbbc929c750d07f74a37ec8d69ba4a2096c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2132,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 79,
"path": "/examples/ScatterUniform.cxx",
"repo_name": "sujin-philip/VTKmUsersGuide",
"src_encoding": "UTF-8",
"text": "#include <vtkm/cont/ArrayHandleConstant.h>\n\n#include <vtkm/worklet/DispatcherMapField.h>\n#include <vtkm/worklet/ScatterUniform.h>\n#include <vtkm/worklet/WorkletMapField.h>\n\n#include <vtkm/cont/testing/Testing.h>\n\nnamespace {\n\n////\n//// BEGIN-EXAMPLE ScatterUniform.cxx\n////\nstruct InterleaveArrays : vtkm::worklet::WorkletMapField\n{\n typedef void ControlSignature(FieldIn<>, FieldIn<>, FieldOut<>);\n typedef void ExecutionSignature(_1, _2, _3, VisitIndex);\n using InputDomain = _1;\n\n using ScatterType = vtkm::worklet::ScatterUniform;\n\n VTKM_CONT\n ScatterType GetScatter() const { return vtkm::worklet::ScatterUniform(2); }\n\n template<typename T>\n VTKM_EXEC\n void operator()(const T &input0,\n const T &input1,\n T &output,\n vtkm::IdComponent visitIndex) const\n {\n if (visitIndex == 0)\n {\n output = input0;\n }\n else // visitIndex == 1\n {\n output = input1;\n }\n }\n};\n////\n//// END-EXAMPLE ScatterUniform.cxx\n////\n\nvoid Run()\n{\n std::cout << \"Trying scatter uniform with array interleave.\" << std::endl;\n\n static const vtkm::Id ARRAY_SIZE = 10;\n static const vtkm::Id value0 = 8;\n static const vtkm::Id value1 = 42;\n\n vtkm::cont::ArrayHandle<vtkm::Id> outArray;\n\n vtkm::worklet::DispatcherMapField<InterleaveArrays> dispatcher;\n dispatcher.Invoke(vtkm::cont::make_ArrayHandleConstant(value0, ARRAY_SIZE),\n vtkm::cont::make_ArrayHandleConstant(value1, ARRAY_SIZE),\n outArray);\n\n vtkm::cont::printSummary_ArrayHandle(outArray, std::cout);\n std::cout << std::endl;\n VTKM_TEST_ASSERT(outArray.GetNumberOfValues() == ARRAY_SIZE*2,\n \"Wrong sized array.\");\n for (vtkm::Id index = 0; index < ARRAY_SIZE; index++)\n {\n vtkm::Id v0 = outArray.GetPortalConstControl().Get(2*index+0);\n VTKM_TEST_ASSERT(v0 == value0, \"Bad value in array.\");\n vtkm::Id v1 = outArray.GetPortalConstControl().Get(2*index+1);\n VTKM_TEST_ASSERT(v1 == value1, \"Bad value in array.\");\n }\n}\n\n} // anonymous namespace\n\nint ScatterUniform(int, 
char*[])\n{\n return vtkm::cont::testing::Testing::Run(Run);\n}\n"
},
{
"alpha_fraction": 0.4998529851436615,
"alphanum_fraction": 0.5450357794761658,
"avg_line_length": 31.597444534301758,
"blob_id": "31c1455729f6ce31b57960c351820f991de386f6",
"content_id": "8092d4a018fbc65571ef842218a71528e8c49758",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 10203,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 313,
"path": "/examples/CoreDataTypes.cxx",
"repo_name": "sujin-philip/VTKmUsersGuide",
"src_encoding": "UTF-8",
"text": "#include <vtkm/Range.h>\n#include <vtkm/Types.h>\n#include <vtkm/VecVariable.h>\n\n#include <vtkm/testing/Testing.h>\n\nnamespace {\n\nvoid CreatingVectorTypes()\n{\n ////\n //// BEGIN-EXAMPLE CreatingVectorTypes.cxx\n ////\n vtkm::Vec<vtkm::Float32,3> A(1); // A is (1, 1, 1)\n A[1] = 2; // A is now (1, 2, 1)\n vtkm::Vec<vtkm::Float32,3> B(1, 2, 3); // B is (1, 2, 3)\n vtkm::Vec<vtkm::Float32,3> C = vtkm::make_Vec(3, 4, 5); // C is (3, 4, 5)\n ////\n //// END-EXAMPLE CreatingVectorTypes.cxx\n ////\n\n VTKM_TEST_ASSERT((A[0] == 1) && (A[1] == 2) && (A[2] == 1),\n \"A is different than expected.\");\n VTKM_TEST_ASSERT((B[0] == 1) && (B[1] == 2) && (B[2] == 3),\n \"B is different than expected.\");\n VTKM_TEST_ASSERT((C[0] == 3) && (C[1] == 4) && (C[2] == 5),\n \"C is different than expected.\");\n}\n\nvoid VectorOperations()\n{\n ////\n //// BEGIN-EXAMPLE VectorOperations.cxx\n ////\n vtkm::Vec<vtkm::Float32,3> A(1, 2, 3);\n vtkm::Vec<vtkm::Float32,3> B(4, 5, 6.5);\n vtkm::Vec<vtkm::Float32,3> C = A + B; // C is (5, 7, 9.5)\n vtkm::Vec<vtkm::Float32,3> D = 2.0f * C; // D is (10, 14, 19)\n vtkm::Float32 s = vtkm::dot(A, B); // s is 33.5\n bool b1 = (A == B); // b1 is false\n bool b2 = (A == vtkm::make_Vec(1, 2, 3)); // b2 is true\n ////\n //// END-EXAMPLE VectorOperations.cxx\n ////\n\n VTKM_TEST_ASSERT(test_equal(C, vtkm::Vec<vtkm::Float32,3>(5, 7, 9.5)), \"C is wrong\");\n VTKM_TEST_ASSERT(test_equal(D, vtkm::Vec<vtkm::Float32,3>(10, 14, 19)), \"D is wrong\");\n VTKM_TEST_ASSERT(test_equal(s, 33.5), \"s is wrong\");\n VTKM_TEST_ASSERT(!b1, \"b1 is wrong\");\n VTKM_TEST_ASSERT(b2, \"b2 is wrong\");\n}\n\nvoid LongerVector()\n{\n ////\n //// BEGIN-EXAMPLE LongerVector.cxx\n ////\n vtkm::Vec<vtkm::Float64, 5> A(2); // A is (2, 2, 2, 2, 2)\n for (vtkm::IdComponent index = 1; index < A.NUM_COMPONENTS; index++)\n {\n A[index] = A[index-1] * 1.5;\n }\n // A is now (2, 3, 4.5, 6.75, 10.125)\n ////\n //// END-EXAMPLE LongerVector.cxx\n ////\n\n VTKM_TEST_ASSERT(\n 
(A[0] == 2)\n && (A[1] == 3)\n && (A[2] == 4.5)\n && (A[3] == 6.75)\n && (A[4] == 10.125),\n \"A is wrong\");\n}\n\nvoid EquilateralTriangle()\n{\n ////\n //// BEGIN-EXAMPLE EquilateralTriangle.cxx\n ////\n vtkm::Vec<vtkm::Vec<vtkm::Float32,2>, 3> equilateralTriangle(\n vtkm::make_Vec(0.0, 0.0),\n vtkm::make_Vec(1.0, 0.0),\n vtkm::make_Vec(0.5, 0.8660254));\n ////\n //// END-EXAMPLE EquilateralTriangle.cxx\n ////\n\n vtkm::Float32 edgeLengthSqr = 1.0;\n vtkm::Vec<vtkm::Vec<vtkm::Float32,2>,3> edges(\n equilateralTriangle[1] - equilateralTriangle[0],\n equilateralTriangle[2] - equilateralTriangle[0],\n equilateralTriangle[2] - equilateralTriangle[1]);\n VTKM_TEST_ASSERT(test_equal(vtkm::dot(edges[0],edges[0]), edgeLengthSqr),\n \"Bad edge length.\");\n VTKM_TEST_ASSERT(test_equal(vtkm::dot(edges[1],edges[1]), edgeLengthSqr),\n \"Bad edge length.\");\n VTKM_TEST_ASSERT(test_equal(vtkm::dot(edges[2],edges[2]), edgeLengthSqr),\n \"Bad edge length.\");\n}\n\n////\n//// BEGIN-EXAMPLE VecCExample.cxx\n////\nVTKM_EXEC_CONSTANT\nstatic const vtkm::IdComponent HexagonIndexToIJKTable[8][3] = {\n { 0, 0, 0 },\n { 1, 0, 0 },\n { 1, 1, 0 },\n { 0, 1, 0 },\n { 0, 0, 1 },\n { 1, 0, 1 },\n { 1, 1, 1 },\n { 0, 1, 1 }\n};\n\nVTKM_EXEC_CONSTANT\nstatic const vtkm::IdComponent HexagonIJKToIndexTable[2][2][2] = {\n { // i=0\n { 0, 4 }, // j=0\n { 3, 7 }, // j=1\n },\n { // i=1\n { 1, 5 }, // j=0\n { 2, 6 }, // j=1\n }\n};\n\nVTKM_EXEC\nvtkm::VecCConst<vtkm::IdComponent> HexagonIndexToIJK(vtkm::IdComponent index)\n{\n return vtkm::make_VecC(HexagonIndexToIJKTable[index], 3);\n}\n\nVTKM_EXEC\nvtkm::IdComponent HexagonIJKToIndex(vtkm::VecCConst<vtkm::IdComponent> ijk)\n{\n return HexagonIJKToIndexTable[ijk[0]][ijk[1]][ijk[2]];\n}\n////\n//// END-EXAMPLE VecCExample.cxx\n////\n\n////\n//// BEGIN-EXAMPLE VecVariableExample.cxx\n////\nvtkm::VecVariable<vtkm::IdComponent,4>\nHexagonShortestPath(vtkm::IdComponent startPoint, vtkm::IdComponent endPoint)\n{\n vtkm::VecCConst<vtkm::IdComponent> 
startIJK = HexagonIndexToIJK(startPoint);\n vtkm::VecCConst<vtkm::IdComponent> endIJK = HexagonIndexToIJK(endPoint);\n\n vtkm::Vec<vtkm::IdComponent,3> currentIJK;\n startIJK.CopyInto(currentIJK);\n\n vtkm::VecVariable<vtkm::IdComponent,4> path;\n path.Append(startPoint);\n for (vtkm::IdComponent dimension = 0; dimension < 3; dimension++)\n {\n if (currentIJK[dimension] != endIJK[dimension])\n {\n currentIJK[dimension] = endIJK[dimension];\n path.Append(HexagonIJKToIndex(currentIJK));\n }\n }\n\n return path;\n}\n////\n//// END-EXAMPLE VecVariableExample.cxx\n////\n\nvoid UsingVecCAndVecVariable()\n{\n vtkm::VecVariable<vtkm::IdComponent,4> path;\n\n path = HexagonShortestPath(2, 2);\n VTKM_TEST_ASSERT(test_equal(path, vtkm::Vec<vtkm::IdComponent,1>(2)),\n \"Bad path\");\n\n path = HexagonShortestPath(0, 7);\n VTKM_TEST_ASSERT(test_equal(path, vtkm::Vec<vtkm::IdComponent,3>(0,3,7)),\n \"Bad path\");\n\n path = HexagonShortestPath(5,3);\n VTKM_TEST_ASSERT(test_equal(path, vtkm::Vec<vtkm::IdComponent,4>(5,4,7,3)),\n \"Bad path\");\n}\n\nvoid UsingRange()\n{\n ////\n //// BEGIN-EXAMPLE UsingRange.cxx\n ////\n vtkm::Range range; // default constructor is empty range\n bool b1 = range.IsNonEmpty(); // b1 is false\n\n range.Include(0.5); // range now is [0.5 .. 0.5]\n bool b2 = range.IsNonEmpty(); // b2 is true\n bool b3 = range.Contains(0.5); // b3 is true\n bool b4 = range.Contains(0.6); // b4 is false\n\n range.Include(2.0); // range is now [0.5 .. 2]\n bool b5 = range.Contains(0.5); // b3 is true\n bool b6 = range.Contains(0.6); // b4 is true\n\n range.Include(vtkm::Range(-1, 1)); // range is now [-1 .. 2]\n //// PAUSE-EXAMPLE\n VTKM_TEST_ASSERT(test_equal(range, vtkm::Range(-1,2)), \"Bad range\");\n //// RESUME-EXAMPLE\n\n range.Include(vtkm::Range(3, 4)); // range is now [-1 .. 
4]\n //// PAUSE-EXAMPLE\n VTKM_TEST_ASSERT(test_equal(range, vtkm::Range(-1,4)), \"Bad range\");\n //// RESUME-EXAMPLE\n\n vtkm::Float64 lower = range.Min; // lower is -1\n vtkm::Float64 upper = range.Max; // upper is 4\n vtkm::Float64 length = range.Length(); // length is 5\n vtkm::Float64 center = range.Center(); // center is 1.5\n ////\n //// END-EXAMPLE UsingRange.cxx\n ////\n\n VTKM_TEST_ASSERT(!b1, \"Bad non empty.\");\n VTKM_TEST_ASSERT(b2, \"Bad non empty.\");\n VTKM_TEST_ASSERT(b3, \"Bad contains.\");\n VTKM_TEST_ASSERT(!b4, \"Bad contains.\");\n VTKM_TEST_ASSERT(b5, \"Bad contains.\");\n VTKM_TEST_ASSERT(b6, \"Bad contains.\");\n\n VTKM_TEST_ASSERT(test_equal(lower, -1), \"Bad lower\");\n VTKM_TEST_ASSERT(test_equal(upper, 4), \"Bad upper\");\n VTKM_TEST_ASSERT(test_equal(length, 5), \"Bad length\");\n VTKM_TEST_ASSERT(test_equal(center, 1.5), \"Bad center\");\n}\n\nvoid UsingBounds()\n{\n ////\n //// BEGIN-EXAMPLE UsingBounds.cxx\n ////\n vtkm::Bounds bounds; // default constructor makes empty\n bool b1 = bounds.IsNonEmpty(); // b1 is false\n\n bounds.Include(vtkm::make_Vec(0.5, 2.0, 0.0)); // bounds contains only\n // the point [0.5, 2, 0]\n bool b2 = bounds.IsNonEmpty(); // b2 is true\n bool b3 = bounds.Contains(vtkm::make_Vec(0.5, 2.0, 0.0)); // b3 is true\n bool b4 = bounds.Contains(vtkm::make_Vec(1, 1, 1)); // b4 is false\n bool b5 = bounds.Contains(vtkm::make_Vec(0, 0, 0)); // b5 is false\n\n bounds.Include(vtkm::make_Vec(4, -1, 2)); // bounds is region [0.5 .. 4] in X,\n // [-1 .. 2] in Y,\n // and [0 .. 
2] in Z\n //// PAUSE-EXAMPLE\n VTKM_TEST_ASSERT(test_equal(bounds, vtkm::Bounds(0.5, 4, -1, 2, 0, 2)), \"\");\n //// RESUME-EXAMPLE\n bool b6 = bounds.Contains(vtkm::make_Vec(0.5, 2.0, 0.0)); // b6 is true\n bool b7 = bounds.Contains(vtkm::make_Vec(1, 1, 1)); // b7 is true\n bool b8 = bounds.Contains(vtkm::make_Vec(0, 0, 0)); // b8 is false\n\n vtkm::Bounds otherBounds(vtkm::make_Vec(0, 0, 0), vtkm::make_Vec(3, 3, 3));\n // otherBounds is region [0 .. 3] in X, Y, and Z\n bounds.Include(otherBounds); // bounds is now region [0 .. 4] in X,\n // [-1 .. 3] in Y,\n // and [0 .. 3] in Z\n //// PAUSE-EXAMPLE\n VTKM_TEST_ASSERT(test_equal(bounds, vtkm::Bounds(0, 4, -1, 3, 0, 3)), \"\");\n //// RESUME-EXAMPLE\n\n vtkm::Vec<vtkm::Float64,3> lower(bounds.X.Min, bounds.Y.Min, bounds.Z.Min);\n // lower is [0, -1, 0]\n vtkm::Vec<vtkm::Float64,3> upper(bounds.X.Max, bounds.Y.Max, bounds.Z.Max);\n // upper is [4, 3, 3]\n\n vtkm::Vec<vtkm::Float64,3> center = bounds.Center(); // center is [2, 1, 1.5]\n ////\n //// END-EXAMPLE UsingBounds.cxx\n ////\n\n VTKM_TEST_ASSERT(!b1, \"Bad non empty.\");\n VTKM_TEST_ASSERT(b2, \"Bad non empty.\");\n VTKM_TEST_ASSERT(b3, \"Bad contains.\");\n VTKM_TEST_ASSERT(!b4, \"Bad contains.\");\n VTKM_TEST_ASSERT(!b5, \"Bad contains.\");\n VTKM_TEST_ASSERT(b6, \"Bad contains.\");\n VTKM_TEST_ASSERT(b7, \"Bad contains.\");\n VTKM_TEST_ASSERT(!b8, \"Bad contains.\");\n VTKM_TEST_ASSERT(test_equal(lower, vtkm::make_Vec(0, -1, 0)), \"\");\n VTKM_TEST_ASSERT(test_equal(upper, vtkm::make_Vec(4, 3, 3)), \"\");\n VTKM_TEST_ASSERT(test_equal(center, vtkm::make_Vec(2.0, 1.0, 1.5)), \"\");\n}\n\n\nvoid Test()\n{\n CreatingVectorTypes();\n VectorOperations();\n LongerVector();\n EquilateralTriangle();\n UsingVecCAndVecVariable();\n UsingRange();\n UsingBounds();\n}\n\n} // anonymous namespace\n\nint CoreDataTypes(int, char *[])\n{\n return vtkm::testing::Testing::Run(Test);\n}\n"
},
{
"alpha_fraction": 0.6222265362739563,
"alphanum_fraction": 0.6503654718399048,
"avg_line_length": 34.619815826416016,
"blob_id": "b8a511baf6175584032af6a83035f474e9b6634c",
"content_id": "da5f8cddbfeab56ff0d5de56dbccee9dcfb01831",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 15459,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 434,
"path": "/examples/DataSetCreation.cxx",
"repo_name": "sujin-philip/VTKmUsersGuide",
"src_encoding": "UTF-8",
"text": "#include <vtkm/cont/CellSetPermutation.h>\n#include <vtkm/cont/DataSetBuilderExplicit.h>\n#include <vtkm/cont/DataSetBuilderRectilinear.h>\n#include <vtkm/cont/DataSetBuilderUniform.h>\n#include <vtkm/cont/DataSetFieldAdd.h>\n\n#include <vtkm/Math.h>\n\n#include <vtkm/cont/testing/Testing.h>\n\nnamespace DataSetCreationNamespace {\n\nvoid CreateUniformGrid()\n{\n std::cout << \"Creating uniform grid.\" << std::endl;\n\n ////\n //// BEGIN-EXAMPLE CreateUniformGrid.cxx\n ////\n vtkm::cont::DataSetBuilderUniform dataSetBuilder;\n\n vtkm::cont::DataSet dataSet = dataSetBuilder.Create(vtkm::Id3(101, 101, 26));\n ////\n //// END-EXAMPLE CreateUniformGrid.cxx\n ////\n\n vtkm::Bounds bounds = dataSet.GetCoordinateSystem().GetBounds();\n std::cout << bounds << std::endl;\n\n VTKM_TEST_ASSERT(test_equal(bounds, vtkm::Bounds(0,100,0,100,0,25)),\n \"Bad bounds\");\n}\n\nvoid CreateUniformGridCustomOriginSpacing()\n{\n std::cout << \"Creating uniform grid with custom origin and spacing.\"\n << std::endl;\n\n ////\n //// BEGIN-EXAMPLE CreateUniformGridCustomOriginSpacing.cxx\n ////\n vtkm::cont::DataSetBuilderUniform dataSetBuilder;\n\n vtkm::cont::DataSet dataSet =\n dataSetBuilder.Create(\n vtkm::Id3(101, 101, 26),\n vtkm::Vec<vtkm::FloatDefault,3>(-50.0, -50.0, -50.0),\n vtkm::Vec<vtkm::FloatDefault,3>(1.0, 1.0, 4.0));\n ////\n //// END-EXAMPLE CreateUniformGridCustomOriginSpacing.cxx\n ////\n\n vtkm::Bounds bounds = dataSet.GetCoordinateSystem().GetBounds();\n std::cout << bounds << std::endl;\n\n VTKM_TEST_ASSERT(test_equal(bounds, vtkm::Bounds(-50,50,-50,50,-50,50)),\n \"Bad bounds\");\n}\n\nvoid CreateRectilinearGrid()\n{\n std::cout << \"Create rectilinear grid.\" << std::endl;\n\n ////\n //// BEGIN-EXAMPLE CreateRectilinearGrid.cxx\n ////\n // Make x coordinates range from -4 to 4 with tighter spacing near 0.\n std::vector<vtkm::Float32> xCoordinates;\n for (vtkm::Float32 x = -2.0f; x <= 2.0f; x += 0.02f)\n {\n xCoordinates.push_back(vtkm::CopySign(x*x, x));\n 
}\n\n // Make y coordinates range from 0 to 2 with tighter spacing near 2.\n std::vector<vtkm::Float32> yCoordinates;\n for (vtkm::Float32 y = 0.0f; y <= 4.0f; y += 0.02f)\n {\n yCoordinates.push_back(vtkm::Sqrt(y));\n }\n\n // Make z coordinates rangefrom -1 to 1 with even spacing.\n std::vector<vtkm::Float32> zCoordinates;\n for (vtkm::Float32 z = -1.0f; z <= 1.0f; z += 0.02f)\n {\n zCoordinates.push_back(z);\n }\n\n vtkm::cont::DataSetBuilderRectilinear dataSetBuilder;\n\n vtkm::cont::DataSet dataSet = dataSetBuilder.Create(xCoordinates,\n yCoordinates,\n zCoordinates);\n ////\n //// END-EXAMPLE CreateRectilinearGrid.cxx\n ////\n\n vtkm::Id numPoints = dataSet.GetCellSet().GetNumberOfPoints();\n std::cout << \"Num points: \" << numPoints << std::endl;\n VTKM_TEST_ASSERT(numPoints == 4080501, \"Got wrong number of points.\");\n\n vtkm::Bounds bounds = dataSet.GetCoordinateSystem().GetBounds();\n std::cout << bounds << std::endl;\n\n VTKM_TEST_ASSERT(test_equal(bounds, vtkm::Bounds(-4,4,0,2,-1,1)),\n \"Bad bounds\");\n}\n\nvoid CreateExplicitGrid()\n{\n std::cout << \"Creating explicit grid.\" << std::endl;\n\n ////\n //// BEGIN-EXAMPLE CreateExplicitGrid.cxx\n ////\n // Array of point coordinates.\n std::vector<vtkm::Vec<vtkm::Float32,3> > pointCoordinates;\n pointCoordinates.push_back(vtkm::Vec<vtkm::Float32,3>(1.1f, 0.0f, 0.0f));\n pointCoordinates.push_back(vtkm::Vec<vtkm::Float32,3>(0.2f, 0.4f, 0.0f));\n pointCoordinates.push_back(vtkm::Vec<vtkm::Float32,3>(0.9f, 0.6f, 0.0f));\n pointCoordinates.push_back(vtkm::Vec<vtkm::Float32,3>(1.4f, 0.5f, 0.0f));\n pointCoordinates.push_back(vtkm::Vec<vtkm::Float32,3>(1.8f, 0.3f, 0.0f));\n pointCoordinates.push_back(vtkm::Vec<vtkm::Float32,3>(0.4f, 1.0f, 0.0f));\n pointCoordinates.push_back(vtkm::Vec<vtkm::Float32,3>(1.0f, 1.2f, 0.0f));\n pointCoordinates.push_back(vtkm::Vec<vtkm::Float32,3>(1.5f, 0.9f, 0.0f));\n\n // Array of shapes.\n std::vector<vtkm::UInt8> shapes;\n shapes.push_back(vtkm::CELL_SHAPE_TRIANGLE);\n 
shapes.push_back(vtkm::CELL_SHAPE_QUAD);\n shapes.push_back(vtkm::CELL_SHAPE_TRIANGLE);\n shapes.push_back(vtkm::CELL_SHAPE_POLYGON);\n shapes.push_back(vtkm::CELL_SHAPE_TRIANGLE);\n\n // Array of number of indices per cell.\n std::vector<vtkm::IdComponent> numIndices;\n numIndices.push_back(3);\n numIndices.push_back(4);\n numIndices.push_back(3);\n numIndices.push_back(5);\n numIndices.push_back(3);\n\n // Connectivity array.\n std::vector<vtkm::Id> connectivity;\n connectivity.push_back(0); // Cell 0\n connectivity.push_back(2);\n connectivity.push_back(1);\n connectivity.push_back(0); // Cell 1\n connectivity.push_back(4);\n connectivity.push_back(3);\n connectivity.push_back(2);\n connectivity.push_back(1); // Cell 2\n connectivity.push_back(2);\n connectivity.push_back(5);\n connectivity.push_back(2); // Cell 3\n connectivity.push_back(3);\n connectivity.push_back(7);\n connectivity.push_back(6);\n connectivity.push_back(5);\n connectivity.push_back(3); // Cell 4\n connectivity.push_back(4);\n connectivity.push_back(7);\n\n // Copy these arrays into a DataSet.\n vtkm::cont::DataSetBuilderExplicit dataSetBuilder;\n\n vtkm::cont::DataSet dataSet = dataSetBuilder.Create(pointCoordinates,\n shapes,\n numIndices,\n connectivity);\n ////\n //// END-EXAMPLE CreateExplicitGrid.cxx\n ////\n\n vtkm::cont::CellSetExplicit<> cellSet;\n dataSet.GetCellSet().CopyTo(cellSet);\n VTKM_TEST_ASSERT(test_equal(cellSet.GetNumberOfPoints(), 8),\n \"Data set has wrong number of points.\");\n VTKM_TEST_ASSERT(test_equal(cellSet.GetNumberOfCells(), 5),\n \"Data set has wrong number of cells.\");\n\n vtkm::Bounds bounds = dataSet.GetCoordinateSystem().GetBounds();\n std::cout << bounds << std::endl;\n\n VTKM_TEST_ASSERT(test_equal(bounds, vtkm::Bounds(0.2,1.8,0.0,1.2,0.0,0.0)),\n \"Bad bounds\");\n\n // Do a simple check of the connectivity by getting the number of cells\n // incident on each point. 
This array is unlikely to be correct if the\n // topology got screwed up.\n cellSet.BuildConnectivity(VTKM_DEFAULT_DEVICE_ADAPTER_TAG(),\n vtkm::TopologyElementTagCell(),\n vtkm::TopologyElementTagPoint());\n vtkm::cont::ArrayHandle<vtkm::IdComponent> numCellsPerPoint =\n cellSet.GetNumIndicesArray(vtkm::TopologyElementTagCell(),\n vtkm::TopologyElementTagPoint());\n vtkm::cont::printSummary_ArrayHandle(numCellsPerPoint, std::cout);\n std::cout << std::endl;\n vtkm::cont::ArrayHandle<vtkm::IdComponent>::PortalConstControl numCellsPortal=\n numCellsPerPoint.GetPortalConstControl();\n VTKM_TEST_ASSERT(test_equal(numCellsPortal.Get(0), 2),\n \"Wrong number of cells on point 0\");\n VTKM_TEST_ASSERT(test_equal(numCellsPortal.Get(1), 2),\n \"Wrong number of cells on point 1\");\n VTKM_TEST_ASSERT(test_equal(numCellsPortal.Get(2), 4),\n \"Wrong number of cells on point 2\");\n VTKM_TEST_ASSERT(test_equal(numCellsPortal.Get(3), 3),\n \"Wrong number of cells on point 3\");\n VTKM_TEST_ASSERT(test_equal(numCellsPortal.Get(4), 2),\n \"Wrong number of cells on point 4\");\n VTKM_TEST_ASSERT(test_equal(numCellsPortal.Get(5), 2),\n \"Wrong number of cells on point 5\");\n VTKM_TEST_ASSERT(test_equal(numCellsPortal.Get(6), 1),\n \"Wrong number of cells on point 6\");\n VTKM_TEST_ASSERT(test_equal(numCellsPortal.Get(7), 2),\n \"Wrong number of cells on point 7\");\n}\n\nvoid CreateExplicitGridIterative()\n{\n std::cout << \"Creating explicit grid iteratively.\" << std::endl;\n\n ////\n //// BEGIN-EXAMPLE CreateExplicitGridIterative.cxx\n ////\n vtkm::cont::DataSetBuilderExplicitIterative dataSetBuilder;\n\n dataSetBuilder.AddPoint(1.1, 0.0, 0.0);\n dataSetBuilder.AddPoint(0.2, 0.4, 0.0);\n dataSetBuilder.AddPoint(0.9, 0.6, 0.0);\n dataSetBuilder.AddPoint(1.4, 0.5, 0.0);\n dataSetBuilder.AddPoint(1.8, 0.3, 0.0);\n dataSetBuilder.AddPoint(0.4, 1.0, 0.0);\n dataSetBuilder.AddPoint(1.0, 1.2, 0.0);\n dataSetBuilder.AddPoint(1.5, 0.9, 0.0);\n\n 
dataSetBuilder.AddCell(vtkm::CELL_SHAPE_TRIANGLE);\n dataSetBuilder.AddCellPoint(0);\n dataSetBuilder.AddCellPoint(2);\n dataSetBuilder.AddCellPoint(1);\n\n dataSetBuilder.AddCell(vtkm::CELL_SHAPE_QUAD);\n dataSetBuilder.AddCellPoint(0);\n dataSetBuilder.AddCellPoint(4);\n dataSetBuilder.AddCellPoint(3);\n dataSetBuilder.AddCellPoint(2);\n\n dataSetBuilder.AddCell(vtkm::CELL_SHAPE_TRIANGLE);\n dataSetBuilder.AddCellPoint(1);\n dataSetBuilder.AddCellPoint(2);\n dataSetBuilder.AddCellPoint(5);\n\n dataSetBuilder.AddCell(vtkm::CELL_SHAPE_POLYGON);\n dataSetBuilder.AddCellPoint(2);\n dataSetBuilder.AddCellPoint(3);\n dataSetBuilder.AddCellPoint(7);\n dataSetBuilder.AddCellPoint(6);\n dataSetBuilder.AddCellPoint(5);\n\n dataSetBuilder.AddCell(vtkm::CELL_SHAPE_TRIANGLE);\n dataSetBuilder.AddCellPoint(3);\n dataSetBuilder.AddCellPoint(4);\n dataSetBuilder.AddCellPoint(7);\n\n vtkm::cont::DataSet dataSet = dataSetBuilder.Create();\n ////\n //// END-EXAMPLE CreateExplicitGridIterative.cxx\n ////\n\n vtkm::cont::CellSetExplicit<> cellSet;\n dataSet.GetCellSet().CopyTo(cellSet);\n VTKM_TEST_ASSERT(test_equal(cellSet.GetNumberOfPoints(), 8),\n \"Data set has wrong number of points.\");\n VTKM_TEST_ASSERT(test_equal(cellSet.GetNumberOfCells(), 5),\n \"Data set has wrong number of cells.\");\n\n vtkm::Bounds bounds = dataSet.GetCoordinateSystem().GetBounds();\n std::cout << bounds << std::endl;\n\n VTKM_TEST_ASSERT(test_equal(bounds, vtkm::Bounds(0.2,1.8,0.0,1.2,0.0,0.0)),\n \"Bad bounds\");\n\n // Do a simple check of the connectivity by getting the number of cells\n // incident on each point. 
This array is unlikely to be correct if the\n // topology got screwed up.\n cellSet.BuildConnectivity(VTKM_DEFAULT_DEVICE_ADAPTER_TAG(),\n vtkm::TopologyElementTagCell(),\n vtkm::TopologyElementTagPoint());\n vtkm::cont::ArrayHandle<vtkm::IdComponent> numCellsPerPoint =\n cellSet.GetNumIndicesArray(vtkm::TopologyElementTagCell(),\n vtkm::TopologyElementTagPoint());\n vtkm::cont::printSummary_ArrayHandle(numCellsPerPoint, std::cout);\n std::cout << std::endl;\n vtkm::cont::ArrayHandle<vtkm::IdComponent>::PortalConstControl numCellsPortal=\n numCellsPerPoint.GetPortalConstControl();\n VTKM_TEST_ASSERT(test_equal(numCellsPortal.Get(0), 2),\n \"Wrong number of cells on point 0\");\n VTKM_TEST_ASSERT(test_equal(numCellsPortal.Get(1), 2),\n \"Wrong number of cells on point 1\");\n VTKM_TEST_ASSERT(test_equal(numCellsPortal.Get(2), 4),\n \"Wrong number of cells on point 2\");\n VTKM_TEST_ASSERT(test_equal(numCellsPortal.Get(3), 3),\n \"Wrong number of cells on point 3\");\n VTKM_TEST_ASSERT(test_equal(numCellsPortal.Get(4), 2),\n \"Wrong number of cells on point 4\");\n VTKM_TEST_ASSERT(test_equal(numCellsPortal.Get(5), 2),\n \"Wrong number of cells on point 5\");\n VTKM_TEST_ASSERT(test_equal(numCellsPortal.Get(6), 1),\n \"Wrong number of cells on point 6\");\n VTKM_TEST_ASSERT(test_equal(numCellsPortal.Get(7), 2),\n \"Wrong number of cells on point 7\");\n}\n\nvoid AddFieldData()\n{\n std::cout << \"Add field data.\" << std::endl;\n\n ////\n //// BEGIN-EXAMPLE AddFieldData.cxx\n ////\n // Make a simple structured data set.\n const vtkm::Id3 pointDimensions(20, 20, 10);\n const vtkm::Id3 cellDimensions = pointDimensions - vtkm::Id3(1, 1, 1);\n vtkm::cont::DataSetBuilderUniform dataSetBuilder;\n vtkm::cont::DataSet dataSet = dataSetBuilder.Create(pointDimensions);\n\n // This is the helper object to add fields to a data set.\n vtkm::cont::DataSetFieldAdd dataSetFieldAdd;\n\n // Create a field that identifies points on the boundary.\n std::vector<vtkm::UInt8> 
boundaryPoints;\n for (vtkm::Id zIndex = 0; zIndex < pointDimensions[2]; zIndex++)\n {\n for (vtkm::Id yIndex = 0; yIndex < pointDimensions[1]; yIndex++)\n {\n for (vtkm::Id xIndex = 0; xIndex < pointDimensions[0]; xIndex++)\n {\n if ( (xIndex == 0) || (xIndex == pointDimensions[0]-1) ||\n (yIndex == 0) || (yIndex == pointDimensions[1]-1) ||\n (zIndex == 0) || (zIndex == pointDimensions[2]-1) )\n {\n boundaryPoints.push_back(1);\n }\n else\n {\n boundaryPoints.push_back(0);\n }\n }\n }\n }\n\n dataSetFieldAdd.AddPointField(dataSet, \"boundary_points\", boundaryPoints);\n\n // Create a field that identifies cells on the boundary.\n std::vector<vtkm::UInt8> boundaryCells;\n for (vtkm::Id zIndex = 0; zIndex < cellDimensions[2]; zIndex++)\n {\n for (vtkm::Id yIndex = 0; yIndex < cellDimensions[1]; yIndex++)\n {\n for (vtkm::Id xIndex = 0; xIndex < cellDimensions[0]; xIndex++)\n {\n if ( (xIndex == 0) || (xIndex == cellDimensions[0]-1) ||\n (yIndex == 0) || (yIndex == cellDimensions[1]-1) ||\n (zIndex == 0) || (zIndex == cellDimensions[2]-1) )\n {\n boundaryCells.push_back(1);\n }\n else\n {\n boundaryCells.push_back(0);\n }\n }\n }\n }\n\n dataSetFieldAdd.AddCellField(dataSet, \"boundary_cells\", boundaryCells);\n ////\n //// END-EXAMPLE AddFieldData.cxx\n ////\n}\n\nvoid CreateCellSetPermutation()\n{\n std::cout << \"Create a cell set permutation\" << std::endl;\n\n ////\n //// BEGIN-EXAMPLE CreateCellSetPermutation.cxx\n ////\n // Create a simple data set.\n vtkm::cont::DataSetBuilderUniform dataSetBuilder;\n vtkm::cont::DataSet originalDataSet =\n dataSetBuilder.Create(vtkm::Id3(33,33,26));\n vtkm::cont::CellSetStructured<3> originalCellSet;\n originalDataSet.GetCellSet().CopyTo(originalCellSet);\n\n // Create a permutation array for the cells. Each value in the array refers\n // to a cell in the original cell set. 
This particular array selects every\n // 10th cell.\n vtkm::cont::ArrayHandleCounting<vtkm::Id> permutationArray(0, 10, 2560);\n\n // Create a permutation of that cell set containing only every 10th cell.\n vtkm::cont::CellSetPermutation<\n vtkm::cont::CellSetStructured<3>,\n vtkm::cont::ArrayHandleCounting<vtkm::Id> >\n permutedCellSet(permutationArray, originalCellSet);\n ////\n //// END-EXAMPLE CreateCellSetPermutation.cxx\n ////\n\n std::cout << \"Num points: \" << permutedCellSet.GetNumberOfPoints()\n << std::endl;\n VTKM_TEST_ASSERT(permutedCellSet.GetNumberOfPoints() == 28314,\n \"Wrong number of points.\");\n std::cout << \"Num cells: \" << permutedCellSet.GetNumberOfCells() << std::endl;\n VTKM_TEST_ASSERT(permutedCellSet.GetNumberOfCells() == 2560,\n \"Wrong number of cells.\");\n}\n\nvoid Test()\n{\n CreateUniformGrid();\n CreateUniformGridCustomOriginSpacing();\n CreateRectilinearGrid();\n CreateExplicitGrid();\n CreateExplicitGridIterative();\n AddFieldData();\n CreateCellSetPermutation();\n}\n\n} // namespace DataSetCreationNamespace\n\nint DataSetCreation(int, char *[])\n{\n return vtkm::cont::testing::Testing::Run(DataSetCreationNamespace::Test);\n}\n"
},
{
"alpha_fraction": 0.6436223983764648,
"alphanum_fraction": 0.6484754085540771,
"avg_line_length": 32.8111457824707,
"blob_id": "174b52a36ed7ff7a217977a6312b18fcb90ba0ec",
"content_id": "c42208af0175918925792e2505786e0c1c23e624",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 10921,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 323,
"path": "/examples/CellEdgesFaces.cxx",
"repo_name": "sujin-philip/VTKmUsersGuide",
"src_encoding": "UTF-8",
"text": "#include <vtkm/exec/CellEdge.h>\n#include <vtkm/exec/CellFace.h>\n\n#include <vtkm/worklet/DispatcherMapTopology.h>\n#include <vtkm/worklet/ScatterCounting.h>\n#include <vtkm/worklet/WorkletMapTopology.h>\n\n#include <vtkm/cont/ArrayHandle.h>\n#include <vtkm/cont/ArrayHandleGroupVec.h>\n#include <vtkm/cont/ArrayHandleGroupVecVariable.h>\n#include <vtkm/cont/CellSetSingleType.h>\n\n#include <vtkm/cont/testing/MakeTestDataSet.h>\n#include <vtkm/cont/testing/Testing.h>\n\nnamespace {\n\nstruct ExtractEdges {\n ////\n //// BEGIN-EXAMPLE CellEdge.cxx\n ////\n struct EdgesCount : vtkm::worklet::WorkletMapPointToCell\n {\n typedef void ControlSignature(CellSetIn,\n FieldOutCell<> numEdgesInCell);\n typedef _2 ExecutionSignature(CellShape, PointCount);\n typedef _1 InputDomain;\n\n template<typename CellShapeTag>\n VTKM_EXEC\n vtkm::IdComponent\n operator()(CellShapeTag shape, vtkm::IdComponent numPointsInCell) const\n {\n return vtkm::exec::CellEdgeNumberOfEdges(numPointsInCell, shape, *this);\n }\n };\n\n struct EdgesExtract : vtkm::worklet::WorkletMapPointToCell\n {\n typedef void ControlSignature(CellSetIn,\n FieldOutCell<> edgeIndices);\n typedef void ExecutionSignature(CellShape, PointIndices, VisitIndex, _2);\n typedef _1 InputDomain;\n\n typedef vtkm::worklet::ScatterCounting ScatterType;\n VTKM_CONT ScatterType GetScatter() const { return this->Scatter; }\n\n VTKM_CONT\n EdgesExtract(const ScatterType &scatter)\n : Scatter(scatter) { }\n\n template<typename CellShapeTag,\n typename PointIndexVecType,\n typename EdgeIndexVecType>\n VTKM_EXEC\n void operator()(CellShapeTag shape,\n const PointIndexVecType &pointIndices,\n vtkm::IdComponent visitIndex,\n EdgeIndexVecType &edgeIndices) const\n {\n vtkm::Vec<vtkm::IdComponent,2> localEdgeIndices =\n vtkm::exec::CellEdgeLocalIndices(pointIndices.GetNumberOfComponents(),\n visitIndex,\n shape,\n *this);\n edgeIndices[0] = pointIndices[localEdgeIndices[0]];\n edgeIndices[1] = pointIndices[localEdgeIndices[1]];\n 
}\n\n private:\n ScatterType Scatter;\n };\n ////\n //// END-EXAMPLE CellEdge.cxx\n ////\n\n template<typename CellSetInType, typename Device>\n VTKM_CONT\n vtkm::cont::CellSetSingleType<>\n Run(const CellSetInType &cellSetIn, Device)\n {\n // Count how many edges each cell has\n vtkm::cont::ArrayHandle<vtkm::IdComponent> edgeCounts;\n vtkm::worklet::DispatcherMapTopology<EdgesCount,Device> countDispatcher;\n countDispatcher.Invoke(cellSetIn, edgeCounts);\n\n // Set up a \"scatter\" to create an output entry for each edge in the input\n vtkm::worklet::ScatterCounting scatter(edgeCounts, Device());\n\n // Get the cell index array for all the edges\n vtkm::cont::ArrayHandle<vtkm::Id> edgeIndices;\n vtkm::worklet::DispatcherMapTopology<EdgesExtract,Device>\n extractDispatcher(scatter);\n extractDispatcher.Invoke(\n cellSetIn,\n vtkm::cont::make_ArrayHandleGroupVec<2>(edgeIndices));\n\n // Construct the resulting cell set and return\n vtkm::cont::CellSetSingleType<> cellSetOut(cellSetIn.GetName());\n cellSetOut.Fill(cellSetIn.GetNumberOfPoints(),\n vtkm::CELL_SHAPE_LINE,\n 2,\n edgeIndices);\n return cellSetOut;\n }\n};\n\nvoid TryExtractEdges()\n{\n std::cout << \"Trying extract edges worklets.\" << std::endl;\n\n vtkm::cont::DataSet dataSet =\n vtkm::cont::testing::MakeTestDataSet().Make3DExplicitDataSet5();\n\n ExtractEdges extractEdges;\n vtkm::cont::CellSetSingleType<> edgeCells =\n extractEdges.Run(dataSet.GetCellSet(),\n VTKM_DEFAULT_DEVICE_ADAPTER_TAG());\n\n VTKM_TEST_ASSERT(edgeCells.GetNumberOfPoints() == 11,\n \"Output has wrong number of points\");\n VTKM_TEST_ASSERT(edgeCells.GetNumberOfCells() == 35,\n \"Output has wrong number of cells\");\n}\n\nstruct ExtractFaces {\n ////\n //// BEGIN-EXAMPLE CellFace.cxx\n ////\n struct FacesCount : vtkm::worklet::WorkletMapPointToCell\n {\n typedef void ControlSignature(CellSetIn,\n FieldOutCell<> numFacesInCell);\n typedef _2 ExecutionSignature(CellShape);\n typedef _1 InputDomain;\n\n template<typename 
CellShapeTag>\n VTKM_EXEC\n vtkm::IdComponent\n operator()(CellShapeTag shape) const\n {\n return vtkm::exec::CellFaceNumberOfFaces(shape, *this);\n }\n };\n\n struct FacesCountPoints : vtkm::worklet::WorkletMapPointToCell\n {\n typedef void ControlSignature(CellSetIn,\n FieldOutCell<> numPointsInFace,\n FieldOutCell<> faceShape);\n typedef void ExecutionSignature(CellShape, VisitIndex, _2, _3);\n typedef _1 InputDomain;\n\n typedef vtkm::worklet::ScatterCounting ScatterType;\n VTKM_CONT ScatterType GetScatter() const { return this->Scatter; }\n\n VTKM_CONT\n FacesCountPoints(const ScatterType &scatter)\n : Scatter(scatter) { }\n\n template<typename CellShapeTag>\n VTKM_EXEC\n void operator()(CellShapeTag shape,\n vtkm::IdComponent visitIndex,\n vtkm::IdComponent &numPointsInFace,\n vtkm::UInt8 &faceShape) const\n {\n numPointsInFace =\n vtkm::exec::CellFaceNumberOfPoints(visitIndex, shape, *this);\n switch (numPointsInFace)\n {\n case 3: faceShape = vtkm::CELL_SHAPE_TRIANGLE; break;\n case 4: faceShape = vtkm::CELL_SHAPE_QUAD; break;\n default: faceShape = vtkm::CELL_SHAPE_POLYGON; break;\n }\n }\n\n private:\n ScatterType Scatter;\n };\n\n struct FacesExtract : vtkm::worklet::WorkletMapPointToCell\n {\n typedef void ControlSignature(CellSetIn,\n FieldOutCell<> faceIndices);\n typedef void ExecutionSignature(CellShape, PointIndices, VisitIndex, _2);\n typedef _1 InputDomain;\n\n typedef vtkm::worklet::ScatterCounting ScatterType;\n VTKM_CONT ScatterType GetScatter() const { return this->Scatter; }\n\n VTKM_CONT\n FacesExtract(const ScatterType &scatter)\n : Scatter(scatter) { }\n\n template<typename CellShapeTag,\n typename PointIndexVecType,\n typename FaceIndexVecType>\n VTKM_EXEC\n void operator()(CellShapeTag shape,\n const PointIndexVecType &pointIndices,\n vtkm::IdComponent visitIndex,\n FaceIndexVecType &faceIndices) const\n {\n vtkm::VecCConst<vtkm::IdComponent> localFaceIndices =\n vtkm::exec::CellFaceLocalIndices(visitIndex,\n shape,\n *this);\n\n 
vtkm::IdComponent numPoints = faceIndices.GetNumberOfComponents();\n VTKM_ASSERT(numPoints == localFaceIndices.GetNumberOfComponents());\n for (vtkm::IdComponent localPointIndex = 0;\n localPointIndex < numPoints;\n localPointIndex++)\n {\n faceIndices[localPointIndex] =\n pointIndices[localFaceIndices[localPointIndex]];\n }\n }\n\n private:\n ScatterType Scatter;\n };\n ////\n //// END-EXAMPLE CellFace.cxx\n ////\n\n template<typename CellSetInType, typename Device>\n VTKM_CONT\n vtkm::cont::CellSetExplicit<>\n Run(const CellSetInType &cellSetIn, Device)\n {\n // Count how many faces each cell has\n vtkm::cont::ArrayHandle<vtkm::IdComponent> faceCounts;\n vtkm::worklet::DispatcherMapTopology<FacesCount,Device> countDispatcher;\n countDispatcher.Invoke(cellSetIn, faceCounts);\n\n // Set up a \"scatter\" to create an output entry for each face in the input\n vtkm::worklet::ScatterCounting scatter(faceCounts, Device());\n\n // Count how many points each face has. Also get the shape of each face.\n vtkm::cont::ArrayHandle<vtkm::IdComponent> pointsPerFace;\n vtkm::cont::ArrayHandle<vtkm::UInt8> faceShapes;\n vtkm::worklet::DispatcherMapTopology<FacesCountPoints,Device>\n countPointsDispatcher(scatter);\n countPointsDispatcher.Invoke(cellSetIn, pointsPerFace, faceShapes);\n\n // To construct an ArrayHandleGroupVecVariable, we need to convert\n // pointsPerFace to an array of offsets\n vtkm::Id faceIndicesSize;\n vtkm::cont::ArrayHandle<vtkm::Id> faceIndexOffsets =\n vtkm::cont::ConvertNumComponentsToOffsets(pointsPerFace,\n faceIndicesSize);\n\n // We need to preallocate the array for faceIndices (because that is the\n // way ArrayHandleGroupVecVariable works). 
We use the value previously\n // returned from ConvertNumComponentsToOffsets.\n vtkm::cont::ArrayHandle<vtkm::Id> faceIndices;\n faceIndices.Allocate(faceIndicesSize);\n\n // Get the cell index array for all the faces\n vtkm::worklet::DispatcherMapTopology<FacesExtract,Device>\n extractDispatcher(scatter);\n extractDispatcher.Invoke(\n cellSetIn,\n vtkm::cont::make_ArrayHandleGroupVecVariable(faceIndices,\n faceIndexOffsets));\n\n // Construct the resulting cell set and return\n vtkm::cont::CellSetExplicit<> cellSetOut(cellSetIn.GetName());\n cellSetOut.Fill(cellSetIn.GetNumberOfPoints(),\n faceShapes,\n pointsPerFace,\n faceIndices,\n faceIndexOffsets);\n return cellSetOut;\n }\n};\n\nvoid TryExtractFaces()\n{\n std::cout << \"Trying extract faces worklets.\" << std::endl;\n\n vtkm::cont::DataSet dataSet =\n vtkm::cont::testing::MakeTestDataSet().Make3DExplicitDataSet5();\n\n ExtractFaces extractFaces;\n vtkm::cont::CellSetExplicit<> faceCells =\n extractFaces.Run(dataSet.GetCellSet(),\n VTKM_DEFAULT_DEVICE_ADAPTER_TAG());\n\n VTKM_TEST_ASSERT(faceCells.GetNumberOfPoints() == 11,\n \"Output has wrong number of points\");\n VTKM_TEST_ASSERT(faceCells.GetNumberOfCells() == 20,\n \"Output has wrong number of cells\");\n\n VTKM_TEST_ASSERT(faceCells.GetCellShape(0) == vtkm::CELL_SHAPE_QUAD,\n \"Face wrong\");\n vtkm::Vec<vtkm::Id,4> quadIndices;\n faceCells.GetIndices(0, quadIndices);\n VTKM_TEST_ASSERT(test_equal(quadIndices, vtkm::Vec<vtkm::Id,4>(0,3,7,4)),\n \"Face wrong\");\n\n VTKM_TEST_ASSERT(faceCells.GetCellShape(12) == vtkm::CELL_SHAPE_TRIANGLE,\n \"Face wrong\");\n vtkm::Vec<vtkm::Id,3> triIndices;\n faceCells.GetIndices(12, triIndices);\n VTKM_TEST_ASSERT(test_equal(triIndices, vtkm::Id3(8,10,6)),\n \"Face wrong\");\n}\n\nvoid Run()\n{\n TryExtractEdges();\n TryExtractFaces();\n}\n\n} // anonymous namespace\n\nint CellEdgesFaces(int, char*[])\n{\n return vtkm::cont::testing::Testing::Run(Run);\n}\n"
},
{
"alpha_fraction": 0.6368051767349243,
"alphanum_fraction": 0.6411225199699402,
"avg_line_length": 23.38157844543457,
"blob_id": "30f72e826e01482ee651d5de8a236b36705e7281",
"content_id": "530c16dc2c039b0ec1c13047d210b9065b6ff241",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1853,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 76,
"path": "/examples/ArrayHandleCast.cxx",
"repo_name": "sujin-philip/VTKmUsersGuide",
"src_encoding": "UTF-8",
"text": "#include <vtkm/cont/ArrayHandleCast.h>\n\n#include <vector>\n\n#include <vtkm/cont/testing/Testing.h>\n\nnamespace {\n\ntemplate<typename OriginalType, typename ArrayHandleType>\nvoid CheckArray(const ArrayHandleType array)\n{\n vtkm::Id length = array.GetNumberOfValues();\n\n typename ArrayHandleType::PortalConstControl portal =\n array.GetPortalConstControl();\n VTKM_TEST_ASSERT(portal.GetNumberOfValues() == length,\n \"Portal has wrong size.\");\n\n for (vtkm::Id index = 0; index < length; index++)\n {\n VTKM_TEST_ASSERT(test_equal(portal.Get(index),\n TestValue(index, OriginalType())),\n \"Array has wrong value.\");\n VTKM_TEST_ASSERT(\n !test_equal(portal.Get(index),\n TestValue(index, typename ArrayHandleType::ValueType())),\n \"Array has wrong value.\");\n }\n}\n\n////\n//// BEGIN-EXAMPLE ArrayHandleCast.cxx\n////\ntemplate<typename T>\nVTKM_CONT\nvoid Foo(const std::vector<T> &inputData)\n{\n vtkm::cont::ArrayHandle<T> originalArray =\n vtkm::cont::make_ArrayHandle(inputData);\n\n vtkm::cont::ArrayHandleCast<vtkm::Float64, vtkm::cont::ArrayHandle<T> >\n castArray(originalArray);\n ////\n //// END-EXAMPLE ArrayHandleCast.cxx\n ////\n CheckArray<T>(castArray);\n\n CheckArray<T>(\n ////\n //// BEGIN-EXAMPLE MakeArrayHandleCast.cxx\n ////\n vtkm::cont::make_ArrayHandleCast<vtkm::Float64>(originalArray)\n ////\n //// END-EXAMPLE MakeArrayHandleCast.cxx\n ////\n );\n}\n\nvoid Test()\n{\n const std::size_t ARRAY_SIZE = 50;\n std::vector<vtkm::Id> inputData(ARRAY_SIZE);\n for (std::size_t index = 0; index < ARRAY_SIZE; index++)\n {\n inputData[index] = TestValue(vtkm::Id(index), vtkm::Id());\n }\n\n Foo(inputData);\n}\n\n} // anonymous namespace\n\nint ArrayHandleCast(int, char *[])\n{\n return vtkm::cont::testing::Testing::Run(Test);\n}\n"
},
{
"alpha_fraction": 0.6581485271453857,
"alphanum_fraction": 0.6753455400466919,
"avg_line_length": 31.40625,
"blob_id": "c207ee891b963947335c0cf469089471cec8bc94",
"content_id": "d61f7c9166a79f230014cb5031e0a10d9c13f65f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 6222,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 192,
"path": "/examples/UseWorkletMapCellToPoint.cxx",
"repo_name": "sujin-philip/VTKmUsersGuide",
"src_encoding": "UTF-8",
"text": "////\n//// BEGIN-EXAMPLE UseWorkletMapCellToPoint.cxx\n////\n#include <vtkm/worklet/DispatcherMapTopology.h>\n#include <vtkm/worklet/WorkletMapTopology.h>\n\n#include <vtkm/cont/DataSet.h>\n#include <vtkm/cont/DataSetFieldAdd.h>\n#include <vtkm/cont/DynamicArrayHandle.h>\n#include <vtkm/cont/DynamicCellSet.h>\n#include <vtkm/cont/Field.h>\n\nnamespace vtkm {\nnamespace worklet {\n\nclass AverageCellField : public vtkm::worklet::WorkletMapCellToPoint\n{\npublic:\n typedef void ControlSignature(CellSetIn cellSet,\n FieldInCell<> inputCellField,\n FieldOut<> outputPointField);\n typedef void ExecutionSignature(CellCount, _2, _3);\n\n typedef _1 InputDomain;\n\n template<typename InputCellFieldType, typename OutputFieldType>\n VTKM_EXEC\n void\n operator()(vtkm::IdComponent numCells,\n const InputCellFieldType &inputCellField,\n OutputFieldType &fieldAverage) const\n {\n // TODO: This trickery with calling DoAverage with an extra fabricated type\n // is to handle when the dynamic type resolution provides combinations that\n // are incompatible. On the todo list for VTK-m is to allow you to express\n // types that are the same for different parameters of the control\n // signature. 
When that happens, we can get rid of this hack.\n typedef typename InputCellFieldType::ComponentType InputComponentType;\n this->DoAverage(numCells,\n inputCellField,\n fieldAverage,\n vtkm::ListTagBase<InputComponentType,OutputFieldType>());\n }\n\nprivate:\n template<typename InputCellFieldType, typename OutputFieldType>\n VTKM_EXEC\n void DoAverage(vtkm::IdComponent numCells,\n const InputCellFieldType &inputCellField,\n OutputFieldType &fieldAverage,\n vtkm::ListTagBase<OutputFieldType,OutputFieldType>) const\n {\n fieldAverage = OutputFieldType(0);\n\n for (vtkm::IdComponent cellIndex = 0; cellIndex < numCells; cellIndex++)\n {\n fieldAverage = fieldAverage + inputCellField[cellIndex];\n }\n\n //// PAUSE-EXAMPLE\n // The following line can create a warning when converting numCells to a\n // float. However, casting it is tricky since OutputFieldType could be\n // a vector, and that would unnecessarily complicate this example. Instead,\n // just suppress the warning.\n#ifdef VTKM_MSVC\n#pragma warning(push)\n#pragma warning(disable:4244)\n#endif\n //// RESUME-EXAMPLE\n fieldAverage = fieldAverage / OutputFieldType(numCells);\n //// PAUSE-EXAMPLE\n#ifdef VTKM_MSVC\n#pragma warning(pop)\n#endif\n //// RESUME-EXAMPLE\n }\n\n template<typename T1, typename T2, typename T3>\n VTKM_EXEC\n void DoAverage(vtkm::IdComponent, T1, T2, T3) const\n {\n this->RaiseError(\"Incompatible types for input and output.\");\n }\n};\n\n}\n} // namespace vtkm::worklet\n\nVTKM_CONT\nvtkm::cont::DataSet\nConvertCellFieldsToPointFields(const vtkm::cont::DataSet &inData)\n{\n vtkm::cont::DataSet outData;\n\n // Copy parts of structure that should be passed through.\n for (vtkm::Id cellSetIndex = 0;\n cellSetIndex < inData.GetNumberOfCellSets();\n cellSetIndex++)\n {\n outData.AddCellSet(inData.GetCellSet(cellSetIndex));\n }\n for (vtkm::Id coordSysIndex = 0;\n coordSysIndex < inData.GetNumberOfCoordinateSystems();\n coordSysIndex++)\n {\n 
outData.AddCoordinateSystem(inData.GetCoordinateSystem(coordSysIndex));\n }\n\n // Copy all fields, converting cell fields to point fields.\n for (vtkm::Id fieldIndex = 0;\n fieldIndex < inData.GetNumberOfFields();\n fieldIndex++)\n {\n vtkm::cont::Field inField = inData.GetField(fieldIndex);\n if (inField.GetAssociation() == vtkm::cont::Field::ASSOC_CELL_SET)\n {\n vtkm::cont::DynamicArrayHandle inFieldData = inField.GetData();\n vtkm::cont::DynamicCellSet inCellSet =\n inData.GetCellSet(inField.GetAssocCellSet());\n\n vtkm::cont::DynamicArrayHandle outFieldData = inFieldData.NewInstance();\n vtkm::worklet::DispatcherMapTopology<vtkm::worklet::AverageCellField>\n dispatcher;\n dispatcher.Invoke(inCellSet, inFieldData, outFieldData);\n\n vtkm::cont::DataSetFieldAdd::AddCellField(outData,\n inField.GetName(),\n outFieldData,\n inField.GetAssocCellSet());\n }\n else\n {\n outData.AddField(inField);\n }\n }\n\n return outData;\n}\n////\n//// END-EXAMPLE UseWorkletMapCellToPoint.cxx\n////\n\n#include <vtkm/cont/testing/MakeTestDataSet.h>\n#include <vtkm/cont/testing/Testing.h>\n\nnamespace {\n\nvoid Test()\n{\n vtkm::cont::testing::MakeTestDataSet makeTestDataSet;\n\n std::cout << \"Making test data set.\" << std::endl;\n vtkm::cont::DataSet inDataSet = makeTestDataSet.Make3DUniformDataSet0();\n\n std::cout << \"Average cell data.\" << std::endl;\n vtkm::cont::DataSet resultDataSet = ConvertCellFieldsToPointFields(inDataSet);\n\n std::cout << \"Checking cell data converted to points.\" << std::endl;\n vtkm::cont::Field convertedField = resultDataSet.GetField(\"cellvar\");\n VTKM_TEST_ASSERT(\n convertedField.GetAssociation() == vtkm::cont::Field::ASSOC_CELL_SET,\n \"Result field has wrong association.\");\n\n const vtkm::Id numPoints = 18;\n vtkm::Float64 expectedData[numPoints] = {\n 100.1, 100.15, 100.2, 100.1, 100.15, 100.2,\n 100.2, 100.25, 100.3, 100.2, 100.25, 100.3,\n 100.3, 100.35, 100.4, 100.3, 100.35, 100.4\n };\n\n vtkm::cont::ArrayHandle<vtkm::Float32> 
outData;\n convertedField.GetData().CopyTo(outData);\n vtkm::cont::ArrayHandle<vtkm::Float32>::PortalConstControl outPortal =\n outData.GetPortalConstControl();\n vtkm::cont::printSummary_ArrayHandle(outData, std::cout);\n std::cout << std::endl;\n VTKM_TEST_ASSERT(outPortal.GetNumberOfValues() == numPoints,\n \"Result array wrong size.\");\n\n for (vtkm::Id pointId = 0; pointId < numPoints; pointId++)\n {\n VTKM_TEST_ASSERT(test_equal(outPortal.Get(pointId), expectedData[pointId]),\n \"Got wrong result.\");\n }\n}\n\n} // anonymous namespace\n\nint UseWorkletMapCellToPoint(int, char *[])\n{\n return vtkm::cont::testing::Testing::Run(Test);\n}\n"
},
{
"alpha_fraction": 0.6949397325515747,
"alphanum_fraction": 0.7539883255958557,
"avg_line_length": 36.339622497558594,
"blob_id": "38fa7acd0bcb977e3efb40d887a96d5070d67ba3",
"content_id": "abd0be3ad0117ca46873eb4a3534878618b36deb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13853,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 371,
"path": "/images/CameraImages/CameraPositionOrientation.py",
"repo_name": "sujin-philip/VTKmUsersGuide",
"src_encoding": "UTF-8",
"text": "# state file generated using paraview version 5.0.1\n\n# ----------------------------------------------------------------\n# setup views used in the visualization\n# ----------------------------------------------------------------\n\n#### import the simple module from the paraview\nfrom paraview.simple import *\n#### disable automatic camera reset on 'Show'\nparaview.simple._DisableFirstRenderCameraReset()\n\n# Create a new 'Render View'\nrenderView1 = CreateView('RenderView')\nrenderView1.ViewSize = [792, 551]\nrenderView1.AxesGrid = 'GridAxes3DActor'\nrenderView1.OrientationAxesLabelColor = [0.0, 0.0, 0.0]\nrenderView1.OrientationAxesOutlineColor = [0.0, 0.0, 0.0]\nrenderView1.CenterOfRotation = [0.5283281803131104, 0.1371404528617859, 0.48934412002563477]\nrenderView1.StereoType = 0\nrenderView1.CameraPosition = [1.2994298146982577, 0.5342821058510525, 10.024471534857792]\nrenderView1.CameraFocalPoint = [1.2994298146982577, 0.5342821058510525, -9.307525219633725]\nrenderView1.CameraParallelScale = 5.0034889399225255\nrenderView1.Background = [1.0, 1.0, 1.0]\n\n# init the 'GridAxes3DActor' selected for 'AxesGrid'\nrenderView1.AxesGrid.XTitleColor = [0.0, 0.0, 0.0]\nrenderView1.AxesGrid.YTitleColor = [0.0, 0.0, 0.0]\nrenderView1.AxesGrid.ZTitleColor = [0.0, 0.0, 0.0]\nrenderView1.AxesGrid.GridColor = [0.0, 0.0, 0.0]\nrenderView1.AxesGrid.XLabelColor = [0.0, 0.0, 0.0]\nrenderView1.AxesGrid.YLabelColor = [0.0, 0.0, 0.0]\nrenderView1.AxesGrid.ZLabelColor = [0.0, 0.0, 0.0]\n\n# ----------------------------------------------------------------\n# setup the data processing pipelines\n# ----------------------------------------------------------------\n\n# create a new 'Point Source'\npointSource1 = PointSource()\n\n# create a new 'Line'\nfrustum2 = Line()\n\n# create a new 'Sphere'\nsphere1 = Sphere()\nsphere1.Radius = 4.0\nsphere1.ThetaResolution = 40\nsphere1.PhiResolution = 40\n\n# create a new 'Line'\nfrustum1 = Line()\n\n# create a new 'Line'\ndolly = 
Line()\n\n# create a new 'Slice'\nelevationLine = Slice(Input=sphere1)\nelevationLine.SliceType = 'Plane'\nelevationLine.SliceOffsetValues = [0.0]\n\n# init the 'Plane' selected for 'SliceType'\nelevationLine.SliceType.Normal = [1.0, 0.0, -1.0]\n\n# create a new 'Legacy VTK Reader'\nteapotvtk = LegacyVTKReader(FileNames=['/Users/kmorel/papers/VTKmUsersGuide/data/teapot.vtk'])\n\n# create a new 'Plane'\nplane1 = Plane()\nplane1.Origin = [-1.0, -1.0, 0.0]\nplane1.Point1 = [1.0, -1.0, 0.0]\nplane1.Point2 = [-1.0, 1.0, 0.0]\n\n# create a new 'Transform'\nfarTranslate = Transform(Input=plane1)\nfarTranslate.Transform = 'Transform'\n\n# init the 'Transform' selected for 'Transform'\nfarTranslate.Transform.Translate = [0.0, 0.0, -2.0]\nfarTranslate.Transform.Scale = [1.5, 1.5, 1.5]\n\n# create a new 'Transform'\nfarElevation = Transform(Input=farTranslate)\nfarElevation.Transform = 'Transform'\n\n# init the 'Transform' selected for 'Transform'\nfarElevation.Transform.Rotate = [-15.0, 0.0, 0.0]\n\n# create a new 'Transform'\nfarAzimuth = Transform(Input=farElevation)\nfarAzimuth.Transform = 'Transform'\n\n# init the 'Transform' selected for 'Transform'\nfarAzimuth.Transform.Rotate = [0.0, 45.0, 0.0]\n\n# create a new 'Extract Edges'\nextractEdges2 = ExtractEdges(Input=farAzimuth)\n\n# create a new 'Slice'\nazimuth = Slice(Input=sphere1)\nazimuth.SliceType = 'Plane'\nazimuth.SliceOffsetValues = [0.0]\n\n# init the 'Plane' selected for 'SliceType'\nazimuth.SliceType.Normal = [1.0, -5.25, 1.0]\n\n# create a new 'Line'\nfrustum4 = Line()\n\n# create a new 'Legacy VTK Reader'\ncameravtk = LegacyVTKReader(FileNames=['/Users/kmorel/papers/VTKmUsersGuide/data/camera.vtk'])\n\n# create a new 'Transform'\ntranslate = Transform(Input=cameravtk)\ntranslate.Transform = 'Transform'\n\n# init the 'Transform' selected for 'Transform'\ntranslate.Transform.Translate = [0.0, 0.0, 4.0]\n\n# create a new 'Transform'\nelevation = Transform(Input=translate)\nelevation.Transform = 
'Transform'\n\n# init the 'Transform' selected for 'Transform'\nelevation.Transform.Rotate = [-15.0, 0.0, 0.0]\n\n# create a new 'Transform'\nazimuth_1 = Transform(Input=elevation)\nazimuth_1.Transform = 'Transform'\n\n# init the 'Transform' selected for 'Transform'\nazimuth_1.Transform.Rotate = [0.0, 45.0, 0.0]\n\n# create a new 'Generate Surface Normals'\ngenerateSurfaceNormals1 = GenerateSurfaceNormals(Input=azimuth_1)\n\n# create a new 'Transform'\ntransform1 = Transform(Input=teapotvtk)\ntransform1.Transform = 'Transform'\n\n# init the 'Transform' selected for 'Transform'\ntransform1.Transform.Translate = [0.0, -0.45, 0.0]\n\n# create a new 'Line'\nfrustum3 = Line()\n\n# create a new 'Transform'\nnearTranslate = Transform(Input=plane1)\nnearTranslate.Transform = 'Transform'\n\n# init the 'Transform' selected for 'Transform'\nnearTranslate.Transform.Translate = [0.0, 0.0, 2.0]\nnearTranslate.Transform.Scale = [0.5, 0.5, 0.5]\n\n# create a new 'Transform'\nnearElevation = Transform(Input=nearTranslate)\nnearElevation.Transform = 'Transform'\n\n# init the 'Transform' selected for 'Transform'\nnearElevation.Transform.Rotate = [-15.0, 0.0, 0.0]\n\n# create a new 'Transform'\nnearAzimuth = Transform(Input=nearElevation)\nnearAzimuth.Transform = 'Transform'\n\n# init the 'Transform' selected for 'Transform'\nnearAzimuth.Transform.Rotate = [0.0, 45.0, 0.0]\n\n# create a new 'Extract Edges'\nextractEdges1 = ExtractEdges(Input=nearAzimuth)\n\n# create a new 'Transform'\ntransform2 = Transform(Input=pointSource1)\ntransform2.Transform = 'Transform'\n\n# init the 'Transform' selected for 'Transform'\ntransform2.Transform.Translate = [0.0, 0.0, 4.0]\n\n# create a new 'Transform'\ntransform3 = Transform(Input=transform2)\ntransform3.Transform = 'Transform'\n\n# init the 'Transform' selected for 'Transform'\ntransform3.Transform.Rotate = [-15.0, 0.0, 0.0]\n\n# create a new 'Transform'\ntransform4 = Transform(Input=transform3)\ntransform4.Transform = 'Transform'\n\n# init the 
'Transform' selected for 'Transform'\ntransform4.Transform.Rotate = [0.0, 45.0, 0.0]\n\n# ----------------------------------------------------------------\n# setup the visualization in view 'renderView1'\n# ----------------------------------------------------------------\n\n# show data from transform1\ntransform1Display = Show(transform1, renderView1)\n# trace defaults for the display properties.\ntransform1Display.AmbientColor = [0.0, 0.0, 0.0]\ntransform1Display.ColorArrayName = [None, '']\ntransform1Display.GlyphType = 'Arrow'\ntransform1Display.CubeAxesColor = [0.0, 0.0, 0.0]\ntransform1Display.SetScaleArray = [None, '']\ntransform1Display.ScaleTransferFunction = 'PiecewiseFunction'\ntransform1Display.OpacityArray = [None, '']\ntransform1Display.OpacityTransferFunction = 'PiecewiseFunction'\n\n# show data from generateSurfaceNormals1\ngenerateSurfaceNormals1Display = Show(generateSurfaceNormals1, renderView1)\n# trace defaults for the display properties.\ngenerateSurfaceNormals1Display.AmbientColor = [0.0, 0.0, 0.0]\ngenerateSurfaceNormals1Display.ColorArrayName = [None, '']\ngenerateSurfaceNormals1Display.DiffuseColor = [0.4980392156862745, 0.4980392156862745, 0.4980392156862745]\ngenerateSurfaceNormals1Display.Specular = 1.0\ngenerateSurfaceNormals1Display.GlyphType = 'Arrow'\ngenerateSurfaceNormals1Display.CubeAxesColor = [0.0, 0.0, 0.0]\ngenerateSurfaceNormals1Display.SetScaleArray = [None, '']\ngenerateSurfaceNormals1Display.ScaleTransferFunction = 'PiecewiseFunction'\ngenerateSurfaceNormals1Display.OpacityArray = [None, '']\ngenerateSurfaceNormals1Display.OpacityTransferFunction = 'PiecewiseFunction'\n\n# show data from nearAzimuth\nnearAzimuthDisplay = Show(nearAzimuth, renderView1)\n# trace defaults for the display properties.\nnearAzimuthDisplay.AmbientColor = [0.0, 0.0, 0.0]\nnearAzimuthDisplay.ColorArrayName = [None, '']\nnearAzimuthDisplay.DiffuseColor = [1.0, 0.0, 0.0]\nnearAzimuthDisplay.Opacity = 0.1\nnearAzimuthDisplay.GlyphType = 
'Arrow'\nnearAzimuthDisplay.CubeAxesColor = [0.0, 0.0, 0.0]\nnearAzimuthDisplay.SetScaleArray = [None, '']\nnearAzimuthDisplay.ScaleTransferFunction = 'PiecewiseFunction'\nnearAzimuthDisplay.OpacityArray = [None, '']\nnearAzimuthDisplay.OpacityTransferFunction = 'PiecewiseFunction'\n\n# show data from farAzimuth\nfarAzimuthDisplay = Show(farAzimuth, renderView1)\n# trace defaults for the display properties.\nfarAzimuthDisplay.AmbientColor = [0.0, 0.0, 0.0]\nfarAzimuthDisplay.ColorArrayName = [None, '']\nfarAzimuthDisplay.DiffuseColor = [1.0, 0.0, 0.0]\nfarAzimuthDisplay.Opacity = 0.1\nfarAzimuthDisplay.GlyphType = 'Arrow'\nfarAzimuthDisplay.CubeAxesColor = [0.0, 0.0, 0.0]\nfarAzimuthDisplay.SetScaleArray = [None, '']\nfarAzimuthDisplay.ScaleTransferFunction = 'PiecewiseFunction'\nfarAzimuthDisplay.OpacityArray = [None, '']\nfarAzimuthDisplay.OpacityTransferFunction = 'PiecewiseFunction'\n\n# show data from frustum1\nfrustum1Display = Show(frustum1, renderView1)\n# trace defaults for the display properties.\nfrustum1Display.AmbientColor = [0.0, 0.0, 0.0]\nfrustum1Display.ColorArrayName = [None, '']\nfrustum1Display.DiffuseColor = [1.0, 0.0, 0.0]\nfrustum1Display.LineWidth = 2.0\nfrustum1Display.GlyphType = 'Arrow'\nfrustum1Display.CubeAxesColor = [0.0, 0.0, 0.0]\nfrustum1Display.SetScaleArray = [None, '']\nfrustum1Display.ScaleTransferFunction = 'PiecewiseFunction'\nfrustum1Display.OpacityArray = [None, '']\nfrustum1Display.OpacityTransferFunction = 'PiecewiseFunction'\n\n# show data from frustum2\nfrustum2Display = Show(frustum2, renderView1)\n# trace defaults for the display properties.\nfrustum2Display.AmbientColor = [0.0, 0.0, 0.0]\nfrustum2Display.ColorArrayName = [None, '']\nfrustum2Display.DiffuseColor = [1.0, 0.0, 0.0]\nfrustum2Display.LineWidth = 2.0\nfrustum2Display.GlyphType = 'Arrow'\nfrustum2Display.CubeAxesColor = [0.0, 0.0, 0.0]\nfrustum2Display.SetScaleArray = [None, '']\nfrustum2Display.ScaleTransferFunction = 
'PiecewiseFunction'\nfrustum2Display.OpacityArray = [None, '']\nfrustum2Display.OpacityTransferFunction = 'PiecewiseFunction'\n\n# show data from frustum3\nfrustum3Display = Show(frustum3, renderView1)\n# trace defaults for the display properties.\nfrustum3Display.AmbientColor = [0.0, 0.0, 0.0]\nfrustum3Display.ColorArrayName = [None, '']\nfrustum3Display.DiffuseColor = [1.0, 0.0, 0.0]\nfrustum3Display.LineWidth = 2.0\nfrustum3Display.GlyphType = 'Arrow'\nfrustum3Display.CubeAxesColor = [0.0, 0.0, 0.0]\nfrustum3Display.SetScaleArray = [None, '']\nfrustum3Display.ScaleTransferFunction = 'PiecewiseFunction'\nfrustum3Display.OpacityArray = [None, '']\nfrustum3Display.OpacityTransferFunction = 'PiecewiseFunction'\n\n# show data from frustum4\nfrustum4Display = Show(frustum4, renderView1)\n# trace defaults for the display properties.\nfrustum4Display.AmbientColor = [0.0, 0.0, 0.0]\nfrustum4Display.ColorArrayName = [None, '']\nfrustum4Display.DiffuseColor = [1.0, 0.0, 0.0]\nfrustum4Display.LineWidth = 2.0\nfrustum4Display.GlyphType = 'Arrow'\nfrustum4Display.CubeAxesColor = [0.0, 0.0, 0.0]\nfrustum4Display.SetScaleArray = [None, '']\nfrustum4Display.ScaleTransferFunction = 'PiecewiseFunction'\nfrustum4Display.OpacityArray = [None, '']\nfrustum4Display.OpacityTransferFunction = 'PiecewiseFunction'\n\n# show data from elevationLine\nelevationLineDisplay = Show(elevationLine, renderView1)\n# trace defaults for the display properties.\nelevationLineDisplay.Representation = 'Wireframe'\nelevationLineDisplay.AmbientColor = [0.0, 0.0, 0.0]\nelevationLineDisplay.ColorArrayName = [None, '']\nelevationLineDisplay.GlyphType = 'Arrow'\nelevationLineDisplay.CubeAxesColor = [0.0, 0.0, 0.0]\nelevationLineDisplay.SetScaleArray = [None, '']\nelevationLineDisplay.ScaleTransferFunction = 'PiecewiseFunction'\nelevationLineDisplay.OpacityArray = [None, '']\nelevationLineDisplay.OpacityTransferFunction = 'PiecewiseFunction'\n\n# show data from azimuth\nazimuthDisplay = Show(azimuth, 
renderView1)\n# trace defaults for the display properties.\nazimuthDisplay.Representation = 'Wireframe'\nazimuthDisplay.AmbientColor = [0.0, 0.0, 0.0]\nazimuthDisplay.ColorArrayName = [None, '']\nazimuthDisplay.GlyphType = 'Arrow'\nazimuthDisplay.CubeAxesColor = [0.0, 0.0, 0.0]\nazimuthDisplay.SetScaleArray = [None, '']\nazimuthDisplay.ScaleTransferFunction = 'PiecewiseFunction'\nazimuthDisplay.OpacityArray = [None, '']\nazimuthDisplay.OpacityTransferFunction = 'PiecewiseFunction'\n\n# show data from dolly\ndollyDisplay = Show(dolly, renderView1)\n# trace defaults for the display properties.\ndollyDisplay.Representation = 'Wireframe'\ndollyDisplay.AmbientColor = [0.0, 0.0, 0.0]\ndollyDisplay.ColorArrayName = [None, '']\ndollyDisplay.GlyphType = 'Arrow'\ndollyDisplay.CubeAxesColor = [0.0, 0.0, 0.0]\ndollyDisplay.SetScaleArray = [None, '']\ndollyDisplay.ScaleTransferFunction = 'PiecewiseFunction'\ndollyDisplay.OpacityArray = [None, '']\ndollyDisplay.OpacityTransferFunction = 'PiecewiseFunction'\n\n# show data from extractEdges1\nextractEdges1Display = Show(extractEdges1, renderView1)\n# trace defaults for the display properties.\nextractEdges1Display.Representation = 'Wireframe'\nextractEdges1Display.AmbientColor = [1.0, 0.0, 0.0]\nextractEdges1Display.ColorArrayName = [None, '']\nextractEdges1Display.LineWidth = 2.0\nextractEdges1Display.GlyphType = 'Arrow'\nextractEdges1Display.CubeAxesColor = [0.0, 0.0, 0.0]\nextractEdges1Display.SetScaleArray = [None, '']\nextractEdges1Display.ScaleTransferFunction = 'PiecewiseFunction'\nextractEdges1Display.OpacityArray = [None, '']\nextractEdges1Display.OpacityTransferFunction = 'PiecewiseFunction'\n\n# show data from extractEdges2\nextractEdges2Display = Show(extractEdges2, renderView1)\n# trace defaults for the display properties.\nextractEdges2Display.Representation = 'Wireframe'\nextractEdges2Display.AmbientColor = [1.0, 0.0, 0.0]\nextractEdges2Display.ColorArrayName = [None, '']\nextractEdges2Display.LineWidth = 
2.0\nextractEdges2Display.GlyphType = 'Arrow'\nextractEdges2Display.CubeAxesColor = [0.0, 0.0, 0.0]\nextractEdges2Display.SetScaleArray = [None, '']\nextractEdges2Display.ScaleTransferFunction = 'PiecewiseFunction'\nextractEdges2Display.OpacityArray = [None, '']\nextractEdges2Display.OpacityTransferFunction = 'PiecewiseFunction'\n"
},
{
"alpha_fraction": 0.7726969122886658,
"alphanum_fraction": 0.785380482673645,
"avg_line_length": 20.86861228942871,
"blob_id": "d42235e13b642fc74778f3111db280e73a630f49",
"content_id": "2d11bf3d25db6f35943b8fb4f51e8654cf7f925c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 2996,
"license_type": "no_license",
"max_line_length": 153,
"num_lines": 137,
"path": "/CMakeLists.txt",
"repo_name": "sujin-philip/VTKmUsersGuide",
"src_encoding": "UTF-8",
"text": "cmake_minimum_required(VERSION 2.8)\n\nproject(VTKmUsersGuide CXX C)\n\ninclude(UseLATEX.cmake)\n\nset(images\n images/TitleImage.png\n images/SandiaLogo.pdf\n images/DOELogo.pdf\n images/KitwareLogo.png\n images/CMakeGUIBlank.png\n images/CMakeGUI.png\n images/ArrayHandleStorage.pdf\n images/BasicGlut.png\n images/CameraViewRange2D.pdf\n images/CameraPositionOrientation.pdf\n images/CameraMovement.pdf\n images/CellConnectionsHexahedron.pdf\n images/CellConnectionsLine.pdf\n images/CellConnectionsPolygon.pdf\n images/CellConnectionsPyramid.pdf\n images/CellConnectionsQuadrilateral.pdf\n images/CellConnectionsTetrahedron.pdf\n images/CellConnectionsTriangle.pdf\n images/CellConnectionsVertex.pdf\n images/CellConnectionsWedge.pdf\n images/CellConstituents.pdf\n images/Dragon01.pdf\n images/Dragon02.pdf\n images/Dragon03.pdf\n images/Dragon04.pdf\n images/Dragon12.pdf\n images/ExplicitCellConnections.pdf\n images/Hilbert01.pdf\n images/Hilbert02.pdf\n images/Hilbert03.pdf\n images/Hilbert06.pdf\n images/Koch1.pdf\n images/Koch2.pdf\n images/Koch5.pdf\n images/KochApply.pdf\n images/KochParametric.pdf\n images/MCCompareCuda.png\n images/MCComparePiston.png\n images/MCCompareVTKm.png\n images/PackageHierarchy.pdf\n images/PointIncidentAngles.pdf\n images/QuadraticType2_1.pdf\n images/QuadraticType2_2.pdf\n images/QuadraticType2_4.pdf\n images/StructuredCellSet.pdf\n images/Tree01.pdf\n images/Tree02.pdf\n images/Tree08.pdf\n images/VTKmEnvironments.pdf\n images/WorkletExampleAnnotated.pdf\n )\n\nset(input_docs\n Body.tex\n Macros.tex\n LanguageDefinitions.tex\n TitlePage.tex\n Contributors.tex\n Introduction.tex\n BuildAndInstall.tex\n IO.tex\n ProvidedFilters.tex\n Rendering.tex\n BasicProvisions.tex\n ArrayHandle.tex\n DeviceAdapter.tex\n Timer.tex\n FancyArrayStorage.tex\n DynamicArrayHandle.tex\n DataSet.tex\n Policies.tex\n Worklets.tex\n CreatingFilters.tex\n Math.tex\n WorkingWithCells.tex\n VirtualObjects.tex\n ImplementingDeviceAdapters.tex\n 
OpenGLInteroperability.tex\n FunctionInterface.tex\n WorkletArguments.tex\n NewWorkletTypes.tex\n CodingConventions.tex\n )\n\noption(BUILD_EXAMPLES\n \"When on, the examples used in the document are compiled. The examples can be run with ctest, which will report an error if they do not run correctly.\"\n OFF\n )\n\nif (BUILD_EXAMPLES)\n enable_testing()\n include(CTest)\nendif ()\n\nset(VTKm_GUIDE_VERSION 1.1)\n\nadd_subdirectory(examples)\n\nset(add_latex_document_args\n INPUTS ${input_docs}\n CONFIGURE TitlePage.tex\n IMAGES ${images}\n IMAGE_DIRS images/ColorTables\n DEPENDS example-listings\n USE_INDEX\n )\n\nadd_latex_document(VTKmUsersGuide.tex\n ${add_latex_document_args}\n )\n\nadd_latex_document(GettingStarted.tex\n ${add_latex_document_args}\n EXCLUDE_FROM_ALL\n )\n\nadd_latex_document(Using.tex\n ${add_latex_document_args}\n EXCLUDE_FROM_ALL\n )\n\nadd_latex_document(Developing.tex\n ${add_latex_document_args}\n EXCLUDE_FROM_ALL\n )\n\nadd_latex_document(Advanced.tex\n ${add_latex_document_args}\n EXCLUDE_FROM_ALL\n )\n"
},
{
"alpha_fraction": 0.6407374143600464,
"alphanum_fraction": 0.6515287756919861,
"avg_line_length": 19.77570152282715,
"blob_id": "e090d2d43b2acddc4fe1aecce0eb024a54d20c94",
"content_id": "9a6a7988b227e8c9820e7bc11f8513b3fdab7e51",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2224,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 107,
"path": "/examples/ColorTables.cxx",
"repo_name": "sujin-philip/VTKmUsersGuide",
"src_encoding": "UTF-8",
"text": "\n#include <vtkm/rendering/CanvasRayTracer.h>\n#include <vtkm/rendering/ColorTable.h>\n\n#include <vtkm/cont/testing/Testing.h>\n\nnamespace {\n\nstatic const vtkm::Id TABLE_IMAGE_WIDTH = 300;\n\nstatic const char *tableNames[] = {\n \"blue\",\n \"orange\",\n \"cool2warm\",\n \"temperature\",\n \"rainbow\",\n \"levels\",\n \"dense\",\n \"sharp\",\n \"thermal\",\n \"IsoL\",\n \"CubicL\",\n \"CubicYF\",\n \"LinearL\",\n \"LinLhot\",\n \"PuRd\",\n \"Accent\",\n \"Blues\",\n \"BrBG\",\n \"BuGn\",\n \"BuPu\",\n \"Dark2\",\n \"GnBu\",\n \"Greens\",\n \"Greys\",\n \"Oranges\",\n \"OrRd\",\n \"Paired\",\n \"Pastel1\",\n \"Pastel2\",\n \"PiYG\",\n \"PRGn\",\n \"PuBu\",\n \"PuBuGn\",\n \"PuOr\",\n \"PuRd\",\n \"Purples\",\n \"RdBu\",\n \"RdGy\",\n \"RdPu\",\n \"RdYlBu\",\n \"RdYlGn\",\n \"Reds\",\n \"Set1\",\n \"Set2\",\n \"Set3\",\n \"Spectral\",\n \"YlGnBu\",\n \"YlGn\",\n \"YlOrBr\",\n \"YlOrRd\"\n};\n\nstatic const vtkm::Id NUM_TABLES = sizeof(tableNames)/sizeof(char *);\n\nvoid CreateColorTableImage(const std::string &name)\n{\n std::cout << \"Creating color table \" << name << std::endl;\n\n vtkm::rendering::ColorTable colorTable(name);\n\n // Create a CanvasRayTracer simply for the color buffer and the ability to\n // write out images.\n vtkm::rendering::CanvasRayTracer canvas(TABLE_IMAGE_WIDTH, 1);\n typedef vtkm::rendering::CanvasRayTracer::ColorBufferType ColorBufferType;\n ColorBufferType colorBuffer = canvas.GetColorBuffer();\n ColorBufferType::PortalControl colorPortal = colorBuffer.GetPortalControl();\n VTKM_TEST_ASSERT(colorPortal.GetNumberOfValues() == TABLE_IMAGE_WIDTH,\n \"Wrong size of color buffer.\");\n\n const vtkm::Float32 indexScale =\n 1.0f/static_cast<vtkm::Float32>(TABLE_IMAGE_WIDTH-1);\n for (vtkm::Id index = 0; index < TABLE_IMAGE_WIDTH; index++)\n {\n vtkm::Float32 scalar = static_cast<vtkm::Float32>(index)*indexScale;\n vtkm::rendering::Color color = colorTable.MapRGB(scalar);\n colorPortal.Set(index, color.Components);\n }\n\n 
canvas.SaveAs(name + \".ppm\");\n}\n\nvoid DoColorTables()\n{\n for (vtkm::Id tableNameIndex = 0;\n tableNameIndex < NUM_TABLES;\n tableNameIndex++)\n {\n CreateColorTableImage(tableNames[tableNameIndex]);\n }\n}\n\n} // anonymous namespace\n\nint ColorTables(int, char*[])\n{\n return vtkm::cont::testing::Testing::Run(DoColorTables);\n}\n"
},
{
"alpha_fraction": 0.6566128730773926,
"alphanum_fraction": 0.6617403030395508,
"avg_line_length": 28.211454391479492,
"blob_id": "85930037abd71901ea30c09ed379da5506de9162",
"content_id": "331cb5dea8c7d2197112e9df0b4112e302940d48",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 6631,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 227,
"path": "/examples/UseWorkletMapPointToCell.cxx",
"repo_name": "sujin-philip/VTKmUsersGuide",
"src_encoding": "UTF-8",
"text": "////\n//// BEGIN-EXAMPLE UseWorkletMapPointToCell.cxx\n////\n#include <vtkm/worklet/DispatcherMapTopology.h>\n#include <vtkm/worklet/WorkletMapTopology.h>\n\n#include <vtkm/cont/DataSet.h>\n#include <vtkm/cont/DataSetFieldAdd.h>\n\n#include <vtkm/exec/CellInterpolate.h>\n#include <vtkm/exec/ParametricCoordinates.h>\n\nnamespace vtkm {\nnamespace worklet {\n\nclass CellCenter : public vtkm::worklet::WorkletMapPointToCell\n{\npublic:\n typedef void ControlSignature(CellSetIn cellSet,\n FieldInPoint<> inputPointField,\n FieldOut<> outputCellField);\n typedef _3 ExecutionSignature(_1, PointCount, _2);\n\n typedef _1 InputDomain;\n\n template<typename CellShape,\n typename InputPointFieldType>\n VTKM_EXEC\n typename InputPointFieldType::ComponentType\n operator()(CellShape shape,\n vtkm::IdComponent numPoints,\n const InputPointFieldType &inputPointField) const\n {\n vtkm::Vec<vtkm::FloatDefault,3> parametricCenter =\n vtkm::exec::ParametricCoordinatesCenter(numPoints, shape, *this);\n return vtkm::exec::CellInterpolate(inputPointField,\n parametricCenter,\n shape,\n *this);\n }\n};\n\n}\n} // namespace vtkm::worklet\n\nVTKM_CONT\nvoid FindCellCenters(vtkm::cont::DataSet &dataSet)\n{\n vtkm::cont::ArrayHandle<vtkm::Vec<vtkm::FloatDefault,3> > cellCentersArray;\n\n vtkm::worklet::DispatcherMapTopology<vtkm::worklet::CellCenter> dispatcher;\n dispatcher.Invoke(dataSet.GetCellSet(),\n dataSet.GetCoordinateSystem().GetData(),\n cellCentersArray);\n\n vtkm::cont::DataSetFieldAdd dataSetFieldAdd;\n dataSetFieldAdd.AddCellField(dataSet, \"cell_center\", cellCentersArray);\n}\n////\n//// END-EXAMPLE UseWorkletMapPointToCell.cxx\n////\n\n#include <vtkm/filter/FilterCell.h>\n\n////\n//// BEGIN-EXAMPLE UseFilterCell.cxx\n////\nnamespace vtkm {\nnamespace filter {\n\nclass CellCenters : public vtkm::filter::FilterCell<CellCenters>\n{\npublic:\n VTKM_CONT\n CellCenters();\n\n template<typename ArrayHandleType, typename Policy, typename DeviceAdapter>\n VTKM_CONT\n 
vtkm::filter::ResultField\n DoExecute(const vtkm::cont::DataSet &inDataSet,\n const ArrayHandleType &inField,\n const vtkm::filter::FieldMetadata &FieldMetadata,\n vtkm::filter::PolicyBase<Policy>,\n DeviceAdapter);\n};\n\n}\n} // namespace vtkm::filter\n////\n//// END-EXAMPLE UseFilterCell.cxx\n////\n\n////\n//// BEGIN-EXAMPLE FilterCellImpl.cxx\n////\nnamespace vtkm {\nnamespace filter {\n\nVTKM_CONT\nCellCenters::CellCenters()\n{\n this->SetOutputFieldName(\"\");\n}\n\ntemplate<typename ArrayHandleType, typename Policy, typename DeviceAdapter>\nVTKM_CONT\nvtkm::filter::ResultField\nCellCenters::DoExecute(const vtkm::cont::DataSet &inDataSet,\n const ArrayHandleType &inField,\n const vtkm::filter::FieldMetadata &fieldMetadata,\n vtkm::filter::PolicyBase<Policy>,\n DeviceAdapter)\n{\n VTKM_IS_ARRAY_HANDLE(ArrayHandleType);\n VTKM_IS_DEVICE_ADAPTER_TAG(DeviceAdapter);\n\n if (!fieldMetadata.IsPointField())\n {\n throw vtkm::cont::ErrorBadType(\n \"Cell Centers filter operates on point data.\");\n }\n\n vtkm::cont::DynamicCellSet cellSet =\n inDataSet.GetCellSet(this->GetActiveCellSetIndex());\n\n using ValueType = typename ArrayHandleType::ValueType;\n vtkm::cont::ArrayHandle<ValueType> outField;\n\n vtkm::worklet::DispatcherMapTopology<vtkm::worklet::CellCenter, DeviceAdapter>\n dispatcher;\n\n dispatcher.Invoke(vtkm::filter::ApplyPolicy(cellSet, Policy()),\n inField,\n outField);\n\n std::string outFieldName = this->GetOutputFieldName();\n if (outFieldName == \"\")\n {\n outFieldName = fieldMetadata.GetName() + \"_center\";\n }\n\n return vtkm::filter::ResultField(inDataSet,\n outField,\n outFieldName,\n vtkm::cont::Field::ASSOC_CELL_SET,\n cellSet.GetName());\n}\n\n}\n} // namespace vtkm::filter\n////\n//// END-EXAMPLE FilterCellImpl.cxx\n////\n\n#include <vtkm/cont/testing/MakeTestDataSet.h>\n#include <vtkm/cont/testing/Testing.h>\n\nnamespace {\n\nvoid CheckCellCenters(const vtkm::cont::DataSet &dataSet)\n{\n std::cout << \"Checking cell centers.\" << 
std::endl;\n vtkm::cont::CellSetStructured<3> cellSet;\n dataSet.GetCellSet().CopyTo(cellSet);\n\n vtkm::cont::ArrayHandle<vtkm::Vec<vtkm::FloatDefault,3> > cellCentersArray;\n dataSet.GetField(\"cell_center\", vtkm::cont::Field::ASSOC_CELL_SET)\n .GetData().CopyTo(cellCentersArray);\n\n VTKM_TEST_ASSERT(\n cellSet.GetNumberOfCells() == cellCentersArray.GetNumberOfValues(),\n \"Cell centers array has wrong number of values.\");\n\n vtkm::Id3 cellDimensions = cellSet.GetCellDimensions() - vtkm::Id3(1);\n\n vtkm::cont::ArrayHandle<vtkm::Vec<vtkm::FloatDefault,3> >::PortalConstControl\n cellCentersPortal = cellCentersArray.GetPortalConstControl();\n\n vtkm::Id cellIndex = 0;\n for (vtkm::Id kIndex = 0; kIndex < cellDimensions[2]; kIndex++)\n {\n for (vtkm::Id jIndex = 0; jIndex < cellDimensions[1]; jIndex++)\n {\n for (vtkm::Id iIndex = 0; iIndex < cellDimensions[0]; iIndex++)\n {\n vtkm::Vec<vtkm::FloatDefault,3> center =\n cellCentersPortal.Get(cellIndex);\n VTKM_TEST_ASSERT(test_equal(center[0], iIndex+0.5), \"Bad X coord.\");\n VTKM_TEST_ASSERT(test_equal(center[1], jIndex+0.5), \"Bad Y coord.\");\n VTKM_TEST_ASSERT(test_equal(center[2], kIndex+0.5), \"Bad Z coord.\");\n cellIndex++;\n }\n }\n }\n}\n\nvoid Test()\n{\n vtkm::cont::testing::MakeTestDataSet makeTestDataSet;\n\n std::cout << \"Making test data set.\" << std::endl;\n vtkm::cont::DataSet dataSet = makeTestDataSet.Make3DUniformDataSet0();\n\n std::cout << \"Finding cell centers directly.\" << std::endl;\n FindCellCenters(dataSet);\n\n CheckCellCenters(dataSet);\n\n std::cout << \"Making fresh test data set.\" << std::endl;\n dataSet = makeTestDataSet.Make3DUniformDataSet0();\n\n std::cout << \"Finding cell centers with filter.\" << std::endl;\n vtkm::filter::CellCenters cellCentersFilter;\n cellCentersFilter.SetActiveCellSet(0);\n cellCentersFilter.SetOutputFieldName(\"cell_center\");\n vtkm::filter::ResultField results =\n cellCentersFilter.Execute(dataSet, dataSet.GetCoordinateSystem());\n\n 
CheckCellCenters(results.GetDataSet());\n}\n\n} // anonymous namespace\n\nint UseWorkletMapPointToCell(int, char *[])\n{\n return vtkm::cont::testing::Testing::Run(Test);\n}\n"
},
{
"alpha_fraction": 0.6555779576301575,
"alphanum_fraction": 0.6649865508079529,
"avg_line_length": 23.393442153930664,
"blob_id": "d5b425da927a35f555001acbd554163035360fe0",
"content_id": "b4d068776d3e649f9589b6f800561ffa428d3f9f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2976,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 122,
"path": "/examples/ArrayHandleTransform.cxx",
"repo_name": "sujin-philip/VTKmUsersGuide",
"src_encoding": "UTF-8",
"text": "#include <vtkm/Types.h>\n\n////\n//// BEGIN-EXAMPLE TransformArrayFunctor.cxx\n////\ntemplate<typename T>\nstruct ScaleBiasFunctor\n{\n VTKM_EXEC_CONT\n ScaleBiasFunctor(T scale = T(1), T bias = T(0))\n : Scale(scale), Bias(bias) { }\n\n VTKM_EXEC_CONT\n T operator()(T x) const\n {\n return this->Scale*x + this->Bias;\n }\n\n T Scale;\n T Bias;\n};\n////\n//// END-EXAMPLE TransformArrayFunctor.cxx\n////\n\n////\n//// BEGIN-EXAMPLE TransformArrayHandle.cxx\n////\n#include <vtkm/cont/ArrayHandleTransform.h>\n\ntemplate<typename ArrayHandleType>\nclass ArrayHandleScaleBias\n : public vtkm::cont::ArrayHandleTransform<\n typename ArrayHandleType::ValueType,\n ArrayHandleType,\n ScaleBiasFunctor<typename ArrayHandleType::ValueType> >\n{\npublic:\n VTKM_ARRAY_HANDLE_SUBCLASS(\n ArrayHandleScaleBias,\n (ArrayHandleScaleBias<ArrayHandleType>),\n (vtkm::cont::ArrayHandleTransform<\n typename ArrayHandleType::ValueType,\n ArrayHandleType,\n ScaleBiasFunctor<typename ArrayHandleType::ValueType> >)\n );\n\n VTKM_CONT\n ArrayHandleScaleBias(const ArrayHandleType &array,\n ValueType scale,\n ValueType bias)\n : Superclass(array, ScaleBiasFunctor<ValueType>(scale, bias)) { }\n};\n\ntemplate<typename ArrayHandleType>\nVTKM_CONT\nArrayHandleScaleBias<ArrayHandleType>\nmake_ArrayHandleScaleBias(const ArrayHandleType &array,\n typename ArrayHandleType::ValueType scale,\n typename ArrayHandleType::ValueType bias)\n{\n return ArrayHandleScaleBias<ArrayHandleType>(array, scale, bias);\n}\n////\n//// END-EXAMPLE TransformArrayHandle.cxx\n////\n\n#include <vtkm/cont/DeviceAdapter.h>\n\n#include <vtkm/cont/testing/Testing.h>\n\n#include <vector>\n\nnamespace {\n\ntemplate<typename ArrayHandleType>\nvoid CheckArray(const ArrayHandleType &array)\n{\n for(vtkm::Id index = 0; index < array.GetNumberOfValues(); index++)\n {\n VTKM_TEST_ASSERT(test_equal(array.GetPortalConstControl().Get(index),\n 2*TestValue(index,vtkm::Float32()) + 3),\n \"Bad transformed value.\");\n }\n}\n\nvoid 
Test()\n{\n std::vector<vtkm::Float32> buffer(10);\n for (size_t index = 0; index < buffer.size(); index++)\n {\n buffer[index] = TestValue(index, vtkm::Float32());\n }\n\n vtkm::cont::ArrayHandle<vtkm::Float32> array =\n vtkm::cont::make_ArrayHandle(buffer);\n\n CheckArray(\n ////\n //// BEGIN-EXAMPLE MakeArrayHandleTransform.cxx\n ////\n vtkm::cont::make_ArrayHandleTransform<vtkm::Float32>(\n array, ScaleBiasFunctor<vtkm::Float32>(2,3))\n ////\n //// END-EXAMPLE MakeArrayHandleTransform.cxx\n ////\n );\n\n ArrayHandleScaleBias<vtkm::cont::ArrayHandle<vtkm::Float32> >\n transformArray(array, 2, 3);\n\n CheckArray(transformArray);\n\n CheckArray(make_ArrayHandleScaleBias(array, 2, 3));\n}\n\n} // anonymous namespace\n\nint ArrayHandleTransform(int, char *[])\n{\n return vtkm::cont::testing::Testing::Run(Test);\n}\n"
},
{
"alpha_fraction": 0.6524428129196167,
"alphanum_fraction": 0.6641929745674133,
"avg_line_length": 26.406780242919922,
"blob_id": "726005ecf3e5355cc2c0c43a87c4e9597c0101f6",
"content_id": "4222c9e09310b55b72b4c3882079c1bb50f7ed20",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1617,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 59,
"path": "/examples/ArrayHandleConstant.cxx",
"repo_name": "sujin-philip/VTKmUsersGuide",
"src_encoding": "UTF-8",
"text": "#include <vtkm/cont/ArrayHandleConstant.h>\n\n#include <vtkm/cont/testing/Testing.h>\n\nnamespace {\n\ntemplate<typename ArrayHandleType>\nvoid CheckArray(const ArrayHandleType array,\n vtkm::Id expectedLength,\n typename ArrayHandleType::ValueType expectedValue)\n{\n VTKM_TEST_ASSERT(array.GetNumberOfValues() == expectedLength,\n \"Array has wrong size.\");\n\n typename ArrayHandleType::PortalConstControl portal =\n array.GetPortalConstControl();\n VTKM_TEST_ASSERT(portal.GetNumberOfValues() == expectedLength,\n \"Portal has wrong size.\");\n\n for (vtkm::Id index = 0; index < expectedLength; index++)\n {\n VTKM_TEST_ASSERT(test_equal(portal.Get(index), expectedValue),\n \"Array has wrong value.\");\n }\n}\n\nvoid Test()\n{\n ////\n //// BEGIN-EXAMPLE ArrayHandleConstant.cxx\n ////\n // Create an array of 50 entries, all containing the number 3. This could be\n // used, for example, to represent the sizes of all the polygons in a set\n // where we know all the polygons are triangles.\n vtkm::cont::ArrayHandleConstant<vtkm::Id> constantArray(3, 50);\n ////\n //// END-EXAMPLE ArrayHandleConstant.cxx\n ////\n\n CheckArray(constantArray, 50, 3);\n\n CheckArray(\n ////\n //// BEGIN-EXAMPLE MakeArrayHandleConstant.cxx\n ////\n // Create an array of 50 entries, all containing the number 3.\n vtkm::cont::make_ArrayHandleConstant(3, 50)\n ////\n //// END-EXAMPLE MakeArrayHandleConstant.cxx\n ////\n , 50, 3);\n}\n\n} // anonymous namespace\n\nint ArrayHandleConstant(int, char *[])\n{\n return vtkm::cont::testing::Testing::Run(Test);\n}\n"
},
{
"alpha_fraction": 0.6897196173667908,
"alphanum_fraction": 0.69532710313797,
"avg_line_length": 22.77777862548828,
"blob_id": "cbfa61fa08331882907254123fe28930846fe50f",
"content_id": "fbd39ccfed671935c786a04dae22ea6d795f190d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1070,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 45,
"path": "/examples/Timer.cxx",
"repo_name": "sujin-philip/VTKmUsersGuide",
"src_encoding": "UTF-8",
"text": "#include <vtkm/cont/ArrayHandle.h>\n#include <vtkm/cont/DataSet.h>\n#include <vtkm/cont/Timer.h>\n\n#include <vtkm/filter/PointElevation.h>\n\n#include <vtkm/cont/testing/MakeTestDataSet.h>\n#include <vtkm/cont/testing/Testing.h>\n\nnamespace {\n\nvoid DoTiming()\n{\n vtkm::cont::DataSet dataSet =\n vtkm::cont::testing::MakeTestDataSet().Make2DUniformDataSet0();\n ////\n //// BEGIN-EXAMPLE Timer.cxx\n ////\n vtkm::filter::PointElevation elevationFilter;\n\n vtkm::cont::Timer<> timer;\n\n vtkm::filter::ResultField result =\n elevationFilter.Execute(dataSet, dataSet.GetCoordinateSystem());\n\n // This code makes sure data is pulled back to the host in a host/device\n // architecture.\n vtkm::cont::ArrayHandle<vtkm::Float64> outArray;\n result.FieldAs(outArray);\n outArray.GetPortalConstControl();\n\n vtkm::Float64 elapsedTime = timer.GetElapsedTime();\n\n std::cout << \"Time to run: \" << elapsedTime << std::endl;\n ////\n //// END-EXAMPLE Timer.cxx\n ////\n}\n\n} // anonymous namespace\n\nint Timer(int, char *[])\n{\n return vtkm::cont::testing::Testing::Run(DoTiming);\n}\n"
},
{
"alpha_fraction": 0.6975769996643066,
"alphanum_fraction": 0.6993718147277832,
"avg_line_length": 25.743999481201172,
"blob_id": "5de407a2653fafa5342d05320530139c0b677a97",
"content_id": "682d7c3724df01e3b9bd4e9f87dade1047780982",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 3343,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 125,
"path": "/examples/DeviceAdapterTag.cxx",
"repo_name": "sujin-philip/VTKmUsersGuide",
"src_encoding": "UTF-8",
"text": "////\n//// BEGIN-EXAMPLE DefaultDeviceAdapter.cxx\n////\n// Uncomment one (and only one) of the following to reconfigure the VTK-m\n// code to use a particular device. Comment them all to automatically pick a\n// device.\n#define VTKM_DEVICE_ADAPTER VTKM_DEVICE_ADAPTER_SERIAL\n//#define VTKM_DEVICE_ADAPTER VTKM_DEVICE_ADAPTER_CUDA\n//#define VTKM_DEVICE_ADAPTER VTKM_DEVICE_ADAPTER_OPENMP\n//#define VTKM_DEVICE_ADAPTER VTKM_DEVICE_ADAPTER_TBB\n\n#include <vtkm/cont/DeviceAdapter.h>\n////\n//// END-EXAMPLE DefaultDeviceAdapter.cxx\n////\n\n#include <vtkm/cont/tbb/DeviceAdapterTBB.h>\n\n#include <vtkm/exec/FunctorBase.h>\n\n#include <vtkm/cont/testing/Testing.h>\n\nnamespace {\n\n////\n//// BEGIN-EXAMPLE DefaultDeviceTemplateArg.cxx\n////\ntemplate<typename Device = VTKM_DEFAULT_DEVICE_ADAPTER_TAG>\nstruct SetPortalFunctor : vtkm::exec::FunctorBase\n{\n VTKM_IS_DEVICE_ADAPTER_TAG(Device);\n\n typedef typename vtkm::cont::ArrayHandle<vtkm::Id>::\n ExecutionTypes<Device>::Portal ExecPortalType;\n ExecPortalType Portal;\n\n VTKM_CONT\n SetPortalFunctor(vtkm::cont::ArrayHandle<vtkm::Id> array, vtkm::Id size)\n : Portal(array.PrepareForOutput(size, Device()))\n { }\n\n //// PAUSE-EXAMPLE\n VTKM_CONT\n SetPortalFunctor(const ExecPortalType &portal) : Portal(portal)\n { }\n //// RESUME-EXAMPLE\n VTKM_EXEC\n void operator()(vtkm::Id index) const\n {\n //// PAUSE-EXAMPLE\n VTKM_ASSERT(index >= 0);\n VTKM_ASSERT(index < this->Portal.GetNumberOfValues());\n //// RESUME-EXAMPLE\n typedef typename ExecPortalType::ValueType ValueType;\n this->Portal.Set(index, TestValue(index, ValueType()));\n }\n};\n////\n//// END-EXAMPLE DefaultDeviceTemplateArg.cxx\n////\n\ntemplate<typename ExecPortalType,\n typename ArrayHandleType,\n typename Device>\nVTKM_CONT\nvoid TryUsingExecPortal(const ExecPortalType &execPortal,\n const ArrayHandleType &arrayHandle,\n Device)\n{\n typedef typename ArrayHandleType::ValueType ValueType;\n\n SetPortalFunctor<Device> functor(execPortal);\n\n 
vtkm::cont::DeviceAdapterAlgorithm<Device>::Schedule(\n functor, arrayHandle.GetNumberOfValues());\n\n typename ArrayHandleType::PortalConstControl contPortal =\n arrayHandle.GetPortalConstControl();\n for (vtkm::Id index = 0; index < arrayHandle.GetNumberOfValues(); index++)\n {\n VTKM_TEST_ASSERT(contPortal.Get(index) == TestValue(index, ValueType()),\n \"Bad value set.\");\n }\n}\n\nvoid UseTBBDeviceAdapter()\n{\n vtkm::cont::ArrayHandle<vtkm::Id> arrayHandle;\n\n vtkm::cont::ArrayHandle<vtkm::Id>::\n ExecutionTypes<vtkm::cont::DeviceAdapterTagTBB>::Portal portal =\n ////\n //// BEGIN-EXAMPLE SpecifyDeviceAdapter.cxx\n ////\n arrayHandle.PrepareForOutput(50, vtkm::cont::DeviceAdapterTagTBB());\n ////\n //// END-EXAMPLE SpecifyDeviceAdapter.cxx\n ////\n\n TryUsingExecPortal(portal, arrayHandle, vtkm::cont::DeviceAdapterTagTBB());\n}\n\nvoid UseDefaultDeviceAdapter()\n{\n vtkm::cont::ArrayHandle<vtkm::Id> arrayHandle;\n\n SetPortalFunctor<> functor(arrayHandle, 50);\n\n TryUsingExecPortal(functor.Portal,\n arrayHandle,\n VTKM_DEFAULT_DEVICE_ADAPTER_TAG());\n}\n\nvoid Test()\n{\n UseTBBDeviceAdapter();\n UseDefaultDeviceAdapter();\n}\n\n} // anonymous namespace\n\nint DeviceAdapterTag(int, char *[])\n{\n return vtkm::cont::testing::Testing::Run(Test);\n}\n"
},
{
"alpha_fraction": 0.641299307346344,
"alphanum_fraction": 0.6730858683586121,
"avg_line_length": 30.925926208496094,
"blob_id": "84d955cf7e313cbbd78f9d81af44608712d92650",
"content_id": "0641b37927a86b0fe8ea19bdc3a2c30e0dda4b93",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 4310,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 135,
"path": "/examples/ArrayHandleGroupVec.cxx",
"repo_name": "sujin-philip/VTKmUsersGuide",
"src_encoding": "UTF-8",
"text": "#include <vtkm/cont/ArrayHandleCounting.h>\n#include <vtkm/cont/ArrayHandleIndex.h>\n#include <vtkm/cont/ArrayHandleGroupVec.h>\n#include <vtkm/cont/ArrayHandleGroupVecVariable.h>\n\n#include <vtkm/cont/testing/Testing.h>\n\nnamespace {\n\ntemplate<typename ArrayHandleType>\nvoid CheckArray(ArrayHandleType array)\n{\n vtkm::cont::printSummary_ArrayHandle(array, std::cout);\n std::cout << std::endl;\n typename ArrayHandleType::PortalConstControl portal =\n array.GetPortalConstControl();\n\n vtkm::Id expectedValue = 0;\n for (vtkm::Id vecIndex = 0; vecIndex < portal.GetNumberOfValues(); ++vecIndex)\n {\n for (vtkm::IdComponent componentIndex = 0;\n componentIndex < portal.Get(vecIndex).GetNumberOfComponents();\n componentIndex++)\n {\n VTKM_TEST_ASSERT(portal.Get(vecIndex)[componentIndex] == expectedValue,\n \"Got bad value.\");\n ++expectedValue;\n }\n }\n}\n\nvoid ArrayHandleGroupVecBasic()\n{\n std::cout << \"ArrayHandleGroupVec\" << std::endl;\n\n ////\n //// BEGIN-EXAMPLE ArrayHandleGroupVecBasic.cxx\n ////\n // Create an array containing [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]\n using ArrayType = vtkm::cont::ArrayHandleIndex;\n ArrayType sourceArray(12);\n\n // Create an array containing [(0,1), (2,3), (4,5), (6,7), (8,9), (10,11)]\n vtkm::cont::ArrayHandleGroupVec<ArrayType,2> vec2Array(sourceArray);\n\n // Create an array containing [(0,1,2), (3,4,5), (6,7,8), (9,10,11)]\n vtkm::cont::ArrayHandleGroupVec<ArrayType,3> vec3Array(sourceArray);\n ////\n //// END-EXAMPLE ArrayHandleGroupVecBasic.cxx\n ////\n CheckArray(vec2Array);\n vtkm::cont::printSummary_ArrayHandle(vec3Array, std::cout);\n std::cout << std::endl;\n CheckArray(vec3Array);\n\n CheckArray(\n ////\n //// BEGIN-EXAMPLE MakeArrayHandleGroupVec.cxx\n ////\n // Create an array containing [(0,1,2,3), (4,5,6,7), (8,9,10,11)]\n vtkm::cont::make_ArrayHandleGroupVec<4>(sourceArray)\n ////\n //// END-EXAMPLE MakeArrayHandleGroupVec.cxx\n ////\n );\n}\n\nvoid ArrayHandleGroupVecVariable()\n{\n 
std::cout << \"ArrayHandleGroupVecVariable\" << std::endl;\n\n ////\n //// BEGIN-EXAMPLE ArrayHandleGroupVecVariable.cxx\n ////\n // Create an array of counts containing [4, 2, 3, 3]\n vtkm::IdComponent countBuffer[4] = { 4, 2, 3, 3 };\n vtkm::cont::ArrayHandle<vtkm::IdComponent> countArray =\n vtkm::cont::make_ArrayHandle(countBuffer, 4);\n\n // Convert the count array to an offset array [0, 4, 6, 9]\n // Returns the number of total components: 12\n vtkm::Id sourceArraySize;\n using OffsetArrayType = vtkm::cont::ArrayHandle<vtkm::Id>;\n OffsetArrayType offsetArray =\n vtkm::cont::ConvertNumComponentsToOffsets(countArray, sourceArraySize);\n //// PAUSE-EXAMPLE\n vtkm::cont::printSummary_ArrayHandle(offsetArray, std::cout);\n std::cout << std::endl;\n VTKM_TEST_ASSERT(sourceArraySize == 12, \"Bad source array size\");\n VTKM_TEST_ASSERT(offsetArray.GetPortalConstControl().Get(0) == 0,\n \"Unexpected offset value\");\n VTKM_TEST_ASSERT(offsetArray.GetPortalConstControl().Get(1) == 4,\n \"Unexpected offset value\");\n VTKM_TEST_ASSERT(offsetArray.GetPortalConstControl().Get(2) == 6,\n \"Unexpected offset value\");\n VTKM_TEST_ASSERT(offsetArray.GetPortalConstControl().Get(3) == 9,\n \"Unexpected offset value\");\n //// RESUME-EXAMPLE\n\n // Create an array containing [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]\n using SourceArrayType = vtkm::cont::ArrayHandleIndex;\n SourceArrayType sourceArray(sourceArraySize);\n\n // Create an array containing [(0,1,2,3), (4,5), (6,7,8), (9,10,11)]\n vtkm::cont::ArrayHandleGroupVecVariable<SourceArrayType,OffsetArrayType>\n vecVariableArray(sourceArray, offsetArray);\n ////\n //// END-EXAMPLE ArrayHandleGroupVecVariable.cxx\n ////\n CheckArray(vecVariableArray);\n\n CheckArray(\n ////\n //// BEGIN-EXAMPLE MakeArrayHandleGroupVecVariable.cxx\n ////\n // Create an array containing [(0,1,2,3), (4,5), (6,7,8), (9,10,11)]\n vtkm::cont::make_ArrayHandleGroupVecVariable(sourceArray, offsetArray)\n ////\n //// END-EXAMPLE 
MakeArrayHandleGroupVecVariable.cxx\n ////\n );\n}\n\nvoid Test()\n{\n ArrayHandleGroupVecBasic();\n ArrayHandleGroupVecVariable();\n}\n\n} // anonymous namespace\n\nint ArrayHandleGroupVec(int, char *[])\n{\n return vtkm::cont::testing::Testing::Run(Test);\n}\n"
},
{
"alpha_fraction": 0.6158856749534607,
"alphanum_fraction": 0.631225049495697,
"avg_line_length": 31.595890045166016,
"blob_id": "1a86e50346138256aa8e4cd4f26bbffd85d109cf",
"content_id": "43753817c1924161d124c61770361de6dea7c73b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 4759,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 146,
"path": "/examples/ScatterCounting.cxx",
"repo_name": "sujin-philip/VTKmUsersGuide",
"src_encoding": "UTF-8",
"text": "#include <vtkm/cont/ArrayHandleUniformPointCoordinates.h>\n\n#include <vtkm/worklet/DispatcherMapField.h>\n#include <vtkm/worklet/ScatterCounting.h>\n#include <vtkm/worklet/WorkletMapField.h>\n\n#include <vtkm/cont/testing/Testing.h>\n\nnamespace {\n\n////\n//// BEGIN-EXAMPLE ScatterCounting.cxx\n////\nstruct ClipPoints\n{\n class Count : public vtkm::worklet::WorkletMapField\n {\n public:\n typedef void ControlSignature(FieldIn<Vec3> points,\n FieldOut<IdComponentType> count);\n typedef _2 ExecutionSignature(_1);\n using InputDomain = _1;\n\n template<typename T>\n VTKM_CONT\n Count(const vtkm::Vec<T,3> &boundsMin, const vtkm::Vec<T,3> &boundsMax)\n : BoundsMin(boundsMin[0], boundsMin[1], boundsMin[2]),\n BoundsMax(boundsMax[0], boundsMax[1], boundsMax[2])\n { }\n\n template<typename T>\n VTKM_EXEC\n vtkm::IdComponent operator()(const vtkm::Vec<T,3> &point) const\n {\n return static_cast<vtkm::IdComponent>((this->BoundsMin[0] < point[0]) &&\n (this->BoundsMin[1] < point[1]) &&\n (this->BoundsMin[2] < point[2]) &&\n (this->BoundsMax[0] > point[0]) &&\n (this->BoundsMax[1] > point[1]) &&\n (this->BoundsMax[2] > point[2]));\n }\n\n private:\n vtkm::Vec<vtkm::FloatDefault,3> BoundsMin;\n vtkm::Vec<vtkm::FloatDefault,3> BoundsMax;\n };\n\n class Generate : public vtkm::worklet::WorkletMapField\n {\n public:\n typedef void ControlSignature(FieldIn<Vec3> inPoints,\n FieldOut<Vec3> outPoints);\n typedef void ExecutionSignature(_1, _2);\n using InputDomain = _1;\n\n ////\n //// BEGIN-EXAMPLE DeclareScatter.cxx\n ////\n using ScatterType = vtkm::worklet::ScatterCounting;\n\n VTKM_CONT\n ScatterType GetScatter() const { return this->Scatter; }\n ////\n //// END-EXAMPLE DeclareScatter.cxx\n ////\n\n template<typename CountArrayType, typename DeviceAdapterTag>\n VTKM_CONT\n Generate(const CountArrayType &countArray, DeviceAdapterTag)\n : Scatter(countArray, DeviceAdapterTag())\n {\n VTKM_IS_ARRAY_HANDLE(CountArrayType);\n }\n\n template<typename InType, typename 
OutType>\n VTKM_EXEC\n void operator()(const vtkm::Vec<InType,3> &inPoint,\n vtkm::Vec<OutType,3> &outPoint) const\n {\n // The scatter ensures that this method is only called for input points\n // that are passed to the output (where the count was 1). Thus, in this\n // case we know that we just need to copy the input to the output.\n outPoint = vtkm::Vec<OutType,3>(inPoint[0], inPoint[1], inPoint[2]);\n }\n\n private:\n ScatterType Scatter;\n };\n\n template<typename T, typename Storage, typename DeviceAdapterTag>\n VTKM_CONT\n static vtkm::cont::ArrayHandle<vtkm::Vec<T,3> >\n Run(const vtkm::cont::ArrayHandle<vtkm::Vec<T,3>, Storage> &pointArray,\n vtkm::Vec<T,3> boundsMin,\n vtkm::Vec<T,3> boundsMax,\n DeviceAdapterTag)\n {\n vtkm::cont::ArrayHandle<vtkm::IdComponent> countArray;\n\n ClipPoints::Count workletCount(boundsMin, boundsMax);\n vtkm::worklet::DispatcherMapField<ClipPoints::Count, DeviceAdapterTag>\n dispatcherCount(workletCount);\n dispatcherCount.Invoke(pointArray, countArray);\n\n vtkm::cont::ArrayHandle<vtkm::Vec<T,3> > clippedPointsArray;\n\n ClipPoints::Generate workletGenerate(countArray, DeviceAdapterTag());\n vtkm::worklet::DispatcherMapField<ClipPoints::Generate, DeviceAdapterTag>\n dispatcherGenerate(workletGenerate);\n dispatcherGenerate.Invoke(pointArray, clippedPointsArray);\n\n return clippedPointsArray;\n }\n};\n////\n//// END-EXAMPLE ScatterCounting.cxx\n////\n\nvoid Run()\n{\n std::cout << \"Trying clip points.\" << std::endl;\n vtkm::cont::ArrayHandleUniformPointCoordinates points(vtkm::Id3(10, 10, 10));\n vtkm::Vec<vtkm::FloatDefault,3> boundsMin(0.5f, 0.5f, 0.5f);\n vtkm::Vec<vtkm::FloatDefault,3> boundsMax(8.5f, 8.5f, 8.5f);\n\n VTKM_TEST_ASSERT(points.GetNumberOfValues() == 1000,\n \"Unexpected number of input points.\");\n\n vtkm::cont::ArrayHandle<vtkm::Vec<vtkm::FloatDefault,3> > clippedPoints =\n ClipPoints::Run(points,\n boundsMin,\n boundsMax,\n VTKM_DEFAULT_DEVICE_ADAPTER_TAG());\n\n 
vtkm::cont::printSummary_ArrayHandle(clippedPoints, std::cout);\n std::cout << std::endl;\n VTKM_TEST_ASSERT(clippedPoints.GetNumberOfValues() == 512,\n \"Unexpected number of output points.\");\n}\n\n} // anonymous namespace\n\nint ScatterCounting(int, char *[])\n{\n return vtkm::cont::testing::Testing::Run(Run);\n}\n"
},
{
"alpha_fraction": 0.6683990359306335,
"alphanum_fraction": 0.6758778095245361,
"avg_line_length": 27.480144500732422,
"blob_id": "ea7e685df002fe99118979842d65295b0007da5c",
"content_id": "fa7ef7580f8feb2c54c8d93f8c8d0e262768cdcf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 7889,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 277,
"path": "/examples/UseWorkletMapField.cxx",
"repo_name": "sujin-philip/VTKmUsersGuide",
"src_encoding": "UTF-8",
"text": "////\n//// BEGIN-EXAMPLE UseWorkletMapField.cxx\n////\n#include <vtkm/worklet/DispatcherMapField.h>\n#include <vtkm/worklet/WorkletMapField.h>\n\n#include <vtkm/cont/ArrayHandle.h>\n#include <vtkm/cont/DynamicArrayHandle.h>\n\n#include <vtkm/VectorAnalysis.h>\n\nnamespace vtkm {\nnamespace worklet {\n\nclass Magnitude : public vtkm::worklet::WorkletMapField\n{\npublic:\n //// BEGIN-EXAMPLE ControlSignature.cxx\n typedef void ControlSignature(FieldIn<VecAll> inputVectors,\n FieldOut<Scalar> outputMagnitudes);\n //// END-EXAMPLE ControlSignature.cxx\n //// BEGIN-EXAMPLE ExecutionSignature.cxx\n typedef _2 ExecutionSignature(_1);\n //// END-EXAMPLE ExecutionSignature.cxx\n\n //// BEGIN-EXAMPLE InputDomain.cxx\n typedef _1 InputDomain;\n //// END-EXAMPLE InputDomain.cxx\n\n //// BEGIN-EXAMPLE WorkletOperator.cxx\n template<typename T, vtkm::IdComponent Size>\n VTKM_EXEC\n T operator()(const vtkm::Vec<T,Size> &inVector) const\n {\n //// END-EXAMPLE WorkletOperator.cxx\n return vtkm::Magnitude(inVector);\n }\n};\n\n}\n} // namespace vtkm::worklet\n\nVTKM_CONT\nvtkm::cont::DynamicArrayHandle\nInvokeMagnitude(vtkm::cont::DynamicArrayHandle input)\n{\n vtkm::cont::ArrayHandle<vtkm::FloatDefault> output;\n\n vtkm::worklet::DispatcherMapField<vtkm::worklet::Magnitude> dispatcher;\n dispatcher.Invoke(input, output);\n\n return vtkm::cont::DynamicArrayHandle(output);\n}\n////\n//// END-EXAMPLE UseWorkletMapField.cxx\n////\n\n#include <vtkm/filter/FilterField.h>\n\n////\n//// BEGIN-EXAMPLE UseFilterField.cxx\n////\nnamespace vtkm {\nnamespace filter {\n\nclass FieldMagnitude : public vtkm::filter::FilterField<FieldMagnitude>\n{\npublic:\n VTKM_CONT\n FieldMagnitude();\n\n template<typename ArrayHandleType, typename Policy, typename DeviceAdapter>\n VTKM_CONT\n vtkm::filter::ResultField\n DoExecute(const vtkm::cont::DataSet &inDataSet,\n const ArrayHandleType &inField,\n const vtkm::filter::FieldMetadata &fieldMetadata,\n vtkm::filter::PolicyBase<Policy>,\n 
DeviceAdapter);\n};\n\ntemplate<>\nclass FilterTraits<vtkm::filter::FieldMagnitude>\n{\npublic:\n struct InputFieldTypeList :\n vtkm::ListTagBase<vtkm::Vec<vtkm::Float32,2>,\n vtkm::Vec<vtkm::Float64,2>,\n vtkm::Vec<vtkm::Float32,3>,\n vtkm::Vec<vtkm::Float64,3>,\n vtkm::Vec<vtkm::Float32,4>,\n vtkm::Vec<vtkm::Float64,4> >\n { };\n};\n\n}\n} // namespace vtkm::filter\n////\n//// END-EXAMPLE UseFilterField.cxx\n////\n\n////\n//// BEGIN-EXAMPLE FilterFieldImpl.cxx\n////\nnamespace vtkm {\nnamespace filter {\n\nVTKM_CONT\nFieldMagnitude::FieldMagnitude()\n{\n this->SetOutputFieldName(\"\");\n}\n\ntemplate<typename ArrayHandleType, typename Policy, typename DeviceAdapter>\nVTKM_CONT\nvtkm::filter::ResultField\nFieldMagnitude::DoExecute(const vtkm::cont::DataSet &inDataSet,\n const ArrayHandleType &inField,\n const vtkm::filter::FieldMetadata &fieldMetadata,\n vtkm::filter::PolicyBase<Policy>,\n DeviceAdapter)\n{\n VTKM_IS_ARRAY_HANDLE(ArrayHandleType);\n VTKM_IS_DEVICE_ADAPTER_TAG(DeviceAdapter);\n\n using ComponentType = typename ArrayHandleType::ValueType::ComponentType;\n vtkm::cont::ArrayHandle<ComponentType> outField;\n\n vtkm::worklet::DispatcherMapField<vtkm::worklet::Magnitude, DeviceAdapter>\n dispatcher;\n dispatcher.Invoke(inField, outField);\n\n std::string outFieldName = this->GetOutputFieldName();\n if (outFieldName == \"\")\n {\n outFieldName = fieldMetadata.GetName() + \"_magnitude\";\n }\n\n return vtkm::filter::ResultField(inDataSet,\n outField,\n outFieldName,\n fieldMetadata.GetAssociation(),\n fieldMetadata.GetCellSetName());\n}\n\n}\n} // namespace vtkm::filter\n////\n//// END-EXAMPLE FilterFieldImpl.cxx\n////\n\n////\n//// BEGIN-EXAMPLE RandomArrayAccess.cxx\n////\nnamespace vtkm {\nnamespace worklet {\n\nstruct ReverseArrayCopy : vtkm::worklet::WorkletMapField\n{\n typedef void ControlSignature(FieldIn<> inputArray,\n WholeArrayOut<> outputArray);\n typedef void ExecutionSignature(_1, _2, WorkIndex);\n typedef _1 InputDomain;\n\n 
template<typename InputType, typename OutputArrayPortalType>\n VTKM_EXEC\n void operator()(const InputType &inputValue,\n const OutputArrayPortalType &outputArrayPortal,\n vtkm::Id workIndex) const\n {\n vtkm::Id outIndex = outputArrayPortal.GetNumberOfValues() - workIndex - 1;\n if (outIndex >= 0)\n {\n outputArrayPortal.Set(outIndex, inputValue);\n }\n else\n {\n this->RaiseError(\"Output array not sized correctly.\");\n }\n }\n};\n\n}\n} // namespace vtkm::worklet\n\ntemplate<typename T, typename Storage>\nVTKM_CONT\nvtkm::cont::ArrayHandle<T>\nInvokeReverseArrayCopy(const vtkm::cont::ArrayHandle<T,Storage> &inArray)\n{\n vtkm::cont::ArrayHandle<T> outArray;\n outArray.Allocate(inArray.GetNumberOfValues());\n\n vtkm::worklet::DispatcherMapField<vtkm::worklet::ReverseArrayCopy> dispatcher;\n dispatcher.Invoke(inArray, outArray);\n\n return outArray;\n}\n////\n//// END-EXAMPLE RandomArrayAccess.cxx\n////\n\n#include <vtkm/cont/DataSetFieldAdd.h>\n#include <vtkm/cont/testing/Testing.h>\n\nnamespace {\n\nvoid Test()\n{\n static const vtkm::Id ARRAY_SIZE = 10;\n typedef vtkm::Vec<vtkm::FloatDefault,3> Vec3;\n Vec3 inputBuffer[ARRAY_SIZE];\n\n for (vtkm::Id index = 0; index < ARRAY_SIZE; index++)\n {\n inputBuffer[index] = TestValue(index, Vec3());\n }\n\n vtkm::cont::ArrayHandle<Vec3> inputArray =\n vtkm::cont::make_ArrayHandle(inputBuffer, ARRAY_SIZE);\n\n vtkm::cont::DynamicArrayHandle outputDynamicArray =\n InvokeMagnitude(inputArray);\n vtkm::cont::ArrayHandle<vtkm::FloatDefault> outputArray;\n outputDynamicArray.CopyTo(outputArray);\n\n VTKM_TEST_ASSERT(outputArray.GetNumberOfValues() == ARRAY_SIZE,\n \"Bad output array size.\");\n for (vtkm::Id index = 0; index < ARRAY_SIZE; index++)\n {\n Vec3 testValue = TestValue(index, Vec3());\n vtkm::Float64 expectedValue = sqrt(vtkm::dot(testValue,testValue));\n vtkm::Float64 gotValue = outputArray.GetPortalConstControl().Get(index);\n VTKM_TEST_ASSERT(test_equal(expectedValue, gotValue), \"Got bad value.\");\n }\n 
outputArray.ReleaseResources();\n\n vtkm::cont::DataSet inputDataSet;\n vtkm::cont::CellSetStructured<1> cellSet(\"1D_mesh\");\n cellSet.SetPointDimensions(ARRAY_SIZE);\n inputDataSet.AddCellSet(cellSet);\n vtkm::cont::DataSetFieldAdd::AddPointField(\n inputDataSet, \"test_values\", inputArray);\n\n vtkm::filter::FieldMagnitude fieldMagFilter;\n vtkm::filter::ResultField magResult =\n fieldMagFilter.Execute(inputDataSet, \"test_values\");\n magResult.FieldAs(outputArray);\n\n VTKM_TEST_ASSERT(outputArray.GetNumberOfValues() == ARRAY_SIZE,\n \"Bad output array size.\");\n for (vtkm::Id index = 0; index < ARRAY_SIZE; index++)\n {\n Vec3 testValue = TestValue(index, Vec3());\n vtkm::Float64 expectedValue = sqrt(vtkm::dot(testValue,testValue));\n vtkm::Float64 gotValue = outputArray.GetPortalConstControl().Get(index);\n VTKM_TEST_ASSERT(test_equal(expectedValue, gotValue), \"Got bad value.\");\n }\n\n vtkm::cont::ArrayHandle<Vec3> outputArray2 =\n InvokeReverseArrayCopy(inputArray);\n VTKM_TEST_ASSERT(outputArray2.GetNumberOfValues() == ARRAY_SIZE,\n \"Bad output array size.\");\n for (vtkm::Id index = 0; index < ARRAY_SIZE; index++)\n {\n Vec3 expectedValue = TestValue(ARRAY_SIZE - index - 1, Vec3());\n Vec3 gotValue = outputArray2.GetPortalConstControl().Get(index);\n VTKM_TEST_ASSERT(test_equal(expectedValue, gotValue), \"Got bad value.\");\n }\n}\n\n} // anonymous namespace\n\nint UseWorkletMapField(int, char *[])\n{\n return vtkm::cont::testing::Testing::Run(Test);\n}\n"
},
{
"alpha_fraction": 0.6773936152458191,
"alphanum_fraction": 0.686979353427887,
"avg_line_length": 26.907642364501953,
"blob_id": "3a85706137183e2c329c66716d3315cf8b417144",
"content_id": "329b69fbe736dc379e5aa443a50da00160c19f1d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 8763,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 314,
"path": "/examples/DynamicArrayHandle.cxx",
"repo_name": "sujin-philip/VTKmUsersGuide",
"src_encoding": "UTF-8",
"text": "#include <vtkm/cont/ArrayHandle.h>\n#include <vtkm/cont/ArrayHandleCounting.h>\n#include <vtkm/cont/ArrayHandleIndex.h>\n#include <vtkm/cont/DeviceAdapter.h>\n#include <vtkm/cont/DynamicArrayHandle.h>\n\n#include <vtkm/cont/internal/StorageError.h>\n\n#include <vtkm/VecTraits.h>\n\n#include <vtkm/cont/testing/Testing.h>\n\nnamespace {\n\n////\n//// BEGIN-EXAMPLE CreateDynamicArrayHandle.cxx\n////\nVTKM_CONT\nvtkm::cont::DynamicArrayHandle\nLoadDynamicArray(const void *buffer, vtkm::Id length, std::string type)\n{\n vtkm::cont::DynamicArrayHandle handle;\n if (type == \"float\")\n {\n vtkm::cont::ArrayHandle<vtkm::Float32> concreteArray =\n vtkm::cont::make_ArrayHandle(\n reinterpret_cast<const vtkm::Float32*>(buffer), length);\n handle = concreteArray;\n } else if (type == \"int\") {\n vtkm::cont::ArrayHandle<vtkm::Int32> concreteArray =\n vtkm::cont::make_ArrayHandle(\n reinterpret_cast<const vtkm::Int32*>(buffer), length);\n handle = concreteArray;\n }\n return handle;\n}\n////\n//// END-EXAMPLE CreateDynamicArrayHandle.cxx\n////\n\nvoid TryLoadDynamicArray()\n{\n vtkm::Float32 scalarBuffer[10];\n vtkm::cont::DynamicArrayHandle handle =\n LoadDynamicArray(scalarBuffer, 10, \"float\");\n VTKM_TEST_ASSERT(\n (handle.IsTypeAndStorage<vtkm::Float32, VTKM_DEFAULT_STORAGE_TAG>()),\n \"Type not right.\");\n VTKM_TEST_ASSERT(\n !(handle.IsTypeAndStorage<vtkm::Int32, VTKM_DEFAULT_STORAGE_TAG>()),\n \"Type not right.\");\n\n vtkm::Int32 idBuffer[10];\n handle = LoadDynamicArray(idBuffer, 10, \"int\");\n VTKM_TEST_ASSERT(\n (handle.IsTypeAndStorage<vtkm::Int32, VTKM_DEFAULT_STORAGE_TAG>()),\n \"Type not right.\");\n VTKM_TEST_ASSERT(\n !(handle.IsTypeAndStorage<vtkm::Float32, VTKM_DEFAULT_STORAGE_TAG>()),\n \"Type not right.\");\n}\n\nvoid NonTypeQueriesDynamicArrayHandle()\n{\n ////\n //// BEGIN-EXAMPLE NonTypeQueriesDynamicArrayHandle.cxx\n ////\n std::vector<vtkm::Float32> scalarBuffer(10);\n vtkm::cont::DynamicArrayHandle scalarDynamicHandle(\n 
vtkm::cont::make_ArrayHandle(scalarBuffer));\n\n // This returns 10.\n vtkm::Id scalarArraySize = scalarDynamicHandle.GetNumberOfValues();\n\n // This returns 1.\n vtkm::IdComponent scalarComponents =\n scalarDynamicHandle.GetNumberOfComponents();\n //// PAUSE-EXAMPLE\n VTKM_TEST_ASSERT(scalarArraySize == 10, \"Got wrong array size.\");\n VTKM_TEST_ASSERT(scalarComponents == 1, \"Got wrong vec size.\");\n //// RESUME-EXAMPLE\n\n std::vector<vtkm::Vec<vtkm::Float32,3> > vectorBuffer(20);\n vtkm::cont::DynamicArrayHandle vectorDynamicHandle(\n vtkm::cont::make_ArrayHandle(vectorBuffer));\n\n // This returns 20.\n vtkm::Id vectorArraySize = vectorDynamicHandle.GetNumberOfValues();\n\n // This returns 3.\n vtkm::IdComponent vectorComponents =\n vectorDynamicHandle.GetNumberOfComponents();\n //// PAUSE-EXAMPLE\n VTKM_TEST_ASSERT(vectorArraySize == 20, \"Got wrong array size.\");\n VTKM_TEST_ASSERT(vectorComponents == 3, \"Got wrong vec size.\");\n //// RESUME-EXAMPLE\n ////\n //// END-EXAMPLE NonTypeQueriesDynamicArrayHandle.cxx\n ////\n}\n\nvoid DynamicArrayHandleNewInstance()\n{\n ////\n //// BEGIN-EXAMPLE DynamicArrayHandleNewInstance.cxx\n ////\n std::vector<vtkm::Float32> scalarBuffer(10);\n vtkm::cont::DynamicArrayHandle dynamicHandle(\n vtkm::cont::make_ArrayHandle(scalarBuffer));\n\n // This creates a new empty array of type Float32.\n vtkm::cont::DynamicArrayHandle newDynamicArray = dynamicHandle.NewInstance();\n ////\n //// END-EXAMPLE DynamicArrayHandleNewInstance.cxx\n ////\n\n VTKM_TEST_ASSERT(newDynamicArray.GetNumberOfValues() == 0,\n \"New array not empty.\");\n VTKM_TEST_ASSERT((newDynamicArray.IsTypeAndStorage<\n vtkm::Float32,VTKM_DEFAULT_STORAGE_TAG>()),\n \"New array is wrong type.\");\n}\n\nvoid QueryCastDynamicArrayHandle()\n{\n ////\n //// BEGIN-EXAMPLE QueryDynamicArrayHandle.cxx\n ////\n std::vector<vtkm::Float32> scalarBuffer(10);\n vtkm::cont::ArrayHandle<vtkm::Float32> concreteHandle =\n vtkm::cont::make_ArrayHandle(scalarBuffer);\n 
vtkm::cont::DynamicArrayHandle dynamicHandle(concreteHandle);\n\n // This returns true\n bool isFloat32Array = dynamicHandle.IsSameType(concreteHandle);\n\n // This returns false\n bool isIdArray =\n dynamicHandle.IsType<vtkm::cont::ArrayHandle<vtkm::Id> >();\n\n // This returns true\n bool isFloat32 =\n dynamicHandle.IsTypeAndStorage<vtkm::Float32,VTKM_DEFAULT_STORAGE_TAG>();\n\n // This returns false\n bool isId =\n dynamicHandle.IsTypeAndStorage<vtkm::Id,VTKM_DEFAULT_STORAGE_TAG>();\n\n // This returns false\n bool isErrorStorage = dynamicHandle.IsTypeAndStorage<\n vtkm::Float32,\n vtkm::cont::ArrayHandleCounting<vtkm::Float32>::StorageTag>();\n ////\n //// END-EXAMPLE QueryDynamicArrayHandle.cxx\n ////\n\n VTKM_TEST_ASSERT(isFloat32Array, \"Didn't query right.\");\n VTKM_TEST_ASSERT(!isIdArray, \"Didn't query right.\");\n VTKM_TEST_ASSERT(isFloat32, \"Didn't query right.\");\n VTKM_TEST_ASSERT(!isId, \"Didn't query right.\");\n VTKM_TEST_ASSERT(!isErrorStorage, \"Didn't query right.\");\n\n ////\n //// BEGIN-EXAMPLE CastDynamicArrayHandle.cxx\n ////\n dynamicHandle.CopyTo(concreteHandle);\n ////\n //// END-EXAMPLE CastDynamicArrayHandle.cxx\n ////\n\n VTKM_TEST_ASSERT(concreteHandle.GetNumberOfValues() == 10,\n \"Unexpected length\");\n}\n\n////\n//// BEGIN-EXAMPLE UsingCastAndCall.cxx\n////\nstruct PrintArrayContentsFunctor\n{\n template<typename T, typename Storage>\n VTKM_CONT\n void operator()(const vtkm::cont::ArrayHandle<T,Storage> &array) const\n {\n this->PrintArrayPortal(array.GetPortalConstControl());\n }\n\nprivate:\n template<typename PortalType>\n VTKM_CONT\n void PrintArrayPortal(const PortalType &portal) const\n {\n for (vtkm::Id index = 0; index < portal.GetNumberOfValues(); index++)\n {\n // All ArrayPortal objects have ValueType for the type of each value.\n typedef typename PortalType::ValueType ValueType;\n\n ValueType value = portal.Get(index);\n\n vtkm::IdComponent numComponents =\n 
vtkm::VecTraits<ValueType>::GetNumberOfComponents(value);\n for (vtkm::IdComponent componentIndex = 0;\n componentIndex < numComponents;\n componentIndex++)\n {\n std::cout << \" \"\n << vtkm::VecTraits<ValueType>::GetComponent(value,\n componentIndex);\n }\n std::cout << std::endl;\n }\n }\n};\n\ntemplate<typename DynamicArrayType>\nvoid PrintArrayContents(const DynamicArrayType &array)\n{\n array.CastAndCall(PrintArrayContentsFunctor());\n}\n////\n//// END-EXAMPLE UsingCastAndCall.cxx\n////\n\nnamespace second_def {\n\n////\n//// BEGIN-EXAMPLE DynamicArrayHandleBase.cxx\n////\ntemplate<typename TypeList, typename StorageList>\nvoid PrintArrayContents(\n const vtkm::cont::DynamicArrayHandleBase<TypeList,StorageList> &array)\n{\n array.CastAndCall(PrintArrayContentsFunctor());\n}\n////\n//// END-EXAMPLE DynamicArrayHandleBase.cxx\n////\n\n} // namespace second_def\n\n////\n//// BEGIN-EXAMPLE CastAndCallStorage.cxx\n////\nstruct MyIdStorageList :\n vtkm::ListTagBase<\n vtkm::cont::StorageTagBasic,\n vtkm::cont::ArrayHandleIndex::StorageTag>\n{ };\n\nvoid PrintIds(vtkm::cont::DynamicArrayHandle array)\n{\n PrintArrayContents(array.ResetStorageList(MyIdStorageList()));\n}\n////\n//// END-EXAMPLE CastAndCallStorage.cxx\n////\n\nvoid TryPrintArrayContents()\n{\n vtkm::cont::ArrayHandleIndex implicitArray(10);\n\n vtkm::cont::ArrayHandle<vtkm::Id> concreteArray;\n vtkm::cont::DeviceAdapterAlgorithm<VTKM_DEFAULT_DEVICE_ADAPTER_TAG>::Copy(\n implicitArray, concreteArray);\n\n vtkm::cont::DynamicArrayHandle dynamicArray = concreteArray;\n\n second_def::PrintArrayContents(dynamicArray);\n\n ////\n //// BEGIN-EXAMPLE CastAndCallAllTypes.cxx\n ////\n PrintArrayContents(dynamicArray.ResetTypeList(vtkm::TypeListTagAll()));\n ////\n //// END-EXAMPLE CastAndCallAllTypes.cxx\n ////\n\n ////\n //// BEGIN-EXAMPLE CastAndCallSingleType.cxx\n ////\n PrintArrayContents(dynamicArray.ResetTypeList(vtkm::TypeListTagId()));\n ////\n //// END-EXAMPLE CastAndCallSingleType.cxx\n ////\n\n 
dynamicArray = implicitArray;\n\n PrintIds(dynamicArray);\n\n ////\n //// BEGIN-EXAMPLE CastAndCallTypeAndStorage.cxx\n ////\n PrintArrayContents(dynamicArray.\n ResetTypeList(vtkm::TypeListTagId()).\n ResetStorageList(MyIdStorageList()));\n ////\n //// END-EXAMPLE CastAndCallTypeAndStorage.cxx\n ////\n}\n\nvoid Test()\n{\n TryLoadDynamicArray();\n NonTypeQueriesDynamicArrayHandle();\n DynamicArrayHandleNewInstance();\n QueryCastDynamicArrayHandle();\n TryPrintArrayContents();\n}\n\n} // anonymous namespace\n\nint DynamicArrayHandle(int, char *[])\n{\n return vtkm::cont::testing::Testing::Run(Test);\n}\n"
},
{
"alpha_fraction": 0.6827957034111023,
"alphanum_fraction": 0.6899641752243042,
"avg_line_length": 17,
"blob_id": "a46d8b7f9ad40869ebf935b64bf098b955732bba",
"content_id": "a27fdeb31291551987725682b9ac89b38f3aa1a2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1116,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 62,
"path": "/examples/EnvironmentModifierMacros.cxx",
"repo_name": "sujin-philip/VTKmUsersGuide",
"src_encoding": "UTF-8",
"text": "#include <vtkm/Types.h>\n\n#include <vtkm/testing/Testing.h>\n\nnamespace {\n\n////\n//// BEGIN-EXAMPLE EnvironmentModifierMacro.cxx\n////\ntemplate<typename ValueType>\nVTKM_EXEC_CONT\nValueType Square(const ValueType &inValue)\n{\n return inValue * inValue;\n}\n////\n//// END-EXAMPLE EnvironmentModifierMacro.cxx\n////\n\n////\n//// BEGIN-EXAMPLE SuppressExecWarnings.cxx\n////\nVTKM_SUPPRESS_EXEC_WARNINGS\ntemplate<typename Functor>\nVTKM_EXEC_CONT\nvoid OverlyComplicatedForLoop(Functor &functor, vtkm::Id numInterations)\n{\n for (vtkm::Id index = 0; index < numInterations; index++)\n {\n functor();\n }\n}\n////\n//// END-EXAMPLE SuppressExecWarnings.cxx\n////\n\nstruct TestFunctor\n{\n vtkm::Id Count;\n\n VTKM_CONT\n TestFunctor() : Count(0) { }\n\n VTKM_CONT\n void operator()() { this->Count++; }\n};\n\nvoid Test()\n{\n VTKM_TEST_ASSERT(Square(2) == 4, \"Square function doesn't square.\");\n\n TestFunctor functor;\n OverlyComplicatedForLoop(functor, 10);\n VTKM_TEST_ASSERT(functor.Count == 10, \"Bad iterations.\");\n}\n\n} // anonymous namespace\n\nint EnvironmentModifierMacros(int, char *[])\n{\n return vtkm::testing::Testing::Run(Test);\n}\n"
},
{
"alpha_fraction": 0.6549053192138672,
"alphanum_fraction": 0.6669535040855408,
"avg_line_length": 21.063291549682617,
"blob_id": "5a26d2fd5bed772e64fdc0f8a07106fc0a86bd0b",
"content_id": "19ce640fde1ba1c716d2dbee86683a470f77cd6c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 3486,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 158,
"path": "/examples/ErrorHandling.cxx",
"repo_name": "sujin-philip/VTKmUsersGuide",
"src_encoding": "UTF-8",
"text": "#include <vtkm/Assert.h>\n#include <vtkm/StaticAssert.h>\n#include <vtkm/TypeTraits.h>\n\n#include <vtkm/cont/ArrayHandleCounting.h>\n#include <vtkm/cont/Error.h>\n#include <vtkm/cont/ErrorBadValue.h>\n\n#include <vtkm/worklet/DispatcherMapField.h>\n#include <vtkm/worklet/WorkletMapField.h>\n\n#include <vtkm/cont/testing/Testing.h>\n\n#include <type_traits>\n\nnamespace ErrorHandlingNamespace {\n\n////\n//// BEGIN-EXAMPLE CatchingErrors.cxx\n////\nint main(int argc, char **argv)\n{\n try\n {\n // Do something cool with VTK-m\n // ...\n //// PAUSE-EXAMPLE\n throw vtkm::cont::ErrorBadValue(\"Oh, no!\");\n //// RESUME-EXAMPLE\n }\n catch (vtkm::cont::Error error)\n {\n std::cout << error.GetMessage() << std::endl;\n return 1;\n }\n return 0;\n}\n////\n//// END-EXAMPLE CatchingErrors.cxx\n////\n\n////\n//// BEGIN-EXAMPLE Assert.cxx\n////\ntemplate<typename T>\nVTKM_CONT\nT GetArrayValue(vtkm::cont::ArrayHandle<T> arrayHandle, vtkm::Id index)\n{\n VTKM_ASSERT(index >= 0);\n VTKM_ASSERT(index < arrayHandle.GetNumberOfValues());\n ////\n //// END-EXAMPLE Assert.cxx\n ////\n return arrayHandle.GetPortalConstControl().Get(index);\n}\n\nVTKM_CONT\nvoid TryGetArrayValue()\n{\n vtkm::Float32 buffer[] = {2.0f, 5.0f};\n GetArrayValue(vtkm::cont::make_ArrayHandle(buffer,2), 0);\n GetArrayValue(vtkm::cont::make_ArrayHandle(buffer,2), 1);\n}\n\n////\n//// BEGIN-EXAMPLE StaticAssert.cxx\n////\ntemplate<typename T>\nVTKM_EXEC_CONT\nvoid MyMathFunction(T &value)\n{\n VTKM_STATIC_ASSERT(\n (std::is_same<typename vtkm::TypeTraits<T>::DimensionalityTag,\n vtkm::TypeTraitsScalarTag>::value));\n\n VTKM_STATIC_ASSERT_MSG(\n sizeof(T) >= 4, \"MyMathFunction needs types with at least 32 bits.\");\n////\n//// END-EXAMPLE StaticAssert.cxx\n////\n for (vtkm::IdComponent iteration = 0; iteration < 5; iteration++)\n {\n value = value*value;\n }\n}\n\nVTKM_EXEC_CONT\nvoid TryMyMathFunction()\n{\n vtkm::Id value(4);\n MyMathFunction(value);\n}\n\n////\n//// BEGIN-EXAMPLE 
ExecutionErrors.cxx\n////\nstruct SquareRoot : vtkm::worklet::WorkletMapField\n{\npublic:\n typedef void ControlSignature(FieldIn<Scalar>, FieldOut<Scalar>);\n typedef _2 ExecutionSignature(_1);\n\n template<typename T>\n VTKM_EXEC\n T operator()(T x) const\n {\n if (x < 0)\n {\n this->RaiseError(\"Cannot take the square root of a negative number.\");\n }\n return vtkm::Sqrt(x);\n }\n};\n////\n//// END-EXAMPLE ExecutionErrors.cxx\n////\n\nVTKM_CONT\nvoid TrySquareRoot()\n{\n vtkm::cont::ArrayHandle<vtkm::Float32> output;\n\n vtkm::worklet::DispatcherMapField<SquareRoot> dispatcher;\n\n std::cout << \"Trying valid input.\" << std::endl;\n vtkm::cont::ArrayHandleCounting<vtkm::Float32> validInput(0.0f, 1.0f, 10);\n dispatcher.Invoke(validInput, output);\n\n std::cout << \"Trying invalid input.\" << std::endl;\n vtkm::cont::ArrayHandleCounting<vtkm::Float32> invalidInput(-2.0, 1.0f, 10);\n bool errorCaught = false;\n try\n {\n dispatcher.Invoke(invalidInput, output);\n }\n catch (vtkm::cont::ErrorExecution error)\n {\n std::cout << \"Caught this error:\" << std::endl;\n std::cout << error.GetMessage() << std::endl;\n errorCaught = true;\n }\n VTKM_TEST_ASSERT(errorCaught, \"Did not get expected error.\");\n}\n\nvoid Test()\n{\n VTKM_TEST_ASSERT(ErrorHandlingNamespace::main(0, NULL) != 0, \"No error?\");\n TryGetArrayValue();\n TryMyMathFunction();\n TrySquareRoot();\n}\n\n} // namespace ErrorHandlingNamespace\n\nint ErrorHandling(int, char *[])\n{\n return vtkm::cont::testing::Testing::Run(ErrorHandlingNamespace::Test);\n}\n"
},
{
"alpha_fraction": 0.7089508175849915,
"alphanum_fraction": 0.7213989496231079,
"avg_line_length": 23.808822631835938,
"blob_id": "620d4df35df71403aec2b5faf59f10786e302ecc",
"content_id": "c260614990ceef89b4b7ce7f1d8ffce82c4b0ff1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1687,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 68,
"path": "/examples/ArrayHandleDiscard.cxx",
"repo_name": "sujin-philip/VTKmUsersGuide",
"src_encoding": "UTF-8",
"text": "#include <vtkm/cont/ArrayHandleCounting.h>\n#include <vtkm/cont/ArrayHandleDiscard.h>\n#include <vtkm/cont/DeviceAdapter.h>\n\n#include <vtkm/cont/testing/Testing.h>\n\nnamespace {\n\n////\n//// BEGIN-EXAMPLE ArrayHandleDiscard.cxx\n////\ntemplate<typename InputArrayType,\n typename OutputArrayType1,\n typename OutputArrayType2>\nVTKM_CONT\nvoid DoFoo(InputArrayType input,\n OutputArrayType1 output1,\n OutputArrayType2 output2);\n\ntemplate<typename InputArrayType>\nVTKM_CONT\ninline\nvtkm::cont::ArrayHandle<vtkm::FloatDefault> DoBar(InputArrayType input)\n{\n VTKM_IS_ARRAY_HANDLE(InputArrayType);\n\n vtkm::cont::ArrayHandle<vtkm::FloatDefault> keepOutput;\n\n vtkm::cont::ArrayHandleDiscard<vtkm::FloatDefault> discardOutput;\n\n DoFoo(input, keepOutput, discardOutput);\n\n return keepOutput;\n}\n////\n//// END-EXAMPLE ArrayHandleDiscard.cxx\n////\n\ntemplate<typename InputArrayType,\n typename OutputArrayType1,\n typename OutputArrayType2>\nVTKM_CONT\ninline void DoFoo(InputArrayType input,\n OutputArrayType1 output1,\n OutputArrayType2 output2)\n{\n vtkm::cont::DeviceAdapterAlgorithm<VTKM_DEFAULT_DEVICE_ADAPTER_TAG>::\n Copy(input, output1);\n vtkm::cont::DeviceAdapterAlgorithm<VTKM_DEFAULT_DEVICE_ADAPTER_TAG>::\n Copy(input, output2);\n}\n\nvoid Test()\n{\n vtkm::cont::ArrayHandleCounting<vtkm::FloatDefault> inputArray(0, 10, 10);\n\n vtkm::cont::ArrayHandle<vtkm::FloatDefault> outputArray =\n DoBar(inputArray);\n\n VTKM_TEST_ASSERT(outputArray.GetNumberOfValues() == 10, \"Wrong size.\");\n}\n\n} // anonymous namespace\n\nint ArrayHandleDiscard(int, char *[])\n{\n return vtkm::cont::testing::Testing::Run(Test);\n}\n"
},
{
"alpha_fraction": 0.6270819902420044,
"alphanum_fraction": 0.6744537949562073,
"avg_line_length": 33.7593994140625,
"blob_id": "e42c622326c6623ae3cbc7269a328bb7e6b3c33f",
"content_id": "5aea1e315422be15718bfbff73442bf3d115499f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 4623,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 133,
"path": "/examples/ArrayHandleCoordinateSystems.cxx",
"repo_name": "sujin-philip/VTKmUsersGuide",
"src_encoding": "UTF-8",
"text": "#include <vtkm/cont/ArrayHandle.h>\n#include <vtkm/cont/ArrayHandleCartesianProduct.h>\n#include <vtkm/cont/ArrayHandleUniformPointCoordinates.h>\n\n#include <vtkm/cont/testing/Testing.h>\n\nnamespace {\n\nvoid UniformPointCoordinates()\n{\n std::cout << \"Trying uniform point coordinates.\" << std::endl;\n\n ////\n //// BEGIN-EXAMPLE ArrayHandleUniformPointCoordinates.cxx\n ////\n // Create a set of point coordinates for a uniform grid in the space between\n // -5 and 5 in the x direction and -3 and 3 in the y and z directions. The\n // uniform sampling is spaced in 0.08 unit increments in the x direction (for\n // 126 samples), 0.08 unit increments in the y direction (for 76 samples) and\n // 0.24 unit increments in the z direction (for 26 samples). That makes\n // 248,976 values in the array total.\n vtkm::cont::ArrayHandleUniformPointCoordinates uniformCoordinates(\n vtkm::Id3(126, 76, 26),\n vtkm::make_Vec<vtkm::FloatDefault>(-5.0f, -3.0f, -3.0f),\n vtkm::make_Vec<vtkm::FloatDefault>(0.08f, 0.08f, 0.24f)\n );\n ////\n //// END-EXAMPLE ArrayHandleUniformPointCoordinates.cxx\n ////\n\n VTKM_TEST_ASSERT(uniformCoordinates.GetNumberOfValues() == 248976,\n \"Wrong number of values in uniform coordinates.\");\n VTKM_TEST_ASSERT(\n test_equal(uniformCoordinates.GetPortalConstControl().Get(0),\n vtkm::make_Vec(-5.0, -3.0, -3.0)),\n \"Bad first point coordinate.\");\n VTKM_TEST_ASSERT(\n test_equal(uniformCoordinates.GetPortalConstControl().Get(248975),\n vtkm::make_Vec(5.0, 3.0, 3.0)),\n \"Bad last point coordinate.\");\n}\n\ntemplate<typename ArrayHandleType>\nvoid CheckRectilinearPointCoordinates(ArrayHandleType rectilinearCoordinates)\n{\n VTKM_TEST_ASSERT(rectilinearCoordinates.GetNumberOfValues() == 12,\n \"Wrong number of values.\");\n\n VTKM_TEST_ASSERT(\n test_equal(rectilinearCoordinates.GetPortalControl().Get(0),\n vtkm::make_Vec(0.0, 0.0, 0.0)),\n \"Bad value at 0.\");\n VTKM_TEST_ASSERT(\n 
test_equal(rectilinearCoordinates.GetPortalControl().Get(4),\n vtkm::make_Vec(1.1, 2.0, 0.0)),\n \"Bad value at 4.\");\n VTKM_TEST_ASSERT(\n test_equal(rectilinearCoordinates.GetPortalControl().Get(11),\n vtkm::make_Vec(5.0, 2.0, 0.5)),\n \"Bad value at 11.\");\n}\n\nvoid RectilinearPointCoordinates()\n{\n std::cout << \"Trying rectilinear point coordinates.\" << std::endl;\n\n ////\n //// BEGIN-EXAMPLE ArrayHandleCartesianProduct.cxx\n ////\n typedef vtkm::cont::ArrayHandle<vtkm::Float32> AxisArrayType;\n typedef AxisArrayType::PortalControl AxisPortalType;\n\n // Create array for x axis coordinates with values [0.0, 1.1, 5.0]\n AxisArrayType xAxisArray;\n xAxisArray.Allocate(3);\n AxisPortalType xAxisPortal = xAxisArray.GetPortalControl();\n xAxisPortal.Set(0, 0.0f);\n xAxisPortal.Set(1, 1.1f);\n xAxisPortal.Set(2, 5.0f);\n\n // Create array for y axis coordinates with values [0.0, 2.0]\n AxisArrayType yAxisArray;\n yAxisArray.Allocate(2);\n AxisPortalType yAxisPortal = yAxisArray.GetPortalControl();\n yAxisPortal.Set(0, 0.0f);\n yAxisPortal.Set(1, 2.0f);\n\n // Create array for z axis coordinates with values [0.0, 0.5]\n AxisArrayType zAxisArray;\n zAxisArray.Allocate(2);\n AxisPortalType zAxisPortal = zAxisArray.GetPortalControl();\n zAxisPortal.Set(0, 0.0f);\n zAxisPortal.Set(1, 0.5f);\n\n // Create point coordinates for a \"rectilinear grid\" with axis-aligned points\n // with variable spacing by taking the Cartesian product of the three\n // previously defined arrays. 
This generates the following 3x2x2 = 12 values:\n //\n // [0.0, 0.0, 0.0], [1.1, 0.0, 0.0], [5.0, 0.0, 0.0],\n // [0.0, 2.0, 0.0], [1.1, 2.0, 0.0], [5.0, 2.0, 0.0],\n // [0.0, 0.0, 0.5], [1.1, 0.0, 0.5], [5.0, 0.0, 0.5],\n // [0.0, 2.0, 0.5], [1.1, 2.0, 0.5], [5.0, 2.0, 0.5]\n vtkm::cont::ArrayHandleCartesianProduct<\n AxisArrayType,AxisArrayType,AxisArrayType>rectilinearCoordinates(\n xAxisArray, yAxisArray, zAxisArray);\n ////\n //// END-EXAMPLE ArrayHandleCartesianProduct.cxx\n ////\n CheckRectilinearPointCoordinates(rectilinearCoordinates);\n\n CheckRectilinearPointCoordinates(\n ////\n //// BEGIN-EXAMPLE MakeArrayHandleCartesianProduct.cxx\n ////\n vtkm::cont::make_ArrayHandleCartesianProduct(xAxisArray,yAxisArray,zAxisArray)\n ////\n //// END-EXAMPLE MakeArrayHandleCartesianProduct.cxx\n ////\n );\n}\n\nvoid Test()\n{\n UniformPointCoordinates();\n RectilinearPointCoordinates();\n}\n\n} // anonymous namespace\n\nint ArrayHandleCoordinateSystems(int, char *[])\n{\n return vtkm::cont::testing::Testing::Run(Test);\n}\n"
},
{
"alpha_fraction": 0.6550322771072388,
"alphanum_fraction": 0.6672223210334778,
"avg_line_length": 25.152587890625,
"blob_id": "43363cfff070d767bf135d8e24629973e7f6f176",
"content_id": "9660a8af4194a1ad06430a965d61a07741d7c746",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 9598,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 367,
"path": "/examples/CustomDeviceAdapter.cxx",
"repo_name": "sujin-philip/VTKmUsersGuide",
"src_encoding": "UTF-8",
"text": "////\n//// BEGIN-EXAMPLE DeviceAdapterTagCxx11Thread.h\n////\n#include <vtkm/cont/internal/DeviceAdapterTag.h>\n\n// If this device adapter were to be contributed to VTK-m, then this macro\n// declaration should be moved to DeviceAdapterTag.h and given a unique\n// number.\n#define VTKM_DEVICE_ADAPTER_CXX11_THREAD 101\n\nVTKM_VALID_DEVICE_ADAPTER(Cxx11Thread, VTKM_DEVICE_ADAPTER_CXX11_THREAD);\n////\n//// END-EXAMPLE DeviceAdapterTagCxx11Thread.h\n////\n\n#include <vtkm/cont/internal/ArrayManagerExecution.h>\n\n////\n//// BEGIN-EXAMPLE ArrayManagerExecutionPrototype.cxx\n////\nnamespace vtkm {\nnamespace cont {\nnamespace internal {\n\ntemplate<typename T, typename StorageTag, typename DeviceAdapterTag>\nclass ArrayManagerExecution;\n\n}\n}\n} // namespace vtkm::cont::internal\n////\n//// END-EXAMPLE ArrayManagerExecutionPrototype.cxx\n////\n\n////\n//// BEGIN-EXAMPLE ArrayManagerExecutionCxx11Thread.h\n////\n//// PAUSE-EXAMPLE\n// We did not really put the device adapter components in separate header\n// files, but for the purposes of an example we are pretending we are.\n#if 0\n//// RESUME-EXAMPLE\n#include <vtkm/cont/cxx11/internal/DeviceAdapterTagCxx11Thread.h>\n//// PAUSE-EXAMPLE\n#endif\n//// RESUME-EXAMPLE\n\n#include <vtkm/cont/internal/ArrayManagerExecution.h>\n#include <vtkm/cont/internal/ArrayManagerExecutionShareWithControl.h>\n\nnamespace vtkm {\nnamespace cont {\nnamespace internal {\n\ntemplate<typename T, typename StorageTag>\nclass ArrayManagerExecution<\n T, StorageTag, vtkm::cont::DeviceAdapterTagCxx11Thread>\n : public vtkm::cont::internal::ArrayManagerExecutionShareWithControl<\n T, StorageTag>\n{\n typedef vtkm::cont::internal::ArrayManagerExecutionShareWithControl\n <T, StorageTag> Superclass;\n\npublic:\n VTKM_CONT\n ArrayManagerExecution(typename Superclass::StorageType *storage)\n : Superclass(storage) { }\n};\n\n}\n}\n} // namespace vtkm::cont::internal\n////\n//// END-EXAMPLE 
ArrayManagerExecutionCxx11Thread.h\n////\n\n////\n//// BEGIN-EXAMPLE DeviceAdapterAlgorithmCxx11Thread.h\n////\n//// PAUSE-EXAMPLE\n// We did not really put the device adapter components in separate header\n// files, but for the purposes of an example we are pretending we are.\n#if 0\n//// RESUME-EXAMPLE\n#include <vtkm/cont/cxx11/internal/DeviceAdapterTagCxx11Thread.h>\n//// PAUSE-EXAMPLE\n#endif\n//// RESUME-EXAMPLE\n\n#include <vtkm/cont/DeviceAdapterAlgorithm.h>\n#include <vtkm/cont/ErrorExecution.h>\n#include <vtkm/cont/internal/DeviceAdapterAlgorithmGeneral.h>\n\n#include <thread>\n\nnamespace vtkm {\nnamespace cont {\n\ntemplate<>\nstruct DeviceAdapterAlgorithm<vtkm::cont::DeviceAdapterTagCxx11Thread>\n : vtkm::cont::internal::DeviceAdapterAlgorithmGeneral<\n DeviceAdapterAlgorithm<vtkm::cont::DeviceAdapterTagCxx11Thread>,\n vtkm::cont::DeviceAdapterTagCxx11Thread>\n{\nprivate:\n template<typename FunctorType>\n struct ScheduleKernel1D\n {\n VTKM_CONT\n ScheduleKernel1D(const FunctorType &functor)\n : Functor(functor)\n { }\n\n VTKM_EXEC\n void operator()() const\n {\n try\n {\n for (vtkm::Id threadId = this->BeginId;\n threadId < this->EndId;\n threadId++)\n {\n this->Functor(threadId);\n // If an error is raised, abort execution.\n if (this->ErrorMessage.IsErrorRaised()) { return; }\n }\n }\n catch (vtkm::cont::Error error)\n {\n this->ErrorMessage.RaiseError(error.GetMessage().c_str());\n }\n catch (std::exception error)\n {\n this->ErrorMessage.RaiseError(error.what());\n }\n catch (...)\n {\n this->ErrorMessage.RaiseError(\"Unknown exception raised.\");\n }\n }\n\n FunctorType Functor;\n vtkm::exec::internal::ErrorMessageBuffer ErrorMessage;\n vtkm::Id BeginId;\n vtkm::Id EndId;\n };\n\n template<typename FunctorType>\n struct ScheduleKernel3D\n {\n VTKM_CONT\n ScheduleKernel3D(const FunctorType &functor, vtkm::Id3 maxRange)\n : Functor(functor), MaxRange(maxRange)\n { }\n\n VTKM_EXEC\n void operator()() const\n {\n vtkm::Id3 
threadId3D(this->BeginId%this->MaxRange[0],\n (this->BeginId/this->MaxRange[0])%this->MaxRange[1],\n this->BeginId/(this->MaxRange[0]*this->MaxRange[1]));\n\n try\n {\n for (vtkm::Id threadId = this->BeginId;\n threadId < this->EndId;\n threadId++)\n {\n this->Functor(threadId3D);\n // If an error is raised, abort execution.\n if (this->ErrorMessage.IsErrorRaised()) { return; }\n\n threadId3D[0]++;\n if (threadId3D[0] >= MaxRange[0])\n {\n threadId3D[0] = 0;\n threadId3D[1]++;\n if (threadId3D[1] >= MaxRange[1])\n {\n threadId3D[1] = 0;\n threadId3D[2]++;\n }\n }\n }\n }\n catch (vtkm::cont::Error error)\n {\n this->ErrorMessage.RaiseError(error.GetMessage().c_str());\n }\n catch (std::exception error)\n {\n this->ErrorMessage.RaiseError(error.what());\n }\n catch (...)\n {\n this->ErrorMessage.RaiseError(\"Unknown exception raised.\");\n }\n }\n\n FunctorType Functor;\n vtkm::exec::internal::ErrorMessageBuffer ErrorMessage;\n vtkm::Id BeginId;\n vtkm::Id EndId;\n vtkm::Id3 MaxRange;\n };\n\n template<typename KernelType>\n VTKM_CONT\n static void DoSchedule(KernelType kernel,\n vtkm::Id numInstances)\n {\n if (numInstances < 1) { return; }\n\n const vtkm::Id MESSAGE_SIZE = 1024;\n char errorString[MESSAGE_SIZE];\n errorString[0] = '\\0';\n vtkm::exec::internal::ErrorMessageBuffer errorMessage(errorString,\n MESSAGE_SIZE);\n kernel.Functor.SetErrorMessageBuffer(errorMessage);\n kernel.ErrorMessage = errorMessage;\n\n vtkm::Id numThreads =\n static_cast<vtkm::Id>(std::thread::hardware_concurrency());\n if (numThreads > numInstances)\n {\n numThreads = numInstances;\n }\n vtkm::Id numInstancesPerThread = (numInstances+numThreads-1)/numThreads;\n\n std::thread *threadPool = new std::thread[numThreads];\n vtkm::Id beginId = 0;\n for (vtkm::Id threadIndex = 0; threadIndex < numThreads; threadIndex++)\n {\n vtkm::Id endId = std::min(beginId+numInstancesPerThread, numInstances);\n KernelType threadKernel = kernel;\n threadKernel.BeginId = beginId;\n threadKernel.EndId = 
endId;\n std::thread newThread(threadKernel);\n threadPool[threadIndex].swap(newThread);\n beginId = endId;\n }\n\n for (vtkm::Id threadIndex = 0; threadIndex < numThreads; threadIndex++)\n {\n threadPool[threadIndex].join();\n }\n\n delete[] threadPool;\n\n if (errorMessage.IsErrorRaised())\n {\n throw vtkm::cont::ErrorExecution(errorString);\n }\n }\n\npublic:\n template<typename FunctorType>\n VTKM_CONT\n static void Schedule(FunctorType functor, vtkm::Id numInstances)\n {\n DoSchedule(ScheduleKernel1D<FunctorType>(functor), numInstances);\n }\n\n template<typename FunctorType>\n VTKM_CONT\n static void Schedule(FunctorType functor, vtkm::Id3 maxRange)\n {\n vtkm::Id numInstances = maxRange[0]*maxRange[1]*maxRange[2];\n DoSchedule(ScheduleKernel3D<FunctorType>(functor, maxRange), numInstances);\n }\n\n VTKM_CONT\n static void Synchronize()\n {\n // Nothing to do. This device schedules all of its operations using a\n // split/join paradigm. This means that the if the control threaad is\n // calling this method, then nothing should be running in the execution\n // environment.\n }\n};\n\n}\n} // namespace vtkm::cont\n////\n//// END-EXAMPLE DeviceAdapterAlgorithmCxx11Thread.h\n////\n\n////\n//// BEGIN-EXAMPLE DeviceAdapterTimerImplementationCxx11Thread.h\n////\n#include <chrono>\n\nnamespace vtkm {\nnamespace cont {\n\ntemplate<>\nclass DeviceAdapterTimerImplementation<vtkm::cont::DeviceAdapterTagCxx11Thread>\n{\npublic:\n VTKM_CONT\n DeviceAdapterTimerImplementation()\n {\n this->Reset();\n }\n\n VTKM_CONT\n void Reset()\n {\n vtkm::cont::DeviceAdapterAlgorithm<vtkm::cont::DeviceAdapterTagCxx11Thread>\n ::Synchronize();\n this->StartTime = std::chrono::high_resolution_clock::now();\n }\n\n VTKM_CONT\n vtkm::Float64 GetElapsedTime()\n {\n vtkm::cont::DeviceAdapterAlgorithm<vtkm::cont::DeviceAdapterTagCxx11Thread>\n ::Synchronize();\n std::chrono::high_resolution_clock::time_point endTime =\n std::chrono::high_resolution_clock::now();\n\n 
std::chrono::high_resolution_clock::duration elapsedTicks =\n endTime - this->StartTime;\n\n std::chrono::duration<vtkm::Float64> elapsedSeconds(elapsedTicks);\n\n return elapsedSeconds.count();\n }\n\nprivate:\n std::chrono::high_resolution_clock::time_point StartTime;\n};\n\n}\n} // namespace vtkm::cont\n////\n//// END-EXAMPLE DeviceAdapterTimerImplementationCxx11Thread.h\n////\n\n////\n//// BEGIN-EXAMPLE UnitTestDeviceAdapterCxx11Thread.cxx\n////\n//// PAUSE-EXAMPLE\n// We did not really put the device adapter components in separate header\n// files, but for the purposes of an example we are pretending we are.\n#if 0\n//// RESUME-EXAMPLE\n#include <vtkm/cont/cxx11/DeviceAdapterCxx11Thread.h>\n//// PAUSE-EXAMPLE\n#endif\n//// RESUME-EXAMPLE\n\n#include <vtkm/cont/testing/TestingDeviceAdapter.h>\n\nint UnitTestDeviceAdapterCxx11Thread(int, char *[])\n{\n return vtkm::cont::testing::TestingDeviceAdapter<\n vtkm::cont::DeviceAdapterTagCxx11Thread>::Run();\n}\n////\n//// END-EXAMPLE UnitTestDeviceAdapterCxx11Thread.cxx\n////\n\nint CustomDeviceAdapter(int argc, char *argv[])\n{\n return UnitTestDeviceAdapterCxx11Thread(argc, argv);\n}\n"
},
{
"alpha_fraction": 0.6715521216392517,
"alphanum_fraction": 0.6799615621566772,
"avg_line_length": 26.746665954589844,
"blob_id": "4941e20ca16292c5c7d94fa4c66890105b4a6e4e",
"content_id": "3d014daa9a00a58afcb3649850bdc6ebdd66adef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 4162,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 150,
"path": "/examples/TransferringArguments.cxx",
"repo_name": "sujin-philip/VTKmUsersGuide",
"src_encoding": "UTF-8",
"text": "#define VTKM_DEVICE_ADAPTER VTKM_DEVICE_ADAPTER_SERIAL\n\n#include <vtkm/cont/arg/TypeCheckTagArray.h>\n#include <vtkm/cont/arg/TypeCheckTagExecObject.h>\n#include <vtkm/cont/arg/TransportTagArrayIn.h>\n#include <vtkm/cont/arg/TransportTagArrayOut.h>\n#include <vtkm/cont/arg/TransportTagExecObject.h>\n\n#include <vtkm/cont/DeviceAdapter.h>\n\n#include <vtkm/cont/testing/Testing.h>\n\nnamespace {\n\nnamespace TypeCheckNamespace {\n\n////\n//// BEGIN-EXAMPLE TypeCheck.cxx\n////\nstruct MyExecObject : vtkm::exec::ExecutionObjectBase { vtkm::Id Value; };\n\nvoid DoTypeChecks()\n{\n ////\n //// PAUSE-EXAMPLE\n ////\n std::cout << \"Testing type checks\" << std::endl;\n ////\n //// RESUME-EXAMPLE\n ////\n using vtkm::cont::arg::TypeCheck;\n using vtkm::cont::arg::TypeCheckTagArray;\n using vtkm::cont::arg::TypeCheckTagExecObject;\n\n bool check1 = TypeCheck<TypeCheckTagExecObject, MyExecObject>::value; // true\n bool check2 = TypeCheck<TypeCheckTagExecObject, vtkm::Id>::value; // false\n\n typedef vtkm::cont::ArrayHandle<vtkm::Float32> ArrayType;\n\n bool check3 = // true\n TypeCheck<TypeCheckTagArray<vtkm::TypeListTagField>, ArrayType>::value;\n bool check4 = // false\n TypeCheck<TypeCheckTagArray<vtkm::TypeListTagIndex>, ArrayType>::value;\n bool check5 = TypeCheck<TypeCheckTagExecObject, ArrayType>::value; // false\n ////\n //// PAUSE-EXAMPLE\n ////\n VTKM_TEST_ASSERT(check1 == true, \"Type check failed.\");\n VTKM_TEST_ASSERT(check2 == false, \"Type check failed.\");\n VTKM_TEST_ASSERT(check3 == true, \"Type check failed.\");\n VTKM_TEST_ASSERT(check4 == false, \"Type check failed.\");\n VTKM_TEST_ASSERT(check5 == false, \"Type check failed.\");\n ////\n //// RESUME-EXAMPLE\n ////\n}\n////\n//// END-EXAMPLE TypeCheck.cxx\n////\n\n} // namespace TypeCheckNamespace\n\nusing namespace TypeCheckNamespace;\n\nnamespace TransportNamespace {\n\n////\n//// BEGIN-EXAMPLE Transport.cxx\n////\nstruct MyExecObject : vtkm::exec::ExecutionObjectBase { vtkm::Id Value; 
};\n\ntypedef vtkm::cont::ArrayHandle<vtkm::Id> ArrayType;\n\nvoid DoTransport(const MyExecObject &inExecObject,\n const ArrayType &inArray,\n const ArrayType &outArray)\n{\n ////\n //// PAUSE-EXAMPLE\n ////\n std::cout << \"Testing transports.\" << std::endl;\n ////\n //// RESUME-EXAMPLE\n ////\n typedef VTKM_DEFAULT_DEVICE_ADAPTER_TAG Device;\n\n using vtkm::cont::arg::Transport;\n using vtkm::cont::arg::TransportTagArrayIn;\n using vtkm::cont::arg::TransportTagArrayOut;\n using vtkm::cont::arg::TransportTagExecObject;\n\n // The executive object transport just passes the object through.\n typedef Transport<TransportTagExecObject,MyExecObject,Device>\n ExecObjectTransport;\n MyExecObject passedExecObject =\n ExecObjectTransport()(inExecObject, inArray, 10, 10);\n\n // The array in transport returns a read-only array portal.\n typedef Transport<TransportTagArrayIn,ArrayType,Device> ArrayInTransport;\n ArrayInTransport::ExecObjectType inPortal =\n ArrayInTransport()(inArray, inArray, 10, 10);\n\n // The array out transport returns an allocated array portal.\n typedef Transport<TransportTagArrayOut,ArrayType,Device> ArrayOutTransport;\n ArrayOutTransport::ExecObjectType outPortal =\n ArrayOutTransport()(outArray, inArray, 10, 10);\n ////\n //// PAUSE-EXAMPLE\n ////\n VTKM_TEST_ASSERT(passedExecObject.Value == 5, \"Bad exec object.\");\n CheckPortal(inPortal);\n VTKM_TEST_ASSERT(outPortal.GetNumberOfValues() == 10, \"Bad array out.\");\n ////\n //// RESUME-EXAMPLE\n ////\n}\n////\n//// END-EXAMPLE Transport.cxx\n////\n\nvoid DoTransport()\n{\n MyExecObject execObject;\n execObject.Value = 5;\n\n vtkm::Id buffer[10];\n for (vtkm::Id index = 0; index < 10; index++)\n {\n buffer[index] = TestValue(index, vtkm::Id());\n }\n\n DoTransport(execObject, vtkm::cont::make_ArrayHandle(buffer,10), ArrayType());\n}\n\n} // namespace TransportNamespace\n\nusing namespace TransportNamespace;\n\nvoid Test()\n{\n DoTypeChecks();\n DoTransport();\n}\n\n} // anonymous namespace\n\nint 
TransferringArguments(int, char *[])\n{\n return vtkm::cont::testing::Testing::Run(Test);\n}\n"
},
{
"alpha_fraction": 0.6163021922111511,
"alphanum_fraction": 0.6166998147964478,
"avg_line_length": 32.53333282470703,
"blob_id": "1d00e8a1024e85d46784ed7ac97f59eeb4749421",
"content_id": "4d2fbcc9d1d9af0b63b2a647203f5c2fafac1598",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 2515,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 75,
"path": "/examples/ExtractExample.cmake",
"repo_name": "sujin-philip/VTKmUsersGuide",
"src_encoding": "UTF-8",
"text": "set(BEGIN_EXAMPLE_REGEX \"////[ ]*BEGIN-EXAMPLE\")\nset(END_EXAMPLE_REGEX \"////[ ]*END-EXAMPLE\")\nset(PAUSE_EXAMPLE_REGEX \"////[ ]*PAUSE-EXAMPLE[ ]*$\")\nset(RESUME_EXAMPLE_REGEX \"////[ ]*RESUME-EXAMPLE[ ]*$\")\n\nfunction(extract_from_file extracted_files_var src_file)\n set(extracted_files)\n\n latex_get_output_path(output_dir)\n\n file(STRINGS ${src_file} src_lines)\n\n set(active_files)\n set(example_paused OFF)\n\n foreach(line IN LISTS src_lines)\n if(\"${line}\" MATCHES \"////\")\n if(\"${line}\" MATCHES \"${BEGIN_EXAMPLE_REGEX}\")\n string(REGEX MATCH \"[^ ]+[ ]*$\" out_name \"${line}\")\n message(STATUS \"Generating ${out_name}\")\n list(APPEND active_files ${out_name})\n # Remove files to begin appending.\n file(REMOVE ${output_dir}/listing_${out_name})\n list(APPEND extracted_files ${output_dir}/listing_${out_name})\n elseif(\"${line}\" MATCHES \"${END_EXAMPLE_REGEX}\")\n string(REGEX MATCH \"[^ ]+[ ]*$\" out_name \"${line}\")\n list(FIND active_files \"${out_name}\" out_index)\n if(${out_index} LESS 0)\n message(WARNING \"Name mismatch extracting from ${src_file}. 
${out_name} never started.\")\n else()\n list(REMOVE_AT active_files ${out_index})\n endif()\n elseif(\"${line}\" MATCHES \"${PAUSE_EXAMPLE_REGEX}\")\n if(example_paused)\n message(WARNING \"Badly nested pause/resume pair in ${src_file}.\")\n else()\n set(example_paused ON)\n endif()\n elseif(\"${line}\" MATCHES \"${RESUME_EXAMPLE_REGEX}\")\n if(example_paused)\n set(example_paused OFF)\n else()\n message(WARNING \"Badly nested pause/resume pair in ${src_file}.\")\n endif()\n endif()\n else()\n if(NOT example_paused)\n foreach(file IN LISTS active_files)\n file(APPEND ${output_dir}/listing_${file} \"${line}\\n\")\n endforeach()\n endif()\n endif()\n endforeach()\n\n foreach(unended_file IN LISTS active_files)\n message(WARNING \"Out file ${unended_file} from ${src_file} started but never ended.\")\n endforeach()\n\n if(example_paused)\n message(WARNING \"Paused capture in ${src_file} never resumed.\")\n endif()\n\n set(${extracted_files_var} ${extracted_files})\nendfunction()\n\nfunction(extract_examples created_files_var)\n set(created_files)\n\n foreach(src_file ${ARGN})\n extract_from_file(extracted_files ${src_file})\n list(APPEND created_files ${extracted_files})\n endforeach()\n\n set(${created_files_var} ${created_files} PARENT_SCOPE)\nendfunction()\n"
},
{
"alpha_fraction": 0.7039687633514404,
"alphanum_fraction": 0.7046194076538086,
"avg_line_length": 24.19672203063965,
"blob_id": "029c78ae39f99fafb5e8b257f2477bfdeb214610",
"content_id": "ecfb2d9d5ebc51e825932f373a1a6bf6fa9d7a79",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1537,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 61,
"path": "/examples/IO.cxx",
"repo_name": "sujin-philip/VTKmUsersGuide",
"src_encoding": "UTF-8",
"text": "////\n//// BEGIN-EXAMPLE VTKDataSetWriter.cxx\n////\n#include <vtkm/io/writer/VTKDataSetWriter.h>\n\nvoid SaveDataAsVTKFile(vtkm::cont::DataSet data)\n{\n vtkm::io::writer::VTKDataSetWriter writer(\"data.vtk\");\n\n writer.WriteDataSet(data);\n}\n////\n//// END-EXAMPLE VTKDataSetWriter.cxx\n////\n\n////\n//// BEGIN-EXAMPLE VTKDataSetReader.cxx\n////\n#include <vtkm/io/reader/VTKDataSetReader.h>\n\nvtkm::cont::DataSet OpenDataFromVTKFile()\n{\n vtkm::io::reader::VTKDataSetReader reader(\"data.vtk\");\n\n return reader.ReadDataSet();\n}\n////\n//// END-EXAMPLE VTKDataSetReader.cxx\n////\n\n#include <vtkm/cont/testing/MakeTestDataSet.h>\n#include <vtkm/cont/testing/Testing.h>\n\nnamespace {\n\nvoid TestIO()\n{\n std::cout << \"Writing data\" << std::endl;\n vtkm::cont::testing::MakeTestDataSet makeDataSet;\n vtkm::cont::DataSet createdData = makeDataSet.Make3DExplicitDataSetCowNose();\n SaveDataAsVTKFile(createdData);\n\n std::cout << \"Reading data\" << std::endl;\n vtkm::cont::DataSet readData = OpenDataFromVTKFile();\n\n const vtkm::cont::CellSet &createdCellSet = createdData.GetCellSet().CastToBase();\n const vtkm::cont::CellSet &readCellSet = readData.GetCellSet().CastToBase();\n VTKM_TEST_ASSERT(\n createdCellSet.GetNumberOfCells() == readCellSet.GetNumberOfCells(),\n \"Createded and read data do not match.\");\n VTKM_TEST_ASSERT(\n createdCellSet.GetNumberOfPoints() == readCellSet.GetNumberOfPoints(),\n \"Createded and read data do not match.\");\n}\n\n}\n\nint IO(int, char *[])\n{\n return vtkm::cont::testing::Testing::Run(TestIO);\n}\n"
},
{
"alpha_fraction": 0.7215447425842285,
"alphanum_fraction": 0.7215447425842285,
"avg_line_length": 14.375,
"blob_id": "edbcca3c986020acfc466ac3266ba2f46460e56e",
"content_id": "664e99a4f003358c4439413849dea7141a4ff709",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 492,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 32,
"path": "/examples/DeviceAdapterAlgorithms.cxx",
"repo_name": "sujin-philip/VTKmUsersGuide",
"src_encoding": "UTF-8",
"text": "#include <vtkm/cont/DeviceAdapter.h>\n\n#include <vtkm/cont/testing/Testing.h>\n\n////\n//// BEGIN-EXAMPLE DeviceAdapterAlgorithmPrototype.cxx\n////\nnamespace vtkm {\nnamespace cont {\n\ntemplate<typename DeviceAdapterTag>\nstruct DeviceAdapterAlgorithm;\n\n}\n} // namespace vtkm::cont\n////\n//// END-EXAMPLE DeviceAdapterAlgorithmPrototype.cxx\n////\n\nnamespace {\n\nvoid Test()\n{\n\n}\n\n} // anonymous namespace\n\nint DeviceAdapterAlgorithms(int, char *[])\n{\n return vtkm::cont::testing::Testing::Run(Test);\n}\n"
},
{
"alpha_fraction": 0.5090909004211426,
"alphanum_fraction": 0.5486363768577576,
"avg_line_length": 24,
"blob_id": "d6c425cd1b35060a9011b41f30208de58e4c28eb",
"content_id": "5582482048fb9d6fb29303d12ab9dd24b3307fc5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2200,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 88,
"path": "/examples/NewtonsMethod.cxx",
"repo_name": "sujin-philip/VTKmUsersGuide",
"src_encoding": "UTF-8",
"text": "#include <vtkm/Matrix.h>\n#include <vtkm/NewtonsMethod.h>\n\n#include <vtkm/testing/Testing.h>\n\nnamespace {\n\n////\n//// BEGIN-EXAMPLE NewtonsMethod.cxx\n////\n// A functor for the mathematical function f(x) = [dot(x,x),x[0]*x[1]]\nstruct FunctionFunctor\n{\n template<typename T>\n VTKM_EXEC_CONT\n vtkm::Vec<T,2> operator()(const vtkm::Vec<T,2> &x) const\n {\n return vtkm::make_Vec(vtkm::dot(x,x), x[0]*x[1]);\n }\n};\n\n// A functor for the Jacobian of the mathematical function\n// f(x) = [dot(x,x),x[0]*x[1]], which is\n// | 2*x[0] 2*x[1] |\n// | x[1] x[0] |\nstruct JacobianFunctor\n{\n template<typename T>\n VTKM_EXEC_CONT\n vtkm::Matrix<T,2,2> operator()(const vtkm::Vec<T,2> &x) const\n {\n vtkm::Matrix<T,2,2> jacobian;\n jacobian(0,0) = 2*x[0];\n jacobian(0,1) = 2*x[1];\n jacobian(1,0) = x[1];\n jacobian(1,1) = x[0];\n\n return jacobian;\n }\n};\n\nVTKM_EXEC\nvoid SolveNonlinear()\n{\n // Use Newton's method to solve the nonlinear system of equations:\n //\n // x^2 + y^2 = 2\n // x*y = 1\n //\n // There are two possible solutions, which are (x=1,y=1) and (x=-1,y=-1).\n // The one found depends on the starting value.\n vtkm::Vec<vtkm::Float32,2> answer1 =\n vtkm::NewtonsMethod(JacobianFunctor(),\n FunctionFunctor(),\n vtkm::make_Vec(2.0f, 1.0f),\n vtkm::make_Vec(1.0f, 0.0f));\n // answer1 is [1,1]\n\n vtkm::Vec<vtkm::Float32,2> answer2 =\n vtkm::NewtonsMethod(JacobianFunctor(),\n FunctionFunctor(),\n vtkm::make_Vec(2.0f, 1.0f),\n vtkm::make_Vec(0.0f, -2.0f));\n // answer2 is [-1,-1]\n //// PAUSE-EXAMPLE\n std::cout << answer1 << \" \" << answer2 << std::endl;\n\n VTKM_TEST_ASSERT(test_equal(answer1, vtkm::make_Vec(1,1), 0.01),\n \"Bad answer 1.\");\n VTKM_TEST_ASSERT(test_equal(answer2, vtkm::make_Vec(-1,-1), 0.01),\n \"Bad answer 2.\");\n //// RESUME-EXAMPLE\n}\n////\n//// END-EXAMPLE NewtonsMethod.cxx\n////\n\nvoid Run()\n{\n SolveNonlinear();\n}\n\n} // anonymous namespace\n\nint NewtonsMethod(int, char*[])\n{\n return 
vtkm::testing::Testing::Run(Run);\n}\n"
},
{
"alpha_fraction": 0.6836155652999878,
"alphanum_fraction": 0.6934161186218262,
"avg_line_length": 28.029577255249023,
"blob_id": "519a3afe111a4703c53299123ee42128f4d5b639",
"content_id": "2c3f2db1ab42861fdd998cf22ed8babb8752cd85",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 20611,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 710,
"path": "/examples/FunctionInterface.cxx",
"repo_name": "sujin-philip/VTKmUsersGuide",
"src_encoding": "UTF-8",
"text": "#include <vtkm/internal/FunctionInterface.h>\n\n#include <vtkm/StaticAssert.h>\n\n#include <vtkm/cont/ArrayHandle.h>\n#include <vtkm/cont/DeviceAdapter.h>\n#include <vtkm/cont/DynamicArrayHandle.h>\n#include <vtkm/cont/ErrorBadType.h>\n\n#include <vtkm/cont/internal/DynamicTransform.h>\n\n#include <vtkm/cont/testing/Testing.h>\n\n#include <string.h>\n\nnamespace {\n\nvoid BasicFunctionInterface()\n{\n ////\n //// BEGIN-EXAMPLE DefineFunctionInterface.cxx\n ////\n // FunctionInterfaces matching some common POSIX functions.\n vtkm::internal::FunctionInterface<size_t(const char *)>\n strlenInterface;\n\n vtkm::internal::FunctionInterface<char *(char *, const char *s2, size_t)>\n strncpyInterface;\n ////\n //// END-EXAMPLE DefineFunctionInterface.cxx\n ////\n\n ////\n //// BEGIN-EXAMPLE UseMakeFunctionInterface.cxx\n ////\n const char *s = \"Hello World\";\n static const size_t BUFFER_SIZE = 100;\n char *buffer = (char *)malloc(BUFFER_SIZE);\n\n strlenInterface =\n vtkm::internal::make_FunctionInterface<size_t>(s);\n\n strncpyInterface =\n vtkm::internal::make_FunctionInterface<char *>(buffer, s, BUFFER_SIZE);\n ////\n //// END-EXAMPLE UseMakeFunctionInterface.cxx\n ////\n\n std::cout << \"Trying interfaces.\" << std::endl;\n\n strlenInterface.InvokeCont(strlen);\n VTKM_TEST_ASSERT(strlenInterface.GetReturnValue() == 11, \"Bad length.\");\n\n strncpyInterface.InvokeCont(strncpy);\n VTKM_TEST_ASSERT(strncpyInterface.GetReturnValue() == buffer,\n \"Bad return value.\");\n VTKM_TEST_ASSERT(strncmp(s, buffer, BUFFER_SIZE) == 0, \"Did not copy.\");\n\n ////\n //// BEGIN-EXAMPLE FunctionInterfaceArity.cxx\n ////\n VTKM_STATIC_ASSERT(\n vtkm::internal::FunctionInterface<size_t(const char *)>::ARITY == 1);\n\n vtkm::IdComponent arity = strncpyInterface.GetArity(); // arity = 3\n ////\n //// END-EXAMPLE FunctionInterfaceArity.cxx\n ////\n\n VTKM_TEST_ASSERT(arity == 3, \"Unexpected arity.\");\n\n free(buffer);\n}\n\n////\n//// BEGIN-EXAMPLE 
FunctionInterfaceGetParameter.cxx\n////\nvoid GetFirstParameterResolved(\n const vtkm::internal::FunctionInterface<void(std::string)> &interface)\n{\n // The following two uses of GetParameter are equivalent\n std::cout << interface.GetParameter<1>() << std::endl;\n std::cout << interface.GetParameter(vtkm::internal::IndexTag<1>())\n << std::endl;\n}\n\ntemplate<typename FunctionSignature>\nvoid GetFirstParameterTemplated(\n const vtkm::internal::FunctionInterface<FunctionSignature> &interface)\n{\n // The following two uses of GetParameter are equivalent\n std::cout << interface.template GetParameter<1>() << std::endl;\n std::cout << interface.GetParameter(vtkm::internal::IndexTag<1>())\n << std::endl;\n}\n////\n//// END-EXAMPLE FunctionInterfaceGetParameter.cxx\n////\n\nvoid TryGetParameter()\n{\n std::cout << \"Getting parameters.\" << std::endl;\n\n GetFirstParameterResolved(\n vtkm::internal::make_FunctionInterface<void>(std::string(\"foo\")));\n GetFirstParameterTemplated(\n vtkm::internal::make_FunctionInterface<void>(std::string(\"bar\")));\n}\n\n////\n//// BEGIN-EXAMPLE FunctionInterfaceSetParameter.cxx\n////\nvoid SetFirstParameterResolved(\n vtkm::internal::FunctionInterface<void(std::string)> &interface,\n const std::string &newFirstParameter)\n{\n // The following two uses of SetParameter are equivalent\n interface.SetParameter<1>(newFirstParameter);\n interface.SetParameter(newFirstParameter, vtkm::internal::IndexTag<1>());\n}\n\ntemplate<typename FunctionSignature, typename T>\nvoid SetFirstParameterTemplated(\n vtkm::internal::FunctionInterface<FunctionSignature> &interface,\n T newFirstParameter)\n{\n // The following two uses of SetParameter are equivalent\n interface.template SetParameter<1>(newFirstParameter);\n interface.SetParameter(newFirstParameter, vtkm::internal::IndexTag<1>());\n}\n////\n//// END-EXAMPLE FunctionInterfaceSetParameter.cxx\n////\n\nvoid TrySetParameter()\n{\n std::cout << \"Setting parameters.\" << std::endl;\n\n 
vtkm::internal::FunctionInterface<void(std::string)> functionInterface;\n\n SetFirstParameterResolved(functionInterface, std::string(\"foo\"));\n VTKM_TEST_ASSERT(functionInterface.GetParameter<1>() == \"foo\",\n \"Did not set string.\");\n\n SetFirstParameterTemplated(functionInterface, std::string(\"bar\"));\n VTKM_TEST_ASSERT(functionInterface.GetParameter<1>() == \"bar\",\n \"Did not set string.\");\n}\n\nvoid BasicInvoke()\n{\n ////\n //// BEGIN-EXAMPLE FunctionInterfaceBasicInvoke.cxx\n ////\n vtkm::internal::FunctionInterface<size_t(const char *)> strlenInterface;\n strlenInterface.SetParameter<1>(\"Hello world\");\n\n strlenInterface.InvokeCont(strlen);\n\n size_t length = strlenInterface.GetReturnValue(); // length = 11\n ////\n //// END-EXAMPLE FunctionInterfaceBasicInvoke.cxx\n ////\n\n VTKM_TEST_ASSERT(length == 11, \"Bad length.\");\n}\n\nnamespace TransformedInvokeNamespace {\n\n////\n//// BEGIN-EXAMPLE FunctionInterfaceTransformInvoke.cxx\n////\n// Our transform converts C strings to integers, leaves everything else alone.\nstruct TransformFunctor\n{\n template<typename T>\n VTKM_CONT\n const T &operator()(const T &x) const\n {\n return x;\n }\n\n VTKM_CONT\n const vtkm::Int32 operator()(const char *x) const\n {\n return atoi(x);\n }\n};\n\n// The function we are invoking simply compares two numbers.\nstruct IsSameFunctor\n{\n template<typename T1, typename T2>\n VTKM_CONT\n bool operator()(const T1 &x, const T2 &y) const\n {\n return x == y;\n }\n};\n\nvoid TryTransformedInvoke()\n{\n vtkm::internal::FunctionInterface<bool(const char *, vtkm::Int32)>\n functionInterface =\n vtkm::internal::make_FunctionInterface<bool>((const char *)\"42\",\n (vtkm::Int32)42);\n\n functionInterface.InvokeCont(IsSameFunctor(), TransformFunctor());\n\n bool isSame = functionInterface.GetReturnValue(); // isSame = true\n ////\n //// PAUSE-EXAMPLE\n ////\n VTKM_TEST_ASSERT(isSame, \"Did not get right return value.\");\n ////\n //// RESUME-EXAMPLE\n ////\n}\n////\n//// 
END-EXAMPLE FunctionInterfaceTransformInvoke.cxx\n////\n\n} // namespace TransformedInvokeNamespace\n\nusing namespace TransformedInvokeNamespace;\n\nnamespace ReturnContainerNamespace {\n\n////\n//// BEGIN-EXAMPLE FunctionInterfaceReturnContainer.cxx\n////\ntemplate<typename ResultType, bool Valid> struct PrintReturnFunctor;\n\ntemplate<typename ResultType>\nstruct PrintReturnFunctor<ResultType, true>\n{\n VTKM_CONT\n void operator()(\n const vtkm::internal::FunctionInterfaceReturnContainer<ResultType> &x)\n const\n {\n std::cout << x.Value << std::endl;\n }\n};\n\ntemplate<typename ResultType>\nstruct PrintReturnFunctor<ResultType, false>\n{\n VTKM_CONT\n void operator()(\n const vtkm::internal::FunctionInterfaceReturnContainer<ResultType> &)\n const\n {\n std::cout << \"No return type.\" << std::endl;\n }\n};\n\ntemplate<typename FunctionInterfaceType>\nvoid PrintReturn(const FunctionInterfaceType &functionInterface)\n{\n typedef typename FunctionInterfaceType::ResultType ResultType;\n typedef vtkm::internal::FunctionInterfaceReturnContainer<ResultType>\n ReturnContainerType;\n\n PrintReturnFunctor<ResultType, ReturnContainerType::VALID> printReturn;\n printReturn(functionInterface.GetReturnValueSafe());\n}\n////\n//// END-EXAMPLE FunctionInterfaceReturnContainer.cxx\n////\n\n} // namespace ReturnContainerNamespace\n\nvoid TryPrintReturn()\n{\n vtkm::internal::FunctionInterface<size_t(const char *)> strlenInterface;\n strlenInterface.SetParameter<1>(\"Hello world\");\n strlenInterface.InvokeCont(strlen);\n ReturnContainerNamespace::PrintReturn(strlenInterface);\n\n ReturnContainerNamespace::PrintReturn(\n vtkm::internal::make_FunctionInterface<void>(\n (const char *)\"Hello world\"));\n}\n\nvoid Append()\n{\n ////\n //// BEGIN-EXAMPLE FunctionInterfaceAppend.cxx\n ////\n using vtkm::internal::FunctionInterface;\n using vtkm::internal::make_FunctionInterface;\n\n typedef FunctionInterface<void(std::string, vtkm::Id)>\n InitialFunctionInterfaceType;\n 
InitialFunctionInterfaceType initialFunctionInterface =\n make_FunctionInterface<void>(std::string(\"Hello World\"), vtkm::Id(42));\n\n typedef FunctionInterface<void(std::string, vtkm::Id, std::string)>\n AppendedFunctionInterfaceType1;\n AppendedFunctionInterfaceType1 appendedFunctionInterface1 =\n initialFunctionInterface.Append(std::string(\"foobar\"));\n // appendedFunctionInterface1 has parameters (\"Hello World\", 42, \"foobar\")\n\n typedef InitialFunctionInterfaceType::AppendType<vtkm::Float32>::type\n AppendedFunctionInterfaceType2;\n AppendedFunctionInterfaceType2 appendedFunctionInterface2 =\n initialFunctionInterface.Append(vtkm::Float32(3.141));\n // appendedFunctionInterface2 has parameters (\"Hello World\", 42, 3.141)\n ////\n //// END-EXAMPLE FunctionInterfaceAppend.cxx\n ////\n\n std::cout << \"Checking appended interface 1.\" << std::endl;\n VTKM_TEST_ASSERT(appendedFunctionInterface1.GetParameter<1>() == std::string(\"Hello World\"),\n \"Bad value in interface.\");\n VTKM_TEST_ASSERT(appendedFunctionInterface1.GetParameter<2>() == 42,\n \"Bad value in interface.\");\n VTKM_TEST_ASSERT(appendedFunctionInterface1.GetParameter<3>() == std::string(\"foobar\"),\n \"Bad value in interface.\");\n\n std::cout << \"Checking appended interface 2.\" << std::endl;\n VTKM_TEST_ASSERT(appendedFunctionInterface2.GetParameter<1>() == std::string(\"Hello World\"),\n \"Bad value in interface.\");\n VTKM_TEST_ASSERT(appendedFunctionInterface2.GetParameter<2>() == 42,\n \"Bad value in interface.\");\n VTKM_TEST_ASSERT(appendedFunctionInterface2.GetParameter<3>() == vtkm::Float32(3.141),\n \"Bad value in interface.\");\n}\n\nvoid Replace()\n{\n ////\n //// BEGIN-EXAMPLE FunctionInterfaceReplace.cxx\n ////\n using vtkm::internal::FunctionInterface;\n using vtkm::internal::make_FunctionInterface;\n\n typedef FunctionInterface<void(std::string, vtkm::Id)>\n InitialFunctionInterfaceType;\n InitialFunctionInterfaceType initialFunctionInterface =\n 
make_FunctionInterface<void>(std::string(\"Hello World\"), vtkm::Id(42));\n\n typedef FunctionInterface<void(vtkm::Float32, vtkm::Id)>\n ReplacedFunctionInterfaceType1;\n ReplacedFunctionInterfaceType1 replacedFunctionInterface1 =\n initialFunctionInterface.Replace<1>(vtkm::Float32(3.141));\n // replacedFunctionInterface1 has parameters (3.141, 42)\n\n typedef InitialFunctionInterfaceType::ReplaceType<2, std::string>::type\n ReplacedFunctionInterfaceType2;\n ReplacedFunctionInterfaceType2 replacedFunctionInterface2 =\n initialFunctionInterface.Replace<2>(std::string(\"foobar\"));\n // replacedFunctionInterface2 has parameters (\"Hello World\", \"foobar\")\n ////\n //// END-EXAMPLE FunctionInterfaceReplace.cxx\n ////\n\n std::cout << \"Checking replaced interface 1.\" << std::endl;\n VTKM_TEST_ASSERT(replacedFunctionInterface1.GetParameter<1>() == vtkm::Float32(3.141),\n \"Bad value in interface.\");\n VTKM_TEST_ASSERT(replacedFunctionInterface1.GetParameter<2>() == 42,\n \"Bad value in interface.\");\n\n std::cout << \"Checking replaced interface 2.\" << std::endl;\n VTKM_TEST_ASSERT(replacedFunctionInterface2.GetParameter<1>() == \"Hello World\",\n \"Bad value in interface.\");\n VTKM_TEST_ASSERT(replacedFunctionInterface2.GetParameter<2>() == \"foobar\",\n \"Bad value in interface.\");\n}\n\nvoid NextFunctionChainCall(\n const vtkm::internal::FunctionInterface<void(vtkm::Id *, vtkm::Id)> ¶meters)\n{\n vtkm::Id expectedValue = TestValue(0, vtkm::Id());\n\n vtkm::Id *array = parameters.GetParameter<1>();\n vtkm::Id numValues = parameters.GetParameter<2>();\n\n std::cout << \"Checking values.\" << std::endl;\n for (vtkm::Id index = 0; index < numValues; index++)\n {\n VTKM_TEST_ASSERT(array[index] == expectedValue, \"Bad value.\");\n }\n}\n\n////\n//// BEGIN-EXAMPLE FunctionInterfaceAppendAndReplace.cxx\n////\ntemplate<typename FunctionInterfaceType>\nvoid FunctionCallChain(const FunctionInterfaceType ¶meters,\n vtkm::Id arraySize)\n{\n // In this hypothetical 
function call chain, this function replaces the\n // first parameter with an array of that type and appends the array size\n // to the end of the parameters.\n\n typedef typename FunctionInterfaceType::template ParameterType<1>::type\n ArrayValueType;\n\n // Allocate and initialize array.\n ArrayValueType value = parameters.template GetParameter<1>();\n ArrayValueType *array = new ArrayValueType[arraySize];\n for (vtkm::Id index = 0; index < arraySize; index++)\n {\n array[index] = value;\n }\n\n // Call next function with modified parameters.\n NextFunctionChainCall(\n parameters.template Replace<1>(array).Append(arraySize));\n\n // Clean up.\n delete[] array;\n}\n////\n//// END-EXAMPLE FunctionInterfaceAppendAndReplace.cxx\n////\n\nvoid TryAppendReplace()\n{\n std::cout << \"Using replace and append in function call chain.\" << std::endl;\n FunctionCallChain(\n vtkm::internal::make_FunctionInterface<void>(TestValue(0, vtkm::Id())),\n 10);\n}\n\nnamespace StaticTransformNamespace {\n\n////\n//// BEGIN-EXAMPLE FunctionInterfaceStaticTransform.cxx\n////\nstruct ParametersToPointersFunctor {\n template<typename T, vtkm::IdComponent Index>\n struct ReturnType {\n typedef const T *type;\n };\n\n template<typename T, vtkm::IdComponent Index>\n VTKM_CONT\n const T *operator()(const T &x, vtkm::internal::IndexTag<Index>) const {\n return &x;\n }\n};\n\ntemplate<typename FunctionInterfaceType>\nVTKM_CONT\ntypename FunctionInterfaceType::\n template StaticTransformType<ParametersToPointersFunctor>::type\nParametersToPointers(const FunctionInterfaceType &functionInterface)\n{\n return functionInterface.StaticTransformCont(ParametersToPointersFunctor());\n}\n////\n//// END-EXAMPLE FunctionInterfaceStaticTransform.cxx\n////\n\n} // namespace StaticTransformNamespace\n\nusing namespace StaticTransformNamespace;\n\nvoid TryStaticTransform()\n{\n vtkm::internal::FunctionInterface<void(vtkm::Float32, vtkm::Int32)>\n originalFunctionInterface =\n 
vtkm::internal::make_FunctionInterface<void>(TestValue(1,vtkm::Float32()),\n TestValue(2,vtkm::Int32()));\n\n vtkm::internal::FunctionInterface<void(const vtkm::Float32 *, const vtkm::Int32 *)>\n transformedFunctionInterface =\n ParametersToPointers(originalFunctionInterface);\n\n VTKM_TEST_ASSERT(\n *transformedFunctionInterface.GetParameter<1>() == TestValue(1,vtkm::Float32()),\n \"Bad value in pointer.\");\n VTKM_TEST_ASSERT(\n *transformedFunctionInterface.GetParameter<2>() == TestValue(2,vtkm::Int32()),\n \"Bad value in pointer.\");\n}\n\nnamespace DynamicTransformNamespace {\n\n////\n//// BEGIN-EXAMPLE FunctionInterfaceDynamicTransform.cxx\n////\nstruct UnpackNumbersTransformFunctor {\n template<typename InputType,\n typename ContinueFunctor,\n vtkm::IdComponent Index>\n VTKM_CONT\n void operator()(const InputType &input,\n const ContinueFunctor &continueFunction,\n vtkm::internal::IndexTag<Index>) const\n {\n continueFunction(input);\n }\n\n template<typename ContinueFunctor, vtkm::IdComponent Index>\n VTKM_CONT\n void operator()(const std::string &input,\n const ContinueFunctor &continueFunction,\n vtkm::internal::IndexTag<Index>) const\n {\n if ((input[0] >= '0') && (input[0] <= '9'))\n {\n std::stringstream stream(input);\n vtkm::FloatDefault value;\n stream >> value;\n continueFunction(value);\n }\n else\n {\n continueFunction(input);\n }\n }\n};\n\n////\n//// PAUSE-EXAMPLE\n////\nstruct CheckFunctor\n{\n VTKM_CONT\n void operator()(vtkm::Float32 value1, std::string value2) const\n {\n VTKM_TEST_ASSERT(test_equal(value1, 42), \"Wrong converted value.\");\n VTKM_TEST_ASSERT(value2 == \"Hello World\", \"Wrong passed value\");\n }\n\n template<typename T1, typename T2>\n VTKM_CONT\n void operator()(T1, T2) const\n {\n VTKM_TEST_FAIL(\"Called wrong form of CheckFunctor\");\n }\n};\n////\n//// RESUME-EXAMPLE\n////\nstruct UnpackNumbersFinishFunctor {\n template<typename FunctionInterfaceType>\n VTKM_CONT\n void operator()(FunctionInterfaceType 
&functionInterface) const\n {\n // Do something\n ////\n //// PAUSE-EXAMPLE\n ////\n functionInterface.InvokeCont(CheckFunctor());\n ////\n //// RESUME-EXAMPLE\n ////\n }\n};\n\ntemplate<typename FunctionInterfaceType>\nvoid DoUnpackNumbers(const FunctionInterfaceType &functionInterface)\n{\n functionInterface.DynamicTransformCont(UnpackNumbersTransformFunctor(),\n UnpackNumbersFinishFunctor());\n}\n////\n//// END-EXAMPLE FunctionInterfaceDynamicTransform.cxx\n////\n\n} // namespace DynamicTransformNamespace\n\nusing namespace DynamicTransformNamespace;\n\nvoid TryDynamicTransform()\n{\n vtkm::internal::FunctionInterface<void(std::string, std::string)>\n functionInterface = vtkm::internal::make_FunctionInterface<void>(\n std::string(\"42\"), std::string(\"Hello World\"));\n DoUnpackNumbers(functionInterface);\n}\n\nnamespace DynamicTransformFunctorNamespace {\n\n////\n//// BEGIN-EXAMPLE DynamicTransform.cxx\n////\ntemplate<typename Device>\nstruct ArrayCopyFunctor {\n template<typename Signature>\n VTKM_CONT\n void operator()(\n vtkm::internal::FunctionInterface<Signature> functionInterface) const\n {\n functionInterface.InvokeCont(*this);\n }\n\n template<typename T, class CIn, class COut>\n VTKM_CONT\n void operator()(const vtkm::cont::ArrayHandle<T, CIn> &input,\n vtkm::cont::ArrayHandle<T, COut> &output) const\n {\n vtkm::cont::DeviceAdapterAlgorithm<Device>::Copy(input, output);\n }\n\n template<typename TIn, typename TOut, class CIn, class COut>\n VTKM_CONT\n void operator()(const vtkm::cont::ArrayHandle<TIn, CIn> &,\n vtkm::cont::ArrayHandle<TOut, COut> &) const\n {\n throw vtkm::cont::ErrorBadType(\"Arrays to copy must be the same type.\");\n }\n};\n\ntemplate<typename Device>\nvoid CopyDynamicArrays(vtkm::cont::DynamicArrayHandle input,\n vtkm::cont::DynamicArrayHandle output,\n Device)\n{\n vtkm::internal::FunctionInterface<void(vtkm::cont::DynamicArrayHandle,\n vtkm::cont::DynamicArrayHandle)>\n functionInterface =\n 
vtkm::internal::make_FunctionInterface<void>(input, output);\n\n functionInterface.DynamicTransformCont(\n vtkm::cont::internal::DynamicTransform(), ArrayCopyFunctor<Device>());\n}\n////\n//// END-EXAMPLE DynamicTransform.cxx\n////\n\n} // namespace DynamicTransformFunctorNamespace\n\nusing namespace DynamicTransformFunctorNamespace;\n\nvoid TryDynamicTransformFunctor()\n{\n static const vtkm::Id ARRAY_SIZE = 10;\n vtkm::Float32 buffer[ARRAY_SIZE];\n for (vtkm::Id index = 0; index < ARRAY_SIZE; index++)\n {\n buffer[index] = TestValue(index, vtkm::Float32());\n }\n\n vtkm::cont::ArrayHandle<vtkm::Float32> inputArray =\n vtkm::cont::make_ArrayHandle(buffer, ARRAY_SIZE);\n vtkm::cont::ArrayHandle<vtkm::Float32> outputArray;\n\n CopyDynamicArrays(inputArray, outputArray, VTKM_DEFAULT_DEVICE_ADAPTER_TAG());\n\n CheckPortal(outputArray.GetPortalConstControl());\n}\n\nnamespace ForEachNamespace {\n\n////\n//// BEGIN-EXAMPLE FunctionInterfaceForEach.cxx\n////\nstruct PrintArgumentFunctor{\n template<typename T, vtkm::IdComponent Index>\n VTKM_CONT\n void operator()(const T &argument, vtkm::internal::IndexTag<Index>) const\n {\n std::cout << Index << \":\" << argument << \" \";\n }\n};\n\ntemplate<typename FunctionInterfaceType>\nVTKM_CONT\nvoid PrintArguments(const FunctionInterfaceType &functionInterface)\n{\n std::cout << \"( \";\n functionInterface.ForEachCont(PrintArgumentFunctor());\n std::cout << \")\" << std::endl;\n}\n////\n//// END-EXAMPLE FunctionInterfaceForEach.cxx\n////\n\n} // namespace ForEachNamespace\n\nusing namespace ForEachNamespace;\n\nvoid TryPrintArguments()\n{\n PrintArguments(\n vtkm::internal::make_FunctionInterface<void>(std::string(\"Hello\"),\n 42,\n std::string(\"World\"),\n 3.14));\n}\n\nvoid Test()\n{\n BasicFunctionInterface();\n TryGetParameter();\n TrySetParameter();\n BasicInvoke();\n TryTransformedInvoke();\n TryPrintReturn();\n Append();\n Replace();\n TryAppendReplace();\n TryStaticTransform();\n TryDynamicTransform();\n 
TryDynamicTransformFunctor();\n TryPrintArguments();\n}\n\n} // anonymous namespace\n\nint FunctionInterface(int, char *[])\n{\n return vtkm::cont::testing::Testing::Run(Test);\n}\n"
},
{
"alpha_fraction": 0.6229138374328613,
"alphanum_fraction": 0.6621560454368591,
"avg_line_length": 26.712499618530273,
"blob_id": "0a7f0515a8f6f2596b81755b22a0b6409c26592b",
"content_id": "76d9bddb69a29a7f4c5d896bb066ca45f5c13256",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2217,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 80,
"path": "/examples/ArrayHandleZip.cxx",
"repo_name": "sujin-philip/VTKmUsersGuide",
"src_encoding": "UTF-8",
"text": "#include <vtkm/cont/ArrayHandleZip.h>\n\n#include <vtkm/cont/testing/Testing.h>\n\nnamespace {\n\ntemplate<typename ArrayHandleType>\nvoid CheckArray(const ArrayHandleType array)\n{\n VTKM_TEST_ASSERT(array.GetNumberOfValues() == 3,\n \"Permuted array has wrong size.\");\n\n typename ArrayHandleType::PortalConstControl portal =\n array.GetPortalConstControl();\n VTKM_TEST_ASSERT(portal.GetNumberOfValues() == 3,\n \"Permuted portal has wrong size.\");\n\n typedef vtkm::Pair<vtkm::Id, vtkm::Float64> PairType;\n\n VTKM_TEST_ASSERT(test_equal(portal.Get(0), PairType(3, 0.0)),\n \"Zipped array has wrong value.\");\n VTKM_TEST_ASSERT(test_equal(portal.Get(1), PairType(0, 0.1)),\n \"Zipped array has wrong value.\");\n VTKM_TEST_ASSERT(test_equal(portal.Get(2), PairType(1, 0.2)),\n \"Zipped array has wrong value.\");\n}\n\nvoid Test()\n{\n ////\n //// BEGIN-EXAMPLE ArrayHandleZip.cxx\n ////\n typedef vtkm::cont::ArrayHandle<vtkm::Id> ArrayType1;\n typedef ArrayType1::PortalControl PortalType1;\n\n typedef vtkm::cont::ArrayHandle<vtkm::Float64> ArrayType2;\n typedef ArrayType2::PortalControl PortalType2;\n\n // Create an array of vtkm::Id with values [3, 0, 1]\n ArrayType1 array1;\n array1.Allocate(3);\n PortalType1 portal1 = array1.GetPortalControl();\n portal1.Set(0, 3);\n portal1.Set(1, 0);\n portal1.Set(2, 1);\n\n // Create a second array of vtkm::Float32 with values [0.0, 0.1, 0.2]\n ArrayType2 array2;\n array2.Allocate(3);\n PortalType2 portal2 = array2.GetPortalControl();\n portal2.Set(0, 0.0);\n portal2.Set(1, 0.1);\n portal2.Set(2, 0.2);\n\n // Zip the two arrays together to create an array of\n // vtkm::Pair<vtkm::Id, vtkm::Float64> with values [(3,0.0), (0,0.1), (1,0.2)]\n vtkm::cont::ArrayHandleZip<ArrayType1,ArrayType2> zipArray(array1, array2);\n ////\n //// END-EXAMPLE ArrayHandleZip.cxx\n ////\n\n CheckArray(zipArray);\n\n CheckArray(\n ////\n //// BEGIN-EXAMPLE MakeArrayHandleZip.cxx\n ////\n vtkm::cont::make_ArrayHandleZip(array1,array2)\n ////\n 
//// END-EXAMPLE MakeArrayHandleZip.cxx\n ////\n );\n}\n\n} // anonymous namespace\n\nint ArrayHandleZip(int, char *[])\n{\n return vtkm::cont::testing::Testing::Run(Test);\n}\n"
},
{
"alpha_fraction": 0.6945571303367615,
"alphanum_fraction": 0.7055442929267883,
"avg_line_length": 22.109375,
"blob_id": "4831a5f952a738d6614dca9db29ebc87456d1da8",
"content_id": "aa39a3432167b220811a3499bdb24c0fc41520a5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 5916,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 256,
"path": "/examples/ArrayHandleAdapt.cxx",
"repo_name": "sujin-philip/VTKmUsersGuide",
"src_encoding": "UTF-8",
"text": "#include <vtkm/cont/ArrayHandle.h>\n#include <vtkm/cont/DataSet.h>\n#include <vtkm/cont/DataSetBuilderUniform.h>\n#include <vtkm/cont/DeviceAdapter.h>\n\n#include <vtkm/worklet/PointElevation.h>\n\n#include <vtkm/cont/testing/Testing.h>\n\n////\n//// BEGIN-EXAMPLE FictitiousFieldStorage.cxx\n////\n#include <deque>\n\n//// PAUSE-EXAMPLE\nnamespace {\n//// RESUME-EXAMPLE\n////\nstruct FooFields {\n float Pressure;\n float Temperature;\n float Velocity[3];\n // And so on...\n};\n\ntypedef std::deque<FooFields> FooFieldsDeque;\n////\n//// END-EXAMPLE FictitiousFieldStorage.cxx\n////\n\n} // anonymous namespace\n\n////\n//// BEGIN-EXAMPLE ArrayPortalAdapter.cxx\n////\n#include <vtkm/cont/internal/IteratorFromArrayPortal.h>\n#include <vtkm/Assert.h>\n\n//// PAUSE-EXAMPLE\nnamespace {\n//// RESUME-EXAMPLE\n////\n// DequeType expected to be either FooFieldsDeque or const FooFieldsDeque\ntemplate<typename DequeType>\nclass ArrayPortalFooPressure\n{\npublic:\n typedef float ValueType;\n\n VTKM_CONT\n ArrayPortalFooPressure() : Container(NULL) { }\n\n VTKM_CONT\n ArrayPortalFooPressure(DequeType *container) : Container(container) { }\n\n // Required to copy compatible types of ArrayPortalFooPressure. 
Really needed\n // to copy from non-const to const versions of array portals.\n template<typename OtherDequeType>\n VTKM_CONT\n ArrayPortalFooPressure(const ArrayPortalFooPressure<OtherDequeType> &other)\n : Container(other.GetContainer()) { }\n\n VTKM_CONT\n vtkm::Id GetNumberOfValues() const {\n return static_cast<vtkm::Id>(this->Container->size());\n }\n\n VTKM_CONT\n ValueType Get(vtkm::Id index) const {\n VTKM_ASSERT(index >= 0);\n VTKM_ASSERT(index < this->GetNumberOfValues());\n return (*this->Container)[index].Pressure;\n }\n\n VTKM_CONT\n void Set(vtkm::Id index, ValueType value) const {\n VTKM_ASSERT(index >= 0);\n VTKM_ASSERT(index < this->GetNumberOfValues());\n (*this->Container)[index].Pressure = value;\n }\n\n // Here for the copy constructor.\n VTKM_CONT\n DequeType *GetContainer() const { return this->Container; }\n\nprivate:\n DequeType *Container;\n};\n////\n//// END-EXAMPLE ArrayPortalAdapter.cxx\n////\n\n}\n\n////\n//// BEGIN-EXAMPLE StoragePrototype.cxx\n////\nnamespace vtkm {\nnamespace cont {\nnamespace internal {\n\ntemplate<typename T, class StorageTag>\nclass Storage;\n\n}\n}\n} // namespace vtkm::cont::internal\n////\n//// END-EXAMPLE StoragePrototype.cxx\n////\n\n////\n//// BEGIN-EXAMPLE StorageAdapter.cxx\n////\n// Includes or definition for ArrayPortalFooPressure\n\nstruct StorageTagFooPressure { };\n\nnamespace vtkm {\nnamespace cont {\nnamespace internal {\n\ntemplate<>\nclass Storage<float, StorageTagFooPressure>\n{\npublic:\n typedef float ValueType;\n\n typedef ArrayPortalFooPressure<FooFieldsDeque> PortalType;\n typedef ArrayPortalFooPressure<const FooFieldsDeque> PortalConstType;\n\n VTKM_CONT\n Storage() : Container(NULL) { }\n\n VTKM_CONT\n Storage(FooFieldsDeque *container) : Container(container) { }\n\n VTKM_CONT\n PortalType GetPortal() { return PortalType(this->Container); }\n\n VTKM_CONT\n PortalConstType GetPortalConst() const {\n return PortalConstType(this->Container);\n }\n\n VTKM_CONT\n vtkm::Id GetNumberOfValues() 
const {\n return static_cast<vtkm::Id>(this->Container->size());\n }\n\n VTKM_CONT\n void Allocate(vtkm::Id numberOfValues) {\n this->Container->resize(numberOfValues);\n }\n\n VTKM_CONT\n void Shrink(vtkm::Id numberOfValues) {\n this->Container->resize(numberOfValues);\n }\n\n VTKM_CONT\n void ReleaseResources() { this->Container->clear(); }\n\nprivate:\n FooFieldsDeque *Container;\n};\n\n}\n}\n} // namespace vtkm::cont::internal\n////\n//// END-EXAMPLE StorageAdapter.cxx\n////\n\nnamespace {\n\n////\n//// BEGIN-EXAMPLE ArrayHandleAdapter.cxx\n////\nclass ArrayHandleFooPressure\n : public vtkm::cont::ArrayHandle<float, StorageTagFooPressure>\n{\nprivate:\n typedef vtkm::cont::internal::Storage<float, StorageTagFooPressure>\n StorageType;\n\npublic:\n VTKM_ARRAY_HANDLE_SUBCLASS_NT(\n ArrayHandleFooPressure,\n (vtkm::cont::ArrayHandle<float, StorageTagFooPressure>));\n\n VTKM_CONT\n ArrayHandleFooPressure(FooFieldsDeque *container)\n : Superclass(StorageType(container)) { }\n};\n////\n//// END-EXAMPLE ArrayHandleAdapter.cxx\n////\n\n////\n//// BEGIN-EXAMPLE UsingArrayHandleAdapter.cxx\n////\nVTKM_CONT\nvoid GetElevationAirPressure(vtkm::cont::DataSet grid, FooFieldsDeque *fields)\n{\n // Make an array handle that points to the pressure values in the fields.\n ArrayHandleFooPressure pressureHandle(fields);\n\n // Use the elevation worklet to estimate atmospheric pressure based on the\n // height of the point coordinates. 
Atmospheric pressure is 101325 Pa at\n // sea level and drops about 12 Pa per meter.\n vtkm::worklet::PointElevation elevation;\n elevation.SetLowPoint(vtkm::make_Vec(0.0, 0.0, 0.0));\n elevation.SetHighPoint(vtkm::make_Vec(0.0, 0.0, 2000.0));\n elevation.SetRange(101325.0, 77325.0);\n\n vtkm::worklet::DispatcherMapField<vtkm::worklet::PointElevation>\n dispatcher(elevation);\n dispatcher.Invoke(grid.GetCoordinateSystem().GetData(), pressureHandle);\n\n // Make sure the values are flushed back to the control environment.\n pressureHandle.GetPortalConstControl();\n\n // Now the pressure field is in the fields container.\n}\n////\n//// END-EXAMPLE UsingArrayHandleAdapter.cxx\n////\n\nvoid Test()\n{\n vtkm::cont::DataSet grid =\n vtkm::cont::DataSetBuilderUniform::Create(vtkm::Id3(2, 2, 50));\n\n FooFieldsDeque fields(4*50);\n GetElevationAirPressure(grid, &fields);\n\n vtkm::Float32 value = 101325.0f;\n for (vtkm::Id heightIndex = 0; heightIndex < 50; heightIndex++)\n {\n for (vtkm::Id slabIndex = 0; slabIndex < 4; slabIndex++)\n {\n VTKM_TEST_ASSERT(test_equal(fields[4*heightIndex+slabIndex].Pressure,\n value),\n \"Bad value.\");\n }\n value -= 12.0f;\n }\n}\n\n} // anonymous namespace\n\nint ArrayHandleAdapt(int, char *[])\n{\n return vtkm::cont::testing::Testing::Run(Test);\n}\n"
},
{
"alpha_fraction": 0.6978873014450073,
"alphanum_fraction": 0.7061032652854919,
"avg_line_length": 25.962024688720703,
"blob_id": "5fe422c5b5fb3b9f941be0657c502351d17585e6",
"content_id": "34eaadc072ecc51437f4e127db807a2a0333bf3a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 4260,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 158,
"path": "/examples/ArrayHandleImplicit.cxx",
"repo_name": "sujin-philip/VTKmUsersGuide",
"src_encoding": "UTF-8",
"text": "// The following method for declaring an implicit array still works, but\n// it is much easier to use the ArrayHandleImplict class.\n////\n//// BEGIN-EXAMPLE ImplicitArrayPortal.cxx\n////\n#include <vtkm/cont/ArrayHandle.h>\n#include <vtkm/cont/StorageImplicit.h>\n\nclass ArrayPortalEvenNumbers\n{\npublic:\n typedef vtkm::Id ValueType;\n\n VTKM_EXEC_CONT\n ArrayPortalEvenNumbers() : NumberOfValues(0) { }\n\n VTKM_EXEC_CONT\n ArrayPortalEvenNumbers(vtkm::Id numValues) : NumberOfValues(numValues) { }\n\n VTKM_EXEC_CONT\n vtkm::Id GetNumberOfValues() const { return this->NumberOfValues; }\n\n VTKM_EXEC_CONT\n ValueType Get(vtkm::Id index) const { return 2*index; }\n\nprivate:\n vtkm::Id NumberOfValues;\n};\n////\n//// END-EXAMPLE ImplicitArrayPortal.cxx\n////\n\n// More of the example no longer being used.\n////\n//// BEGIN-EXAMPLE ImplicitArrayStorage.cxx\n////\ntypedef vtkm::cont::StorageTagImplicit<ArrayPortalEvenNumbers>\n StorageTagEvenNumbers;\n////\n//// END-EXAMPLE ImplicitArrayStorage.cxx\n////\n\n// More of the example no longer being used.\n////\n//// BEGIN-EXAMPLE ImplicitArrayHandle.cxx\n////\nclass ArrayHandleEvenNumbers\n : public vtkm::cont::ArrayHandle<vtkm::Id, StorageTagEvenNumbers>\n{\npublic:\n VTKM_ARRAY_HANDLE_SUBCLASS_NT(\n ArrayHandleEvenNumbers,\n (vtkm::cont::ArrayHandle<vtkm::Id,StorageTagEvenNumbers>));\n\n VTKM_CONT\n ArrayHandleEvenNumbers(vtkm::Id length)\n : Superclass(ArrayPortalEvenNumbers(length)) { }\n};\n////\n//// END-EXAMPLE ImplicitArrayHandle.cxx\n////\n\n////\n//// BEGIN-EXAMPLE ImplicitArrayFunctor.cxx\n////\nstruct DoubleIndexFunctor\n{\n VTKM_EXEC_CONT\n vtkm::Id operator()(vtkm::Id index) const\n {\n return 2*index;\n }\n};\n////\n//// END-EXAMPLE ImplicitArrayFunctor.cxx\n////\n\n////\n//// BEGIN-EXAMPLE ImplicitArrayHandle2.cxx\n////\n#include <vtkm/cont/ArrayHandleImplicit.h>\n\nclass ArrayHandleDoubleIndex\n : public vtkm::cont::ArrayHandleImplicit<vtkm::Id, DoubleIndexFunctor>\n{\npublic:\n 
VTKM_ARRAY_HANDLE_SUBCLASS_NT(\n ArrayHandleDoubleIndex,\n (vtkm::cont::ArrayHandleImplicit<vtkm::Id,DoubleIndexFunctor>));\n\n VTKM_CONT\n ArrayHandleDoubleIndex(vtkm::Id numberOfValues)\n : Superclass(DoubleIndexFunctor(), numberOfValues) { }\n};\n////\n//// END-EXAMPLE ImplicitArrayHandle2.cxx\n////\n\n#include <vtkm/cont/DeviceAdapter.h>\n\n#include <vtkm/cont/testing/Testing.h>\n\nnamespace {\n\nvoid Test()\n{\n ////\n //// BEGIN-EXAMPLE DeclareImplicitArray.cxx\n ////\n vtkm::cont::ArrayHandleImplicit<vtkm::Id, DoubleIndexFunctor>\n implicitArray(DoubleIndexFunctor(), 50);\n ////\n //// END-EXAMPLE DeclareImplicitArray.cxx\n ////\n\n ArrayHandleDoubleIndex implicitArray2(50);\n ArrayHandleEvenNumbers implicitArray3(50);\n\n vtkm::cont::ArrayHandle<vtkm::Id> explicitArray;\n vtkm::cont::ArrayHandle<vtkm::Id> explicitArray2;\n vtkm::cont::ArrayHandle<vtkm::Id> explicitArray3;\n\n vtkm::cont::DeviceAdapterAlgorithm<VTKM_DEFAULT_DEVICE_ADAPTER_TAG>::Copy(\n implicitArray, explicitArray);\n vtkm::cont::DeviceAdapterAlgorithm<VTKM_DEFAULT_DEVICE_ADAPTER_TAG>::Copy(\n implicitArray2, explicitArray2);\n vtkm::cont::DeviceAdapterAlgorithm<VTKM_DEFAULT_DEVICE_ADAPTER_TAG>::Copy(\n implicitArray3, explicitArray3);\n\n VTKM_TEST_ASSERT(explicitArray.GetNumberOfValues() == 50, \"Wrong num vals.\");\n VTKM_TEST_ASSERT(explicitArray2.GetNumberOfValues() == 50, \"Wrong num vals.\");\n VTKM_TEST_ASSERT(explicitArray3.GetNumberOfValues() == 50, \"Wrong num vals.\");\n for (vtkm::Id index = 0; index < explicitArray.GetNumberOfValues(); index++)\n {\n VTKM_TEST_ASSERT(explicitArray.GetPortalConstControl().Get(index) == 2*index,\n \"Bad array value.\");\n VTKM_TEST_ASSERT(explicitArray2.GetPortalConstControl().Get(index) == 2*index,\n \"Bad array value.\");\n VTKM_TEST_ASSERT(explicitArray3.GetPortalConstControl().Get(index) == 2*index,\n \"Bad array value.\");\n }\n\n // Just an example of using make_ArrayHandleImplicit\n ////\n //// BEGIN-EXAMPLE MakeArrayHandleImplicit.cxx\n 
////\n vtkm::cont::make_ArrayHandleImplicit<vtkm::Id>(DoubleIndexFunctor(), 50);\n ////\n //// END-EXAMPLE MakeArrayHandleImplicit.cxx\n ////\n}\n\n} // anonymous namespace\n\nint ArrayHandleImplicit(int, char *[])\n{\n return vtkm::cont::testing::Testing::Run(Test);\n}\n"
},
{
"alpha_fraction": 0.5873592495918274,
"alphanum_fraction": 0.6240465044975281,
"avg_line_length": 28.923913955688477,
"blob_id": "974de49f3f7d19efde21df47d34f0f4f1237feaa",
"content_id": "2c2ab9d85c5192a6c37e4522337abda6d453e3a6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2753,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 92,
"path": "/examples/ArrayHandleCounting.cxx",
"repo_name": "sujin-philip/VTKmUsersGuide",
"src_encoding": "UTF-8",
"text": "#include <vtkm/cont/ArrayHandleCounting.h>\n#include <vtkm/cont/ArrayHandleIndex.h>\n\n#include <vtkm/cont/testing/Testing.h>\n\nnamespace {\n\ntemplate<typename ArrayHandleType>\nvoid CheckArray(const ArrayHandleType array,\n typename ArrayHandleType::ValueType startValue,\n typename ArrayHandleType::ValueType stepValue,\n vtkm::Id expectedLength)\n{\n VTKM_TEST_ASSERT(array.GetNumberOfValues() == expectedLength,\n \"Array has wrong size.\");\n\n typename ArrayHandleType::PortalConstControl portal =\n array.GetPortalConstControl();\n VTKM_TEST_ASSERT(portal.GetNumberOfValues() == expectedLength,\n \"Portal has wrong size.\");\n\n typename ArrayHandleType::ValueType expectedValue = startValue;\n for (vtkm::Id index = 0; index < expectedLength; index++)\n {\n VTKM_TEST_ASSERT(test_equal(portal.Get(index), expectedValue),\n \"Array has wrong value.\");\n expectedValue = expectedValue + stepValue;\n }\n}\n\nvoid Test()\n{\n ////\n //// BEGIN-EXAMPLE ArrayHandleIndex.cxx\n ////\n // Create an array containing [0, 1, 2, 3, ..., 49].\n vtkm::cont::ArrayHandleIndex indexArray(50);\n ////\n //// END-EXAMPLE ArrayHandleIndex.cxx\n ////\n CheckArray(indexArray, 0, 1, 50);\n\n ////\n //// BEGIN-EXAMPLE ArrayHandleCountingBasic.cxx\n ////\n // Create an array containing [-1.0, -0.9, -0.8, ..., 0.9, 1.0]\n vtkm::cont::ArrayHandleCounting<vtkm::Float32> sampleArray(-1.0f, 0.1f, 21);\n ////\n //// END-EXAMPLE ArrayHandleCountingBasic.cxx\n ////\n CheckArray(sampleArray, -1.0f, 0.1f, 21);\n\n CheckArray(\n ////\n //// BEGIN-EXAMPLE MakeArrayHandleCountingBasic.cxx\n ////\n // Create an array of 50 entries, all containing the number 3.\n vtkm::cont::make_ArrayHandleCounting(-1.0f, 0.1f, 21)\n ////\n //// END-EXAMPLE MakeArrayHandleCountingBasic.cxx\n ////\n , -1.0f, 0.1f, 21);\n ////\n //// BEGIN-EXAMPLE ArrayHandleCountingBackward.cxx\n ////\n // Create an array containing [49, 48, 47, 46, ..., 0].\n vtkm::cont::ArrayHandleCounting<vtkm::Id> backwardIndexArray(49, -1, 
50);\n ////\n //// END-EXAMPLE ArrayHandleCountingBackward.cxx\n ////\n CheckArray(backwardIndexArray, 49, -1, 50);\n\n CheckArray(\n ////\n //// BEGIN-EXAMPLE ArrayHandleCountingVec.cxx\n ////\n // Create an array containg [(0,-3,75), (1,2,25), (3,7,-25)]\n vtkm::cont::make_ArrayHandleCounting(vtkm::make_Vec(0, -3, 75),\n vtkm::make_Vec(1, 5, -50),\n 3)\n ////\n //// END-EXAMPLE ArrayHandleCountingVec.cxx\n ////\n , vtkm::make_Vec(0, -3, 75), vtkm::make_Vec(1, 5, -50), 3);\n}\n\n} // anonymous namespace\n\nint ArrayHandleCounting(int, char *[])\n{\n return vtkm::cont::testing::Testing::Run(Test);\n}\n"
},
{
"alpha_fraction": 0.5222112536430359,
"alphanum_fraction": 0.5774925947189331,
"avg_line_length": 19.67346954345703,
"blob_id": "69220cfbb288c804cb2968f80fe5a1749c1c80b8",
"content_id": "d1381d7f0580b94717dcbbf05cdf1bcb74be9727",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1013,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 49,
"path": "/examples/Matrix.cxx",
"repo_name": "sujin-philip/VTKmUsersGuide",
"src_encoding": "UTF-8",
"text": "#include <vtkm/Matrix.h>\n\n#include <vtkm/testing/Testing.h>\n\nnamespace {\n\nVTKM_CONT\nvoid BuildMatrix()\n{\n std::cout << \"Building matrix containing \" << std::endl\n << \"| 0 1 2 |\" << std::endl\n << \"| 10 11 12 |\" << std::endl;\n\n ////\n //// BEGIN-EXAMPLE BuildMatrix.cxx\n ////\n vtkm::Matrix<vtkm::Float32, 2, 3> matrix;\n\n // Using parenthesis notation.\n matrix(0,0) = 0.0f;\n matrix(0,1) = 1.0f;\n matrix(0,2) = 2.0f;\n\n // Using bracket notation.\n matrix[1][0] = 10.0f;\n matrix[1][1] = 11.0f;\n matrix[1][2] = 12.0f;\n ////\n //// END-EXAMPLE BuildMatrix.cxx\n ////\n\n vtkm::Vec<vtkm::Float32,2> termVec(1.0f, 0.1f);\n vtkm::Vec<vtkm::Float32,3> multVec = vtkm::MatrixMultiply(termVec, matrix);\n// std::cout << multVec << std::endl;\n VTKM_TEST_ASSERT(test_equal(multVec, vtkm::make_Vec(1.0, 2.1, 3.2)),\n \"Unexpected product.\");\n}\n\nvoid Run()\n{\n BuildMatrix();\n}\n\n} // anonymous namespace\n\nint Matrix(int, char*[])\n{\n return vtkm::testing::Testing::Run(Run);\n}\n"
},
{
"alpha_fraction": 0.6965088844299316,
"alphanum_fraction": 0.7099992036819458,
"avg_line_length": 28.052255630493164,
"blob_id": "1f6d5509a148e5dd9b82a88dc09b7c30768a834a",
"content_id": "375f70471065b62eb93365368bc801ba077ab47e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 12231,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 421,
"path": "/examples/ArrayHandleDerived.cxx",
"repo_name": "sujin-philip/VTKmUsersGuide",
"src_encoding": "UTF-8",
"text": "////\n//// BEGIN-EXAMPLE DerivedArrayPortal.cxx\n////\n#include <vtkm/cont/ArrayHandle.h>\n#include <vtkm/cont/ArrayPortal.h>\n\ntemplate<typename P1, typename P2>\nclass ArrayPortalConcatenate\n{\npublic:\n typedef P1 PortalType1;\n typedef P2 PortalType2;\n typedef typename PortalType1::ValueType ValueType;\n\n VTKM_SUPPRESS_EXEC_WARNINGS\n VTKM_EXEC_CONT\n ArrayPortalConcatenate() : Portal1(), Portal2() { }\n\n VTKM_SUPPRESS_EXEC_WARNINGS\n VTKM_EXEC_CONT\n ArrayPortalConcatenate(const PortalType1 &portal1, const PortalType2 portal2)\n : Portal1(portal1), Portal2(portal2) { }\n\n /// Copy constructor for any other ArrayPortalConcatenate with a portal type\n /// that can be copied to this portal type. This allows us to do any type\n /// casting that the portals do (like the non-const to const cast).\n VTKM_SUPPRESS_EXEC_WARNINGS\n template<typename OtherP1, typename OtherP2>\n VTKM_EXEC_CONT\n ArrayPortalConcatenate(const ArrayPortalConcatenate<OtherP1,OtherP2> &src)\n : Portal1(src.GetPortal1()), Portal2(src.GetPortal2()) { }\n\n VTKM_SUPPRESS_EXEC_WARNINGS\n VTKM_EXEC_CONT\n vtkm::Id GetNumberOfValues() const {\n return\n this->Portal1.GetNumberOfValues() + this->Portal2.GetNumberOfValues();\n }\n\n VTKM_SUPPRESS_EXEC_WARNINGS\n VTKM_EXEC_CONT\n ValueType Get(vtkm::Id index) const {\n if (index < this->Portal1.GetNumberOfValues())\n {\n return this->Portal1.Get(index);\n }\n else\n {\n return this->Portal2.Get(index - this->Portal1.GetNumberOfValues());\n }\n }\n\n VTKM_SUPPRESS_EXEC_WARNINGS\n VTKM_EXEC_CONT\n void Set(vtkm::Id index, const ValueType &value) const {\n if (index < this->Portal1.GetNumberOfValues())\n {\n this->Portal1.Set(index, value);\n }\n else\n {\n this->Portal2.Set(index - this->Portal1.GetNumberOfValues(), value);\n }\n }\n\n VTKM_EXEC_CONT\n const PortalType1 &GetPortal1() const { return this->Portal1; }\n VTKM_EXEC_CONT\n const PortalType2 &GetPortal2() const { return this->Portal2; }\n\nprivate:\n PortalType1 Portal1;\n 
PortalType2 Portal2;\n};\n////\n//// END-EXAMPLE DerivedArrayPortal.cxx\n////\n\n////\n//// BEGIN-EXAMPLE DerivedArrayStorage.cxx\n////\ntemplate<typename ArrayHandleType1, typename ArrayHandleType2>\nstruct StorageTagConcatenate { };\n\nnamespace vtkm {\nnamespace cont {\nnamespace internal {\n\ntemplate<typename ArrayHandleType1, typename ArrayHandleType2>\nclass Storage<\n typename ArrayHandleType1::ValueType,\n StorageTagConcatenate<ArrayHandleType1, ArrayHandleType2> >\n{\npublic:\n typedef typename ArrayHandleType1::ValueType ValueType;\n\n typedef ArrayPortalConcatenate<\n typename ArrayHandleType1::PortalControl,\n typename ArrayHandleType2::PortalControl> PortalType;\n typedef ArrayPortalConcatenate<\n typename ArrayHandleType1::PortalConstControl,\n typename ArrayHandleType2::PortalConstControl> PortalConstType;\n\n VTKM_CONT\n Storage() : Valid(false) { }\n\n VTKM_CONT\n Storage(const ArrayHandleType1 array1, const ArrayHandleType2 array2)\n : Array1(array1), Array2(array2), Valid(true) { }\n\n VTKM_CONT\n PortalType GetPortal() {\n VTKM_ASSERT(this->Valid);\n return PortalType(this->Array1.GetPortalControl(),\n this->Array2.GetPortalControl());\n }\n\n VTKM_CONT\n PortalConstType GetPortalConst() const {\n VTKM_ASSERT(this->Valid);\n return PortalConstType(this->Array1.GetPortalConstControl(),\n this->Array2.GetPortalConstControl());\n }\n\n VTKM_CONT\n vtkm::Id GetNumberOfValues() const {\n VTKM_ASSERT(this->Valid);\n return this->Array1.GetNumberOfValues() + this->Array2.GetNumberOfValues();\n }\n\n VTKM_CONT\n void Allocate(vtkm::Id numberOfValues) {\n VTKM_ASSERT(this->Valid);\n // This implementation of allocate, which allocates the same amount in both\n // arrays, is arbitrary. It could, for example, leave the size of Array1\n // alone and change the size of Array2. 
Or, probably most likely, it could\n // simply throw an error and state that this operation is invalid.\n vtkm::Id half = numberOfValues/2;\n this->Array1.Allocate(numberOfValues-half);\n this->Array2.Allocate(half);\n }\n\n VTKM_CONT\n void Shrink(vtkm::Id numberOfValues) {\n VTKM_ASSERT(this->Valid);\n if (numberOfValues < this->Array1.GetNumberOfValues())\n {\n this->Array1.Shrink(numberOfValues);\n this->Array2.Shrink(0);\n }\n else\n {\n this->Array2.Shrink(numberOfValues - this->Array1.GetNumberOfValues());\n }\n }\n\n VTKM_CONT\n void ReleaseResources() {\n VTKM_ASSERT(this->Valid);\n this->Array1.ReleaseResources();\n this->Array2.ReleaseResources();\n }\n\n // Requried for later use in ArrayTransfer class.\n VTKM_CONT\n const ArrayHandleType1 &GetArray1() const {\n VTKM_ASSERT(this->Valid);\n return this->Array1;\n }\n VTKM_CONT\n const ArrayHandleType2 &GetArray2() const {\n VTKM_ASSERT(this->Valid);\n return this->Array2;\n }\n\nprivate:\n ArrayHandleType1 Array1;\n ArrayHandleType2 Array2;\n bool Valid;\n};\n\n}\n}\n} // namespace vtkm::cont::internal\n////\n//// END-EXAMPLE DerivedArrayStorage.cxx\n////\n\n////\n//// BEGIN-EXAMPLE ArrayTransferPrototype.cxx\n////\nnamespace vtkm {\nnamespace cont {\nnamespace internal {\n\ntemplate<typename T,typename StorageTag,typename DeviceAdapterTag>\nclass ArrayTransfer;\n\n}\n}\n} //namespace vtkm::cont::internal\n////\n//// END-EXAMPLE ArrayTransferPrototype.cxx\n////\n\n////\n//// BEGIN-EXAMPLE DerivedArrayTransfer.cxx\n////\nnamespace vtkm {\nnamespace cont {\nnamespace internal {\n\ntemplate<typename ArrayHandleType1,\n typename ArrayHandleType2,\n typename Device>\nclass ArrayTransfer<\n typename ArrayHandleType1::ValueType,\n StorageTagConcatenate<ArrayHandleType1,ArrayHandleType2>,\n Device>\n{\npublic:\n typedef typename ArrayHandleType1::ValueType ValueType;\n\nprivate:\n typedef StorageTagConcatenate<ArrayHandleType1,ArrayHandleType2>\n StorageTag;\n typedef 
vtkm::cont::internal::Storage<ValueType,StorageTag>\n StorageType;\n\npublic:\n typedef typename StorageType::PortalType PortalControl;\n typedef typename StorageType::PortalConstType PortalConstControl;\n\n typedef ArrayPortalConcatenate<\n typename ArrayHandleType1::template ExecutionTypes<Device>::Portal,\n typename ArrayHandleType2::template ExecutionTypes<Device>::Portal>\n PortalExecution;\n typedef ArrayPortalConcatenate<\n typename ArrayHandleType1::template ExecutionTypes<Device>::PortalConst,\n typename ArrayHandleType2::template ExecutionTypes<Device>::PortalConst>\n PortalConstExecution;\n\n VTKM_CONT\n ArrayTransfer(StorageType *storage)\n : Array1(storage->GetArray1()), Array2(storage->GetArray2())\n { }\n\n VTKM_CONT\n vtkm::Id GetNumberOfValues() const {\n return this->Array1.GetNumberOfValues() + this->Array2.GetNumberOfValues();\n }\n\n VTKM_CONT\n PortalConstExecution PrepareForInput(bool vtkmNotUsed(updateData)) {\n return PortalConstExecution(this->Array1.PrepareForInput(Device()),\n this->Array2.PrepareForInput(Device()));\n }\n\n VTKM_CONT\n PortalExecution PrepareForInPlace(bool vtkmNotUsed(updateData)) {\n return PortalExecution(this->Array1.PrepareForInPlace(Device()),\n this->Array2.PrepareForInPlace(Device()));\n }\n\n VTKM_CONT\n PortalExecution PrepareForOutput(vtkm::Id numberOfValues)\n {\n // This implementation of allocate, which allocates the same amount in both\n // arrays, is arbitrary. It could, for example, leave the size of Array1\n // alone and change the size of Array2. Or, probably most likely, it could\n // simply throw an error and state that this operation is invalid.\n vtkm::Id half = numberOfValues/2;\n return PortalExecution(\n this->Array1.PrepareForOutput(numberOfValues-half, Device()),\n this->Array2.PrepareForOutput(half, Device()));\n }\n\n VTKM_CONT\n void RetrieveOutputData(StorageType *vtkmNotUsed(storage)) const {\n // Implementation of this method should be unnecessary. 
The internal\n // array handles should automatically retrieve the output data as\n // necessary.\n }\n\n VTKM_CONT\n void Shrink(vtkm::Id numberOfValues) {\n if (numberOfValues < this->Array1.GetNumberOfValues())\n {\n this->Array1.Shrink(numberOfValues);\n this->Array2.Shrink(0);\n }\n else\n {\n this->Array2.Shrink(numberOfValues - this->Array1.GetNumberOfValues());\n }\n }\n\n VTKM_CONT\n void ReleaseResources() {\n this->Array1.ReleaseResourcesExecution();\n this->Array2.ReleaseResourcesExecution();\n }\n\nprivate:\n ArrayHandleType1 Array1;\n ArrayHandleType2 Array2;\n};\n\n}\n}\n} // namespace vtkm::cont::internal\n////\n//// END-EXAMPLE DerivedArrayTransfer.cxx\n////\n\n////\n//// BEGIN-EXAMPLE DerivedArrayHandle.cxx\n////\ntemplate<typename ArrayHandleType1, typename ArrayHandleType2>\nclass ArrayHandleConcatenate\n : public vtkm::cont::ArrayHandle<\n typename ArrayHandleType1::ValueType,\n StorageTagConcatenate<ArrayHandleType1,ArrayHandleType2> >\n{\npublic:\n VTKM_ARRAY_HANDLE_SUBCLASS(\n ArrayHandleConcatenate,\n (ArrayHandleConcatenate<ArrayHandleType1,ArrayHandleType2>),\n (vtkm::cont::ArrayHandle<\n typename ArrayHandleType1::ValueType,\n StorageTagConcatenate<ArrayHandleType1,ArrayHandleType2> >));\n\nprivate:\n typedef vtkm::cont::internal::Storage<ValueType,StorageTag> StorageType;\n\npublic:\n VTKM_CONT\n ArrayHandleConcatenate(const ArrayHandleType1 &array1,\n const ArrayHandleType2 &array2)\n : Superclass(StorageType(array1, array2)) { }\n};\n////\n//// END-EXAMPLE DerivedArrayHandle.cxx\n////\n\n#include <vtkm/cont/ArrayHandleIndex.h>\n#include <vtkm/cont/DeviceAdapter.h>\n#include <vtkm/cont/DynamicArrayHandle.h>\n\n#include <vtkm/cont/testing/Testing.h>\n\nnamespace {\n\nvoid Test()\n{\n const vtkm::Id HALF_ARRAY_SIZE = 25;\n const vtkm::Id ARRAY_SIZE = 2*HALF_ARRAY_SIZE;\n vtkm::cont::ArrayHandleIndex inputArray(ARRAY_SIZE);\n\n typedef vtkm::cont::ArrayHandle<vtkm::Id> BaseArrayType;\n BaseArrayType array1;\n BaseArrayType array2;\n\n 
ArrayHandleConcatenate<BaseArrayType,BaseArrayType>\n concatArray(array1, array2);\n\n vtkm::cont::DeviceAdapterAlgorithm<VTKM_DEFAULT_DEVICE_ADAPTER_TAG>::Copy(\n inputArray, concatArray);\n\n VTKM_TEST_ASSERT(array1.GetNumberOfValues() == HALF_ARRAY_SIZE,\"Wrong size.\");\n VTKM_TEST_ASSERT(array2.GetNumberOfValues() == HALF_ARRAY_SIZE,\"Wrong size.\");\n for (vtkm::Id index = 0; index < HALF_ARRAY_SIZE; index++)\n {\n VTKM_TEST_ASSERT(array1.GetPortalConstControl().Get(index) == index,\n \"Wrong value.\");\n VTKM_TEST_ASSERT(\n array2.GetPortalConstControl().Get(index) == index+HALF_ARRAY_SIZE,\n \"Wrong value.\");\n }\n\n ArrayHandleConcatenate<BaseArrayType,BaseArrayType>\n switchedArray(array2, array1);\n BaseArrayType targetArray;\n\n vtkm::cont::DeviceAdapterAlgorithm<VTKM_DEFAULT_DEVICE_ADAPTER_TAG>::Copy(\n switchedArray, targetArray);\n for (vtkm::Id index = 0; index < HALF_ARRAY_SIZE; index++)\n {\n VTKM_TEST_ASSERT(targetArray.GetPortalConstControl().Get(index)\n == index+HALF_ARRAY_SIZE,\n \"Wrong value.\");\n }\n for (vtkm::Id index = HALF_ARRAY_SIZE; index < ARRAY_SIZE; index++)\n {\n VTKM_TEST_ASSERT(targetArray.GetPortalConstControl().Get(index)\n == index-HALF_ARRAY_SIZE,\n \"Wrong value.\");\n }\n\n // Check all PrepareFor* methods.\n concatArray.ReleaseResourcesExecution();\n concatArray.PrepareForInput(VTKM_DEFAULT_DEVICE_ADAPTER_TAG());\n concatArray.PrepareForInPlace(VTKM_DEFAULT_DEVICE_ADAPTER_TAG());\n concatArray.PrepareForOutput(ARRAY_SIZE+1, VTKM_DEFAULT_DEVICE_ADAPTER_TAG());\n\n // Make sure that the array can go into and out of a dynamic array handle.\n vtkm::cont::DynamicArrayHandle dynamicArray = concatArray;\n\n ArrayHandleConcatenate<BaseArrayType,BaseArrayType> concatArrayShallowCopy;\n VTKM_TEST_ASSERT(concatArray != concatArrayShallowCopy, \"Huh?\");\n dynamicArray.CopyTo(concatArrayShallowCopy);\n VTKM_TEST_ASSERT(concatArray == concatArrayShallowCopy,\n \"Did not get array out of dynamic.\");\n}\n\n} // anonymous 
namespace\n\nint ArrayHandleDerived(int, char *[])\n{\n return vtkm::cont::testing::Testing::Run(Test);\n}\n"
},
{
"alpha_fraction": 0.6345732808113098,
"alphanum_fraction": 0.6536417603492737,
"avg_line_length": 27.061403274536133,
"blob_id": "1d45e64dff5ca73128d11216a88c808dc0b7dd1d",
"content_id": "1a71cf96acf2f6f91f3490e766b4c53107401d59",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 9597,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 342,
"path": "/examples/Traits.cxx",
"repo_name": "sujin-philip/VTKmUsersGuide",
"src_encoding": "UTF-8",
"text": "#include <vtkm/TypeTraits.h>\n#include <vtkm/VecTraits.h>\n\n#include <vtkm/testing/Testing.h>\n\n#include <typeinfo>\n\n////\n//// BEGIN-EXAMPLE TypeTraits.cxx\n////\n#include <vtkm/TypeTraits.h>\n\n#include <vtkm/Math.h>\n//// PAUSE-EXAMPLE\nnamespace TraitsExamples {\n//// RESUME-EXAMPLE\n\ntemplate<typename T>\nT AnyRemainder(const T &numerator, const T &denominator);\n\nnamespace detail {\n\ntemplate<typename T>\nT AnyRemainderImpl(const T &numerator,\n const T &denominator,\n vtkm::TypeTraitsIntegerTag,\n vtkm::TypeTraitsScalarTag)\n{\n return numerator % denominator;\n}\n\ntemplate<typename T>\nT AnyRemainderImpl(const T &numerator,\n const T &denominator,\n vtkm::TypeTraitsRealTag,\n vtkm::TypeTraitsScalarTag)\n{\n // The VTK-m math library contains a Remainder function that operates on\n // floating point numbers.\n return vtkm::Remainder(numerator, denominator);\n}\n\ntemplate<typename T, typename NumericTag>\nT AnyRemainderImpl(const T &numerator,\n const T &denominator,\n NumericTag,\n vtkm::TypeTraitsVectorTag)\n{\n T result;\n for (int componentIndex = 0;\n componentIndex < T::NUM_COMPONENTS;\n componentIndex++)\n {\n result[componentIndex] =\n AnyRemainder(numerator[componentIndex], denominator[componentIndex]);\n }\n return result;\n}\n\n} // namespace detail\n\ntemplate<typename T>\nT AnyRemainder(const T &numerator, const T &denominator)\n{\n return detail::AnyRemainderImpl(\n numerator,\n denominator,\n typename vtkm::TypeTraits<T>::NumericTag(),\n typename vtkm::TypeTraits<T>::DimensionalityTag());\n}\n////\n//// END-EXAMPLE TypeTraits.cxx\n////\n\nvoid TryRemainder()\n{\n vtkm::Id m1 = AnyRemainder(7, 3);\n VTKM_TEST_ASSERT(m1 == 1, \"Got bad remainder\");\n\n vtkm::Float32 m2 = AnyRemainder(7.0f, 3.0f);\n VTKM_TEST_ASSERT(test_equal(m2, 1), \"Got bad remainder\");\n\n vtkm::Id3 m3 = AnyRemainder(vtkm::Id3(10, 9, 8), vtkm::Id3(7, 6, 5));\n VTKM_TEST_ASSERT(test_equal(m3, vtkm::Id3(3, 3, 3)), \"Got bad remainder\");\n\n 
vtkm::Vec<vtkm::Float32,3> m4 =\n AnyRemainder(vtkm::make_Vec(10, 9, 8), vtkm::make_Vec(7, 6, 5));\n VTKM_TEST_ASSERT(test_equal(m4, vtkm::make_Vec(3, 3, 3)), \"Got bad remainder\");\n}\n\ntemplate<typename T>\nstruct TypeTraits;\n\n////\n//// BEGIN-EXAMPLE TypeTraitsImpl.cxx\n////\n//// PAUSE-EXAMPLE\n#if 0\n//// RESUME-EXAMPLE\nnamespace vtkm {\n//// PAUSE-EXAMPLE\n#endif\n//// RESUME-EXAMPLE\n\ntemplate<>\nstruct TypeTraits<vtkm::Float32>\n{\n using NumericTag = vtkm::TypeTraitsRealTag;\n using DimensionalityTag = vtkm::TypeTraitsScalarTag;\n\n VTKM_EXEC_CONT\n static vtkm::Float32 ZeroInitialization() { return vtkm::Float32(0); }\n};\n\n//// PAUSE-EXAMPLE\n#if 0\n//// RESUME-EXAMPLE\n}\n//// PAUSE-EXAMPLE\n#endif\n//// RESUME-EXAMPLE\n////\n//// END-EXAMPLE TypeTraitsImpl.cxx\n////\n\nvoid TryCustomTypeTraits()\n{\n using CustomTraits = TraitsExamples::TypeTraits<vtkm::Float32>;\n using OriginalTraits = vtkm::TypeTraits<vtkm::Float32>;\n\n VTKM_STATIC_ASSERT((std::is_same<\n CustomTraits::NumericTag,\n OriginalTraits::NumericTag>::value));\n VTKM_STATIC_ASSERT((std::is_same<\n CustomTraits::DimensionalityTag,\n OriginalTraits::DimensionalityTag>::value));\n\n VTKM_TEST_ASSERT(CustomTraits::ZeroInitialization()\n == OriginalTraits::ZeroInitialization(),\n \"Bad zero initialization.\");\n}\n\n} // namespace TraitsExamples\n\n////\n//// BEGIN-EXAMPLE VecTraits.cxx\n////\n#include <vtkm/VecTraits.h>\n//// PAUSE-EXAMPLE\nnamespace TraitsExamples {\n//// RESUME-EXAMPLE\n\n// This functor provides a total ordering of vectors. 
Every compared vector\n// will be either less, greater, or equal (assuming all the vector components\n// also have a total ordering).\ntemplate<typename T>\nstruct LessTotalOrder\n{\n VTKM_EXEC_CONT\n bool operator()(const T &left, const T &right)\n {\n for (int index = 0; index < vtkm::VecTraits<T>::NUM_COMPONENTS; index++)\n {\n typedef typename vtkm::VecTraits<T>::ComponentType ComponentType;\n const ComponentType &leftValue =\n vtkm::VecTraits<T>::GetComponent(left, index);\n const ComponentType &rightValue =\n vtkm::VecTraits<T>::GetComponent(right, index);\n if (leftValue < rightValue) { return true; }\n if (rightValue < leftValue) { return false; }\n }\n // If we are here, the vectors are equal (or at least equivalent).\n return false;\n }\n};\n\n// This functor provides a partial ordering of vectors. It returns true if and\n// only if all components satisfy the less operation. It is possible for\n// vectors to be neither less, greater, nor equal, but the transitive closure\n// is still valid.\ntemplate<typename T>\nstruct LessPartialOrder\n{\n VTKM_EXEC_CONT\n bool operator()(const T &left, const T &right)\n {\n for (int index = 0; index < vtkm::VecTraits<T>::NUM_COMPONENTS; index++)\n {\n typedef typename vtkm::VecTraits<T>::ComponentType ComponentType;\n const ComponentType &leftValue =\n vtkm::VecTraits<T>::GetComponent(left, index);\n const ComponentType &rightValue =\n vtkm::VecTraits<T>::GetComponent(right, index);\n if (!(leftValue < rightValue)) { return false; }\n }\n // If we are here, all components satisfy less than relation.\n return true;\n }\n};\n////\n//// END-EXAMPLE VecTraits.cxx\n////\n\nvoid TryLess()\n{\n LessTotalOrder<vtkm::Id> totalLess1;\n VTKM_TEST_ASSERT(totalLess1(1,2), \"Bad less.\");\n VTKM_TEST_ASSERT(!totalLess1(2,1), \"Bad less.\");\n VTKM_TEST_ASSERT(!totalLess1(1,1), \"Bad less.\");\n\n LessPartialOrder<vtkm::Id> partialLess1;\n VTKM_TEST_ASSERT(partialLess1(1,2), \"Bad less.\");\n VTKM_TEST_ASSERT(!partialLess1(2,1), 
\"Bad less.\");\n VTKM_TEST_ASSERT(!partialLess1(1,1), \"Bad less.\");\n\n LessTotalOrder<vtkm::Id3> totalLess3;\n VTKM_TEST_ASSERT(totalLess3(vtkm::Id3(1,2,3),vtkm::Id3(3,2,1)), \"Bad less.\");\n VTKM_TEST_ASSERT(!totalLess3(vtkm::Id3(3,2,1),vtkm::Id3(1,2,3)), \"Bad less.\");\n VTKM_TEST_ASSERT(!totalLess3(vtkm::Id3(1,2,3),vtkm::Id3(1,2,3)), \"Bad less.\");\n VTKM_TEST_ASSERT(totalLess3(vtkm::Id3(1,2,3),vtkm::Id3(2,3,4)), \"Bad less.\");\n\n LessPartialOrder<vtkm::Id3> partialLess3;\n VTKM_TEST_ASSERT(!partialLess3(vtkm::Id3(1,2,3),vtkm::Id3(3,2,1)), \"Bad less.\");\n VTKM_TEST_ASSERT(!partialLess3(vtkm::Id3(3,2,1),vtkm::Id3(1,2,3)), \"Bad less.\");\n VTKM_TEST_ASSERT(!partialLess3(vtkm::Id3(1,2,3),vtkm::Id3(1,2,3)), \"Bad less.\");\n VTKM_TEST_ASSERT(partialLess3(vtkm::Id3(1,2,3),vtkm::Id3(2,3,4)), \"Bad less.\");\n}\n\ntemplate<typename T>\nstruct VecTraits;\n\n////\n//// BEGIN-EXAMPLE VecTraitsImpl.cxx\n////\n//// PAUSE-EXAMPLE\n#if 0\n//// RESUME-EXAMPLE\nnamespace vtkm {\n//// PAUSE-EXAMPLE\n#endif\n//// RESUME-EXAMPLE\n\ntemplate<>\nstruct VecTraits<vtkm::Id3>\n{\n typedef vtkm::Id ComponentType;\n static const int NUM_COMPONENTS = 3;\n typedef vtkm::VecTraitsTagSizeStatic IsSizeStatic;\n typedef vtkm::VecTraitsTagMultipleComponents HasMultipleComponents;\n\n VTKM_EXEC_CONT\n static vtkm::IdComponent GetNumberOfComponents(const vtkm::Id3 &) {\n return NUM_COMPONENTS;\n }\n\n VTKM_EXEC_CONT\n static const vtkm::Id &GetComponent(const vtkm::Id3 &vector, int component) {\n return vector[component];\n }\n VTKM_EXEC_CONT\n static vtkm::Id &GetComponent(vtkm::Id3 &vector, int component) {\n return vector[component];\n }\n\n VTKM_EXEC_CONT\n static void SetComponent(vtkm::Id3 &vector, int component, vtkm::Id value) {\n vector[component] = value;\n }\n\n template<vtkm::IdComponent DestSize>\n VTKM_EXEC_CONT\n static void\n CopyInto(const vtkm::Id3 &src, vtkm::Vec<vtkm::Id,DestSize> &dest)\n {\n for (vtkm::IdComponent index = 0;\n (index < NUM_COMPONENTS) && (index < 
DestSize);\n index++)\n {\n dest[index] = src[index];\n }\n }\n};\n\n//// PAUSE-EXAMPLE\n#if 0\n//// RESUME-EXAMPLE\n} // namespace vtkm\n//// PAUSE-EXAMPLE\n#endif\n//// RESUME-EXAMPLE\n////\n//// END-EXAMPLE VecTraitsImpl.cxx\n////\n\nvoid TryCustomVecTriats()\n{\n using CustomTraits = TraitsExamples::VecTraits<vtkm::Id3>;\n using OriginalTraits = vtkm::VecTraits<vtkm::Id3>;\n\n VTKM_STATIC_ASSERT((std::is_same<\n CustomTraits::ComponentType,\n OriginalTraits::ComponentType>::value));\n VTKM_STATIC_ASSERT(CustomTraits::NUM_COMPONENTS\n == OriginalTraits::NUM_COMPONENTS);\n VTKM_STATIC_ASSERT((std::is_same<\n CustomTraits::HasMultipleComponents,\n OriginalTraits::HasMultipleComponents>::value));\n VTKM_STATIC_ASSERT((std::is_same<\n CustomTraits::IsSizeStatic,\n OriginalTraits::IsSizeStatic>::value));\n\n vtkm::Id3 value = TestValue(10, vtkm::Id3());\n VTKM_TEST_ASSERT(CustomTraits::GetNumberOfComponents(value)\n == OriginalTraits::GetNumberOfComponents(value),\n \"Wrong size.\");\n VTKM_TEST_ASSERT(CustomTraits::GetComponent(value, 1)\n == OriginalTraits::GetComponent(value, 1),\n \"Wrong component.\");\n\n CustomTraits::SetComponent(value, 2, 0);\n VTKM_TEST_ASSERT(value[2] == 0, \"Did not set component.\");\n\n vtkm::Id2 shortValue;\n CustomTraits::CopyInto(value, shortValue);\n VTKM_TEST_ASSERT(test_equal(shortValue, TestValue(10, vtkm::Id2())),\n \"Bad extract.\");\n}\n\nvoid Test()\n{\n TryRemainder();\n TryCustomTypeTraits();\n TryLess();\n TryCustomVecTriats();\n}\n\n} // namespace TraitsExamples\n\nint Traits(int, char *[])\n{\n return vtkm::testing::Testing::Run(TraitsExamples::Test);\n}\n"
},
{
"alpha_fraction": 0.6926843523979187,
"alphanum_fraction": 0.7012288570404053,
"avg_line_length": 25.845361709594727,
"blob_id": "5330b9db44c50931bb6e8ae4d0aff0da134e3b97",
"content_id": "4fde5fa35d777f64c5338310d12468f5278f9c8b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 10416,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 388,
"path": "/examples/ArrayHandle.cxx",
"repo_name": "sujin-philip/VTKmUsersGuide",
"src_encoding": "UTF-8",
"text": "#include <vtkm/cont/ArrayHandle.h>\n#include <vtkm/cont/ArrayPortalToIterators.h>\n#include <vtkm/cont/DeviceAdapter.h>\n#include <vtkm/cont/StorageBasic.h>\n\n#include <vtkm/exec/FunctorBase.h>\n\n#include <vtkm/cont/testing/Testing.h>\n\n#include <algorithm>\n#include <vector>\n\nnamespace {\n\nvtkm::Float32 TestValue(vtkm::Id index)\n{\n return static_cast<vtkm::Float32>(1 + 0.001 * index);\n}\n\nvoid CheckArrayValues(const vtkm::cont::ArrayHandle<vtkm::Float32> &array,\n vtkm::Float32 factor = 1)\n{\n // So far all the examples are using 50 entries. Could change.\n VTKM_TEST_ASSERT(array.GetNumberOfValues() == 50, \"Wrong number of values\");\n\n for (vtkm::Id index = 0; index < array.GetNumberOfValues(); index++)\n {\n VTKM_TEST_ASSERT(\n array.GetPortalConstControl().Get(index) == TestValue(index)*factor,\n \"Bad data value.\");\n }\n}\n\nvoid BasicConstruction()\n{\n ////\n //// BEGIN-EXAMPLE CreateArrayHandle.cxx\n ////\nvtkm::cont::ArrayHandle<vtkm::Float32> outputArray;\n ////\n //// END-EXAMPLE CreateArrayHandle.cxx\n ////\n\n ////\n //// BEGIN-EXAMPLE ArrayHandleStorageParameter.cxx\n ////\nvtkm::cont::ArrayHandle<vtkm::Float32,vtkm::cont::StorageTagBasic> arrayHandle;\n ////\n //// END-EXAMPLE ArrayHandleStorageParameter.cxx\n ////\n}\n\nvoid ArrayHandleFromCArray()\n{\n ////\n //// BEGIN-EXAMPLE ArrayHandleFromCArray.cxx\n ////\n vtkm::Float32 dataBuffer[50];\n // Populate dataBuffer with meaningful data. 
Perhaps read data from a file.\n //// PAUSE-EXAMPLE\n for (vtkm::Id index = 0; index < 50; index++)\n {\n dataBuffer[index] = TestValue(index);\n }\n //// RESUME-EXAMPLE\n\n vtkm::cont::ArrayHandle<vtkm::Float32> inputArray =\n vtkm::cont::make_ArrayHandle(dataBuffer, 50);\n ////\n //// END-EXAMPLE ArrayHandleFromCArray.cxx\n ////\n\n CheckArrayValues(inputArray);\n}\n\nvtkm::Float32 GetValueForArray(vtkm::Id index)\n{\n return TestValue(index);\n}\n\nvoid AllocateAndFillArrayHandle()\n{\n ////\n //// BEGIN-EXAMPLE ArrayHandlePopulate.cxx\n ////\n ////\n //// BEGIN-EXAMPLE ArrayHandleAllocate.cxx\n ////\n vtkm::cont::ArrayHandle<vtkm::Float32> arrayHandle;\n\n const vtkm::Id ARRAY_SIZE = 50;\n arrayHandle.Allocate(ARRAY_SIZE);\n ////\n //// END-EXAMPLE ArrayHandleAllocate.cxx\n ////\n\n typedef vtkm::cont::ArrayHandle<vtkm::Float32>::PortalControl PortalType;\n PortalType portal = arrayHandle.GetPortalControl();\n\n for (vtkm::Id index = 0; index < ARRAY_SIZE; index++)\n {\n portal.Set(index, GetValueForArray(index));\n }\n ////\n //// END-EXAMPLE ArrayHandlePopulate.cxx\n ////\n\n CheckArrayValues(arrayHandle);\n}\n\n////\n//// BEGIN-EXAMPLE ArrayOutOfScope.cxx\n////\nVTKM_CONT\nvtkm::cont::ArrayHandle<vtkm::Float32> BadDataLoad()\n{\n ////\n //// BEGIN-EXAMPLE ArrayHandleFromVector.cxx\n ////\n std::vector<vtkm::Float32> dataBuffer;\n // Populate dataBuffer with meaningful data. Perhaps read data from a file.\n //// PAUSE-EXAMPLE\n dataBuffer.resize(50);\n for (vtkm::Id index = 0; index < 50; index++)\n {\n dataBuffer[index] = TestValue(index);\n }\n //// RESUME-EXAMPLE\n\n vtkm::cont::ArrayHandle<vtkm::Float32> inputArray =\n vtkm::cont::make_ArrayHandle(dataBuffer);\n ////\n //// END-EXAMPLE ArrayHandleFromVector.cxx\n ////\n //// PAUSE-EXAMPLE\n CheckArrayValues(inputArray);\n //// RESUME-EXAMPLE\n\n return inputArray;\n // THIS IS WRONG! At this point dataBuffer goes out of scope and deletes its\n // memory. 
However, inputArray has a pointer to that memory, which becomes an\n // invalid pointer in the returned object. Bad things will happen when the\n // ArrayHandle is used.\n}\n\nVTKM_CONT\nvtkm::cont::ArrayHandle<vtkm::Float32> SafeDataLoad()\n{\n std::vector<vtkm::Float32> dataBuffer;\n // Populate dataBuffer with meaningful data. Perhaps read data from a file.\n //// PAUSE-EXAMPLE\n dataBuffer.resize(50);\n for (vtkm::Id index = 0; index < 50; index++)\n {\n dataBuffer[index] = TestValue(index);\n }\n //// RESUME-EXAMPLE\n\n vtkm::cont::ArrayHandle<vtkm::Float32> tmpArray =\n vtkm::cont::make_ArrayHandle(dataBuffer);\n\n // This copies the data from one ArrayHandle to another (in the execution\n // environment). Although it is an extraneous copy, it is usually pretty fast\n // on a parallel device. Another option is to make sure that the buffer in\n // the std::vector never goes out of scope before all the ArrayHandle\n // references, but this extra step allows the ArrayHandle to manage its own\n // memory and ensure everything is valid.\n vtkm::cont::ArrayHandle<vtkm::Float32> inputArray;\n vtkm::cont::DeviceAdapterAlgorithm<VTKM_DEFAULT_DEVICE_ADAPTER_TAG>::Copy(\n tmpArray, inputArray);\n\n return inputArray;\n // This is safe.\n}\n////\n//// END-EXAMPLE ArrayOutOfScope.cxx\n////\n\nvoid ArrayHandleFromVector()\n{\n BadDataLoad();\n}\n\nvoid CheckSafeDataLoad()\n{\n vtkm::cont::ArrayHandle<vtkm::Float32> inputArray = SafeDataLoad();\n CheckArrayValues(inputArray);\n}\n\n////\n//// BEGIN-EXAMPLE SimpleArrayPortal.cxx\n////\ntemplate<typename T>\nclass SimpleScalarArrayPortal\n{\npublic:\n typedef T ValueType;\n\n // There is no specification for creating array portals, but they generally\n // need a constructor like this to be practical.\n VTKM_EXEC_CONT\n SimpleScalarArrayPortal(ValueType *array, vtkm::Id numberOfValues)\n : Array(array), NumberOfValues(numberOfValues) { }\n\n VTKM_EXEC_CONT\n SimpleScalarArrayPortal() : Array(NULL), NumberOfValues(0) { }\n\n 
VTKM_EXEC_CONT\n vtkm::Id GetNumberOfValues() const { return this->NumberOfValues; }\n\n VTKM_EXEC_CONT\n ValueType Get(vtkm::Id index) const { return this->Array[index]; }\n\n VTKM_EXEC_CONT\n void Set(vtkm::Id index, ValueType value) const {\n this->Array[index] = value;\n }\n\nprivate:\n ValueType *Array;\n vtkm::Id NumberOfValues;\n};\n////\n//// END-EXAMPLE SimpleArrayPortal.cxx\n////\n\n////\n//// BEGIN-EXAMPLE ArrayPortalToIterators.cxx\n////\ntemplate<typename PortalType>\nVTKM_CONT\nstd::vector<typename PortalType::ValueType>\nCopyArrayPortalToVector(const PortalType &portal)\n{\n typedef typename PortalType::ValueType ValueType;\n std::vector<ValueType> result(portal.GetNumberOfValues());\n\n vtkm::cont::ArrayPortalToIterators<PortalType> iterators(portal);\n\n std::copy(iterators.GetBegin(), iterators.GetEnd(), result.begin());\n\n return result;\n}\n////\n//// END-EXAMPLE ArrayPortalToIterators.cxx\n////\n\nvoid TestArrayPortalVectors()\n{\n vtkm::cont::ArrayHandle<vtkm::Float32> inputArray = SafeDataLoad();\n std::vector<vtkm::Float32> buffer =\n CopyArrayPortalToVector(inputArray.GetPortalConstControl());\n\n VTKM_TEST_ASSERT(buffer.size() == inputArray.GetNumberOfValues(),\n \"Vector was sized wrong.\");\n\n for (vtkm::Id index = 0; index < inputArray.GetNumberOfValues(); index++)\n {\n VTKM_TEST_ASSERT(buffer[index] == TestValue(index), \"Bad data value.\");\n }\n\n SimpleScalarArrayPortal<vtkm::Float32> portal(&buffer.at(0), buffer.size());\n\n ////\n //// BEGIN-EXAMPLE ArrayPortalToIteratorBeginEnd.cxx\n ////\n std::vector<vtkm::Float32> myContainer(portal.GetNumberOfValues());\n\n std::copy(vtkm::cont::ArrayPortalToIteratorBegin(portal),\n vtkm::cont::ArrayPortalToIteratorEnd(portal),\n myContainer.begin());\n ////\n //// END-EXAMPLE ArrayPortalToIteratorBeginEnd.cxx\n ////\n\n for (vtkm::Id index = 0; index < inputArray.GetNumberOfValues(); index++)\n {\n VTKM_TEST_ASSERT(myContainer[index] == TestValue(index), \"Bad data value.\");\n 
}\n}\n\n////\n//// BEGIN-EXAMPLE ControlPortals.cxx\n////\ntemplate<typename T>\nvoid SortCheckArrayHandle(vtkm::cont::ArrayHandle<T> arrayHandle)\n{\n typedef typename vtkm::cont::ArrayHandle<T>::PortalControl\n PortalType;\n typedef typename vtkm::cont::ArrayHandle<T>::PortalConstControl\n PortalConstType;\n\n PortalType readwritePortal = arrayHandle.GetPortalControl();\n // This is actually pretty dumb. Sorting would be generally faster in\n // parallel in the execution environment using the device adapter algorithms.\n std::sort(vtkm::cont::ArrayPortalToIteratorBegin(readwritePortal),\n vtkm::cont::ArrayPortalToIteratorEnd(readwritePortal));\n\n PortalConstType readPortal = arrayHandle.GetPortalConstControl();\n for (vtkm::Id index = 1; index < readPortal.GetNumberOfValues(); index++)\n {\n if (readPortal.Get(index-1) > readPortal.Get(index))\n {\n //// PAUSE-EXAMPLE\n VTKM_TEST_FAIL(\"Sorting is wrong!\");\n //// RESUME-EXAMPLE\n std::cout << \"Sorting is wrong!\" << std::endl;\n break;\n }\n }\n}\n////\n//// END-EXAMPLE ControlPortals.cxx\n////\n\nvoid TestControlPortalsExample()\n{\n SortCheckArrayHandle(SafeDataLoad());\n}\n\n////\n//// BEGIN-EXAMPLE ExecutionPortals.cxx\n////\ntemplate<typename T, typename Device>\nstruct DoubleFunctor : public vtkm::exec::FunctorBase\n{\n typedef typename vtkm::cont::ArrayHandle<T>::\n template ExecutionTypes<Device>::PortalConst InputPortalType;\n typedef typename vtkm::cont::ArrayHandle<T>::\n template ExecutionTypes<Device>::Portal OutputPortalType;\n\n VTKM_CONT\n DoubleFunctor(InputPortalType inputPortal, OutputPortalType outputPortal)\n : InputPortal(inputPortal), OutputPortal(outputPortal) { }\n\n VTKM_EXEC\n void operator()(vtkm::Id index) const {\n this->OutputPortal.Set(index, 2*this->InputPortal.Get(index));\n }\n\n InputPortalType InputPortal;\n OutputPortalType OutputPortal;\n};\n\ntemplate<typename T, typename Device>\nvoid DoubleArray(vtkm::cont::ArrayHandle<T> inputArray,\n vtkm::cont::ArrayHandle<T> 
outputArray,\n Device)\n{\n vtkm::Id numValues = inputArray.GetNumberOfValues();\n\n DoubleFunctor<T, Device> functor(\n inputArray.PrepareForInput(Device()),\n outputArray.PrepareForOutput(numValues, Device()));\n\n vtkm::cont::DeviceAdapterAlgorithm<Device>::Schedule(functor, numValues);\n}\n////\n//// END-EXAMPLE ExecutionPortals.cxx\n////\n\nvoid TestExecutionPortalsExample()\n{\n vtkm::cont::ArrayHandle<vtkm::Float32> inputArray = SafeDataLoad();\n CheckArrayValues(inputArray);\n vtkm::cont::ArrayHandle<vtkm::Float32> outputArray;\n DoubleArray(inputArray, outputArray, VTKM_DEFAULT_DEVICE_ADAPTER_TAG());\n CheckArrayValues(outputArray, 2);\n}\n\nvoid Test()\n{\n BasicConstruction();\n ArrayHandleFromCArray();\n ArrayHandleFromVector();\n AllocateAndFillArrayHandle();\n CheckSafeDataLoad();\n TestArrayPortalVectors();\n TestControlPortalsExample();\n TestExecutionPortalsExample();\n}\n\n} // anonymous namespace\n\nint ArrayHandle(int, char *[])\n{\n return vtkm::cont::testing::Testing::Run(Test);\n}\n"
},
{
"alpha_fraction": 0.6942809224128723,
"alphanum_fraction": 0.7186711430549622,
"avg_line_length": 28,
"blob_id": "0aaf2a0b1c2d095ebdf3fd1c759f50375fc36103",
"content_id": "f4088f66b3a5f48309f906c4b3292b2c82b5a540",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2378,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 82,
"path": "/examples/UsePointElevationWorklet.cxx",
"repo_name": "sujin-philip/VTKmUsersGuide",
"src_encoding": "UTF-8",
"text": "#include <vtkm/cont/ArrayHandle.h>\n\n#include <vtkm/worklet/DispatcherMapField.h>\n#include <vtkm/worklet/PointElevation.h>\n\n#include <vtkm/cont/testing/Testing.h>\n\nnamespace {\n\n////\n//// BEGIN-EXAMPLE UsePointElevationWorklet.cxx\n////\nVTKM_CONT\nvtkm::cont::ArrayHandle<vtkm::FloatDefault>\nComputeAirPressure(\n vtkm::cont::ArrayHandle<vtkm::Vec<vtkm::FloatDefault,3> > pointCoordinates)\n{\n vtkm::worklet::PointElevation elevationWorklet;\n\n // Use the elevation worklet to estimate atmospheric pressure based on the\n // height of the point coordinates. Atmospheric pressure is 101325 Pa at\n // sea level and drops about 12 Pa per meter.\n elevationWorklet.SetLowPoint(vtkm::Vec<vtkm::Float64,3>(0.0, 0.0, 0.0));\n elevationWorklet.SetHighPoint(vtkm::Vec<vtkm::Float64,3>(0.0, 0.0, 2000.0));\n elevationWorklet.SetRange(101325.0, 77325.0);\n\n vtkm::worklet::DispatcherMapField<vtkm::worklet::PointElevation>\n elevationDispatcher(elevationWorklet);\n\n vtkm::cont::ArrayHandle<vtkm::FloatDefault> pressure;\n\n elevationDispatcher.Invoke(pointCoordinates, pressure);\n\n return pressure;\n}\n////\n//// END-EXAMPLE UsePointElevationWorklet.cxx\n////\n\nvoid DoPointElevation()\n{\n using InputArrayType =\n vtkm::cont::ArrayHandle<vtkm::Vec<vtkm::FloatDefault,3> >;\n InputArrayType pointCoordinates;\n\n const vtkm::Id ARRAY_SIZE = 10;\n pointCoordinates.Allocate(ARRAY_SIZE);\n InputArrayType::PortalControl inputPortal =\n pointCoordinates.GetPortalControl();\n for (vtkm::Id index = 0; index < ARRAY_SIZE; index++)\n {\n inputPortal.Set(index, vtkm::Vec<vtkm::FloatDefault,3>(\n static_cast<vtkm::FloatDefault>(index)));\n }\n\n using OutputArrayType = vtkm::cont::ArrayHandle<vtkm::FloatDefault>;\n OutputArrayType pressure = ComputeAirPressure(pointCoordinates);\n\n vtkm::cont::printSummary_ArrayHandle(pressure, std::cout);\n std::cout << std::endl;\n\n OutputArrayType::PortalConstControl outputPortal =\n pressure.GetPortalConstControl();\n for (vtkm::Id index = 
0; index < ARRAY_SIZE; index++)\n {\n vtkm::FloatDefault value = outputPortal.Get(index);\n VTKM_TEST_ASSERT(test_equal(value, 101325.0 - 12*index),\n \"Bad value from worklet result.\");\n }\n}\n\nvoid Test()\n{\n DoPointElevation();\n}\n\n} // anonymous namespace\n\nint UsePointElevationWorklet(int, char*[])\n{\n return vtkm::cont::testing::Testing::Run(Test);\n}\n"
},
{
"alpha_fraction": 0.6147129535675049,
"alphanum_fraction": 0.6243178248405457,
"avg_line_length": 32.19565200805664,
"blob_id": "ae0afdc75e64814f6838131a200e220cea97658f",
"content_id": "1cf7bccc3d696d36183f03198e31fd2bb73a9a94",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 4581,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 138,
"path": "/examples/CellOperations.cxx",
"repo_name": "sujin-philip/VTKmUsersGuide",
"src_encoding": "UTF-8",
"text": "#include <vtkm/exec/CellDerivative.h>\n#include <vtkm/exec/CellInterpolate.h>\n#include <vtkm/exec/ParametricCoordinates.h>\n\n#include <vtkm/worklet/DispatcherMapTopology.h>\n#include <vtkm/worklet/WorkletMapTopology.h>\n\n#include <vtkm/cont/testing/MakeTestDataSet.h>\n#include <vtkm/cont/testing/Testing.h>\n\nnamespace {\n\n////\n//// BEGIN-EXAMPLE CellCenters.cxx\n////\nstruct CellCenters : vtkm::worklet::WorkletMapPointToCell\n{\n typedef void ControlSignature(CellSetIn,\n FieldInPoint<> inputField,\n FieldOutCell<> outputField);\n typedef void ExecutionSignature(CellShape, PointCount, _2, _3);\n typedef _1 InputDomain;\n\n template<typename CellShapeTag,typename FieldInVecType,typename FieldOutType>\n VTKM_EXEC\n void operator()(CellShapeTag shape,\n vtkm::IdComponent pointCount,\n const FieldInVecType &inputField,\n FieldOutType &outputField) const\n {\n vtkm::Vec<vtkm::FloatDefault,3> center =\n vtkm::exec::ParametricCoordinatesCenter(pointCount, shape, *this);\n outputField = vtkm::exec::CellInterpolate(inputField, center, shape, *this);\n }\n};\n////\n//// END-EXAMPLE CellCenters.cxx\n////\n\nvoid TryCellCenters()\n{\n std::cout << \"Trying CellCenters worklet.\" << std::endl;\n\n vtkm::cont::DataSet dataSet =\n vtkm::cont::testing::MakeTestDataSet().Make3DUniformDataSet0();\n\n typedef vtkm::cont::ArrayHandle<vtkm::Float32> ArrayType;\n ArrayType centers;\n\n vtkm::worklet::DispatcherMapTopology<CellCenters> dispatcher;\n dispatcher.Invoke(dataSet.GetCellSet(),\n dataSet.GetField(\"pointvar\").GetData().Cast<ArrayType>(),\n centers);\n\n vtkm::cont::printSummary_ArrayHandle(centers, std::cout);\n std::cout << std::endl;\n\n VTKM_TEST_ASSERT(centers.GetNumberOfValues() ==\n dataSet.GetCellSet().GetNumberOfCells(),\n \"Bad number of cells.\");\n VTKM_TEST_ASSERT(test_equal(60.1875, centers.GetPortalConstControl().Get(0)),\n \"Bad first value.\");\n}\n////\n//// BEGIN-EXAMPLE CellDerivatives.cxx\n////\nstruct CellDerivatives : 
vtkm::worklet::WorkletMapPointToCell\n{\n typedef void ControlSignature(CellSetIn,\n FieldInPoint<> inputField,\n FieldInPoint<Vec3> pointCoordinates,\n FieldOutCell<> outputField);\n typedef void ExecutionSignature(CellShape, PointCount, _2, _3, _4);\n typedef _1 InputDomain;\n\n template<typename CellShapeTag,\n typename FieldInVecType,\n typename PointCoordVecType,\n typename FieldOutType>\n VTKM_EXEC\n void operator()(CellShapeTag shape,\n vtkm::IdComponent pointCount,\n const FieldInVecType &inputField,\n const PointCoordVecType &pointCoordinates,\n FieldOutType &outputField) const\n {\n vtkm::Vec<vtkm::FloatDefault,3> center =\n vtkm::exec::ParametricCoordinatesCenter(pointCount, shape, *this);\n outputField = vtkm::exec::CellDerivative(inputField,\n pointCoordinates,\n center,\n shape,\n *this);\n }\n};\n////\n//// END-EXAMPLE CellDerivatives.cxx\n////\n\nvoid TryCellDerivatives()\n{\n std::cout << \"Trying CellDerivatives worklet.\" << std::endl;\n\n vtkm::cont::DataSet dataSet =\n vtkm::cont::testing::MakeTestDataSet().Make3DUniformDataSet0();\n\n typedef vtkm::cont::ArrayHandle<vtkm::Float32> ArrayType;\n vtkm::cont::ArrayHandle<vtkm::Vec<vtkm::Float32,3> > derivatives;\n\n vtkm::worklet::DispatcherMapTopology<CellDerivatives> dispatcher;\n dispatcher.Invoke(dataSet.GetCellSet(),\n dataSet.GetField(\"pointvar\").GetData().Cast<ArrayType>(),\n dataSet.GetCoordinateSystem().GetData(),\n derivatives);\n\n vtkm::cont::printSummary_ArrayHandle(derivatives, std::cout);\n std::cout << std::endl;\n\n VTKM_TEST_ASSERT(derivatives.GetNumberOfValues() ==\n dataSet.GetCellSet().GetNumberOfCells(),\n \"Bad number of cells.\");\n VTKM_TEST_ASSERT(test_equal(vtkm::make_Vec(10.025,30.075,60.125),\n derivatives.GetPortalConstControl().Get(0)),\n \"Bad first value.\");\n}\n\nvoid Run()\n{\n TryCellCenters();\n TryCellDerivatives();\n}\n\n} // anonymous namespace\n\nint CellOperations(int, char*[])\n{\n return vtkm::cont::testing::Testing::Run(Run);\n}\n"
},
{
"alpha_fraction": 0.6943759918212891,
"alphanum_fraction": 0.705355167388916,
"avg_line_length": 28.361841201782227,
"blob_id": "d3af646eb3f359f92266086d82a51af980beb0c0",
"content_id": "7d58f80fc0696dfeef0c6c33d82aabac3eef411c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 4463,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 152,
"path": "/examples/CellShapes.cxx",
"repo_name": "sujin-philip/VTKmUsersGuide",
"src_encoding": "UTF-8",
"text": "#include <vtkm/CellShape.h>\n#include <vtkm/CellTraits.h>\n#include <vtkm/VectorAnalysis.h>\n\n#include <vtkm/exec/FunctorBase.h>\n\n#include <vtkm/testing/Testing.h>\n\nnamespace CellShapesExamples {\n\n////\n//// BEGIN-EXAMPLE CellShapeIdToTag.cxx\n////\nvoid CellFunction(vtkm::CellShapeTagTriangle)\n{\n std::cout << \"In CellFunction for triangles.\" << std::endl;\n}\n\nvoid DoSomethingWithACell()\n{\n // Calls CellFunction overloaded with a vtkm::CellShapeTagTriangle.\n CellFunction(vtkm::CellShapeIdToTag<vtkm::CELL_SHAPE_TRIANGLE>::Tag());\n}\n////\n//// END-EXAMPLE CellShapeIdToTag.cxx\n////\n\n////\n//// BEGIN-EXAMPLE GenericCellNormal.cxx\n////\nnamespace detail {\n\nVTKM_SUPPRESS_EXEC_WARNINGS\ntemplate<typename PointCoordinatesVector, typename WorkletType>\nVTKM_EXEC_CONT\ntypename PointCoordinatesVector::ComponentType\nCellNormalImpl(const PointCoordinatesVector &pointCoordinates,\n vtkm::CellTopologicalDimensionsTag<2>,\n const WorkletType &worklet)\n{\n if (pointCoordinates.GetNumberOfComponents() >= 3)\n {\n return vtkm::TriangleNormal(pointCoordinates[0],\n pointCoordinates[1],\n pointCoordinates[2]);\n }\n else\n {\n worklet.RaiseError(\"Degenerate polygon.\");\n return typename PointCoordinatesVector::ComponentType();\n }\n}\n\nVTKM_SUPPRESS_EXEC_WARNINGS\ntemplate<typename PointCoordinatesVector,\n vtkm::IdComponent Dimensions,\n typename WorkletType>\nVTKM_EXEC_CONT\ntypename PointCoordinatesVector::ComponentType\nCellNormalImpl(const PointCoordinatesVector &,\n vtkm::CellTopologicalDimensionsTag<Dimensions>,\n const WorkletType &worklet)\n{\n worklet.RaiseError(\"Only polygons supported for cell normals.\");\n return typename PointCoordinatesVector::ComponentType();\n}\n\n} // namespace detail\n\nVTKM_SUPPRESS_EXEC_WARNINGS\ntemplate<typename CellShape,\n typename PointCoordinatesVector,\n typename WorkletType>\nVTKM_EXEC_CONT\ntypename PointCoordinatesVector::ComponentType\nCellNormal(CellShape,\n const PointCoordinatesVector 
&pointCoordinates,\n const WorkletType &worklet)\n{\n return detail::CellNormalImpl(\n pointCoordinates,\n typename vtkm::CellTraits<CellShape>::TopologicalDimensionsTag(),\n worklet);\n}\n\nVTKM_SUPPRESS_EXEC_WARNINGS\ntemplate<typename PointCoordinatesVector,\n typename WorkletType>\nVTKM_EXEC_CONT\ntypename PointCoordinatesVector::ComponentType\nCellNormal(vtkm::CellShapeTagGeneric shape,\n const PointCoordinatesVector &pointCoordinates,\n const WorkletType &worklet)\n{\n switch(shape.Id)\n {\n vtkmGenericCellShapeMacro(\n return CellNormal(CellShapeTag(), pointCoordinates, worklet));\n default:\n worklet.RaiseError(\"Unknown cell type.\");\n return typename PointCoordinatesVector::ComponentType();\n }\n}\n////\n//// END-EXAMPLE GenericCellNormal.cxx\n////\n\nstruct FakeWorklet : vtkm::exec::FunctorBase { };\n\nvoid Run()\n{\n std::cout << \"Basic identifier to tag.\" << std::endl;\n DoSomethingWithACell();\n\n std::cout << \"Function with dynamic lookup of cell shape.\" << std::endl;\n typedef vtkm::Vec<vtkm::FloatDefault,3> Vec3;\n\n vtkm::Vec<Vec3,3> pointCoordinates;\n pointCoordinates[0] = Vec3(0.0f, 0.0f, 0.0f);\n pointCoordinates[1] = Vec3(1.0f, 0.0f, 0.0f);\n pointCoordinates[2] = Vec3(0.0f, 1.0f, 0.0f);\n\n Vec3 expectedNormal(0.0f, 0.0f, 1.0f);\n\n char errorBuffer[256];\n errorBuffer[0] = '\\0';\n vtkm::exec::internal::ErrorMessageBuffer errorMessage(errorBuffer, 256);\n FakeWorklet worklet;\n worklet.SetErrorMessageBuffer(errorMessage);\n\n Vec3 normal = CellNormal(vtkm::CellShapeTagTriangle(),\n pointCoordinates,\n worklet);\n VTKM_TEST_ASSERT(!errorMessage.IsErrorRaised(), \"Error finding normal.\");\n VTKM_TEST_ASSERT(test_equal(normal, expectedNormal), \"Bad normal.\");\n\n normal = CellNormal(vtkm::CellShapeTagGeneric(vtkm::CELL_SHAPE_TRIANGLE),\n pointCoordinates,\n worklet);\n VTKM_TEST_ASSERT(!errorMessage.IsErrorRaised(), \"Error finding normal.\");\n VTKM_TEST_ASSERT(test_equal(normal, expectedNormal), \"Bad normal.\");\n\n 
CellNormal(vtkm::CellShapeTagLine(), pointCoordinates, worklet);\n VTKM_TEST_ASSERT(errorMessage.IsErrorRaised(), \"Expected error not raised.\");\n}\n\n} // namespace CellShapesExamples\n\nint CellShapes(int, char*[])\n{\n return vtkm::testing::Testing::Run(CellShapesExamples::Run);\n}\n"
}
] | 51 |
JohnFNovak/clplot
|
https://github.com/JohnFNovak/clplot
|
36909426eec9dfd12addc0bea219ffdd97de29d9
|
50bddb83cca24ac9e45747945ba13d5b41d79e62
|
c741a63c99f18b1794ab0cefec2aeb965965d3e3
|
refs/heads/master
| 2021-01-02T22:58:21.300040 | 2014-05-28T16:01:49 | 2014-05-28T16:01:49 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.4671318531036377,
"alphanum_fraction": 0.477414608001709,
"avg_line_length": 34.94719314575195,
"blob_id": "1bf7f893d4cf39fc00e9f0357ae8637819addbd4",
"content_id": "c43c0f21539aeeae1d916d032b8f1415587da072",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10892,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 303,
"path": "/clplot/plot.py",
"repo_name": "JohnFNovak/clplot",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python\n\n# Part of CLP\n# A universal command line plotting script\n#\n# John Novak\n# June 4, 2012 - July 19, 2012\n\n# this sub-file holds the functions relating to generating the output\n\n# written for Python 2.6. Requires Scipy, Numpy, and Matplotlib\n\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport time\nimport string\nimport globe\nimport pickle\n\n\ndef plot_tiles(tiles, numbered=0, **kwargs):\n dic = globe.dic\n for i, t in enumerate(tiles):\n if not dic['columnsfirst']:\n plt.subplot2grid((dic['layout'][0], dic['layout'][1]),\n (((i - 1) - (i - 1) %\n dic['layout'][1]) / dic['layout'][1],\n ((i - 1) % dic['layout'][1])))\n if dic['columnsfirst']:\n plt.subplot2grid((dic['layout'][0], dic['layout'][1]),\n ((i - 1) % dic['layout'][1]),\n (((i - 1) - (i - 1) %\n dic['layout'][1]) / dic['layout'][1]))\n plot(t[0], '', Print=False)\n outputname = tiles[-1][1] + \"_tiled\"\n if numbered != 0:\n outputname = outputname + '_' + str(numbered)\n outputname = outputname + \".\" + dic['TYPE']\n plt.tight_layout() # Experimental, and may cause problems\n plt.savefig(outputname)\n if dic['Verbose'] > 0:\n print\"printed to\", outputname\n # if dic['EmbedData']:\n # EmbedData(outputname, data)\n #check = subprocess.call(['open', outputname])\n plt.clf()\n\n\ndef plot(data, outputfile, numbered=0, Print=True, **kwargs):\n \"\"\"This function takes a list z of lists and trys to plot them. 
the first\n list is always x, and the folowing are always y's\"\"\"\n\n # data format:\n # [[f_id, b_id], filename, output, x_label, y_label,\n # x, y, x_err, y_err, x_sys_err, y_sys_err]\n\n dic = globe.dic\n points = dic['colorstyle']\n\n if dic['Ucolor']:\n colors = dic['Ucolor']\n else:\n colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']\n if dic['Ustyle']:\n style = dic['Ustyle']\n else:\n style = ['o', 'v', '^', '<', ' > ', '1', '2', '3', '4', '-', '--',\n '-.', ':', 's', 'p', '*', 'h', 'H', ' + ', 'x', 'D', 'd', '|',\n '_', '.', ', ']\n for s in style:\n for c in colors:\n points.append(str(c + s))\n\n size = [dic['default_marker_size']] * len(points)\n for i in range(len(points)):\n if len(points[i].split(';')) == 2:\n points[i] = points[i].split(';')[0]\n size[i] = float(points[i].split(';')[1])\n\n plottingerrors = True\n\n if dic['x_range']:\n plt.xlim(dic['x_range'])\n if dic['y_range']:\n plt.ylim(dic['y_range'])\n x_label = '/'.join(sorted(set([d[3] for d in data if d[3]])))\n plt.xlabel(x_label, fontsize=dic['fontsize'])\n if dic['y_label']:\n plt.ylabel(dic['y_label'], fontsize=dic['fontsize'])\n if dic['x_log']:\n plt.xscale('log', nonposx='clip')\n if dic['y_log']:\n plt.yscale('log', nonposy='clip')\n\n plt.tick_params(axis='both', which='major', labelsize=dic['fontsize']*0.75)\n plt.tick_params(axis='both', which='minor', labelsize=dic['fontsize']*0.75)\n\n if dic['legend']:\n parse_legend(data)\n\n if dic['norm']:\n for d in data:\n X = np.array(d[5]).astype(float)\n Y = np.array(d[6]).astype(float)\n width = np.mean(X[1:] - X[:-1])\n Y = Y / np.sum(Y * width)\n d[6] = Y.tolist()\n\n for k, d in enumerate(data):\n X, Y, X_err, Y_err, X_sys_err, Y_sys_err = d[5:11]\n marker = points[k % len(points)]\n msize = size[k % len(points)]\n ecolor = points[k % len(points)][0]\n fcolor = points[k % len(points)][0]\n if marker[-1] == '!':\n fcolor = 'white'\n marker = marker[:-1]\n X = [float(x) * dic['xscaled'] for x in X]\n Y = [float(x) * dic['yscaled'] for 
x in Y]\n X_err = [float(x) * dic['xscaled'] for x in X_err]\n Y_err = [float(x) * dic['yscaled'] for x in Y_err]\n if plottingerrors and not dic['errorbands']:\n plt.errorbar(X, Y,\n xerr=X_err,\n yerr=Y_err,\n fmt=marker, label=d[4],\n mec=ecolor, mfc=fcolor, ms=msize)\n if plottingerrors and dic['errorbands']:\n if all([y == 0 for y in Y_err]):\n plt.errorbar(X, Y,\n xerr=[0] * len(X),\n yerr=[0] * len(Y),\n fmt=marker, label=d[4],\n mec=ecolor, mfc=fcolor, ms=msize)\n else:\n plt.errorbar(X, Y,\n xerr=[0] * len(X),\n yerr=[0] * len(Y),\n fmt=marker, label=d[4],\n mec=ecolor, mfc=fcolor, ms=msize)\n plt.fill_between(np.array(X),\n np.array(Y) + np.array(Y_err),\n np.array(Y) - np.array(Y_err),\n facecolor=ecolor, alpha=dic['alpha'],\n interpolate=True, linewidth=0)\n if dic['plot_sys_err']:\n plt.fill_between(np.array(X),\n np.array(Y) + np.array(Y_sys_err),\n np.array(Y) - np.array(Y_sys_err),\n facecolor=ecolor, alpha=dic['alpha'],\n interpolate=True, linewidth=0)\n if not plottingerrors:\n plt.plot(X, Y, points[k % len(points)])\n\n plt.grid(dic['grid'])\n\n if dic['legend']:\n plt.legend()\n\n if dic['interactive']:\n if dic['keep_live']:\n plt.ion()\n plt.show(block=False)\n else:\n plt.show()\n return\n\n outputname = outputfile\n\n if numbered != 0:\n outputname = outputname + \"_\" + str(numbered)\n if dic['MULTIP']:\n outputname = outputname + \"_mp\"\n\n outputname = outputname + \".\" + dic['TYPE']\n\n if Print:\n plt.tight_layout() # Experimental, and may cause problems\n plt.savefig(outputname)\n if dic['Verbose'] > 0:\n print\"printed to\", outputname\n if dic['EmbedData']:\n EmbedData(outputname, data)\n #check = subprocess.call(['open', outputname])\n plt.clf()\n\n\ndef parse_legend(data):\n # dic = globe.dic\n # delimiters = ['/', '-', '.', '/', '-', '.']\n delimiters = ['/', '-']\n labels = [x[4] for x in data]\n\n for divider in delimiters:\n tester = labels[0].split(divider)\n\n # From the front\n for i in labels:\n if len(i.split(divider)) > 
len(tester):\n tester = i.split(divider)\n hold = [0]*len(tester)\n\n for i in range(1, len(labels)):\n for j in range(len(labels[i].split(divider))):\n if tester[j] == labels[i].split(divider)[j] and hold[j]\\\n == 0:\n hold[j] = 1\n if tester[j] != labels[i].split(divider)[j] and hold[j]\\\n == 1:\n hold[j] = 0\n\n for i in range(len(hold)):\n if hold[len(hold)-1-i] == 1:\n for j in range(len(labels)):\n temp = []\n for k in range(len(labels[j].split(divider))):\n if k != len(hold) - 1 - i:\n temp.append(labels[j].split(divider)[k])\n labels[j] = string.join(temp, divider)\n\n tester = labels[0].split(divider)\n\n # From the back\n for i in labels:\n if len(i.split(divider)) > len(tester):\n tester = i.split(divider)\n hold = [0]*len(tester)\n\n for i in range(1, len(labels)):\n temp = len(labels[i].split(divider)) - 1 - j\n temp_labels = labels[i].split(divider)\n for j in range(temp):\n if tester[temp] == temp_labels[temp] and hold[temp] == 0:\n hold[temp] = 1\n if tester[temp] != temp_labels[temp] and hold[temp] == 1:\n hold[temp] = 0\n\n for i in range(len(hold)):\n if hold[len(hold)-1-i] == 1:\n for j in range(len(labels)):\n temp = []\n for k in range(len(labels[j].split(divider))):\n if k != len(hold)-1-i:\n temp.append(labels[j].split(divider)[k])\n labels[j] = string.join(temp, divider)\n\n\ndef EmbedData(outputname, data):\n dic = globe.dic\n StringToEmbed = \"Creation time: \" + time.ctime() + '\\n'\n StringToEmbed += \"Current directory: \" + os.path.abspath('.') + '\\n'\n StringToEmbed += \"Creation command: \" + ' '.join(sys.argv) + '\\n'\n StringToEmbed += \"Plotted values:\" + '\\n'\n for i, d in enumerate(data):\n X, Y, X_err, Y_err, X_sys_err, Y_sys_err = d[5:11]\n StringToEmbed += 'Plot %d\\n' % i\n StringToEmbed += 'x ' + ' '.join(map(str, X)) + '\\n'\n StringToEmbed += 'x_err ' + ' '.join(map(str, X_err)) + '\\n'\n StringToEmbed += 'x_sys_err ' + ' '.join(map(str, X_err)) + '\\n'\n StringToEmbed += 'y ' + ' '.join(map(str, Y)) + '\\n'\n 
StringToEmbed += 'y_err ' + ' '.join(map(str, Y_err)) + '\\n'\n StringToEmbed += 'y_sys_err ' + ' '.join(map(str, Y_sys_err)) + '\\n'\n StringToEmbed += 'PickleDump:'\n StringToEmbed += pickle.dumps(data)\n if dic['TYPE'] == 'jpg':\n with open(outputname, 'a') as f:\n f.write(StringToEmbed)\n elif dic['TYPE'] == 'pdf':\n if dic['Verbose'] > 0:\n print \"Warning!!! Embedding data in pdfs is not reliable storage!\"\n print \"Many PDF viewers will strip data which is not rendered!\"\n with open(outputname, 'r') as f:\n filetext = f.read().split('\\n')\n obj_count = 0\n for line in filetext:\n if ' obj' in line:\n obj_count = max(int(line.split()[0]), obj_count)\n if 'xref' in line:\n break\n StringToEmbed = '%d 0 obj\\n<</Novak\\'s_EmbedData >>\\nstream\\n' % (\n obj_count + 1) + StringToEmbed + 'endstream\\nendobj'\n with open(outputname, 'w') as f:\n f.write('\\n'.join(filetext[:2] + [StringToEmbed] + filetext[2:]))\n\n\ndef reload_plot(filename):\n if not os.path.isfile(filename):\n print filename, 'does not exist'\n return None\n with open(filename, 'r') as f:\n data = f.read()\n if len(data.split('PickleDump:')) > 1:\n new = []\n for d in data.split('PickleDump:')[1:]:\n new.append(pickle.loads(d)[0])\n return new\n return None\n\n\nif __name__ == '__main__':\n print \"This code is part of CLP\"\n"
},
{
"alpha_fraction": 0.48132073879241943,
"alphanum_fraction": 0.49158406257629395,
"avg_line_length": 36.2292594909668,
"blob_id": "aba74949b3ad86ec7d7d7fb7f1e29fb7abfeace1",
"content_id": "8339017c204b9bc66137cbc5e76edb28c413c0be",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 17051,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 458,
"path": "/clplot/helpers.py",
"repo_name": "JohnFNovak/clplot",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python\n\n# Part of CLP\n# A universal command line plotting script\n#\n# John Novak\n\n# This sub-file contains helper functions\n\n# written for Python 2.6. Requires Scipy, Numpy, and Matplotlib\n\nimport collections\nimport itertools\nimport globe\nimport sys\nimport math as m\nimport code\n\n\ndef is_it_ordered(vals):\n \"\"\"This function takes a list of numbers are returns whether or not they\n are in order\"\"\"\n\n ordered = 0\n\n if vals == sorted(vals):\n ordered = 1\n if vals == sorted(vals, reverse=True):\n ordered = -1\n\n return ordered\n\n\ndef check_type(x, specific=False):\n \"\"\"This function returns a string. It returns \"str\" if x is a string, and\n \"num\" if x is a number\"\"\"\n try:\n float(x)\n except ValueError:\n verdict = \"str\"\n else:\n if not specific:\n verdict = \"num\"\n else:\n try:\n int(x)\n except ValueError:\n verdict = \"float\"\n else:\n verdict = \"int\"\n\n return verdict\n\n\ndef skip(iterator, n):\n \"\"\"Advance the iterator n-steps ahead. If n is none, consume entirely.\"\"\"\n collections.deque(itertools.islice(iterator, n), maxlen=0)\n\n\ndef givehelp(a):\n \"\"\"This command prints out some help\"\"\"\n\n print \"\"\"This is a utility which trys to inteligently create plots from\n text files. In many cases it can produce reasonable plots even if no\n information is provided by the user other than the filenames.\\n\"\"\"\n if a == 0:\n print \"for more help call this program with the '-help' flag\"\n if a == 1:\n print \"\"\"This program takes a number of flags:\n -i: Input: The input files can be listed first, or they can be listed\n following the '-i' flag.\n -o: Output: The output files will be given the same names as the input\n files unless otherwise specified. The output files can be\n specifiec by listing them after the '-o' flag\n -f: Format: the format of the data in the input files can be specified\n with '-f'. 
Each format flag should start with either 'c' or\n 'r', specifying wether the data should be read as columns\n or row. The following characters each represent a row or\n column. They can be: 'x', 'y', '_', '*', or a numeral\n (<10). 'x' specifies the x values, 'y' specifies 'y'\n values'. Rows or columens marked with '_' will be skipped.\n 'y's or '_'s can be proceeded by a numeral, and the 'y' or\n '_' will be read that many times. Formats will only be used\n if their dimensions exactly fit the data found in the file,\n unless the format string is ended with a '*', then the\n format will be used of any data found in the file which has\n dimensions greater than or equal to that stated in the\n format flag.\n -mp: Multiplot Pile. This flag should be followed by the number of y's\n which the user wants to have plotted in the same window. It\n should be noted that if one block of text contains multiple\n y columns or rows, the '-mp' flag will cause them to be\n treated individually\n -mt: Multiplot Tile. This flag should be followed by the number of\n tiles desired for each plot printed to file\n -t: Type. The '-t' flag can be used to change the output type. The\n following are acceptable: bmp, emf, eps, gif, jpeg, jpg,\n pdf, png, ps, raw, rgba, svg, svgz, tif, tiff\n -c: Color. The '-c' flag can be used to set the color. Multiple colors\n can be specified and they will be iterated over. The color\n options are: b, g, r, c, m, y, k\n -s: Point Style: The '-s' flag can be used to specify the point style.\n Multiple styles can be specified and they will be iterated\n over. The point style options are:-, --, -., :, ., , , o,\n v, ^, <, >, 1, 2, 3, 4, s, p, *, h, H, +, x, D, d, |, _ .\n To plot with hollow points, append the style with '!'. Note\n that it may be necessary to put a style in quotes because\n the command line my try to interpret it.\n -cs: Color/Style, or Custom Style: The '-cs' flag can be used to\n directly specify the point color and style to be used. 
All\n of the colors and styles listed previously will work. The\n flags must be a valid color followed (without a space) by a\n valid point style. Ex: blue, big diamond, hollow -'bD!'\n -xl, -yl: Set X and y labels. SHould be followed by a string, which can\n be in quotes\n -logx, -logy: set X and/or Y axes to be log scales\n -xr, -yr: Set scale of X and Y ranges, should be followed with two\n numbers sepearated by a colon. Ex: -xr 1:5\n -layout: Used to specify the tiled output layout. Write input as\n <# rows>:<# columns>\n -legend: This will turn on keys in the plots. On each plot things will\n be named using a unique combination of column heading,\n column number, and filename\n -bands: This will plot all y error bars as y error bands\n -fontsize: This sets the size of the font used for axis labels and\n titles. The default it 20.\n -grid : This turns on background grids\n -systematic: This sets the size of the systematic error. It is a\n percent and is added to the y error bars.\n -sys_err: This turns on the plotting of systematic errors.\n -markersize: changes the default marker size. Default 5\n -yscaled: Scale all of the y values by a constant number\n -xscaled: Scale all of the x values by a constant number\n -alpha: Sets the 'opaque-ness' of shaded objects (like error bars).\n Number [0, 1], default 0.25\n -norm: Normalizes all plots\n\n a '!' flag will start clplot in interactive mode.\n\n Example:\n I have a large number of files and I would like them to be plotted\n with 9 plots tiled per output. I would like them to be eps\n files, and I have a thing for green circles. In each file\n the data is in columns 6 wide, but I only want the first\n and fourth columns plotted. The first column is x, the\n other will be y. 
I would type:\n # python plot.py * -t eps -mt 9 -c b -s o -f x3_y*\"\"\"\n\n exit(1)\n\n\ndef read_flags():\n dic = globe.dic\n case = 0 # 0 is reading files, 1 is outputs, 2 is formats, etc\n\n if len(sys.argv) == 1:\n givehelp(0)\n\n for flag in sys.argv[1:]:\n # performance tweak: Check if the flag has a dash in it first,\n # otherwise its a filename, i.e. \"if '-' in flag: \"\n if \"-f\" == flag:\n # format flag\n case = 2\n elif \"-i\" == flag:\n # input file flag\n case = 0\n elif \"-r\" == flag:\n # input file flag\n case = 21\n elif \"-o\" == flag:\n # output file flag\n case = 1\n elif \"-t\" == flag:\n # output type flag\n case = 3\n elif \"-mp\" == flag:\n # multiplot pile flag\n case = 4\n elif \"-mt\" == flag:\n # multiplot tile flag\n case = 5\n elif \"-h\" == flag[:2]:\n givehelp(1)\n elif \"-c\" == flag:\n case = 6\n elif \"-s\" == flag:\n case = 7\n elif \"-xr\" == flag:\n case = 8\n elif \"-yr\" == flag:\n case = 9\n elif \"-xl\" == flag:\n case = 10\n elif \"-yl\" == flag:\n case = 11\n elif \"-logx\" == flag:\n dic['x_log'] = True\n if dic['x_range']:\n if dic['x_range'][0] <= 0:\n dic['x_range'] = None\n elif \"-logy\" == flag:\n dic['y_log'] = True\n if dic['y_range']:\n if dic['y_range'][0] <= 0:\n dic['y_range'] = None\n elif '-layout' == flag:\n case = 12\n elif '-cs' == flag:\n case = 13\n elif '-fontsize' == flag:\n case = 14\n elif '-systematic' == flag:\n case = 15\n elif '-xscaled' == flag:\n case = 16\n elif '-yscaled' == flag:\n case = 17\n elif '-markersize' == flag:\n case = 18\n elif '-alpha' == flag:\n case = 19\n elif len(flag) >= 2 and '-v' == flag[:2]:\n case = 20\n elif '-columnsfirst' == flag:\n dic['columnsfirst'] = True\n elif \"-legend\" == flag:\n dic['legend'] = True\n elif '-bands' == flag:\n dic['errorbands'] = True\n elif '-grid' == flag:\n dic['grid'] = True\n elif '-sys_err' == flag:\n dic['plot_sys_err'] = True\n elif '-norm' == flag:\n dic['norm'] = True\n elif \"-\" == flag[0] and not case in [7, 8, 9, 20]:\n 
case = -1\n print \"flag\", flag, \"not recognized\"\n elif flag in ['!', 'I', 'interact']:\n dic['Verbose'] = -1\n dic['interactive'] = True\n else:\n # if there is not a flag, and we are reading filenames or formats\n if case == 0:\n dic['files'].append(flag)\n if case == 1:\n dic['outputs'].append(flag)\n if case == 2:\n dic['formats'].append(flag)\n if case == 3:\n if flag[0] == '.':\n dic['TYPE'].append(flag[1:])\n else:\n dic['TYPE'].append(flag)\n if case == 4:\n dic['MULTIP'] = int(flag) # number of plots per plot\n if case == 5:\n dic['MULTIT'] = int(flag) # number of plots per plot\n if case == 6:\n dic['Ucolor'].append(flag)\n if case == 7:\n dic['Ustyle'].append(flag)\n if case == 8:\n dic['x_range'] = map(float, flag.split(\":\"))\n if case == 9:\n dic['y_range'] = map(float, flag.split(\":\"))\n if case == 10:\n dic['x_label'] = flag\n if case == 11:\n dic['y_label'] = flag\n if case == 12:\n dic['layout'] = tuple(map(int, flag.split(\":\")))\n if case == 13:\n dic['colorstyle'].append(flag)\n if case == 14:\n dic['fontsize'] = float(flag)\n if case == 15:\n dic['sys_err_default'] = float(flag)\n if case == 16:\n dic['xscaled'] = float(flag)\n if case == 17:\n dic['yscaled'] = float(flag)\n if case == 18:\n dic['default_marker_size'] = float(flag)\n if case == 19:\n dic['alpha'] = float(flag)\n if case == 20:\n dic['Verbose'] = int(flag)\n if case == 21:\n dic['replots'].append(flag)\n if case == -1:\n print \"ignoring\", flag\n\n if dic['MULTIT'] and dic['layout']:\n if (dic['layout'][0]*dic['layout'][1] < int(dic['MULTIT'])):\n print \"The layout that you specified was too small\"\n dic['layout'] = plot_arragnement()\n else:\n print \"We are using the layout you specified:\", dic['layout'][0],\n print \"by\", dic['layout'][1]\n if dic['MULTIT'] and not dic['layout']:\n dic['layout'] = plot_arragnement()\n\n if dic['outputs'] and (len(dic['outputs']) !=\n len(dic['files'])) and not (dic['MULTIT'] or\n dic['MULTIP']):\n print \"If you are going to 
specify output names\",\n print \"you must specify one output file per input file.\"\n\n\ndef plot_arragnement():\n \"\"\"This function looks at dic['MULTIT'] and decides how to structure the\n multiplot it returns a 2 tuple which is the root for the first 2 argument\n of the subplot command\"\"\"\n\n dic = globe.dic\n found = False\n\n if m.sqrt(float(dic['MULTIT'])) % 1 == 0:\n # Or multiplot can be square\n form = (int(m.sqrt(float(dic['MULTIT']))),\n int(m.sqrt(float(dic['MULTIT']))))\n found = True\n elif int(dic['MULTIT']) == 3:\n form = (1, 3)\n found = True\n if not found:\n looking = True\n a = 1\n while looking and a * (a + 1) <= int(dic['MULTIT']):\n if float(dic['MULTIT']) == float(a * (a + 1)):\n looking = False\n found = True\n else:\n a = a + 1\n if found:\n form = (a, a + 1)\n if not found and m.sqrt(float(dic['MULTIT']) + 1) % 1 == 0:\n form = (int(m.sqrt(float(dic['MULTIT']) + 1)),\n int(m.sqrt(float(dic['MULTIT']) + 1)))\n found = True\n if not found:\n looking = True\n a = 1\n while looking and a * (a + 1) <= int(dic['MULTIT']) + 1:\n if float(dic['MULTIT']) + 1 == float(a * (a + 1)):\n looking = False\n found = True\n else:\n a = a + 1\n if found:\n form = (a, a + 1)\n if not found and m.sqrt(float(dic['MULTIT']) + 2) % 1 == 0:\n form = (int(m.sqrt(float(dic['MULTIT']) + 2)),\n int(m.sqrt(float(dic['MULTIT']) + 2)))\n found = True\n if not found:\n looking = True\n a = 1\n while looking and a * (a + 1) <= int(dic['MULTIT']) + 2:\n if float(dic['MULTIT']) + 2 == float(a * (a + 1)):\n looking = False\n found = True\n else:\n a = a + 1\n if found:\n form = (a, a + 1)\n if not found:\n looking = True\n a = 1\n while looking and a * (a + 1) <= int(dic['MULTIT']):\n if float(dic['MULTIT']) <= float(a * (a + 1)):\n looking = False\n found = True\n else:\n a = a + 1\n if found:\n form = (a, a + 1)\n\n if dic['Verbose'] > 0:\n print \" I have decided that the multiplots will be\", form[0],\n print \"by\", form[1]\n\n return form\n\n\ndef 
interact(**kwargs):\n global Opts\n code.InteractiveConsole(locals=dict(globals().items() +\n kwargs.items())).interact()\n return True\n\n\ndef choose_from(prompt, options, default=' ', info=None):\n options = map(str, options)\n choice = False\n if not default:\n default = ' '\n if default == ' ':\n prompt = prompt + ': '\n else:\n prompt = prompt + ' [%s]' % (default) + ': '\n while not choice:\n choice = raw_input(prompt) or default\n if choice[0].lower() == 'q' and not choice in options:\n sys.exit(1)\n if choice == '?' and not '?' in options:\n if info and len(info) == len(options):\n for i, o in enumerate(options):\n print o, ':', info[i]\n else:\n print 'Options: ' + ', '.join(options)\n print \"'/' to exit\"\n if not choice in options:\n choice = False\n return choice\n\n\ndef choose_multiple(prompt, options, default=' ', info=None):\n options = map(str, options)\n choices = []\n choice = False\n if not default:\n default = ' '\n while True:\n if not choices:\n if default == ' ':\n t_prompt = prompt + ': '\n else:\n t_prompt = prompt + ' [%s]' % (default) + ': '\n else:\n t_prompt = prompt + ' (%s)' % (', '.join(map(str, choices))) + ': '\n choice = raw_input(t_prompt) or default\n if choice[0].lower() == 'q' and not choice in options:\n sys.exit(1)\n if choice == '?' and not '?' in options:\n if info and len(info) == len(options):\n for i, o in enumerate(options):\n print o, ':', info[i]\n else:\n print 'Options: ' + ', '.join(options)\n print \"'/' to exit\"\n if choice == '/':\n return choices\n if choice == 'a':\n return options\n elif choice in options and not choice in choices:\n choices.append(choice)\n default = '/'\n elif choice in options and choice in choices and choice == default:\n return choices\n elif not choice in options:\n choice = False\n\n\nif __name__ == '__main__':\n print \"This code is part of CLP\"\n"
},
{
"alpha_fraction": 0.4683668613433838,
"alphanum_fraction": 0.47940167784690857,
"avg_line_length": 33.559322357177734,
"blob_id": "97fd585d2bbc916aab98c60261de4172dd63fdbe",
"content_id": "c76f6157a4745940b46d367f0363b4a7eca4edde",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4078,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 118,
"path": "/clplot/data_handler.py",
"repo_name": "JohnFNovak/clplot",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python\n\n# Part of CLP\n# A universal command line plotting script\n#\n# John Novak\n\n# This sub-file handles file reading stuff\n\n# written for Python 2.6. Requires Scipy, Numpy, and Matplotlib\n\nimport string\nimport globe\nfrom helpers import check_type\nimport os\n\n\ndef make_blocks(dataarray):\n dic = globe.dic\n\n def block(d, d2):\n blank = {'dims': [len(d), 1], 'data': [], 'labels': None,\n 'Format': None, 'x_label': dic['x_label'],\n 'y_label': dic['y_label']}\n current = [check_type(x) for x in d]\n if 'str' in current:\n if dic['Verbose'] > -1:\n print \"you seem to have text interspersed with your data\"\n print \"Does this look familiar?:\", ' '.join(d)\n # if all([x == 'str' for x in current]):\n if current.count('str') > (len(current) / 2.):\n # more than half the enrties are strings, so we will assume it's\n # column titles\n if len(d) == len(d2):\n if dic['Verbose'] > -1:\n print \"we are going to use\", string.join(d),\n print \"as labels\"\n blank['labels'] = d\n blank['Format'] = 'c'\n blank['dims'][1] = 0\n elif (current[0] == 'str'\n and not any([x == 'str' for x in current[1:]])):\n # The first column in a string, and nothing else is\n blank['labels'] = [d[0]]\n blank['Format'] = 'r'\n blank['dims'][0] = len(d) - 1\n blank['data'].append(d)\n elif not any([x == 'str' for x in d]):\n blank['data'].append(d)\n\n return blank\n\n blocks = []\n for i, d in enumerate(dataarray):\n if i == 0: # first pass\n blocks.append(block(d, dataarray[i + 1]))\n previous = [check_type(x) for x in d]\n else:\n current = [check_type(x) for x in d]\n check = (((current == previous) # same as the previous pass\n or (blocks[-1]['Format'] == 'c'\n and blocks[-1]['dims'][1] == 0\n and len(d) == blocks[-1]['dims'][0]))\n and ((blocks[-1]['Format'] == 'r'\n and not any([x == 'str' for x in current[1:]]))\n or (blocks[-1]['Format'] != 'r'\n and not any([x == 'str' for x in current]))))\n if check:\n blocks[-1]['dims'][1] += 1\n if 
blocks[-1]['Format'] == 'r':\n blocks[-1]['labels'].append(d[0])\n blocks[-1]['data'].append(d[1:])\n else:\n blocks[-1]['data'].append(d)\n else:\n if blocks[-1]['dims'][1] <= 1:\n # The previous block was only one line long\n del(blocks[-1])\n if (i + 1) < len(dataarray):\n blocks.append(block(d, dataarray[i + 1]))\n previous = current\n\n return blocks\n\n\ndef read_data(filename):\n if not os.path.isfile(filename):\n print filename, 'does not exist'\n return []\n with open(filename, \"r\") as datafile:\n test = datafile.readline()\n test = datafile.readline()\n while test[0] == \"#\" and len(test) > 1: # Not a comment or empty\n test = datafile.readline()\n\n delimiters = [' ', ',', ';', '\\t']\n while delimiters:\n d = delimiters.pop()\n if len([x.strip() for x in test.split(d) if x.strip()]) > 1:\n break\n if not d:\n print \"Um, we can't figure out what you are using for data seperation\"\n return False\n\n with open(filename, \"r\") as datafile:\n data = datafile.read().split('\\n')\n data = [line.split(d) for line in data if line.strip()]\n\n if len(data) < 2:\n print filename, 'does not contain sufficient data'\n print 'length of printable data is too short'\n return []\n\n return data\n\n\nif __name__ == '__main__':\n print \"This code is part of CLP\"\n"
},
{
"alpha_fraction": 0.3967909514904022,
"alphanum_fraction": 0.40836456418037415,
"avg_line_length": 41.35933303833008,
"blob_id": "0c11dc82596c7a31960f586f494de1bf406cf5db",
"content_id": "b7fe217e0706125ebe55299341d2695b359882dc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 15207,
"license_type": "no_license",
"max_line_length": 140,
"num_lines": 359,
"path": "/clplot/clplot.py",
"repo_name": "JohnFNovak/clplot",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python\n\n# Part of CLP\n# A universal command line plotting script\n#\n# John Novak\n# June 4, 2012 - July 19, 2012\n\n# Run with: python clplot.py\n# or just: ./clplot.py (after making it executable, obviously)\n\n# written for Python 2.6. Requires Scipy, Numpy, and Matplotlib\n\nimport globe\nfrom structure import structure\nfrom helpers import read_flags, interact, choose_from, check_type, choose_multiple\nfrom plot import plot, plot_tiles, reload_plot\nfrom data_handler import make_blocks, read_data\nimport sys\nimport os\nimport pickle\n\n\ndef init(data=[], files=globe.dic['files'], replot=globe.dic['replots']):\n dic = globe.dic\n\n for i, filename in enumerate(files):\n if dic['Verbose'] > 0:\n print \"loading\", filename\n sys_err = dic['sys_err_default']\n if len(filename.split('#')) == 2:\n sys_err = float(filename.split('#')[1].strip())\n filename = filename.split('#')[0].strip()\n if dic['outputs']:\n output = dic['outputs'].pop()\n else:\n output = '.'.join(filename.split('.')[:-1])\n dic['numbered'] = 0\n\n # Now read data file\n blocks = make_blocks(read_data(filename))\n\n if blocks:\n for j, b in enumerate(blocks):\n if dic['GroupBy'] == 'files':\n data.append([[i, j], filename, output, b, sys_err])\n elif dic['GroupBy'] == 'blocks':\n data.append([[j, i], filename, output, b, sys_err])\n\n data.sort(key=lambda x: x[0])\n data = structure(data)\n\n for i, filename in enumerate(replot):\n if dic['Verbose'] > 0:\n print \"reloading data from\", filename\n if len(filename.split('#')) == 2:\n filename = filename.split('#')[0].strip()\n data = data + reload_plot(filename)\n\n return data\n\n\ndef clplot(data):\n dic = globe.dic\n\n # data format:\n # [[f_id, b_id, c_id], filename, output, x_label, y_label,\n # x, y, x_err, y_err, x_sys_err, y_sys_err]\n\n if not dic['MULTIP']:\n # multiplot flag not give, group plots by file, then block\n l = lambda x: '-'.join(map(str, x))\n groups = [[d for d in data if l(d[0][:2]) == f]\n 
for f in set([l(x[0][:2]) for x in data])]\n else:\n groups = [data[(i * dic['MULTIP']):((i + 1) * dic['MULTIP'])]\n for i in range((len(data) / dic['MULTIP']) + 1)]\n\n plots = []\n for g in groups:\n if g:\n outputfile = '-'.join(sorted(set([d[2] for d in g])))\n plots.append([g, outputfile])\n\n tiles = []\n tiled_count = 0\n tile_name = ''\n for p in plots:\n e_args = {}\n if [x[1] for x in plots].count(p[1]) > 1:\n e_args['numbered'] = [x for x in plots if x[1] == p[1]].index(p)\n if dic['MULTIT']:\n tiles.append(p)\n if len(tiles) == dic['MULTIT']:\n if tile_name != p[1]:\n tiled_count = 0\n plot_tiles(tiles, numbered=tiled_count)\n tiles = []\n tiled_count += 1\n tile_name = p[1]\n else:\n plot(*p, **e_args)\n\n if tiles:\n plot_tiles(tiles, numbered=tiled_count)\n\n\ndef interactive_plot(data=None, load=None):\n \"\"\"Interactive Mode!\"\"\"\n dic = globe.dic\n\n command = True\n history = []\n\n if not load:\n files = dic['files']\n mode = choose_from('load all data to plots?',\n ['n', 'y'],\n default='y',\n info=['the initial plots list will be empty and the user will create plots individually from loaded data',\n 'all of the data loaded will be grouped into plots as they would be in the non-interactive mode'])\n if mode == 'n':\n mode = 's'\n plots = [[]]\n elif mode == 'y':\n mode = 'a'\n if not dic['MULTIP']:\n # multiplot flag not give, group plots by file, then block\n l = lambda x: '-'.join(map(str, x))\n groups = [[d for d in data if l(d[0][:2]) == f]\n for f in set([l(x[0][:2]) for x in data])]\n else:\n groups = [data[(i * dic['MULTIP']):((i + 1) * dic['MULTIP'])]\n for i in range((len(data) / dic['MULTIP']) + 1)]\n\n plots = groups\n # interact(**{'dic': dic, 'data': data, 'plots': plots})\n else:\n data, plots, history, mode = load\n\n if mode == 's':\n default = 'a'\n if mode == 'a':\n default = 'g'\n\n blocks = list(set([x[1] + '_' + str(x[0][1]) for x in data]))\n while command:\n print '#=====================#'\n print ['%d: %s cols by %d 
rows' % (i + 1, len(p), len(p[0][6]))\n for i, p in enumerate(plots) if p and p[0]]\n command = choose_from('?',\n ['!', 'g', 'G', 'f', 'a', 'd', 's', 'q'],\n default=default,\n info=['drops user into an interactive python shell',\n 'generates plots without writing them to file',\n 'generates plots, writes them to file, then exits',\n 'load new data from file',\n 'add data to current plots, or add data to new plot',\n 'delete data from plots, or delete entire plots',\n 'enter plot point/line style',\n 'exit'])\n history.append(command)\n if command == '!':\n interact(**{'dic': dic, 'data': data, 'plots': plots})\n elif command == 'g':\n if mode == 'a':\n for p in plots:\n clplot(p)\n elif mode == 's':\n if len([p for p in plots if p]) > 1:\n for p in plots:\n c = choose_from('Plot %d: %s cols by %d rows ?' % (i + 1, len(p), len(p[0][6])),\n ['y', 'n'],\n default='y')\n if c == 'y':\n clplot(p)\n elif len([p for p in plots if p]) == 1:\n clplot(plots[0])\n elif command == 'G':\n dic['interactive'] = False\n if mode == 'a':\n clplot(data)\n elif mode == 's':\n if len([p for p in plots if p]) > 1:\n for p in plots:\n c = choose_from('Plot %d: %s cols by %d rows ?' 
% (i + 1, len(p), len(p[0][6])),\n ['y', 'n'],\n default='y')\n if c == 'y':\n clplot(p)\n elif len([p for p in plots if p]) == 1:\n clplot(plots[0])\n sys.exit(1)\n elif command == 'f':\n new_file = raw_input('file to load: ').strip()\n if os.path.isfile(new_file):\n files.append(new_file)\n data += structure(init(files=[new_file]))\n blocks = list(set([x[1] + '_' + str(x[0][1]) for x in data]))\n elif command == 'a':\n print 'adding data to plot'\n print 'select file:'\n for i, f in enumerate(files):\n n_b = len(set([' '.join(map(str, x[0][:2]))\n for x in data if x[1] == f]))\n print '%d- file: %s [# blocks = %d]' % (i + 1, f, n_b)\n f_choice = int(choose_from(\"selection\",\n map(str,\n range(1, 1 + len(files))),\n default='1')) - 1\n blocks = list(set([' '.join([x[1], 'block:', str(x[0][1] + 1)]) for\n x in data if x[1] == files[int(f_choice)]]))\n blocks.sort(key=lambda x: x.split(' ')[-1])\n print '-------------'\n print 'file %s' % (files[int(f_choice)])\n print 'select block:'\n for i, b in enumerate(blocks):\n n_c = len(set([' '.join(map(str, x[0])) for x in data if\n ' '.join([x[1], 'block:', str(x[0][1] + 1)]) == b]))\n n_r = len([x for x in data if ' '.join([x[1], 'block:', str(x[0][1] + 1)]) == b][0][6])\n print '%d- [# cols = %d, # rows = %d]' % (i + 1, n_c, n_r)\n choice = int(choose_from(\"selection\",\n map(str,\n range(1, 1 + len(blocks))),\n default='1')) - 1\n cols = [d for d in data\n if ' '.join([d[1], 'block:', str(d[0][1] + 1)]) ==\n blocks[choice]]\n print '-------------'\n print 'file %s' % (files[int(f_choice)])\n print 'block %d- [# cols = %d, # rows = %d]' % (choice + 1, len(cols), len(cols[0][6]))\n print 'select columns:'\n for i, d in enumerate(cols):\n print '%d- col: %d [len %d title: %s]' % (i + 1, d[0][1] + 1, len(d[6]), d[4])\n choices = choose_multiple(\"selections\",\n range(1, 1 + len(cols)),\n default='1')\n print '-------------'\n if choices == map(str, range(1, 1 + len(cols))):\n print 'adding all columns as plot'\n if 
not plots[-1]:\n plots[-1] = cols\n else:\n plots.append(cols)\n choices = []\n for choice in choices:\n print choice\n good = False\n if check_type(choice) == 'num':\n size = len(cols[int(choice) - 1][6])\n choice = int(choice) - 1\n if not plots[0]:\n print 'No plots, starting new plot'\n plots[0].append(cols[choice])\n good = True\n elif len([p for p in plots if p and len(p[0]) > 6\n and len(p[0][6]) == size]) > 1:\n # print size, [len(p[0][6]) for p in plots]\n print 'Multiple plots of an appropriate dimension have',\n print 'been found'\n opts = [p for p in plots if p[6] == size]\n for i, o in enumerate(opts):\n print i, ':', o\n c = choose_from(\"select one ('n' for new)\",\n map(str, range(1, 1 + len(opts))) + ['n'],\n default='1')\n if check_type(c) == 'num':\n plots[plots.index(opts[int(c) - 1])].append(cols[choice])\n good = True\n elif len([p for p in plots if p and len(p[0]) > 6\n and len(p[0][6]) == size]) == 1:\n # print size, [len(p[0][6]) for p in plots]\n print 'One plot has been found with the appropriate',\n print 'dimension.'\n new = choose_from('start new plot? 
(y/n)',\n ['y', 'n'],\n default='n')\n if new == 'n':\n plots[plots.index([p for p in plots if p\n and len(p[0]) > 6\n and len(p[0][6]) == size][0]\n )].append(cols[int(choice)])\n good = True\n else:\n print 'no plot of appropriate dimension has been found.'\n if not good:\n print 'starting new plot'\n plots.append([cols[int(choice)]])\n blocks = list(set([x[1] + '_' + str(x[0][1]) for x in data]))\n elif command == 'd':\n if len(plots) > 1:\n print 'select plots:'\n for i, p in enumerate(plots):\n # print p\n print '%d- %s cols by %d rows' % (i + 1, len(p), len(p[0][6]))\n choices = choose_multiple(\"selection\",\n range(1, 1 + len(plots)),\n default='1')\n else:\n choices = ['0']\n done = choose_from(\"delete plots:[%s] ('n' to select columns)?\" % ', '.join(choices),\n ['y', 'n'],\n default='n')\n choices = map(int, choices)\n if done == 'y':\n for c in choices:\n del(plots[c])\n else:\n for c in choices:\n print 'plot %s' % (c)\n print 'columns:'\n for i, d in enumerate(plots[choice]):\n print '%d- file: %s block: %d col: %d [len %d title: %s]' % (i + 1, d[1], d[0][0] + 1, d[0][1] + 1, len(d[6]), d[4])\n choice2 = int(choose_from(\"selection\",\n map(str,\n range(1, 1 + len(plots[choice]))),\n default='1')) - 1\n s = choose_from('delete?', ['y', 'n'], default='n')\n if s == 'y':\n del(plots[choice][choice2])\n elif command == 's':\n dic['Ustyle'] = [raw_input('style: ')] + dic['Ustyle']\n elif command == 'q':\n if dic['SavePrompt']:\n save = choose_from('save?', ['y', 'n'], default='y')\n if save == 'y':\n default = dic['DefaultSave']\n fname = raw_input('filename? 
[%s]: ' % (default)) or default\n pickle.dump((data, plots, history, mode), open(fname, 'w'))\n sys.exit(1)\n\n\ndef main():\n \"\"\"A Python program that takes a file or list of filesand creates plots of\n the data.\"\"\"\n dic = globe.dic\n read_flags()\n if dic['interactive']:\n if dic['LoadFromSavePrompt']:\n load = choose_from('load saved state?',\n ['y', 'n'],\n default='n',\n info=['user can load options, data, and plots from previous session',\n 'data will be loaded from scratch'])\n if load == 'y':\n default = dic['DefaultSave']\n fname = raw_input('filename? [%s]: ' % (default)) or default\n print 'loading from saved state, not loading files from',\n print 'command line arguments'\n interactive_plot(load=pickle.load(open(fname, 'r')))\n return\n data = init()\n interactive_plot(data=data)\n else:\n data = init()\n clplot(data)\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.32510510087013245,
"alphanum_fraction": 0.3397728204727173,
"avg_line_length": 44.63673400878906,
"blob_id": "782c156b80af5efd22091ef62d72fc5f819b65e8",
"content_id": "6255f3c40ca7d7e9da800d3bff0451a44b740b65",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11181,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 245,
"path": "/clplot/structure.py",
"repo_name": "JohnFNovak/clplot",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python\n\n# Part of CLP\n# A universal command line plotting script\n#\n# John Novak\n\n# This sub-file does some of the automatic structure determination\n\n# written for Python 2.6. Requires Scipy, Numpy, and Matplotlib\n\nimport numpy as np\nimport globe\nfrom helpers import check_type, is_it_ordered, choose_from\n\n\ndef structure(data):\n \"\"\"This function takes a rectangular array of data and plots it. First\n looks at the dimensions of the data, the it 'decides' the best way to plot\n it.'\"\"\"\n dic = globe.dic\n\n new = []\n\n for d in data:\n if dic['interactive'] or dic['Verbose'] > 1:\n print 'determining form of data from', d[1]\n Form = None\n w = d[3]['dims'][0]\n h = d[3]['dims'][1]\n block = np.array(d[3]['data'])\n\n # Check if a prespecified format will work\n if dic['formats']:\n if d[3]['Format']:\n if dic['Verbose'] > 1:\n print 'data handler decided appropriat format was',\n print d[3]['Format']\n formats = [x for x in dic['formats'] if x[0] == d[3]['Format']]\n else:\n formats = dic['formats']\n for f in formats:\n l = len(f) - 1\n if dic['Verbose'] > 2:\n print 'checking', f, l, w, h\n wild = '*' in f\n if f[0] == \"c\" and (l == w or (l < w and wild)):\n if dic['Verbose'] > 0:\n print \"Using specified format:\", f\n Form = f\n break\n elif f[0] == \"r\" and (l == h or (l < h and wild)):\n if dic['Verbose'] > 0:\n print \"Using specified format:\", f\n Form = f\n\n if not Form:\n if w == 2 and h > 2:\n # the good old fashioned two columns\n if dic['Verbose'] > 0:\n print \"the good old fashioned two columns\"\n Form = 'cxy'\n elif w > 2 and h == 2:\n # the good old fashioned two rows\n if dic['Verbose'] > 0:\n print \"the good old fashioned two rows\"\n Form = 'rxy'\n elif h > (w * 3):\n Form = 'cx' + ('y' * (w - 1))\n elif w > (h * 3):\n Form = 'rx' + ('y' * (h - 1))\n else:\n rows = [is_it_ordered(block[:, x].tolist()) for x in range(w)]\n cols = [is_it_ordered(block[x, :].tolist()) for x in range(h)]\n if 
cols.count(1) > rows.count(1):\n Form = 'rx' + ('y' * (h - 1))\n elif rows.count(1) > cols.count(1):\n Form = 'cx' + ('y' * (w - 1))\n else:\n print \"I have no idea what's going on\"\n Form = 'cx' + ('y' * (w - 1))\n\n if dic['interactive']:\n print 'block is', w, 'columns by', h, 'rows'\n if d[3]['labels']:\n print 'column labels:', d[3]['labels']\n print 'first column [:30]', block[:, 0][:30]\n print 'first row', block[0, :]\n if Form:\n print 'The code has estimated that the correct format is', Form\n c = choose_from('enter by hand?', ['y', 'n'], default='y',\n info=['enter form manually',\n 'use the form determined by the code'])\n Form = None\n if c == 'y':\n if d[3]['Format']:\n print 'the block processor concluded the data was by',\n if d[3]['Format'] == 'c':\n print 'columns'\n if d[3]['Format'] == 'r':\n print 'rows'\n c = choose_from('is that okay?', ['y', 'n'], default='y',\n info=['yes, use that determination',\n 'no, select rows/columns by hand'])\n if c == 'y':\n Form = d[3]['Format']\n if not Form:\n Form = choose_from('columns or rows?',\n ['c', 'r'],\n default='c',\n info=['the data is arranged by columns',\n 'the data is arranged by rows'])\n if Form == 'c':\n size = w\n elif Form == 'r':\n size = h\n for i in range(size):\n if Form[0] == 'c':\n print 'column', i + 1, 'starts with', block[0, i]\n if d[3]['labels']:\n print 'labeled:', d[3]['labels'][i]\n if Form[0] == 'r':\n print 'row', i + 1, 'starts with', block[i, 0]\n c = choose_from('include?',\n ['x', 'y', 'e', 'q', 's', 'S', 'n'],\n info=['include as x row/column',\n 'include as y row/column',\n 'include as error on previous row/column',\n 'do not include and ignore all following rows/columns',\n 'include as percent systematic error on previous row/column',\n 'include as absolute systematic error on previous row/column',\n 'do not include'])\n if c == 'q':\n Form += '*'\n break\n elif c == 'n':\n Form += '_'\n else:\n Form += c\n\n if Form:\n if Form[0] == 'r':\n block = block.T\n needx = 
True\n mults = 0\n for j, c in enumerate(Form[1:]):\n if check_type(c) == 'num':\n mults += (int(c) - 1)\n if c == \"x\":\n if dic['x_label']:\n d[3]['x_label'] = dic['x_label']\n elif d[3]['labels']:\n d[3]['x_label'] = d[3]['labels'][j + mults]\n x = block[:, j + mults].tolist()\n needx = False\n break\n if needx:\n print \"No x specified in format\"\n x = range(h)\n count = 0\n for j in range(len(Form) - 1):\n if check_type(Form[j + 1 + mults]) == 'num':\n for k in range(1, int(Form[j + 1 + mults])):\n if Form[j + 2 + mults] == \"y\":\n new.append([d[0] + [j + mults], d[1], d[2],\n d[3]['x_label']])\n if d[3]['labels']:\n new[-1].append(d[3]['labels'][j + mults])\n else:\n new[-1].append('_'.join(map(str, [d[2],\n 'block', d[0][1], 'col',\n j + mults])))\n new[-1] = new[-1] + [x, block[:, count].tolist()]\n new[-1].append([0]*len(x)) # x err\n new[-1].append([0]*len(x)) # y err\n new[-1].append([0]*len(x)) # x sys err\n new[-1].append(d[-1] *\n block[:, count].astype(float)\n ) # y sys err\n elif Form[j + 2 + mults] == \"e\":\n if Form[j + 1 + mults] == \"y\":\n new[-1][-3] = block[:, count].tolist()\n if Form[j + 1 + mults] == \"x\":\n new[-1][-4] = block[:, count].tolist()\n elif Form[j + 2 + mults] == \"s\":\n # % systematic error\n if Form[j + 1 + mults] == \"y\":\n new[-1][-1] = (new[-1][-5] *\n block[:, count]).tolist()\n if Form[j + 1 + mults] == \"x\":\n new[-1][-2] = (new[-1][-6] *\n block[:, count]).tolist()\n elif Form[j + 2 + mults] == \"S\":\n # abs systematic error\n if Form[j + 1 + mults] == \"y\":\n new[-1][-1] = block[:, count].tolist()\n if Form[j + 1 + mults] == \"x\":\n new[-1][-2] = block[:, count].tolist()\n count = count + 1\n mults = mults + 1\n elif Form[j + 1 + mults] == \"y\":\n new.append([d[0] + [j + 1 + mults], d[1], d[2],\n d[3]['x_label']])\n if d[3]['labels']:\n new[-1].append(d[3]['labels'][j + mults])\n else:\n new[-1].append('_'.join(map(str, [d[2],\n 'block', d[0][1], 'col',\n j + mults])))\n new[-1] = new[-1] + [x, block[:, 
count].tolist()]\n new[-1].append([0]*len(x)) # x err\n new[-1].append([0]*len(x)) # y err\n new[-1].append([0]*len(x)) # x sys err\n new[-1].append(d[-1] *\n block[:, count].astype(float)) # y sys err\n elif Form[j + 1 + mults] == \"x\":\n x = block[:, count].tolist()\n elif Form[j + 1 + mults] == \"e\":\n if Form[j + mults] == \"y\":\n new[-1][-3] = block[:, count].tolist()\n if Form[j + mults] == \"x\":\n new[-1][-4] = block[:, count].tolist()\n elif Form[j + 1 + mults] == \"s\":\n # % systematic error\n if Form[j + mults] == \"y\":\n new[-1][-1] = (new[-1][-5] *\n block[:, count]).tolist()\n if Form[j + mults] == \"x\":\n new[-1][-2] = (new[-1][-6] *\n block[:, count]).tolist()\n elif Form[j + 1 + mults] == \"S\":\n # abs systematic error\n if Form[j + mults] == \"y\":\n new[-1][-1] = block[:, count].tolist()\n if Form[j + mults] == \"x\":\n new[-1][-2] = block[:, count].tolist()\n count = count + 1\n if j + mults + 2 == len(Form):\n break\n\n return new\n\n\nif __name__ == '__main__':\n print \"This code is part of CPL\"\n"
},
{
"alpha_fraction": 0.7231884002685547,
"alphanum_fraction": 0.7318840622901917,
"avg_line_length": 48.28571319580078,
"blob_id": "be4252b5e700ab3e5a912232428958a7a0cf170b",
"content_id": "dbc34d20942967cf53a90bad952eb377938052e3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2070,
"license_type": "no_license",
"max_line_length": 663,
"num_lines": 42,
"path": "/README.md",
"repo_name": "JohnFNovak/clplot",
"src_encoding": "UTF-8",
"text": "#clplot - Command Line Plotting\n\n##A command line plotting utility written in python\n\nWe live on pypi!\n> pip install commandlineplot\n\n###John Novak\n\njohn (period) franc (period) novak (at) gmail (dot) com <br />\nThis project was born: June 6, 2012 <br />\nLast Updated: Check the Repo \n\n####What it is:<br />\n an intelligent command line plotting utility. You give it a text file (or multiple text files) and it gives you plots. It can take a decent handful of plotting flags giving the user the ability to make relatively complicated and customized plots quickly.\n\n###Note to the discerning:<br />\nSep 11, 2013:\n>This is not the most elegant piece of code that was ever written. This was one of the first python projects I ever took on, before I had ever even heard of 'pep8'. It grew like a wet thing in a dark corner. \"Rewrite plot code\" has been on my to-do list for 6 months, but I haven't done it yet. Why? Two reasons: I'm trying to finish my doctorate, so this isn't a top priority, and frankly, it works. I crack it open all the time and add things that I need (like error bands, axes scaling, hollow points, etc) and I think \"this needs to be gutted\". Once the university accepts my thesis, then I'll attack this. Well, maybe beer first, then I'll attack this.<br />\n\nMay 9, 2014: The time has come to get this code up to snuff. I have returned, christened with my PhD.\n\n\n####requires:<br />\npython<br />\nnumpy<br />\nmatplotlib<br />\n\n####To use:<br />\n python clplot.py <something to plot>\n\n for more information call<br />\n python clplot.py -help\n\n####Contents of this file:<br />\nclplot/clplot.py - the heart of the program. <br />\nclplot/plot.py - sub-module handles the actual plotting. <br />\nclplot/structure.py - sub-module which handles the automatic structure determination. <br />\nclplot/helpers.py - sub-module which is collection of helper functions. 
<br />\nclplot/data\\_handler.py - sub-module which handles file interactions. <br />\nclplot/globe.py - sub-module which holds shared globals. <br />\nclplot/test/various .txt files - testing files\n"
},
{
"alpha_fraction": 0.4869281053543091,
"alphanum_fraction": 0.49673202633857727,
"avg_line_length": 22.09433937072754,
"blob_id": "7dc14e43a42d3414b8089e577749bb2f8f9d9ec4",
"content_id": "0010aa6b4267d3870987e13989c7fe33a7efebda",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1224,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 53,
"path": "/clplot/globe.py",
"repo_name": "JohnFNovak/clplot",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python\n\n# Part of CLP\n# A universal command line plotting script\n#\n# John Novak\n\n# This sub-file just holds a global dictionary\n\n# written for Python 2.6. Requires Scipy, Numpy, and Matplotlib\n\ndic = {'formats': [],\n 'outputs': [],\n 'TYPE': 'pdf',\n 'MULTIT': None,\n 'MULTIP': None,\n 'layout': None,\n 'columnsfirst': False,\n 'Ucolor': [],\n 'Ustyle': [],\n 'x_range': None,\n 'y_range': None,\n 'x_label': None,\n 'y_label': None,\n 'x_log': False,\n 'y_log': False,\n 'numbered': None,\n 'files': [],\n 'replots': [],\n 'legend': False,\n 'colorstyle': [],\n 'errorbands': False,\n 'fontsize': 20,\n 'grid': False,\n 'sys_err_default': 0,\n 'default_marker_size': 5,\n 'plot_sys_err': False,\n 'yscaled': 1,\n 'xscaled': 1,\n 'alpha': 0.25,\n 'norm': False,\n 'EmbedData': True,\n 'Verbose': 0,\n 'GroupBy': 'files',\n 'interactive': False,\n 'keep_live': False,\n 'LoadFromSavePrompt': True,\n 'SavePrompt': True,\n 'DefaultSave': 'default_save.plots'}\n\n\nif __name__ == '__main__':\n print \"This code is part of CLP\"\n"
}
] | 7 |
jerseycity2018/team-17
|
https://github.com/jerseycity2018/team-17
|
877140d874c0dbcd4aa5ba5f04e9365447094e24
|
d0ac516b6bba65494abb1ef46a6bd9c85be13c21
|
ccd0fdd9a544860c736d7b0175ffd80ac0109937
|
refs/heads/master
| 2020-04-02T19:29:40.922879 | 2018-11-21T16:30:21 | 2018-11-21T16:30:21 | 154,735,977 | 0 | 1 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7586206793785095,
"alphanum_fraction": 0.7586206793785095,
"avg_line_length": 28,
"blob_id": "c2d90a374c586e498088ec8dfeb2cfd8457ee6ed",
"content_id": "2860f870580df5b34aaff6ed72b3ba895d2b5d0e",
"detected_licenses": [
"LicenseRef-scancode-warranty-disclaimer"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 29,
"license_type": "no_license",
"max_line_length": 28,
"num_lines": 1,
"path": "/static/assets/readme.txt",
"repo_name": "jerseycity2018/team-17",
"src_encoding": "UTF-8",
"text": "This is a folder for assets!\n"
},
{
"alpha_fraction": 0.6891002058982849,
"alphanum_fraction": 0.6898317337036133,
"avg_line_length": 23.339284896850586,
"blob_id": "2ac323a91ee75a33ec7a4eee409c4270d8c52173",
"content_id": "cb4438035a13facc5a0759708f82ff4bc8f5ffad",
"detected_licenses": [
"LicenseRef-scancode-warranty-disclaimer"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1367,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 56,
"path": "/User.py",
"repo_name": "jerseycity2018/team-17",
"src_encoding": "UTF-8",
"text": "from flask import Flask, flash, request, redirect, render_template, session, abort\nfrom flask_bootstrap import Bootstrap\nimport os\nimport json\n\napp = Flask(__name__)\napp.secret_key = b'_5#y2L\"F4Q8z\\n\\xec]/'\nBootstrap(app)\n\[email protected]('/')\ndef index():\n#\twith open('user_data.json') as json_user_data:\n#\t\td = json.load(json_user_data)\n\treturn render_template('index.html')\n\[email protected]('/login', methods=['POST'])\ndef login():\n\tusername = request.form['username']\n\tpassword = request.form['password']\n\tprint(username)\n\tprint(password)\n\tresult = check(username, password)\n\tif result == True:\n\t\treturn render_template('profile2.html', )\n\telse:\n\t\tflash('wrong password or username')\n\treturn render_template('login.html')\n\[email protected]('/loadlogin', methods=['GET','POST'])\ndef dashboard():\n\tif request.method=='GET':\n\t\treturn render_template('login.html')\n\t\n\ndef check(username, password):\n\twith open('users.json') as json_user_data :\n\t\td = json.load(json_user_data)\n\t\tfor users in d['users']:\n\t\t\tif users['username']==username and users['password']==password:\n\t\t\t\treturn True \n\treturn False\n\n\n\n\n\n\n#def checkUser(username, password):\n#\trs = c.execute(\"SELECT * FROM username_tbl WHERE username = %s AND password = %s\", (username, password))\n#\t# rs = con.execute(\"SELECT * FROM beers\")\n#\tresult = rs.first()\n#\tif result is None:\n#\t\treturn None\n#\telse:\n#\t\tprint(result)\n#\t\treturn result \n\n\t\n"
},
{
"alpha_fraction": 0.7800911068916321,
"alphanum_fraction": 0.7826935648918152,
"avg_line_length": 152.8000030517578,
"blob_id": "21efae03f42e2c10216fc4788e1a987de31c8e11",
"content_id": "e9d54478c866a80a20ca3729ee2b839b52e19f0a",
"detected_licenses": [
"LicenseRef-scancode-warranty-disclaimer"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1537,
"license_type": "no_license",
"max_line_length": 808,
"num_lines": 10,
"path": "/README.md",
"repo_name": "jerseycity2018/team-17",
"src_encoding": "UTF-8",
"text": "FOOD PLATE Copyright 2018 TM\nContributors: Yan Lawrence, Chia-Yi (Aaron) Liang, Nayana Palanivel, Dhruvil Patel, Sam Silverstein. \n\nFood Plate is a sustainability-focused web and mobile application aimed towards incentivizing young adults to learn to love the Earth. It is a partnership with Rare, a nonprofit that strives to teach humans how to be better to their planet. The focus of Food Plate is encouraging a shift towards a plant-based lifestyle. The app is a social driven, gamelike means to track the types of food consumed in a day. It emphasizes on impact by providing the user with a visual summary of the composition of their diet weekly. This is done in the hopes of encouraging users to reflect on their lifestyle and how they could improve it for the betterment of the Earth. The leaderboard/ranking effort was included to incite excitement in users to feel challenged to reach the goals and statistics of those around them. \n\nDependencies include flask and flask-bootstrap. Please install before and then write \"FLASK_APP=User.py flask run\" to run the app. \n\n\n\n##### The code (\"Code\") in this repository was created solely by the student teams during a coding competition hosted by JPMorgan Chase Bank, N.A. (\"JPMC\").\t\t\t\t\t\tJPMC did not create or contribute to the development of the Code. This Code is provided AS IS and JPMC makes no warranty of any kind, express or implied, as to the Code,\t\t\t\t\t\tincluding but not limited to, merchantability, satisfactory quality, non-infringement, title or fitness for a particular purpose or use."
}
] | 3 |
gjankowiak/crate-o-snakes
|
https://github.com/gjankowiak/crate-o-snakes
|
a5acd665133e91aecb558946994cee2185f608de
|
d3c53703da95526813e2659d15ec05f5ce5f0bb7
|
91a80b7592cae9b0312c90cd269f1465276b9063
|
refs/heads/master
| 2016-09-06T19:31:13.559226 | 2015-08-24T06:30:35 | 2015-08-24T06:30:35 | 30,869,387 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5272276997566223,
"alphanum_fraction": 0.5445544719696045,
"avg_line_length": 19.200000762939453,
"blob_id": "f48680ac53b5bf817e90a8e448a2f509aed72dfb",
"content_id": "0420f5cbc3a3713a3b232005d3ba612e42e27dfc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 404,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 20,
"path": "/nputils.py",
"repo_name": "gjankowiak/crate-o-snakes",
"src_encoding": "UTF-8",
"text": "import numpy as np\n\ndef blkdiag(matrices):\n matrices = filter(lambda x:x.size, matrices)\n\n shapes = []\n\n for mat in matrices:\n shapes.append(list(mat.shape))\n\n shapes = np.array(shapes)\n\n M_shape = np.sum(shapes, axis=0)\n\n M = np.zeros(M_shape)\n\n [n, m] = [0, 0]\n for i, s in enumerate(shapes):\n M[n:n+s[0], m:m+s[1]] = matrices[i]\n [n, m] = [n+s[0], m+s[1]]\n"
},
{
"alpha_fraction": 0.6447124481201172,
"alphanum_fraction": 0.6508967280387878,
"avg_line_length": 38.43902587890625,
"blob_id": "b395c0f184f45b96dbbffb8b6a89be89205464e8",
"content_id": "42d32916dbdd36737b580a293c7c63cd36df44f3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3234,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 82,
"path": "/compute.py",
"repo_name": "gjankowiak/crate-o-snakes",
"src_encoding": "UTF-8",
"text": "import numpy as np\nfrom matplotlib import pyplot as plt\nimport scipy.sparse as sp\nimport dolfin\n\ndef fenics_spy(matrix, show=False, title=\"\"):\n \"\"\"Plot the sparsity pattern of a Dolfin GenericMatrix\n wrapper around matplotlib.pyplot.spy\n\n See cos.compute.spy for details on arguments\n \"\"\"\n rows, cols, values = matrix.data()\n mat = sp.csr_matrix((values, cols, rows))\n spy(mat, show=show, title=title)\n\ndef spy(M, show=False, title=\"\"):\n \"\"\"Plot the sparsity pattern of a numpy sparse matrix\n wrapper around matplotlib.pyplot.spy\n\n Keyword arguments:\n show: whether to hold the plot (using dolfin.show())\n title: an optional title for the plot\n \"\"\"\n plt.spy(M, markersize=0.5)\n plt.title(title)\n if show:\n plt.show()\n\ndef numpy_matrix(matrix, m, n):\n \"\"\"Given a Dolfin GenericMatrix, return a copy as a numpy CSR matrix.\n\n m: number of rows of the resulting matrix\n n: number of columns of the resulting matrix\n \"\"\"\n if type(matrix) == dolfin.cpp.la.Vector:\n rows, cols, values = sp.find(matrix.array())\n return sp.coo_matrix((values, (rows, cols)), shape=(m, n))\n else:\n rows, cols, values = matrix.data(deepcopy=True)\n return sp.csr_matrix((values, cols, rows))\n\ndef stitch(funcs, V_fine, c_w, c_h, N_w, N_h):\n \"\"\"Stitch the dolfin Functions funcs into a global V_Fine function\n\n This is used for MsFEM type methods, where funcs are functions defined on the cells of a coarse mesh\n that need to be stitched into a function over the whole domain. 
The meshes need to be compatible and\n indexed in the same way, and the DOFs need to be at the vertices, which effectively limits it to\n CG1 functions defined over a regular rectangular mesh (built with UnitSquareMesh).\n\n funcs: a list of Functions defined on the cell mesh\n V_fine: the global function space\n c_w: the width of the cell mesh, as passed to UnitSquareMesh\n c_h: the width of the cell mesh, as passed to UnitSquareMesh\n N_w: the width of the global mesh, as passed to UnitSquareMesh\n N_h: the width of the global mesh, as passed to UnitSquareMesh\n \"\"\"\n\n if not funcs:\n return None\n if N_w % c_w != 0:\n raise ValueError(\"Cell and global meshes have incompatible widths ({0} and {1})\".format(c_w, N_w))\n if N_h % c_h != 0:\n raise ValueError(\"Cell and global meshes have incompatible heights ({0} and {1})\".format(c_h, N_h))\n cells_per_side_w = N_w//c_w\n cells_per_side_h = N_h//c_h\n vtd = dolfin.vertex_to_dof_map(funcs[0].function_space())\n vtd_f = dolfin.vertex_to_dof_map(V_fine)\n v_f = dolfin.Function(V_fine)\n v_f_vec = v_f.vector()\n dof_per_vertex = max(1, reduce(lambda s,i:s+max(1, i.num_sub_spaces()), V_fine.split(), 0))\n\n offset = 0\n\n idc = np.array([[j*(dof_per_vertex*(N_w+1))+i for i in range(dof_per_vertex*(c_w+1))] for j in range(c_h+1)]).ravel()\n\n for j in range(cells_per_side_h):\n for i in range(cells_per_side_w):\n vertices_f = vtd_f[idc+offset]\n v_f_vec[vertices_f] = funcs[j*cells_per_side_w+i].vector()[vtd]\n offset += c_w*dof_per_vertex\n offset += ((N_w+1)*(c_h-1)+1)*dof_per_vertex\n return v_f\n"
}
] | 2 |
EclipseETS/KiCad_BOM_Generator
|
https://github.com/EclipseETS/KiCad_BOM_Generator
|
8f6fdde06ac559cfad52da1edf6ba748fc183afc
|
4730a93f8fc74db01843cdcfe92dbb42708538f7
|
6fb58ed8bf57ec3270f2880afbe230b10defa5b8
|
refs/heads/master
| 2021-01-21T11:37:26.336473 | 2017-05-21T21:56:12 | 2017-05-21T21:56:12 | 91,748,972 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5555742383003235,
"alphanum_fraction": 0.5727517604827881,
"avg_line_length": 28.92708396911621,
"blob_id": "ccb6375031dd9ad0ef8462b34dcf90a5120833fc",
"content_id": "aae0aca74839f82ac9bec951b4134fd1617339d5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5939,
"license_type": "no_license",
"max_line_length": 199,
"num_lines": 192,
"path": "/BoM_Script_Eclipse.py",
"repo_name": "EclipseETS/KiCad_BOM_Generator",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon May 15 18:21:18 2017\r\n\r\n@author: Jul\r\n\r\n#==============================================================================\r\n\r\nDescription:\r\n1- Ouvrir le BOM generer par KiBom.\r\n2- Lire le fichier\r\n3- Generer le csv Project_MasterBom\r\n4- Ecrire dans le Project_MasterBom\r\n5- Generer les csv Digikey_MasterBom, Wurth_MasterBom, autre_MasterBom\r\n6- Parser le csv pour les différents suppliers\r\n7- Ecrire dans les csv des suppliers respectifs\r\n\r\nOption: Generer les noms des supliers automatiquement\r\n\r\n#==============================================================================\r\n\"\"\"\r\n\r\n\r\n# Import modules\r\nimport pandas as pd\r\n\r\n# Todo: Modifier en fonction\r\n# Todo: Faire une interface graphique\r\n\r\n# Todo: Inclure le nom du responsable de la commande\r\n#!/usr/bin/env python\r\nimport urllib2\r\nfrom bs4 import BeautifulSoup\r\nimport sys\r\nimport re\r\n\r\ndef digikey_part_is_reeled(html_tree):\r\n '''Returns True if this Digi-Key part is reeled or Digi-reeled.'''\r\n qty_tiers = list(get_digikey_price_tiers(html_tree).keys())\r\n if len(qty_tiers) > 0 and min(qty_tiers) >= 100:\r\n return True\r\n if html_tree.find('table',\r\n id='product-details-reel-pricing') is not None:\r\n return True\r\n return False\r\n\r\n \r\ndef get_digikey_price(pnumber, quantity):\r\n page = urllib2.urlopen( \\\r\n \"http://search.digikey.ca/scripts/DkSearch/dksus.dll?Detail?name=\" + pnumber)\r\n\r\n soup = BeautifulSoup(page,'lxml')\r\n\r\n '''Get the pricing tiers from the parsed tree of the Digikey product page.'''\r\n price_tiers = {}\r\n try:\r\n for tr in soup.find('table', id='product-dollars').find_all('tr'):\r\n try:\r\n td = tr.find_all('td')\r\n qty = int(re.sub('[^0-9]', '', td[0].text))\r\n price_tiers[qty] = float(re.sub('[^0-9\\.]', '', td[1].text))\r\n except (TypeError, AttributeError, ValueError,\r\n IndexError): # Happens when there's no <td> in table 
row.\r\n continue\r\n except AttributeError:\r\n # This happens when no pricing info is found in the tree.\r\n print 'No Digikey pricing information found!'\r\n return 0 # Return empty price tiers.\r\n if min(price_tiers) >= 100:\r\n print \"reel\"\r\n else:\r\n while (price_tiers.get(quantity, None) == None):\r\n quantity -= 1\r\n print pnumber \r\n print price_tiers.get(quantity, None)\r\n return price_tiers.get(quantity, None)\r\n \r\n\r\n\r\n# File path du fichier\r\n# Todo: Inclure un file path\r\nfp = '/home/jean-francois/Git/Eclipse Solar Car/Template_Hardware/Project_Template/Project_Template_bom2.csv'\r\n\r\n\r\n# Lit le Bom genere par KiBom\r\n#get_digikey_price('535-13445-2-ND', 10)\r\ndf1=pd.read_csv(fp)\r\n\r\n\r\n# Lit les colonnes standards a Eclipse\r\ndf2 = pd.DataFrame(df1, columns = ['Component', 'References', 'Value', 'Footprint', 'Quantity Per PCB', 'Description', 'Manufacturer', 'Manufacturer Part Number', 'Supplier', 'Supplier Part Number'])\r\n\r\n# Ecrit dans un nouveau fichier le Master Bom\r\n# Todo: Renommer le nom de fichier\r\ndf2.to_csv('MasterBom.csv', index = False)\r\n\r\n# Scan pour les colonnes pertinente a la commande\r\ndf3 = pd.DataFrame(df1, columns = ['Manufacturer', 'Manufacturer Part Number', 'Supplier', 'Supplier Part Number', 'Quantity Per PCB'])\r\n\r\n# Scan pour le supplier Digikey\r\ndf4 = df3[df3['Supplier'].notnull() & (df3['Supplier'] == \"Digikey\")& (df3['Manufacturer'] != \"Wurth Electronics Inc.\")]\r\n# Remet l'index a zero\r\ndf4 = df4.reset_index(drop=True)\r\n# Fait commencer l'index a 1\r\ndf4.index = df4.index + 1 \r\n\r\n#df4['Price'] = get_digikey_price(pnumber, quantity)\r\n\r\ndf4['Unit price'] = df4.apply(lambda df4: get_digikey_price(df4['Supplier Part Number'], df4['Quantity Per PCB']), axis=1)\r\ndf4['Ext price'] = df4['Unit price'] * df4['Quantity Per PCB']\r\n#print df4.values\r\n \r\n# Ecrit dans un fichier csv les pieces chez digikey \r\ndf4.to_csv('DigikeyBom.csv')\r\n\r\n# Scan pour le 
supplier Wurth\r\ndf5 = df3[df3['Supplier'].notnull() & (df3['Manufacturer'] == \"Wurth Electronics Inc.\")]\r\n# Remet l'index a zero\r\ndf5 = df5.reset_index(drop=True)\r\n# Fait commencer l'index a 1\r\ndf5.index = df5.index + 1 \r\n# Ecrit dans un fichier csv les pieces chez digikey \r\n# Todo: Ajouter le nom du projet\r\ndf5.to_csv('WurthBom.csv')\r\n\r\n# Scan pour les autres supplier que Wurth et digikey\r\ndf6 = df3[df3['Supplier'].notnull() & (df3['Manufacturer'] != \"Wurth Electronics Inc.\") & (df3['Supplier'] != \"Digikey\")]\r\n# Remet l'index a zero\r\ndf6 = df6.reset_index(drop=True)\r\n# Fait commencer l'index a 1\r\ndf6.index = df6.index + 1\r\n# Ecrit dans un fichier csv les autres pieces a commander\r\ndf6.to_csv('OtherBom.csv')\r\n\r\n\r\n\r\n#==============================================================================\r\n# import tkinter\r\n# from tkinter.filedialog import askopenfilename\r\n# \r\n#==============================================================================\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#==============================================================================\r\n# def browse():\r\n# global infile\r\n# infile=askopenfilename()\r\n# \r\n# def newfile();:\r\n# global oufile\r\n# outfine=askopenfilename()\r\n# \r\n# def BomFunction(outfile=outfile)\r\n# df = pandas.read_csv(infile)\r\n# \r\n# \r\n# \r\n#==============================================================================\r\n\r\n\r\n\r\n#==============================================================================\r\n# \r\n# root=tkinter.Tk()\r\n# \r\n# root.title(\"Bom Generator\")\r\n# \r\n# \r\n# label=tkinter.Label(root, text=\"Bom Generator for Eclipse\")\r\n# label.pack()\r\n# \r\n# \r\n# browseButton=tkinter.Button(root,text=\"Browse\", command=browse)\r\n# browseButton.pack()\r\n# \r\n# \r\n# \r\n# root.mainloop()\r\n# \r\n# \r\n#==============================================================================\r\n\r\n\r\n\r\n# For windows installer\r\n# 
pip install pyinstaller\r\n# pyinstaller --onefile --windoed \"name.py\"\r\n"
}
] | 1 |
ccie29824/CiscoDevNet-sastre
|
https://github.com/ccie29824/CiscoDevNet-sastre
|
e5329d1374c77da2e89e775990ca6c07f18af023
|
f5692dfa40903738c8ae7503932836bc3d928944
|
f048bd7316eca68fd35ce62c70cbc5ba638add77
|
refs/heads/master
| 2023-05-29T23:15:59.171507 | 2021-05-01T16:59:55 | 2021-05-01T17:30:42 | 375,074,608 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6235188245773315,
"alphanum_fraction": 0.6295723915100098,
"avg_line_length": 30.819671630859375,
"blob_id": "671914f56d3e17fb43ee291aab09e61370303a63",
"content_id": "d2fe85de49fa9892b19ec73728c1733376b8b7d7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7764,
"license_type": "permissive",
"max_line_length": 115,
"num_lines": 244,
"path": "/cisco_sdwan/tasks/utils.py",
"repo_name": "ccie29824/CiscoDevNet-sastre",
"src_encoding": "UTF-8",
"text": "\"\"\"\n Sastre - Automation Tools for Cisco SD-WAN Powered by Viptela\n\n cisco_sdwan.tasks.utils\n This module implements task utility classes and functions\n\"\"\"\nimport os\nimport re\nimport argparse\nfrom datetime import date\nfrom getpass import getpass\nfrom pathlib import Path\nfrom cisco_sdwan.base.catalog import catalog_tags, op_catalog_tags, op_catalog_commands, CATALOG_TAG_ALL, OpType\nfrom cisco_sdwan.base.models_base import filename_safe, DATA_DIR, ExtendedTemplate\nfrom .common import Task\n\n# Default local data store\nDEFAULT_WORKDIR_FORMAT = 'backup_{address}_{date:%Y%m%d}'\n\n\ndef default_workdir(address):\n return DEFAULT_WORKDIR_FORMAT.format(date=date.today(), address=address or 'VMANAGE-ADDRESS')\n\n\nclass TaskOptions:\n _task_options = {}\n\n @classmethod\n def task(cls, task_str):\n task_cls = cls._task_options.get(task_str)\n if task_cls is None:\n raise argparse.ArgumentTypeError(f'Invalid task. Options are: {cls.options()}.')\n return task_cls\n\n @classmethod\n def options(cls):\n return ', '.join(cls._task_options)\n\n @classmethod\n def register(cls, task_name):\n \"\"\"\n Decorator used for registering tasks.\n The class being decorated needs to be a subclass of Task.\n :param task_name: String presented to the user in order to select a task\n :return: decorator\n \"\"\"\n def decorator(task_cls):\n if not isinstance(task_cls, type) or not issubclass(task_cls, Task):\n raise SastreException(f'Invalid task registration attempt: {task_cls.__name__}')\n\n cls._task_options[task_name] = task_cls\n return task_cls\n\n return decorator\n\n\nclass TagOptions:\n tag_options = catalog_tags() | {CATALOG_TAG_ALL}\n\n @classmethod\n def tag(cls, tag_str):\n if tag_str not in cls.tag_options:\n raise argparse.ArgumentTypeError(f'\"{tag_str}\" is not a valid tag. 
Available tags: {cls.options()}.')\n\n return tag_str\n\n @classmethod\n def tag_list(cls, tag_str_list):\n return [cls.tag(tag_str) for tag_str in tag_str_list]\n\n @classmethod\n def options(cls):\n return ', '.join(sorted(cls.tag_options, key=lambda x: '' if x == CATALOG_TAG_ALL else x))\n\n\nclass OpCmdOptions:\n @classmethod\n def tags(cls, op_type: OpType) -> str:\n return ', '.join(\n sorted(op_catalog_tags(op_type) | {CATALOG_TAG_ALL}, key=lambda x: '' if x == CATALOG_TAG_ALL else x)\n )\n\n @classmethod\n def commands(cls, op_type: OpType) -> str:\n return ', '.join(sorted(op_catalog_commands(op_type)))\n\n\nclass OpCmdSemantics(argparse.Action):\n # Using an action as opposed to a type check so that it can evaluate the full command line passed as opposed to\n # individual tokens.\n op_type: OpType = None\n\n def __call__(self, parser, namespace, values, option_string=None):\n full_command = ' '.join(values)\n pass_options = [\n len(values) == 1 and CATALOG_TAG_ALL in values,\n len(values) == 1 and set(values) <= op_catalog_tags(self.op_type),\n full_command in op_catalog_commands(self.op_type)\n ]\n if not any(pass_options):\n raise argparse.ArgumentError(self, f'\"{full_command}\" is not valid. '\n f'Group options: {OpCmdOptions.tags(self.op_type)}. 
'\n f'Command options: {OpCmdOptions.commands(self.op_type)}.')\n\n setattr(namespace, self.dest, values)\n\n\nclass RTCmdSemantics(OpCmdSemantics):\n op_type: OpType = OpType.RT\n\n\nclass StateCmdSemantics(OpCmdSemantics):\n op_type: OpType = OpType.STATE\n\n\nclass StatsCmdSemantics(OpCmdSemantics):\n op_type: OpType = OpType.STATS\n\n\ndef regex_type(regex_str):\n try:\n re.compile(regex_str)\n except (re.error, TypeError):\n if regex_str is not None:\n raise argparse.ArgumentTypeError(f'\"{regex_str}\" is not a valid regular expression.') from None\n\n return regex_str\n\n\ndef existing_file_type(workdir_str):\n if not Path(DATA_DIR, workdir_str).exists():\n raise argparse.ArgumentTypeError(f'Work directory \"{workdir_str}\" not found.')\n\n return workdir_str\n\n\ndef filename_type(name_str):\n # Also allow . on filename, on top of what's allowed by filename_safe\n if re.sub(r'\\.', '_', name_str) != filename_safe(name_str):\n raise argparse.ArgumentTypeError(\n f'Invalid name \"{name_str}\". 
Only alphanumeric characters, \"-\", \"_\", and \".\" are allowed.'\n )\n return name_str\n\n\ndef uuid_type(uuid_str):\n if re.match(r'[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$', uuid_str) is None:\n raise argparse.ArgumentTypeError(f'\"{uuid_str}\" is not a valid item ID.')\n\n return uuid_str\n\n\ndef non_empty_type(src_str):\n out_str = src_str.strip()\n if len(out_str) == 0:\n raise argparse.ArgumentTypeError('Value cannot be empty.')\n\n return out_str\n\n\ndef ipv4_type(ipv4_str):\n if re.match(r'\\d+(?:\\.\\d+){3}$', ipv4_str) is None:\n raise argparse.ArgumentTypeError(f'\"{ipv4_str}\" is not a valid IPv4 address.')\n\n return ipv4_str\n\n\ndef site_id_type(site_id_str):\n try:\n site_id = int(site_id_str)\n if not 0 <= site_id <= 4294967295:\n raise ValueError()\n except ValueError:\n raise argparse.ArgumentTypeError(f'\"{site_id_str}\" is not a valid site-id.') from None\n\n return site_id_str\n\n\ndef version_type(version_str):\n # Development versions may follow this format: '20.1.999-98'\n if re.match(r'(\\d+[.-])*\\d+$', version_str) is None:\n raise argparse.ArgumentTypeError(f'\"{version_str}\" is not a valid version identifier.')\n\n return '.'.join(([str(int(v)) for v in version_str.replace('-', '.').split('.')] + ['0', ])[:2])\n\n\ndef int_type(min_val, max_val, value_str):\n try:\n value_int = int(value_str)\n if not min_val <= value_int <= max_val:\n raise ValueError()\n except ValueError:\n raise argparse.ArgumentTypeError(f'Invalid value: \"{value_str}\". 
Must be an integer between '\n f'{min_val} and {max_val}, inclusive.') from None\n\n return value_int\n\n\nclass EnvVar(argparse.Action):\n def __init__(self, nargs=None, envvar=None, required=True, default=None, **kwargs):\n if nargs is not None:\n raise ValueError('nargs not allowed')\n if envvar is None:\n raise ValueError('envvar is required')\n\n default = os.environ.get(envvar) or default\n required = required and default is None\n super().__init__(default=default, required=required, **kwargs)\n\n def __call__(self, parser, namespace, values, option_string=None):\n setattr(namespace, self.dest, values)\n\n\nclass PromptArg:\n def __init__(self, argument, prompt, secure_prompt=False, validate=non_empty_type):\n self.argument = argument\n self.prompt = prompt\n self.prompt_func = getpass if secure_prompt else input\n self.validate = validate\n\n def __call__(self):\n while True:\n try:\n value = self.validate(self.prompt_func(self.prompt))\n except argparse.ArgumentTypeError as ex:\n print(f'{ex} Please try again, or ^C to terminate.')\n else:\n return value\n\n\ndef ext_template_type(template_str):\n try:\n ExtendedTemplate(template_str)('test')\n except re.error:\n raise argparse.ArgumentTypeError('regular expression is invalid') from None\n except (KeyError, ValueError) as ex:\n raise argparse.ArgumentTypeError(ex) from None\n\n return template_str\n\n\nclass SastreException(Exception):\n \"\"\" Exception for main app errors \"\"\"\n pass\n"
},
{
"alpha_fraction": 0.5764117240905762,
"alphanum_fraction": 0.5768405795097351,
"avg_line_length": 60.359649658203125,
"blob_id": "319ef10dfd22feb8a0c036cebcff8277882b213b",
"content_id": "b04486ed388439ff5864af7eae9387e34bfb34ab",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6995,
"license_type": "permissive",
"max_line_length": 118,
"num_lines": 114,
"path": "/cisco_sdwan/tasks/implementation/_backup.py",
"repo_name": "ccie29824/CiscoDevNet-sastre",
"src_encoding": "UTF-8",
"text": "import argparse\nfrom cisco_sdwan.__version__ import __doc__ as title\nfrom cisco_sdwan.base.rest_api import RestAPIException\nfrom cisco_sdwan.base.catalog import catalog_iter, CATALOG_TAG_ALL\nfrom cisco_sdwan.base.models_base import ServerInfo\nfrom cisco_sdwan.base.models_vmanage import (DeviceConfig, DeviceConfigRFS, DeviceTemplate, DeviceTemplateAttached,\n DeviceTemplateValues, EdgeInventory, ControlInventory, EdgeCertificate)\nfrom cisco_sdwan.tasks.utils import TaskOptions, TagOptions, filename_type, regex_type, default_workdir\nfrom cisco_sdwan.tasks.common import regex_search, clean_dir, Task\n\n\[email protected]('backup')\nclass TaskBackup(Task):\n @staticmethod\n def parser(task_args, target_address=None):\n task_parser = argparse.ArgumentParser(description=f'{title}\\nBackup task:')\n task_parser.prog = f'{task_parser.prog} backup'\n task_parser.formatter_class = argparse.RawDescriptionHelpFormatter\n\n task_parser.add_argument('--workdir', metavar='<directory>', type=filename_type,\n default=default_workdir(target_address),\n help='backup destination (default: %(default)s)')\n task_parser.add_argument('--no-rollover', action='store_true',\n help='by default, if workdir already exists (before a new backup is saved) the old '\n 'workdir is renamed using a rolling naming scheme. This option disables this '\n 'automatic rollover.')\n task_parser.add_argument('--regex', metavar='<regex>', type=regex_type,\n help='regular expression matching item names to be backed up, within selected tags')\n task_parser.add_argument('tags', metavar='<tag>', nargs='+', type=TagOptions.tag,\n help='one or more tags for selecting items to be backed up. Multiple tags should be '\n f'separated by space. Available tags: {TagOptions.options()}. 
Special tag '\n f'\"{CATALOG_TAG_ALL}\" selects all items, including WAN edge certificates and '\n 'device configurations.')\n return task_parser.parse_args(task_args)\n\n def runner(self, parsed_args, api, task_output=None):\n self.log_info('Starting backup: vManage URL: \"%s\" -> Local workdir: \"%s\"', api.base_url, parsed_args.workdir)\n\n # Backup workdir must be empty for a new backup\n saved_workdir = clean_dir(parsed_args.workdir, max_saved=0 if parsed_args.no_rollover else 99)\n if saved_workdir:\n self.log_info('Previous backup under \"%s\" was saved as \"%s\"', parsed_args.workdir, saved_workdir)\n\n target_info = ServerInfo(server_version=api.server_version)\n if target_info.save(parsed_args.workdir):\n self.log_info('Saved vManage server information')\n\n # Backup items not registered to the catalog, but to be included when tag is 'all'\n if CATALOG_TAG_ALL in parsed_args.tags:\n edge_certs = EdgeCertificate.get(api)\n if edge_certs is None:\n self.log_error('Failed backup WAN edge certificates')\n elif edge_certs.save(parsed_args.workdir):\n self.log_info('Saved WAN edge certificates')\n\n for inventory, info in ((EdgeInventory.get(api), 'WAN edge'), (ControlInventory.get(api), 'controller')):\n if inventory is None:\n self.log_error('Failed retrieving %s inventory', info)\n continue\n\n for uuid, _, hostname, _ in inventory.extended_iter():\n if hostname is None:\n self.log_debug('Skipping %s, no hostname', uuid)\n continue\n\n for item, config_type in ((DeviceConfig.get(api, DeviceConfig.api_params(uuid)), 'CFS'),\n (DeviceConfigRFS.get(api, DeviceConfigRFS.api_params(uuid)), 'RFS')):\n if item is None:\n self.log_error('Failed backup %s device configuration %s', config_type, hostname)\n continue\n if item.save(parsed_args.workdir, item_name=hostname, item_id=uuid):\n self.log_info('Done %s device configuration %s', config_type, hostname)\n\n # Backup items registered to the catalog\n for _, info, index_cls, item_cls in 
catalog_iter(*parsed_args.tags, version=api.server_version):\n item_index = index_cls.get(api)\n if item_index is None:\n self.log_debug('Skipped %s, item not supported by this vManage', info)\n continue\n if item_index.save(parsed_args.workdir):\n self.log_info('Saved %s index', info)\n\n matched_item_iter = (\n (item_id, item_name) for item_id, item_name in item_index\n if parsed_args.regex is None or regex_search(parsed_args.regex, item_name)\n )\n for item_id, item_name in matched_item_iter:\n item = item_cls.get(api, item_id)\n if item is None:\n self.log_error('Failed backup %s %s', info, item_name)\n continue\n if item.save(parsed_args.workdir, item_index.need_extended_name, item_name, item_id):\n self.log_info('Done %s %s', info, item_name)\n\n # Special case for DeviceTemplateAttached and DeviceTemplateValues\n if isinstance(item, DeviceTemplate):\n devices_attached = DeviceTemplateAttached.get(api, item_id)\n if devices_attached is None:\n self.log_error('Failed backup %s %s attached devices', info, item_name)\n continue\n if devices_attached.save(parsed_args.workdir, item_index.need_extended_name, item_name, item_id):\n self.log_info('Done %s %s attached devices', info, item_name)\n else:\n self.log_debug('Skipped %s %s attached devices, none found', info, item_name)\n continue\n\n try:\n uuid_list = [uuid for uuid, _ in devices_attached]\n values = DeviceTemplateValues(api.post(DeviceTemplateValues.api_params(item_id, uuid_list),\n DeviceTemplateValues.api_path.post))\n if values.save(parsed_args.workdir, item_index.need_extended_name, item_name, item_id):\n self.log_info('Done %s %s values', info, item_name)\n except RestAPIException as ex:\n self.log_error('Failed backup %s %s values: %s', info, item_name, ex)\n"
},
{
"alpha_fraction": 0.7022842764854431,
"alphanum_fraction": 0.7265651226043701,
"avg_line_length": 51.06167221069336,
"blob_id": "090f5efbdce59d84b96aa1ea28ab9524f208d769",
"content_id": "793bb2de979e0fc68856b661a828f123b2b6aeb5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 11820,
"license_type": "permissive",
"max_line_length": 332,
"num_lines": 227,
"path": "/CHANGELOG.md",
"repo_name": "ccie29824/CiscoDevNet-sastre",
"src_encoding": "UTF-8",
"text": "Sastre 1.13 [April 30, 2021]\n============================\n\n#### Enhancements:\n[#67] Performance improvements to show realtime commands. Thread pool is now used to send multiple requests in parallel. \n Pool size is fixed to 10.\n[#68] Added show realtime omp adv-routes command, displaying advertised OMP routes from one or more WAN edges / vSmarts.\n[#71] Validation of template attach/detach, in a testbed with 200 devices. Action timeout increased to 20 minutes.\n[#72] Show-template values now by default display values for all templates with attachments when no match criteria is\n provided (i.e. no --name, --id or --regex).\n[#70] All show command output can now be exported as CSV files.\n[#60] Show task has been expanded with state and statistics subcommands.\n\nSastre 1.12 [March 10, 2021]\n============================\n\n#### Enhancements:\n- [#59] Template attach requests used in restore task (--attach and --force options) are improved to split attachment \n requests in chunks of up to 10 devices. Dry-run mode is now supported with --attach option.\n- [#63] Template detach requests used in delete task (--detach option) are improved to split detach requests in chunks of\n up to 10 devices. Dry-run mode is now supported with --detach option.\n- [#64] (Sastre-Pro) New attach task providing further customization on device template attach operations. Templates and \n devices can be filtered by regular expressions and device properties (reachability, system-ip, etc). Also, the maximum \n number of devices per vManage template attach request can be customized. By default, Sastre will split attach \n requests in chunks of up to 10 devices.\n- [#65] (Sastre-Pro) New detach task providing further customization on device template detach operations. Templates and \n devices can be filtered by regular expressions and device properties (reachability, system-ip, etc). Also, the maximum \n number of devices per vManage template detach request can be customized. 
By default, Sastre will split detach \n requests in chunks of up to 10 devices.\n \nIn this version we are also bumping up the minimal Python requirements to 3.8.\n\nSastre 1.11 [November 25, 2020]\n============================\n\n#### Enhancements:\n- [#20] Validated support for vManage 20.3.x and included new API endpoints:\n - Policy lists: fax protocol, modem passthrough, trunk group\n - Policy definitions: PRI ISDN port\n- [#47] The data store location can now be customized via the SASTRE_ROOT_DIR environment variable. When SASTRE_ROOT_DIR is not set, the data store is data/ under the directory where Sastre is run. This is the default behavior, as in all previous releases. When SASTRE_ROOT_DIR is set, the data store becomes $SASTRE_ROOT_DIR/data/.\n- [#48] Updated Dockerfile and container run instructions for better integration with CX CAT tool\n\n#### Fixes:\n- [#40] User not prompted for cx pid when it was not provided via cli or environment variable, if the task didn't require api. This has been fixed.\n\nSastre 1.10 [November 2, 2020]\n============================\n\n#### Enhancements:\n- [#29] Support for VMANAGE_PORT environment variable as an option to set TCP port for target vManage.\n- [#25] Python 3.9 support verified.\n\n#### Fixes:\n- [#10] A traceback would be generated on API authorization issues. E.g. read-only account used for a backup task (which requires POST calls). This has been fixed and a clear error message is now displayed.\n- [#35] vBond configuration check on restore task not working on multi-tenant mode. This has been fixed.\n- [#36] Migrate task would fail migration of cli-based device templates and feature templates containing a mix of vmanage and cEdge devices. This has been fixed.\n\nSastre 1.9 [October 13, 2020]\n============================\n\n#### Fixes:\n- [#27] CustomApp Policy restore failure in 20.3.1. 
\n\nSastre 1.8 [October 2, 2020]\n============================\n\n#### Enhancements:\n- Added Dockerfile and instructions to build and run the container (in the readme file).\n\nSastre 1.7 [September 16, 2020]\n============================\n\n#### Enhancements:\n- (Sastre-Pro) Including per-task time savings to AIDE metric collection. Also added support for CX project ID parameter.\n- (Sastre-Pro) Added show dpi summary realtime command\n\nSastre 1.6 [September 2, 2020]\n============================\n\n#### Enhancements:\n- Show software added to show task (Sastre-Pro feature).\n\n#### Fixes:\n- Improved show task to gracefully handle cases where older vManage/device releases may not have all queried table fields available. Whenever a particular device doesn't have a table field, \"N/A\" is returned.\n- Report task would fail with no report generated if any of its subtasks fail. This has been fixed, a report is still created containing the output of all non-failed subtasks. \n\nSastre 1.5 [September 2, 2020]\n============================\n\n#### New features:\n- New Show task available only on Sastre-Pro. Enable execution of select real-time commands across multiple devices and easy visualization in tables.\n\nSastre 1.4 [August 12, 2020]\n============================\n\n#### New features:\n- New Report task, which creates a report file consolidating the output of list configuration, list certificate, show-template values and show-template references.\n\n#### Fixes:\n- CustomApp policies were causing an exception during backup. This has been fixed.\n\nSastre 1.3 [July 23, 2020]\n============================\n\n#### Enhancements:\n- Split into Sastre and Sastre-Pro. Sastre-Pro will contain additional features. 
Current plan is to maintain release numbers in sync between the two variants.\n\nSastre 1.2 [June 22, 2020]\n============================\n\n#### New features:\n- Migrate task, allowing migration of feature templates and device templates to be compatible with vManage 20.1.\n- Transform option added to list task, allowing user to test name-regex transforms against existing item names.\n- References option added to show-template task, providing information on which device-templates reference a particular\n feature template.\n\n#### Enhancements:\n- vManage information (address, user and password) is no longer required when a task uses local workdir as\n source. For instance, list or show-template tasks when --workdir is provided.\n- Backup task now allows disabling of the automatic workdir rollover mechanism using the --no-rollover option. This\n is useful when the backup directory is being managed by an external version control tool (e.g. git).\n- Backup task now also include device configurations when tag 'all' is used. 
This includes WAN edges and controllers,\n also RFS and CFS configurations.\n\n\nSastre 0.37 [April 21, 2020]\n============================\n\n#### Fixes:\n- Restore task with --attach option when one or more WAN Edges or vSmarts are offline would show a warning that the\n template attach failed, even though it was successfully attached (with sync pending for offline devices).\n Similarly, if one or more vSmarts are offline vSmart policy would not be activated (with sync pending).\n This has been fixed.\n\n\nSastre 0.36 [April 10, 2020]\n============================\n\n#### Enhancements:\n- Validated support for vManage 20.1 and included new API endpoints:\n - Policy lists: media profile, translation profile, translation rules, supervisory disconnect, FQDN\n - Policy definitions: Dial peer, SRST phone profile, FXS port, FXO port, FXS-DID port, SSL decryption, SSL UTD profile\n - Voice policies, custom application policies\n- New API model versioning scheme to restrict REST API queries to only the endpoints supported by the target vManage.\n- User is now prompted for vManage address, username or password if they are not provided via command line or environment variables.\n\n\nSastre 0.35 [Mar 3, 2020]\n==========================\n\n#### Enhancements:\n- Backup task now also backup device certificates when the 'all' tag is used. The restore task does not restore\n certificates.\n- New certificate task, allowing device certificate validity status to be restored from a backup or set to a\n desired value (i.e. valid, invalid or staging).\n- List task now contains two sub-modes: configuration or certificate. List configuration works the same way as on\n previous releases by listing configuration items (e.g. device templates, feature templates, policies, etc.).\n The new certificate sub-mode allows listing of device certificate information from vManage or from a backup.\n- Restore task now verifies whether vBond is configured (Administration > Settings > vBond). 
If vBond is not\n configured, device templates are skipped from the restore as it would otherwise fail. A warning message notifies\n when this happens.\n\n\nSastre 0.34 [Jan 9, 2020]\n==========================\n\n#### Enhancements:\n- Validated support for vManage 19.3 and included new API endpoints supporting device access policies.\n- Included vManage version check. A warning is displayed during restore task if the vManage version on backup is\n newer than the version on target vManage. Maintenance releases (i.e. 3rd digit in the version number) are ignored\n for the purpose of this verification.\n\n\nSastre 0.33 [Dec 6, 2019]\n==========================\n\n#### Enhancements:\n- Sastre is now published to PyPI as cisco-sdwan package. When installed via pip, sdwan or sastre can be used to\n run the application.\n- When installed via source on github, the application can now be called using sdwan.py or sastre.py.\n\n\nSastre 0.31 [Nov 18, 2019]\n==========================\n\n#### Enhancements:\n- Template attach and reattach functions now support CLI templates. This means that restore --attach and --force\n options now support CLI templates in addition to feature-based device templates.\n- Added --regex option to backup task, allowing finner granularity into items included in the backup.\n\n\nSastre 0.30 [Oct 25, 2019]\n==========================\n\n#### Enhancements:\n- Backups now always create a new workdir. If the target workdir is already present, Sastre will save it with a\n number extension. For instance, if the target workdir is 'backup_production_20191022' and there is already a\n backup under this directory, this existing backup is moved to 'backup_production_20191022_1'. The number extension\n can go up to 99. At this point Sastre starts deleting the previous backup.\n\n#### Non-backwards compatible enhancements:\n- Backup database is changed in release 0.30. Individual items (e.g. 
device templates, feature templates, etc) are\n now stored with a filename containing the actual item name, as opposed to the item uuid. The directories where\n items are saved were also changed.\n In order to guarantee a filesystem safe filename, item name characters other than a-z, A-Z, ' ', '-' or '_' are\n replaced with an underscore '_' in the filename. In case of name collision, Sastre falls back to using filenames\n in the format <item name>_<item id>. For instance, if there is one device template named VEDGE_1K_v1 and another\n VEDGE/1K/v1, both will have the same filename-safe name (i.e. VEDGE_1K_v1). Sastre will save them as\n VEDGE_1K_v1_<uuid item 1>.json and VEDGE_1K_v1_<uuid item 2>.json.\n The latest release using the old backup format was tagged as 'v0.2'. If there is a need to use older backups,\n just git checkout this tag (git checkout v0.2).\n\n\nSastre 0.22 [Oct 10, 2019]\n==========================\n\n#### Enhancements:\n- Improved error handling for malformed json files in the backup. When backup json files fail to be loaded\n (i.e. parsed) additional details are now provided in the log message.\n\n\nSastre 0.21 [Oct 5, 2019]\n==========================\n\n#### Enhancements:\n- Added --force option to restore task. vManage items with the same name as backup items but with differences in\n their contents are updated with data from the backup. README file contains additional details.\n\n\n"
},
{
"alpha_fraction": 0.6022304892539978,
"alphanum_fraction": 0.613382875919342,
"avg_line_length": 19.69230842590332,
"blob_id": "f81581975176bb82c6c033f85472a6c6ecf4d83e",
"content_id": "5d764449f3a668fb97797599478ee24db9521dc0",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 269,
"license_type": "permissive",
"max_line_length": 69,
"num_lines": 13,
"path": "/sdwan.py",
"repo_name": "ccie29824/CiscoDevNet-sastre",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python3\n\"\"\"\nSastre - Automation Tools for Cisco SD-WAN Powered by Viptela\n\n\"\"\"\nimport re\nimport sys\n\nfrom cisco_sdwan.cmd import main\n\nif __name__ == '__main__':\n sys.argv[0] = re.sub(r'(-script\\.pyw?|\\.exe)?$', '', sys.argv[0])\n sys.exit(main())\n"
},
{
"alpha_fraction": 0.6079724431037903,
"alphanum_fraction": 0.6103701591491699,
"avg_line_length": 42.6143798828125,
"blob_id": "20ed07c3cf61aa19e1e8bf312d2d3e66aba1f791",
"content_id": "0d8cff81c6e9eb8f293175b3b670ca6682d482d5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6673,
"license_type": "permissive",
"max_line_length": 115,
"num_lines": 153,
"path": "/cisco_sdwan/cmd.py",
"repo_name": "ccie29824/CiscoDevNet-sastre",
"src_encoding": "UTF-8",
"text": "\"\"\"\n Sastre - Automation Tools for Cisco SD-WAN Powered by Viptela\n\n cisco_sdwan.cmd\n This module implements the command line top-level parser and task dispatcher\n\"\"\"\nimport logging\nimport logging.config\nimport logging.handlers\nimport argparse\nimport json\nimport sys\nfrom pathlib import Path\nfrom requests.exceptions import ConnectionError\nfrom .base.rest_api import Rest, LoginFailedException\nfrom .base.catalog import catalog_size, op_catalog_size\nfrom .base.models_base import ModelException, SASTRE_ROOT_DIR\nfrom .__version__ import __version__ as version\nfrom .__version__ import __doc__ as title\nfrom .tasks.utils import TaskOptions, EnvVar, non_empty_type, PromptArg\nfrom .tasks.implementation import *\n\n# vManage REST API defaults\nVMANAGE_PORT = '8443'\nREST_TIMEOUT = 300\nBASE_URL = 'https://{address}:{port}'\n\n# Default logging configuration - JSON formatted\n# Reason for setting level at chardet.charsetprober is to prevent unwanted debug messages from requests module\nLOGGING_CONFIG = '''\n{\n \"version\": 1,\n \"formatters\": {\n \"simple\": {\n \"format\": \"%(levelname)s: %(message)s\"\n },\n \"detailed\": {\n \"format\": \"%(asctime)s: %(name)s: %(levelname)s: %(message)s\"\n }\n },\n \"handlers\": {\n \"console\": {\n \"class\": \"logging.StreamHandler\",\n \"level\": \"WARN\",\n \"formatter\": \"simple\"\n },\n \"file\": {\n \"class\": \"logging.handlers.RotatingFileHandler\",\n \"filename\": \"logs/sastre.log\",\n \"backupCount\": 3,\n \"maxBytes\": 204800,\n \"level\": \"DEBUG\",\n \"formatter\": \"detailed\"\n }\n },\n \"root\": {\n \"handlers\": [\"console\", \"file\"],\n \"level\": \"DEBUG\"\n },\n \"loggers\": {\n \"chardet.charsetprober\": {\n \"level\": \"INFO\"\n }\n }\n}\n'''\n\n\ndef main():\n # Top-level cli parser\n cli_parser = argparse.ArgumentParser(description=title)\n cli_parser.add_argument('-a', '--address', metavar='<vmanage-ip>', action=EnvVar, required=False,\n envvar='VMANAGE_IP', 
type=non_empty_type,\n help='vManage IP address, can also be defined via VMANAGE_IP environment variable. '\n 'If neither is provided user is prompted for the address.')\n cli_parser.add_argument('-u', '--user', metavar='<user>', action=EnvVar, required=False,\n envvar='VMANAGE_USER', type=non_empty_type,\n help='username, can also be defined via VMANAGE_USER environment variable. '\n 'If neither is provided user is prompted for username.')\n cli_parser.add_argument('-p', '--password', metavar='<password>', action=EnvVar, required=False,\n envvar='VMANAGE_PASSWORD', type=non_empty_type,\n help='password, can also be defined via VMANAGE_PASSWORD environment variable. '\n ' If neither is provided user is prompted for password.')\n cli_parser.add_argument('--port', metavar='<port>', default=VMANAGE_PORT, action=EnvVar, envvar='VMANAGE_PORT',\n help='vManage port number, can also be defined via VMANAGE_PORT environment variable '\n '(default: %(default)s)')\n cli_parser.add_argument('--timeout', metavar='<timeout>', type=int, default=REST_TIMEOUT,\n help='REST API timeout (default: %(default)s)')\n cli_parser.add_argument('--verbose', action='store_true',\n help='increase output verbosity')\n cli_parser.add_argument('--version', action='version',\n version=f'Sastre Version {version}. 
Catalog: {catalog_size()} configuration items, '\n f'{op_catalog_size()} operational items.')\n cli_parser.add_argument('task', metavar='<task>', type=TaskOptions.task,\n help=f'task to be performed ({TaskOptions.options()})')\n cli_parser.add_argument('task_args', metavar='<arguments>', nargs=argparse.REMAINDER,\n help='task parameters, if any')\n cli_parser.set_defaults(prompt_arguments_api=[\n PromptArg('address', 'vManage address: '),\n PromptArg('user', 'vManage user: '),\n PromptArg('password', 'vManage password: ', secure_prompt=True)\n ])\n cli_args = cli_parser.parse_args()\n\n # Logging setup\n logging_config = json.loads(LOGGING_CONFIG)\n console_handler = logging_config.get('handlers', {}).get('console')\n if cli_args.verbose and console_handler is not None:\n console_handler['level'] = 'INFO'\n\n file_handler = logging_config.get('handlers', {}).get('file')\n if file_handler is not None:\n file_handler['filename'] = str(Path(SASTRE_ROOT_DIR, file_handler['filename']))\n Path(file_handler['filename']).parent.mkdir(parents=True, exist_ok=True)\n\n logging.config.dictConfig(logging_config)\n\n # Prepare task\n task = cli_args.task()\n target_address = cli_args.address\n parsed_task_args = task.parser(cli_args.task_args, target_address=target_address)\n is_api_required = task.is_api_required(parsed_task_args)\n\n # Evaluate whether user must be prompted for additional arguments\n prompt_args_list = getattr(cli_args, 'prompt_arguments', [])\n if is_api_required:\n prompt_args_list.extend(getattr(cli_args, 'prompt_arguments_api', []))\n try:\n for prompt_arg in prompt_args_list:\n if getattr(cli_args, prompt_arg.argument) is None:\n setattr(cli_args, prompt_arg.argument, prompt_arg())\n except KeyboardInterrupt:\n sys.exit(1)\n\n # Dispatch task\n try:\n if is_api_required:\n if target_address != cli_args.address:\n # Target address changed, re-run parser\n parsed_task_args = task.parser(cli_args.task_args, target_address=cli_args.address)\n\n base_url = 
BASE_URL.format(address=cli_args.address, port=cli_args.port)\n with Rest(base_url, cli_args.user, cli_args.password, timeout=cli_args.timeout) as api:\n # Dispatch to the appropriate task handler\n task.runner(parsed_task_args, api)\n\n else:\n # Dispatch to the appropriate task handler without api connection\n task.runner(parsed_task_args)\n\n task.log_info('Task completed %s', task.outcome('successfully', 'with caveats: {tally}'))\n except (LoginFailedException, ConnectionError, FileNotFoundError, ModelException) as ex:\n logging.getLogger(__name__).critical(ex)\n"
},
{
"alpha_fraction": 0.5828516483306885,
"alphanum_fraction": 0.5876685976982117,
"avg_line_length": 27.83333396911621,
"blob_id": "a2ebdb0f4975ce4a8369eb4c46f92d9d063efcb6",
"content_id": "6100c4b535063050c4900bf4b391888e42ecfb48",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1038,
"license_type": "permissive",
"max_line_length": 71,
"num_lines": 36,
"path": "/setup.py",
"repo_name": "ccie29824/CiscoDevNet-sastre",
"src_encoding": "UTF-8",
"text": "import setuptools\nfrom cisco_sdwan import __version__, __author__, __email__, __url__\n\nwith open(\"README.md\", \"r\") as f:\n long_description = f.read()\n\nsetuptools.setup(\n name=\"cisco-sdwan\",\n version=__version__,\n author=__author__,\n author_email=__email__,\n description=\"Automation Tools for Cisco SD-WAN Powered by Viptela\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=__url__,\n packages=setuptools.find_packages(),\n package_data={\n \"cisco_sdwan.migration\": [\"*.json\", \"feature_templates/*.json\"]\n },\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.8\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires='>=3.8',\n install_requires=[\n 'requests',\n ],\n entry_points={\n 'console_scripts': [\n 'sdwan=cisco_sdwan.cmd:main',\n 'sastre=cisco_sdwan.cmd:main',\n ],\n },\n)\n"
},
{
"alpha_fraction": 0.6202474236488342,
"alphanum_fraction": 0.6224228143692017,
"avg_line_length": 38.63835144042969,
"blob_id": "b51d32031b2bc9a8a9bda217a35e2d2ea3dd45e8",
"content_id": "07b18905f90bca9a8f1fc3dd9bebf600d5468e53",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 30799,
"license_type": "permissive",
"max_line_length": 120,
"num_lines": 777,
"path": "/cisco_sdwan/base/models_base.py",
"repo_name": "ccie29824/CiscoDevNet-sastre",
"src_encoding": "UTF-8",
"text": "\"\"\"\n Sastre - Automation Tools for Cisco SD-WAN Powered by Viptela\n\n cisco_sdwan.base.models_base\n This module implements vManage base API models\n\"\"\"\nimport json\nimport re\nfrom os import environ\nfrom pathlib import Path\nfrom itertools import zip_longest\nfrom collections import namedtuple\nfrom typing import Sequence, Dict, Tuple, Union, Iterator, Callable, Mapping, Any\nfrom operator import attrgetter\nfrom requests.exceptions import Timeout\nfrom .rest_api import RestAPIException, Rest\n\n\n# Top-level directory for local data store\nSASTRE_ROOT_DIR = Path(environ.get('SASTRE_ROOT_DIR', Path.cwd()))\nDATA_DIR = str(Path(SASTRE_ROOT_DIR, 'data'))\n\n\nclass UpdateEval:\n def __init__(self, data):\n self.is_policy = isinstance(data, list)\n # Master template updates (PUT requests) return a dict containing 'data' key. Non-master templates don't.\n self.is_master = isinstance(data, dict) and 'data' in data\n\n # This is to homogenize the response payload variants\n self.data = data.get('data') if self.is_master else data\n\n @property\n def need_reattach(self):\n return not self.is_policy and 'processId' in self.data\n\n @property\n def need_reactivate(self):\n return self.is_policy and len(self.data) > 0\n\n def templates_affected_iter(self):\n return iter(self.data.get('masterTemplatesAffected', []))\n\n def __str__(self):\n return json.dumps(self.data, indent=2)\n\n def __repr__(self):\n return json.dumps(self.data)\n\n\nclass ApiPath:\n \"\"\"\n Groups the API path for different operations available in an API item (i.e. get, post, put, delete).\n Each field contains a str with the API path, or None if the particular operations is not supported on this item.\n \"\"\"\n __slots__ = ('get', 'post', 'put', 'delete')\n\n def __init__(self, get, *other_ops):\n \"\"\"\n :param get: URL path for get operations\n :param other_ops: URL path for post, put and delete operations, in this order. 
If an item is not specified\n the same URL as the last operation provided is used.\n \"\"\"\n self.get = get\n last_op = other_ops[-1] if other_ops else get\n for field, value in zip_longest(self.__slots__[1:], other_ops, fillvalue=last_op):\n setattr(self, field, value)\n\n\nclass OperationalItem:\n \"\"\"\n Base class for operational data API elements\n \"\"\"\n api_path = None\n api_params = None\n fields_std = None\n fields_ext = None\n field_conversion_fns = {}\n\n def __init__(self, payload: Mapping[str, Any]) -> None:\n self.timestamp = payload['header']['generatedOn']\n\n self._data = payload['data']\n\n # Some vManage endpoints don't provide all properties in the 'columns' list, which is where 'title' is\n # defined. For those properties without a title, infer one based on the property name.\n self._meta = {attribute_safe(field['property']): field for field in payload['header']['fields']}\n title_dict = {attribute_safe(field['property']): field['title'] for field in payload['header']['columns']}\n for field_property, field in self._meta.items():\n field['title'] = title_dict.get(field_property, field['property'].replace('_', ' ').title())\n\n @property\n def field_names(self) -> Tuple[str, ...]:\n return tuple(self._meta.keys())\n\n def field_info(self, *field_names: str, info: str = 'title', default: Union[None, str] = 'N/A') -> tuple:\n \"\"\"\n Retrieve metadata about one or more fields.\n :param field_names: One or more field name to retrieve metadata from.\n :param info: Indicate which metadata to retrieve. 
By default, field title is returned.\n :param default: Value to be returned when a field_name does not exist.\n :return: tuple with one or more elements representing the desired metadata for each field requested.\n \"\"\"\n if len(field_names) == 1:\n return self._meta.get(field_names[0], {}).get(info, default),\n\n return tuple(entry.get(info, default) for entry in default_getter(*field_names, default={})(self._meta))\n\n def field_value_iter(self, *field_names: str, **conv_fn_map: Mapping[str, Callable]) -> Iterator[namedtuple]:\n \"\"\"\n Iterate over entries of a realtime instance. Only fields/columns defined by field_names are yield. Type\n conversion of one or more fields is supported by passing a callable that takes one argument (i.e. the field\n value) and returns the converted value. E.g. passing average_latency=int will convert a string average_latency\n field to an integer.\n :param field_names: Specify one or more field names to retrieve.\n :param conv_fn_map: Keyword arguments passed allow type conversions on fields.\n :return: A FieldValue object (named tuple) with attributes for each field_name.\n \"\"\"\n FieldValue = namedtuple('FieldValue', field_names)\n\n def default_conv_fn(field_val):\n return field_val if field_val is not None else ''\n\n conv_fn_list = [conv_fn_map.get(field_name, default_conv_fn) for field_name in field_names]\n field_properties = self.field_info(*field_names, info='property', default=None)\n\n def getter_fn(obj):\n return FieldValue._make(\n conv_fn(obj.get(field_property)) if field_property is not None else 'N/A'\n for conv_fn, field_property in zip(conv_fn_list, field_properties)\n )\n\n return (getter_fn(entry) for entry in self._data)\n\n @classmethod\n def get(cls, api: Rest, *args, **kwargs):\n try:\n instance = cls.get_raise(api, *args, **kwargs)\n return instance\n except (RestAPIException, Timeout):\n # Timeouts are more common with operational items, while less severe. 
Capturing here to allow execution to\n # proceed and not fail the whole task\n return None\n\n @classmethod\n def get_raise(cls, api: Rest, *args, **kwargs):\n raise NotImplementedError()\n\n def __str__(self) -> str:\n return json.dumps(self._data, indent=2)\n\n def __repr__(self) -> str:\n return json.dumps(self._data)\n\n\nclass RealtimeItem(OperationalItem):\n \"\"\"\n RealtimeItem represents a vManage realtime monitoring API element defined by an ApiPath with a GET path.\n An instance of this class can be created to retrieve and parse realtime endpoints.\n \"\"\"\n api_params = ('deviceId',)\n\n def __init__(self, payload: Mapping[str, Any]) -> None:\n super().__init__(payload)\n\n @classmethod\n def get_raise(cls, api: Rest, *args, **kwargs):\n params = kwargs or dict(zip(cls.api_params, args))\n return cls(api.get(cls.api_path.get, **params))\n\n\nclass BulkStatsItem(OperationalItem):\n \"\"\"\n BulkStatsItem represents a vManage bulk statistics API element defined by an ApiPath with a GET path. 
It supports\n vManage pagination protocol internally, abstracting it from the user.\n An instance of this class can be created to retrieve and parse bulk statistics endpoints.\n \"\"\"\n api_params = ('endDate', 'startDate', 'count', 'timeZone')\n fields_to_avg = tuple()\n field_node_id = 'vdevice_name'\n field_entry_time = 'entry_time'\n\n def __init__(self, payload: Mapping[str, Any]) -> None:\n super().__init__(payload)\n self._page_info = payload['pageInfo']\n\n @property\n def next_page(self) -> Union[str, None]:\n return self._page_info['scrollId'] if self._page_info['hasMoreData'] else None\n\n def add_payload(self, payload: Mapping[str, Any]) -> None:\n self._data.extend(payload['data'])\n self._page_info = payload['pageInfo']\n\n @classmethod\n def get_raise(cls, api: Rest, *args, **kwargs):\n params = kwargs or dict(zip(cls.api_params, args))\n obj = cls(api.get(cls.api_path.get, **params))\n while True:\n next_page = obj.next_page\n if next_page is None:\n break\n payload = api.get(cls.api_path.get, scrollId=next_page)\n obj.add_payload(payload)\n\n return obj\n\n @staticmethod\n def time_series_key(sample: namedtuple) -> str:\n \"\"\"\n Default key used to split a BulkStatsItem into its different time series. 
Subclasses need to override this as\n needed for the particular endpoint in question\n \"\"\"\n return sample.vdevice_name\n\n @staticmethod\n def last_n_secs(n_secs: int, sample_list: Sequence[namedtuple]) -> Iterator[namedtuple]:\n yield sample_list[0]\n\n oldest_ts = sample_list[0].entry_time - n_secs * 1000\n for sample in sample_list[1:]:\n if sample.entry_time < oldest_ts:\n break\n yield sample\n\n @staticmethod\n def average_fields(sample_list: Sequence[namedtuple], *fields_to_avg: str) -> dict:\n def average(values):\n avg = sum(values) / len(values)\n # If original values were integer, convert average back to integer\n return round(avg) if isinstance(values[0], int) else avg\n\n values_get_fn = attrgetter(*fields_to_avg)\n values_iter = (values_get_fn(sample) for sample in sample_list)\n\n return dict(zip(fields_to_avg, (average(field_samples) for field_samples in zip(*values_iter))))\n\n def aggregated_value_iter(self, interval_secs: int, *field_names: str,\n **conv_fn_map: Mapping[str, Callable]) -> Iterator[namedtuple]:\n # Split bulk stats samples into different time series\n time_series_dict = {}\n for sample in self.field_value_iter(self.field_entry_time, *field_names, **conv_fn_map):\n time_series_dict.setdefault(self.time_series_key(sample), []).append(sample)\n\n # Sort each time series by entry_time with newest samples first\n sort_key = attrgetter(self.field_entry_time)\n for time_series in time_series_dict.values():\n time_series.sort(key=sort_key, reverse=True)\n\n # Aggregation over newest n samples\n Aggregate = namedtuple('Aggregate', field_names)\n values_get_fn = attrgetter(*field_names)\n fields_to_avg = set(field_names) & set(self.fields_to_avg)\n for time_series_name, time_series in time_series_dict.items():\n if not time_series:\n continue\n\n series_last_n = list(self.last_n_secs(interval_secs, time_series))\n newest_sample = Aggregate._make(values_get_fn(series_last_n[0]))\n\n if fields_to_avg:\n yield 
newest_sample._replace(**self.average_fields(series_last_n, *fields_to_avg))\n else:\n yield newest_sample\n\n\nclass BulkStateItem(OperationalItem):\n \"\"\"\n BulkStateItem represents a vManage bulk state API element defined by an ApiPath with a GET path. It supports\n vManage pagination protocol internally, abstracting it from the user.\n An instance of this class can be created to retrieve and parse bulk state endpoints.\n \"\"\"\n api_params = ('count', )\n field_node_id = 'vdevice_name'\n\n def __init__(self, payload: Mapping[str, Any]) -> None:\n super().__init__(payload)\n self._page_info = payload['pageInfo']\n\n @property\n def next_page(self) -> Union[str, None]:\n return self._page_info['endId'] if self._page_info['moreEntries'] else None\n\n def add_payload(self, payload: Mapping[str, Any]) -> None:\n self._data.extend(payload['data'])\n self._page_info = payload['pageInfo']\n\n @property\n def page_item_count(self) -> int:\n return self._page_info['count']\n\n @classmethod\n def get_raise(cls, api: Rest, *args, **kwargs):\n params = kwargs or dict(zip(cls.api_params, args))\n obj = cls(api.get(cls.api_path.get, **params))\n while True:\n next_page = obj.next_page\n if next_page is None:\n break\n payload = api.get(cls.api_path.get, startId=next_page, count=obj.page_item_count)\n obj.add_payload(payload)\n\n return obj\n\n\ndef attribute_safe(raw_attribute):\n return re.sub(r'[^a-zA-Z0-9_]', '_', raw_attribute)\n\n\nclass ApiItem:\n \"\"\"\n ApiItem represents a vManage API element defined by an ApiPath with GET, POST, PUT and DELETE paths. 
An instance\n of this class can be created to store the contents of that vManage API element (self.data field).\n \"\"\"\n api_path = None # An ApiPath instance\n id_tag = None\n name_tag = None\n\n def __init__(self, data):\n \"\"\"\n :param data: dict containing the information to be associated with this api item\n \"\"\"\n self.data = data\n\n @property\n def uuid(self):\n return self.data[self.id_tag] if self.id_tag is not None else None\n\n @property\n def name(self):\n return self.data[self.name_tag] if self.name_tag is not None else None\n\n @property\n def is_empty(self):\n return self.data is None or len(self.data) == 0\n\n @classmethod\n def get(cls, api, *path_entries):\n try:\n return cls.get_raise(api, *path_entries)\n except RestAPIException:\n return None\n\n @classmethod\n def get_raise(cls, api, *path_entries):\n return cls(api.get(cls.api_path.get, *path_entries))\n\n def __str__(self):\n return json.dumps(self.data, indent=2)\n\n def __repr__(self):\n return json.dumps(self.data)\n\n\nclass IndexApiItem(ApiItem):\n \"\"\"\n IndexApiItem is an index-type ApiItem that can be iterated over, returning iter_fields\n \"\"\"\n def __init__(self, data):\n \"\"\"\n :param data: dict containing the information to be associated with this API item.\n \"\"\"\n super().__init__(data.get('data') if isinstance(data, dict) else data)\n\n # Iter_fields should be defined in subclasses and needs to be a tuple subclass.\n iter_fields = None\n # Extended_iter_fields should be defined in subclasses that use extended_iter, needs to be a tuple subclass.\n extended_iter_fields = None\n\n def __iter__(self):\n return self.iter(*self.iter_fields)\n\n def iter(self, *iter_fields):\n return (default_getter(*iter_fields)(elem) for elem in self.data)\n\n def extended_iter(self, default=None):\n \"\"\"\n Returns an iterator where each entry is composed of the combined fields of iter_fields and extended_iter_fields.\n None is returned on any fields that are missing in an entry\n 
:param default: Value to return when a field does not exist\n :return: The iterator\n \"\"\"\n return (default_getter(*self.iter_fields, *self.extended_iter_fields, default=default)(elem)\n for elem in self.data)\n\n\nclass ConfigItem(ApiItem):\n \"\"\"\n ConfigItem is an ApiItem that can be backed up and restored\n \"\"\"\n store_path = None\n store_file = None\n root_dir = DATA_DIR\n factory_default_tag = 'factoryDefault'\n readonly_tag = 'readOnly'\n owner_tag = 'owner'\n info_tag = 'infoTag'\n type_tag = None\n post_filtered_tags = None\n skip_cmp_tag_set = set()\n name_check_regex = re.compile(r'(?=^.{1,128}$)[^&<>! \"]+$')\n\n def __init__(self, data):\n \"\"\"\n :param data: dict containing the information to be associated with this configuration item\n \"\"\"\n super().__init__(data)\n\n def is_equal(self, other):\n local_cmp_dict = {k: v for k, v in self.data.items() if k not in self.skip_cmp_tag_set | {self.id_tag}}\n other_cmp_dict = {k: v for k, v in other.items() if k not in self.skip_cmp_tag_set | {self.id_tag}}\n\n return sorted(json.dumps(local_cmp_dict)) == sorted(json.dumps(other_cmp_dict))\n\n @property\n def is_readonly(self):\n return self.data.get(self.factory_default_tag, False) or self.data.get(self.readonly_tag, False)\n\n @property\n def is_system(self):\n return self.data.get(self.owner_tag, '') == 'system' or self.data.get(self.info_tag, '') == 'aci'\n\n @property\n def type(self):\n return self.data.get(self.type_tag)\n\n @classmethod\n def get_filename(cls, ext_name, item_name, item_id):\n if item_name is None or item_id is None:\n # Assume store_file does not have variables\n return cls.store_file\n\n safe_name = filename_safe(item_name) if not ext_name else '{name}_{uuid}'.format(name=filename_safe(item_name),\n uuid=item_id)\n return cls.store_file.format(item_name=safe_name, item_id=item_id)\n\n @classmethod\n def load(cls, node_dir, ext_name=False, item_name=None, item_id=None, raise_not_found=False, use_root_dir=True):\n \"\"\"\n 
Factory method that loads data from a json file and returns a ConfigItem instance with that data\n\n :param node_dir: String indicating directory under root_dir used for all files from a given vManage node.\n :param ext_name: True indicates that item_names need to be extended (with item_id) in order to make their\n filename safe version unique. False otherwise.\n :param item_name: (Optional) Name of the item being loaded. Variable used to build the filename.\n :param item_id: (Optional) UUID for the item being loaded. Variable used to build the filename.\n :param raise_not_found: (Optional) If set to True, raise FileNotFoundError if file is not found.\n :param use_root_dir: True indicates that node_dir is under the root_dir. When false, item should be located\n directly under node_dir/store_path\n :return: ConfigItem object, or None if file does not exist and raise_not_found=False\n \"\"\"\n dir_path = Path(cls.root_dir, node_dir, *cls.store_path) if use_root_dir else Path(node_dir, *cls.store_path)\n file_path = dir_path.joinpath(cls.get_filename(ext_name, item_name, item_id))\n try:\n with open(file_path, 'r') as read_f:\n data = json.load(read_f)\n except FileNotFoundError:\n if raise_not_found:\n has_detail = item_name is not None and item_id is not None\n detail = f': {item_name}, {item_id}' if has_detail else ''\n raise FileNotFoundError(f'{cls.__name__} file not found{detail}') from None\n return None\n except json.decoder.JSONDecodeError as ex:\n raise ModelException(f'Invalid JSON file: {file_path}: {ex}') from None\n else:\n return cls(data)\n\n def save(self, node_dir, ext_name=False, item_name=None, item_id=None):\n \"\"\"\n Save data (i.e. self.data) to a json file\n\n :param node_dir: String indicating directory under root_dir used for all files from a given vManage node.\n :param ext_name: True indicates that item_names need to be extended (with item_id) in order to make their\n filename safe version unique. 
False otherwise.\n :param item_name: (Optional) Name of the item being saved. Variable used to build the filename.\n :param item_id: (Optional) UUID for the item being saved. Variable used to build the filename.\n :return: True indicates data has been saved. False indicates no data to save (and no file has been created).\n \"\"\"\n if self.is_empty:\n return False\n\n dir_path = Path(self.root_dir, node_dir, *self.store_path)\n dir_path.mkdir(parents=True, exist_ok=True)\n\n with open(dir_path.joinpath(self.get_filename(ext_name, item_name, item_id)), 'w') as write_f:\n json.dump(self.data, write_f, indent=2)\n\n return True\n\n def post_data(self, id_mapping_dict, new_name=None):\n \"\"\"\n Build payload to be used for POST requests against this config item. From self.data, perform item id\n replacements defined in id_mapping_dict, also remove item id and rename item with new_name (if provided).\n :param id_mapping_dict: {<old item id>: <new item id>} dict. Matches of <old item id> are replaced with\n <new item id>\n :param new_name: String containing new name\n :return: Dict containing payload for POST requests\n \"\"\"\n # Delete keys that shouldn't be on post requests\n filtered_keys = {\n self.id_tag,\n '@rid',\n 'createdOn',\n 'lastUpdatedOn'\n }\n if self.post_filtered_tags is not None:\n filtered_keys.update(self.post_filtered_tags)\n post_dict = {k: v for k, v in self.data.items() if k not in filtered_keys}\n\n # Rename item\n if new_name is not None:\n post_dict[self.name_tag] = new_name\n\n return update_ids(id_mapping_dict, post_dict)\n\n def put_data(self, id_mapping_dict):\n \"\"\"\n Build payload to be used for PUT requests against this config item. From self.data, perform item id\n replacements defined in id_mapping_dict.\n :param id_mapping_dict: {<old item id>: <new item id>} dict. 
Matches of <old item id> are replaced with\n <new item id>\n :return: Dict containing payload for PUT requests\n \"\"\"\n filtered_keys = {\n '@rid',\n 'createdOn',\n 'lastUpdatedOn'\n }\n put_dict = {k: v for k, v in self.data.items() if k not in filtered_keys}\n\n return update_ids(id_mapping_dict, put_dict)\n\n @property\n def id_references_set(self):\n \"\"\"\n Return all references to other item ids by this item\n :return: Set containing id-based references\n \"\"\"\n filtered_keys = {\n self.id_tag,\n }\n filtered_data = {k: v for k, v in self.data.items() if k not in filtered_keys}\n\n return set(re.findall(r'[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}',\n json.dumps(filtered_data)))\n\n def get_new_name(self, name_template: str) -> Tuple[str, bool]:\n \"\"\"\n Return a new valid name for this item based on the format string template provided. Variable {name} is replaced\n with the existing item name. Other variables are provided via kwargs.\n :param name_template: str containing the name template to construct the new name.\n For example: migrated_{name&G_Branch_184_(.*)}\n :return: Tuple containing new name and an indication whether it is valid\n \"\"\"\n is_valid = False\n\n try:\n new_name = ExtendedTemplate(name_template)(self.data[self.name_tag])\n except KeyError:\n new_name = None\n else:\n if self.name_check_regex.search(new_name) is not None:\n is_valid = True\n\n return new_name, is_valid\n\n def find_key(self, key, from_key=None):\n \"\"\"\n Returns a list containing the values of all occurrences of key inside data. 
Matched values that are dict or list\n are not included.\n :param key: Key to search\n :param from_key: Top-level key under which to start the search\n :return: List\n \"\"\"\n match_list = []\n\n def find_in(json_obj):\n if isinstance(json_obj, dict):\n matched_val = json_obj.get(key)\n if matched_val is not None and not isinstance(matched_val, dict) and not isinstance(matched_val, list):\n match_list.append(matched_val)\n for value in json_obj.values():\n find_in(value)\n\n elif isinstance(json_obj, list):\n for elem in json_obj:\n find_in(elem)\n\n return match_list\n\n return find_in(self.data) if from_key is None else find_in(self.data[from_key])\n\n\n# Used for IndexConfigItem iter_fields when they follow (<item-id-label>, <item-name-label>) format\nIdName = namedtuple('IdName', ['id', 'name'])\n\n\nclass IndexConfigItem(ConfigItem):\n \"\"\"\n IndexConfigItem is an index-type ConfigItem that can be iterated over, returning iter_fields\n \"\"\"\n def __init__(self, data):\n \"\"\"\n :param data: dict containing the information to be associated with this configuration item.\n \"\"\"\n super().__init__(data.get('data') if isinstance(data, dict) else data)\n\n # When iter_fields is a regular tuple, it is completely opaque. 
However, if it is an IdName, then it triggers\n # an evaluation of whether there is collision amongst the filename_safe version of all names in this index.\n # need_extended_name = True indicates that there is collision and that extended names should be used when\n # saving/loading to/from backup\n if isinstance(self.iter_fields, IdName):\n filename_safe_set = {filename_safe(item_name, lower=True) for item_name in self.iter(self.iter_fields.name)}\n self.need_extended_name = len(filename_safe_set) != len(self.data)\n else:\n self.need_extended_name = False\n\n # Iter_fields should be defined in subclasses and needs to be a tuple subclass.\n # When it follows the format (<item-id>, <item-name>), use an IdName namedtuple instead of regular tuple.\n iter_fields = None\n # Extended_iter_fields should be defined in subclasses that use extended_iter, needs to be a tuple subclass.\n extended_iter_fields = None\n\n store_path = ('inventory', )\n\n @classmethod\n def create(cls, item_list: Sequence[ConfigItem], id_hint_dict: Dict[str, str]):\n def item_dict(item_obj: ConfigItem):\n return {\n key: item_obj.data.get(key, id_hint_dict.get(item_obj.name)) for key in cls.iter_fields\n }\n\n index_dict = {\n 'data': [item_dict(item) for item in item_list]\n }\n return cls(index_dict)\n\n def __iter__(self):\n return self.iter(*self.iter_fields)\n\n def iter(self, *iter_fields):\n return (default_getter(*iter_fields)(elem) for elem in self.data)\n\n def extended_iter(self, default=None):\n \"\"\"\n Returns an iterator where each entry is composed of the combined fields of iter_fields and extended_iter_fields.\n None is returned on any fields that are missing in an entry\n :param default: Value to return when a field does not exist\n :return: The iterator\n \"\"\"\n return (default_getter(*self.iter_fields, *self.extended_iter_fields, default=default)(elem)\n for elem in self.data)\n\n\nclass ServerInfo:\n root_dir = DATA_DIR\n store_file = 'server_info.json'\n\n def 
__init__(self, **kwargs):\n \"\"\"\n :param kwargs: key-value pairs of information about the vManage server\n \"\"\"\n self.data = kwargs\n\n def __getattr__(self, item):\n attr = self.data.get(item)\n if attr is None:\n raise AttributeError(f\"'{type(self).__name__}' object has no attribute '{item}'\")\n return attr\n\n @classmethod\n def load(cls, node_dir):\n \"\"\"\n Factory method that loads data from a json file and returns a ServerInfo instance with that data\n\n :param node_dir: String indicating directory under root_dir used for all files from a given vManage node.\n :return: ServerInfo object, or None if file does not exist\n \"\"\"\n dir_path = Path(cls.root_dir, node_dir)\n file_path = dir_path.joinpath(cls.store_file)\n try:\n with open(file_path, 'r') as read_f:\n data = json.load(read_f)\n except FileNotFoundError:\n return None\n except json.decoder.JSONDecodeError as ex:\n raise ModelException(f\"Invalid JSON file: {file_path}: {ex}\") from None\n else:\n return cls(**data)\n\n def save(self, node_dir):\n \"\"\"\n Save data (i.e. self.data) to a json file\n\n :param node_dir: String indicating directory under root_dir used for all files from a given vManage node.\n :return: True indicates data has been saved. False indicates no data to save (and no file has been created).\n \"\"\"\n dir_path = Path(self.root_dir, node_dir)\n dir_path.mkdir(parents=True, exist_ok=True)\n\n with open(dir_path.joinpath(self.store_file), 'w') as write_f:\n json.dump(self.data, write_f, indent=2)\n\n return True\n\n\ndef filename_safe(name: str, lower: bool = False) -> str:\n \"\"\"\n Perform the necessary replacements in <name> to make it filename safe.\n Any char that is not a-z, A-Z, 0-9, '_', ' ', or '-' is replaced with '_'. 
Convert to lowercase, if lower=True.\n :param lower: If True, apply str.lower() to result.\n :param name: name string to be converted\n :return: string containing the filename-save version of item_name\n \"\"\"\n # Inspired by Django's slugify function\n cleaned = re.sub(r'[^\\w\\s-]', '_', name)\n return cleaned.lower() if lower else cleaned\n\n\ndef update_ids(id_mapping_dict, item_data):\n def replace_id(match):\n matched_id = match.group(0)\n return id_mapping_dict.get(matched_id, matched_id)\n\n dict_json = re.sub(r'[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}',\n replace_id, json.dumps(item_data))\n\n return json.loads(dict_json)\n\n\nclass ExtendedTemplate:\n template_pattern = re.compile(r'{name(?:\\s+(?P<regex>.*?))?}')\n\n def __init__(self, template):\n self.src_template = template\n self.label_value_map = None\n\n def __call__(self, name):\n def regex_replace(match_obj):\n regex = match_obj.group('regex')\n if regex is not None:\n regex_p = re.compile(regex)\n if not regex_p.groups:\n raise KeyError('regular expression must include at least one capturing group')\n\n value, regex_p_subs = regex_p.subn(''.join(f'\\\\{group+1}' for group in range(regex_p.groups)), name)\n new_value = value if regex_p_subs else ''\n else:\n new_value = name\n\n label = 'name_{count}'.format(count=len(self.label_value_map))\n self.label_value_map[label] = new_value\n\n return f'{{{label}}}'\n\n self.label_value_map = {}\n template, name_p_subs = self.template_pattern.subn(regex_replace, self.src_template)\n if not name_p_subs:\n raise KeyError('template must include {name} variable')\n\n return template.format(**self.label_value_map)\n\n\ndef default_getter(*fields, default=None):\n if len(fields) == 1:\n def getter_fn(obj):\n return obj.get(fields[0], default)\n else:\n def getter_fn(obj):\n return tuple(obj.get(field, default) for field in fields)\n\n return getter_fn\n\n\nclass ModelException(Exception):\n \"\"\" Exception for REST API model errors 
\"\"\"\n pass\n"
},
{
"alpha_fraction": 0.6878072619438171,
"alphanum_fraction": 0.6958090662956238,
"avg_line_length": 36.61442565917969,
"blob_id": "e92f89d9d49830178898e6c7b790a0241ed69a5c",
"content_id": "f5312fa8f53f6f685ab978669a3d5194590c2c3c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 57362,
"license_type": "permissive",
"max_line_length": 120,
"num_lines": 1525,
"path": "/cisco_sdwan/base/models_vmanage.py",
"repo_name": "ccie29824/CiscoDevNet-sastre",
"src_encoding": "UTF-8",
"text": "\"\"\"\n Sastre - Automation Tools for Cisco SD-WAN Powered by Viptela\n\n cisco_sdwan.base.models_vmanage\n This module implements vManage API models\n\"\"\"\nfrom typing import Iterable, Set\nfrom pathlib import Path\nfrom collections import namedtuple\nfrom urllib.parse import quote_plus\nfrom .catalog import register, op_register\nfrom .models_base import (ApiItem, IndexApiItem, ConfigItem, IndexConfigItem, RealtimeItem, BulkStatsItem,\n BulkStateItem, ApiPath, IdName)\n\n\n#\n# Non-config items\n#\nclass DeviceModeCli(ApiItem):\n api_path = ApiPath(None, 'template/config/device/mode/cli', None, None)\n id_tag = 'id'\n\n @staticmethod\n def api_params(device_type, *device_ids):\n return {\n \"deviceType\": device_type,\n \"devices\": [{\"deviceId\": device_id} for device_id in device_ids]\n }\n\n\nclass DeviceTemplateAttach(ApiItem):\n api_path = ApiPath(None, 'template/device/config/attachfeature', None, None)\n id_tag = 'id'\n\n @staticmethod\n def api_params(template_input_iter, is_edited):\n \"\"\"\n Build dictionary used to provide input parameters for api POST call\n :param template_input_iter: An iterable of (<template_id>, <input_list>) tuples. 
Input_list is a list where\n each entry represents one attached device and is a dictionary of input\n variable names and values.\n :param is_edited: True if this is an in-place re-attach, False if this is a template attach.\n :return: Dictionary used to provide POST input parameters\n \"\"\"\n def template_entry(template_id, template_input_list):\n return {\n \"templateId\": template_id,\n \"device\": template_input_list,\n \"isEdited\": is_edited,\n \"isMasterEdited\": False,\n }\n\n return {\n \"deviceTemplateList\": [\n template_entry(item_id, input_list) for item_id, input_list in template_input_iter\n ]\n }\n\n\nclass DeviceTemplateCLIAttach(DeviceTemplateAttach):\n api_path = ApiPath(None, 'template/device/config/attachcli', None, None)\n\n\nclass PolicyVsmartDeactivate(ApiItem):\n api_path = ApiPath(None, 'template/policy/vsmart/deactivate', None, None)\n id_tag = 'id'\n\n\nclass PolicyVsmartActivate(ApiItem):\n api_path = ApiPath(None, 'template/policy/vsmart/activate', None, None)\n id_tag = 'id'\n\n @staticmethod\n def api_params(is_edited):\n return {\"isEdited\": True} if is_edited else {}\n\n\nclass PolicyVsmartStatus(ApiItem):\n api_path = ApiPath('template/policy/vsmart/connectivity/status', None, None, None)\n\n def raise_for_status(self):\n def vsmart_ready(vsmart_entry):\n return vsmart_entry['operationMode'] == 'vmanage'\n\n data_list = self.data.get('data', [])\n if len(data_list) == 0 or not all(vsmart_ready(entry) for entry in data_list):\n raise PolicyVsmartStatusException()\n\n\nclass PolicyVsmartStatusException(Exception):\n \"\"\" Exception indicating Vsmart status is not ready \"\"\"\n pass\n\n\nclass EdgeCertificateSync(ApiItem):\n api_path = ApiPath(None, 'certificate/vedge/list?action=push', None, None)\n id_tag = 'id'\n\n\nclass ActionStatus(ApiItem):\n api_path = ApiPath('device/action/status', None, None, None)\n\n @property\n def status(self):\n return self.data.get('summary', {}).get('status', None)\n\n @property\n def 
is_completed(self):\n return self.status == 'done'\n\n @property\n def is_successful(self):\n def task_success(task_entry):\n return task_entry['statusId'] == 'success' or task_entry['statusId'] == 'success_scheduled'\n\n data_list = self.data.get('data', [])\n # When action validation fails, returned data is empty\n if len(data_list) == 0:\n return False\n\n return all(task_success(entry) for entry in data_list)\n\n @property\n def activity_details(self):\n def device_details(task_entry):\n return '{hostname}: {activity}'.format(hostname=task_entry.get('host-name', '<unknown>'),\n activity=', '.join(task_entry.get('activity', [])))\n\n data_list = self.data.get('data', [])\n # When action validation fails, returned data is empty\n if len(data_list) == 0:\n return 'No data in action status'\n\n return ', '.join(device_details(entry) for entry in data_list)\n\n\nclass CheckVBond(ApiItem):\n api_path = ApiPath('template/device/config/vbond', None, None, None)\n\n @property\n def is_configured(self):\n return self.data.get('isVbondConfigured', False)\n\n\n#\n# Device Inventory\n#\nclass Device(IndexApiItem):\n api_path = ApiPath('device', None, None, None)\n iter_fields = ('uuid', 'host-name')\n\n extended_iter_fields = ('deviceId', 'site-id', 'reachability', 'device-type', 'device-model')\n\n\nclass EdgeInventory(IndexApiItem):\n api_path = ApiPath('system/device/vedges', None, None, None)\n iter_fields = ('uuid', 'vedgeCertificateState')\n\n extended_iter_fields = ('host-name', 'system-ip')\n\n\nclass ControlInventory(IndexApiItem):\n api_path = ApiPath('system/device/controllers', None, None, None)\n iter_fields = ('uuid', 'validity')\n\n extended_iter_fields = ('host-name', 'system-ip')\n\n @staticmethod\n def is_vsmart(device_type):\n return device_type is not None and device_type == 'vsmart'\n\n @staticmethod\n def is_vbond(device_type):\n return device_type is not None and device_type == 'vbond'\n\n @staticmethod\n def is_manage(device_type):\n return 
device_type == 'vmanage'\n\n def filtered_iter(self, filter_fn):\n return (\n (item_id, item_name) for item_type, item_id, item_name\n in self.iter('deviceType', *self.iter_fields) if filter_fn(item_type)\n )\n\n\n#\n# Device configuration\n#\nclass DeviceConfig(ConfigItem):\n api_path = ApiPath('template/config/attached', None, None, None)\n store_path = ('device_configs', )\n store_file = '{item_name}.txt'\n\n def save(self, node_dir, ext_name=False, item_name=None, item_id=None):\n \"\"\"\n Save data (i.e. self.data) to a json file\n\n :param node_dir: String indicating directory under root_dir used for all files from a given vManage node.\n :param ext_name: True indicates that item_names need to be extended (with item_id) in order to make their\n filename safe version unique. False otherwise.\n :param item_name: (Optional) Name of the item being saved. Variable used to build the filename.\n :param item_id: (Optional) UUID for the item being saved. Variable used to build the filename.\n :return: True indicates data has been saved. 
False indicates no data to save (and no file has been created).\n \"\"\"\n if self.is_empty:\n return False\n\n dir_path = Path(self.root_dir, node_dir, *self.store_path)\n dir_path.mkdir(parents=True, exist_ok=True)\n\n with open(dir_path.joinpath(self.get_filename(ext_name, item_name, item_id)), 'w') as write_f:\n write_f.write(self.data['config'])\n\n return True\n\n @staticmethod\n def api_params(device_id):\n # Device uuid is not url-safe\n return quote_plus(device_id)\n\n\nclass DeviceConfigRFS(DeviceConfig):\n store_file = '{item_name}_rfs.txt'\n\n @staticmethod\n def api_params(device_id):\n # Device uuid is not url-safe\n return '{safe_device_id}?type=RFS'.format(safe_device_id=quote_plus(device_id))\n\n\n#\n# Templates\n#\nclass CliOrFeatureApiPath:\n def __init__(self, api_path_feature, api_path_cli):\n self.api_path_feature = api_path_feature\n self.api_path_cli = api_path_cli\n\n def __get__(self, instance, owner):\n # If called from class, assume its a feature template\n is_cli_template = instance is not None and instance.is_type_cli\n\n return self.api_path_cli if is_cli_template else self.api_path_feature\n\n\n# Set of device types that use cedge template class. 
Updated as of vManage 20.3.2\nCEDGE_SET = {\n \"vedge-CSR-1000v\", \"vedge-ISR-4331\", \"vedge-ISR-4431\", \"vedge-ISR-4461\", \"vedge-ISR-4451-X\",\n \"vedge-C8300-1N1S-4T2X\", \"vedge-IR-1101\", \"vedge-C8300-1N1S-6T\", \"vedge-ISRv\", \"vedge-ISR-4321\", \"vedge-ISR-4351\",\n \"vedge-ISR-4221\", \"vedge-ISR-4221X\", \"vedge-ASR-1001-X\", \"vedge-ASR-1002-X\", \"vedge-ASR-1002-HX\",\n \"vedge-ASR-1001-HX\", \"vedge-C8500-12X4QC\", \"vedge-C8500-12X\", \"vedge-C1101-4P\", \"vedge-C1101-4PLTEP\",\n \"vedge-C1111-4P\", \"vedge-C1161X-8P\", \"vedge-C1111-8P\", \"vedge-C1113-8PLTEEA\", \"vedge-C1121X-8P\", \"vedge-C1111X-8P\",\n \"vedge-C1111-8PW\", \"vedge-C1111-8PLTEEA\", \"vedge-C1121-8PLTEPW\", \"vedge-C1111-8PLTELAW\", \"vedge-C1111-8PLTEEAW\",\n \"vedge-C1111-8PLTELA\", \"vedge-C1111-4PLTEEA\", \"vedge-C1101-4PLTEPW\", \"vedge-C1109-4PLTE2PW\", \"vedge-C1109-4PLTE2P\",\n \"vedge-C1109-2PLTEVZ\", \"vedge-C1109-2PLTEUS\", \"vedge-C1109-2PLTEGB\", \"vedge-C1121X-8PLTEP\", \"vedge-C1161X-8PLTEP\",\n \"vedge-C1113-8PMLTEEA\", \"vedge-C1111-4PLTELA\", \"vedge-C1116-4P\", \"vedge-C1116-4PLTEEA\", \"vedge-C1121-8P\",\n \"vedge-C1121-8PLTEP\", \"vedge-C1128-8PLTEP\", \"vedge-C1121-4PLTEP\", \"vedge-C1121-4P\", \"vedge-C1126-8PLTEP\",\n \"vedge-C1127-8PLTEP\", \"vedge-C1161-8P\", \"vedge-C1117-4P\", \"vedge-C1117-4PM\", \"vedge-C1117-4PLTEEA\",\n \"vedge-C1126X-8PLTEP\", \"vedge-C1127X-8PLTEP\", \"vedge-C1121X-8PLTEPW\", \"vedge-C1127X-8PMLTEP\", \"vedge-C1127-8PMLTEP\",\n \"vedge-C1117-4PLTELA\", \"vedge-nfvis-ENCS5400\", 'vedge-C1113-8PLTEW', 'vedge-ESR-6300', \"vedge-C8300-2N2S-6T\",\n \"vedge-C8300-2N2S-4T2X\", \"vedge-C1117-4PMLTEEA\"\n}\n\n\nclass DeviceTemplate(ConfigItem):\n api_path = CliOrFeatureApiPath(\n ApiPath('template/device/object', 'template/device/feature', 'template/device'),\n ApiPath('template/device/object', 'template/device/cli', 'template/device')\n )\n store_path = ('device_templates', 'template')\n store_file = '{item_name}.json'\n name_tag = 
'templateName'\n post_filtered_tags = ('feature', )\n skip_cmp_tag_set = {'createdOn', 'createdBy', 'lastUpdatedBy', 'lastUpdatedOn', '@rid', 'owner', 'infoTag',\n 'templateAttached', 'templateConfigurationEdited'}\n\n @property\n def is_type_cli(self) -> bool:\n return self.data.get('configType', 'template') == 'file'\n\n @property\n def is_cedge(self) -> bool:\n return self.data['deviceType'] in CEDGE_SET\n\n def contains_template(self, template_type: str) -> bool:\n return template_type in self.find_key('templateType')\n\n @property\n def feature_templates(self) -> Set[str]:\n return set(self.find_key('templateId', from_key='generalTemplates'))\n\n\n@register('template_device', 'device template', DeviceTemplate)\nclass DeviceTemplateIndex(IndexConfigItem):\n api_path = ApiPath('template/device', None, None, None)\n store_file = 'device_templates.json'\n iter_fields = IdName('templateId', 'templateName')\n\n @staticmethod\n def is_vsmart(device_type, num_attached):\n return device_type is not None and device_type == 'vsmart' and num_attached > 0\n\n @staticmethod\n def is_not_vsmart(device_type, num_attached):\n return device_type is not None and device_type != 'vsmart' and num_attached > 0\n\n @staticmethod\n def is_cedge(device_type, num_attached):\n return device_type in CEDGE_SET\n\n def filtered_iter(self, filter_fn):\n return (\n (item_id, item_name) for item_type, item_attached, item_id, item_name\n in self.iter('deviceType', 'devicesAttached', *self.iter_fields) if filter_fn(item_type, item_attached)\n )\n\n\n# This is a special case handled under DeviceTemplate\nclass DeviceTemplateAttached(IndexConfigItem):\n api_path = ApiPath('template/device/config/attached', None, None, None)\n store_path = ('device_templates', 'attached')\n store_file = '{item_name}.json'\n iter_fields = ('uuid', 'personality')\n\n\n# This is a special case handled under DeviceTemplate\nclass DeviceTemplateValues(ConfigItem):\n api_path = ApiPath(None, 
'template/device/config/input', None, None)\n store_path = ('device_templates', 'values')\n store_file = '{item_name}.json'\n\n @staticmethod\n def api_params(template_id, device_uuid_list):\n \"\"\"\n Build dictionary used to provide input parameters for api POST call\n :param template_id: String containing the template ID\n :param device_uuid_list: List of device UUIDs\n :return: Dictionary used to provide POST input parameters\n \"\"\"\n return {\n \"deviceIds\": device_uuid_list,\n \"isEdited\": False,\n \"isMasterEdited\": False,\n \"templateId\": template_id\n }\n\n def input_list(self, allowed_uuid_set=None):\n \"\"\"\n Return list of device input entries. Each entry represents one attached device and is a dictionary of input\n variable names and values.\n :param allowed_uuid_set: Optional, set of uuids. If provided, only input entries for those uuids are returned\n :return: [{<input_var_name>: <input_var_value>, ...}, ...]\n \"\"\"\n return [entry for entry in self.data.get('data', [])\n if allowed_uuid_set is None or entry.get('csv-deviceId') in allowed_uuid_set]\n\n @staticmethod\n def input_list_devices(input_list: list) -> Iterable[str]:\n return (entry.get('csv-host-name') for entry in input_list)\n\n def values_iter(self):\n return (\n (entry.get('csv-deviceId'), entry.get('csv-host-name'), entry) for entry in self.data.get('data', [])\n )\n\n def title_dict(self):\n return {column['property']: column['title'] for column in self.data.get('header', {}).get('columns', [])}\n\n def __iter__(self):\n return self.values_iter()\n\n\nclass FeatureTemplate(ConfigItem):\n api_path = ApiPath('template/feature/object', 'template/feature')\n store_path = ('feature_templates', )\n store_file = '{item_name}.json'\n id_tag = 'templateId'\n name_tag = 'templateName'\n type_tag = 'templateType'\n skip_cmp_tag_set = {'createdOn', 'createdBy', 'lastUpdatedBy', 'lastUpdatedOn', '@rid', 'owner', 'infoTag',\n 'devicesAttached', 'attachedMastersCount'}\n\n @property\n def 
device_types(self) -> Set[str]:\n return set(self.data.get('deviceType', []))\n\n @device_types.setter\n def device_types(self, device_type_iter: Iterable[str]) -> None:\n self.data['deviceType'] = [device_type for device_type in device_type_iter]\n\n @property\n def masters_attached(self) -> int:\n \"\"\"\n Returns number of device templates (i.e. master templates) that utilize this feature template\n \"\"\"\n return self.data.get('attachedMastersCount')\n\n @property\n def devices_attached(self) -> int:\n \"\"\"\n Returns number of devices attached to device templates attached to this feature template\n \"\"\"\n return self.data.get('devicesAttached')\n\n\n@register('template_feature', 'feature template', FeatureTemplate)\nclass FeatureTemplateIndex(IndexConfigItem):\n api_path = ApiPath('template/feature', None, None, None)\n store_file = 'feature_templates.json'\n iter_fields = IdName('templateId', 'templateName')\n\n @staticmethod\n def filter_type_default(desired_type: str, desired_is_default: bool, item_type: str, item_is_default: bool) -> bool:\n \"\"\"\n Intended to be used along with partial to create a filter_fn that matches on desired_type and\n desired_is_default values. 
Partial locks the desired_type and desired_is_default parameters.\n :param desired_type: Desired feature templateType\n :param desired_is_default: Whether to match only factoryDefault templates\n :param item_type: templateType from feature template being matched\n :param item_is_default: factoryDefault from feature template being matched\n :returns: True if conditions matched, false otherwise\n \"\"\"\n if desired_is_default and not item_is_default:\n return False\n\n return desired_type == item_type\n\n def filtered_iter(self, filter_fn):\n return (\n (item_id, item_name) for item_type, item_is_default, item_id, item_name\n in self.iter('templateType', 'factoryDefault', *self.iter_fields) if filter_fn(item_type, item_is_default)\n )\n\n\n#\n# Policy vSmart\n#\n\nclass PolicyVsmart(ConfigItem):\n api_path = ApiPath('template/policy/vsmart/definition', 'template/policy/vsmart')\n store_path = ('policy_templates', 'vSmart')\n store_file = '{item_name}.json'\n name_tag = 'policyName'\n type_tag = 'policyType'\n skip_cmp_tag_set = {'isPolicyActivated', }\n\n\n@register('policy_vsmart', 'VSMART policy', PolicyVsmart)\nclass PolicyVsmartIndex(IndexConfigItem):\n api_path = ApiPath('template/policy/vsmart', None, None, None)\n store_file = 'policy_templates_vsmart.json'\n iter_fields = IdName('policyId', 'policyName')\n\n @property\n def active_policy(self):\n \"\"\"\n Return ID and name from active policy or (None, None) if no policy is active\n :return: (<id>, <name>) or (None, None)\n \"\"\"\n for is_active, item_id, item_name in self.iter('isPolicyActivated', *self.iter_fields):\n if is_active:\n return item_id, item_name\n return None, None\n\n\n#\n# Policy vEdge\n#\n\nclass PolicyVedge(ConfigItem):\n api_path = ApiPath('template/policy/vedge/definition', 'template/policy/vedge')\n store_path = ('policy_templates', 'vEdge')\n store_file = '{item_name}.json'\n name_tag = 'policyName'\n type_tag = 'policyType'\n\n\n@register('policy_vedge', 'edge policy', 
PolicyVedge)\nclass PolicyVedgeIndex(IndexConfigItem):\n api_path = ApiPath('template/policy/vedge', None, None, None)\n store_file = 'policy_templates_vedge.json'\n iter_fields = IdName('policyId', 'policyName')\n\n\n#\n# Policy Security\n#\n\nclass PolicySecurity(ConfigItem):\n api_path = ApiPath('template/policy/security/definition', 'template/policy/security')\n store_path = ('policy_templates', 'Security')\n store_file = '{item_name}.json'\n name_tag = 'policyName'\n type_tag = 'policyType'\n\n\n@register('policy_security', 'security policy', PolicySecurity)\nclass PolicySecurityIndex(IndexConfigItem):\n api_path = ApiPath('template/policy/security', None, None, None)\n store_file = 'policy_templates_security.json'\n iter_fields = IdName('policyId', 'policyName')\n\n\n#\n# Policy Voice\n#\n\nclass PolicyVoice(ConfigItem):\n api_path = ApiPath('template/policy/voice/definition', 'template/policy/voice')\n store_path = ('policy_templates', 'Voice')\n store_file = '{item_name}.json'\n name_tag = 'policyName'\n type_tag = 'policyType'\n\n\n@register('policy_voice', 'voice policy', PolicyVoice, min_version='20.1')\nclass PolicyVoiceIndex(IndexConfigItem):\n api_path = ApiPath('template/policy/voice', None, None, None)\n store_file = 'policy_templates_voice.json'\n iter_fields = IdName('policyId', 'policyName')\n\n\n#\n# Policy Custom Application\n#\n\nclass PolicyCustomApp(ConfigItem):\n api_path = ApiPath('template/policy/customapp')\n store_path = ('policy_templates', 'CustomApp')\n store_file = '{item_name}.json'\n name_tag = 'appName'\n id_tag = 'appId'\n skip_cmp_tag_set = {'lastUpdated', }\n\n def __init__(self, data):\n \"\"\"\n :param data: dict containing the information to be associated with this API item.\n \"\"\"\n # In 20.3.1 the payload returned by vManage contains a 'data' key with the policy definition in it. This is\n # different than on previous versions or other ConfigItems. 
Overwriting the default __init__ in order to\n # handle both options.\n super().__init__(data.get('data', data))\n\n\n@register('policy_customapp', 'custom application policy', PolicyCustomApp, min_version='20.1')\nclass PolicyCustomAppIndex(IndexConfigItem):\n api_path = ApiPath('template/policy/customapp', None, None, None)\n store_file = 'policy_templates_customapp.json'\n iter_fields = IdName('appId', 'appName')\n\n\n#\n# Policy definitions\n#\n\n# Policy definition base class\nclass PolicyDef(ConfigItem):\n store_file = '{item_name}.json'\n id_tag = 'definitionId'\n name_tag = 'name'\n type_tag = 'type'\n skip_cmp_tag_set = {'lastUpdated', 'referenceCount', 'references', 'activatedId', 'isActivatedByVsmart',\n 'owner', 'infoTag'}\n\n\n# Policy definition index base class\nclass PolicyDefIndex(IndexConfigItem):\n iter_fields = IdName('definitionId', 'name')\n\n\nclass PolicyDefData(PolicyDef):\n api_path = ApiPath('template/policy/definition/data')\n store_path = ('policy_definitions', 'Data')\n\n\n@register('policy_definition', 'data policy definition', PolicyDefData)\nclass PolicyDefDataIndex(PolicyDefIndex):\n api_path = ApiPath('template/policy/definition/data', None, None, None)\n store_file = 'policy_definitions_data.json'\n\n\nclass PolicyDefMesh(PolicyDef):\n api_path = ApiPath('template/policy/definition/mesh')\n store_path = ('policy_definitions', 'Mesh')\n\n\n@register('policy_definition', 'mesh policy definition', PolicyDefMesh)\nclass PolicyDefMeshIndex(PolicyDefIndex):\n api_path = ApiPath('template/policy/definition/mesh', None, None, None)\n store_file = 'policy_definitions_mesh.json'\n\n\nclass PolicyDefRewriteRule(PolicyDef):\n api_path = ApiPath('template/policy/definition/rewriterule')\n store_path = ('policy_definitions', 'RewriteRule')\n\n\n@register('policy_definition', 'rewrite-rule policy definition', PolicyDefRewriteRule)\nclass PolicyDefRewriteRuleIndex(PolicyDefIndex):\n api_path = ApiPath('template/policy/definition/rewriterule', 
None, None, None)\n store_file = 'policy_definitions_rewriterule.json'\n\n\nclass PolicyDefAclv6(PolicyDef):\n api_path = ApiPath('template/policy/definition/aclv6')\n store_path = ('policy_definitions', 'ACLv6')\n\n\n@register('policy_definition', 'ACLv6 policy definition', PolicyDefAclv6)\nclass PolicyDefAclv6Index(PolicyDefIndex):\n api_path = ApiPath('template/policy/definition/aclv6', None, None, None)\n store_file = 'policy_definitions_aclv6.json'\n\n\nclass PolicyDefQosmap(PolicyDef):\n api_path = ApiPath('template/policy/definition/qosmap')\n store_path = ('policy_definitions', 'QoSMap')\n\n\n@register('policy_definition', 'QOS-map policy definition', PolicyDefQosmap)\nclass PolicyDefQosmapIndex(PolicyDefIndex):\n api_path = ApiPath('template/policy/definition/qosmap', None, None, None)\n store_file = 'policy_definitions_qosmap.json'\n\n\nclass PolicyDefUrlfiltering(PolicyDef):\n api_path = ApiPath('template/policy/definition/urlfiltering')\n store_path = ('policy_definitions', 'URLFiltering')\n\n\n@register('policy_definition', 'URL-filtering policy definition', PolicyDefUrlfiltering)\nclass PolicyDefUrlfilteringIndex(PolicyDefIndex):\n api_path = ApiPath('template/policy/definition/urlfiltering', None, None, None)\n store_file = 'policy_definitions_urlfiltering.json'\n\n\nclass PolicyDefZonebasedfw(PolicyDef):\n api_path = ApiPath('template/policy/definition/zonebasedfw')\n store_path = ('policy_definitions', 'ZoneBasedFW')\n\n\n@register('policy_definition', 'zone-based FW policy definition', PolicyDefZonebasedfw)\nclass PolicyDefZonebasedfwIndex(PolicyDefIndex):\n api_path = ApiPath('template/policy/definition/zonebasedfw', None, None, None)\n store_file = 'policy_definitions_zonebasedfw.json'\n\n\nclass PolicyDefApproute(PolicyDef):\n api_path = ApiPath('template/policy/definition/approute')\n store_path = ('policy_definitions', 'AppRoute')\n\n\n@register('policy_definition', 'appRoute policy definition', PolicyDefApproute)\nclass 
PolicyDefApprouteIndex(PolicyDefIndex):\n api_path = ApiPath('template/policy/definition/approute', None, None, None)\n store_file = 'policy_definitions_approute.json'\n\n\nclass PolicyDefVpnmembershipgroup(PolicyDef):\n api_path = ApiPath('template/policy/definition/vpnmembershipgroup')\n store_path = ('policy_definitions', 'VPNMembershipGroup')\n\n\n@register('policy_definition', 'VPN membership policy definition', PolicyDefVpnmembershipgroup)\nclass PolicyDefVpnmembershipgroupIndex(PolicyDefIndex):\n api_path = ApiPath('template/policy/definition/vpnmembershipgroup', None, None, None)\n store_file = 'policy_definitions_vpnmembershipgroup.json'\n\n\nclass PolicyDefAcl(PolicyDef):\n api_path = ApiPath('template/policy/definition/acl')\n store_path = ('policy_definitions', 'ACL')\n\n\n@register('policy_definition', 'ACL policy definition', PolicyDefAcl)\nclass PolicyDefAclIndex(PolicyDefIndex):\n api_path = ApiPath('template/policy/definition/acl', None, None, None)\n store_file = 'policy_definitions_acl.json'\n\n\nclass PolicyDefHubandspoke(PolicyDef):\n api_path = ApiPath('template/policy/definition/hubandspoke')\n store_path = ('policy_definitions', 'HubAndSpoke')\n\n\n@register('policy_definition', 'Hub-and-spoke policy definition', PolicyDefHubandspoke)\nclass PolicyDefHubandspokeIndex(PolicyDefIndex):\n api_path = ApiPath('template/policy/definition/hubandspoke', None, None, None)\n store_file = 'policy_definitions_hubandspoke.json'\n\n\nclass PolicyDefVedgeroute(PolicyDef):\n api_path = ApiPath('template/policy/definition/vedgeroute')\n store_path = ('policy_definitions', 'vEdgeRoute')\n\n\n@register('policy_definition', 'edge-route policy definition', PolicyDefVedgeroute)\nclass PolicyDefVedgerouteIndex(PolicyDefIndex):\n api_path = ApiPath('template/policy/definition/vedgeroute', None, None, None)\n store_file = 'policy_definitions_vedgeroute.json'\n\n\nclass PolicyDefIntrusionprevention(PolicyDef):\n api_path = 
ApiPath('template/policy/definition/intrusionprevention')\n store_path = ('policy_definitions', 'IntrusionPrevention')\n\n\n@register('policy_definition', 'IPS policy definition', PolicyDefIntrusionprevention)\nclass PolicyDefIntrusionpreventionIndex(PolicyDefIndex):\n api_path = ApiPath('template/policy/definition/intrusionprevention', None, None, None)\n store_file = 'policy_definitions_intrusionprevention.json'\n\n\nclass PolicyDefControl(PolicyDef):\n api_path = ApiPath('template/policy/definition/control')\n store_path = ('policy_definitions', 'Control')\n\n\n@register('policy_definition', 'control policy definition', PolicyDefControl)\nclass PolicyDefControlIndex(PolicyDefIndex):\n api_path = ApiPath('template/policy/definition/control', None, None, None)\n store_file = 'policy_definitions_control.json'\n\n\nclass PolicyDefDnssecurity(PolicyDef):\n api_path = ApiPath('template/policy/definition/dnssecurity')\n store_path = ('policy_definitions', 'DNSSecurity')\n\n\n@register('policy_definition', 'dns-security policy definition', PolicyDefDnssecurity)\nclass PolicyDefDnssecurityIndex(PolicyDefIndex):\n api_path = ApiPath('template/policy/definition/dnssecurity', None, None, None)\n store_file = 'policy_definitions_dnssecurity.json'\n\n\nclass PolicyDefCflowd(PolicyDef):\n api_path = ApiPath('template/policy/definition/cflowd')\n store_path = ('policy_definitions', 'Cflowd')\n\n\n@register('policy_definition', 'cflowd policy definition', PolicyDefCflowd)\nclass PolicyDefCflowdIndex(PolicyDefIndex):\n api_path = ApiPath('template/policy/definition/cflowd', None, None, None)\n store_file = 'policy_definitions_cflowd.json'\n\n\nclass PolicyDefAMP(PolicyDef):\n api_path = ApiPath('template/policy/definition/advancedMalwareProtection')\n store_path = ('policy_definitions', 'AMP')\n\n\n@register('policy_definition', 'AMP policy definition', PolicyDefAMP)\nclass PolicyDefAMPIndex(PolicyDefIndex):\n api_path = 
ApiPath('template/policy/definition/advancedMalwareProtection', None, None, None)\n store_file = 'policy_definitions_amp.json'\n\n\nclass PolicyDefDeviceAccess(PolicyDef):\n api_path = ApiPath('template/policy/definition/deviceaccesspolicy')\n store_path = ('policy_definitions', 'DeviceAccess')\n\n\n@register('policy_definition', 'device access policy definition', PolicyDefDeviceAccess)\nclass PolicyDefDeviceAccessIndex(PolicyDefIndex):\n api_path = ApiPath('template/policy/definition/deviceaccesspolicy', None, None, None)\n store_file = 'policy_definitions_deviceaccess.json'\n\n\nclass PolicyDefDeviceAccessV6(PolicyDef):\n api_path = ApiPath('template/policy/definition/deviceaccesspolicyv6')\n store_path = ('policy_definitions', 'DeviceAccessV6')\n\n\n@register('policy_definition', 'IPv6 device access policy definition', PolicyDefDeviceAccessV6)\nclass PolicyDefDeviceAccessV6Index(PolicyDefIndex):\n api_path = ApiPath('template/policy/definition/deviceaccesspolicyv6', None, None, None)\n store_file = 'policy_definitions_deviceaccessv6.json'\n\n\nclass PolicyDefDialPeer(PolicyDef):\n api_path = ApiPath('template/policy/definition/dialpeer')\n store_path = ('policy_definitions', 'DialPeer')\n\n\n@register('policy_definition', 'dial-peer policy definition', PolicyDefDialPeer, min_version='20.1')\nclass PolicyDefDialPeerIndex(PolicyDefIndex):\n api_path = ApiPath('template/policy/definition/dialpeer', None, None, None)\n store_file = 'policy_definitions_dialpeer.json'\n\n\nclass PolicyDefPhoneProfile(PolicyDef):\n api_path = ApiPath('template/policy/definition/srstphoneprofile')\n store_path = ('policy_definitions', 'PhoneProfile')\n\n\n@register('policy_definition', 'phone profile policy definition', PolicyDefPhoneProfile, min_version='20.1')\nclass PolicyDefPhoneProfileIndex(PolicyDefIndex):\n api_path = ApiPath('template/policy/definition/srstphoneprofile', None, None, None)\n store_file = 'policy_definitions_phoneprofile.json'\n\n\nclass 
PolicyDefFXOPort(PolicyDef):\n api_path = ApiPath('template/policy/definition/fxoport')\n store_path = ('policy_definitions', 'FXOPort')\n\n\n@register('policy_definition', 'FXO port policy definition', PolicyDefFXOPort, min_version='20.1')\nclass PolicyDefFXOPortIndex(PolicyDefIndex):\n api_path = ApiPath('template/policy/definition/fxoport', None, None, None)\n store_file = 'policy_definitions_fxoport.json'\n\n\nclass PolicyDefFXSPort(PolicyDef):\n api_path = ApiPath('template/policy/definition/fxsport')\n store_path = ('policy_definitions', 'FXSPort')\n\n\n@register('policy_definition', 'FXS port policy definition', PolicyDefFXSPort, min_version='20.1')\nclass PolicyDefFXSPortIndex(PolicyDefIndex):\n api_path = ApiPath('template/policy/definition/fxsport', None, None, None)\n store_file = 'policy_definitions_fxsport.json'\n\n\nclass PolicyDefFXSDIDPort(PolicyDef):\n api_path = ApiPath('template/policy/definition/fxsdidport')\n store_path = ('policy_definitions', 'FXSDIDPort')\n\n\n@register('policy_definition', 'FXS-DID port policy definition', PolicyDefFXSDIDPort, min_version='20.1')\nclass PolicyDefFXSDIDPortIndex(PolicyDefIndex):\n api_path = ApiPath('template/policy/definition/fxsdidport', None, None, None)\n store_file = 'policy_definitions_fxsdidport.json'\n\n\nclass PolicyDefSSLDecryption(PolicyDef):\n api_path = ApiPath('template/policy/definition/ssldecryption')\n store_path = ('policy_definitions', 'SSLDecryption')\n\n\n@register('policy_definition', 'SSL decryption policy definition', PolicyDefSSLDecryption, min_version='20.1')\nclass PolicyDefSSLDecryptionIndex(PolicyDefIndex):\n api_path = ApiPath('template/policy/definition/ssldecryption', None, None, None)\n store_file = 'policy_definitions_ssldecryption.json'\n\n\nclass PolicyDefUTDProfile(PolicyDef):\n api_path = ApiPath('template/policy/definition/sslutdprofile')\n store_path = ('policy_definitions', 'SSLUTDProfile')\n\n\n@register('policy_definition', 'SSL decryption UTD profile policy 
definition', PolicyDefUTDProfile, min_version='20.1')\nclass PolicyDefUTDProfileIndex(PolicyDefIndex):\n api_path = ApiPath('template/policy/definition/sslutdprofile', None, None, None)\n store_file = 'policy_definitions_sslutdprofile.json'\n\n\nclass PolicyDefPriisdnPort(PolicyDef):\n api_path = ApiPath('template/policy/definition/priisdnport')\n store_path = ('policy_definitions', 'PriisdnPort')\n\n\n@register('policy_definition', 'pri isdn port policy definition', PolicyDefPriisdnPort, min_version='20.3')\nclass PolicyDefPriisdnPortIndex(PolicyDefIndex):\n api_path = ApiPath('template/policy/definition/priisdnport', None, None, None)\n store_file = 'policy_definitions_priisdnport.json'\n\n\n#\n# Policy lists\n#\n\n# Policy list base class\nclass PolicyList(ConfigItem):\n store_file = '{item_name}.json'\n id_tag = 'listId'\n name_tag = 'name'\n type_tag = 'type'\n skip_cmp_tag_set = {'lastUpdated', 'referenceCount', 'references', 'activatedId', 'isActivatedByVsmart',\n 'owner', 'infoTag'}\n\n\n# Policy list index base class\nclass PolicyListIndex(IndexConfigItem):\n iter_fields = IdName('listId', 'name')\n\n\nclass PolicyListVpn(PolicyList):\n api_path = ApiPath('template/policy/list/vpn')\n store_path = ('policy_lists', 'VPN')\n\n\n@register('policy_list', 'VPN list', PolicyListVpn)\nclass PolicyListVpnIndex(PolicyListIndex):\n api_path = ApiPath('template/policy/list/vpn', None, None, None)\n store_file = 'policy_lists_vpn.json'\n\n\nclass PolicyListUrlWhiteList(PolicyList):\n api_path = ApiPath('template/policy/list/urlwhitelist')\n store_path = ('policy_lists', 'URLWhitelist')\n\n\n@register('policy_list', 'URL-whitelist list', PolicyListUrlWhiteList)\nclass PolicyListUrlWhileListIndex(PolicyListIndex):\n api_path = ApiPath('template/policy/list/urlwhitelist', None, None, None)\n store_file = 'policy_lists_urlwhitelist.json'\n\n\nclass PolicyListUrlBlackList(PolicyList):\n api_path = ApiPath('template/policy/list/urlblacklist')\n store_path = ('policy_lists', 
'URLBlacklist')\n\n\n@register('policy_list', 'URL-blacklist list', PolicyListUrlBlackList)\nclass PolicyListUrlBlackListIndex(PolicyListIndex):\n api_path = ApiPath('template/policy/list/urlblacklist', None, None, None)\n store_file = 'policy_lists_urlblacklist.json'\n\n\nclass PolicyListPolicer(PolicyList):\n api_path = ApiPath('template/policy/list/policer')\n store_path = ('policy_lists', 'Policer')\n\n\n@register('policy_list', 'policer list', PolicyListPolicer)\nclass PolicyListPolicerIndex(PolicyListIndex):\n api_path = ApiPath('template/policy/list/policer', None, None, None)\n store_file = 'policy_lists_policer.json'\n\n\nclass PolicyListIpsSignature(PolicyList):\n api_path = ApiPath('template/policy/list/ipssignature')\n store_path = ('policy_lists', 'IPSSignature')\n\n\n@register('policy_list', 'IPS-signature list', PolicyListIpsSignature)\nclass PolicyListIpsSignatureIndex(PolicyListIndex):\n api_path = ApiPath('template/policy/list/ipssignature', None, None, None)\n store_file = 'policy_lists_ipssignature.json'\n\n\nclass PolicyListClass(PolicyList):\n api_path = ApiPath('template/policy/list/class')\n store_path = ('policy_lists', 'Class')\n\n\n@register('policy_list', 'class list', PolicyListClass)\nclass PolicyListClassIndex(PolicyListIndex):\n api_path = ApiPath('template/policy/list/class', None, None, None)\n store_file = 'policy_lists_class.json'\n\n\nclass PolicyListUmbrellaData(PolicyList):\n api_path = ApiPath('template/policy/list/umbrelladata')\n store_path = ('policy_lists', 'UmbrellaData')\n\n\n@register('policy_list', 'umbrella-data list', PolicyListUmbrellaData)\nclass PolicyListUmbrellaDataIndex(PolicyListIndex):\n api_path = ApiPath('template/policy/list/umbrelladata', None, None, None)\n store_file = 'policy_lists_umbrelladata.json'\n\n\nclass PolicyListPrefix(PolicyList):\n api_path = ApiPath('template/policy/list/prefix')\n store_path = ('policy_lists', 'Prefix')\n\n\n@register('policy_list', 'prefix list', PolicyListPrefix)\nclass 
PolicyListPrefixIndex(PolicyListIndex):\n api_path = ApiPath('template/policy/list/prefix', None, None, None)\n store_file = 'policy_lists_prefix.json'\n\n\nclass PolicyListSite(PolicyList):\n api_path = ApiPath('template/policy/list/site')\n store_path = ('policy_lists', 'Site')\n\n\n@register('policy_list', 'site list', PolicyListSite)\nclass PolicyListSiteIndex(PolicyListIndex):\n api_path = ApiPath('template/policy/list/site', None, None, None)\n store_file = 'policy_lists_site.json'\n\n\nclass PolicyListExtcommunity(PolicyList):\n api_path = ApiPath('template/policy/list/extcommunity')\n store_path = ('policy_lists', 'ExtCommunity')\n\n\n@register('policy_list', 'extended-community list', PolicyListExtcommunity)\nclass PolicyListExtcommunityIndex(PolicyListIndex):\n api_path = ApiPath('template/policy/list/extcommunity', None, None, None)\n store_file = 'policy_lists_extcommunity.json'\n\n\n# Data Prefix All (template/policy/list/dataprefixall) was purposely not included as it seems to collide with, meaning\n# error, Data Prefix (template/policy/list/dataprefix).\nclass PolicyListDataprefix(PolicyList):\n api_path = ApiPath('template/policy/list/dataprefix')\n store_path = ('policy_lists', 'DataPrefix')\n\n\n@register('policy_list', 'data-prefix list', PolicyListDataprefix)\nclass PolicyListDataprefixIndex(PolicyListIndex):\n api_path = ApiPath('template/policy/list/dataprefix', None, None, None)\n store_file = 'policy_lists_dataprefix.json'\n\n\nclass PolicyListMirror(PolicyList):\n api_path = ApiPath('template/policy/list/mirror')\n store_path = ('policy_lists', 'Mirror')\n\n\n@register('policy_list', 'mirror list', PolicyListMirror)\nclass PolicyListMirrorIndex(PolicyListIndex):\n api_path = ApiPath('template/policy/list/mirror', None, None, None)\n store_file = 'policy_lists_mirror.json'\n\n\nclass PolicyListApplication(PolicyList):\n api_path = ApiPath('template/policy/list/app')\n store_path = ('policy_lists', 'App')\n\n\n@register('policy_list', 
'application list', PolicyListApplication)\nclass PolicyListApplicationIndex(PolicyListIndex):\n api_path = ApiPath('template/policy/list/app', None, None, None)\n store_file = 'policy_lists_app.json'\n\n\nclass PolicyListLocalApplication(PolicyList):\n api_path = ApiPath('template/policy/list/localapp')\n store_path = ('policy_lists', 'LocalApp')\n\n\n@register('policy_list', 'local-application list', PolicyListLocalApplication)\nclass PolicyListLocalApplicationIndex(PolicyListIndex):\n api_path = ApiPath('template/policy/list/localapp', None, None, None)\n store_file = 'policy_lists_localapp.json'\n\n\nclass PolicyListSla(PolicyList):\n api_path = ApiPath('template/policy/list/sla')\n store_path = ('policy_lists', 'SLA')\n\n\n@register('policy_list', 'SLA-class list', PolicyListSla)\nclass PolicyListSlaIndex(PolicyListIndex):\n api_path = ApiPath('template/policy/list/sla', None, None, None)\n store_file = 'policy_lists_sla.json'\n\n\nclass PolicyListColor(PolicyList):\n api_path = ApiPath('template/policy/list/color')\n store_path = ('policy_lists', 'Color')\n\n\n@register('policy_list', 'color list', PolicyListColor)\nclass PolicyListColorIndex(PolicyListIndex):\n api_path = ApiPath('template/policy/list/color', None, None, None)\n store_file = 'policy_lists_color.json'\n\n\nclass PolicyListZone(PolicyList):\n api_path = ApiPath('template/policy/list/zone')\n store_path = ('policy_lists', 'Zone')\n\n\n@register('policy_list', 'zone list', PolicyListZone)\nclass PolicyListZoneIndex(PolicyListIndex):\n api_path = ApiPath('template/policy/list/zone', None, None, None)\n store_file = 'policy_lists_zone.json'\n\n\nclass PolicyListAspath(PolicyList):\n api_path = ApiPath('template/policy/list/aspath')\n store_path = ('policy_lists', 'ASPath')\n\n\n@register('policy_list', 'as-path list', PolicyListAspath)\nclass PolicyListAspathIndex(PolicyListIndex):\n api_path = ApiPath('template/policy/list/aspath', None, None, None)\n store_file = 
'policy_lists_aspath.json'\n\n\nclass PolicyListTloc(PolicyList):\n api_path = ApiPath('template/policy/list/tloc')\n store_path = ('policy_lists', 'TLOC')\n\n\n@register('policy_list', 'TLOC list', PolicyListTloc)\nclass PolicyListTlocIndex(PolicyListIndex):\n api_path = ApiPath('template/policy/list/tloc', None, None, None)\n store_file = 'policy_lists_tloc.json'\n\n\nclass PolicyListDataipv6prefix(PolicyList):\n api_path = ApiPath('template/policy/list/dataipv6prefix')\n store_path = ('policy_lists', 'DataIPv6Prefix')\n\n\n@register('policy_list', 'data-ipv6-prefix list', PolicyListDataipv6prefix)\nclass PolicyListDataipv6prefixIndex(PolicyListIndex):\n api_path = ApiPath('template/policy/list/dataipv6prefix', None, None, None)\n store_file = 'policy_lists_dataipv6prefix.json'\n\n\nclass PolicyListIpv6prefix(PolicyList):\n api_path = ApiPath('template/policy/list/ipv6prefix')\n store_path = ('policy_lists', 'IPv6Prefix')\n\n\n@register('policy_list', 'ipv6-prefix list', PolicyListIpv6prefix)\nclass PolicyListIpv6prefixIndex(PolicyListIndex):\n api_path = ApiPath('template/policy/list/ipv6prefix', None, None, None)\n store_file = 'policy_lists_ipv6prefix.json'\n\n\nclass PolicyListLocaldomain(PolicyList):\n api_path = ApiPath('template/policy/list/localdomain')\n store_path = ('policy_lists', 'LocalDomain')\n\n\n@register('policy_list', 'local-domain list', PolicyListLocaldomain)\nclass PolicyListLocaldomainIndex(PolicyListIndex):\n api_path = ApiPath('template/policy/list/localdomain', None, None, None)\n store_file = 'policy_lists_localdomain.json'\n\n\nclass PolicyListCommunity(PolicyList):\n api_path = ApiPath('template/policy/list/community')\n store_path = ('policy_lists', 'Community')\n\n\n@register('policy_list', 'community list', PolicyListCommunity)\nclass PolicyListCommunityIndex(PolicyListIndex):\n api_path = ApiPath('template/policy/list/community', None, None, None)\n store_file = 'policy_lists_community.json'\n\n\n# Umbrella Secret endpoints were 
removed in 19.3. Will leave it for now.\nclass PolicyListUmbrellaSecret(PolicyList):\n api_path = ApiPath('template/policy/list/umbrellasecret')\n store_path = ('policy_lists', 'UmbrellaSecret')\n\n\n@register('policy_list', 'umbrella secret list', PolicyListUmbrellaSecret)\nclass PolicyListUmbrellaSecretIndex(PolicyListIndex):\n api_path = ApiPath('template/policy/list/umbrellasecret', None, None, None)\n store_file = 'policy_lists_umbrellasecret.json'\n\n\nclass PolicyListTGApiKey(PolicyList):\n api_path = ApiPath('template/policy/list/tgapikey')\n store_path = ('policy_lists', 'TGApiKey')\n\n\n@register('policy_list', 'threat grid api key list', PolicyListTGApiKey)\nclass PolicyListTGApiKeyIndex(PolicyListIndex):\n api_path = ApiPath('template/policy/list/tgapikey', None, None, None)\n store_file = 'policy_lists_tgapikey.json'\n\n\nclass PolicyListFQDN(PolicyList):\n api_path = ApiPath('template/policy/list/fqdn')\n store_path = ('policy_lists', 'FQDN')\n\n\n@register('policy_list', 'FQDN list', PolicyListFQDN, min_version='20.1')\nclass PolicyListFQDNIndex(PolicyListIndex):\n api_path = ApiPath('template/policy/list/fqdn', None, None, None)\n store_file = 'policy_lists_fqdn.json'\n\n# Shows up in SWAGGER for 20.1 but it is failing to be retrieved\n# class PolicyListDataPrefixFQDN(PolicyList):\n# api_path = ApiPath('template/policy/list/dataprefixfqdn')\n# store_path = ('policy_lists', 'DataPrefixFQDN')\n#\n#\n# @register('policy_list', 'data prefix FQDN list', PolicyListDataPrefixFQDN)\n# class PolicyListDataPrefixFQDNIndex(PolicyListIndex):\n# api_path = ApiPath('template/policy/list/dataprefixfqdn', None, None, None)\n# store_file = 'policy_lists_dataprefixfqdn.json'\n\n\nclass PolicyListTransRules(PolicyList):\n api_path = ApiPath('template/policy/list/translationrules')\n store_path = ('policy_lists', 'TranslationRules')\n\n\n@register('policy_list', 'translation rules list', PolicyListTransRules, min_version='20.1')\nclass 
PolicyListTransRulesIndex(PolicyListIndex):\n api_path = ApiPath('template/policy/list/translationrules', None, None, None)\n store_file = 'policy_lists_translationrules.json'\n\n\nclass PolicyListTransProfile(PolicyList):\n api_path = ApiPath('template/policy/list/translationprofile')\n store_path = ('policy_lists', 'TranslationProfile')\n\n\n@register('policy_profile', 'translation profile', PolicyListTransProfile, min_version='20.1')\nclass PolicyListTransProfileIndex(PolicyListIndex):\n api_path = ApiPath('template/policy/list/translationprofile', None, None, None)\n store_file = 'policy_lists_translationprofile.json'\n\n\nclass PolicyListSupervisoryDisc(PolicyList):\n api_path = ApiPath('template/policy/list/supervisorydisc')\n store_path = ('policy_lists', 'SupervisoryDisconnect')\n\n\n@register('policy_list', 'supervisory disconnect list', PolicyListSupervisoryDisc, min_version='20.1')\nclass PolicyListSupervisoryDiscIndex(PolicyListIndex):\n api_path = ApiPath('template/policy/list/supervisorydisc', None, None, None)\n store_file = 'policy_lists_supervisorydisconnect.json'\n\n\nclass PolicyListMediaProfile(PolicyList):\n api_path = ApiPath('template/policy/list/mediaprofile')\n store_path = ('policy_lists', 'MediaProfile')\n\n\n@register('policy_list', 'media profile list', PolicyListMediaProfile, min_version='20.1')\nclass PolicyListMediaProfileIndex(PolicyListIndex):\n api_path = ApiPath('template/policy/list/mediaprofile', None, None, None)\n store_file = 'policy_lists_mediaprofile.json'\n\n\nclass PolicyListFaxProtocol(PolicyList):\n api_path = ApiPath('template/policy/list/faxprotocol')\n store_path = ('policy_lists', 'FaxProtocol')\n\n\n@register('policy_list', 'fax protocol list', PolicyListFaxProtocol, min_version='20.3')\nclass PolicyListFaxProtocolIndex(PolicyListIndex):\n api_path = ApiPath('template/policy/list/faxprotocol', None, None, None)\n store_file = 'policy_lists_faxprotocol.json'\n\n\nclass PolicyListModemPassthrough(PolicyList):\n 
api_path = ApiPath('template/policy/list/modempassthrough')\n store_path = ('policy_lists', 'ModemPassthrough')\n\n\n@register('policy_list', 'modem passthrough list', PolicyListModemPassthrough, min_version='20.3')\nclass PolicyListModemPassthroughIndex(PolicyListIndex):\n api_path = ApiPath('template/policy/list/modempassthrough', None, None, None)\n store_file = 'policy_lists_modempassthrough.json'\n\n\nclass PolicyListTrunkGroup(PolicyList):\n api_path = ApiPath('template/policy/list/trunkgroup')\n store_path = ('policy_lists', 'TrunkGroup')\n\n\n@register('policy_list', 'trunk group list', PolicyListTrunkGroup, min_version='20.3')\nclass PolicyListTrunkGroupIndex(PolicyListIndex):\n api_path = ApiPath('template/policy/list/trunkgroup', None, None, None)\n store_file = 'policy_lists_trunkgroup.json'\n\n\n#\n# Admin Settings\n#\nclass SettingsVbond(ConfigItem):\n api_path = ApiPath('settings/configuration/device', None, 'settings/configuration/device', None)\n store_path = ('settings',)\n store_file = 'vbond.json'\n\n def __init__(self, data):\n \"\"\"\n :param data: dict containing the information to be associated with this API item.\n \"\"\"\n # Get requests return a dict as {'data': [{'domainIp': 'vbond.cisco.com', 'port': '12346'}]}\n super().__init__(data.get('data', [''])[0])\n\n @property\n def is_configured(self):\n domain_ip = self.data.get('domainIp', '')\n return len(domain_ip) > 0 and domain_ip != 'Not Configured'\n\n\n#\n# Edge Certificate\n#\nclass EdgeCertificate(IndexConfigItem):\n api_path = ApiPath('certificate/vedge/list', 'certificate/save/vedge/list', None, None)\n store_path = ('certificates', )\n store_file = 'edge_certificates.json'\n iter_fields = ('uuid', 'validity')\n\n extended_iter_fields = ('host-name', 'chasisNumber', 'serialNumber', 'vedgeCertificateState')\n\n _state_lookup = {\n 'tokengenerated': 'token generated',\n 'bootstrapconfiggenerated': 'bootstrap config generated',\n 'csrgenerated': 'CSR generated',\n 'csrfailed': 'CSR 
failed',\n 'certinstallfailed': 'certificate install failed',\n 'certinstalled': 'certificate installed',\n }\n\n @classmethod\n def state_str(cls, state):\n \"\"\"\n Convert the state field from WAN edge certificate into user-friendly string. If not known, return the original\n state field\n :param state: string containing the WAN edge certificate state field.\n :return: string\n \"\"\"\n return cls._state_lookup.get(state, state)\n\n def status_post_data(self, *new_status):\n \"\"\"\n Build payload to be used for POST requests that update WAN edge certificate validity status\n :param new_status: One or more (<uuid>, <new status>) tuples\n :return: List containing payload for POST requests\n \"\"\"\n new_status_dict = dict(new_status)\n\n return [\n {\n 'chasisNumber': chassis,\n 'serialNumber': serial,\n 'validity': new_status_dict[uuid]\n }\n for uuid, status, hostname, chassis, serial, state in self.extended_iter() if uuid in new_status_dict\n ]\n\n\n#\n# Realtime items\n#\n@op_register('system', 'status', 'System status')\nclass SystemStatus(RealtimeItem):\n api_path = ApiPath('device/system/status', None, None, None)\n fields_std = ('state', 'cpu_user', 'cpu_system', 'mem_total', 'mem_free')\n fields_ext = ('disk_size', 'disk_used')\n\n\n@op_register('bfd', 'sessions', 'BFD sessions')\nclass BfdSessions(RealtimeItem):\n api_path = ApiPath('device/bfd/sessions', None, None, None)\n fields_std = ('system_ip', 'site_id', 'local_color', 'color', 'state')\n fields_ext = ('src_ip', 'src_port', 'dst_ip', 'dst_port')\n\n\n@op_register('control', 'connections', 'Control connections')\nclass DeviceControlConnections(RealtimeItem):\n api_path = ApiPath('device/control/connections', None, None, None)\n fields_std = ('system_ip', 'site_id', 'peer_type', 'local_color', 'remote_color', 'state')\n fields_ext = ('private_ip', 'private_port', 'public_ip', 'public_port', 'instance', 'protocol', 'domain_id')\n\n\n@op_register('control', 'local-properties', 'Control 
local-properties')\nclass DeviceControlLocalProperties(RealtimeItem):\n api_path = ApiPath('device/control/localproperties', None, None, None)\n fields_std = ('system_ip', 'site_id', 'device_type', 'organization_name', 'domain_id', 'port_hopped')\n fields_ext = ('protocol', 'tls_port', 'certificate_status', 'root_ca_chain_status', 'certificate_validity',\n 'certificate_not_valid_after')\n\n\n@op_register('interface', 'info', 'Interface info')\nclass InterfaceIpv4(RealtimeItem):\n api_path = ApiPath('device/interface', None, None, None)\n fields_std = ('vpn_id', 'ifname', 'af_type', 'ip_address', 'ipv6_address', 'if_admin_status', 'if_oper_status',\n 'desc')\n fields_ext = ('tx_drops', 'rx_drops', 'tx_kbps', 'rx_kbps')\n\n\n@op_register('app-route', 'stats', 'Application-aware route statistics')\nclass AppRouteStats(RealtimeItem):\n api_path = ApiPath('device/app-route/statistics', None, None, None)\n fields_std = ('index', 'remote_system_ip', 'local_color', 'remote_color', 'total_packets', 'loss',\n 'average_latency', 'average_jitter')\n fields_ext = ('mean_loss', 'mean_latency', 'mean_jitter', 'sla_class_index')\n\n\n@op_register('app-route', 'sla-class', 'Application-aware SLA class')\nclass AppRouteSlaClass(RealtimeItem):\n api_path = ApiPath('device/app-route/sla-class', None, None, None)\n fields_std = ('name', 'loss', 'latency', 'jitter')\n fields_ext = ('index', )\n\n\n@op_register('omp', 'summary', 'OMP summary')\nclass DeviceOmpSummary(RealtimeItem):\n api_path = ApiPath('device/omp/summary', None, None, None)\n fields_std = ('operstate', 'ompuptime', 'vsmart_peers', 'routes_received', 'routes_installed', 'routes_sent',\n 'tlocs_received', 'tlocs_installed', 'tlocs_sent')\n fields_ext = ('services_received', 'services_installed', 'services_sent', 'policy_received', 'policy_sent')\n\n\n@op_register('omp', 'peers', 'OMP peers')\nclass DeviceOmpPeers(RealtimeItem):\n api_path = ApiPath('device/omp/peers', None, None, None)\n fields_std = ('peer', 'type', 
'site_id', 'state')\n fields_ext = ('domain_id', 'up_time')\n\n\n@op_register('omp', 'adv-routes', 'OMP advertised routes')\nclass DeviceOmpRoutesAdv(RealtimeItem):\n api_path = ApiPath('device/omp/routes/advertised', None, None, None)\n fields_std = ('vpn_id', 'prefix', 'to_peer', 'color', 'ip', 'protocol', 'metric', 'preference')\n fields_ext = ('tag', 'originator', 'site_id')\n\n\n@op_register('tunnel', 'stats', 'Tunnel statistics')\nclass DeviceTunnelStats(RealtimeItem):\n api_path = ApiPath('device/tunnel/statistics', None, None, None)\n fields_std = ('system_ip', 'local_color', 'remote_color', 'tunnel_protocol', 'tunnel_mtu', 'tcp_mss_adjust')\n fields_ext = ('source_ip', 'dest_ip', 'source_port', 'dest_port')\n\n\n@op_register('software', 'info', 'Software info')\nclass DeviceSoftware(RealtimeItem):\n api_path = ApiPath('device/software', None, None, None)\n fields_std = ('version', 'active', 'default')\n fields_ext = ('confirmed', )\n\n\n@op_register('dpi', 'summary', 'DPI summary')\nclass DeviceDpiSummary(RealtimeItem):\n api_path = ApiPath('device/dpi/summary', None, None, None)\n fields_std = ('status', 'current_flows', 'peak_flows', 'current_rate', 'peak_rate')\n fields_ext = ('flows_created', 'flows_expired')\n\n\n#\n# Bulk Statistics Items\n#\n@op_register('app-route', 'stats', 'Application-aware route statistics')\nclass BulkAppRoute(BulkStatsItem):\n api_path = ApiPath('data/device/statistics/approutestatsstatistics', None, None, None)\n fields_std = ('local_system_ip', 'remote_system_ip', 'local_color', 'remote_color', 'total', 'loss', 'latency',\n 'jitter', 'name')\n fields_ext = ('tx_pkts', 'rx_pkts', 'tx_octets', 'rx_octets')\n fields_to_avg = ('total', 'loss', 'latency', 'jitter')\n\n @staticmethod\n def time_series_key(sample: namedtuple) -> str:\n return sample.name\n\n\n@op_register('interface', 'info', 'Interface info')\nclass BulkInterfaceStats(BulkStatsItem):\n api_path = ApiPath('data/device/statistics/interfacestatistics', None, None, 
None)\n fields_std = ('vpn_id', 'interface', 'tx_kbps', 'rx_kbps', 'tx_pps', 'rx_pps')\n fields_ext = ('rx_pkts', 'tx_pkts', 'rx_drops', 'tx_drops', 'rx_errors', 'tx_errors')\n fields_to_avg = ('tx_kbps', 'rx_kbps', 'tx_pps', 'rx_pps')\n\n @staticmethod\n def time_series_key(sample: namedtuple) -> str:\n return f\"{sample.vdevice_name}_{sample.vpn_id}_{sample.interface}\"\n\n\n@op_register('system', 'status', 'System status')\nclass BulkSystemStats(BulkStatsItem):\n api_path = ApiPath('data/device/statistics/devicesystemstatusstatistics', None, None, None)\n fields_std = ('cpu_user', 'cpu_system', 'mem_util')\n fields_ext = ('mem_used', 'mem_free', 'disk_used', 'disk_avail')\n fields_to_avg = ('cpu_user', 'cpu_system', 'mem_util', 'mem_used', 'mem_free', 'disk_used', 'disk_avail')\n field_conversion_fns = {\n 'cpu_system': abs,\n 'mem_util': lambda x: 100 * x\n }\n\n\n#\n# Bulk State Items\n#\n@op_register('system', 'info', 'System info')\nclass BulkSystemStatus(BulkStateItem):\n api_path = ApiPath('data/device/state/SystemStatus', None, None, None)\n fields_std = ('state', 'total_cpu_count', 'fp_cpu_count', 'linux_cpu_count', 'tcpd_cpu_count')\n fields_ext = ('reboot_reason', 'reboot_type')\n\n\n@op_register('bfd', 'sessions', 'BFD sessions')\nclass BulkBfdSessions(BulkStateItem):\n api_path = ApiPath('data/device/state/BFDSessions', None, None, None)\n fields_std = ('system_ip', 'site_id', 'local_color', 'color', 'state')\n fields_ext = ('src_ip', 'src_port', 'dst_ip', 'dst_port', 'transitions', 'uptime_date')\n\n\n@op_register('control', 'connections', 'Control connections')\nclass BulkControlConnections(BulkStateItem):\n api_path = ApiPath('data/device/state/ControlConnection', None, None, None)\n fields_std = ('system_ip', 'site_id', 'peer_type', 'local_color', 'remote_color', 'state')\n fields_ext = ('private_ip', 'private_port', 'public_ip', 'public_port', 'instance', 'protocol', 'domain_id',\n 'uptime_date')\n\n\n@op_register('control', 'local-properties', 
'Control local-properties')\nclass BulkControlLocalProperties(BulkStateItem):\n api_path = ApiPath('data/device/state/ControlLocalProperty', None, None, None)\n fields_std = ('system_ip', 'site_id', 'device_type', 'organization_name', 'domain_id', 'port_hopped')\n fields_ext = ('protocol', 'tls_port', 'certificate_status', 'root_ca_chain_status', 'certificate_validity',\n 'certificate_not_valid_after')\n\n\n@op_register('interface', 'vedge', 'vEdge interfaces')\nclass BulkInterfaceVedge(BulkStateItem):\n api_path = ApiPath('data/device/state/Interface', None, None, None)\n fields_std = ('vpn_id', 'ifname', 'af_type', 'ip_address', 'ipv6_address', 'if_admin_status',\n 'if_oper_status', 'desc')\n fields_ext = ('mtu', 'hwaddr', 'speed_mbps', 'port_type')\n\n\n@op_register('interface', 'cedge', 'cEdge interfaces')\nclass BulkInterfaceCedge(BulkStateItem):\n api_path = ApiPath('data/device/state/CEdgeInterface', None, None, None)\n fields_std = ('vpn_id', 'ifname', 'ip_address', 'ipv4_subnet_mask', 'ipv6_addrs', 'if_admin_status',\n 'if_oper_status', 'description')\n fields_ext = ('mtu', 'hwaddr', 'speed_mbps')\n\n\n@op_register('omp', 'peers', 'OMP peers')\nclass BulkOmpPeers(BulkStateItem):\n api_path = ApiPath('data/device/state/OMPPeer', None, None, None)\n fields_std = ('peer', 'type', 'site_id', 'state')\n fields_ext = ('domain_id', )\n"
},
{
"alpha_fraction": 0.5719298124313354,
"alphanum_fraction": 0.6105263233184814,
"avg_line_length": 30.66666603088379,
"blob_id": "98c2f760b7716daadaa1e9a7da9994ac8afc0126",
"content_id": "4498714e9007e1282585a96fb73683885336058a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 285,
"license_type": "permissive",
"max_line_length": 83,
"num_lines": 9,
"path": "/cisco_sdwan/__version__.py",
"repo_name": "ccie29824/CiscoDevNet-sastre",
"src_encoding": "UTF-8",
"text": "\"\"\"\nSastre - Automation Tools for Cisco SD-WAN Powered by Viptela\n\n\"\"\"\n__copyright__ = \"Copyright (c) 2019-2021 Cisco Systems, Inc. and/or its affiliates\"\n__version__ = \"1.13\"\n__author__ = \"Marcelo Reis\"\n__email__ = \"[email protected]\"\n__url__ = \"https://github.com/CiscoDevNet/sastre\"\n"
},
{
"alpha_fraction": 0.5940672159194946,
"alphanum_fraction": 0.5954904556274414,
"avg_line_length": 43.05775451660156,
"blob_id": "668a6a9fcb91d9bd353d888a0a5251ef409bf6ef",
"content_id": "da572b61b761b78da779d837d95297fd320f05b6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 26699,
"license_type": "permissive",
"max_line_length": 120,
"num_lines": 606,
"path": "/cisco_sdwan/tasks/common.py",
"repo_name": "ccie29824/CiscoDevNet-sastre",
"src_encoding": "UTF-8",
"text": "\"\"\"\n Sastre - Automation Tools for Cisco SD-WAN Powered by Viptela\n\n cisco_sdwan.tasks.common\n This module implements supporting classes and functions for tasks\n\"\"\"\nimport logging\nimport time\nimport csv\nimport re\nfrom pathlib import Path\nfrom shutil import rmtree\nfrom collections import namedtuple\nfrom typing import List, Tuple, Iterator, Union, Optional\nfrom cisco_sdwan.base.rest_api import Rest, RestAPIException\nfrom cisco_sdwan.base.models_base import DATA_DIR\nfrom cisco_sdwan.base.models_vmanage import (DeviceTemplate, DeviceTemplateValues, DeviceTemplateAttached,\n DeviceTemplateAttach, DeviceTemplateCLIAttach, DeviceModeCli,\n ActionStatus, PolicyVsmartStatus, PolicyVsmartStatusException,\n PolicyVsmartActivate, PolicyVsmartIndex, PolicyVsmartDeactivate,\n Device)\n\n\ndef regex_search(regex, *fields):\n \"\"\"\n Execute regular expression search on provided fields. Match fields in the order provided, stop on first match.\n :param regex: Pattern to match\n :param fields: One or more strings to match\n :return: True if a match is found on any field, False otherwise.\n \"\"\"\n for match_field in fields:\n if re.search(regex, match_field):\n return True\n return False\n\n\nclass Tally:\n def __init__(self, *counters):\n self._tally = {counter: 0 for counter in counters}\n\n def __getattr__(self, counter):\n return self._tally[counter]\n\n def incr(self, counter):\n self._tally[counter] += 1\n\n\nclass TaskArgs:\n \"\"\"\n Used to store arguments for a task\n \"\"\"\n def __init__(self, **kwargs):\n self.data = kwargs\n\n def __getattr__(self, field):\n if field not in self.data:\n raise AttributeError(\"'{cls_name}' object has no attribute '{attr}'\".format(cls_name=type(self).__name__,\n attr=field))\n return self.data[field]\n\n @classmethod\n def from_json(cls, json_obj, mapper=None):\n \"\"\"\n Returns a TaskArgs instance off the provided json_obj\n :param json_obj: A json object parsed from a json encoded string\n :param 
mapper: An optional dictionary mapping arg_name strings to a conversion function. This conversion\n function is called with the arg_value from json_object and should return the converted (python\n native) value associated with arg_value.\n :return: TaskArgs instance\n \"\"\"\n mapper_dict = mapper or {}\n kwargs = {arg_name: mapper_dict.get(arg_name, lambda x: x)(arg_value)\n for arg_name, arg_value in json_obj.items()}\n\n return cls(**kwargs)\n\n\nclass Task:\n # Configuration parameters for wait_actions\n ACTION_INTERVAL = 10 # seconds\n ACTION_TIMEOUT = 1800 # 30 minutes\n\n SAVINGS_FACTOR = 1\n\n def __init__(self):\n self.log_count = Tally('debug', 'info', 'warning', 'error', 'critical')\n\n def log_debug(self, *args):\n self._log('debug', *args)\n\n def log_info(self, *args):\n self._log('info', *args)\n\n def log_warning(self, *args):\n self._log('warning', *args)\n\n def log_error(self, *args):\n self._log('error', *args)\n\n def log_critical(self, *args):\n self._log('critical', *args)\n\n def _log(self, level, *args):\n getattr(logging.getLogger(type(self).__name__), level)(*args)\n self.log_count.incr(level)\n\n def outcome(self, success_msg, failure_msg):\n msg_list = list()\n if self.log_count.critical:\n msg_list.append(f'{self.log_count.critical} critical')\n if self.log_count.error:\n msg_list.append(f'{self.log_count.error} errors')\n if self.log_count.warning:\n msg_list.append(f'{self.log_count.warning} warnings')\n\n msg = failure_msg if len(msg_list) > 0 else success_msg\n return msg.format(tally=', '.join(msg_list))\n\n @property\n def savings(self):\n \"\"\"\n Estimate number of hours saved when running this task, when compared with performing the same steps manually.\n \"\"\"\n return self.SAVINGS_FACTOR * self.log_count.info / 60\n\n @staticmethod\n def parser(task_args, **kwargs):\n raise NotImplementedError()\n\n @staticmethod\n def is_api_required(parsed_args):\n return True\n\n def runner(self, parsed_args, api, task_output):\n raise 
NotImplementedError()\n\n def index_iter(self, backend, catalog_entry_iter):\n \"\"\"\n Return an iterator of indexes loaded from backend. If backend is a Rest API instance, indexes are loaded\n from remote vManage via API. Otherwise items are loaded from local backup under the backend directory.\n :param backend: Rest api instance or directory name\n :param catalog_entry_iter: An iterator of CatalogEntry\n :return: Iterator of (<tag>, <info>, <index>, <item_cls>)\n \"\"\"\n is_api = isinstance(backend, Rest)\n\n def load_index(index_cls, info):\n index = index_cls.get(backend) if is_api else index_cls.load(backend)\n self.log_debug('%s %s %s index',\n 'No' if index is None else 'Loaded',\n 'remote' if is_api else 'local', info)\n return index\n\n all_index_iter = (\n (tag, info, load_index(index_cls, info), item_cls)\n for tag, info, index_cls, item_cls in catalog_entry_iter\n )\n return ((tag, info, index, item_cls) for tag, info, index, item_cls in all_index_iter if index is not None)\n\n @staticmethod\n def item_get(item_cls, backend, item_id, item_name, ext_name):\n if isinstance(backend, Rest):\n return item_cls.get(backend, item_id)\n else:\n return item_cls.load(backend, ext_name, item_name, item_id)\n\n @staticmethod\n def index_get(index_cls, backend):\n return index_cls.get(backend) if isinstance(backend, Rest) else index_cls.load(backend)\n\n def attach_template_data(self, api: Rest, workdir: str, ext_name: bool, templates_iter: Iterator[tuple],\n target_uuid_set: Optional[set] = None) -> Tuple[list, bool]:\n \"\"\"\n Prepare data for template attach considering local backup as the source of truth (i.e. 
where input values are)\n :param api: Instance of Rest API\n :param workdir: Directory containing saved items\n :param ext_name: Boolean passed to .load methods indicating whether extended item names should be used.\n :param templates_iter: Iterator of (<template_name>, <saved_template_id>, <target_template_id>)\n :param target_uuid_set: (optional) Set of existing device uuids on target node.\n When provided, attach only devices that were previously attached (on saved) and are on\n target node but are not yet attached.\n When absent, re-attach all currently attached devices on target.\n :return: Tuple containing attach data (<template input list>, <isEdited>)\n \"\"\"\n def load_template_input(template_name: str, saved_id: str, target_id: str) -> Union[list, None]:\n if target_id is None:\n self.log_debug('Skip %s, saved template is not on target node', template_name)\n return None\n\n saved_values = DeviceTemplateValues.load(workdir, ext_name, template_name, saved_id)\n if saved_values is None:\n self.log_error('DeviceTemplateValues file not found: %s, %s', template_name, saved_id)\n return None\n if saved_values.is_empty:\n self.log_debug('Skip %s, saved template has no attachments', template_name)\n return None\n\n target_attached_uuid_set = {uuid for uuid, _ in DeviceTemplateAttached.get_raise(api, target_id)}\n if target_uuid_set is None:\n allowed_uuid_set = target_attached_uuid_set\n else:\n saved_attached = DeviceTemplateAttached.load(workdir, ext_name, template_name, saved_id)\n if saved_attached is None:\n self.log_error('DeviceTemplateAttached file not found: %s, %s', template_name, saved_id)\n return None\n saved_attached_uuid_set = {uuid for uuid, _ in saved_attached}\n allowed_uuid_set = target_uuid_set & saved_attached_uuid_set - target_attached_uuid_set\n\n input_list = saved_values.input_list(allowed_uuid_set)\n if len(input_list) == 0:\n self.log_debug('Skip %s, no devices to attach', template_name)\n return None\n\n return input_list\n\n def 
is_template_cli(template_name: str, saved_id: str) -> bool:\n return DeviceTemplate.load(workdir, ext_name, template_name, saved_id, raise_not_found=True).is_type_cli\n\n template_input_list = [\n (name, target_id, load_template_input(name, saved_id, target_id), is_template_cli(name, saved_id))\n for name, saved_id, target_id in templates_iter\n ]\n return template_input_list, target_uuid_set is None\n\n @staticmethod\n def reattach_template_data(api: Rest, templates_iter: Iterator[tuple]) -> Tuple[list, bool]:\n \"\"\"\n Prepare data for template reattach considering vManage as the source of truth (i.e. where input values are)\n :param api: Instance of Rest API\n :param templates_iter: Iterator of (<template_name>, <target_template_id>)\n :return: Tuple containing attach data (<template input list>, <isEdited>)\n \"\"\"\n def get_template_input(template_id):\n uuid_list = [uuid for uuid, _ in DeviceTemplateAttached.get_raise(api, template_id)]\n values = DeviceTemplateValues(api.post(DeviceTemplateValues.api_params(template_id, uuid_list),\n DeviceTemplateValues.api_path.post))\n return values.input_list()\n\n def is_template_cli(template_id):\n return DeviceTemplate.get_raise(api, template_id).is_type_cli\n\n template_input_list = [\n (template_name, template_id, get_template_input(template_id), is_template_cli(template_id))\n for template_name, template_id in templates_iter\n ]\n return template_input_list, True\n\n def attach(self, api: Rest, template_input_list: List[tuple], is_edited: bool, *, chunk_size: int = 200,\n dryrun: bool = False, log_context: str, raise_on_failure: bool = True) -> int:\n \"\"\"\n Attach device templates to devices\n :param api: Instance of Rest API\n :param template_input_list: List containing payload for template attachment\n :param is_edited: Boolean corresponding to the isEdited tag in the template attach payload\n :param chunk_size: Maximum number of device attachments per request\n :param dryrun: Indicates dryrun mode\n :param 
raise_on_failure: If True, raise exception on action failures\n :param log_context: Message to log during wait actions\n :return: Number of attachment requests processed\n \"\"\"\n def grouper(attach_cls, request_list):\n while True:\n section_dict = yield from chopper(chunk_size)\n if not section_dict:\n continue\n\n request_details = (\n f\"{template_name} ({', '.join(DeviceTemplateValues.input_list_devices(input_list))})\"\n for template_name, key_dict in section_dict.items() for _, input_list in key_dict.items()\n )\n self.log_info('%sTemplate attach: %s', 'DRY-RUN: ' if dryrun else '', ', '.join(request_details))\n\n if not dryrun:\n template_input_iter = (\n (template_id, input_list)\n for key_dict in section_dict.values() for template_id, input_list in key_dict.items()\n )\n action_worker = attach_cls(\n api.post(attach_cls.api_params(template_input_iter, is_edited), attach_cls.api_path.post)\n )\n self.log_debug('Device template attach requested: %s', action_worker.uuid)\n self.wait_actions(api, [(action_worker, ', '.join(section_dict))], log_context, raise_on_failure)\n\n request_list.append(...)\n\n def feeder(attach_cls, attach_data_iter):\n attach_reqs = []\n group = grouper(attach_cls, attach_reqs)\n next(group)\n for template_name, template_id, input_list in attach_data_iter:\n for input_entry in input_list:\n group.send((template_name, template_id, input_entry))\n group.send(None)\n\n return attach_reqs\n\n # Attach requests for feature-based device templates\n feature_based_iter = ((template_name, template_id, input_list)\n for template_name, template_id, input_list, is_cli in template_input_list\n if input_list is not None and not is_cli)\n feature_based_reqs = feeder(DeviceTemplateAttach, feature_based_iter)\n\n # Attach Requests for cli device templates\n cli_based_iter = ((template_name, template_id, input_list)\n for template_name, template_id, input_list, is_cli in template_input_list\n if input_list is not None and is_cli)\n cli_based_reqs = 
feeder(DeviceTemplateCLIAttach, cli_based_iter)\n\n return len(feature_based_reqs + cli_based_reqs)\n\n def detach(self, api: Rest, template_iter: Iterator[tuple], device_map: Optional[dict] = None, *,\n chunk_size: int = 200, dryrun: bool = False, log_context: str, raise_on_failure: bool = True) -> int:\n \"\"\"\n Detach devices from device templates\n :param api: Instance of Rest API\n :param template_iter: An iterator of (<template id>, <template name>) tuples containing templates to detach\n :param device_map: {<uuid>: <name>, ...} dict containing allowed devices for the detach. If None, all attached\n devices are detached.\n :param chunk_size: Maximum number of device detachments per request\n :param dryrun: Indicates dryrun mode\n :param raise_on_failure: If True, raise exception on action failures\n :param log_context: Message to log during wait actions\n :return: Number of detach requests processed\n \"\"\"\n def grouper(request_list):\n while True:\n section_dict = yield from chopper(chunk_size)\n if not section_dict:\n continue\n\n wait_list = []\n for device_type, key_dict in section_dict.items():\n request_details = (\n f\"{t_name} ({', '.join(device_map.get(device_id, '-') for device_id in device_id_list)})\"\n for t_name, device_id_list in key_dict.items()\n )\n self.log_info('%sTemplate detach: %s', 'DRY-RUN: ' if dryrun else '', ', '.join(request_details))\n\n if not dryrun:\n id_list = (device_id for device_id_list in key_dict.values() for device_id in device_id_list)\n action_worker = DeviceModeCli(\n api.post(DeviceModeCli.api_params(device_type, *id_list), DeviceModeCli.api_path.post)\n )\n wait_list.append((action_worker, ', '.join(key_dict)))\n self.log_debug('Device template attach requested: %s', action_worker.uuid)\n\n request_list.append(...)\n\n if wait_list:\n self.wait_actions(api, wait_list, log_context, raise_on_failure)\n\n detach_reqs = []\n group = grouper(detach_reqs)\n next(group)\n\n if device_map is None:\n device_map = 
dict(device_iter(api))\n\n for template_id, template_name in template_iter:\n devices_attached = DeviceTemplateAttached.get(api, template_id)\n if devices_attached is None:\n self.log_warning('Failed to retrieve %s attached devices from vManage', template_name)\n continue\n for uuid, personality in devices_attached:\n if uuid in device_map:\n group.send((personality, template_name, uuid))\n group.send(None)\n\n return len(detach_reqs)\n\n def activate_policy(self, api: Rest, policy_id: Optional[str], policy_name: Optional[str],\n is_edited: bool = False) -> List[tuple]:\n \"\"\"\n :param api: Instance of Rest API\n :param policy_id: ID of policy to activate\n :param policy_name: Name of policy to activate\n :param is_edited: (optional) When true it indicates reactivation of an already active policy (e.x. due to\n in-place modifications)\n :return: List of worker actions to monitor [(<action_worker>, <template_name>), ...]\n \"\"\"\n action_list = []\n if policy_id is None or policy_name is None:\n # No policy is active or policy not on target vManage\n return action_list\n\n try:\n PolicyVsmartStatus.get_raise(api).raise_for_status()\n except (RestAPIException, PolicyVsmartStatusException):\n self.log_debug('vSmarts not in vManage mode or otherwise not ready to have policy activated')\n else:\n action_worker = PolicyVsmartActivate(\n api.post(PolicyVsmartActivate.api_params(is_edited), PolicyVsmartActivate.api_path.post, policy_id)\n )\n self.log_debug('Policy activate requested: %s', action_worker.uuid)\n action_list.append((action_worker, policy_name))\n\n return action_list\n\n def deactivate_policy(self, api: Rest) -> List[tuple]:\n action_list = []\n item_id, item_name = PolicyVsmartIndex.get_raise(api).active_policy\n if item_id is not None and item_name is not None:\n action_worker = PolicyVsmartDeactivate(api.post({}, PolicyVsmartDeactivate.api_path.post, item_id))\n self.log_debug('Policy deactivate requested: %s', action_worker.uuid)\n 
action_list.append((action_worker, item_name))\n\n return action_list\n\n def wait_actions(self, api: Rest, action_list: List[tuple], log_context: str, raise_on_failure: bool) -> bool:\n \"\"\"\n Wait for actions in action_list to complete\n :param api: Instance of Rest API\n :param action_list: [(<action_worker>, <action_info>), ...]. Where <action_worker> is an instance of ApiItem and\n <action_info> is a str with information about the action. Action_info can be None, in which\n case no messages are logged for individual actions.\n :param log_context: String providing context to log messages\n :param raise_on_failure: If True, raise exception on action failures\n :return: True if all actions completed with success. False otherwise.\n \"\"\"\n\n def upper_first(input_string):\n return input_string[0].upper() + input_string[1:] if len(input_string) > 0 else ''\n\n self.log_info(upper_first(log_context))\n result_list = []\n time_budget = Task.ACTION_TIMEOUT\n for action_worker, action_info in action_list:\n while True:\n action = ActionStatus.get(api, action_worker.uuid)\n if action is None:\n self.log_warning('Failed to retrieve action status from vManage')\n result_list.append(False)\n break\n\n if action.is_completed:\n result_list.append(action.is_successful)\n if action_info is not None:\n if action.is_successful:\n self.log_info('Completed %s', action_info)\n else:\n self.log_warning('Failed %s: %s', action_info, action.activity_details)\n\n break\n\n time_budget -= Task.ACTION_INTERVAL\n if time_budget > 0:\n self.log_info('Waiting...')\n time.sleep(Task.ACTION_INTERVAL)\n else:\n self.log_warning('Wait time limit expired')\n result_list.append(False)\n break\n\n result = all(result_list)\n if result:\n self.log_info('Completed %s', log_context)\n elif raise_on_failure:\n raise WaitActionsException('Failed {context}'.format(context=log_context))\n else:\n self.log_warning('Failed %s', log_context)\n\n return result\n\n\nclass TaskException(Exception):\n 
\"\"\" Exception for Task errors \"\"\"\n pass\n\n\nclass WaitActionsException(TaskException):\n \"\"\" Exception indicating failure in one or more actions being monitored \"\"\"\n pass\n\n\ndef chopper(section_size: int):\n section = {}\n for _ in range(section_size):\n data = yield\n if data is None:\n break\n primary_key, secondary_key, item = data\n section.setdefault(primary_key, {}).setdefault(secondary_key, []).append(item)\n return section\n\n\ndef device_iter(api: Rest, match_name_regex: Optional[str] = None, match_reachable: bool = False,\n match_site_id: Optional[str] = None, match_system_ip: Optional[str] = None) -> Iterator[tuple]:\n \"\"\"\n Return an iterator over device inventory, filtered by optional conditions.\n :param api: Instance of Rest API\n :param match_name_regex: Regular expression matching device host-name\n :param match_reachable: Boolean indicating whether to include reachable devices only\n :param match_site_id: When present, only include devices with provided site-id\n :param match_system_ip: If present, only include device with provided system-ip\n :return: Iterator of (<device-uuid>, <device-name>) tuples.\n \"\"\"\n return (\n (uuid, name)\n for uuid, name, system_ip, site_id, reachability, *_ in Device.get_raise(api).extended_iter(default='-')\n if (\n (match_name_regex is None or regex_search(match_name_regex, name)) and\n (not match_reachable or reachability == 'reachable') and\n (match_site_id is None or site_id == match_site_id) and\n (match_system_ip is None or system_ip == match_system_ip)\n )\n )\n\n\nclass Table:\n DECIMAL_DIGITS = 1 # Number of decimal digits for float values\n\n def __init__(self, *columns: str, name: Optional[str] = None, meta: Optional[str] = None) -> None:\n self.header = tuple(columns)\n self.name = name\n self.meta = meta\n self._row_class = namedtuple('Row', (f'column_{i}' for i in range(len(columns))))\n self._rows = list()\n\n @staticmethod\n def process_value(value):\n return round(value, 
Table.DECIMAL_DIGITS) if isinstance(value, float) else value\n\n def add(self, *row_values):\n self._rows.append(self._row_class(*map(self.process_value, row_values)))\n\n def add_marker(self):\n self._rows.append(None)\n\n def extend(self, row_values_iter):\n self._rows.extend(self._row_class(*map(self.process_value, row_values)) for row_values in row_values_iter)\n\n def __iter__(self):\n return iter(self._rows)\n\n def __len__(self):\n total_len = len(self._rows) - self._rows.count(None)\n return total_len if total_len > 0 else 0\n\n def _column_max_width(self, index):\n def cell_length(cell_value):\n return len(str(cell_value))\n\n return max(\n cell_length(self.header[index]),\n max((cell_length(row[index]) for row in self._rows if row is not None)) if len(self) > 0 else 0\n )\n\n def pretty_iter(self):\n def cell_format(width, value):\n return ' {value:{width}} '.format(value=str(value), width=width-2)\n\n def border(line_ch: str, int_edge_ch: str = '+', ext_edge_ch: str = '+') -> str:\n return ext_edge_ch + int_edge_ch.join((line_ch*col_width for col_width in col_width_list)) + ext_edge_ch\n\n col_width_list = [2+self._column_max_width(index) for index in range(len(self.header))]\n border_line = border('-')\n header_border_line = border('=', '=')\n\n if self.name is not None:\n yield f\"*** {self.name} ***\"\n\n yield header_border_line\n yield '|' + '|'.join(cell_format(width, value) for width, value in zip(col_width_list, self.header)) + '|'\n yield header_border_line\n\n done_content_row = False\n for row in self._rows:\n if row is not None:\n done_content_row = True\n yield '|' + '|'.join(cell_format(width, value) for width, value in zip(col_width_list, row)) + '|'\n elif done_content_row:\n done_content_row = False\n yield border_line\n\n if done_content_row:\n yield border_line\n\n def save(self, filename):\n with open(filename, 'w', newline='') as csv_file:\n writer = csv.writer(csv_file)\n writer.writerow(self.header)\n writer.writerows(row for row 
in self._rows if row is not None)\n\n\ndef clean_dir(target_dir_name, max_saved=99):\n \"\"\"\n Clean target_dir_name directory if it exists. If max_saved is non-zero and target_dir_name exists, move it to a new\n directory name in sequence.\n :param target_dir_name: str with the directory to be cleaned\n :param max_saved: int indicating the maximum instances to keep. If 0, target_dir_name is just deleted.\n \"\"\"\n target_dir = Path(DATA_DIR, target_dir_name)\n if target_dir.exists():\n if max_saved > 0:\n save_seq = range(max_saved)\n for elem in save_seq:\n save_path = Path(DATA_DIR, '{workdir}_{count}'.format(workdir=target_dir_name, count=elem+1))\n if elem == save_seq[-1]:\n rmtree(save_path, ignore_errors=True)\n if not save_path.exists():\n target_dir.rename(save_path)\n return save_path.name\n else:\n rmtree(target_dir, ignore_errors=True)\n\n return False\n"
},
{
"alpha_fraction": 0.625806450843811,
"alphanum_fraction": 0.625806450843811,
"avg_line_length": 30,
"blob_id": "f81f95457b47571ac65d38f6a00789cebb217745",
"content_id": "037b32f037792f7451b6b8203d419a2509286512",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 155,
"license_type": "permissive",
"max_line_length": 83,
"num_lines": 5,
"path": "/cisco_sdwan/__init__.py",
"repo_name": "ccie29824/CiscoDevNet-sastre",
"src_encoding": "UTF-8",
"text": "\"\"\"\nSastre - Automation Tools for Cisco SD-WAN Powered by Viptela\n\n\"\"\"\nfrom .__version__ import __author__, __version__, __copyright__, __email__, __url__\n"
},
{
"alpha_fraction": 0.5268292427062988,
"alphanum_fraction": 0.5275353193283081,
"avg_line_length": 61.570281982421875,
"blob_id": "42673bb45232b678c97a96c4b87bb0efcdc7e19c",
"content_id": "b9bd54057629837cb5ac8b2cf39e4997c1f04978",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 15580,
"license_type": "permissive",
"max_line_length": 120,
"num_lines": 249,
"path": "/cisco_sdwan/tasks/implementation/_restore.py",
"repo_name": "ccie29824/CiscoDevNet-sastre",
"src_encoding": "UTF-8",
"text": "import argparse\nfrom cisco_sdwan.__version__ import __doc__ as title\nfrom cisco_sdwan.base.rest_api import RestAPIException, is_version_newer\nfrom cisco_sdwan.base.catalog import catalog_iter, CATALOG_TAG_ALL, ordered_tags\nfrom cisco_sdwan.base.models_base import UpdateEval, ServerInfo\nfrom cisco_sdwan.base.models_vmanage import (DeviceTemplateIndex, PolicyVsmartIndex, EdgeInventory, ControlInventory,\n CheckVBond)\nfrom cisco_sdwan.tasks.utils import TaskOptions, TagOptions, existing_file_type, regex_type, default_workdir\nfrom cisco_sdwan.tasks.common import regex_search, Task, WaitActionsException\n\n\[email protected]('restore')\nclass TaskRestore(Task):\n @staticmethod\n def parser(task_args, target_address=None):\n task_parser = argparse.ArgumentParser(description=f'{title}\\nRestore task:')\n task_parser.prog = f'{task_parser.prog} restore'\n task_parser.formatter_class = argparse.RawDescriptionHelpFormatter\n\n task_parser.add_argument('--workdir', metavar='<directory>', type=existing_file_type,\n default=default_workdir(target_address),\n help='restore source (default: %(default)s)')\n task_parser.add_argument('--regex', metavar='<regex>', type=regex_type,\n help='regular expression matching item names to be restored, within selected tags')\n task_parser.add_argument('--dryrun', action='store_true',\n help='dry-run mode. Items to be restored are listed but not pushed to vManage.')\n task_parser.add_argument('--attach', action='store_true',\n help='attach devices to templates and activate vSmart policy after restoring items')\n task_parser.add_argument('--force', action='store_true',\n help='target vManage items with the same name as the corresponding item in workdir '\n 'are updated with the contents from workdir. Without this option, those items '\n 'are skipped and not overwritten.')\n task_parser.add_argument('tag', metavar='<tag>', type=TagOptions.tag,\n help='tag for selecting items to be restored. 
Items that are dependencies of the '\n 'specified tag are automatically included. Available tags: '\n f'{TagOptions.options()}. Special tag \"{CATALOG_TAG_ALL}\" selects all items.')\n return task_parser.parse_args(task_args)\n\n def runner(self, parsed_args, api, task_output=None):\n def load_items(index, item_cls):\n item_iter = (\n (item_id, item_cls.load(parsed_args.workdir, index.need_extended_name, item_name, item_id))\n for item_id, item_name in index\n )\n return ((item_id, item_obj) for item_id, item_obj in item_iter if item_obj is not None)\n\n self.log_info('Starting restore%s: Local workdir: \"%s\" -> vManage URL: \"%s\"',\n ', DRY-RUN mode' if parsed_args.dryrun else '', parsed_args.workdir, api.base_url)\n\n local_info = ServerInfo.load(parsed_args.workdir)\n # Server info file may not be present (e.g. backup from older Sastre releases)\n if local_info is not None and is_version_newer(api.server_version, local_info.server_version):\n self.log_warning('Target vManage release (%s) is older than the release used in backup (%s). 
'\n 'Items may fail to be restored due to incompatibilities across releases.',\n api.server_version, local_info.server_version)\n check_vbond = CheckVBond.get(api)\n if check_vbond is None:\n self.log_warning('Failed retrieving vBond configuration status.')\n is_vbond_set = False\n else:\n is_vbond_set = check_vbond.is_configured\n\n self.log_info('Loading existing items from target vManage')\n target_all_items_map = {\n hash(type(index)): {item_name: item_id for item_id, item_name in index}\n for _, _, index, item_cls in self.index_iter(api, catalog_iter(CATALOG_TAG_ALL, version=api.server_version))\n }\n\n self.log_info('Identifying items to be pushed')\n id_mapping = {} # {<old_id>: <new_id>}, used to replace old (saved) item ids with new (target) ids\n restore_list = [] # [ (<info>, <index_cls>, [(<item_id>, <item>, <id_on_target>), ...]), ...]\n dependency_set = set() # {<item_id>, ...}\n match_set = set() # {<item_id>, ...}\n for tag in ordered_tags(parsed_args.tag):\n if tag == 'template_device' and not is_vbond_set:\n self.log_warning('Will skip %s items because vBond is not configured. 
'\n 'On vManage, Administration > Settings > vBond.', tag)\n continue\n\n self.log_info('Inspecting %s items', tag)\n tag_iter = (\n (info, index, load_items(index, item_cls))\n for _, info, index, item_cls in self.index_iter(parsed_args.workdir,\n catalog_iter(tag, version=api.server_version))\n )\n for info, index, loaded_items_iter in tag_iter:\n target_item_map = target_all_items_map.get(hash(type(index)))\n if target_item_map is None:\n # Logging at warning level because the backup files did have this item\n self.log_warning('Will skip %s, item not supported by target vManage', info)\n continue\n\n restore_item_list = []\n for item_id, item in loaded_items_iter:\n target_id = target_item_map.get(item.name)\n if target_id is not None:\n # Item already exists on target vManage, record item id from target\n if item_id != target_id:\n id_mapping[item_id] = target_id\n\n if not parsed_args.force:\n # Existing item on target vManage will be used, i.e. will not overwrite it\n self.log_debug('Will skip %s %s, item already on target vManage', info, item.name)\n continue\n\n if item.is_readonly:\n self.log_debug('Will skip read-only %s %s', info, item.name)\n continue\n\n item_matches = (\n (parsed_args.tag == CATALOG_TAG_ALL or parsed_args.tag == tag) and\n (parsed_args.regex is None or regex_search(parsed_args.regex, item.name))\n )\n if item_matches:\n match_set.add(item_id)\n if item_matches or item_id in dependency_set:\n # A target_id that is not None signals a put operation, as opposed to post.\n # target_id will be None unless --force is specified and item name is on target\n restore_item_list.append((item_id, item, target_id))\n dependency_set.update(item.id_references_set)\n\n if len(restore_item_list) > 0:\n restore_list.append((info, index, restore_item_list))\n\n log_prefix = 'DRY-RUN: ' if parsed_args.dryrun else ''\n if len(restore_list) > 0:\n self.log_info('%sPushing items to vManage', log_prefix)\n # Items were added to restore_list following 
ordered_tags() order (i.e. higher level items before lower\n # level items). The reverse order needs to be followed on restore.\n for info, index, restore_item_list in reversed(restore_list):\n pushed_item_dict = {}\n for item_id, item, target_id in restore_item_list:\n op_info = 'Create' if target_id is None else 'Update'\n reason = ' (dependency)' if item_id in dependency_set - match_set else ''\n\n try:\n if target_id is None:\n # Create new item\n if parsed_args.dryrun:\n self.log_info('%s%s %s %s%s', log_prefix, op_info, info, item.name, reason)\n continue\n # Not using item id returned from post because post can return empty (e.g. local policies)\n api.post(item.post_data(id_mapping), item.api_path.post)\n pushed_item_dict[item.name] = item_id\n else:\n # Update existing item\n update_data = item.put_data(id_mapping)\n if item.get_raise(api, target_id).is_equal(update_data):\n self.log_debug('%s%s skipped (no diffs) %s %s', log_prefix, op_info, info, item.name)\n continue\n\n if parsed_args.dryrun:\n self.log_info('%s%s %s %s%s', log_prefix, op_info, info, item.name, reason)\n continue\n\n put_eval = UpdateEval(api.put(update_data, item.api_path.put, target_id))\n if put_eval.need_reattach:\n if put_eval.is_master:\n self.log_info('Updating %s %s requires reattach', info, item.name)\n attach_data = self.attach_template_data(\n api, parsed_args.workdir, index.need_extended_name,\n [(item.name, item_id, target_id)]\n )\n else:\n self.log_info('Updating %s %s requires reattach of affected templates',\n info, item.name)\n target_templates = {item_id: item_name\n for item_id, item_name in DeviceTemplateIndex.get_raise(api)}\n templates_iter = (\n (target_templates[tgt_id], tgt_id)\n for tgt_id in put_eval.templates_affected_iter()\n )\n attach_data = self.reattach_template_data(api, templates_iter)\n\n # All re-attachments need to be done in a single request, thus 9999 for chunk_size\n num_attach = self.attach(\n api, *attach_data, log_context='reattaching 
templates', chunk_size=9999\n )\n self.log_debug('Attach requests processed: %s', num_attach)\n elif put_eval.need_reactivate:\n self.log_info('Updating %s %s requires vSmart policy reactivate', info, item.name)\n action_list = self.activate_policy(\n api, *PolicyVsmartIndex.get_raise(api).active_policy, is_edited=True\n )\n self.wait_actions(api, action_list, 'reactivating vSmart policy', raise_on_failure=True)\n except (RestAPIException, WaitActionsException) as ex:\n self.log_error('Failed %s %s %s%s: %s', op_info, info, item.name, reason, ex)\n else:\n self.log_info('Done: %s %s %s%s', op_info, info, item.name, reason)\n\n # Read new ids from target and update id_mapping\n try:\n new_target_item_map = {item_name: item_id for item_id, item_name in index.get_raise(api)}\n for item_name, old_item_id in pushed_item_dict.items():\n id_mapping[old_item_id] = new_target_item_map[item_name]\n except RestAPIException as ex:\n self.log_critical('Failed retrieving %s: %s', info, ex)\n break\n else:\n self.log_info('%sNo items to push', log_prefix)\n\n if parsed_args.attach:\n try:\n target_templates = {item_name: item_id for item_id, item_name in DeviceTemplateIndex.get_raise(api)}\n target_policies = {item_name: item_id for item_id, item_name in PolicyVsmartIndex.get_raise(api)}\n saved_template_index = DeviceTemplateIndex.load(parsed_args.workdir, raise_not_found=True)\n\n # Attach WAN Edge templates\n edge_templates_iter = (\n (saved_name, saved_id, target_templates.get(saved_name))\n for saved_id, saved_name in saved_template_index.filtered_iter(DeviceTemplateIndex.is_not_vsmart)\n )\n attach_data = self.attach_template_data(\n api, parsed_args.workdir, saved_template_index.need_extended_name, edge_templates_iter,\n target_uuid_set={uuid for uuid, _ in EdgeInventory.get_raise(api)}\n )\n reqs = self.attach(api, *attach_data, dryrun=parsed_args.dryrun, log_context='attaching WAN Edges')\n if reqs:\n self.log_debug('%sAttach requests processed: %s', log_prefix, reqs)\n 
else:\n self.log_info('No WAN Edge attachments needed')\n\n # Attach vSmart template\n vsmart_templates_iter = (\n (saved_name, saved_id, target_templates.get(saved_name))\n for saved_id, saved_name in saved_template_index.filtered_iter(DeviceTemplateIndex.is_vsmart)\n )\n vsmart_set = {\n uuid for uuid, _ in ControlInventory.get_raise(api).filtered_iter(ControlInventory.is_vsmart)\n }\n attach_data = self.attach_template_data(api, parsed_args.workdir,\n saved_template_index.need_extended_name, vsmart_templates_iter,\n target_uuid_set=vsmart_set)\n reqs = self.attach(api, *attach_data, dryrun=parsed_args.dryrun, log_context=\"attaching vSmarts\")\n if reqs:\n self.log_debug('%sAttach requests processed: %s', log_prefix, reqs)\n else:\n self.log_info('No vSmart attachments needed')\n\n # Activate vSmart policy\n if not parsed_args.dryrun:\n _, policy_name = PolicyVsmartIndex.load(parsed_args.workdir, raise_not_found=True).active_policy\n action_list = self.activate_policy(api, target_policies.get(policy_name), policy_name)\n if len(action_list) == 0:\n self.log_info('No vSmart policy to activate')\n else:\n self.wait_actions(api, action_list, 'activating vSmart policy', raise_on_failure=True)\n except (RestAPIException, FileNotFoundError, WaitActionsException) as ex:\n self.log_critical('Attach failed: %s', ex)\n"
},
{
"alpha_fraction": 0.7300771474838257,
"alphanum_fraction": 0.7300771474838257,
"avg_line_length": 20.61111068725586,
"blob_id": "b16ffcf7098a4c7c1cbe304eb079bc3ba43d7a15",
"content_id": "f4d1bc5d14ba176d2e1fac448931403daf29155d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 389,
"license_type": "permissive",
"max_line_length": 62,
"num_lines": 18,
"path": "/cisco_sdwan/tasks/implementation/__init__.py",
"repo_name": "ccie29824/CiscoDevNet-sastre",
"src_encoding": "UTF-8",
"text": "\"\"\"\n Sastre - Automation Tools for Cisco SD-WAN Powered by Viptela\n\n cisco_sdwan.tasks.implementation\n This module contains the implementation of user-facing tasks\n\"\"\"\nfrom ._backup import TaskBackup\nfrom ._restore import TaskRestore\nfrom ._delete import TaskDelete\nfrom ._migrate import TaskMigrate\n\n\n__all__ = [\n 'TaskBackup',\n 'TaskRestore',\n 'TaskDelete',\n 'TaskMigrate'\n]\n"
},
{
"alpha_fraction": 0.5723420977592468,
"alphanum_fraction": 0.5725419521331787,
"avg_line_length": 56.517242431640625,
"blob_id": "01b17b5f2ae7288bc25fa84567cdfd66fcc16d6f",
"content_id": "3456b53472a70153c1eb94f3543357b017623937",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5004,
"license_type": "permissive",
"max_line_length": 118,
"num_lines": 87,
"path": "/cisco_sdwan/tasks/implementation/_delete.py",
"repo_name": "ccie29824/CiscoDevNet-sastre",
"src_encoding": "UTF-8",
"text": "import argparse\nfrom cisco_sdwan.__version__ import __doc__ as title\nfrom cisco_sdwan.base.rest_api import RestAPIException\nfrom cisco_sdwan.base.catalog import catalog_iter, CATALOG_TAG_ALL, ordered_tags\nfrom cisco_sdwan.base.models_vmanage import DeviceTemplateIndex\nfrom cisco_sdwan.tasks.utils import TaskOptions, TagOptions, regex_type\nfrom cisco_sdwan.tasks.common import regex_search, Task, WaitActionsException\n\n\[email protected]('delete')\nclass TaskDelete(Task):\n @staticmethod\n def parser(task_args, target_address=None):\n task_parser = argparse.ArgumentParser(description=f'{title}\\nDelete task:')\n task_parser.prog = f'{task_parser.prog} delete'\n task_parser.formatter_class = argparse.RawDescriptionHelpFormatter\n\n task_parser.add_argument('--regex', metavar='<regex>', type=regex_type,\n help='regular expression matching item names to be deleted, within selected tags')\n task_parser.add_argument('--dryrun', action='store_true',\n help='dry-run mode. Items matched for removal are listed but not deleted.')\n task_parser.add_argument('--detach', action='store_true',\n help='USE WITH CAUTION! Detach devices from templates and deactivate vSmart policy '\n 'before deleting items. This allows deleting items that are associated with '\n 'attached templates and active policies.')\n task_parser.add_argument('tag', metavar='<tag>', type=TagOptions.tag,\n help='tag for selecting items to be deleted. Available tags: '\n f'{TagOptions.options()}. 
Special tag \"{CATALOG_TAG_ALL}\" selects all items.')\n return task_parser.parse_args(task_args)\n\n def runner(self, parsed_args, api, task_output=None):\n self.log_info('Starting delete%s: vManage URL: \"%s\"',\n ', DRY-RUN mode' if parsed_args.dryrun else '', api.base_url)\n log_prefix = 'DRY-RUN: ' if parsed_args.dryrun else ''\n\n if parsed_args.detach:\n try:\n template_index = DeviceTemplateIndex.get_raise(api)\n # Detach WAN Edge templates\n reqs = self.detach(api, template_index.filtered_iter(DeviceTemplateIndex.is_not_vsmart),\n dryrun=parsed_args.dryrun, log_context='detaching WAN Edges')\n if reqs:\n self.log_debug('%sDetach requests processed: %s', log_prefix, reqs)\n else:\n self.log_info('No WAN Edge attached')\n # Deactivate vSmart policy\n if not parsed_args.dryrun:\n action_list = self.deactivate_policy(api)\n if len(action_list) == 0:\n self.log_info('No vSmart policy activated')\n else:\n self.wait_actions(api, action_list, 'deactivating vSmart policy', raise_on_failure=True)\n # Detach vSmart template\n reqs = self.detach(api, template_index.filtered_iter(DeviceTemplateIndex.is_vsmart),\n dryrun=parsed_args.dryrun, log_context='detaching vSmarts')\n if reqs:\n self.log_debug('%sDetach requests processed: %s', log_prefix, reqs)\n else:\n self.log_info('No vSmart attached')\n except (RestAPIException, WaitActionsException) as ex:\n self.log_critical('Detach failed: %s', ex)\n return\n\n for tag in ordered_tags(parsed_args.tag, parsed_args.tag != CATALOG_TAG_ALL):\n self.log_info('Inspecting %s items', tag)\n matched_item_iter = (\n (item_name, item_id, item_cls, info)\n for _, info, index, item_cls in self.index_iter(api, catalog_iter(tag, version=api.server_version))\n for item_id, item_name in index\n if parsed_args.regex is None or regex_search(parsed_args.regex, item_name)\n )\n for item_name, item_id, item_cls, info in matched_item_iter:\n item = item_cls.get(api, item_id)\n if item is None:\n self.log_warning('Failed retrieving %s %s', 
info, item_name)\n continue\n if item.is_readonly or item.is_system:\n self.log_debug('Skipped %s %s %s', 'read-only' if item.is_readonly else 'system', info, item_name)\n continue\n if parsed_args.dryrun:\n self.log_info('DRY-RUN: Delete %s %s', info, item_name)\n continue\n\n if api.delete(item_cls.api_path.delete, item_id):\n self.log_info('Done: Delete %s %s', info, item_name)\n else:\n self.log_warning('Failed deleting %s %s', info, item_name)\n"
}
] | 14 |
StockLin/py3_template
|
https://github.com/StockLin/py3_template
|
0858d5814699c91328840363a5c7297abf7b7173
|
cde1a310350e1ba92599c44b3300eb3fd821b926
|
eb53cabdaee6f6872efb6609b8f002598a290ce4
|
refs/heads/main
| 2023-04-18T00:42:10.902207 | 2021-04-28T13:54:40 | 2021-04-28T13:54:40 | 362,485,475 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5249999761581421,
"alphanum_fraction": 0.7250000238418579,
"avg_line_length": 19,
"blob_id": "c56825c8025fab922b422f025ba387608bbc68bc",
"content_id": "918feda5a5c02c0e57d7ebe49232c5b402eeea1e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 40,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 2,
"path": "/requirements.txt",
"repo_name": "StockLin/py3_template",
"src_encoding": "UTF-8",
"text": "dependency-injector==4.32.2\nsix==1.15.0\n"
},
{
"alpha_fraction": 0.6196531653404236,
"alphanum_fraction": 0.6208092570304871,
"avg_line_length": 23.742856979370117,
"blob_id": "9846281238a7b65b73c6bd92a38c731927f72d69",
"content_id": "ca6a360accb31820fa351b470160ab3754439821",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 865,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 35,
"path": "/main.py",
"repo_name": "StockLin/py3_template",
"src_encoding": "UTF-8",
"text": "import os, sys\nimport logging\nfrom dependency_injector.wiring import inject, Provide\nfrom containers import Containers\nfrom config import Config\nfrom apps.example_module.example import IExample\n\n\n@inject\ndef main(example:IExample=Provide[Containers.example_instance]):\n example.run()\n\n\nif __name__ == \"__main__\":\n try:\n container = Containers()\n container.init_resources()\n container.config.from_dict(\n {\n \"name\": Config.name,\n \"period\": Config.period\n } \n )\n\n logging.warning(f\"start main service.\")\n\n container.wire(modules=[sys.modules[__name__]])\n main(*sys.argv[1:])\n\n except KeyboardInterrupt:\n logging.warning(\"KeyboardInterrupt in main.\")\n\n except Exception as e:\n logging.error(f\"unexcepted error...... {str(e)}\")\n raise e"
},
{
"alpha_fraction": 0.5462962985038757,
"alphanum_fraction": 0.5502645373344421,
"avg_line_length": 23.419355392456055,
"blob_id": "e349f44bb9c9c7f343260969acd7523666723071",
"content_id": "394745d0a7577a25e90033a10da3ecf9de036858",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 756,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 31,
"path": "/apps/example_module/example.py",
"repo_name": "StockLin/py3_template",
"src_encoding": "UTF-8",
"text": "import time\nimport logging\nimport abc\n\n\nclass IExample(abc.ABC):\n \n @abc.abstractmethod\n def run(self, name:str, period:int):\n raise NotImplementedError\n\n\nclass Example(IExample):\n\n def __init__(self, name=\"\", period=10):\n self.__name = name\n self.__period = period\n\n def run(self, name=\"\", period=0):\n try:\n self.__name = name if name else self.__name\n self.__period = period if period else self.__period\n\n logging.info(f\"Start Example..... {self.__name }, {self.__period}\")\n\n while True:\n print(f\"hello my example[{self.__period}]...... {self.__name }\")\n time.sleep(int(self.__period))\n\n except Exception as e:\n raise e"
},
{
"alpha_fraction": 0.6994818449020386,
"alphanum_fraction": 0.7253885865211487,
"avg_line_length": 18.399999618530273,
"blob_id": "22581745779fe13a4a91dbe95413e6e9d819a4ce",
"content_id": "6d88d5c9a027b5a2c609a36fcfd64cdfac23d606",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 193,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 10,
"path": "/Dockerfile",
"repo_name": "StockLin/py3_template",
"src_encoding": "UTF-8",
"text": "FROM python:3.8\nLABEL maintainer StarkLin \"[email protected]\"\n\nRUN mkdir /app; exit 0\nWORKDIR /app\nADD . /app\n\nRUN pip3 --disable-pip-version-check install -r requirements.txt\n\nCMD [\"python3\", \"main.py\"]"
},
{
"alpha_fraction": 0.6739766001701355,
"alphanum_fraction": 0.6739766001701355,
"avg_line_length": 23.428571701049805,
"blob_id": "3de6ccbf12d3d23338ea688c6b0a7af912d494ae",
"content_id": "d514a350caf4cacc210de1535ab288654e12ea9c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 684,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 28,
"path": "/containers.py",
"repo_name": "StockLin/py3_template",
"src_encoding": "UTF-8",
"text": "import os, sys\nimport logging.config\nfrom dependency_injector import containers, providers\nfrom apps.example_module.example import Example\n\n\nclass Containers(containers.DeclarativeContainer):\n config = providers.Configuration()\n\n logging = providers.Resource(\n logging.config.fileConfig,\n fname=os.path.join(os.getcwd(), \"logging.ini\")\n )\n\n # declare objects initialize\n\n example_instance = providers.Singleton(\n Example,\n name=config.name,\n period=config.period\n )\n\n # or factory initail instance\n # example_instance = providers.Factory(\n # Example,\n # name=config.name,\n # period=config.period\n # )\n"
},
{
"alpha_fraction": 0.689497709274292,
"alphanum_fraction": 0.698630154132843,
"avg_line_length": 21,
"blob_id": "cfdcacad89d545735ca93f3bb21e3b06e7ddeed6",
"content_id": "e23d51dc4738eeced912738fd85d2daa4ece6ff6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 219,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 10,
"path": "/config.py",
"repo_name": "StockLin/py3_template",
"src_encoding": "UTF-8",
"text": "import os\n\n\ndefault_name = \"DefaultIni\"\ndefault_period = 10\n\n\nclass Config:\n name = os.getenv('name') if os.getenv('name') else default_name\n period = os.getenv('period') if os.getenv('period') else default_period"
}
] | 6 |
badabeast/python_requests_unittest
|
https://github.com/badabeast/python_requests_unittest
|
3b55b4055340a6e5af511ee1a7aa8e4a2bdffbb5
|
ea45cdcbf11b18196976ed3bd8710ca2e9df49c9
|
04747d2d070669d4dbf4d258a717115d59469c0e
|
refs/heads/master
| 2020-06-24T15:25:19.024649 | 2019-07-26T10:46:19 | 2019-07-26T10:46:26 | 199,000,235 | 1 | 1 | null | null | null | null | null |
[
{
"alpha_fraction": 0.610230565071106,
"alphanum_fraction": 0.6159942150115967,
"avg_line_length": 25.188678741455078,
"blob_id": "d73b086759fe6a3cee878d10e7bc81175bd88797",
"content_id": "3f1c5a0a230096d93a3fd23c81223d4902c7b150",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1574,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 53,
"path": "/common/configemail.py",
"repo_name": "badabeast/python_requests_unittest",
"src_encoding": "UTF-8",
"text": "import smtplib\nimport os\nimport sys\nfrom email.mime.text import MIMEText\nfrom readconfig import ReadConfig\nimport datetime\nimport getpathinfo\n\n# 读取配置文件\n\nread_conf = ReadConfig()\n# 从配置文件中读取,smtp服务器、端口、发件人、密码、收件人、抄送人\nsmtp = read_conf.get_email(\"host\")\nport = read_conf.get_email(\"port\")\nuser = read_conf.get_email(\"user\")\npwd = read_conf.get_email(\"pwd\")\naddressee = read_conf.get_email(\"addressee\")\n# addressee=list(addressee.split(\";\"))\n\n\nchaosong = read_conf.get_email(\"chaosong\")\n# 从配置文件中读取,邮件主题\nnow = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\nsubject = \"hello,这是\" + now + read_conf.get_email(\"subject\")\nprint(subject)\nmail_path = os.path.join(getpathinfo.get_path(), 'result', 'report.html') # 获取测试报告路径\n\ncontent = \"<h1>测试报告来咯!</h1>\"\n\n\nclass SendEmail(object):\n def aliyun(self):\n # 构造邮件\n msg = MIMEText(content, \"html\", \"gbk\") # msg邮件对象\n msg['Subject'] = subject\n msg['From'] = user\n msg['to'] = addressee\n msg['Accept-Language'] = 'zh-CN'\n msg['Accept-Charset'] = 'ISO-8859-1,utf-8'\n\n # 发送邮件\n try:\n ss = smtplib.SMTP_SSL(smtp, port)\n ss.login(user, pwd)\n ss.sendmail(user, addressee, msg.as_string()) # 发送\n print(\"发送成功!\")\n except Exception as e:\n print(\"发送失败!详情:\", e)\n\n\nif __name__ == \"__main__\":\n print(subject)\n SendEmail().aliyun()\n"
},
{
"alpha_fraction": 0.5784821510314941,
"alphanum_fraction": 0.5818347930908203,
"avg_line_length": 32.479591369628906,
"blob_id": "7a23ef51be806de225785f65273447b3e72eb2d0",
"content_id": "fc4d6c38c2509e2f5229c0beaaa99aeea2e389df",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3619,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 98,
"path": "/testCase/test01case.py",
"repo_name": "badabeast/python_requests_unittest",
"src_encoding": "UTF-8",
"text": "import json\nimport unittest\nfrom common.commonhttp import RunMain\nimport paramunittest\nimport geturlparams\nimport urllib.parse\nfrom logging_method import LoggingMethod\nimport readexcel\nfrom common.commoncode import common_errorcode\nlogger = LoggingMethod(__name__).getlogger()\nurl = geturlparams.GetUrlParams().get_url() # 调用我们的geturlParams获取我们拼接的URL\nlogin_xls = (readexcel.ReadExcel().get_xlsx(\"interface_usecases.xlsx\", \"login\"))\n\n\[email protected](*login_xls) # 登录接口\nclass UserLogin(unittest.TestCase):\n def setParameters(self, case_module, case_name, note, path, method, query, status):\n \"\"\"\n 与xlsx的列名一致\n set params\n :param case_module\n :param case_name\n :param note\n :param path\n :param method\n :param query\n :param status\n :return:\n \"\"\"\n self.case_module = str(case_module)\n self.case_name = str(case_name)\n self.note = str(note)\n self.path = str(path)\n self.method = str(method)\n # self.query = str(query)\n self.query = query\n self.status = str(status)\n\n def description(self):\n \"\"\"\n test report description\n :return:\n \"\"\"\n print(self.case_name+\"正在测试登录模块\")\n logger.info(self.case_name+\"正在测试登录模块\")\n\n def setUp(self):\n \"\"\"\n :return:\n \"\"\"\n full_url = url + self.query\n self.data_list = dict(urllib.parse.parse_qsl(\n urllib.parse.urlsplit(full_url).query)) # 将一个完整的URL中的name=&pwd=转换为{\"name\":\"xxx\",\"pwd\":\"bbb\"}\n self.info = RunMain().run_main(self.method, url, self.data_list) # 根据Excel中的method调用run_main来进行requests请求,并拿到响应\n print(self.case_name + \"准备开始测试\")\n logger.info(self.case_name + \"准备开始测试\")\n\n def test01case(self):\n self.checkResult()\n\n def tearDown(self):\n print(\"测试结束,输出log完结\\n\\n\")\n\n def checkResult(self): # 断言\n \"\"\"\n check test result\n :return:\n \"\"\"\n # url1 = url\n # new_url = url1 + self.query\n # data_list = dict(urllib.parse.parse_qsl(\n # urllib.parse.urlsplit(new_url).query)) # 将一个完整的URL中的name=&pwd=转换为{\"name\":\"xxx\",\"pwd\":\"bbb\"}\n # info = 
RunMain().run_main(self.method, url, data_list) # 根据Excel中的method调用run_main来进行requests请求,并拿到响应\n # print(self.case_name, \"的json响应结果\" + \"\\n\", info)\n if self.info is not None and self.info != \"\":\n ss = json.loads(self.info) # 将响应转换为字典格式\n common_errorcode(ss)\n if self.case_name == \"login_success\": # 如果case_name是login_success,说明合法,返回的code应该为200\n if (ss[\"success\"]) is True and ss[\"errorCode\"] == 0:\n self.assertIsNotNone(ss[\"data\"][\"accessToken\"])\n self.assertEqual(ss[\"data\"][\"type\"], 3)\n self.assertEqual(ss[\"data\"][\"phone\"], self.data_list[\"phone\"])\n self.assertIsNotNone(ss[\"data\"][\"id\"])\n\n # except Exception as erq:\n # print(self.case_name, erq)\n # else:\n # print(\"目前执行的测试用例:\", self.case_name)\n # print(\"断言失败\")\n # finally:\n # logger.info(\"testcase01 结束\")\n else:\n logger.error(\"响应结果为空\")\n\n\nif __name__ == \"_main\":\n\n unittest.main()\n"
},
{
"alpha_fraction": 0.5726950168609619,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 11.818181991577148,
"blob_id": "afb2e26682011cd811eccc91dd82f913987f5d6b",
"content_id": "efdc1fc4a65cbd7fa4eb9ed35e846cccb542dd26",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 638,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 44,
"path": "/config/config.ini",
"repo_name": "badabeast/python_requests_unittest",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n[APP]\nServerModel = 0\n\n# 0测试服,1预发布,2正式服\n[Local_Database]\nhost = localhost\nusername ='root'\npassword =123456\nport =3306\ndatabase='test'\n\n\n\n# 0测试服\n[TestHttp]\nbaseurl = http://192.168.10.209\nport = 18087\ntimeout = 3.0\n# 1预发布\n[preHttp]\nbaseurl = http://192.168.10.111\nport = 18087\ntimeout = 3.0\n# 2正式服\n[ReleaseHttp]\nbaseurl = https://api.aircourses.com\nport = \"\"\ntimeout = 3.0\n\n\n\n\n\n[EMAIL]\non_off = off\nhost = smtp.qiye.aliyun.com\nport = 465\nuser = [email protected]\npwd = Zweibo1234\nsubject = 接口自动化测试报告\n# 以;连接邮箱地址\naddressee = [email protected];\nchaosong = \"\"\n"
},
{
"alpha_fraction": 0.6635730862617493,
"alphanum_fraction": 0.6635730862617493,
"avg_line_length": 19.5238094329834,
"blob_id": "a4fee840f7e2290e64eb196e7c56ac31ab1795aa",
"content_id": "641e4ce80bfbad9905a4ed8c734eee9c3df63edc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 633,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 21,
"path": "/getpathinfo.py",
"repo_name": "badabeast/python_requests_unittest",
"src_encoding": "UTF-8",
"text": "\"\"\"\n当A调用B时\nos.getcwd 返回的是最外层的路径:A文件所在的路径\nos.path.dirname(__file__)返回的是.py文件的目录\nos.path.abspath(__file__)返回的是.py文件的绝对路径(完整路径)\n\nos.path.realpath()先处理路径中的符号链接,再返回绝对路径\nlinux 中重定向文件显示 b -> a\n用os.path.realpath() 显示b\nos.path.abspath(__file__) 返回a\n\"\"\"\nimport os\n\n\ndef get_path():\n path = os.path.dirname(os.path.abspath(__file__))\n return path\n\n\nif __name__ == '__main__': # 执行该文件,测试下是否OK\n print('测试路径是否OK,路径为:', get_path())\n"
},
{
"alpha_fraction": 0.5981594920158386,
"alphanum_fraction": 0.6002045273780823,
"avg_line_length": 27.735294342041016,
"blob_id": "086738e7cd83942202f4bf844fc158785e2a9817",
"content_id": "8020a9b1246819b9dbbd5526c0967639cec23f8b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1160,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 34,
"path": "/readexcel.py",
"repo_name": "badabeast/python_requests_unittest",
"src_encoding": "UTF-8",
"text": "import os\nfrom getpathinfo import get_path\nfrom openpyxl import load_workbook\n\nfilepath = get_path()\nprint(filepath)\n\n\"\"\"\n读取xlsx表格\n\"\"\"\n\n\nclass ReadExcel(object):\n def get_xlsx(self, xlsx_name, sheet_name):\n excel_list = []\n excelpath = os.path.join(filepath, 'testCase', xlsx_name)\n excel = load_workbook(excelpath) # 加载文件\n # 获取sheet\n sheet = excel[sheet_name] # 获取表名\n # 获取行数和列数\n # maxrow = sheet.max_row # 获取行数\n # maxcol = sheet.max_column # 获取列数\n\n for row in sheet.rows: # 根据行数做循环\n son_list = []\n for cell in row:\n son_list.append(cell.value) # 将每一行的数据添加到son_list列表里面\n if son_list[1] != u'case_name': # son_list的第2列不等于case_name那么我们把这行的数据添加到excel_list = []\n excel_list.append(son_list)\n return excel_list\n\n\nif __name__ == '__main__': # 我们执行该文件测试一下是否可以正确获取Excel中的值\n print(ReadExcel().get_xlsx('interface_usecases.xlsx', 'login'))\n\n"
},
{
"alpha_fraction": 0.5892857313156128,
"alphanum_fraction": 0.6039915680885315,
"avg_line_length": 29.70967674255371,
"blob_id": "d7771efd20f3488905e032fd18cdc988cc6e0d1d",
"content_id": "0b1a3172e242c128a4bc2d397e59ea36f4ef16b4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1040,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 31,
"path": "/common/commoncode.py",
"repo_name": "badabeast/python_requests_unittest",
"src_encoding": "UTF-8",
"text": "# -*- coding:utf-8 -*-\n# :carriere\nimport json\n\ndef common_errorcode(result):\n \"\"\"\n 断言通用code\n 只接受 dict类型的响应结果\n \"\"\"\n print(result)\n print(type(result))\n if result[\"errorCode\"] == 0:\n assert result[\"success\"] is True\n return result[\"success\"]\n elif result[\"errorCode\"] == 103:\n assert result[\"success\"] is False\n assert result[\"errorMessage\"] == \"用户名或密码错误,请重试!\"\n return result[\"errorMessage\"]\n elif result[\"errorCode\"] == 401:\n assert result[\"success\"] is False\n assert result[\"errorMessage\"] == \"验证码错误,请检查\"\n return result[\"errorMessage\"]\n elif result[\"errorCode\"] == 407:\n assert result[\"success\"] is False\n assert result[\"errorMessage\"] == \"请输入正确的手机号\"\n return result[\"errorMessage\"]\n elif result[\"errorCode\"] == 101:\n assert result[\"success\"] is False\n return result[\"errorMessage\"]\n else:\n return result[\"errorCode\"]\n"
},
{
"alpha_fraction": 0.6791045069694519,
"alphanum_fraction": 0.6940298676490784,
"avg_line_length": 12.5,
"blob_id": "8ac6e3b927ecf370bd26e25b270151dc004c8c36",
"content_id": "b32f725a198795336ef44596934343f2f20bc530",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 134,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 10,
"path": "/learnruquest.py",
"repo_name": "badabeast/python_requests_unittest",
"src_encoding": "UTF-8",
"text": "import requests\nimport pprint\nr = requests.get('https://github.com/timeline.json')\nprint(r.text)\n\n\n\n\nfor i in range(1,5):\n print(i)"
},
{
"alpha_fraction": 0.6083707213401794,
"alphanum_fraction": 0.6113602519035339,
"avg_line_length": 29.409090042114258,
"blob_id": "9469b49d5704016ff4a0e0232a703c5177680b33",
"content_id": "d579517ebefab2bb76acd758e283c17576c03421",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 729,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 22,
"path": "/geturlparams.py",
"repo_name": "badabeast/python_requests_unittest",
"src_encoding": "UTF-8",
"text": "from readconfig import ReadConfig\nfrom readexcel import ReadExcel\n\nreadconfig = ReadConfig()\npath = ReadExcel().get_xlsx('interface_usecases.xlsx', 'login')[0][3]\n# print(path)\n\n\nclass GetUrlParams(): # 定义一个方法,将从配置文件中读取的进行拼接\n def get_url(self):\n config_url = readconfig.get_http(\"baseurl\")\n if config_url != 'https://api.aircourses.com':\n new_url = readconfig.get_http('baseurl') + \":\" + readconfig.get_http('port') + path+\"?\"\n else:\n new_url = config_url+\"?\"\n # logger.info('new_url'+new_url)\n return new_url\n\n\nif __name__ == '__main__': # 验证拼接后的正确性\n print(__file__)\n print(GetUrlParams().get_url())\n"
},
{
"alpha_fraction": 0.5773897767066956,
"alphanum_fraction": 0.5989937782287598,
"avg_line_length": 26.69672203063965,
"blob_id": "3df06444fb5e4ab6488730ac8cc0b696d50f1601",
"content_id": "af756e735791247699caed13b5379b4fc20fe26f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3979,
"license_type": "no_license",
"max_line_length": 149,
"num_lines": 122,
"path": "/test.py",
"repo_name": "badabeast/python_requests_unittest",
"src_encoding": "UTF-8",
"text": "# import os\n# import getpathinfo # 自己定义的内部类,该类返回项目的绝对路径\n# # 调用读Excel的第三方库xlrd\n# from xlrd import open_workbook\n#\n# # # 拿到该项目所在的绝对路径\n# # path = getpathinfo.get_Path()\n# #\n# # class readExcel():\n# # def get_xls(self, xlsx_name, sheet_name): # xls_name填写用例的Excel名称 sheet_name该Excel的sheet名称\n# # cls = []\n# # # 获取用例文件路径\n# # xlsPath = os.path.join(path,'testCase',xlsx_name)\n# # file = open_workbook(xlsPath) # 打开用例Excel\n# # sheet = file.sheet_by_name(sheet_name) # 获得打开Excel的sheet\n# # # 获取这个sheet内容行数\n# # nrows = sheet.nrows\n# # for i in range(nrows): # 根据行数做循环\n# # if sheet.row_values(i)[0] != u'case_name': # 如果这个Excel的这个sheet的第i行的第一列不等于case_name那么我们把这行的数据添加到cls[]\n# # cls.append(sheet.row_values(i))\n# # return cls\n# #\n# #\n# #\n\nimport json\nimport requests\nimport urllib\ndata = {\"phone\":18701890657,\n \"smsCode\":\"171204\",\n \"channel\":\"IPAD\"\n }\nprint(type(data))\njson1=json.dumps(data)\nprint(type(json1))\nprint(json1)\njson2 = json.load(json1)\nprint(type(json2))\nprint(json2)\njsoN3= urllib.re\n#\n# #\n# #\n# # coding:utf-8\n# import smtplib\n# import sys\n# from email.mime.text import MIMEText\n# a='[email protected]','[email protected]'\n# print(a)\n# mailto_list=list(a) #收件人邮箱列表\n# mail_user=\"[email protected]\" #用户名\n# mail_passwd=\"passwd\" #用户登录密码(第三方登录授权码)\n# mail_host=\"smtp.163.com\" #邮箱服务器\n# mail_postfix=\"163.com\" #邮箱后缀名\n# #\n# def send_mail(to_list,sub,content): #定义函数,参数为收件人,邮件主题,邮件内容\n# print(content)\n# me=\"<\"+mail_user+\">\"\n#\n# msg=MIMEText(content,'plain')\n# msg['Subject']=sub\n# msg['From']=me\n# msg['To']=';'.join(to_list) #将收件人列表以“;” 形式隔开\n# print(msg)\n# try:\n# server = smtplib.SMTP_SSL() #用的是SSL协议的邮箱smtp\n# server.connect(mail_host,465) #smtp 的端口号465\n# print(server.login(mail_user,mail_passwd))\n# server.sendmail(me,to_list,msg.as_string()) #SMTP对象使用sendmail 方法发送邮件 #SMTP.sendmail(from_addr, to_addrs, msg[, mail_options, rcpt_options]\n# server.close()\n# except Exception as e:\n# print(str(e))\n# 
return False\n# #\n# # send_mail(mailto_list,\"Long time no see\",'happy new year,beautiful girl') #调用函数\n#\n#\n# import urllib.parse\n# #urlparse将url分为6个部分\n# url =\"https://i.cnblogs.com/EditPosts.aspx?opt=1\"\n# url_change = urllib.parse.urlparse(url) # 将url拆分为6个部分\n# query = url_change.query #取出拆分后6个部分中的查询模块query\n# lst_query = urllib.parse.parse_qsl(query) #使用parse_qsl返回列表\n# dict1 =dict(lst_query) #将返回的列表转换为字典\n# dict_query =urllib.parse.parse_qs(query) #使用parse_qs返回字典\n# print(\"使用parse_qsl返回列表 :\",lst_query)\n# print(\"将返回的列表转换为字典 :\",dict1)\n# print(\"使用parse_qs返回字典 : \",dict_query)\n#\n# # data = \"test=test&test2=test2&test2=test3\"\n# # print(urllib.parse.parse_qsl(data)) #返回列表\n# # print(urllib.parse.parse_qs(data)) #返回字典\n# a=3910+3510+3410\n# print(a)\n#\n\n\n\n# import unittest\n# import paramunittest\n#\n# # 方案二\n# @paramunittest.parametrized(\n# ('1', '2'),\n# # (4, 3),\n# ('2', '3'),\n# (('4',), {'b': '5'}),\n# ((), {'a': 5, 'b': 6}),\n# {'a': 5, 'b': 6},\n# )\n# class TestBar(unittest.TestCase):\n# def setParameters(self, a, b):\n# self.a = a\n# self.b = b\n#\n# def testLess(self):\n# print(\"第一个参数\", self.a)\n# self.assertLess(self.a, self.b)\n#\n#\n# if __name__ == \"__main__\":\n# unittest.main()\n"
},
{
"alpha_fraction": 0.5612867474555969,
"alphanum_fraction": 0.5873544216156006,
"avg_line_length": 37.36170196533203,
"blob_id": "262142b45dac121e98b7c1f382c885e6169c04c2",
"content_id": "1e910571ea816a5197885bf8b796f607624e8591",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1933,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 47,
"path": "/common/commonhttp.py",
"repo_name": "badabeast/python_requests_unittest",
"src_encoding": "UTF-8",
"text": "import json\nimport requests\nfrom common.commoncode import common_errorcode\n# import geturlparams\n\n\nclass RunMain(object):\n def __init__(self):\n\n self.usr_header = {'User-Agent': 'iPadN/84 CFNetwork/711.2.23 Darwin/14.0.0',\n 'Content-type': 'application/json'\n }\n\n def send_get(self, url, data, usr_header=None):\n if usr_header is None:\n usr_header = self.usr_header\n result = requests.get(url=url, data=data, headers=usr_header)\n get_res = json.dumps(result, ensure_ascii=False, sort_keys=True, indent=2)\n return get_res\n\n def send_post(self, url, data, usr_header=None): # 定义一个方法,传入需要的参数url和data\n # 参数必须按照url、data顺序传入\n data = json.dumps(data)\n if usr_header is None:\n usr_header = self.usr_header\n result = requests.post(url=url, data=data, headers=usr_header)\n result = result.json()\n post_res = json.dumps(result, ensure_ascii=False, sort_keys=True, indent=2) # 转换成字符串\n return post_res\n\n def run_main(self, method, url=None, data=None): # 定义一个run_main函数,通过传过来的method来进行不同的get或post请求\n result = None\n method = method.replace(\" \", \"\").replace(\"\\r\", \"\").replace(\"\\n\", \"\").lower()\n if method == 'post':\n result = self.send_post(url, data)\n elif method == 'get':\n result = self.send_get(url, data)\n else:\n print(\"目前支持get 和post,method值错误!!!\")\n return result\n\n\nif __name__ == \"__main__\":\n json_result = RunMain().run_main(\"post\", \"http://192.168.10.209:18087/ac-common/oauth/sms/stu\",\n '{\"phone\":18701890657,\"smsCode\":\"171204\",\"channel\":\"IPAD\"}')\n print(json_result)\n print(json.loads(json_result)[\"errorMessage\"])\n"
},
{
"alpha_fraction": 0.6158989667892456,
"alphanum_fraction": 0.6233283877372742,
"avg_line_length": 31.047618865966797,
"blob_id": "72f6d766add5e57d953c206ea9e781efea0ffbc9",
"content_id": "3c03bcac339e35aa92b69e7a7aaa1be81a668625",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1614,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 42,
"path": "/readconfig.py",
"repo_name": "badabeast/python_requests_unittest",
"src_encoding": "UTF-8",
"text": "import os\nimport configparser\nfrom getpathinfo import get_path\n\npath = get_path()\nconfig_path = os.path.join(path, 'config', 'config.ini') # 获取config文件夹下的config.ini文件\n\nconfig = configparser.ConfigParser() # 调用外部的读取配置文件的方法 初始化实例\nconfig.read(config_path, encoding='utf-8') # 读取config文件\n\n\nclass ReadConfig(object):\n # 从config.ini中读取需要的数据\n def __init__(self):\n self.server = config.get('APP', 'ServerModel')\n self.email_on_off = config.get\n\n def get_http(self, name=None): # 0测试服,1预发布,2正式服,\n if self.server == '0':\n self.url = config.get('TestHttp', name)\n elif self.server == '1':\n self.url = config.get('preHttp', name)\n elif self.server == '2':\n self.url = config.get('ReleaseHttp', 'baseurl')\n else:\n print(\"服务器选择错误,0测试服,1预发布,2正式服,目前选择\", self.server)\n return self.url\n\n def get_mysql(self, name): # 写好,留以后备用。但是因为我们没有对数据库的操作,所以这个可以屏蔽掉\n sqlvalue = config.get('Local_Database', name)\n return sqlvalue\n\n def get_email(self, name):\n emailvalue = config.get('EMAIL', name)\n return emailvalue\n\n\nif __name__ == '__main__':\n print('HTTP中的baseurl值为:', ReadConfig().get_http('baseurl'))\n print('目前连接的数据库为:', ReadConfig().get_mysql('host'))\n test = ReadConfig().get_email('host')\n print('邮件的收件人有:', test)\n"
},
{
"alpha_fraction": 0.6445725560188293,
"alphanum_fraction": 0.6541786789894104,
"avg_line_length": 28.323944091796875,
"blob_id": "0561751eb491a8c11cd07336aa707112b8e5a615",
"content_id": "a5b85d87263c80bae276307534a211ec01391e13",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2386,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 71,
"path": "/logging_method.py",
"repo_name": "badabeast/python_requests_unittest",
"src_encoding": "UTF-8",
"text": "# coding=utf-8\nimport logging\nimport time\nimport os\nfrom logging.handlers import TimedRotatingFileHandler\nfrom getpathinfo import get_path\n\n# logging.basicConfig(level=logging.DEBUG,format='%(asctime)s-%(name)s%(levelname)s-%(message)s')\n# logger = logging.getLogger(__name__) #实例化一个logger对象\n# logger.debug(\"msg1\")\n# logger.info(\"msg2\")\n# logger.warning(\"msg3\")\n# logger.error(\"msg4\")\n# logger.critical(\"msg5\")\n'''\n1、创建一个logger\n\n2、设置下logger的日志的等级\n\n3、创建合适的Handler(FileHandler要有路径)\n\n4、设置下每个Handler的日志等级\n\n5、创建下日志的格式\n\n6、向Handler中添加上面创建的格式\n\n7、将上面创建的Handler添加到logger中\n\n8、打印输出logger.debug\\logger.info\\logger.warning\\logger.error\\logger.critical\n'''\nlogpath = get_path()\nprint(logpath)\n\n\nclass LoggingMethod(object):\n # 将日志输出到文件\n def __init__(self, name):\n # 1、创建一个logger\n self.logger = logging.getLogger(name)\n # 2、设置下logger的日志的等级\n self.logger.setLevel(logging.DEBUG)\n # 定义log文件名\n now = time.strftime('%Y-%m-%d_%H_%M_%S')\n filename = logpath + '\\\\LOG\\\\' + now\n # 创建等级为DEBUG的 的日志文件,默认使用全局的Logger日志等级\n all_handler = logging.handlers.TimedRotatingFileHandler(filename + '_debug.txt', when='D', interval=1,\n backupCount=7, encoding='utf-8')\n all_handler.setFormatter(logging.Formatter(\"%(asctime)s - %(levelname)s - %(message)s\"))\n # 创建等级为error的日志文件\n fh_handler = logging.StreamHandler(filename + '_error.txt')\n fh_handler.setLevel(logging.ERROR)\n fh_handler.setFormatter(\n logging.Formatter(\"%(asctime)s - %(levelname)s - %(filename)s[:%(lineno)d] - %(message)s\"))\n\n # 7、将上面创建的Handler添加到logger中\n self.logger.addHandler(all_handler)\n self.logger.addHandler(fh_handler)\n\n def getlogger(self):\n return self.logger\n\n\nif __name__ == '__main__':\n login = LoggingMethod(__name__)\n logger = login.getlogger()\n logger.debug(\"debug message\")\n logger.info(\"info message\")\n logger.warning(\"warn message\")\n logger.error(\"error message\")\n logger.critical(\"critical message\")\n"
}
] | 12 |
rayofhopejp/mayfes2021-gameai
|
https://github.com/rayofhopejp/mayfes2021-gameai
|
275d99b19d6f39930a39711236789c99ef7e9964
|
071f85876ad27e0daaeb40e360d31e7771da0064
|
fc0387d07bc01be9bd4b2afdcaa6f4c7716bc942
|
refs/heads/master
| 2023-04-13T22:43:26.794623 | 2021-04-02T13:37:57 | 2021-04-02T13:37:57 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5523294806480408,
"alphanum_fraction": 0.5749493837356567,
"avg_line_length": 30.343915939331055,
"blob_id": "17097ec7c02b0d4ef8b590bd91f8a7865bcd2eb9",
"content_id": "fb89a4635db46a8d661e06b77ab844e209241ce7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6174,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 189,
"path": "/server.py",
"repo_name": "rayofhopejp/mayfes2021-gameai",
"src_encoding": "UTF-8",
"text": "from aiohttp import web\nimport socketio\nimport math\nimport threading\nimport itertools\nimport numpy as np\nimport random\nimport torch\nfrom torch import nn, optim\n\nfrom ddqn_curling_discrete import CNNQNetwork\n\nsio = socketio.AsyncServer(async_mode='aiohttp')#,logger=True, engineio_logger=True\napp = web.Application()\nsio.attach(app)\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nnet_load = torch.load(\"models/model_016000.pt\") #34000あたりが一番強そう…(謎)\n\nWIDTH=600\nHEIGHT=1000\nBALL_RADIUS=30\nFRICTION=0.01\nSTONE_NUM=5\nclass Stone:\n def __init__(self,camp,v,theta):\n self.camp=camp\n self.y=0\n self.x=WIDTH/2\n if self.camp==\"AI\":\n self.y=HEIGHT\n self.v=np.array([v*math.cos(math.radians(theta)),v*math.sin(math.radians(theta))])\n self.radius=BALL_RADIUS\n def move(self):\n vnorm=np.linalg.norm(self.v, ord=2)\n if vnorm>FRICTION:\n self.v=self.v*(vnorm-FRICTION)/vnorm #0.05減速\n else:\n self.v=np.array([0,0])#停止\n self.x+=self.v[0]\n self.y+=self.v[1]\n if self.x>WIDTH: #x軸方向に反転\n self.x=2*WIDTH-self.x\n self.v[0]=-self.v[0]\n if self.x<0: #x軸方向に反転\n self.x=-self.x\n self.v[0]=-self.v[0]\n if self.y>HEIGHT: #y軸方向に反転\n self.y=2*HEIGHT-self.y\n self.v[1]=-self.v[1]\n if self.y<0: #y軸方向に反転\n self.y=-self.y\n self.v[1]=-self.v[1]\n def collision(self,other):\n dist=math.sqrt( (self.x-other.x)**2+(self.y-other.y)**2 )\n if dist>self.radius+other.radius:\n return\n #衝突している時\n #運動方程式解いた\n e=np.divide(np.array([self.x-other.x,self.y-other.y]), dist, where=dist!=0)\n t=np.dot(self.v,e)-np.dot(other.v,e)\n self.v=self.v-t*e\n other.v=self.v+t*e\n def return_dist(self):\n dist=math.sqrt( (self.x-WIDTH/2)**2+(self.y-HEIGHT/2)**2 )\n return dist\n def encode(self):\n return {'x': self.x,\n 'y': self.y,\n 'radius':self.radius,\n 'camp':self.camp}\n\ndef stonesToObs(stones): #Stoneの塊をobs(numpy.ndarray)に変換する\n obs=np.array([-1 for i in range(STONE_NUM*4)])\n i_you=0\n i_AI=STONE_NUM\n for stone in stones:\n if stone.camp=='you' and 
i_you<STONE_NUM:\n obs[i_you*2]=stone.x\n obs[i_you*2+1]=stone.y\n i_you+=1\n if stone.camp=='AI' and i_AI<STONE_NUM*2:\n obs[i_AI*2]=stone.x\n obs[i_AI*2+1]=stone.y\n i_AI+=1\n return obs\n\n\nsituations={}#盤面ごとに存在するカーリングの球の状態を記録する\n\nasync def background_task():\n \"\"\"Example of how to send server generated events to clients.\"\"\"\n count = 0\n while True:\n await sio.sleep(10)\n count += 1\n await sio.emit('my_response', {'data': 'Server generated event'})\n\nasync def index(request):\n \"\"\"Serve the client-side application.\"\"\"\n with open('dist/index.html') as f:\n return web.Response(text=f.read(), content_type='text/html')\n\[email protected]\ndef connect(sid, environ):\n print(\"connect \", sid)\n situations[sid]=[]\n\[email protected]\nasync def game_start(sid, data): \n print(\"message \", data['test'])\n #ここで準備\n #球を打っていいよの合図\n await sio.emit('your_turn',room=sid)\n\n\n \[email protected]\nasync def hit_stone(sid,data):\n print(\"hit_stone\")\n situations[sid].append( Stone(\"you\",data[\"velocity\"],data[\"theta\"]) )\n while True:\n await sio.emit('move_stones', {'stones': [stone.encode() for stone in situations[sid]]},room=sid)\n await sio.sleep(0.001)\n stillmove = False\n for stone in situations[sid]:\n stone.move()\n if stone.v[0]!=0 or stone.v[1]!=0:\n stillmove=True\n for pair in itertools.combinations(situations[sid], 2): #衝突判定\n pair[0].collision(pair[1])\n if not stillmove:\n break\n #相手が打つ\n obs=stonesToObs(situations[sid])\n action = net_load.act(torch.from_numpy(obs.astype(np.float32)).clone().float().to(device),0)\n situations[sid].append( Stone(\"AI\",action[0],action[1]) )\n \n while True:\n await sio.emit('move_stones', {'stones': [stone.encode() for stone in situations[sid]]},room=sid)\n await sio.sleep(0.001)\n stillmove = False\n for stone in situations[sid]:\n stone.move()\n if stone.v[0]!=0 or stone.v[1]!=0:\n stillmove=True\n for pair in itertools.combinations(situations[sid], 2): #衝突判定\n pair[0].collision(pair[1])\n if not 
stillmove:\n break\n if len(situations[sid])==STONE_NUM*2 :\n #ゲーム終わり\n player1_min_dist=1001001001\n player2_min_dist=1001001001\n for stone in situations[sid]:\n dist=stone.return_dist()\n if stone.camp=='you':\n player1_min_dist=min(player1_min_dist,dist)\n else:\n player2_min_dist=min(player2_min_dist,dist)\n score=0\n if player1_min_dist<player2_min_dist : #win player 1\n for stone in situations[sid]:\n dist=stone.return_dist()\n if stone.camp=='you' and dist<player2_min_dist:\n score+=1 #player1のreward\n await sio.emit('you_win',{\"score\":score},room=sid)\n else: #win player 2\n for stone in situations[sid]:\n dist=stone.return_dist()\n if stone.camp=='AI' and dist<player1_min_dist:\n score-=1 #player1のreward\n await sio.emit('AI_win',{\"score\":score},room=sid) \n else:\n await sio.emit('your_turn',room=sid)\n \n \n\n\[email protected]\ndef disconnect(sid):\n print('disconnect ', sid)\n\napp.router.add_static('/dist', 'dist')\napp.router.add_static('/node_modules', 'node_modules')\napp.router.add_get('/', index)\n\nif __name__ == '__main__':\n #sio.start_background_task(background_task)\n web.run_app(app)\n"
},
{
"alpha_fraction": 0.5659722089767456,
"alphanum_fraction": 0.5879629850387573,
"avg_line_length": 21.153846740722656,
"blob_id": "cc7928a22c85680cca62c13f43f822f5d8b26386",
"content_id": "dc74e3fa7e2c1118ff1fd60934181c3f81726f8e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 864,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 39,
"path": "/random_curling.py",
"repo_name": "rayofhopejp/mayfes2021-gameai",
"src_encoding": "UTF-8",
"text": "import math\nimport random\nimport curlingenv\n\nimport gym\n\nenv = gym.make('curlingenv-v0')\n\n\nnum_episodes = 300\nnum_steps_per_episode = 20\n\ncollected_rewards = []\nfor i in range(num_episodes):\n s = env.reset()\n total_reward = 0\n done = False\n turn=1\n\n for j in range(num_steps_per_episode):\n m = turn,random.uniform(10,170),random.uniform(0,5)\n #print (\"m: \", m)\n s1, reward, done, info = env.step(m)\n total_reward += reward\n s = s1\n turn=-turn\n if done:\n break\n # env.render()\n #total_reward *= oom;\n collected_rewards.append(total_reward)\n\n print(\"after \" + str(i + 1) + \" episodes:\")\n average = sum(collected_rewards) / len(collected_rewards)\n score = collected_rewards[-1]\n print(\"average score: \", average)\n print(\"score:\", score)\n print()\nprint(\"#########\")\n"
},
{
"alpha_fraction": 0.58437579870224,
"alphanum_fraction": 0.6019912958145142,
"avg_line_length": 35.27777862548828,
"blob_id": "9a3f5d71fc77da6261584b046d6672b84cc1f570",
"content_id": "e64ee80b362b79195706e3b1e01ce197d7f2c36f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 4005,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 108,
"path": "/js/index.js",
"repo_name": "rayofhopejp/mayfes2021-gameai",
"src_encoding": "UTF-8",
"text": "'use strict';\nconst socket = io.connect();\nconst canvas = $('#canvas-2d')[0];\nconst context = canvas.getContext('2d');\nconst playerImage = $('#player-image')[0];\nlet playercursor=90;\nlet playervelocity=3;\nlet data_storage=null;\nconst FRICTION=0.01\n//球を打つ\nfunction fillarc(x,y,size,color){\n context.fillStyle =color;\n context.beginPath();\n context.arc(x, y,size,0,2*Math.PI);\n context.closePath();\n context.fill();\n}\nfunction clearAndWriteStage(){\n context.save();\n context.clearRect(0, 0, canvas.width, canvas.height);\n context.lineWidth = 10;\n context.beginPath();\n context.rect(0, 0, canvas.width, canvas.height);\n context.closePath();\n context.stroke(); \n fillarc(canvas.width/2, canvas.height/2,250,'darkblue');\n fillarc(canvas.width/2, canvas.height/2,200,'white');\n fillarc(canvas.width/2, canvas.height/2,100,'brown');\n fillarc(canvas.width/2, canvas.height/2,50,'white');\n context.restore();\n}\n\nfunction rewriteSituation(data){\n clearAndWriteStage();\n if(data===null)return;\n Object.values(data.stones).forEach((stone)=>{\n context.beginPath();\n if(stone.camp==='you')context.fillStyle = 'red';\n else context.fillStyle = 'blue';\n context.arc(stone.x,canvas.height-stone.y,stone.radius,0,2*Math.PI);\n context.fill();\n });\n}\nfunction writePointer(theta,velocity){\n rewriteSituation(data_storage);//再描写(いい方法があれば変えたい…)\n var length=velocity*(velocity/FRICTION)/2;\n context.lineWidth = 5;\n var endx=canvas.width/2+length*Math.cos(theta*(Math.PI/180));\n var endy=canvas.height-length*Math.sin(theta*(Math.PI/180));\n context.beginPath();\n context.moveTo(canvas.width/2,canvas.height);\n context.lineTo(endx,endy);\n context.closePath();\n context.stroke();\n context.lineWidth = 10;\n}\n$(document).ready(function(){\n console.log(\"Page is loaded\");\n clearAndWriteStage();\n socket.on('connect', (data)=>{\n console.log(\"gameStart!\",data);\n var data={\"test\":\"yes\"};\n socket.emit(\"game_start\",data);\n });\n 
socket.on('your_turn', (data) =>{\n playercursor=90;\n playervelocity=3;\n writePointer(playercursor,playervelocity);\n $(document).on('keydown keyup', (event) => {\n //console.log(\"keydown,keyup\");\n if(event.key === 'ArrowLeft'&& event.type === 'keydown'){\n if(playercursor<135)playercursor+=1;\n writePointer(playercursor,playervelocity);\n }\n if(event.key === 'ArrowRight'&& event.type === 'keydown'){\n if(playercursor>45)playercursor-=1;\n writePointer(playercursor,playervelocity);\n }\n if(event.key === 'ArrowDown'&& event.type === 'keydown'){\n if(playervelocity>2)playervelocity-=0.25;\n writePointer(playercursor,playervelocity);\n }\n if(event.key === 'ArrowUp'&& event.type === 'keydown'){\n if(playervelocity<4)playervelocity+=0.25;\n writePointer(playercursor,playervelocity);\n }\n if(event.key === ' ' && event.type === 'keydown'){\n console.log(\"hit stone\");\n socket.emit(\"hit_stone\",{\"theta\":playercursor,\"velocity\":playervelocity});\n $(document).off('keydown keyup');\n }\n });\n });\n socket.on('move_stones', (data) =>{\n rewriteSituation(data);\n data_storage=data;\n });\n socket.on('you_win', (data) =>{\n console.log(\"you win! score:\",data[\"score\"])\n const scoreboard = document.getElementById(\"scoreboard\");\n scoreboard.innerHTML=data[\"score\"]+\"点であなたの勝利です.\";\n });\n socket.on('AI_win', (data) =>{\n console.log(\"AI win! score:\",data[\"score\"])\n const scoreboard = document.getElementById(\"scoreboard\");\n scoreboard.innerHTML=data[\"score\"]+\"点であなたの負けです.\";\n });\n});"
},
{
"alpha_fraction": 0.746268630027771,
"alphanum_fraction": 0.770691990852356,
"avg_line_length": 22.74193572998047,
"blob_id": "4e8e14300c6728f9beb4b72bace4f077d30d7ebd",
"content_id": "cb42d19d3af30c07588591e131499088051107cf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1017,
"license_type": "no_license",
"max_line_length": 130,
"num_lines": 31,
"path": "/README.md",
"repo_name": "rayofhopejp/mayfes2021-gameai",
"src_encoding": "UTF-8",
"text": "# mayfes20201-gameai\n## 開発方法\nmodels以下に\nhttps://drive.google.com/drive/folders/1vk-7_WMdSPf_ULpmY1LY4dX0Ek6Ebx1S?usp=sharing\nから好きなモデルを持ってきて配置する.\n\n```bash\n$ npm install\n$ npm run build\n$ pip install aiohttp\n$ pip install python-socketio\npip install git+https://github.com/openai/gym\n$ pip install torch\n$ python server.py\n```\n\n## 機械学習によるモデルの生成\n```\n$ python ddqn_curling_discrete.py #(機械学習によるmodel.ptの生成)\n```\n生成したモデルを使うときは\nserver.pyの\n```\nnet_load = torch.load(\"models/model_003000.pt\")\n```\n部分の変更を忘れずに!\n## メモ\n以下のことはまだやらなくても動きます(後で追加したいのでメモ)\n[three.js](http://threejs.org/build/three.js)をダウンロードしてstaticに配置する\n\n[フォント](https://raw.githubusercontent.com/mrdoob/three.js/master/examples/fonts/helvetiker_bold.typeface.json)をダウンロードしてstaticに配置する。\n\n"
},
{
"alpha_fraction": 0.32630273699760437,
"alphanum_fraction": 0.32630273699760437,
"avg_line_length": 24.1875,
"blob_id": "54bc0f270b9a0595330e873397cc97ebb9912cc6",
"content_id": "8385bbc599117cd7fe97b7b94b44f47107afd618",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 806,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 32,
"path": "/webpack.config.js",
"repo_name": "rayofhopejp/mayfes2021-gameai",
"src_encoding": "UTF-8",
"text": "const CopyWebpackPlugin = require('copy-webpack-plugin');\n\n/** @type {import('webpack').Configuration} */\nmodule.exports = {\n mode: process.env.NODE_ENV || 'development',\n entry: './js/index.js',\n module: {\n rules: [\n {\n test: /\\.js$/,\n exclude: /node_modules/,\n use: [\n {\n loader: 'babel-loader',\n options: {\n presets: [\n '@babel/preset-env',\n ]\n }\n }\n ]\n }\n ]\n },\n plugins: [\n new CopyWebpackPlugin({\n patterns: [\n { from: 'static' }\n ]\n })\n ]\n};\n"
},
{
"alpha_fraction": 0.4826793074607849,
"alphanum_fraction": 0.5150203108787537,
"avg_line_length": 34.3636360168457,
"blob_id": "2d16823d43b3a8271fc41c74d83062699f2a344b",
"content_id": "34717c0c0f402ab24c79c74cafd217919fe4c4bb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7966,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 209,
"path": "/curlingenv/env.py",
"repo_name": "rayofhopejp/mayfes2021-gameai",
"src_encoding": "UTF-8",
"text": "import sys\n\nimport gym\nimport numpy as np\nimport gym.spaces\nimport math\nimport itertools\n\nWIDTH=600\nHEIGHT=1000\nBALL_RADIUS=30\nFRICTION=0.01\nSTONE_NUM=5\n#対戦型の環境にする方法がわからない…\nclass Stone:\n def __init__(self,camp,x,y,v,theta):\n self.camp=camp\n self.y=y\n self.x=x\n if v==0:\n self.v=np.array([0,0])\n else:\n self.v=np.array([v*math.cos(math.radians(theta)),v*math.sin(math.radians(theta))])\n self.radius=BALL_RADIUS\n def move(self):\n vnorm=np.linalg.norm(self.v, ord=2)\n if vnorm>FRICTION:\n self.v=self.v*(vnorm-FRICTION)/vnorm #0.05減速\n else:\n self.v=np.array([0,0])#停止\n self.x+=self.v[0]\n self.y+=self.v[1]\n if self.x>WIDTH: #x軸方向に反転\n self.x=2*WIDTH-self.x\n self.v[0]=-self.v[0]\n if self.x<0: #x軸方向に反転\n self.x=-self.x\n self.v[0]=-self.v[0]\n if self.y>HEIGHT: #y軸方向に反転\n self.y=2*HEIGHT-self.y\n self.v[1]=-self.v[1]\n if self.y<0: #y軸方向に反転\n self.y=-self.y\n self.v[1]=-self.v[1]\n def collision(self,other):\n dist=math.sqrt( (self.x-other.x)**2+(self.y-other.y)**2 )\n if dist>self.radius+other.radius:\n return\n #衝突している時\n #運動方程式解いた\n if dist==0:\n e=np.array([0,0])\n else:\n e=np.array([self.x-other.x,self.y-other.y])/dist\n t=np.dot(self.v,e)-np.dot(other.v,e)\n self.v=self.v-t*e\n other.v=self.v+t*e\n def return_dist(self):\n dist=math.sqrt( (self.x-WIDTH/2)**2+(self.y-HEIGHT/2)**2 )\n return dist\n def encode(self):\n return {'x': self.x,\n 'y': self.y,\n 'radius':self.radius,\n 'camp':self.camp}\n\nclass CurlingEnv(gym.Env):\n metadata = {'render.modes': ['human', 'ansi']}\n MAX_STEPS = 100\n def __init__(self):\n super().__init__()\n # action_space, observation_space, reward_range を設定する\n self.WIDTH=600\n self.HEIGHT=1000\n self.action_space =gym.spaces.Tuple((\n gym.spaces.Discrete(2), #どっちが打つか\n gym.spaces.Box(\n low=np.array([2,45]),#velocity\n high=np.array([4,135]),#theta\n dtype=np.float\n )\n ))\n \"\"\"\n #Discreteにする方がいいか?\n gym.spaces.Tuple((\n gym.spaces.Discrete(2), #どっちが打つか\n gym.spaces.Discrete(10),#velocity\n 
gym.spaces.Discrete(161) ))#theta\n \"\"\"\n \"\"\"\n #continuousにする方がいいか?\n gym.spaces.Box(\n low=np.array([10,0]),\n high=np.array([170,5]),\n dtype=np.float\n ) # 10度~170度,速さ0~5\n \"\"\"\n #状態はSTONE_NUM*2個のカーリングの球のx,y,x,y,....(最初のSTONE_NUMつがyou,最後のSTONE_NUMつがAI)\n HIGH=np.array([1000 if i%2 else 600 for i in range(STONE_NUM*4)])\n self.observation_space = gym.spaces.Box(\n low=-1,\n high=HIGH,\n shape=(STONE_NUM*4,),\n dtype=np.int\n )\n self.reward_range = [-STONE_NUM,STONE_NUM]#相手の一番近くにあるストーンより近くにあるストーンの数\n self.reset()\n\n def reset(self):\n # 諸々の変数を初期化する\n self.stone_position=np.array([-1 for i in range(STONE_NUM*4)])\n return self.stone_position\n\n def step(self, action):\n # 1ステップ進める処理を記述。戻り値は observation, reward, done(ゲーム終了したか), info(追加の情報の辞書)\n #theta=10+action//10 #10,11,...,170\n #velocity=(action%10+1)*0.5 #0.5,1.0,1.5,...,5\n camp,hit=action\n velocity,theta=hit\n #print(camp,velocity,action)\n #状態をStonesにコピー\n self.stones=[]\n for i in range(STONE_NUM):#player1\n if self.stone_position[i*2] >= 0:\n self.stones.append(Stone(\n 'you',\n self.stone_position[i*2],\n self.stone_position[i*2+1],\n 0,\n 0))\n for i in range(STONE_NUM,STONE_NUM*2):#player2\n if self.stone_position[i*2] >= 0:\n self.stones.append(Stone(\n 'AI',\n self.stone_position[i*2],\n self.stone_position[i*2+1],\n 0,\n 0))\n #新しいコマを配置\n self.stones.append(Stone(\n \"you\" if camp==1 else \"AI\",\n WIDTH/2,\n HEIGHT if camp==-1 else 0,\n velocity,\n theta))\n #moveする\n while True:\n stillmove = False\n for stone in self.stones:\n stone.move()\n if stone.v[0]!=0 or stone.v[1]!=0:\n stillmove=True\n for pair in itertools.combinations(self.stones, 2): #衝突判定\n pair[0].collision(pair[1])\n if not stillmove:\n break\n #stone_positionに移す\n self.stone_position=np.array([-1 for i in range(STONE_NUM*4)])\n i_you=0\n i_AI=STONE_NUM\n for stone in self.stones:\n if stone.camp=='you' and i_you<STONE_NUM:\n self.stone_position[i_you*2]=stone.x\n self.stone_position[i_you*2+1]=stone.y\n i_you+=1\n if 
stone.camp=='AI' and i_AI<STONE_NUM*2:\n self.stone_position[i_AI*2]=stone.x\n self.stone_position[i_AI*2+1]=stone.y\n i_AI+=1\n\n observation = self.stone_position\n reward = 0 #todo:一番近いストーンの近さに比例したreward\n self.done = ( len(self.stones)==STONE_NUM*2 ) #STONE_NUM*2個全部埋まってたらdone\n #print(len(self.stones),self.done)\n if self.done: #check:self.doneからTrue変更してみて報酬を密にしてみても良い?\n player1_min_dist=1001001001\n player2_min_dist=1001001001\n for i in range(STONE_NUM):#player1\n dist=np.sqrt( (self.stone_position[i*2]-WIDTH/2)**2+(self.stone_position[i*2+1]-HEIGHT/2)**2 )\n player1_min_dist=min(player1_min_dist,dist)\n for i in range(STONE_NUM,STONE_NUM*2):#player1\n dist=np.sqrt( (self.stone_position[i*2]-WIDTH/2)**2+(self.stone_position[i*2+1]-HEIGHT/2)**2 )\n player2_min_dist=min(player2_min_dist,dist)\n if player1_min_dist<player2_min_dist : #win player 1\n for i in range(STONE_NUM):\n dist=np.sqrt( (self.stone_position[i*2]-WIDTH/2)**2+(self.stone_position[i*2+1]-HEIGHT/2)**2 )\n if dist<player2_min_dist:\n reward+=1 #player1のreward\n else: #win player 2\n for i in range(STONE_NUM,STONE_NUM*2):\n dist=np.sqrt( (self.stone_position[i*2]-WIDTH/2)**2+(self.stone_position[i*2+1]-HEIGHT/2)**2 )\n if dist<player1_min_dist:\n reward-=1 #player1のreward\n return observation, camp*reward, self.done, {\"stonesize\":len(self.stones)}\n\n def render(self, mode='human', close=False):\n # human の場合はコンソールに出力。ansiの場合は StringIO を返す\n outfile = StringIO() if mode == 'ansi' else sys.stdout\n outfile.write(','.join(self.stone_position) + '\\n')\n return outfile\n\n def close(self):\n pass\n def move_generator(self):\n moves = []\n for theta in range (161):\n for velocity in range(10):\n moves.append(theta*10+velocity)\n return moves"
},
{
"alpha_fraction": 0.56608647108078,
"alphanum_fraction": 0.5851237177848816,
"avg_line_length": 34.70388412475586,
"blob_id": "14278467770d0003d3c7bfe256b981a21290622e",
"content_id": "eead65172a72dd95081daf664dbdddb4395b9881",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7530,
"license_type": "no_license",
"max_line_length": 166,
"num_lines": 206,
"path": "/ddqn_curling.py",
"repo_name": "rayofhopejp/mayfes2021-gameai",
"src_encoding": "UTF-8",
"text": "import random\n\nimport gym\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport torch\nfrom torch import nn, optim\nimport math\nfrom IPython.display import HTML\nimport curlingenv\n\nenv = gym.make('curlingenv-v0')\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nSTONE_NUM=5 #envのstone_numと合わせる\n\nclass PrioritizedReplayBuffer(object):\n def __init__(self, buffer_size):\n self.buffer_size = buffer_size \n self.index = 0 \n self.buffer = [] \n self.priorities = np.zeros(buffer_size, dtype=np.float32)\n self.priorities[0] = 1.0 \n \n def __len__(self):\n return len(self.buffer)\n def push(self, experience):\n if len(self.buffer) < self.buffer_size:\n self.buffer.append(experience)\n else:\n self.buffer[self.index] = experience\n self.priorities[self.index] = self.priorities.max()\n self.index = (self.index + 1) % self.buffer_size\n def sample(self, batch_size, alpha=0.6, beta=0.4):\n priorities = self.priorities[: self.buffer_size if len(self.buffer) == self.buffer_size else self.index] #>入っている経験の数まで取り出す\n priorities = priorities ** alpha\n prob = priorities / priorities.sum()\n indices = np.random.choice(len(self.buffer),size=batch_size,p=prob)\n weights = (len(self.buffer)*prob[indices])**(-beta) \n weights = weights / max(weights)\n obs, action, reward, next_obs, done = zip(*[self.buffer[i] for i in indices])\n return (torch.stack(obs),\n torch.as_tensor(action), \n torch.as_tensor(reward, dtype=torch.float32),\n torch.stack(next_obs), \n torch.as_tensor(done, dtype=torch.uint8),\n indices,\n torch.as_tensor(weights, dtype=torch.float32))\n\n def update_priorities(self, indices, priorities):\n self.priorities[indices] = priorities + 1e-4\n\ndef sigmoid(a):\n s = 1 / (1 + math.e**-a)\n return s\ndef zerotoone(x,a,b): #(0,1)でxなのを(a,b)に拡張\n return x*(b-a)+a\n\nclass CNNQNetwork(nn.Module):\n def __init__(self, state_shape, n_action):\n super(CNNQNetwork, self).__init__()\n self.state_shape = state_shape\n self.n_action = 
n_action\n \n self.fc_state = nn.Sequential(\n nn.Linear(STONE_NUM*4, 16),\n nn.ReLU(),\n nn.Linear(16, 1)\n )\n\n self.fc_advantage = nn.Sequential(\n nn.Linear(STONE_NUM*4, 16),\n nn.ReLU(),\n nn.Linear(16, n_action)\n )\n \n def forward(self, obs):\n feature = obs\n feature = feature.view(feature.size(0), -1)\n\n state_values = self.fc_state(feature)\n advantage = self.fc_advantage(feature) \n action_values = state_values + advantage - torch.mean(advantage, dim=1, keepdim=True)\n return action_values\n\n def act(self, obs, epsilon):\n if random.random() < epsilon:\n action = env.action_space.sample() \n cmp,act=action\n action=act\n else:\n with torch.no_grad():\n action = self.forward(obs.unsqueeze(0))[0]\n #出力をvelocity=0.5~5,theta=10~170に制限する\n action[0]=zerotoone(sigmoid(action[0]/10),2,4)\n action[1]=zerotoone(sigmoid(action[1]/10),10,170)\n action=action.to(device).detach().numpy().copy()\n #print(\"action:\",action)\n #print(\"action:\",action)\n return action\n\n\nbuffer_size = 100000 \ninitial_buffer_size = 10000 \nreplay_buffer = PrioritizedReplayBuffer(buffer_size)\n\nnet = CNNQNetwork(env.observation_space.shape, n_action=2).to(device)\ntarget_net = CNNQNetwork(env.observation_space.shape, n_action=2).to(device)\ntarget_update_interval = 2000\n\n\noptimizer = optim.Adam(net.parameters(), lr=1e-4) \nloss_func = nn.SmoothL1Loss(reduction='none') \n\ngamma = 0.99\nbatch_size = 32\nn_episodes = 30000 #100000とかでやりたい\nSAVE_NUM=1000\n\nbeta_begin = 0.4\nbeta_end = 1.0\nbeta_decay = 16*n_episodes\nbeta_func = lambda step: min(beta_end, beta_begin + (beta_end - beta_begin) * (step / beta_decay))\n\nepsilon_begin = 1.0\nepsilon_end = 0.01\nepsilon_decay = 16*n_episodes\nepsilon_func = lambda step: max(epsilon_end, epsilon_begin - (epsilon_begin - epsilon_end) * (step / epsilon_decay))\n\ndef update(batch_size, beta):\n obs, action, reward, next_obs, done, indices, weights = replay_buffer.sample(batch_size, beta)\n obs, action, reward, next_obs, done, weights 
\\\n = obs.float().to(device), action.to(device), reward.to(device), next_obs.float().to(device), done.to(device), weights.to(device)\n\n q_values = net(obs)\n \n with torch.no_grad():\n greedy_action_next = net(next_obs)\n q_values_next = target_net(next_obs)\n\n reward_np=reward.to(device).detach().numpy().copy()\n two_reward=[]\n for r in reward_np:\n two_reward.append([r,r])\n two_reward=torch.Tensor(two_reward )\n target_q_values = two_reward + gamma * q_values_next #* (1 - done) #ここでバグ\n \n two_weights=[]\n for r in weights:\n two_weights.append([r,r])\n two_weights=torch.Tensor(two_weights )\n optimizer.zero_grad()\n loss = (two_weights * loss_func(q_values, target_q_values)).mean()\n loss.backward()\n optimizer.step()\n \n replay_buffer.update_priorities(indices, (target_q_values - q_values).mean().abs().detach().cpu().numpy())\n\n return loss.item()\n\nif __name__==\"__main__\":\n step = 0\n rewards=[]\n for episode in range(n_episodes):\n obs = env.reset()\n done = False\n total_reward = 0\n camp=1\n while not done:\n if camp==1:\n action = env.action_space.sample()\n cmp,act=action\n action=(camp,act)\n #print(action)\n next_obs, reward, done, _ = env.step(action)\n #total_reward += reward\n obs = next_obs\n else:#後手について学習する\n action = net.act(torch.from_numpy(obs.astype(np.float32)).clone().float().to(device), epsilon_func(step))\n action=(camp,action)\n #print(action)\n next_obs, reward, done, _ = env.step(action)\n total_reward += reward\n replay_buffer.push([torch.from_numpy(obs.astype(np.float32)).clone(), action[1], reward, torch.from_numpy(next_obs.astype(np.float32)).clone(), done])\n obs = next_obs\n\n # ネットワークを更新 \n if len(replay_buffer) > initial_buffer_size:\n #print(\"update\")\n update(batch_size, beta_func(step))\n\n # ターゲットネットワークを定期的に同期させる\n if (step + 1) % target_update_interval == 0:\n target_net.load_state_dict(net.state_dict())\n \n step += 1\n camp=-camp\n\n print('Episode: {}, Step: {}, Reward: {}'.format(episode + 1, step + 1, 
total_reward))\n rewards.append(total_reward)\n if episode%SAVE_NUM==0:\n x=[i for i in range(len(rewards))]\n plt.plot(x,rewards,\"bo\")\n plt.savefig(\"graphs/continuous_{:0=6}.png\".format(episode))\n torch.save(net, 'models/continuous_model_{:0=6}.pt'.format(episode))\n torch.save(target_net, 'models/continuous_target_model_{:0=6}.pt'.format(episode))"
},
{
"alpha_fraction": 0.5652841925621033,
"alphanum_fraction": 0.5867895483970642,
"avg_line_length": 35.16666793823242,
"blob_id": "27f425866b536b27c500dd8e9603fce9d95f4387",
"content_id": "8a92fa2c8affc559395ecab68432f129421be4c2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7313,
"license_type": "no_license",
"max_line_length": 167,
"num_lines": 198,
"path": "/ddqn_curling_discrete.py",
"repo_name": "rayofhopejp/mayfes2021-gameai",
"src_encoding": "UTF-8",
"text": "import random\n\nimport gym\nimport numpy as np\n\nimport matplotlib.pyplot as plt\nimport torch\nfrom torch import nn, optim\nimport math\n\nfrom IPython.display import HTML\nimport curlingenv\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nprint(device)\n\nenv = gym.make('curlingenv-v0')\nSTONE_NUM=5 #envのSTONE_NUMと合わせる\nHIDDEN=64 #隠れ層の大きさ\n\n\nclass PrioritizedReplayBuffer(object):\n def __init__(self, buffer_size):\n self.buffer_size = buffer_size\n self.index = 0 \n self.buffer = [] \n self.priorities = np.zeros(buffer_size, dtype=np.float32)\n self.priorities[0] = 1.0 \n \n def __len__(self):\n return len(self.buffer)\n\n def push(self, experience):\n if len(self.buffer) < self.buffer_size:\n self.buffer.append(experience)\n else:\n self.buffer[self.index] = experience\n self.priorities[self.index] = self.priorities.max()\n self.index = (self.index + 1) % self.buffer_size \n \n def sample(self, batch_size, alpha=0.6, beta=0.4):\n priorities = self.priorities[: self.buffer_size if len(self.buffer) == self.buffer_size else self.index] \n priorities = priorities ** alpha \n prob = priorities / priorities.sum()\n\n indices = np.random.choice(len(self.buffer),size=batch_size,p=prob)\n\n weights = (len(self.buffer)*prob[indices])**(-beta)\n weights = weights / max(weights)\n\n obs, action, reward, next_obs, done = zip(*[self.buffer[i] for i in indices])\n\n return (torch.stack(obs),\n torch.as_tensor(action), \n torch.as_tensor(reward, dtype=torch.float32),\n torch.stack(next_obs), \n torch.as_tensor(done, dtype=torch.uint8),\n indices,\n torch.as_tensor(weights, dtype=torch.float32))\n def update_priorities(self, indices, priorities):\n self.priorities[indices] = priorities + 1e-4\n\n\"\"\"\n Dueling Networkを用いたQ関数を実現するためのニューラルネットワークをクラスとして記述します. 
\n\"\"\"\nclass CNNQNetwork(nn.Module):\n def __init__(self, state_shape, n_action):\n super(CNNQNetwork, self).__init__()\n self.state_shape = state_shape\n self.n_action = n_action\n\n self.fc_state = nn.Sequential(\n nn.Linear(STONE_NUM*4, HIDDEN),\n nn.ReLU(),\n nn.Linear(HIDDEN, 1)\n )\n self.fc_advantage = nn.Sequential(\n nn.Linear(STONE_NUM*4, HIDDEN),\n nn.ReLU(),\n nn.Linear(HIDDEN, n_action)\n )\n \n def forward(self, obs):\n feature = obs\n feature = feature.view(feature.size(0), -1) \n state_values = self.fc_state(feature) \n advantage = self.fc_advantage(feature)\n action_values = state_values + advantage - torch.mean(advantage, dim=1, keepdim=True)\n return action_values\n\n def act(self, obs, epsilon):\n if random.random() < epsilon:\n action = random.randrange(self.n_action)\n theta=(action//9)*5+45\n velocity=(action%9)/4+2\n action=(velocity,theta)\n else:\n with torch.no_grad():\n action = torch.argmax(self.forward(obs.unsqueeze(0))).item()\n #action=((theta-45)//5)*9+int((velocity-2)*4)\n theta=(action//9)*5+45\n velocity=(action%9)/4+2\n action=(velocity,theta)\n #print(action)\n return action\n\nbuffer_size = 10000#0 \ninitial_buffer_size = 1000#0 \nreplay_buffer = PrioritizedReplayBuffer(buffer_size)\n\nnet = CNNQNetwork(env.observation_space.shape, n_action=17*9).to(device)\ntarget_net = CNNQNetwork(env.observation_space.shape, n_action=17*9).to(device)\ntarget_update_interval = 2000 \n\noptimizer = optim.Adam(net.parameters(), lr=1e-4) \nloss_func = nn.SmoothL1Loss(reduction='none') \n\ngamma = 0.99 \nbatch_size = 32\nn_episodes = 100000 #100000とかでやりたい\nSAVE_NUM=1000\n\nbeta_begin = 0.4\nbeta_end = 1.0\nbeta_decay = n_episodes*16\nbeta_func = lambda step: min(beta_end, beta_begin + (beta_end - beta_begin) * (step / beta_decay))\n\nepsilon_begin = 1.0\nepsilon_end = 0.01\nepsilon_decay = n_episodes*16\nepsilon_func = lambda step: max(epsilon_end, epsilon_begin - (epsilon_begin - epsilon_end) * (step / epsilon_decay))\n\n\ndef 
update(batch_size, beta):\n #print(\"update\")\n obs, action, reward, next_obs, done, indices, weights = replay_buffer.sample(batch_size, beta)\n obs, action, reward, next_obs, done, weights \\\n = obs.float().to(device), action.to(device), reward.to(device), next_obs.float().to(device), done.to(device), weights.to(device)\n\n q_values = net(obs).gather(1, action.unsqueeze(1)).squeeze(1)\n \n with torch.no_grad():\n greedy_action_next = torch.argmax(net(next_obs),dim=1)\n q_values_next = target_net(next_obs).gather(1, greedy_action_next.unsqueeze(1)).squeeze(1)\n target_q_values = reward + gamma * q_values_next * (1 - done)\n optimizer.zero_grad()\n loss = (weights * loss_func(q_values, target_q_values)).mean()\n loss.backward()\n optimizer.step()\n replay_buffer.update_priorities(indices, (target_q_values - q_values).abs().detach().cpu().numpy())\n\n return loss.item()\n\nif __name__==\"__main__\":\n step = 0\n rewards=[]\n for episode in range(n_episodes+1):\n obs = env.reset()\n done = False\n total_reward = 0\n camp=1\n while not done:\n if camp==1:\n action = env.action_space.sample()\n cmp,act=action\n action=(camp,act)\n #print(action)\n next_obs, reward, done, _ = env.step(action)\n #total_reward += reward\n obs = next_obs\n else:#後手について学習する\n action = net.act(torch.from_numpy(obs.astype(np.float32)).clone().float().to(device), epsilon_func(step))\n action=(camp,action)\n next_obs, reward, done, _ = env.step(action)\n total_reward += reward\n #action=((theta-45)//5)*9+int((velocity-2)*4)\n action_num= ((action[1][1]-45)//5)*9+int((action[1][0]-2)*4)\n replay_buffer.push([torch.from_numpy(obs.astype(np.float32)).clone(), action_num, reward, torch.from_numpy(next_obs.astype(np.float32)).clone(), done])\n obs = next_obs\n\n # ネットワークを更新 \n if len(replay_buffer) > initial_buffer_size:\n #print(\"update\")\n update(batch_size, beta_func(step))\n if (step + 1) % target_update_interval == 0:\n target_net.load_state_dict(net.state_dict())\n \n step += 1\n 
camp=-camp\n\n print('Episode: {}, Step: {}, Reward: {}'.format(episode + 1, step + 1, total_reward))\n rewards.append(total_reward)\n if episode%SAVE_NUM==0:\n x=[i for i in range(len(rewards))]\n plt.plot(x,rewards,\"bo\")\n plt.savefig(\"graphs/{:0=6}.png\".format(episode))\n torch.save(net, 'models/model_{:0=6}.pt'.format(episode))\n torch.save(target_net, 'models/target_model_{:0=6}.pt'.format(episode))\n"
}
] | 8 |
IllinoisTechServicesSRTILab/ChaliceDeployment
|
https://github.com/IllinoisTechServicesSRTILab/ChaliceDeployment
|
b04b96c6a806fcaec68867614a09eec387437255
|
21ddc856dc9c4485b1de4d8736b6a3652364f677
|
39d41b68aac0dcdec05c4109512bf2891611536b
|
refs/heads/master
| 2020-03-23T20:59:06.593000 | 2018-08-14T19:37:06 | 2018-08-14T19:37:06 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6500857472419739,
"alphanum_fraction": 0.6603773832321167,
"avg_line_length": 31.38888931274414,
"blob_id": "da7dbd958b3ea4be6969f95286c06deaf33a7374",
"content_id": "38aeefe6c4095506214b638b1b9f9a8105148e36",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 583,
"license_type": "permissive",
"max_line_length": 149,
"num_lines": 18,
"path": "/chalicelib/ics/ics.py",
"repo_name": "IllinoisTechServicesSRTILab/ChaliceDeployment",
"src_encoding": "UTF-8",
"text": "from urllib.request import urlopen\nimport json\n\nclass ICSStatus():\n\n def __init__(self):\n self.url = 'https://www.ics.illinois.edu/e107/e107_plugins/ics_usage/api.php?_call=ICSUsage::getCurrent&department_name=ICS,HOUSING,UNION-IT'\n\n def get_labs(self):\n return self._load_data()['results']\n\n def get_labs_by_department(self, department):\n data = self._load_data()['results']\n return list(filter(lambda x: x['department_name']==department, data))\n\n def _load_data(self):\n response = urlopen(self.url)\n return json.load(response)\n"
},
{
"alpha_fraction": 0.6096345782279968,
"alphanum_fraction": 0.6096345782279968,
"avg_line_length": 24.08333396911621,
"blob_id": "2bee81178140600b6746e9f865cbf7308f3fff24",
"content_id": "2f94c2558bbcc3be83da1ae3c7db20763ff0854d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 602,
"license_type": "permissive",
"max_line_length": 58,
"num_lines": 24,
"path": "/chalicelib/athletic/athletic.py",
"repo_name": "IllinoisTechServicesSRTILab/ChaliceDeployment",
"src_encoding": "UTF-8",
"text": "import json\n\nfrom chalicelib.athletic import athletic_consts\n\nclass AthleticSchedule():\n\n def __init__(self):\n self.path = 'chalicelib/athletic/data/sports.json'\n with open(self.path, 'r') as f:\n raw = json.load(f)\n self.last_update = raw['last_update']\n self.data = raw['data']\n\n def get_last_update(self):\n return {'last_update': self.last_update}\n\n def get_sports_list(self):\n return athletic_consts.sports_dict\n\n def get_sport(self, sport):\n return self.data[sport]\n\n def get_sports(self):\n return self.data\n"
},
{
"alpha_fraction": 0.7402746081352234,
"alphanum_fraction": 0.7431350350379944,
"avg_line_length": 34.67346954345703,
"blob_id": "205a06b036e6968e43a355c5fba5bfb782155925",
"content_id": "076f30d22cc39112b432e856f204fcdb7e7954b9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1748,
"license_type": "permissive",
"max_line_length": 235,
"num_lines": 49,
"path": "/README.md",
"repo_name": "IllinoisTechServicesSRTILab/ChaliceDeployment",
"src_encoding": "UTF-8",
"text": "# Public API gateway in the University of Illinois at Urbana-Champaign\n[![Build Status][travis-image]][travis-url] [](https://img.shields.io) \nThe University of Illinois at Urbana-Champaign API gateway allows anyone to build applications using data of the campus easily. It integrates data from multiple sources and websites in the public gateway, with the help of AWS chalice. \n\n#### Notes: \n- This is an unofficial development and is not supported or controlled by the University of Illinois at Urbana-Champaign itself.\n- These skills are currently being developed in progess, they are not guaranteed to function properly.\n\n## Accessing the API\nAll calls are made to the following URL with the required parameters for a given service.\n```\nhttps://69smoo2dc6.execute-api.us-east-1.amazonaws.com/api\n```\nThe data is returned in `json` format.\n\n## Endpoints\n#### EWS/ICS Workstation\n- /ews\n- /ics\n- /ics/{department}\n\n#### Sports\n- /sports/check\n- /sports/list\n- /spots/{sport}\n\n#### Dining\n- /dining/{hall}\n- /dining/{hall}/{date}\n- /dining/{hall}/{date_from}/{date_to}\n\n#### Library\n- /library\n- /library/{library_id}/{year}/{month}/{date}\n\n#### Buildings \n- /buildings\n- /buildings/{building_id}\n\n## Developers\n### Student Innovation Lab, TechService, the University of Illinois at Urbana-Champaign\n[Feng Xiyang](https://github.com/andyfengHKU) \n[Wang Jikun](https://github.com/WagJK) \n\n## Disclaimer\nThis project is initially developed by Student Innovation Lab of TechService of the University of Illinois at Urbana-Champaign.\n\n[travis-url]: https://travis-ci.org/andyfengHKU/uiuc-api-chalice\n[travis-image]: https://travis-ci.org/andyfengHKU/uiuc-api-chalice.svg?branch=master\n"
},
{
"alpha_fraction": 0.5258092880249023,
"alphanum_fraction": 0.5485564470291138,
"avg_line_length": 29.891891479492188,
"blob_id": "1c392c5646942b1e5679df6c42d456b05a6d7e4f",
"content_id": "60f6a56558adc2f7460ba21985877deb7ab2ec40",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1143,
"license_type": "permissive",
"max_line_length": 106,
"num_lines": 37,
"path": "/chalicelib/daily/daily.py",
"repo_name": "IllinoisTechServicesSRTILab/ChaliceDeployment",
"src_encoding": "UTF-8",
"text": "from urllib.request import urlopen, Request\nfrom bs4 import BeautifulSoup\nimport json\nimport re\n\nclass DailyIlliniScraper():\n\n def __init__(self):\n self.base_url = 'http://dailyillini.com/'\n\n def get_recent_news(self):\n response = self._request('feed/')\n soup = BeautifulSoup(response, 'lxml')\n return self._parse_xml(soup)\n\n def _parse_xml(self, data):\n result = []\n count = 0\n for item in data.find_all('item'):\n news = {}\n title = item.title.string\n date = item.pubdate.string\n description = re.findall(r'<description>(.*?)\\.\\.', str(item.description), flags=0)[0] + '...'\n news['title'] = title\n news['date'] = date\n news['description'] = description\n count += 1\n if count == 5:\n break\n result.append(news)\n return result\n\n def _request(self, url):\n req = Request(self.base_url + url, None,\n {'User-agent': 'Mozilla/5.0 (X11; U; Linux i686) Gecko/20071127 Firefox/2.0.0.11'})\n response = urlopen(req)\n return response\n"
},
{
"alpha_fraction": 0.5691699385643005,
"alphanum_fraction": 0.5691699385643005,
"avg_line_length": 27.11111068725586,
"blob_id": "db24621b39c91c53034ea6adf685fbddb827d681",
"content_id": "9aae4e31f0085bc2f791f8cbcafd4186cb924d2b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 506,
"license_type": "permissive",
"max_line_length": 61,
"num_lines": 18,
"path": "/chalicelib/building/buildings.py",
"repo_name": "IllinoisTechServicesSRTILab/ChaliceDeployment",
"src_encoding": "UTF-8",
"text": "import json\n\nclass BuildingInfo():\n\n def __init__(self):\n self.path = 'chalicelib/building/data/buildings.json'\n with open(self.path, 'r') as f:\n self.data = json.load(f)\n\n def get_all_buildings(self):\n return self.data\n\n def search_by_building_num(self, num):\n for building in self.data:\n for sub_building in building:\n if int(sub_building['BLDG_NUM']) == num:\n return building\n return 'no such building'\n"
},
{
"alpha_fraction": 0.6328358054161072,
"alphanum_fraction": 0.6328358054161072,
"avg_line_length": 22.928571701049805,
"blob_id": "1c9cd5a51b3cfde8c70771174408c2f9b6876e7a",
"content_id": "4ab6dc820865a5eee2aac3bdcaceaa267e288119",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 335,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 14,
"path": "/chalicelib/ews/ews.py",
"repo_name": "IllinoisTechServicesSRTILab/ChaliceDeployment",
"src_encoding": "UTF-8",
"text": "from urllib.request import urlopen\nimport json\n\nclass EWSStatus():\n\n def __init__(self):\n self.url = 'https://my.engr.illinois.edu/labtrack/util_data_json.asp'\n\n def get_labs(self):\n return self._load_data()['data']\n\n def _load_data(self):\n response = urlopen(self.url)\n return json.load(response)\n"
},
{
"alpha_fraction": 0.5780616998672485,
"alphanum_fraction": 0.5855406522750854,
"avg_line_length": 32.77894592285156,
"blob_id": "3291a2d042109ff52d1c4f17b91849f4a1976e77",
"content_id": "3b4f678425290b1022c625571542292639e9be16",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3209,
"license_type": "permissive",
"max_line_length": 91,
"num_lines": 95,
"path": "/chalicelib/athletic/athletic_scrape.py",
"repo_name": "IllinoisTechServicesSRTILab/ChaliceDeployment",
"src_encoding": "UTF-8",
"text": "from urllib.request import urlopen, Request\nfrom bs4 import BeautifulSoup\nimport datetime\nimport json\n\n#from chalicelib.athletic import athletic_consts\nimport athletic_consts\n\nclass AthleticScheduleScraper():\n\n def __init__(self):\n self.url = 'http://www.fightingillini.com/schedule.aspx?path='\n self.sports = list(athletic_consts.sports_dict.keys())\n self.path = 'data/sports.json'\n\n def print_to_file(self):\n now = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n data = self._get_sports()\n json_text = {\n 'last_update': now,\n 'data': data\n }\n with open(self.path, 'w') as f:\n json.dump(json_text, f, indent=4)\n\n def _get_sports(self):\n data = {}\n for sport in self.sports:\n data[sport] = self._get_sport(sport)\n return data\n\n def _get_sport(self, sport):\n response = self._request(sport)\n sport_info = self._parse_html(response)\n return sport_info\n\n def _request(self, sport):\n request_url = self.url + sport\n req = Request(request_url, None,\n {'User-agent': 'Mozilla/5.0 (X11; U; Linux i686) Gecko/20071127 Firefox/2.0.0.11'})\n response = urlopen(req)\n return response\n\n def _parse_html(self, html):\n soup = BeautifulSoup(html, 'html.parser')\n games = []\n for item in soup.find_all(class_='schedule_game'):\n game = {}\n game['opponent'] = self._get_opponent(item)\n game['date'] = self._get_date(item)\n game['time'] = self._get_time(item)\n game['location'] = self._get_location(item)\n game['result'] = self._get_result(item)\n games.append(game)\n return games\n\n def _get_opponent(self, game):\n opponent_div = game.find(class_='schedule_game_opponent_name')\n if opponent_div.a is None and opponent_div.span is None:\n return opponent_div.string.strip()\n elif opponent_div.span is None:\n if opponent_div.a.string is None:\n return opponent_div.a.span.string.strip()\n else:\n return opponent_div.a.string.strip()\n else:\n return opponent_div.span.string.strip()\n\n def _get_date(self, game):\n date_div = 
game.find(class_='schedule_game_opponent_date')\n return date_div.string.strip()\n\n def _get_time(self, game):\n time_div = game.find(class_='schedule_game_opponent_time')\n return time_div.string.strip()\n\n def _get_location(self, game):\n location_div = game.find(class_='schedule_game_location')\n if location_div.span is None:\n return location_div.string.strip()\n else:\n if location_div.span.string is None:\n return 'not available'\n else:\n return location_div.span.string.strip()\n\n def _get_result(self, game):\n result_div = game.find(class_='schedule_game_results')\n if result_div is None or len(result_div.div.contents) == 0:\n return 'not available'\n else:\n return result_div.div.contents[0]\n\n#t = AthleticScheduleScraper()\n#t.print_to_file()\n"
},
{
"alpha_fraction": 0.6884488463401794,
"alphanum_fraction": 0.6976897716522217,
"avg_line_length": 33.431819915771484,
"blob_id": "c3095acc9378a80fb83b127e688d52876e44d372",
"content_id": "47e1d719ce71d54abf2f1e450b713fbee5caeb7b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1515,
"license_type": "permissive",
"max_line_length": 146,
"num_lines": 44,
"path": "/chalicelib/laundry/laundry.py",
"repo_name": "IllinoisTechServicesSRTILab/ChaliceDeployment",
"src_encoding": "UTF-8",
"text": "from urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport re\nimport json\n\nfrom chalicelib.laundry import laundry_consts\n\nclass LaundryStatus():\n\n\tdef __init__(self):\n\t\tself.url = 'http://classic.laundryview.com/lvs.php?s=1506'\n\n\tdef get_laundry_status(self):\n\t\treturn self._load_data()\n\n\tdef get_laundry_by_building(self, building_id):\n\t\tdata = self._load_data()\n\t\tfor building in data:\n\t\t\tif laundry_consts.building_id2name[building_id] == building['building']:\n\t\t\t\treturn building\n\t\treturn None\n\n\tdef get_laundry_by_building_machine(self, building_id, machine):\n\t\tdata = self._load_data()\n\t\tfor building in data:\n\t\t\tif laundry_consts.building_id2name[building_id] == building['building']:\n\t\t\t\treturn building[machine]\n\t\treturn None\n\n\tdef _load_data(self):\n\t\tresponse = urlopen(self.url).read().decode('utf-8')\n\t\treturn self._parse_data(response)\n\n\tdef _parse_data(self, data):\n\t\tresults = []\n\t\tsoup = BeautifulSoup(data, 'html.parser')\n\t\telements = soup.find(\"div\",{\"id\" : \"campus1\"}).findAll(\"span\", {\"class\" : \"user-avail\"})\n\t\tfor key in laundry_consts.switcher.keys():\n\t\t\tbuilding = laundry_consts.building_id2name[key]\n\t\t\tfor index, item in enumerate(laundry_consts.switcher[key]):\n\t\t\t\twasher_num = int(re.findall(r\"\\d+\", elements[item - 1].get_text())[0])\n\t\t\t\tdryer_num = int(re.findall(r\"\\d+\", elements[item - 1].get_text())[1])\n\t\t\t\tresults.append({'building' : building, 'room' : laundry_consts.building_room_matcher[key][index], 'washer' : washer_num, 'dryer' : dryer_num})\n\t\treturn results\n"
},
{
"alpha_fraction": 0.5968712568283081,
"alphanum_fraction": 0.5968712568283081,
"avg_line_length": 30.961538314819336,
"blob_id": "8fc0b984644d5e9065afa3ef7e2149ecf559e507",
"content_id": "c425c9a2f3460df8fdc85de5dea2d926f6010ec8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 831,
"license_type": "permissive",
"max_line_length": 100,
"num_lines": 26,
"path": "/chalicelib/library/library.py",
"repo_name": "IllinoisTechServicesSRTILab/ChaliceDeployment",
"src_encoding": "UTF-8",
"text": "from urllib.request import urlopen\nimport json\n\n\nclass Library():\n\n def __init__(self):\n self.url_all = \"https://quest.library.illinois.edu/LibDirectory/Api/UnitsWithCalendars\"\n self.url_search = \"https://quest.library.illinois.edu/LibDirectory/Api/SearchCalendar/\"\n\n def get_all(self):\n response_json = urlopen(self.url_all)\n try:\n response = json.load(response_json)\n return response\n except ValueError:\n return None\n\n def search_library(self, library_id, y, m, d):\n request_url = self.url_search + str(library_id) + \"/\" + str(y) + \"/\" + str(m) + \"/\" + str(d)\n response_json = urlopen(request_url)\n try:\n response = json.load(response_json)\n return response\n except ValueError:\n return None\n"
},
{
"alpha_fraction": 0.7006350159645081,
"alphanum_fraction": 0.7018445730209351,
"avg_line_length": 29.620370864868164,
"blob_id": "c72ef326323b3d112c9d678c0144db5c7c72f1d4",
"content_id": "405700de40ac5b278f0827cb9118965e29da7bfd",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3307,
"license_type": "permissive",
"max_line_length": 93,
"num_lines": 108,
"path": "/app.py",
"repo_name": "IllinoisTechServicesSRTILab/ChaliceDeployment",
"src_encoding": "UTF-8",
"text": "from chalice import Chalice, Response\n\nfrom chalicelib.ews import ews\nfrom chalicelib.ics import ics\nfrom chalicelib.dining import dining\nfrom chalicelib.library import library\nfrom chalicelib.athletic import athletic\nfrom chalicelib.daily import daily\nfrom chalicelib.building import buildings\nfrom chalicelib.laundry import laundry\n\napp = Chalice(app_name='UIUC-API')\n\n\[email protected]('/')\ndef index():\n return Response(body='Welcome to University of Illinois, Urbana-Champaign API',\n status_code=200,\n headers={'Content-Type': 'text/plain'})\n# EWS router\[email protected]('/ews', methods=['GET'])\ndef get_ews_status():\n return ews.EWSStatus().get_labs()\n\n\n# ICS router\[email protected]('/ics', methods=['GET'])\ndef get_ics_status():\n return ics.ICSStatus().get_labs()\n\[email protected]('/ics/{department}', methods=['GET'])\ndef get_ics_by_department(department):\n return ics.ICSStatus().get_labs_by_department(department)\n\n\n# athletic router\[email protected]('/sports/check', methods=['GET'])\ndef check_sports():\n return athletic.AthleticSchedule().get_last_update()\n\[email protected]('/sports/list', methods=['GET'])\ndef get_sports_list():\n return athletic.AthleticSchedule().get_sports_list()\n\[email protected]('/sports/{sport}', methods=['GET'])\ndef get_sport(sport):\n return athletic.AthleticSchedule().get_sport(sport)\n\n\n# dining router\[email protected]('/dining/{hall}', methods=['GET'])\ndef get_dining_today(hall):\n return dining.Dining().get_menu_today(hall)\n\[email protected]('/dining/{hall}/{date_from}', methods=['GET'])\ndef get_dining_date(hall, date_from):\n return dining.Dining().get_menu_date(hall, date_from, date_from)\n\[email protected]('/dining/{hall}/{date_from}/{date_to}', methods=['GET'])\ndef get_dining_date_range(hall, date_from, date_to):\n return dining.Dining().get_menu_date(hall, date_from, date_to)\n\n\n# library router\[email protected]('/library', methods=['GET'])\ndef get_all_library():\n 
return library.Library().get_all()\n\[email protected]('/library/{library_id}/{y}/{m}/{d}', methods=['GET'])\ndef search_library(library_id, y, m, d):\n return library.Library().search(library_id, y, m, d)\n\n\n#daily news router\[email protected]('/dailynews', methods=['GET'])\ndef get_all_news():\n return daily.DailyIlliniScraper().get_recent_news()\n\[email protected]('dailynews/{n}', methods=['GET'])\ndef get_n_news(n):\n return daily.DailyIlliniScraper().get_recent_news()[0:n]\n\n\n#builidng router\[email protected]('/buildings', methods=['GET'])\ndef get_all_buildings():\n return buildings.BuildingInfo().get_all_buildings()\n\[email protected]('/buildings/{building_num}', methods=['GET'])\ndef get_building_by_num(building_num):\n return buildings.BuildingInfo().search_by_building_num(int(building_num))\n\n\n#laundry router\[email protected]('/laundry', methods=['GET'])\ndef get_all_laundry_status():\n return laundry.LaundryStatus().get_laundry_status()\n\n#Code below currently not working. To be changed.\n'''\[email protected]('/laundry/{building_id}', methods=['GET'])\ndef get_laundry_by_building(building_id):\n return laundry.LaundryStatus().get_laundry_by_building(building_id)\n\[email protected]('/laundry/{building_id}/{machine_type}', methods=['GET'])\ndef get_laundry_by_building_machine(building_id, machine_type):\n return laundry.LaundryStatus().get_laundry_by_building_machine(building_id, machine_type)\n'''\n"
},
{
"alpha_fraction": 0.5430916547775269,
"alphanum_fraction": 0.5745553970336914,
"avg_line_length": 30.782608032226562,
"blob_id": "779c79a56c6d0a3ae37a6efbfd00e553c3298f3a",
"content_id": "d4ffdda31edb020521d922cec7399dddd4d2eba6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 731,
"license_type": "permissive",
"max_line_length": 129,
"num_lines": 23,
"path": "/chalicelib/dining/dining.py",
"repo_name": "IllinoisTechServicesSRTILab/ChaliceDeployment",
"src_encoding": "UTF-8",
"text": "from urllib.request import urlopen\nimport json\n\nclass Dining():\n\n def __init__(self):\n self.url = 'https://web.housing.illinois.edu/MobileDining2/WebService/Search.aspx?k=7A828F94-620B-4EE3-A56F-328036CC3C04'\n\n def get_menu_today(self, hall):\n request_url = self.url + \"&id=\" + hall + \"&t=json\"\n response = urlopen(request_url)\n try:\n return json.load(response)\n except:\n return ''\n\n def get_menu_date(self, hall, date_from, date_to):\n request_url = self.url + \"&id=\" + hall + \"&from=\" + date_from +\"&to=\" + date_to + \"&t=json\"\n response = urlopen(request_url)\n try:\n return json.load(response)\n except:\n return ''\n"
}
] | 11 |
mengqingjian/Phantom2
|
https://github.com/mengqingjian/Phantom2
|
dcf20564653acfd11d96841ad42e177b8dbe373b
|
e997e042d0b8ec9effe4d5997d7766f64a6f80fe
|
7712b98cdd362cac7c54295f7dd3279bf4eb1cd6
|
refs/heads/master
| 2021-08-31T06:06:16.535080 | 2017-12-20T13:50:03 | 2017-12-20T13:50:03 | 114,891,493 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5873397588729858,
"alphanum_fraction": 0.6025640964508057,
"avg_line_length": 36.69696807861328,
"blob_id": "9edce767e20da718a738bfaf622e5d1fd0986686",
"content_id": "127b76884aa4281b6ee35565e04e45b5c8137d7f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1248,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 33,
"path": "/app03/views.py",
"repo_name": "mengqingjian/Phantom2",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\nfrom app03 import pager\nHOST_LIST=[]\nfor i in range(1,104):\n HOST_LIST.append(\"c%s.com\"%i)\nfrom app03.pager import Pagination\ndef hosts(request):\n pager_obj = Pagination(request.GET.get('page', 1), len(HOST_LIST), request.path_info)\n host_list = HOST_LIST[pager_obj.start:pager_obj.end]\n html = pager_obj.page_html()\n return render(request, 'hosts.html', {'host_list': host_list, \"page_html\": html})\n # try:\n # current_page=int(request.GET.get('page',1))\n # except Exception as e:\n # current_page=1\n #\n # per_page_count=10\n # start=(current_page-1)*per_page_count\n # end=current_page*10\n # host_list=HOST_LIST[start:end]\n # total=len(HOST_LIST)\n # max_page_html,div=divmod(total,per_page_count)\n # if div:\n # max_page_html+=1\n # page_html_list=[]\n # for i in range(1,max_page_html+1):\n # if i==current_page:\n # temp='<a class=\"active\" href=\"/hosts/?page%s\">%s</a>'%(i,i)\n # else:\n # temp = '<a href=\"/hosts/?page%s\">%s</a>' % (i, i)\n # page_html_list.append(temp)\n # page_html=''.join(page_html_list)\n # return render(request,\"hosts.html\",{\"host_list\":host_list,\"page_html\":page_html})\n\n\n\n\n"
},
{
"alpha_fraction": 0.559109091758728,
"alphanum_fraction": 0.5629323720932007,
"avg_line_length": 35.86353302001953,
"blob_id": "55114a621ab44a89fef039446e7c50b9a0444f77",
"content_id": "8def213daba7144748ca6b80f1e1582604c6032f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 17084,
"license_type": "no_license",
"max_line_length": 202,
"num_lines": 447,
"path": "/stark/service/v1.py",
"repo_name": "mengqingjian/Phantom2",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import url\nfrom django.urls import reverse\nfrom django.utils.safestring import mark_safe\nfrom django.shortcuts import HttpResponse,render,redirect\nfrom django.http import QueryDict\nfrom django.db.models import Q\nimport copy\n\n\"\"\"userconfig继承starkconfig\"\"\"\n\nclass FilterOption(object):\n def __init__(self,field_name,multi=False,condition=None,is_choice=False):\n \"\"\"\n\n :param field_name: 字段\n :param multi: 是否多选\n :param condition: 显示数据的筛选条件\n :param is_choice: 是否是choice\n \"\"\"\n self.field_name=field_name\n self.multi=multi\n self.is_choice=is_choice\n self.condition=condition\n def get_queryset(self,_field):\n if self.condition:\n return _field.rel.to.objects.filter(self.condition)\n return _field.rel.to.objects.all()\n\n def get_choices(self,_field):\n return _field.choices\n\n\n\nclass FilterRow(object): #创建对象\n def __init__(self,option,data,request):\n self.data=data\n self.option=option\n self.request=request\n def __iter__(self): #生成器也是可迭代对象的一种\n params=copy.deepcopy(self.request.GET) #params就是request.GET这个参数\n params._mutable=True\n current_id=params.get(self.option.field_name) #取的是当前发过来的值,这个值是字符串\n current_id_list=params.getlist(self.option.field_name)\n\n if self.option.field_name in params:\n origin_list=params.pop(self.option.field_name)\n url = \"{0}?{1}\".format(self.request.path_info, params.urlencode())\n yield mark_safe('<a href=\"{0}\">全部</a>'.format(url))\n params.setlist(self.option.field_name,origin_list)\n else:\n url = \"{0}?{1}\".format(self.request.path_info, params.urlencode())\n yield mark_safe('<a class=\"active\" href=\"{0}\">全部</a>'.format(url))\n for val in self.data:\n if self.option.is_choice: #这个表示的是choice选项\n pk,text=str(val[0]),val[1] #把这个值全部转换成字符串\n else:\n pk,text=str(val.pk),str(val) #这里如果不是choice就是一个对象,对象直接.pk就可以取出来\n\n #当前的URL\n #self.request.path_info 取的是当前的路径\n #self.request.GET 它是传过来的url\n if not self.option.multi:\n \"\"\"单选\"\"\"\n params[self.option.field_name]=pk\n url 
= \"{0}?{1}\".format(self.request.path_info, params.urlencode())\n if current_id==pk: #传过来的值和pk做判断\n yield mark_safe(\"<a class='active',href='{0}'>{1}</a>\".format(url,text))\n else:\n yield mark_safe(\"<a href='{0}'>{1}</a>\".format(url,text))\n else:\n \"\"\"多选\"\"\"\n _params = copy.deepcopy(params)\n id_list = _params.getlist(self.option.field_name)\n print(\"urrent_id_list\",current_id_list)\n print(\"pk\",pk)\n print(\"id_list\",id_list)\n if pk in current_id_list:\n id_list.remove(pk)\n print(\"55id_list\",id_list)\n _params.setlist(self.option.field_name, id_list)\n url = \"{0}?{1}\".format(self.request.path_info,_params.urlencode())\n print(\"url\",url)\n yield mark_safe(\"<a class='active'href='{0}'>{1}</a>\".format(url, text))\n else:\n\n id_list.append(pk)\n #params被重新复制\n _params.setlist(self.option.field_name,id_list)\n #创建url\n url = \"{0}?{1}\".format(self.request.path_info, _params.urlencode())\n yield mark_safe(\"<a href='{0}'>{1}</a>\".format(url,text))\n\n\n\n\nclass ChangeList(object):\n def __init__(self,config,queryset):\n self.config=config\n self.list_display=config.get_list_display()\n self.model_class=config.model_class\n self.request=config.request\n self.show_add_btn=config.get_show_add_btn()\n self.actions = config.get_actions()\n self.show_actions=config.get_show_actions()\n self.comb_filter=config.get_comb_filter()\n #搜索\n self.show_search_form=config.get_show_search_form()\n self.search_form_val=config.request.GET.get(config.search_key,\"\")\n\n\n from utile.pager import Pagination\n current_page = self.request.GET.get('page', 1)\n totale_count = queryset.count()\n pager_obj = Pagination(current_page, totale_count, self.request.path_info, self.request.GET, per_page_count=3)\n self.pager_obj=pager_obj\n self.data_list=queryset[pager_obj.start:pager_obj.end]\n\n def modify_actions(self):\n result=[]\n for func in self.actions:\n temp={\"name\":func.__name__,'text':func.short_desc}\n result.append(temp)\n return result\n\n def 
add_url(self):\n return self.config.get_add_url()\n\n def head_list(self):\n \"\"\"\n 构造表头\n :return:\n \"\"\"\n result = []\n for filted_name in self.list_display:\n if isinstance(filted_name, str):\n # 根据类和字段名称,获取字段对象的verbose_name\n verbose_name = self.model_class._meta.get_field(filted_name).verbose_name\n else:\n verbose_name = filted_name(self.config, is_header=True) #self是startconfig中edit中self对象\n result.append(verbose_name)\n return result\n\n def body_list(self):\n data_list = self.data_list\n new_data_list = []\n for row in data_list:\n # row是 UserInfo中的字段\n # row.id,row.name,row.age\n temp = []\n # print(\"999\",row)\n for field_name in self.list_display:\n if isinstance(field_name, str):\n val = getattr(row,field_name)\n # print(\"8995556\",val)\n else:\n val = field_name(self.config,row)\n temp.append(val)\n # print(\"+++++\",temp)\n new_data_list.append(temp)\n return new_data_list\n\n def gen_comb_filter(self):\n \"\"\"\n 生成器\n :return:\n \"\"\"\n from django.db.models import ForeignKey,ManyToManyField\n for option in self.comb_filter:\n _field=self.model_class._meta.get_field(option.field_name)\n if isinstance(_field,ForeignKey):\n row=FilterRow(option,option.get_queryset(_field),self.request)\n elif isinstance(_field,ManyToManyField):\n row = FilterRow(option,option.get_queryset(_field),self.request)\n else:\n row=FilterRow(option,option.get_choices(_field),self.request)\n #可迭代对象\n yield row #返回的是可迭代对象\n\n\n\nclass StarkConfig(object):\n #1.定制页面显示的列\n def checkbox(self,obj=None,is_header=False):\n if is_header:\n return '选择'\n return mark_safe('<input type=\"checkbox\" name=\"pk\" value=\"%s\" />' %(obj.id,))\n def edit(self,obj=None,is_header=False):\n if is_header:\n return '编辑'\n\n query_str=self.request.GET.urlencode()\n if query_str:\n params=QueryDict(mutable=True)\n params[self._query_param_key]=query_str\n return mark_safe('<a href=\"%s?%s\">编辑</a>' %(self.get_chang_url(obj.id),params.urlencode(),))\n return mark_safe('<a href=\"%s\">编辑</a>' 
% (self.get_chang_url(obj.id),))\n def delete(self,obj=None,is_header=False):\n if is_header:\n return '删除'\n return mark_safe('<a href=\"%s\">删除</a>' %(self.get_delete_url(obj.id),))\n list_display = []\n def get_list_display(self):\n data=[]\n if self.list_display: #self.list_display就是UserInfoConfig中的list_display\n data.extend(self.list_display)\n data.append(StarkConfig.edit)\n data.append(StarkConfig.delete)\n data.insert(0,StarkConfig.checkbox)\n return data\n\n #2.是否显示添加按钮\n show_add_btn=True\n def get_show_add_btn(self):\n return self.show_add_btn\n\n #3.model_form_class\n model_form_class=None\n def get_model_form_class(self):\n if self.model_form_class:\n return self.model_form_class\n from django.forms import ModelForm\n # class add_ModeForm(ModelForm):\n # def Meta(self):\n # model = self.model_class\n # fields = \"__all__\"\n meta=type('Meta',(object,),{'model':self.model_class,'fields':\"__all__\"})\n add_ModeForm=type(\"add_ModeForm\",(ModelForm,),{'Meta':meta})\n return add_ModeForm\n\n #4.关键字搜索\n\n show_search_form=False\n def get_show_search_form(self):\n return self.show_search_form\n search_fields=[]\n def get_search_fields(self):\n result=[]\n if self.search_fields:\n result.extend(self.search_fields)\n return result\n\n def get_search_condition(self):\n key_word = self.request.GET.get(self.search_key)\n search_fields = self.get_search_fields()\n condition = Q()\n condition.connector = \"or\"\n if key_word and self.get_show_search_form():\n for field_name in search_fields:\n condition.children.append((field_name, key_word))\n return condition\n\n #5.actions定制\n\n show_actions = False\n\n def get_show_actions(self):\n return self.show_actions\n\n actions = []\n def get_actions(self):\n result = []\n if self.actions:\n result.extend(self.actions)\n return result\n\n #6.组合搜索\n comb_filter=[]\n def get_comb_filter(self):\n result = []\n if self.comb_filter:\n result.extend(self.comb_filter)\n return result\n\n\n\n def 
__init__(self,model_class,site):\n self.model_class=model_class\n self.site=site\n self.request=None\n self._query_param_key=\"_listfilter\"\n self.search_key=\"_q\"\n\n\n def warp(self,view_func):\n def inner(request,*args,**kwargs):\n self.request=request\n return view_func(request,*args,**kwargs)\n return inner\n\n\n def get_urls(self):\n app_model_name=(self.model_class._meta.app_label,self.model_class._meta.model_name,)\n url_patterns=[\n url(r'^$',self.warp(self.changlist_view),name=\"%s_%s_changlist\"%app_model_name),\n url(r'^add/$',self.warp(self.add_view),name=\"%s_%s_add\"%app_model_name),\n url(r'^(\\d+)/delete/$',self.warp( self.delete_view), name=\"%s_%s_delete\" % app_model_name),\n url(r'^(\\d+)/change/$', self.warp(self.change_view), name=\"%s_%s_chang\" % app_model_name),\n ]\n\n url_patterns.extend(self.extra_url())\n return url_patterns\n def extra_url(self):\n return []\n\n @property\n def urls(self):\n return self.get_urls()\n\n def get_chang_url(self,nid):\n name=\"stark:%s_%s_chang\"%(self.model_class._meta.app_label,self.model_class._meta.model_name)\n edit_url=reverse(name,args=(nid,))\n return edit_url\n def get_add_url(self):\n name=\"stark:%s_%s_add\"%(self.model_class._meta.app_label,self.model_class._meta.model_name)\n edit_url=reverse(name)\n return edit_url\n def get_delete_url(self,nid):\n name=\"stark:%s_%s_delete\"%(self.model_class._meta.app_label,self.model_class._meta.model_name)\n edit_url=reverse(name,args=(nid,))\n return edit_url\n def get_list_url(self):\n name=\"stark:%s_%s_changlist\"%(self.model_class._meta.app_label,self.model_class._meta.model_name)\n edit_url=reverse(name)\n return edit_url\n\n#########################处理请求的方式##############################\n def changlist_view(self,request,*args,**kwargs):\n\n if request.method==\"POST\" and self.get_show_actions():\n func_name_str=request.POST.get('list_action')\n action_func=getattr(self,func_name_str)\n ret=action_func(request)\n if ret:\n return ret\n\n 
comb_conditions={}\n option_list=self.get_comb_filter()\n for key in request.GET.keys():\n value_list=request.GET.getlist(key)\n flag=False\n for option in option_list:\n if option.field_name==key:\n flag=True\n break\n if flag:\n comb_conditions[\"%s__in\" %key]=value_list\n queryset = self.model_class.objects.filter(self.get_search_condition()).filter(**comb_conditions).distinct()\n c1=ChangeList(self,queryset) #self是当前的对象\n # print(\"55555\",c1.data_list)\n return render(request,'stark/changelist.html',{\"c1\":c1})\n\n\n # head_list = []\n # for filted_name in self.get_list_display():\n # if isinstance(filted_name, str):\n # # 根据类和字段名称,获取字段对象的verbose_name\n # verbose_name = self.model_class._meta.get_field(filted_name).verbose_name\n # else:\n # verbose_name = filted_name(self, is_header=True)\n # head_list.append(verbose_name)\n #\n #\n #\n # #处理分页\n # from utile.pager import Pagination\n # current_page=request.GET.get('page', 1)\n # totale_count=self.model_class.objects.all().count()\n # pager_obj = Pagination(current_page,totale_count, request.path_info, request.GET,per_page_count=3)\n #\n #\n #\n # #处理表中数据\n # data_list = self.model_class.objects.all()[pager_obj.start:pager_obj.end]\n # new_data_list = []\n # for row in data_list:\n # # row是 UserInfo(id=2,name='alex2',age=181)\n # # row.id,row.name,row.age\n # temp = []\n # for field_name in self.get_list_display():\n # if isinstance(field_name, str):\n # val = getattr(row,field_name)\n # else:\n # val = field_name(self,row)\n # temp.append(val)\n # new_data_list.append(temp)\n #\n # return render(request, 'stark/changelist.html', {'data_list': new_data_list, 'head_list': head_list,'pager_obj':pager_obj,'add_url':self.get_add_url(),'show_add_btn':self.get_show_add_btn(),})\n\n\n #添加的数据及页面,用modelform\n def add_view(self, request, *args, **kwargs):\n model_form_class=self.get_model_form_class()\n if request.method==\"GET\":\n form=model_form_class()\n return 
render(request,\"stark/add_view.html\",{'form':form})\n else:\n form=model_form_class(request.POST)\n if form.is_valid():\n form.save()\n return redirect(self.get_list_url())\n return render(request, \"stark/add_view.html\", {'form': form})\n #修改数据及页面\n def change_view(self, request, nid, *args, **kwargs):\n obj=self.model_class.objects.filter(pk=nid).first()\n if not obj:\n redirect(self.get_list_url())\n model_form_class=self.get_model_form_class()\n if request.method==\"GET\":\n form=model_form_class(instance=obj)\n return render(request,\"stark/change_view.html\",{'form':form})\n else:\n form=model_form_class(instance=obj,data=request.POST)\n if form.is_valid():\n form.save()\n list_query_str=request.GET.get(self._query_param_key)\n list_url=\"%s?%s\" %(self.get_list_url(),list_query_str,)\n return redirect(list_url)\n return render(request, \"stark/change_view.html\", {'form': form})\n def delete_view(self, request, nid):\n # return HttpResponse(123)\n self.model_class.objects.filter(pk=nid).delete()\n return redirect(self.get_list_url())\nclass StarkSite(object):\n def __init__(self):\n self._registry = {}\n def register(self,model_class,stark_config_class=None):\n if not stark_config_class:\n stark_config_class=StarkConfig\n self._registry[model_class]=stark_config_class(model_class,self)\n\n def get_urls(self):\n url_pattern=[]\n\n for model_class,stark_config_obj in self._registry.items():\n \"\"\"为每一个类,创建了4个url\"\"\"\n app_name=model_class._meta.app_label\n model_name=model_class._meta.model_name\n\n curd_url=url(r'^%s/%s/'%(app_name,model_name,),(stark_config_obj.urls,None,None))\n url_pattern.append(curd_url)\n return url_pattern\n @property\n def urls(self):\n return(self.get_urls(),None,'stark')\n\n\nsite=StarkSite()\n"
},
{
"alpha_fraction": 0.48767462372779846,
"alphanum_fraction": 0.5008217096328735,
"avg_line_length": 24.36458396911621,
"blob_id": "2fd478c7a7e349a544de92c0cd08e95400ad196f",
"content_id": "8f3991bc786cf15493292f43ca9fe5fc0b0c6183",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 2462,
"license_type": "no_license",
"max_line_length": 179,
"num_lines": 96,
"path": "/stark/templates/stark/changelist.html",
"repo_name": "mengqingjian/Phantom2",
"src_encoding": "UTF-8",
"text": "{% load staticfiles %}\n<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n <meta charset=\"UTF-8\">\n <meta http-equiv=\"X-UA-Compatible\" content=\"IE=edge\">\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\">\n <title>Title</title>\n <link rel=\"stylesheet\" href=\"{% static \"stark/bootstrap/css/bootstrap.css\" %}\" />\n <style>\n .list_filter a{\n display: inline-block;\n padding:3px 8px;\n border: 1px solid #2e6da4;\n margin: 3px 0;\n }\n .list_filter a.active{\n background-color: #2e6da4;\n color: white;\n }\n </style>\n</head>\n<body>\n\n<div class=\"container\">\n <h1>列表页面</h1>\n <hr>\n<div>\n {% for Filter_row in c1.gen_comb_filter %}\n <div class=\"list_filter\">\n {% for item in Filter_row %}\n {{ item }}\n {% endfor %}\n </div>\n {% endfor %}\n\n</div>\n\n{% if c1.show_search_form %}\n<span class=\"form-group\">\n <form method=\"get\">\n <input name=\"{{ c1.config.search_key }}\" value=\"{{ c1.search_form_val }}\" class=\"form-control\" placeholder=\"请输入搜索框\" type=\"text\" style=\"display:inline-block;width: 200px\">\n <button class=\"btn btn-primary\"><span class=\"glyphicon glyphicon-search\"></span></button>\n\n </form>\n </span>\n{% endif %}\n\n\n{% if c1.show_add_btn %}\n <a class=\"btn btn-primary\" href=\"{{ c1.add_url }}\">添加</a>\n{% endif %}\n\n\n <form method=\"post\">\n {% csrf_token %}\n {% if c1.show_actions %}\n<div class=\"form_group\" >\n <select name=\"list_action\" class=\"form-control\"style=\"display:inline-block;width: 200px\">\n {% for item in c1.modify_actions %}\n <option value=\"{{ item.name }}\">{{ item.text }}</option>\n {% endfor %}\n </select>\n <button class=\"btn btn-primary\">执行</button>\n</div>\n {% endif %}\n\n <table class=\"table table-bordered\">\n <thead>\n <tr>\n {% for item in c1.head_list %}\n <th>{{ item }}</th>\n {% endfor %}\n </tr>\n </thead>\n <tbody>\n {% for obj_list in c1.body_list %}\n <tr>\n {% for col in obj_list %}\n <td>{{ col }}</td>\n {% endfor %}\n </tr>\n {% endfor 
%}\n </tbody>\n\n </table>\n </form>\n<div>\n <ul class=\"pagination\">\n {{ c1.pager_obj.page_html|safe }}\n </ul>\n</div>\n </div>\n\n</body>\n</html>"
},
{
"alpha_fraction": 0.5651663541793823,
"alphanum_fraction": 0.5694715976715088,
"avg_line_length": 25.863157272338867,
"blob_id": "405c5b14d6daa68118d786a0959e37be193a6b35",
"content_id": "90a8cf553f314e38fc2f441a897552576269c62f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2625,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 95,
"path": "/app01/stark.py",
"repo_name": "mengqingjian/Phantom2",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import HttpResponse,render,redirect\nfrom django.conf.urls import url\nfrom stark.service import v1\nfrom app01 import models\nfrom django.forms import ModelForm\nclass UserInfoModelForm(ModelForm):\n class Meta:\n model=models.UserInfo\n fields=\"__all__\"\n error_messages={\n 'name':{\n 'required':'用户名不能为空'\n }\n }\nclass UserInfoConfig(v1.StarkConfig):\n list_display = ['id','name']\n\n show_add_btn = True\n model_form_class = UserInfoModelForm\n # def extra_url(self):\n # url_list=[\n # url(r'^xxxXX$',self.func),\n # ]\n # return url_list\n # def func(self,request):\n # return HttpResponse('......')\n show_search_form = True\n search_fields=[\"id__contains\",'name__contains']\n\n show_actions=True\n def multi_del(self,request):\n pk_list=request.POST.getlist('pk')\n self.model_class.objects.filter(id__in=pk_list).delete()\n return redirect(\"/userinfo/\")\n multi_del.short_desc=\"批量删除\"\n actions = [multi_del, ]\n\nv1.site.register(models.UserInfo,UserInfoConfig)\n\nclass UserTypeConfig(v1.StarkConfig):\n list_display = ['name',]\n\nv1.site.register(models.UserType,UserTypeConfig)\n\nclass RoleConfig(v1.StarkConfig):\n list_display = ['id','xxx']\n\n def delete_view(self, request, nid):\n if request.method == \"GET\":\n return render(request, \"stark/delete.html\")\n else:\n self.model_class.objects.filter(pk=nid).delete()\n return redirect(self.get_list_url())\nv1.site.register(models.Role,RoleConfig)\n\n\nclass HostModelForm(ModelForm):\n class Meta:\n model = models.Host\n fields = ['id','hostname','ip','port']\n error_messages = {\n 'hostname':{\n 'required':'主机名不能为空',\n },\n 'ip':{\n 'required': 'IP不能为空',\n 'invalid': 'IP格式错误',\n }\n\n }\n\nclass HostConfig(v1.StarkConfig):\n def ip_port(self, obj=None, is_header=False):\n if is_header:\n return '自定义列'\n return \"%s:%s\" % (obj.ip, obj.port,)\n\n list_display = ['id', 'ip', 'hostname', 'port',ip_port]\n\n\n\n show_add_btn = True\n model_form_class = HostModelForm\n\n\n def 
extra_url(self):\n urls = [\n url('^report/$', self.report_view)\n ]\n return urls\n\n def report_view(self, request):\n return HttpResponse('自定义报表')\n\nv1.site.register(models.Host,HostConfig)\n\n\n\n"
},
{
"alpha_fraction": 0.6868537664413452,
"alphanum_fraction": 0.7001476883888245,
"avg_line_length": 32.900001525878906,
"blob_id": "2e6c07217a85452c44df02a5c5f62d54bc5ccad4",
"content_id": "f69980ed278574a7c17cc5cd7b86ae3648a63e81",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 711,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 20,
"path": "/app01/models.py",
"repo_name": "mengqingjian/Phantom2",
"src_encoding": "UTF-8",
"text": "from django.db import models\n\n# Create your models here.\nclass UserInfo(models.Model):\n name=models.CharField(verbose_name=\"用户名称\",max_length=32)\n def __str__(self):\n return self.name\nclass UserType(models.Model):\n name=models.CharField(verbose_name=\"类型名称\",max_length=32)\n def __str__(self):\n return self.name\nclass Role(models.Model):\n xxx=models.CharField(verbose_name=\"角色名称\",max_length=32)\n def __str__(self):\n return self.xxx\n\nclass Host(models.Model):\n ip=models.GenericIPAddressField(verbose_name=\"IP\",protocol='ipv4')\n hostname=models.CharField(verbose_name='主机名',max_length=32)\n port=models.IntegerField(verbose_name='端口')"
}
] | 5 |
anoukv/disambiguateCWSPR
|
https://github.com/anoukv/disambiguateCWSPR
|
b03c826898646be1e644161b443afe3b1686dd92
|
081211d4cf1907e3c24a93b4a8057868b3d94e2c
|
f7690d77d5ecc3b7f393e9be840247f16a3d6f95
|
refs/heads/master
| 2021-01-18T13:45:17.506964 | 2014-03-30T18:20:28 | 2014-03-30T18:20:28 | 17,630,962 | 1 | 0 | null | 2014-03-11T13:16:47 | 2014-03-12T17:34:33 | 2014-03-12T17:34:33 | null |
[
{
"alpha_fraction": 0.6401148438453674,
"alphanum_fraction": 0.6505202651023865,
"avg_line_length": 23.65486717224121,
"blob_id": "c82de23d7f9deee905aa97352180905d4cd11144",
"content_id": "e13ca8b18d6252faad4d0d9fdb35519a01206810",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2787,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 113,
"path": "/clusteringAnouk/getGlobalCOC.py",
"repo_name": "anoukv/disambiguateCWSPR",
"src_encoding": "UTF-8",
"text": "import sys\nfrom collections import defaultdict\nfrom time import time\nimport shelve\nfrom math import sqrt\n\ndef get_document_vocabulary(inpt, minimumOccurence = 5):\n\ttotal = defaultdict(int)\n\tfor word in inpt:\n\t\ttotal[word] += 1\n\ts = sum(total.values())\n\tvoc = dict([ (key, total[key]) for key in total.keys() if total[key] > minimumOccurence ])\n\tvoc[\"_UNKNOWN_\"] = s - sum(voc.values())\n\treturn voc\n\ndef normalize_coc(coc):\n\ttotal = sqrt( sum([v**2 for v in coc.values()]) )\n\tnew_coc = dict()\n\tfor key in coc.keys():\n\t\tnew_coc[key] = coc[key] / total\n\treturn new_coc\n\ndef relatedness(word, coc, vocabulary):\n\tnew_coc = dict()\n\tfor key in coc.keys():\n\t\ttry:\n\t\t\tnew_coc[key] = coc[key] / float((vocabulary[word] + vocabulary[key] - coc[key]))\n\t\texcept:\n\t\t\tnew_coc[key] = 1\n\treturn new_coc\n\ndef getCocMatrix(inpt,skipsize):\n\tk = 2\n\tqueueSize = skipsize * 2 + 1\n\tqueueMid = skipsize + 1\n\n\tqueueIsReady = lambda x : len(x) == queueSize\n\tdef push(element, queue):\n\t\tqueue.append(element)\n\t\tif len(queue) > queueSize:\n\t\t\tqueue.pop(0)\n\t\n\tvocabulary = get_document_vocabulary(inpt)\n\tvocSize = len(vocabulary) + 1\n\n\twordToVec = dict()\n\tfor word in vocabulary:\n\t\twordToVec[word] = defaultdict(int)\n\n\tqueue = []\n\tfor word in inpt:\n\t\tpush(word, queue)\n\t\tif queueIsReady(queue):\n\t\t\tmid = queue[queueMid]\n\t\t\tif mid in vocabulary:\n\t\t\t\tfor i in xrange(skipsize):\n\t\t\t\t\tif queue[i] in vocabulary:\n\t\t\t\t\t\tword1 = queue[i]\n\t\t\t\t\telse:\n\t\t\t\t\t\tword1 = \"_UNKNOWN_\"\n\t\t\t\t\tif queue[i+1+skipsize] in vocabulary:\n\t\t\t\t\t\tword2 = queue[i+1+skipsize]\n\t\t\t\t\telse:\n\t\t\t\t\t\tword2 = \"_UNKNOWN_\"\n\t\t\t\t\n\t\t\t\t\twordToVec[mid][word1] += 1\n\t\t\t\t\twordToVec[mid][word2] += 1\n\n\tnormalized_wordToVec = dict()\n\trelations = dict()\n\tfor word in wordToVec.keys():\n\t\tnormalized_wordToVec[word] = normalize_coc(wordToVec[word])\n\t\trelations[word] = 
relatedness(word, wordToVec[word], vocabulary)\n\t\trelations[word] = normalize_coc(relations[word])\n\n\treturn dict( {'voc': vocabulary, 'coc' : normalized_wordToVec, 'rel' : relations} )\n\n\ndef read_args():\n\tdef read_file(filename):\n\t\tf = open(train, 'r')\n\t \tinpt = f.readline().replace(\"\\n\", \"\").split(\" \")\n\t \tf.close()\n\t \treturn inpt\n\n\tif len(sys.argv) < 3:\n \t\tprint \"Please call me as:\"\n \t\tprint \"python main.py training.txt output.txt (skipsize = 5)\"\n \t\tsys.exit()\n\n\ttrain = sys.argv[1]\n \toutput_file = sys.argv[2]\n \tskipsize = 5\n \tif len(sys.argv) == 4:\n \t\tskipsize = int(sys.argv[3])\n\n \treturn (read_file(train), output_file, skipsize)\n\ndef main_anouk_is_a_charm():\n\t(inpt, output_file, skipsize) = read_args()\n\n\tcoc = getCocMatrix(inpt, skipsize)\n\tfor key in coc:\n\t\tmyShelve = shelve.open(output_file + \"_\" + key)\n\t\tmyShelve.update(coc[key])\n\t\tmyShelve.close()\n\t\n\nif __name__ == \"__main__\":\n\tstart = time()\n\tmain_anouk_is_a_charm()\n\tstop = time()\n \tprint \"I spent\", int(stop-start+0.5), \"seconds.\"\n\n"
},
{
"alpha_fraction": 0.6976593136787415,
"alphanum_fraction": 0.7028608322143555,
"avg_line_length": 32.71111297607422,
"blob_id": "660802759022cc41eca4635b703452503109f4e2",
"content_id": "4ced64310ddb14272be44283c713fc279394e3b5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1538,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 45,
"path": "/AccuracyEval.py",
"repo_name": "anoukv/disambiguateCWSPR",
"src_encoding": "UTF-8",
"text": "from __future__ import division\nimport sys\nimport pickle\nfrom collections import defaultdict\nfrom scipy import stats\nimport math\n\ngoodAnswers = \"QuestionsAnswers/word_relationship.answers\"\n#givenAnswers = \"precomputedAnswers/testCristinaPearsonCorrelation.answered\"\n#givenAnswers = \"precomputedAnswers/testCristinaEuclideanDistance.answered\"\n#givenAnswers = \"precomputedAnswers/testCristinamanhattanSimilarity.answered\"\ngivenAnswers = \"precomputedAnswers/testCristinaSpearmanCorrelation.answered\"\n\n# Written by Remi\n# Approved and edited by Anouk (made all words lower case)\ndef load_GivenAnswers(filename):\n f = open(filename, 'r')\n c = [ l.lower().replace(\"\\n\",\"\") for l in f.readlines()]\n f.close()\n return c\n\ndef load_ExpectedAnswers(filename):\n f = open(filename, 'r')\n c = [ l.lower().replace(\"\\n\",\"\").split(\" \")[1] for l in f.readlines()]\n f.close()\n return c\n\n\nif __name__ == \"__main__\":\n print \"Loading given answers...\"\n answersGiven = load_GivenAnswers(givenAnswers)\n print answersGiven[0]\n print \"Loading the expected answers\"\n answersExpected = load_ExpectedAnswers(goodAnswers)\n print answersExpected[0]\n print \"Computing accuracy\"\n correctAnswers = 0\n \n if(len(answersGiven) == len(answersExpected)):\n for i in range(len(answersGiven)):\n if(answersGiven[i] == answersExpected[i]):\n correctAnswers = correctAnswers + 1\n accuracy = correctAnswers/len(answersGiven) * 100\n print \"Accuracy is\"\n print accuracy\n \n"
},
{
"alpha_fraction": 0.49723583459854126,
"alphanum_fraction": 0.5089243650436401,
"avg_line_length": 14.7683687210083,
"blob_id": "3341134d1089d75844f47d30df1958898bf680a8",
"content_id": "016a15a8432272537a0ce5731a51ad8a21f2110b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 12662,
"license_type": "no_license",
"max_line_length": 220,
"num_lines": 803,
"path": "/not-used/SemanticRelations.c",
"repo_name": "anoukv/disambiguateCWSPR",
"src_encoding": "UTF-8",
"text": "#include <stdio.h>\n\n#include <string.h>\n\n#include <math.h>\n\n#include <stdlib.h>\n#include <dirent.h>\n\n\n\nconst int max_size=2000;\n\nconst int N=40;\n\nconst int max_w=50;\n\n\n\nint main(int argc, char **argv)\n\n{\n\n // declarations\n\n FILE *wordProjections, *questions, *answers, *output;\n\n int numberOfWords, size, a, b, c, d, vA, vB, vC, vD, missing = 0;\n\n char stA[max_size], stB[max_size], stC[max_size], stD[max_size], file_name[max_size], questions_file_name[max_size], answers_file_name[max_size], output_file_name[max_size];\n char tempC[max_size], tempD[max_size];\n\n float *M, *y;\n\n char *vocab;\n\n float len, dist, temp;\n\n \n // argument handling\n\n if (argc<4) {\n\n printf(\"Usage: ./dist <PROJECTIONS> <QUESTIONS> <ANSWERS> <OUTPUT> \\nwhere PROJECTIONS contains word projections, QUESTIONS contains questions and ANSWERS contains answers and OUTPUT contains the output file\\n\");\n\n return 0;\n\n }\n \n strcpy(file_name, argv[1]);\n\n strcpy(questions_file_name, argv[2]); \n strcpy(answers_file_name, argv[3]); \n strcpy(output_file_name, argv[4]); \n \n\n wordProjections=fopen(file_name, \"rb\");\n\n if (wordProjections==NULL)\n\n {\n\n printf(\"Projections file not found\\n\");\n\n return -1;\n\n }\n\n \n\n // get the number of words and number of dimensions (size)\n\n fscanf(wordProjections, \"%d\", &numberOfWords);\n\n fscanf(wordProjections, \"%d\", &size);\n\n \n\n // allocate memory for the vocabulary, matrix and y\n\n vocab=(char *)malloc(numberOfWords*max_w*sizeof(char));\n\n M=(float *)malloc(numberOfWords*size*sizeof(float));\n\n y=(float *)malloc(size*sizeof(float));\n\n \n\n if (M==NULL)\n\n {\n\n printf(\"Cannot allocate memory: %d MB\\n\", numberOfWords*size*sizeof(float)/1048576);\n\n return -1;\n\n }\n\n \n\n // fill the vocabulary and the matrix with normalized vectors\n\n for (b=0; b<numberOfWords; b++)\n\n {\n\n fscanf(wordProjections, \"%s\", &vocab[b*max_w]);\n\n for (a=0; a<size; a++)\n\n {\n\n 
fscanf(wordProjections, \"%f\", &M[a+b*size]);\n\n }\n\n \n\n len=0;\n\n for (a=0; a<size; a++)\n\n {\n\n len+=M[a+b*size]*M[a+b*size];\n\n }\n\n \n\n len=sqrt(len);\n\n for (a=0; a<size; a++)\n\n {\n\n M[a+b*size]/=len;\n\n }\n\n }\n\n \n\n // make whole vocabulary uppercase\n\n for (a=0; a<numberOfWords*max_w; a++)\n\n {\n\n vocab[a]=toupper(vocab[a]);\n\n }\n\n \n\n fclose(wordProjections);\n\n \n\n // open file with questions\n\n questions=fopen(questions_file_name, \"rb\");\n\n if (questions==NULL)\n\n {\n\n printf(\"Questions file not found\\n\");\n\n return -1;\n\n }\n\n // open file with answers\n answers=fopen(answers_file_name, \"rb\");\n if (answers == NULL)\n {\n\tprintf(\"Questions file not found\\n\");\n\n return -1;\n }\n\n \n\n // open file for output\n\n output = fopen(output_file_name, \"w\");\n\n \n\n // init counter\n\n int counterAnswers = 0, counterQuestions = 0;\n\n char FirstQuestion[max_size], SecondQuestion[max_size], ThirdQuestion[max_size], FourthQuestion[max_size], line[max_size];\n \n while ( fgets (line, sizeof line, questions) != NULL ) /* read a line */\n {\n\tcounterQuestions++;\n\tif (counterQuestions >= 5 && (counterQuestions <= 6 || (counterQuestions <= 8 && line != \"\")))\n\t{\n\t\tif(counterQuestions == 5) { strcpy(FirstQuestion, line); } \n\t\tif(counterQuestions == 6) { strcpy(SecondQuestion, line); } \n\t\tif(counterQuestions == 7) { strcpy(ThirdQuestion, line); } \n\t\tif(counterQuestions == 8) { strcpy(FourthQuestion, line); } \n\t}\n\t\n\tif(counterQuestions >= 7) { break; }\n }\n fclose(questions);\n\n char str[max_size];\n char *ptr;\n strcpy (str, FirstQuestion);\n strtok_r (str, \":\", &ptr);\n ptr[strlen(ptr)-1]='\\0';\n \n float resultsFirstQuestion[100], resultsSecondQuestion[100], resultsThirdQuestion[100], resultsFourthQuestion[100], resultsAverage[100];\n char resultsC[100][max_size], resultsD[100][max_size];\n int NoOfAddedResults = 0, NoOfAddedResultsSecondQuestion = 0, NoOfAddedResultsThirdQuestion = 0, 
NoOfAddedResultsFourthQuestion = 0,\n\tNoOfResultsC = 0, NoOfResultsD = 0;\n \n while(fscanf(answers, \"%s\", &stC) != EOF)\n {\n \tcounterAnswers++;\n\t\n\n char *res = stC;\n\tres++[strlen(res)-1] = 0;\n\t\n\tchar ans[max_size];\n \tchar *ans2;\n \tstrcpy (ans, res);\n \tstrtok_r (ans, \":\", &ans2);\n \tptr[strlen(ans2)-1]='\\0';\n\tstrcpy(stC, ans);\n\tstrcpy(stD, ans2);\n\n\tstrcpy(resultsC[NoOfResultsC], stC); \n\tNoOfResultsC++;\n\tstrcpy(resultsD[NoOfResultsD], stD); \n\tNoOfResultsD++;\n\n\tfor (a=0; a<strlen(stC); a++)\n\n {\n\n stC[a]=toupper(stC[a]);\n\n }\n\tfor (a=0; a<strlen(stD); a++)\n\n {\n\n stD[a]=toupper(stD[a]);\n\n }\n\t\n\tfor (vC=0; vC<numberOfWords; vC++)\n\n {\n\n if (!strcmp(&vocab[vC*max_w], stC))\n\n {\n\n break;\n\n }\n\n }\n\tfor (vD=0; vD<numberOfWords; vD++)\n\n {\n\n if (!strcmp(&vocab[vD*max_w], stD))\n\n {\n\n break;\n\n }\n\n }\n if(vC == numberOfWords || vD == numberOfWords) { continue; }\n \n int MissingAB = 0;\n\t\n\t//FirstQuestion\n\tif(strlen(FirstQuestion) != 0)\n\t{\n\t\tchar str[max_size];\n \t\tchar *ptr;\n \t\tstrcpy (str, FirstQuestion);\n \t\tstrtok_r (str, \":\", &ptr);\n \t\tptr[strlen(ptr)-1]='\\0';\n\t\tstrcpy(stA, str);\n\t\tstrcpy(stB, ptr);\n\n\t\t// uppercase the words\n\n\t\tfor (a=0; a<strlen(stA); a++)\n\n\t\t{\n\n\t\t stA[a]=toupper(stA[a]);\n\n\t\t}\n\n\t\tfor (a=0; a<strlen(stB); a++)\n\n\t\t{\n\n\t\t stB[a]=toupper(stB[a]);\n\t\t}\n\n\t\tfor (vA=0; vA<numberOfWords; vA++){\n\n\t\t if (!strcmp(&vocab[vA*max_w], stA))\n\n\t\t {\n\n\t\t break;\n\n\t\t }\n\n\t\t}\n\n\t\tfor (vB=0; vB<numberOfWords; vB++)\n\n\t\t{\n\n\t\t if (!strcmp(&vocab[vB*max_w], stB))\n\n\t\t {\n\n\t\t break;\n\n\t\t }\n\n\t\t}\n\n\n\t\tif (vA == numberOfWords || vB == numberOfWords || vC == numberOfWords || vD == numberOfWords)\n\n\t\t{\n\t\t MissingAB++;\n\n\n\n\t\t missing++;\n\n\t\t}\n\t\telse\n\t\t{\n\t\t // compute y\n\n\t\t for (a=0; a<size; a++)\n\n\t\t {\n\n\t\t y[a] = M[a+vB*size]-M[a+vA*size] + M[a+ vC *size];\n\n\t\t 
}\n\n\n\n\t\t // normalize y again.\n\n\t\t len=0;\n\n\t\t for (a=0; a<size; a++)\n\n\t\t {\n\n\t\t len+=y[a]*y[a];\n\n\t\t }\n\n\t\t \n\n\t\t len=sqrt(len);\n\n\t\t for (a=0; a<size; a++)\n\n\t\t {\n\n\t\t y[a]/=len;\n\n\t\t }\n\n\t\t // compute the cosine similarity between y and D\n\t\t dist = 0;\n\t\t for (a=0; a<size; a++) \n {\t\t\t\n\t\t\tdist+=y[a]*M[a+vD*size];\n\t\t }\n\t\t \n\t\t resultsFirstQuestion[NoOfAddedResults] = dist;\n NoOfAddedResults = NoOfAddedResults + 1;\n\t\t \t }\n\t}\n \n\t//SecondQuestion\n\tif(strlen(SecondQuestion) != 0)\n\t{\n\t\tchar str[max_size];\n \t\tchar *ptr;\n \t\tstrcpy (str, SecondQuestion);\n \t\tstrtok_r (str, \":\", &ptr);\n \t\tptr[strlen(ptr)-1]='\\0';\n\t\tstrcpy(stA, str);\n\t\tstrcpy(stB, ptr);\n\n\t\t// uppercase the words\n\n\t\tfor (a=0; a<strlen(stA); a++)\n\n\t\t{\n\n\t\t stA[a]=toupper(stA[a]);\n\n\t\t}\n\n\t\tfor (a=0; a<strlen(stB); a++)\n\n\t\t{\n\n\t\t stB[a]=toupper(stB[a]);\n\t\t}\n\n\t\tfor (vA=0; vA<numberOfWords; vA++){\n\n\t\t if (!strcmp(&vocab[vA*max_w], stA))\n\n\t\t {\n\n\t\t break;\n\n\t\t }\n\n\t\t}\n\n\t\tfor (vB=0; vB<numberOfWords; vB++)\n\n\t\t{\n\n\t\t if (!strcmp(&vocab[vB*max_w], stB))\n\n\t\t {\n\n\t\t break;\n\n\t\t }\n\n\t\t}\n\n\t\t\n\t\tif (vA == numberOfWords || vB == numberOfWords || vC == numberOfWords || vD == numberOfWords)\n\n\t\t{\n\t\t MissingAB++;\n\n\n\n\t\t missing++;\n\n\t\t}\n\t\telse\n\t\t{\n\t\t // compute y\n\n\t\t for (a=0; a<size; a++)\n\n\t\t {\n\n\t\t y[a] = M[a+vB*size]-M[a+vA*size] + M[a+ vC *size];\n\n\t\t }\n\n\n\n\t\t // normalize y again.\n\n\t\t len=0;\n\n\t\t for (a=0; a<size; a++)\n\n\t\t {\n\n\t\t len+=y[a]*y[a];\n\n\t\t }\n\n\t\t \n\n\t\t len=sqrt(len);\n\n\t\t for (a=0; a<size; a++)\n\n\t\t {\n\n\t\t y[a]/=len;\n\n\t\t }\n\n\t\t // compute the cosine similarity between y and D\n\t\t dist = 0;\n\t\t for (a=0; a<size; a++) \n {\t\t\t\n\t\t\tdist+=y[a]*M[a+vD*size];\n\t\t }\n\t\t \n\t\t resultsSecondQuestion[NoOfAddedResultsSecondQuestion] = dist;\n 
NoOfAddedResultsSecondQuestion = NoOfAddedResultsSecondQuestion + 1;\n\t\t \n\t }\n\t\n\n\t}\n\n\t//Third Question\n\tif(strlen(ThirdQuestion) != 0)\n\t{\n\t\tchar str[max_size];\n \t\tchar *ptr;\n \t\tstrcpy (str, ThirdQuestion);\n \t\tstrtok_r (str, \":\", &ptr);\n \t\tptr[strlen(ptr)-1]='\\0';\n\t\tstrcpy(stA, str);\n\t\tstrcpy(stB, ptr);\n\n\n\t\tfor (a=0; a<strlen(stA); a++)\n\n\t\t{\n\n\t\t stA[a]=toupper(stA[a]);\n\n\t\t}\n\n\t\tfor (a=0; a<strlen(stB); a++)\n\n\t\t{\n\n\t\t stB[a]=toupper(stB[a]);\n\t\t}\n\n\t\tfor (vA=0; vA<numberOfWords; vA++){\n\n\t\t if (!strcmp(&vocab[vA*max_w], stA))\n\n\t\t {\n\n\t\t break;\n\n\t\t }\n\n\t\t}\n\n\t\tfor (vB=0; vB<numberOfWords; vB++)\n\n\t\t{\n\n\t\t if (!strcmp(&vocab[vB*max_w], stB))\n\n\t\t {\n\n\t\t break;\n\n\t\t }\n\n\t\t}\n\n\t\tif (vA == numberOfWords || vB == numberOfWords || vC == numberOfWords || vD == numberOfWords)\n\n\t\t{\n\t\t MissingAB++;\n\n\t\t\n\n\t\t missing++;\n\n\t\t}\n\t\telse\n\t\t{\n\t\t // compute y\n\n\t\t for (a=0; a<size; a++)\n\n\t\t {\n\n\t\t y[a] = M[a+vB*size]-M[a+vA*size] + M[a+ vC *size];\n\n\t\t }\n\n\n\n\t\t // normalize y again.\n\n\t\t len=0;\n\n\t\t for (a=0; a<size; a++)\n\n\t\t {\n\n\t\t len+=y[a]*y[a];\n\n\t\t }\n\n\t\t \n\n\t\t len=sqrt(len);\n\n\t\t for (a=0; a<size; a++)\n\n\t\t {\n\n\t\t y[a]/=len;\n\n\t\t }\n\n\t\t // compute the cosine similarity between y and D\n\t\t dist = 0;\n\t\t for (a=0; a<size; a++) \n {\t\t\t\n\t\t\tdist+=y[a]*M[a+vD*size];\n\t\t }\n\t\t \n\t\t resultsThirdQuestion[NoOfAddedResultsThirdQuestion] = dist;\n NoOfAddedResultsThirdQuestion = NoOfAddedResultsThirdQuestion + 1;\n\t\t \n\t }\n\t\n\n\t}\n\n\t// Fourth Question\n\tif(strlen(FourthQuestion) != 0)\n\t{\n\t\tchar str[max_size];\n \t\tchar *ptr;\n \t\tstrcpy (str, ThirdQuestion);\n \t\tstrtok_r (str, \":\", &ptr);\n \t\tptr[strlen(ptr)-1]='\\0';\n\t\tstrcpy(stA, str);\n\t\tstrcpy(stB, ptr);\n\n\t\t// uppercase the words\n\n\t\tfor (a=0; a<strlen(stA); a++)\n\n\t\t{\n\n\t\t 
stA[a]=toupper(stA[a]);\n\n\t\t}\n\n\t\tfor (a=0; a<strlen(stB); a++)\n\n\t\t{\n\n\t\t stB[a]=toupper(stB[a]);\n\t\t}\n\n\t\tfor (vA=0; vA<numberOfWords; vA++){\n\n\t\t if (!strcmp(&vocab[vA*max_w], stA))\n\n\t\t {\n\n\t\t break;\n\n\t\t }\n\n\t\t}\n\n\t\tfor (vB=0; vB<numberOfWords; vB++)\n\n\t\t{\n\n\t\t if (!strcmp(&vocab[vB*max_w], stB))\n\n\t\t {\n\n\t\t break;\n\n\t\t }\n\n\t\t}\n\n\t\tif (vA == numberOfWords || vB == numberOfWords || vC == numberOfWords || vD == numberOfWords)\n\n\t\t{\n\t\t MissingAB++;\n\n\t\t\n\n\t\t missing++;\n\n\t\t}\n\t\telse\n\t\t{\n\t\t // compute y\n\n\t\t for (a=0; a<size; a++)\n\n\t\t {\n\n\t\t y[a] = M[a+vB*size]-M[a+vA*size] + M[a+ vC *size];\n\n\t\t }\n\n\n\n\t\t // normalize y again.\n\n\t\t len=0;\n\n\t\t for (a=0; a<size; a++)\n\n\t\t {\n\n\t\t len+=y[a]*y[a];\n\n\t\t }\n\n\t\t \n\n\t\t len=sqrt(len);\n\n\t\t for (a=0; a<size; a++)\n\n\t\t {\n\n\t\t y[a]/=len;\n\n\t\t }\n\n\t\t // compute the cosine similarity between y and D\n\t\t dist = 0;\n\t\t for (a=0; a<size; a++) \n {\t\t\t\n\t\t\tdist+=y[a]*M[a+vD*size];\n\t\t }\n\t\t \n\t\t resultsFourthQuestion[NoOfAddedResultsFourthQuestion] = dist;\n NoOfAddedResultsFourthQuestion = NoOfAddedResultsFourthQuestion + 1;\n\t\t \n\t }\n\n\t}\n\n\tint totalQuestions = 3 - MissingAB;\n\tif(NoOfAddedResultsFourthQuestion > 0) { totalQuestions = 4 - MissingAB; }\n\t\n\tfor(a = 0; a < NoOfAddedResults; a++){\n\t\tif (totalQuestions == 3) { resultsAverage[a] = (resultsFirstQuestion[a] + resultsSecondQuestion[a] + resultsThirdQuestion[a])/totalQuestions; }\n\t\telse { resultsAverage[a] = (resultsFirstQuestion[a] + resultsSecondQuestion[a] + resultsThirdQuestion[a] + resultsFourthQuestion[a])/totalQuestions; }\n\t}\n\n\n } \n\n // sort semantic similarity scores descending\n for(a = 0; a < NoOfAddedResults; a++)\n {\n for(b = a; b < NoOfAddedResults; b++)\n {\n if(resultsAverage[a] < resultsAverage[b]){\n temp = resultsAverage[a];\n\t\tstrcpy(tempC, resultsC[a]);\n strcpy(tempD, 
resultsD[a]);\n resultsAverage[a] = resultsAverage[b];\n strcpy(resultsC[a], resultsC[b]);\n strcpy(resultsD[a], resultsD[b]);\n resultsAverage[b] = temp;\n strcpy(resultsC[b], tempC);\n strcpy(resultsD[b], tempD);\n }\n }\n }\n\n for(a = 0; a < NoOfAddedResults; a++){\n\t\tfprintf(output, \"%f\", resultsAverage[a]);\n\t\tfprintf(output, \"%s\", \" \");\n\t\tfprintf(output, \"%s\", resultsC[a]);\n\t\tfprintf(output, \"%s\", \":\");\n fprintf(output, \"%s\", resultsD[a]);\n\t\tfprintf(output, \"%s\", \"\\n\");\n\t}\n\n fclose(answers);\n fclose(output);\n\n \n\n \n\n return 0;\n\n}\n"
},
{
"alpha_fraction": 0.6325966715812683,
"alphanum_fraction": 0.6510128974914551,
"avg_line_length": 21.163265228271484,
"blob_id": "ab2289f12ad44f3758967aa55b28d95a71d4718d",
"content_id": "a8ec63538ed0ad69d79d6ab51e6c946437ca00b4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1086,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 49,
"path": "/empEval.py",
"repo_name": "anoukv/disambiguateCWSPR",
"src_encoding": "UTF-8",
"text": "# A tool for empirical evaluation of the clusters\n# Anouk Visser \n\nimport sys\nimport shelve\n\nif not len(sys.argv) > 1:\n\tprint \"Call me as:\"\n\tprint \"python empEval.py <sensesOfCOCFile>\"\n\tsys.exit()\n\ninputfile = sys.argv[1]\nprint \"Opening: \", inputfile\n\ncoc = shelve.open(inputfile)\nwhile True:\n\tword = raw_input('Which word would you like to inspect? (type q to quit): ').lower()\n\tif word == 'q':\n\t\tbreak\n\tif word in coc:\n\t\twordRep = coc[word]\n\t\tsense0 = sorted(wordRep[0].items(), key = lambda x: x[1], reverse=True)\n\t\tsense1 = sorted(wordRep[1].items(), key = lambda x: x[1], reverse=True)\n\t\tcommonTerms = set(sense0).intersection(set(sense1))\n\t\tprint\n\t\tprint \"Sense 1: \"\n\t\tone = []\n\t\tfor term in sense0:\n\t\t\tif term not in commonTerms:\n\t\t\t\tone.append(term[0])\n\t\tprint one[:50]\n\t\tprint\n\t\tprint \" ----------------- \"\n\t\tprint\n\n\t\tprint \"Sense 2: \"\n\t\ttwo = []\n\t\tfor term in sense1:\n\t\t\tif term not in commonTerms:\n\t\t\t\ttwo.append(term[0])\n\t\tprint two[:50]\n\t\tprint\n\t\tprint \"Cluster distance: \", wordRep['clusterDistance']\n\n\telse:\n\t\tprint \"This word was not disambiguated...\"\n\tprint\n\nprint \"Goodbye\"\n"
},
{
"alpha_fraction": 0.6093813180923462,
"alphanum_fraction": 0.6223210692405701,
"avg_line_length": 25.117021560668945,
"blob_id": "803433a132677fcb07fc92dc58ae7c4002c7143d",
"content_id": "ffd58a18beeb9257dd2c1038a10b08c80e9a168c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2473,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 94,
"path": "/not-used/amb_eval.py",
"repo_name": "anoukv/disambiguateCWSPR",
"src_encoding": "UTF-8",
"text": "import sys\nimport pickle\nfrom collections import defaultdict\nfrom math import sqrt\n\nquestions = \"QuestionsAnswers/word_relationship.questions\"\n\ndef load_questions(filename=questions):\n\tf = open(filename, 'r')\n\tc = [ tuple(l.replace(\"\\n\",\"\").split(\" \")) for l in f.readlines()]\n\tf.close()\n\treturn c\n\ndef load_vectors(filename):\n\tdef normalize(vec):\n\t\tvec = [ float(x) for x in vec]\n\t\ttotal = sqrt( sum([v**2 for v in vec]) )\n\t\tnew_vec = []\n\t\tfor v in vec:\n\t\t\tnew_vec.append(v/total)\n\t\treturn tuple(new_vec)\n\n\tf = open(filename,'r')\n\tf.readline()\n\tcontent = [ filter( lambda x : not x in [\"\\n\",\"\"], l.split(\" \")) for l in f.readlines() ]\n\tcontent = [ (l[0], normalize(l[1:])) for l in content ]\n\tcontent = filter(lambda x : not x[1] == None, content)\n\twords = defaultdict(list)\n\tfor (word, vector) in content:\n\t\tif \"_\" in word:\n\t\t\twords[word.split(\"_\")[0]].append((word,vector))\n\t\telse:\n\t\t\twords[word].append((word,vector))\n\treturn words\n\ndef save_answers(answers, filename):\n\tf = open(filename, 'w')\n\tf.write( \"\".join([ word + \"\\n\" for word in answers]) )\n\tf.close()\n\ndef vector_distance(vec1, vec2):\n\treturn sum([x[0] * x[1] for x in zip(vec1,vec2)])\n\ndef vector_add(vec1, vec2):\n\treturn [ x[0] + x[1] for x in zip(vec1, vec2) ]\n\ndef answer((a,b,c), vecs):\n\tfor e in (a,b,c):\n\t\tif e not in vecs or len(vecs[e]) == 0:\n\t\t\treturn \"NONE\" \n\n\tdef find_AB_match(a,b,vecs):\n\t\tbest_distance = 2\n\t\tbest_tuple = (None,None)\n\t\tfor va in vecs[a]:\n\t\t\tfor vb in vecs[b]:\n\t\t\t\tdistance = vector_distance(va[1],vb[1])\n\t\t\t\tif distance < best_distance:\n\t\t\t\t\tbest_distance = distance\n\t\t\t\t\tbest_tuple = (va[1], vb[1])\n\t\treturn best_tuple\n\n\t(av, bv) = find_AB_match(a,b,vecs)\n\tdiff = map(lambda x : x[0] - x[1], zip(av,bv))\n\n\tcvs = [ v[1] for v in vecs[c] ]\n\n\tbest_distance = 2\n\tbest_word = \"NONE\"\n\tfor reference_vec in 
cvs:\n\t\tfor key in vecs.keys():\n\t\t\tif key not in (a,b,c):\n\t\t\t\tfor v in vecs[key]:\n\t\t\t\t\tnew_vec = vector_add(v[1], diff)\n\t\t\t\t\tdistance = vector_distance(reference_vec, new_vec)\n\t\t\t\t\tif distance < best_distance:\n\t\t\t\t\t\tbest_distance = distance\n\t\t\t\t\t\tbest_word = key\n\n\tprint \"Best word:\", best_word\n\treturn best_word\n\n\n\nif __name__ == \"__main__\":\n\tif not len(sys.argv) == 2:\n\t\tprint \"Call me as:\"\n\t\tprint \"python amb_eval.py wordvectors.txt\"\n\t\tsys.exit()\n\n\tquestions = load_questions()\n\tvecs = load_vectors(sys.argv[1])\n\tanswers = map(lambda x : answer(x, vecs), questions)\n\tsave_answers(answers, \"precomputedAnswers/\" + sys.argv[1].split(\"/\")[-1] + \".answered\")\n\t\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.6478537321090698,
"alphanum_fraction": 0.660572350025177,
"avg_line_length": 25.787233352661133,
"blob_id": "a732e1c9f17f5c5184fdcdf1172cc1a4b7e15866",
"content_id": "893a5d43e623a0641fdcfafe4f3f4ca2dfa457fb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1258,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 47,
"path": "/score.py",
"repo_name": "anoukv/disambiguateCWSPR",
"src_encoding": "UTF-8",
"text": "import os, sys\n\nif not len(sys.argv) > 2:\n\tprint \"Call me as:\"\n\tprint \"python score.py <projections> <answers>\"\n\tsys.exit()\n\ndef compare(together):\n\tcorrect = 0\n\tfor i in together:\n\t\tanswer = i[0]\n\n\t\t# for the syntactic test downloaded from http://research.microsoft.com/en-us/projects/rnn/\n\t\t# has a different format, so that's where the split is needed\n\t\trealAnswer = i[1].split(' ')\n\t\tif len(realAnswer) > 1:\n\t\t\trealAnswer = realAnswer[1]\n\t\telse:\n\t\t\trealAnswer = realAnswer[0]\n\n\t\t# correct++ if the two answers match\n\t\tif answer == realAnswer:\n\t\t\tcorrect += 1\n\n\t# compute accuracy\n\treturn correct / float(len(together)) * 100\n\nvecs = sys.argv[1]\nanswer = sys.argv[2]\n\nvecsname = \"precomputedAnswers/\" + vecs.split(\"/\")[-1] + \".\" + answer.split(\"/\")[-1].split(\".\")[0] + \".answered\"\n\nif not os.path.isfile(vecsname):\n\tprint \"Need to calculate answers for\", vecs\n\tos.system(\"pypy qa.py \" + vecs + \" \" + answer.replace(\"answers\", \"questions\"))\n\tprint \"Caclulated answers for \", vecs\n\nanswers = open(answer, 'r')\nreference = open(vecsname, 'r')\n\nans = [ l.lower().replace(\"\\n\",\"\") for l in answers.readlines() ]\nref = [ l.lower().replace(\"\\n\",\"\") for l in reference.readlines() ]\n\nanswers.close()\nreference.close()\n\nprint \"\\nAccuracy:\\t\", compare(zip(ref,ans))"
},
{
"alpha_fraction": 0.8063380122184753,
"alphanum_fraction": 0.8133803009986877,
"avg_line_length": 112.5999984741211,
"blob_id": "c02e067fa0a09d52535282a0bd42b33727ddc748",
"content_id": "e8e92f2ae3be49892a49bb1c4eb0a977776e5285",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 568,
"license_type": "no_license",
"max_line_length": 230,
"num_lines": 5,
"path": "/QuestionsAnswers/README.md",
"repo_name": "anoukv/disambiguateCWSPR",
"src_encoding": "UTF-8",
"text": "\nword_relationship.answers and word_relationship.questions is the syntactic test used in \"Linguistic Regularities in Continuous Space Word Representations\" downloaded from http://research.microsoft.com/en-us/projects/rnn/\n\nquestions_extended and answer_extended is the syntactic test downloaded from https://code.google.com/p/word2vec/\n\nOriginal_files_and_conversions_scripts holds the original file questions-words.txt from https://code.google.com/p/word2vec/ and a script to convert a four column file to the desired format (3 columns - question, 1 column - answer)"
},
{
"alpha_fraction": 0.6645335555076599,
"alphanum_fraction": 0.6702218651771545,
"avg_line_length": 31.70697593688965,
"blob_id": "e58e5b53993c9f4c8a2944864148c73eedae509f",
"content_id": "1d397523c743566e0c1888571daae81cf894eda9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7032,
"license_type": "no_license",
"max_line_length": 137,
"num_lines": 215,
"path": "/qa.py",
"repo_name": "anoukv/disambiguateCWSPR",
"src_encoding": "UTF-8",
"text": "import sys\nimport pickle\nfrom collections import defaultdict\nfrom math import sqrt\n\n# Written by Remi\n# Approved by Anouk\ndef save_answers(answers, filename):\n\tf = open(filename, 'w')\n\tf.write( \"\".join([ word + \"\\n\" for word in answers]) )\n\tf.close()\n\n# Written by Remi\n# Approved and edited by Anouk (made all words lower case)\ndef load_questions(filename):\n\tprint \"Loading questions...\"\n\tf = open(filename, 'r')\n\tc = [ tuple(l.lower().replace(\"\\n\",\"\").split(\" \")) for l in f.readlines()]\n\tf.close()\n\treturn c\n\n# Written by Remi\n# Approved by Anouk\ndef normalizeString(vec):\n\tvec = [ float(x) for x in vec]\n\ttotal = sqrt( sum([v**2 for v in vec]) )\n\tnew_vec = []\n\tfor v in vec:\n\t\tnew_vec.append(v/total)\n\treturn tuple(new_vec)\n\ndef normalize(vec):\n\ttotal = sqrt( sum([v**2 for v in vec]) )\n\tnew_vec = []\n\tfor v in vec:\n\t\tnew_vec.append(v/total)\n\treturn tuple(new_vec)\n\n# written by Remi\n# Approved and edited by Anouk (made all words lower case and took out internal normalize function)\ndef load_vectors(filename):\n\tprint \"Loading word projections\"\n\tf = open(filename,'r')\n\tf.readline()\n\tcontent = [ filter( lambda x : not x in [\"\\n\",\"\"], l.replace(\"\\n\", \"\").split(\" \")) for l in f.readlines() ]\n\tcontent = [ (l[0], normalizeString(l[1:])) for l in content ]\n\tcontent = filter(lambda x : not x[1] == None, content)\n\twords = defaultdict(list)\n\tfor (word, vector) in content:\n\t\tif \"_\" in word:\n\t\t\twords[word.lower().split(\"_\")[0]].append(vector)\n\t\telse:\n\t\t\twords[word.lower()].append(vector)\n\treturn words\n\n# Written by Anouk based on qa.c\ndef qa_ambiguous(wordvectors, questions):\n\tprint \"Answering questions\"\n\t\n\t# initialize empty answers list\n\tanswers = []\n\t\n\t# iterate over all questions\n\tunseenCount = 0\n\tfor question in questions:\n\n\t\t# get representations for a, b and c, only if they actually exist\n\t\tif question[0] in wordvectors 
and question[1] in wordvectors and question[2] in wordvectors:\n\t\t\t\n\t\t\t# get the word projections, this is in wordvectors[word]\n\t\t\t# it is a list of tuples\n\t\t\ta_projections = wordvectors[question[0]]\n\t\t\tb_projections = wordvectors[question[1]]\n\t\t\tc_projections = wordvectors[question[2]]\n\n\t\t\t# y_projections is a list of tuples\n\t\t\t# NOTE: Remi does not want to have all combinations of b-a, but just the \n\t\t\t# b - a that are close to each other, this could lead to more efficiency\n\t\t\t# but I think using all possible combinations is more sound, for now \n\t\t\ty_projections = []\n\t\t\tfor a in a_projections:\n\t\t\t\tfor b in b_projections:\n\t\t\t\t\tfor c in c_projections:\n\t\t\t\t\t\ty_projections.append(normalize([b[i] - a[i] + c[i] for i in xrange(len(a))]))\n\n\t\t\t# initialize bestSim and bestWord\n\t\t\t# sim ranges between -1 and 1, where 1 is most similar\n\t\t\tbestSim = 0\n\t\t\tbestWord = \"nothing\"\n\t\t\t\n\t\t\t# look at all senses of the word representations to find the answer to a:b c:bestWord\n\t\t\t# except for a, b and c\n\t\t\tfor word in wordvectors:\n\t\t\t\tif word not in question:\n\t\t\t\t\twordReps = wordvectors[word]\n\t\t\t\t\t\n\t\t\t\t\t# for every word_projection of word, we will compute the similarity \n\t\t\t\t\t# with every y_projection and save the best!\n\t\t\t\t\tfor wordRep in wordReps:\n\t\t\t\t\t\tfor y in y_projections:\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t# similarity is defined as the cosine similarity\n\t\t\t\t\t\t\t# cosine similarity normaly is (a (dot product) b) / (norm(a) * norm(b))\n\t\t\t\t\t\t\t# we have normalized a and b, so the denominator is always one and can be discarded\n\t\t\t\t\t\t\tsim = sum([y[i] * wordRep[i] for i in xrange(len(y))])\n\n\t\t\t\t\t\t\t# save result if it is better than the previous best result\n\t\t\t\t\t\t\tif sim > bestSim:\n\t\t\t\t\t\t\t\tbestSim = sim\n\t\t\t\t\t\t\t\tbestWord = word\n\t\t\n\t\t# If we don't have a projection for a, b, or c, we 
won't be answering the question.\n\t\t# But we will be printing a word ('nothing'), to make sure we end up with just as many\n\t\t# as answer lines as question lines.\n\t\telse:\n\t\t\tbestWord = 'nothing'\n\t\t\tunseenCount +=1\n\t\tanswers.append(bestWord)\n\tprint unseenCount, \" questions were not answered properly (out of \", len(answers), \")\"\n\treturn answers\n\n# Written by Anouk based on qa.c\ndef qa_ambiguous_stricter(wordvectors, questions):\n\tdef similarity_function(x, y):\n\t\treturn sum([x[i] * y[i] for i in xrange(len(y))])\n\n\tdef answer_question(question, wordvectors):\n\t\tunseenCount = 0\n\t\t# get representations for a, b and c, only if they actually exist\n\t\tif question[0] in wordvectors and question[1] in wordvectors and question[2] in wordvectors:\n\t\t\t\n\t\t\t# get the word projections, this is in wordvectors[word]\n\t\t\t# it is a list of tuples\n\t\t\ta_projections = wordvectors[question[0]]\n\t\t\tb_projections = wordvectors[question[1]]\n\t\t\tc_projections = wordvectors[question[2]]\n\n\n\t\t\tbest_a_b_match = (None, None)\n\t\t\tbest_match_similarity = -1\n\n\t\t\tfor a in a_projections:\n\t\t\t\tfor b in b_projections:\n\t\t\t\t\tsim = similarity_function(a,b)\n\t\t\t\t\tif sim > best_match_similarity:\n\t\t\t\t\t\tbest_a_b_match = (a,b)\n\t\t\t\t\t\tbest_match_similarity = sim\n\n\t\t\t(best_a, best_b) = best_a_b_match\n\t\t\ty_projections = []\n\t\t\tfor c in c_projections:\n\t\t\t\ty_projections.append(normalize([best_b[i] - best_a[i] + c[i] for i in xrange(len(c))]))\n\n\t\t\t# initialize bestSim and bestWord\n\t\t\t# sim ranges between -1 and 1, where 1 is most similar\n\t\t\tbestSim = 0\n\t\t\tbestWord = \"nothing\"\n\t\t\t\n\t\t\t# look at all senses of the word representations to find the answer to a:b c:bestWord\n\t\t\t# except for a, b and c\n\t\t\tfor word in wordvectors:\n\t\t\t\tif word not in question:\n\t\t\t\t\twordReps = wordvectors[word]\n\t\t\t\t\t\n\t\t\t\t\t# for every word_projection of word, we will 
compute the similarity \n\t\t\t\t\t# with every y_projection and save the best!\n\t\t\t\t\tfor wordRep in wordReps:\n\t\t\t\t\t\tfor y in y_projections:\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t# similarity is defined as the cosine similarity\n\t\t\t\t\t\t\t# cosine similarity normaly is (a (dot product) b) / (norm(a) * norm(b))\n\t\t\t\t\t\t\t# we have normalized a and b, so the denominator is always one and can be discarded\n\t\t\t\t\t\t\tsim = similarity_function(y, wordRep)\n\n\t\t\t\t\t\t\t# save result if it is better than the previous best result\n\t\t\t\t\t\t\tif sim > bestSim:\n\t\t\t\t\t\t\t\tbestSim = sim\n\t\t\t\t\t\t\t\tbestWord = word\n\t\t\n\t\t# If we don't have a projection for a, b, or c, we won't be answering the question.\n\t\t# But we will be printing a word ('nothing'), to make sure we end up with just as many\n\t\t# as answer lines as question lines.\n\t\telse:\n\t\t\tbestWord = 'nothing'\n\t\t\tunseenCount = 1\n\t\treturn (unseenCount, bestWord)\n\n\n\tprint \"Answering questions\"\n\t\n\t# initialize empty answers list\n\tanswers = []\n\t\n\t# iterate over all questions\n\tunseenCount = 0\n\tfor question in questions:\n\t\t(c, bestWord) = answer_question(question, wordvectors)\n\n\t\tunseenCount += c\n\t\tanswers.append(bestWord)\n\n\tprint unseenCount, \" questions were not answered properly (out of \", len(answers), \")\"\n\treturn answers\n\n\nif __name__ == \"__main__\":\n\tif not len(sys.argv) == 3:\n\t\tprint \"Call me as:\"\n\t\tprint \"python qa.py wordvectors.txt questions\"\n\t\tsys.exit()\n\n\tanswers = qa_ambiguous_stricter(load_vectors(sys.argv[1]), load_questions(sys.argv[2]))\n\t\n\tprint \"Saving answers to file\"\n\tsave_answers(answers, \"precomputedAnswers/\" + sys.argv[1].split(\"/\")[-1] + \".\" + sys.argv[2].split(\"/\")[-1].split(\".\")[0] + \".answered\")\n"
},
{
"alpha_fraction": 0.6890243887901306,
"alphanum_fraction": 0.6955284476280212,
"avg_line_length": 26.920454025268555,
"blob_id": "70d1c6381486bacd2b408cb180ef5677e735cd28",
"content_id": "bc25a53df460500b8fa78d8d845e94c31ce6f7ee",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2460,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 88,
"path": "/clusteringRemi/clustering.py",
"repo_name": "anoukv/disambiguateCWSPR",
"src_encoding": "UTF-8",
"text": "from random import choice\nfrom math import sqrt\nfrom collections import defaultdict\n\nclass cluster:\n\tdef __init__(self, center = dict() ):\n\t\tself.center = center\n\t\tself.elements_set = set(center.keys())\n\t\tself.assigned_datapoints = []\n\n\tdef add_datapoint(self, datapoint):\n\t\tself.assigned_datapoints.append(datapoint)\n\n\tdef distance(self, data_point):\n\t\tintersection = self.elements_set.intersection(set(data_point.keys()))\n\t\tsimilarity = 0\n\t\tfor element in intersection:\n\t\t\tsimilarity += self.center[element] * data_point[element]\n\t\treturn 1 - similarity\n\n\tdef cluster_distance(self, other_cluster):\n\t\treturn self.distance(other_cluster.get_representation())\n\n\tdef get_representation(self):\n\t\tif len(self.assigned_datapoints) > 0:\n\t\t\tself.set_new_cluster_center()\n\t\treturn self.center\n\n\tdef set_new_cluster_center(self):\n\t\tdef normalize_coc(coc):\n\t\t\ttotal = sqrt( sum([v**2 for v in coc.values()]) )\n\t\t\tnew_coc = dict()\n\t\t\tfor key in coc.keys():\n\t\t\t\tnew_coc[key] = coc[key] / total\n\t\t\treturn new_coc\n\n\t\tassert len(self.assigned_datapoints) > 0, \"No data_points were assigned to this cluster...\"\n\t\tnew_center = defaultdict(float)\n\t\tfor data_point in self.assigned_datapoints:\n\t\t\tfor element in data_point.keys():\n\t\t\t\tnew_center[element] += data_point[element]\n\t\tself.center = normalize_coc(new_center)\n\t\tself.assigned_datapoints = []\n\t\tself.elements_set = set(self.center.keys())\n\ndef kmeans_process(data):\n\tk = 2\n\tdef kmeans(data, k, min_dist_change=0.01, max_iter=25):\n\t\tclusters = dict()\n\n\t\t# init empty clusters\n\t\tfor i in xrange(k):\n\t\t\tclusters[i] = cluster()\n\n\t\t# Fill clusters\n\t\tfor i in xrange(len(data)):\n\t\t\tclusters[choice(xrange(k))].add_datapoint(data[i])\n\n\t\t# Calculate centroids\n\t\tfor i in xrange(k):\n\t\t\tclusters[i].set_new_cluster_center()\n\n\t\tfor _ in xrange(max_iter):\n\t\t\t# Assign data to clusters\n\t\t\tfor 
datapoint in data:\n\t\t\t\tsmallestDistance = 2\n\t\t\t\tsmallestClusterIndex = -1\n\t\t\t\tfor i in xrange(k):\n\t\t\t\t\tdistance = clusters[i].distance(datapoint)\n\t\t\t\t\tif distance < smallestDistance:\n\t\t\t\t\t\tsmallestDistance = distance\n\t\t\t\t\t\tsmallestClusterIndex = i\n\t\t\t\tassert not smallestClusterIndex == -1, \"Didn't find appropriate distance...\"\n\t\t\t\tclusters[smallestClusterIndex].add_datapoint(datapoint)\n\n\t\t\t# re-estimate centers.\n\t\t\tfor i in xrange(k):\n\t\t\t\tclusters[i].set_new_cluster_center()\n\t\treturn clusters\n\n\tfor _ in xrange(5):\n\t\ttry:\n\t\t\treturn kmeans(data, k)\n\t\texcept:\n\t\t\tpass\n\n\t# finally just get one single cluster.\n\treturn kmeans(data, 1)\n\n\n\n"
},
{
"alpha_fraction": 0.7088724374771118,
"alphanum_fraction": 0.7179825901985168,
"avg_line_length": 29.780487060546875,
"blob_id": "f0bca1414cd87baa4ed3eb60460fc06bf45b7ea6",
"content_id": "b05ab94c6d49557470feb4bc4f710669774fab23",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7574,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 246,
"path": "/clusteringAnouk/newCorpus.py",
"repo_name": "anoukv/disambiguateCWSPR",
"src_encoding": "UTF-8",
"text": "from math import sqrt\nfrom kmeans import kmeans_process\nfrom collections import defaultdict\nimport sys\nfrom copy import copy\nfrom random import choice\nimport shelve\nfrom multiprocessing import *\n\n# deletes all the keys from dic that are not in keysToKeep\ndef deleteSomeKeys(keysToKeep, dic):\n\tintersection = keysToKeep.intersection(set(dic.keys()))\n\tnew_dic = dict()\n\tfor key in intersection:\n\t\tnew_dic[key] = dic[key]\n\treturn new_dic\n\n# reads the corpus file\ndef read_file(filename):\n\tf = open(filename, 'r')\n \tinpt = f.readline().replace(\"\\n\", \"\").split(\" \")\n \tf.close()\n \treturn inpt\n\n# annotates the corpus using the multiple senses of a word\ndef annotate(inpt, clustered, vocabulary, skipsize):\n\tqueueSize = skipsize * 2 + 1\n\n\t# two functions\n\tqueueIsReady = lambda x : len(x) == queueSize\n\tdef push(element, queue):\n\t\tqueue.append(element)\n\t\tif len(queue) > queueSize:\n\t\t\tqueue.pop(0)\n\n\tannotated = []\n\tqueue = []\n\tfor word in inpt:\n\t\tpush(word, queue)\n\t\tif queueIsReady(queue) and word in clustered:\t\n\t\t\tcoc = set()\n\t\t\tfor i in xrange(skipsize):\n\t\t\t\tif queue[i] in vocabulary:\n\t\t\t\t\tword1 = queue[i]\n\t\t\t\telse:\n\t\t\t\t\tword1 = \"_UNKNOWN_\"\n\t\t\t\tif queue[i+1+skipsize] in vocabulary:\n\t\t\t\t\tword2 = queue[i+1+skipsize]\n\t\t\t\telse:\n\t\t\t\t\tword2 = \"_UNKNOWN_\"\n\n\t\t\t\tcoc.add(word1)\n\t\t\t\tcoc.add(word2)\n\n\t\t\t# Now get the best cluster\t\t\t\n\t\t\tsense0 = set(clustered[word][0].keys())\n\t\t\tsense1 = set(clustered[word][1].keys())\n\t\t\tintersectionSense0 = len(coc.intersection(sense0))\n\t\t\tintersectionSense1 = len(coc.intersection(sense1))\n\t\t\tif intersectionSense0 > 0 and intersectionSense1 > 0:\n\t\t\t\tif intersectionSense0 > intersectionSense1:\n\t\t\t\t\tword = word + \"_\" + str(0)\n\t\t\t\telif intersectionSense1 > intersectionSense0:\n\t\t\t\t\tword = word + \"_\" + str(1)\n\t\t\t\telse:\n\t\t\t\t\tword = word + \"_\" + 
str(choice([0,1]))\n\n\t\tannotated.append(word + \" \")\n\treturn annotated\n\n# prepares the data for a word, that is necessary to create the two senses\ndef prepareExtraction(word, coc):\n\n\t# get co-occurences for the word\n\twordCOC = copy(coc[word])\n\tcoOccuringWords = set(wordCOC.keys())\n\n\tcococ = dict()\n\tfor bla in wordCOC:\n\t\tvector = copy(coc[bla])\n\t\tvector = deleteSomeKeys(coOccuringWords, vector)\n\t\tcococ[bla] = vector\n\n\treturn (wordCOC, cococ)\n\n# extracts the two senses of a word\ndef extractSenses((word, preparation)):\n\t(wordCOC, cococ) = preparation\n\t# only if more than one datapoint was found, the word will be called ambiguous\n\n\n\t# sort from high relatedness to low relatedness\n\t# cut off half, top half will be used, other half will be things that are relevant to all sensess\n\ttupleList = sorted(wordCOC.items(), key=lambda x: x[1], reverse = True)\n\t\n\trelevantCocWords = tupleList[:len(tupleList)/2]\n\ttheRest = tupleList[len(tupleList)/2:]\n\n\tcocWords = [ elem[0] for elem in relevantCocWords ]\n\trelevantToAll = [elem[0] for elem in theRest]\n\n\t# for every co-occuring word with the word\n\t# we save the vector with co-occuring words (only containing words from cocWords)\n\t# this collection will be datapoints\n\tlistOfDatapoints = [ cococ[elem] for elem in cocWords ]\n\n\t# Garbage collection\n\tcococ = None\n\n\t# cluster all co-occurence vectors\n\tclusters = kmeans_process(listOfDatapoints)\n\t\n\t# find out which term belongs to which cluster\n\twordAssignemnts = defaultdict(list)\n\tfor i, cocWord in enumerate(cocWords):\n\t\tbestClusterID = \"NONE\"\n\t\tbestDistance = 2\n\t\tfor clusterID in clusters:\n\t\t\tdist = clusters[clusterID].distance(listOfDatapoints[i])\n\t\t\tif dist < bestDistance:\n\t\t\t\tbestDistance = dist\n\t\t\t\tbestClusterID = clusterID\n\t\twordAssignemnts[bestClusterID].append(cocWord)\n\t\n\t# get the cluster distance\n\tclusterDistance = 
clusters[0].cluster_distance(clusters[1])\n\t\n\t# make a new representations for the different senses of the words\n\t# save also the cluster distance for future reference\n\tsenses = dict()\n\tsenses['clusterDistance'] = clusterDistance\n\n\t# for all clusters, we will now make a new sense of the word\n\t# the sense will contain the relevantToAll words and the words assigned to the specific cluster\n\tfor key in wordAssignemnts:\n\t\tsense = copy(wordCOC)\n\t\tsense = deleteSomeKeys(set(wordAssignemnts[key]+relevantToAll), sense)\n\t\tsenses[key] = sense\n\n\t# save the different sences of the word\n\treturn (word, senses)\n\n# returns a list of words that have a unique frequency or are in the top 25 of most frequent words\ndef pruneVocabulary(voc):\n\tprint \"Running some statistics on the vocabulary to find which words won't be clustered!\"\n\twordsToCut = set()\t\n\tvocTups = voc.items()\n\tsortedVocTups = sorted(vocTups, key = lambda x: x[1], reverse = True)\n\n\tfor i in range(75):\n\t\twordsToCut.add(sortedVocTups[i][0])\n\tprint\n\n\treturn wordsToCut\n\n#\n# gives us a new dictionary with multiple senses of the words\n# not all words will be in this dictionary, only the words for which \n# multiple senses were actually found\ndef makeNewCOCS(coc, outputfile, voc):\t\n\n\t# inititate return object\n\tprint \"Writing results to: \", outputfile\n\tnewCOC = shelve.open(outputfile)\n\t\n\twordsToCut = pruneVocabulary(voc)\n\n\tprint \"Not disambiguating: \", len(wordsToCut), \" words\"\n\t# we will be evaluating the ambiguousness of every single word excpet for ''\n\tp = Pool(processes=6)\n\n\tcounter = 0\n\tinstructions = []\n\tfor word in coc:\n\t\tcounter += 1\n\n\t\t# we don't want nothing\n\t\t# we don't want words that occur less than 20 times\n\t\tif word != '' and voc[word] > 20 and word not in wordsToCut:\n\t\t \tprint word, counter, \"/ ~50.000\"\n\t\t\t# here we cluster! 
\n\t\t\tinstructions.append((word, prepareExtraction(word,coc)))\n\t\t\tif len(instructions) == 8:\n\t\t\t\tprint \"Agregated instructions, executing...\"\n\t\t\t\tresults = p.map(extractSenses, instructions)\n\t\t\t\tfor (w,s) in results:\n\t\t\t\t\tnewCOC[w] = s\n\t\t\t\tinstructions = []\n\n\tprint \"Executing rest of length\", len(instructions)\n\tresults = p.map(extractSenses, instructions)\n\tfor (w,s) in results:\n\t\tnewCOC[w] = s\n\treturn newCOC\n\nprint \"Welcome to the clustering method designed by Anouk. You'll enjoy your time here.\"\n\nif len(sys.argv) < 6:\n \t\tprint \"Please call me as:\"\n \t\tprint \"python runRemi.py <original coc> <new coc (output)> <training text> <new coc half (output)> <annotated corpus>\"\n \t\tsys.exit()\n\ninput_file_coc = sys.argv[1]\noutput_file_new_coc = sys.argv[2]\ntraining_text = sys.argv[3]\noutput_file_new_coc_half = sys.argv[4]\noutput_annotated_corpus = sys.argv[5]\n\n# this is the original co-occurence thing, with 'rel', 'coc' and 'voc' as keys\nprint \"Reading global co-occurences (relative frequencies, relatedness scores and vocabulary)\"\nco_occurences = shelve.open(input_file_coc + \"_rel\")\nvoc = shelve.open(input_file_coc + \"_voc\")\n\n# This thing actually makes a co occurence thing with multiple senses of the word\nprint \"Making new co-occurence dictionary, with multiple senses of all words... This might take a while.\"\nnew = makeNewCOCS(co_occurences, output_file_new_coc, voc)\n\n\n# annotate the corpus \n# we might want to decrease new based on cluster distances\n# for example, we might only take 50% of the words in here, that have\n# the highest cluster distances\nprint \"Throwing away half of the words... 
\"\nclustered = sorted(new.items(), key=lambda x: x[1]['clusterDistance'], reverse = True)\nhalfCOC = shelve.open(output_file_new_coc_half)\nhalfCOC.update(dict(clustered[:len(clustered)/2]))\n\n# we can close the new one and the original one now.\nnew.close()\nco_occurences.close()\nclustered = None\n\n# the input is the text file\nprint \"Reading corpus...\"\ninpt = read_file(training_text)\n\nprint \"Annotating corpus.\"\nannotated = annotate(inpt, halfCOC, voc, 5)\n\nf = open(output_annotated_corpus, 'w')\nf.write(\"\".join(annotated))\nf.close()\n\n\nvoc.close()\nhalfCOC.close()\n\n\n"
},
{
"alpha_fraction": 0.60447758436203,
"alphanum_fraction": 0.6139755845069885,
"avg_line_length": 72.69999694824219,
"blob_id": "c6b80b96b5f326d29cebc76d24a32e16921e8c51",
"content_id": "0040d9540cadd80e4c63b29b130d97174c4c9102",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1474,
"license_type": "no_license",
"max_line_length": 256,
"num_lines": 20,
"path": "/not-used/BuildResults-Cristina.py",
"repo_name": "anoukv/disambiguateCWSPR",
"src_encoding": "UTF-8",
"text": "import os, glob\n\nquestionList = glob.glob(\"SemVal/Testing/Phase1Questions/*.txt\")\nanswerList = glob.glob(\"SemVal/Testing/Phase1Answers/*.txt\")\n\n#print len(questionList)\n#print len(answerList)\n#print \"hy!\" \nif(len(questionList) == len(answerList)):\n for i in range(len(questionList)):\n\t#print \"q de i:\" + questionList[i]\n\t#print \"find result:\" + questionList[i][questionList[i].rfind('/') + 1:]\n\tfor j in range(len(answerList)):\n \tif(questionList[i][questionList[i].rfind('/') + 1:] == answerList[j][answerList[j].rfind('/') + 1:].replace('Answers','Questions')):\n #print questionList[i][questionList[i].rfind('\\\\') + 1:]\n #print \"name of generated file is: \" + questionList[i][questionList[i].rfind('\\\\') + 1:].replace('Questions', 'Similarity')\n print \"results file name: \" + \" Results/\" + questionList[i][questionList[i].rfind('/') + 1:].replace('Questions', 'Similarity').replace(\".txt\", \"\")\n #print \"executed command: \" + \"./SemanticRelations word_projections-80.txt \" + questionList[i] + \" \" + answerList[i] + \" Results/\" + questionList[i][questionList[i].rfind('\\\\') + 1:].replace('Questions', 'Similarity').replace(\".txt\", \"\")\n \n os.system(\"./SemanticRelations word_projections-80.txt \" + questionList[i] + \" \" + answerList[j] + \" Results/\" + questionList[i][questionList[i].rfind('/') + 1:].replace('Questions', 'Similarity').replace(\".txt\", \"\"))\n"
},
{
"alpha_fraction": 0.6239425539970398,
"alphanum_fraction": 0.6411176323890686,
"avg_line_length": 21.285715103149414,
"blob_id": "5a6de2955ce7ff45e2ac73faa919b8599d090d6a",
"content_id": "8b48958379428c44d784fa11cb0cc3227af9f1fa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3901,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 175,
"path": "/clusteringRemi/main.py",
"repo_name": "anoukv/disambiguateCWSPR",
"src_encoding": "UTF-8",
"text": "import sys\nfrom collections import defaultdict\nimport shelve\nfrom time import time\nimport pickle\nfrom math import sqrt\n\nfrom clustering import kmeans_process\n\ndef get_document_vocabulary(inpt, minimumOccurence = 5):\n\ttotal = defaultdict(int)\n\tfor word in inpt:\n\t\ttotal[word] += 1\n\treturn set([ key for key in total.keys() if total[key] > minimumOccurence ])\n\ndef normalize_coc(coc):\n\ttotal = sqrt( sum([v**2 for v in coc.values()]) )\n\tnew_coc = dict()\n\tfor key in coc.keys():\n\t\tnew_coc[key] = coc[key] / total\n\treturn new_coc\n\ndef anotate(inpt, skipsize):\n\tk = 2\n\tqueueSize = skipsize * 2 + 1\n\tqueueMid = skipsize + 1\n\n\tqueueIsReady = lambda x : len(x) == queueSize\n\tdef push(element, queue):\n\t\tqueue.append(element)\n\t\tif len(queue) > queueSize:\n\t\t\tqueue.pop(0)\n\n\tdef map_append(dic, key, elem):\n\t\tif key in dic:\n\t\t\tl = dic[key]\n\t\t\tl.append(elem)\n\t\t\tdic[key] = l\n\t\telse:\n\t\t\tdic[key] = [elem]\n\t\n\tvocabulary = get_document_vocabulary(inpt)\n\tvocSize = len(vocabulary) + 1\n\n\ttotalWords = len(inpt)\n\n\tprint vocSize, \"words in vocabulary.\"\n\tprint \"Starting on determining word co-occurences of\", totalWords, \"words\"\n\n\n\tcocs = defaultdict(list)\n\tqueue = []\n\tfor i in xrange(queueSize):\n\t\tword = inpt[i]\n\t\tpush(word, queue)\n\n\tfor counter in xrange(queueSize, len(inpt)):\n\t\tword = inpt[counter]\n\t\tif counter % 100000 == 0:\n\t\t\tprint \"Part\", counter / 100000, \"of\", totalWords / 100000, \"parts.\"\n\t\tpush(word, queue)\n\t\tmid = queue[queueMid]\n\t\tif mid in vocabulary:\n\t\t\tcoc = defaultdict(int)\n\t\t\tfor i in xrange(skipsize):\n\t\t\t\tif queue[i] in vocabulary:\n\t\t\t\t\tword1 = queue[i]\n\t\t\t\telse:\n\t\t\t\t\tword1 = \"_UNKNOWN_\"\n\t\t\t\tif queue[i+1+skipsize] in vocabulary:\n\t\t\t\t\tword2 = queue[i+1+skipsize]\n\t\t\t\telse:\n\t\t\t\t\tword2 = \"_UNKNOWN_\"\n\n\t\t\t\tcoc[word1] += 1\n\t\t\t\tcoc[word2] += 
1\n\n\t\t\tcocs[mid].append(normalize_coc(coc))\n\n\tprint \"Found\",len(cocs),\"co-occurence vectors.\"\n\n\tprint \"Now clustering...\"\n\n\tclustered = []\n\tfor key in cocs:\n\t\tc = kmeans_process(cocs[key])\n\t\tif len(c) == 2:\n\t\t\tclustered.append((c[0].cluster_distance(c[1]), key, c))\n\n\tclustered = sorted(clustered, key = lambda x : x[0])\n\tclustered = clustered[0:len(clustered)/2]\n\tclustered = dict([ (x[1], x[2]) for x in clustered] )\n\n\tclustered_words = set(clustered.keys())\n\n\tprint \"Clustered words:\", clustered_words\n\n\tprint \"Starting anotating corpus.\"\n\tanotated = []\n\tqueue = []\n\tfor i in xrange(queueSize):\n\t\tword = inpt[i]\n\t\tpush(word, queue)\n\n\n\tfor counter in xrange(queueSize, len(inpt)):\n\t\tword = inpt[counter]\n\t\tpush(word, queue)\n\t\tword = queue[queueMid]\n\t\tif word in clustered_words:\n\t\t\tcoc = defaultdict(int)\n\t\t\tfor i in xrange(skipsize):\n\t\t\t\tif queue[i] in vocabulary:\n\t\t\t\t\tword1 = queue[i]\n\t\t\t\telse:\n\t\t\t\t\tword1 = \"_UNKNOWN_\"\n\t\t\t\tif queue[i+1+skipsize] in vocabulary:\n\t\t\t\t\tword2 = queue[i+1+skipsize]\n\t\t\t\telse:\n\t\t\t\t\tword2 = \"_UNKNOWN_\"\n\n\t\t\t\tcoc[word1] += 1\n\t\t\t\tcoc[word2] += 1\n\n\t\t\tcoc = normalize_coc(coc)\n\n\t\t\t# Now get the best cluster\n\t\t\tbestValue = 1\n\t\t\tbestIndex = -1\n\t\t\tfor i in xrange(k):\n\t\t\t\tdistance = clustered[word][i].distance(coc)\n\t\t\t\tif distance < bestValue:\n\t\t\t\t\tbestValue = distance\n\t\t\t\t\tbestIndex = i\n\t\t\tword = word + \"_\" + str(bestIndex)\n\t\tanotated.append(word + \" \")\n\n\treturn anotated\n\ndef read_args():\n\tdef read_file(filename):\n\t\tf = open(filename, 'r')\n\t \tinpt = f.readline().replace(\"\\n\", \"\").split(\" \")\n\t \tf.close()\n\t \treturn inpt\n\n\tif len(sys.argv) < 3:\n \t\tprint \"Please call me as:\"\n \t\tprint \"python main.py training.txt output.txt (skipsize = 5)\"\n \t\tsys.exit()\n\n \tskipsize = 5\n \tif len(sys.argv) == 4:\n \t\tskipsize = 
int(sys.argv[3])\n\n \treturn (read_file(sys.argv[1]), sys.argv[2], skipsize)\n\n\ndef main_cluster_remi():\n\t(inpt, output_file, skipsize) = read_args()\n\n \tprint \"Preparing data.\"\n\n \tanotated = anotate(inpt, skipsize)\n\n \tf = open(output_file, 'w')\n \tf.write(\"\".join(anotated))\n \tf.close()\n\n\nif __name__ == \"__main__\":\n\tstart = time()\n\tmain_cluster_remi()\n\tstop = time()\n \tprint \"I spent\", int(stop-start+0.5), \"seconds.\"\n\n"
},
{
"alpha_fraction": 0.7916666865348816,
"alphanum_fraction": 0.7916666865348816,
"avg_line_length": 286,
"blob_id": "0bb33ce45e8a086f320a3d591e0f1aa0e0db1b50",
"content_id": "0896c854c6111600e551be7d39a6e8144c1abe23",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 288,
"license_type": "no_license",
"max_line_length": 286,
"num_lines": 1,
"path": "/README.md",
"repo_name": "anoukv/disambiguateCWSPR",
"src_encoding": "UTF-8",
"text": "In this project we work on improving the work from \"Linguistic Regularities in Continuous Space Word Representations\" by Tomas Mikolov, Wen-tau Yih, and Geoffrey Zweig. We will try to disambiguate words before using them is input to the RNN to achieve better results on syntactic as well as semantic tests. \n"
},
{
"alpha_fraction": 0.6777070164680481,
"alphanum_fraction": 0.6980891823768616,
"avg_line_length": 26.086206436157227,
"blob_id": "8487911550364b260db667e194b6bfb112b6bc27",
"content_id": "253eec7806aa64963c8f9a676559eb1996f1346d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1570,
"license_type": "no_license",
"max_line_length": 162,
"num_lines": 58,
"path": "/statistics.py",
"repo_name": "anoukv/disambiguateCWSPR",
"src_encoding": "UTF-8",
"text": "# Anouks statistics\n# For a cleaner and better estimation of... things\nimport shelve\nfrom collections import defaultdict\n\nvoc = shelve.open('../data/coc.medium_voc')\n\n# We want to know: \nprint\n# - how many words are there anyway?\nn = sum(voc.values())\nprint \"Number of words: \", n\n\n# - how many UNIQUE words are there?\nu = len(voc.keys())\nprint \"Number of unique words: \", u\nprint\n\n# - top 25 most occuring words\nvocTups = voc.items()\nsortedVocTups = sorted(vocTups, key = lambda x: x[1], reverse = True)\n\nfor i in range(100):\n\tprint i+1, sortedVocTups[i]\nprint\n\n# - How many words do we loose when we throw out percentages of the data?\npercentages = [1/float(2), 1/float(3), 1/float(4), 1/float(5), 1/float(6)]\n\nfor per in percentages:\n\tremove = 0\n\tfor i in range(int(u * per)):\n\t\tremove += sortedVocTups[i][1]\n\tprint per, \"% of unique words = \", remove * 100 / float(n), \" % of all words\"\nprint\n\n\n# - (frequency of word, how many words) - I really don't know how to call this\n# - how many unique frequencies are there? What are the words associated with them?\nfrequencyCounts = defaultdict(int)\nfrequencyWord = defaultdict(list)\n\nfor word in voc:\n\tfrequencyCounts[voc[word]] += 1\n\tfrequencyWord[voc[word]].append(word)\n\nwordsToCut = []\n\nremove = 0\nfor freq in frequencyCounts:\n\tif frequencyCounts[freq] == 1:\n\t\twordsToCut.append(frequencyWord[freq][0])\n\t\tremove += freq\nprint \"By removing the \", len(wordsToCut) * 100 / float(u), \"% words that have a unique frequency, we would remove \", remove * 100 / float(n), \"% of our words...\"\nprint wordsToCut\nprint\n\nvoc.close()"
},
{
"alpha_fraction": 0.658450722694397,
"alphanum_fraction": 0.6690140962600708,
"avg_line_length": 46.16666793823242,
"blob_id": "718e0599855fe754ec1718374ba8c37d874fd60f",
"content_id": "b6a4a900deb789a61dbacffea6e37f7c653c8963",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 568,
"license_type": "no_license",
"max_line_length": 138,
"num_lines": 12,
"path": "/BuildSemEvalResults.py",
"repo_name": "anoukv/disambiguateCWSPR",
"src_encoding": "UTF-8",
"text": "import os, glob\n\nquestionList = glob.glob(\"../SemVal/Testing/Phase1Questions/*.txt\")\nanswerList = glob.glob(\"../SemVal/Testing/Phase1Answers/*.txt\")\n\n\nif(len(questionList) == len(answerList)):\n for i in range(len(questionList)):\n for j in range(len(answerList)):\n if(questionList[i][questionList[i].rfind('/') + 1:] == answerList[j][answerList[j].rfind('/') + 1:].replace('Answers','Questions')):\n print questionList[i], answerList[j]\n os.system(\"pypy semEvalQA.py ../wordvectors/vectors80.broadcast \" + questionList[i] + \" \" + answerList[j])\n\n\n"
},
{
"alpha_fraction": 0.6076134443283081,
"alphanum_fraction": 0.6193264722824097,
"avg_line_length": 23.39285659790039,
"blob_id": "bd3a14c95565068e7cecf5331978c901b0b40021",
"content_id": "421622fa88665b83a6928eb7ba545e93d5d49c3a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 683,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 28,
"path": "/QuestionsAnswers/Original_files_and_conversion_scripts/fourToThree.py",
"repo_name": "anoukv/disambiguateCWSPR",
"src_encoding": "UTF-8",
"text": "import sys\n\n# to split the file of a b c d to a file quesions_extended with a b c and answers_extended with Done\nfile_name = sys.argv[1]\n\nfile = open(file_name, 'rb')\n\nquestions = None\nanswers = None\n\nfor line in file:\n\twords = line.split(' ')\n\tif words[0] != \":\":\n\t\tquestions.write(words[0].lower() + \" \" + words[1].lower() + \" \" + words[2].lower() + \"\\n\")\n\t\tanswers.write(words[3].lower())\n\telse:\n\t\tif questions != None and answers != None: \n\t\t\tquestions.close()\n\t\t\tanswers.close()\n\t\t\n\t\tquestions = open(words[1].replace(\"\\n\", \"\") + \".questions\", 'w')\n\t\tanswers = open(words[1].replace(\"\\n\", \"\") + \".answers\", 'w')\n\t\t\n\t\tprint words\n\nquestions.close()\nanswers.close()\nprint \"Done\"\n"
},
{
"alpha_fraction": 0.6295063495635986,
"alphanum_fraction": 0.6328341364860535,
"avg_line_length": 30.63157844543457,
"blob_id": "803df4f651444f88d7f2041526f4f50081840cd1",
"content_id": "78446f69dd3900d9f25062c64e97f9a81641be6d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1803,
"license_type": "no_license",
"max_line_length": 135,
"num_lines": 57,
"path": "/SemEval/build_result_files.py",
"repo_name": "anoukv/disambiguateCWSPR",
"src_encoding": "UTF-8",
"text": "import sys, os, glob\n\ndef add_hyphens(filename):\n\tf = open(filename, 'r')\n\tcontent = filter(lambda x : \":\" in x and not \" \" in x, f.readlines())\n\tf.close()\n\tc = []\n\tchange = True\n\tfor line in content:\n\t\tif '\"' not in line:\n\t\t\tchange = True\n\t\t\tc.append( '\"' + line.replace(\"\\n\",\"\") + '\"\\n')\n\t\telse:\n\t\t\tc.append(line)\n\tif change:\n\t\tf = open(filename, 'w')\n\t\tf.write(\"\".join(c))\n\t\tf.close()\n\n\n\nif __name__ == \"__main__\":\n\tif not len(sys.argv) == 2:\n\t\tprint \"Call me as:\"\n\t\tprint \"python build_result_file.py <wordvectors>\"\n\t\tsys.exit()\n\n\tquestionList = glob.glob(\"../../SemVal/Testing/Phase1Questions/*.txt\")\n\tanswerList = glob.glob(\"../../SemVal/Testing/Phase1Answers/*.txt\")\n\n\tvector_filename = sys.argv[1]\n\n\tfor i in xrange(len(questionList)):\n\t\tfor j in xrange(len(answerList)):\n\t\t\tif(questionList[i][questionList[i].rfind('/') + 1:] == answerList[j][answerList[j].rfind('/') + 1:].replace('Answers','Questions')):\n\t\t\t\tprint questionList[i], answerList[j]\n\n\t\t\t\tadd_hyphens(questionList[i])\n\t\t\t\tadd_hyphens(answerList[j])\n\n\t\t\t\t# Produce answers:\n\t\t\t\tos.system(\"pypy semEvalQA.py \" + vector_filename + \" \" + questionList[i] + \" \" + answerList[j] + \" tmp_files/qa_answers\")\n\n\t\t\t\t# Produce spearman score\n\t\t\t\tos.system(\"./maxdiff_to_scale.pl \" + answerList[j] + \" tmp_files/tmp.answers\" )\n\t\t\t\tos.system(\"./score_scale.pl tmp_files/tmp.answers tmp_files/qa_answers result_files/spearman.score\" )\n\t\t\t\t# os.system(\"rm tmp_files/tmp.answers\")\n\n\t\t\t\t# Produce maxdiff\n\t\t\t\t# os.system(\"./scale_to_maxdiff.pl \" + questionList[i] + \" tmp_files/qa_answers tmp_files/tmp.answers\")\n\t\t\t\t# os.system(\"./score_maxdiff.pl \" + answerList[j] + \" tmp_files/tmp.answers result_files/maxdiff.score\")\n\t\t\t\t# os.system(\"rm tmp_files/tmp.answers\")\n\n\t\t\t\t# os.system(\"rm tmp_files/qa_answers\")\n\n\t\t\t\tprint 
\"Exiting..\"\n\t\t\t\tsys.exit()\n"
},
{
"alpha_fraction": 0.6618415713310242,
"alphanum_fraction": 0.6767189502716064,
"avg_line_length": 25.43617057800293,
"blob_id": "8584fc60443fdd088a00bb890ff6f1fe9061210e",
"content_id": "a6dfc8cb39729a68a08514d72e530b74035dda79",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2487,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 94,
"path": "/clusteringAnouk/justAnnotate.py",
"repo_name": "anoukv/disambiguateCWSPR",
"src_encoding": "UTF-8",
"text": "import sys\nimport shelve\nfrom random import choice\nfrom multiprocessing import *\n\n# reads the corpus file\ndef read_file(filename):\n\tf = open(filename, 'r')\n \tinpt = f.readline().replace(\"\\n\", \"\").split(\" \")\n \tf.close()\n \treturn inpt\n\n# annotates the corpus using the multiple senses of a word\ndef annotate(inpt, clustered, vocabulary, skipsize):\n\tqueueSize = skipsize * 2 + 1\n\tclusteredKeys = set(clustered.keys())\n\n\t# two functions\n\tqueueIsReady = lambda x : len(x) == queueSize\n\tdef push(element, queue):\n\t\tqueue.append(element)\n\t\tif len(queue) > queueSize:\n\t\t\tqueue.pop(0)\n\n\tannotated = []\n\tqueue = []\n\ttotal = len(inpt)\n\tfor i, word in enumerate(inpt):\n\t\tpush(word, queue)\n\t\tif queueIsReady(queue) and word in clusteredKeys:\t\n\t\t\tcoc = set()\n\t\t\tfor i in xrange(skipsize):\n\t\t\t\tif queue[i] in vocabulary:\n\t\t\t\t\tword1 = queue[i]\n\t\t\t\telse:\n\t\t\t\t\tword1 = \"_UNKNOWN_\"\n\t\t\t\tif queue[i+1+skipsize] in vocabulary:\n\t\t\t\t\tword2 = queue[i+1+skipsize]\n\t\t\t\telse:\n\t\t\t\t\tword2 = \"_UNKNOWN_\"\n\n\t\t\t\tcoc.add(word1)\n\t\t\t\tcoc.add(word2)\n\n\t\t\t# Now get the best cluster\t\t\t\n\t\t\tsense0 = set(clustered[word][0].keys())\n\t\t\tsense1 = set(clustered[word][1].keys())\n\t\t\tintersectionSense0 = len(coc.intersection(sense0))\n\t\t\tintersectionSense1 = len(coc.intersection(sense1))\n\t\t\tif intersectionSense0 > 0 and intersectionSense1 > 0:\n\t\t\t\tif intersectionSense0 > intersectionSense1:\n\t\t\t\t\tword = word + \"_\" + str(0)\n\t\t\t\telif intersectionSense1 > intersectionSense0:\n\t\t\t\t\tword = word + \"_\" + str(1)\n\t\t\t\telse:\n\t\t\t\t\t# instead choice we could for example choose the previous thing\n\t\t\t\t\tword = word + \"_\" + str(choice([0,1]))\n\n\t\tannotated.append(word + \" \")\n\treturn annotated\n\nprint \"Welcome to the clustering method designed by Anouk. 
You'll enjoy your time here.\"\n\nif len(sys.argv) < 4:\n \t\tprint \"Please call me as:\"\n \t\tprint \"python justAnnotate.py <newCOC> <training text> <output file> <vocabulary>\"\n \t\tprint \"python justAnnotate.py ../../newCOC.medium ../../text.medium ../../newAnnotated.medium ../../coc.medium_voc\"\n \t\tsys.exit()\n\ninput_file = sys.argv[1]\ntraining_text = sys.argv[2]\noutput_annotated_corpus = sys.argv[3]\nvocabulary = sys.argv[4]\n\n# the input is the text file\nprint \"Reading corpus...\"\ninpt = read_file(training_text)\n\nprint input_file\nprint vocabulary\ncoc = shelve.open(input_file)\nprint \"opened coc\"\nvoc = shelve.open(vocabulary)\n\nprint \"Annotating corpus.\"\nannotated = annotate(inpt, coc, voc, 5)\n\nf = open(output_annotated_corpus, 'w')\nf.write(\"\".join(annotated))\nf.close()\n\n\nvoc.close()\ncoc.close()\n\n\n"
},
{
"alpha_fraction": 0.5303725600242615,
"alphanum_fraction": 0.5400990843772888,
"avg_line_length": 25.066986083984375,
"blob_id": "7fedc039d61fa5ee44b13dd4bfdeab75db7ca17c",
"content_id": "a09646c6d6877ca97070782da25c67385820e9c8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5449,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 209,
"path": "/clusteringCristina/LSACristina.py",
"repo_name": "anoukv/disambiguateCWSPR",
"src_encoding": "UTF-8",
"text": "import sys\nfrom collections import defaultdict\nfrom time import time\nimport pickle\nfrom math import sqrt\nfrom semanticpy.vector_space import VectorSpace\nfrom clustering import kmeans_process\n\ndef get_document_vocabulary(inpt, minimumOccurence = 5):\n\ttotal = defaultdict(int)\n\tfor word in inpt:\n\t\ttotal[word] += 1\n\treturn set([ key for key in total.keys() if total[key] > minimumOccurence ])\n\ndef normalize_coc(coc):\n\ttotal = sqrt( sum([v**2 for v in coc.values()]) )\n\tnew_coc = dict()\n\tfor key in coc.keys():\n\t\tnew_coc[key] = coc[key] / total\n\treturn new_coc\n\ndef anotate(inpt, skipsize):\n\tk = 2\n\tqueueSize = skipsize * 2 + 1\n\tqueueMid = skipsize + 1\n\n\tqueueIsReady = lambda x : len(x) == queueSize\n\tdef push(element, queue):\n\t\tqueue.append(element)\n\t\tif len(queue) > queueSize:\n\t\t\tqueue.pop(0)\n\t\n\tvocabulary = get_document_vocabulary(inpt)\n\tvocSize = len(vocabulary) + 1\n\n print \"Starting on determining word co-occurences.\"\n\n\tcocs = defaultdict(list)\n\tqueue = []\n\tfor word in inpt:\n\t\tpush(word, queue)\n\t\tif queueIsReady(queue):\n\t\t\tmid = queue[queueMid]\n\t\t\tif mid in vocabulary:\n\t\t\t\tcoc = []\n\t\t\t\tfor i in xrange(skipsize):\n\t\t\t\t\tif queue[i] in vocabulary:\n\t\t\t\t\t\tword1 = queue[i]\n\t\t\t\t\telse:\n\t\t\t\t\t\tword1 = \"_UNKNOWN_\"\n\t\t\t\t\tif queue[i+1+skipsize] in vocabulary:\n\t\t\t\t\t\tword2 = queue[i+1+skipsize]\n\t\t\t\t\telse:\n\t\t\t\t\t\tword2 = \"_UNKNOWN_\"\n\n\t\t\t\t\tcoc.append(word1)\n\t\t\t\t\tcoc.append(word2)\n\t\t\t\t#print \"final co-occurences\"\n\t\t\t\t#print coc\n\t\t\t\tcocs[mid].append(coc)\n\t\t\t\t#print \"Coc[mid]\"\n\t\t\t\t#print cocs[mid]\n\n\tprint \"Determining LSA relatedness scores between documents...\"\n \n\tclustered = dict()\n\tfor key in cocs.keys():\n \n #print \"KEY:\" + key\n #print \"\\ncocs[\" + key + \"]:\"\n #print \"len cocs key\"\n #print len(cocs[key])\n #print cocs[key][0]\n #print cocs[key][1]\n #print \" 
\".join(cocs[key][0])\n a = [\" \".join(cocs[key][i]) for i in range(len(cocs[key]))]\n #a = a[:6]\n #print \"len a\"\n #print len(a)\n \"\"\"\n print a[0]\n print a[1]\n print a[2]\n print a[3]\n print a[4]\n print a[5]\n \"\"\"\n vector_space = VectorSpace(a)\n scores = vector_space.related(0)\n LSIscores = []\n for i in range(len(a)):\n ss = {\"docText\" : a[i], \"similarity\" : scores[i]}\n LSIscores.append(ss)\n LSIscores = sorted(LSIscores, key=lambda k: k['similarity'], reverse=True)\n \"\"\"print \"scores\"\n print LSIscores\"\"\"\n LSIscores = LSIscores[:len(LSIscores)/2]\n\n \"\"\"\n text = \"\"\n for item in LSIscores:\n text += item[\"docText\"] + \" \"\n print \"text is: \" + text\n\n d = defaultdict(int)\n for word in text.split():\n d[word] += 1\n \"\"\"\n itemsToCluster = []\n for item in LSIscores:\n text = item[\"docText\"]\n d = defaultdict(int)\n for word in text.split():\n d[word] += 1\n d = normalize_coc(d)\n itemsToCluster.append(d)\n \"\"\"\n print \"printing d follows\"\n print d\n #normalize\n d = normalize_coc(d)\n print \"after normalization\"\n print d\n d = list(d)\n print d\n \"\"\"\n \n clustered[key] = kmeans_process(itemsToCluster)\n #print \"half scores\"\n #print LSIscores\n \n #print cocs[key]\n\t #clustered[key] = kmeans_process(LSIScores)\n\t\n \n\tprint \"Starting anotating corpus.\"\n\tanotated = []\n\tqueue = []\n\tfor word in inpt:\n\t\tpush(word, queue)\n\t\tif queueIsReady(queue):\n\t\t\tword = queue[queueMid]\n\t\t\tif word in clustered and len(clustered[word]) > 1:\n\t\t\t\tcoc = defaultdict(int)\n\t\t\t\tfor i in xrange(skipsize):\n\t\t\t\t\tif queue[i] in vocabulary:\n\t\t\t\t\t\tword1 = queue[i]\n\t\t\t\t\telse:\n\t\t\t\t\t\tword1 = \"_UNKNOWN_\"\n\t\t\t\t\tif queue[i+1+skipsize] in vocabulary:\n\t\t\t\t\t\tword2 = queue[i+1+skipsize]\n\t\t\t\t\telse:\n\t\t\t\t\t\tword2 = \"_UNKNOWN_\"\n\n\t\t\t\t\tcoc[word1] += 1\n\t\t\t\t\tcoc[word2] += 1\n\n\t\t\t\tcoc = normalize_coc(coc)\n\n\t\t\t\t# Now get the best 
cluster\n\t\t\t\tbestValue = 1\n\t\t\t\tbestIndex = -1\n\t\t\t\tfor i in xrange(k):\n\t\t\t\t\tdistance = clustered[word][i].distance(coc)\n\t\t\t\t\tif distance < bestValue:\n\t\t\t\t\t\tbestValue = distance\n\t\t\t\t\t\tbestIndex = i\n\t\t\t\tword = word + \"_\" + str(bestIndex) + \" \"\n\t\t\tanotated.append(word)\n\n\treturn (clustered, anotated)\n\ndef read_args():\n\tdef read_file(filename):\n\t\tf = open(filename, 'r')\n\t \tinpt = f.readline().replace(\"\\n\", \"\").split(\" \")\n\t \tf.close()\n\t \treturn inpt\n\n\tif len(sys.argv) < 3:\n \t\tprint \"Please call me as:\"\n \t\tprint \"python main.py training.txt output.txt (skipsize = 5)\"\n \t\tsys.exit()\n\n \tskipsize = 5\n \tif len(sys.argv) == 4:\n \t\tskipsize = int(sys.argv[3])\n\n \treturn (read_file(sys.argv[1]), sys.argv[2], skipsize)\n\n\ndef main_cluster_remi():\n\t(inpt, output_file, skipsize) = read_args()\n\n \tprint \"Preparing data.\"\n\n \t(clustered, anotated) = anotate(inpt, skipsize)\n\n \tf = open(output_file, 'w')\n \tf.write(\"\".join(anotated))\n \tf.close()\n \tpickle.dump(clustered, open(\"clusters.pickle\", 'wb'))\n\n\nif __name__ == \"__main__\":\n\tstart = time()\n\tmain_cluster_remi()\n\tstop = time()\n \tprint \"I spent\", int(stop-start+0.5), \"seconds.\"\n\n"
},
{
"alpha_fraction": 0.6040063500404358,
"alphanum_fraction": 0.6249434351921082,
"avg_line_length": 31.90671730041504,
"blob_id": "2c04af344ad703215d1795eebec4332c5c65f936",
"content_id": "6192de2d1762cef6d392a5b4ebf3108256c2ac53",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8836,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 268,
"path": "/qa_cristina.py",
"repo_name": "anoukv/disambiguateCWSPR",
"src_encoding": "UTF-8",
"text": "from __future__ import division\nimport sys\nimport pickle, math\n\nfrom collections import defaultdict\nfrom time import time\n\n\nquestions = \"QuestionsAnswers/word_relationship.questions\"\n\n# Written by Remi\n# Approved by Anouk\ndef save_answers(answers, filename):\n\tf = open(filename, 'w')\n\tf.write( \"\".join([ word + \"\\n\" for word in answers]) )\n\tf.close()\n\n# Written by Remi\n# Approved and edited by Anouk (made all words lower case)\ndef load_questions(filename=questions):\n\tf = open(filename, 'r')\n\tc = [ tuple(l.lower().replace(\"\\n\",\"\").split(\" \")) for l in f.readlines()]\n\tf.close()\n\treturn c\n\n# Written by Remi\n# Approved by Anouk\ndef normalizeString(vec):\n\tvec = [ float(x) for x in vec]\n\ttotal = math.sqrt( sum([v**2 for v in vec]) )\n\tnew_vec = []\n\tfor v in vec:\n\t\tnew_vec.append(v/total)\n\treturn tuple(new_vec)\n\ndef normalize(vec):\n\ttotal = math.sqrt( sum([v**2 for v in vec]) )\n\tnew_vec = []\n\tfor v in vec:\n\t\tnew_vec.append(v/total)\n\treturn tuple(new_vec)\n\n# written by Remi\n# Approved and edited by Anouk (made all words lower case and took out internal normalize function)\ndef load_vectors(filename):\n\tf = open(filename,'r')\n\tf.readline()\n\tcontent = [ filter( lambda x : not x in [\"\\n\",\"\"], l.replace(\"\\n\", \"\").split(\" \")) for l in f.readlines() ]\n\tcontent = [ (l[0], normalizeString(l[1:])) for l in content ]\n\tcontent = filter(lambda x : not x[1] == None, content)\n\twords = defaultdict(list)\n\tfor (word, vector) in content:\n\t\tif \"_\" in word:\n\t\t\twords[word.lower().split(\"_\")[0]].append(vector)\n\t\telse:\n\t\t\twords[word.lower()].append(vector)\n\treturn words\n\n# Written by Anouk based on qa.c\ndef qa(wordvectors, questions, distanceMeasure):\n\t#select distance measure function\n\tif (distanceMeasure == \"euclidean\"):\n\t\tsim_measure_function = EuclideanSimilarity\n\telif (distanceMeasure == \"jaccard\"):\n\t\tsim_measure_function = 
JaccardSimilarity\n\telif (distanceMeasure == \"pearson\"):\n\t\tsim_measure_function = PearsonCorrelation\n\telif (distanceMeasure == \"spearman\"):\n\t\tsim_measure_function = SpearmanCorrelation\n\telif (distanceMeasure == \"mahalanobis\"):\n\t\tsim_measure_function = MahalanobisDist\n\telif (distanceMeasure == \"manhattan\"):\n\t\tsim_measure_function = ManhattanSimilarity\n\telif (distanceMeasure == \"manhattan_sqrt\"):\n\t\tsim_measure_function = ManhattanSimilaritySqrt\n\telif (distanceMeasure == \"manhattan_squared\"):\n\t\tsim_measure_function = ManhattanSimilaritySquared\n\telse: #default cosine similarity\n\t\tsim_measure_function = CosineSimilarity\n\t\n\t# initialize empty answers list\n\tanswers = []\n\t\n\t# iterate over all questions\n\tfor question in questions:\n\n\t\t# get representations for a, b and c, only if they actually exist\n\t\tif question[0] in wordvectors and question[1] in wordvectors and question[2] in wordvectors:\n\t\t\t\n\t\t\t# get the word projections, this is in wordvectors[word], we assume for now that \n\t\t\t# there is only one word projection, but the content of wordvectors[word] is a list\n\t\t\t# so we have to ask for index 0\n\t\t\ta = wordvectors[question[0]][0]\n\t\t\tb = wordvectors[question[1]][0]\n\t\t\tc = wordvectors[question[2]][0]\n\n\t\t\t# compute v, normalize it. 
Result is a tuple\n\t\t\ty = normalize( [b[i] - a[i] + c[i] for i in xrange(len(a))] )\n\n\t\t\t# initialize bestSim and bestWord\n\t\t\t# sim ranges between -1 and 1, where 1 is most similar\n\t\t\tbestSim = -9999999999\n\t\t\tbestWord = \"nothing\"\n\t\t\t\n\t\t\t# look at all word representations to find the answer to a:b c:bestWord\n\t\t\t# except for a, b and c\n\t\t\tquestion_set = set(question)\n\t\t\tfor word in wordvectors:\n\t\t\t\tif word not in question_set:\n\n\t\t\t\t\t# again assume that there is only one projection for the word\n\t\t\t\t\twordRep = wordvectors[word][0]\n\n\t\t\t\t\tsim = sim_measure_function(y, wordRep)\n\t\t\t\t\t\n\t\t\t\t\t# save result if it is better than the previous best result\n\t\t\t\t\tif sim > bestSim:\n\t\t\t\t\t\tbestSim = sim\n\t\t\t\t\t\tbestWord = word\n\t\t\n\t\t# If we don't have a projection for a, b, or c, we won't be answering the question.\n\t\telse:\n\t\t\tbestWord = 'nothing'\n\t\tanswers.append(bestWord)\n\t\tprint question[0], ' ', question[1], ' ', question[2], ' ', bestWord\n\treturn answers\n\ndef CosineSimilarity(vec1, vec2):\n # similarity is defined as the cosine similarity\n # cosine similarity normaly is (a (dot product) b) / (norm(a) * norm(b))\n # we have normalized a and b, so the denominator is always one and can be discarded\n return sum([vec1[i] * vec2[i] for i in xrange(len(vec1))])\n\ndef EuclideanSimilarity(vec1, vec2):\n\treturn 1/(1 + math.sqrt(sum([(vec1[i] - vec2[i])**2 for i in xrange(len(vec1))])))\n\ndef JaccardSimilarity(vec1, vec2):\n #Jaccard / Tanimoto Coefficient\n #n = len(set(vec1).intersection(set(vec2)))\n #return 1/(1 + n / float(len(vec1) + len(vec2) - n))\n\n #similarity belongs to [0,1], with 1 meaning its exact replica\n similarity = float(len(list(set(vec1).intersection(set(vec2))))*1.0/len(list(set(vec1).union(set(vec2))))) \n return 1 + similarity\n\ndef average(x):\n assert len(x) > 0\n return float(sum(x)) / len(x)\n\n<<<<<<< HEAD\n# def PearsonCorrelation(x, 
y):\n# assert len(x) == len(y)\n# n = len(x)\n# assert n > 0\n# avg_x = average(x)\n# avg_y = average(y)\n# diffprod = 0\n# xdiff2 = 0\n# ydiff2 = 0\n# for i in range(n):\n# <<<<<<< HEAD\n# \t xdiff = x[i] - avg_x\n# \t ydiff = y[i] - avg_y\n# \t diffprod += xdiff * ydiff\n# \t xdiff2 += xdiff * xdiff\n# \t ydiff2 += ydiff * ydiff\n# =======\n# xdiff = x[i] - avg_x\n# ydiff = y[i] - avg_y\n# diffprod += xdiff * ydiff\n# xdiff2 += xdiff * xdiff\n# ydiff2 += ydiff * ydiff\n# >>>>>>> af23aedc41d92947901841e99425af906cd5f0e4\n\n# return diffprod / math.sqrt(xdiff2 * ydiff2)\n=======\ndef PearsonCorrelation(x, y):\n assert len(x) == len(y)\n n = len(x)\n assert n > 0\n avg_x = average(x)\n avg_y = average(y)\n diffprod = 0\n xdiff2 = 0\n ydiff2 = 0\n for i in range(n):\n xdiff = x[i] - avg_x\n ydiff = y[i] - avg_y\n diffprod += xdiff * ydiff\n xdiff2 += xdiff * xdiff\n ydiff2 += ydiff * ydiff\n\n return diffprod / math.sqrt(xdiff2 * ydiff2)\n>>>>>>> e885d9a794f29961a54dabb8454bbe74b2d77271\n\ndef SpearmanCorrelation(x,y):\n\tfrom scipy import stats\n return stats.stats.spearmanr(x, y)[0]\n\ndef ManhattanSimilarity(vec1, vec2):\n return - sum([math.fabs(vec1[i] - vec2[i]) for i in xrange(len(vec1))])\n # return 1/(1 + math.fabs(sum([(vec1[i] - vec2[i]) for i in xrange(len(vec1))])))\n\ndef ManhattanSimilaritySqrt(vec1, vec2):\n return - sum([ math.sqrt(math.fabs(vec1[i] - vec2[i])) for i in xrange(len(vec1)) ])\n\ndef ManhattanSimilaritySquared(vec1, vec2):\n return - sum([ (vec1[i] - vec2[i]) ** 2 for i in xrange(len(vec1)) ]) \n\n\"\"\"\ndef hamming_distance(s1, s2):\n\tfrom pattern.vector import distance\n #Return the Hamming distance between equal-length sequences\n if len(s1) != len(s2):\n raise ValueError(\"Undefined for sequences of unequal length\")\n return sum(ch1 != ch2 for ch1, ch2 in zip(s1, s2))\n\"\"\"\n\ndef estimate(self, xDest, yDest):\n xd = xDest - self.xPos\n yd = yDest - self.yPos\n # Euclidian Distance\n d = math.sqrt(xd * xd + yd * yd)\n # 
Manhattan distance\n # d = abs(xd) + abs(yd)\n # Chebyshev distance\n # d = max(abs(xd), abs(yd))\n return(d)\n\ndef MahalanobisDist(x, y):\n\timport numpy as np\n covariance_xy = np.cov(x,y, rowvar=0)\n inv_covariance_xy = np.linalg.inv(covariance_xy)\n xy_mean = np.mean(x),np.mean(y)\n x_diff = np.array([x_i - xy_mean[0] for x_i in x])\n y_diff = np.array([y_i - xy_mean[1] for y_i in y])\n diff_xy = np.transpose([x_diff, y_diff])\n\n md = []\n dist = 0\n for i in range(len(diff_xy)):\n md.append(np.sqrt(np.dot(np.dot(np.transpose(diff_xy[i]),inv_covariance_xy),diff_xy[i])))\n dist += md[i]\n return 1 / (1 + dist/len(md))\n \nif __name__ == \"__main__\":\n\tif not len(sys.argv) == 3:\n\t\tprint \"Call me as:\"\n\t\tprint \"python qa_cristina.py wordvectors.txt distancemeasurename\"\n\t\tsys.exit()\n \n\tstart = time()\n\tprint \"Loading questions...\"\n\tquestions = load_questions()\n\n\tprint \"Loading word projections\"\n\tvecs = load_vectors(sys.argv[1])\n\n\tprint \"Answering questions\"\n\tdistanceMeasure = sys.argv[2]\n\tanswers = qa(vecs, questions, distanceMeasure)\n\n\tprint \"Saving answers to file\"\n\n\tsave_answers(answers, \"precomputedAnswers/testCristina\" + distanceMeasure + \"640Similarity.answered\")\n\t#save_answers(answers, \"precomputedAnswers/testRemi\" + distanceMeasure + \"Similarity.word_relationship.answered\")\n\tstop = time()\n\tprint \"Spent\", int(stop - start + 0.5), \"seconds.\"\n\t\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.4566546678543091,
"alphanum_fraction": 0.4652877748012543,
"avg_line_length": 26.121952056884766,
"blob_id": "e539dcdead9aa1b974b25ba3d8bf7f2886144186",
"content_id": "32d7db7a2b1c3e751c2faf31b847f8d618641f25",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 5560,
"license_type": "no_license",
"max_line_length": 180,
"num_lines": 205,
"path": "/not-used/qa.c",
"repo_name": "anoukv/disambiguateCWSPR",
"src_encoding": "UTF-8",
"text": "#include <stdio.h>\n#include <string.h>\n#include <math.h>\n#include <stdlib.h>\n\nconst int max_size=2000;\nconst int N=40;\nconst int max_w=50;\n\nint main(int argc, char **argv)\n{\n // declarations\n FILE *wordProjections, *questions, *output;\n int numberOfWords, size, counter, a, b, c, vA, vB, vC, missing;\n char stA[max_size], stB[max_size], stC[max_size], bestWord[max_size], file_name[max_size], questions_file_name[max_size], output_file_name[max_size];\n float *M, *y;\n char *vocab, *notFoundMessage;\n float bestDistance, len, dist;\n \n notFoundMessage = \"NOTHING\";\n\n // argument handling\n if (argc<4) {\n printf(\"Usage: ./qa <PROJECTIONS> <QUESTIONS> <OUTPUT> \\nwhere PROJECTIONS contains word projections, QUESTIONS contains questions and OUTPUT contains the outpute file\\n\");\n return 0;\n }\n \n strcpy(file_name, argv[1]);\n strcpy(questions_file_name, argv[2]);\n strcpy(output_file_name, argv[3]);\n \n wordProjections=fopen(file_name, \"rb\");\n if (wordProjections==NULL)\n {\n printf(\"Projections file not found\\n\");\n return -1;\n }\n \n // get the number of words and number of dimensions (size)\n fscanf(wordProjections, \"%d\", &numberOfWords);\n fscanf(wordProjections, \"%d\", &size);\n \n // allocate memory for the vocabulary, matrix and y\n vocab=(char *)malloc(numberOfWords*max_w*sizeof(char));\n M=(float *)malloc(numberOfWords*size*sizeof(float));\n y=(float *)malloc(size*sizeof(float));\n \n if (M==NULL)\n {\n printf(\"Cannot allocate memory: %d MB\\n\", numberOfWords*size*sizeof(float)/1048576);\n return -1;\n }\n \n // fill the vocabulary and the matrix with normalized vectors\n for (b=0; b<numberOfWords; b++)\n {\n fscanf(wordProjections, \"%s\", &vocab[b*max_w]);\n for (a=0; a<size; a++)\n {\n fscanf(wordProjections, \"%f\", &M[a+b*size]);\n }\n \n len=0;\n for (a=0; a<size; a++)\n {\n len+=M[a+b*size]*M[a+b*size];\n }\n \n len=sqrt(len);\n for (a=0; a<size; a++)\n {\n M[a+b*size]/=len;\n }\n }\n \n // make whole 
vocabulary uppercase\n for (a=0; a<numberOfWords*max_w; a++)\n {\n vocab[a]=toupper(vocab[a]);\n }\n \n fclose(wordProjections);\n \n // open file with questions\n questions=fopen(questions_file_name, \"rb\");\n if (questions==NULL)\n {\n printf(\"Questions file not found\\n\");\n return -1;\n }\n \n // open file for output\n output = fopen(output_file_name, \"w\");\n \n // init counter\n counter = 0;\n missing = 0;\n \n // as long as we have not reached the EOF, look for answer to the question\n \n while(fscanf(questions,\"%s\", &stA) != EOF)\n {\n counter++;\n // printf(\"%d\\n\", counter);\n fscanf(questions,\"%s\", &stB);\n fscanf(questions,\"%s\", &stC);\n \n // uppercase the words\n for (a=0; a<strlen(stA); a++)\n {\n stA[a]=toupper(stA[a]);\n }\n for (a=0; a<strlen(stB); a++)\n {\n stB[a]=toupper(stB[a]);\n }\n for (a=0; a<strlen(stC); a++)\n {\n stC[a]=toupper(stC[a]);\n }\n \n for (vA=0; vA<numberOfWords; vA++){\n if (!strcmp(&vocab[vA*max_w], stA))\n {\n break;\n }\n }\n for (vB=0; vB<numberOfWords; vB++)\n {\n if (!strcmp(&vocab[vB*max_w], stB))\n {\n break;\n }\n }\n for (vC=0; vC<numberOfWords; vC++)\n {\n if (!strcmp(&vocab[vC*max_w], stC))\n {\n break;\n }\n }\n \n if (vA == numberOfWords || vB == numberOfWords || vC == numberOfWords)\n {\n// printf(\"Word was not found in dictionary\\n\");\n fwrite(notFoundMessage, sizeof(char), strlen(notFoundMessage), output);\n fwrite(\"\\n\", sizeof(char), 1, output);\n missing++;\n\n }\n else\n {\n // compute y\n for (a=0; a<size; a++)\n {\n y[a] = M[a+vB*size]-M[a+vA*size] + M[a+ vC *size];\n }\n\n // normalize y again.\n len=0;\n for (a=0; a<size; a++)\n {\n len+=y[a]*y[a];\n }\n \n len=sqrt(len);\n for (a=0; a<size; a++)\n {\n y[a]/=len;\n }\n \n bestDistance = 0;\n \n // find best match to y\n for (c=0; c<numberOfWords; c++)\n {\n if(c != vC && c != vA && c != vB)\n {\n dist=0;\n for (a=0; a<size; a++)\n {\n dist+=y[a]*M[a+c*size];\n }\n if(dist > bestDistance)\n {\n bestDistance = dist;\n strcpy(bestWord, 
&vocab[c*max_w]);\n }\n }\n \n }\n fwrite(bestWord, sizeof(char), strlen(bestWord), output);\n fwrite(\"\\n\", sizeof(char), 1, output);\n // printf(\"%s, %s, %s, %s\", &vocab[vA*max_w], &vocab[vB*max_w], &vocab[vC*max_w], bestWord);\n }\n\n }\n \n fclose(questions);\n fclose(output);\n printf(\"From %d questions, %d were not answered properly. Keep this in mind.\\nDone!\", counter, missing);\n \n \n return 0;\n}\n"
},
{
"alpha_fraction": 0.6401468515396118,
"alphanum_fraction": 0.6499388217926025,
"avg_line_length": 26.694915771484375,
"blob_id": "8cda4d635638ea0e8e42af48dc301ebed1fa81ed",
"content_id": "a42b7d1aeacac1b3b5b07f30bc71434fd90a554a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3268,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 118,
"path": "/SemEval/semEvalQA.py",
"repo_name": "anoukv/disambiguateCWSPR",
"src_encoding": "UTF-8",
"text": "import sys\nimport pickle\nfrom collections import defaultdict\nfrom math import sqrt\n\n\n# Written by Remi\n# Approved by Anouk\ndef save_answers(answers, filename):\n\tf = open(filename, 'w')\n\tf.write( \"\".join([ str(answer[0])+ ' \"' + answer[1][0] + \":\" + answer[1][1] + '\"\\n' for answer in answers]) )\n\tf.close()\n\ndef load_questions(filename):\n\tf = open(filename, 'r')\n\tstart = False\n\tquestions = []\n\twhile True:\n\t\tl = f.readline()\n\t\tif l not in [\"\", \"\\n\", \"\\r\\n\"] and \":\" in l and \" \" not in l:\n\t\t\tstart = True\n\t\t\tquestions.append(tuple(l.lower().replace(\"\\n\", \"\").replace('\"', \"\").split(\":\")))\n\t\telif start:\n\t\t\tbreak\n\tf.close()\n\treturn questions\n\ndef load_answers(filename):\n\tf = open(filename, 'r')\n\tanswers = [ tuple(l.lower().replace(\"\\n\",\"\").replace('\"', '').split(\":\")) for l in f.readlines()]\n\tf.close()\n\treturn answers \n\n# Written by Remi\n# Approved by Anouk\ndef normalizeString(vec):\n\tvec = [ float(x) for x in vec]\n\ttotal = sqrt( sum([v**2 for v in vec]) )\n\tnew_vec = []\n\tfor v in vec:\n\t\tnew_vec.append(v/total)\n\treturn tuple(new_vec)\n\ndef normalize(vec):\n\ttotal = sqrt( sum([v**2 for v in vec]) )\n\tnew_vec = []\n\tfor v in vec:\n\t\tnew_vec.append(v/total)\n\treturn tuple(new_vec)\n\n# written by Remi\n# Approved and edited by Anouk (made all words lower case and took out internal normalize function)\ndef load_vectors(filename):\n\tf = open(filename,'r')\n\tf.readline()\n\tcontent = [ filter( lambda x : not x in [\"\\n\",\"\"], l.replace(\"\\n\", \"\").split(\" \")) for l in f.readlines() ]\n\tcontent = [ (l[0], normalizeString(l[1:])) for l in content ]\n\tcontent = filter(lambda x : not x[1] == None, content)\n\twords = defaultdict(list)\n\tfor (word, vector) in content:\n\t\tif \"_\" in word:\n\t\t\twords[word.lower().split(\"_\")[0]].append(vector)\n\t\telse:\n\t\t\twords[word.lower()].append(vector)\n\treturn words\n\n# Written by Anouk based on 
qa.c\ndef qa(wordvectors, questions, answers):\n\t\n\t# initialize empty answers list\n\tranking = []\n\n\tfor answer in answers:\n\t\t# compute cosine similarity with all questions\n\t\tsim = 0\n\t\tfor question in questions:\n\t\t\tif question[0] in wordvectors and question[1] in wordvectors and answer[0] in wordvectors and answer[1] in wordvectors:\n\t\t\t\ta = wordvectors[question[0]][0]\n\t\t\t\tb = wordvectors[question[1]][0]\n\t\t\t\tc = wordvectors[answer[0]][0]\n\t\t\t\td = wordvectors[answer[1]][0]\n\n\t\t\t\ty = normalize([b[i] - a[i] + c[i] for i in xrange(len(a))])\n\t\t\t\tsim += sum([y[i] * d[i] for i in xrange(len(y))])\n\t\t\telse:\n\t\t\t\tsim = -10\n\t\tsim = sim / float(len(questions))\n\t\tranking.append((sim, answer))\n\treturn sorted(ranking, key = lambda x: x[0], reverse = True)\n\n\nif __name__ == \"__main__\":\n\tif not len(sys.argv) == 5:\n\t\tprint \"Call me as:\"\n\t\tprint \"python semEvalQA.py <wordvectors> <questions> <answers> <outfile>\"\n\t\tsys.exit()\n\n\tprojections = sys.argv[1]\n\tquestionsFile = sys.argv[2]\n\tanswersFile = sys.argv[3]\n\toutFile = sys.argv[4]\n\n\tprint \"Loading questions...\"\n\tquestions = load_questions(questionsFile)\n\tprint \"Loaded \", len(questions), \" questions.\"\n\n\tprint \"Loading answers...\"\n\tanswers = load_answers(answersFile)\n\tprint \"Loaded \", len(answers), \" answers\"\n\t\n\tprint \"Loading word projections\"\n\tvecs = load_vectors(projections)\n\t\n\tprint \"Answering questions\"\n\tranking = qa(vecs, questions, answers)\n\t\n\tprint \"Saving answers to file\"\n\tsave_answers(ranking, outFile)\n"
},
{
"alpha_fraction": 0.5701754093170166,
"alphanum_fraction": 0.5760233998298645,
"avg_line_length": 17.88888931274414,
"blob_id": "2f81234c3e2356bc0552ecb57324a6de8aeead8b",
"content_id": "cbcdbac185d678c781b6a8923baa927cbbc241ca",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 342,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 18,
"path": "/stats.py",
"repo_name": "anoukv/disambiguateCWSPR",
"src_encoding": "UTF-8",
"text": "import sys\n\ndef stats(inpt):\n\n\n\nif __name__ == \"__main__\":\n\tif not len(sys.argv) == 2:\n\t\tprint \"Call me as:\"\n\t\tprint \"pypy stats.py data_file\"\n\n\tdef read_file(filename):\n\t\tf = open(filename, 'r')\n\t \tinpt = filter(lambda x : not x == \"\", f.readline().replace(\"\\n\", \"\").split(\" \"))\n\t \tf.close()\n\t \treturn inpt\n\n\tstats(read_file(sys.argv[1]))\n\n\n"
}
] | 23 |
Patriotwo/Python-Challenge
|
https://github.com/Patriotwo/Python-Challenge
|
b924a9943a1489164456a975098bef2fed8ba824
|
f2f9bb85fa30f0fbf75da7c6245252d4e5ed4258
|
322ec02a700892e37cedcbb0d79e38f71f13f3cc
|
refs/heads/main
| 2023-03-01T12:56:17.210363 | 2021-02-08T19:01:34 | 2021-02-08T19:01:34 | 334,586,634 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6008230447769165,
"alphanum_fraction": 0.6117969751358032,
"avg_line_length": 28.139999389648438,
"blob_id": "0a91e99c94847665bc677ac3f297ca7ccb8f6267",
"content_id": "a9c1c7564353fbeb4ce7fc0b21e7ced11abfb9d6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2916,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 100,
"path": "/PyBank/main.py",
"repo_name": "Patriotwo/Python-Challenge",
"src_encoding": "UTF-8",
"text": "import os\nimport csv\n\n#Thanks to Joseph Yon \"Big1bluey\" for the references and guidance of his code\n# read the CSV file \n\nPyBankcsv = os.path.join('PyBank','Resources','budget_data.csv')\n\n\nwith open(PyBankcsv, newline='') as csvfile:\n csvreader = csv.reader(csvfile, delimiter=',')\n\n csv_header = next(csvreader)\n \n# names to store dataset values\n profit = []\n month_collect = []\n pl_change = []\n monthly_change = []\n month_count = []\n \n#Store Values\n previous_row = 0\n net_total = 0\n total_months = 0\n total_profit = 0\n highest_pl = 0\n lowest_pl = 0\n \n# This will start the csv reader on the 2nd row\n\n\n\n for row in csvreader:\n\n\n\n\n\n# Count the total number of months\n \n total_months += 1 \n \n# Calculate the total profit and loss\n\n net_total += int(row[1])\n \n# Calculate the monthly changes in profit/loss over the entire dataset \n# Thanks to Joseph Yon \"Bigbluey\". I struggled for 3-days with my solution \n \n pl_change = int(row[1]) - previous_row\n monthly_change.append(pl_change)\n previous_row = int(row[1])\n month_count.append(row[0])\n \n\n\n# find the average \n \n total_month_change = sum(monthly_change) - monthly_change[0]\n value_monthly_change = len(monthly_change) - 1\n \n average_change = total_month_change / 85\n # value_monthly_change = 85\n # toatal_average_change = (-196785)\n # using the names in the equation returns a Zero Division Error\n \n\n\n #Greatest increase in profits (date and amount) Thanks to BigBluey \n if int(row[1]) > highest_pl: \n highest_pl = int(row[1])\n highest_date = row[0]\n greatest_increase = max(monthly_change)\n\n if int(row[1]) < lowest_pl: \n lowest_pl = int(row[1])\n lowest_date = row[0]\n greatest_decrease = min(monthly_change)\n\nprint(f\"Financial Analysis\")\nprint(f\"-------------------------------------------\")\nprint(f\"Total Months: \" + str(total_months))\nprint(f\"Total Profit/Loss: \" + \"$\" + str(net_total))\nprint(f\"Average Change: \" + \"$\" + 
str(average_change))\nprint(f\"Greatest Increase in Profits:, {str(highest_date)}, (${greatest_increase})\")\nprint(f\"Greatest Decrease in Profits:, {str(lowest_date)}, (${greatest_decrease})\")\n\n\n# export the results \noutput_file = os.path.join('PyBank','Resources','budget_data.text')\nwith open(output_file, 'w',) as txtfile:\n\n txtfile.write(f\"Financial Analysis\")\n txtfile.write(f\"-------------------------------------------\")\n txtfile.write(f\"Total Months: \" + str(total_months))\n txtfile.write(f\"Total Profit/Loss: \" + \"$\" + str(net_total))\n txtfile.write(f\"Average Change: \" + \"$\" + str(average_change))\n txtfile.write(f\"Greatest Increase in Profits:, {str(highest_date)}, (${greatest_increase})\")\n txtfile.write(f\"Greatest Decrease in Profits:, {str(lowest_date)}, (${greatest_decrease})\") "
},
{
"alpha_fraction": 0.5330848097801208,
"alphanum_fraction": 0.5445790886878967,
"avg_line_length": 30.509803771972656,
"blob_id": "57a40c6beda3f2bc58af882fc3833c13135c6742",
"content_id": "4c6fab94fc800bc89749c16fd9bb37c069cd9d85",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3219,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 102,
"path": "/PyPoll/main.py",
"repo_name": "Patriotwo/Python-Challenge",
"src_encoding": "UTF-8",
"text": "# Special Thanks to Olufunke Olaleye: ofunkey and Joseph Yon: BigBluey for their code examples\n\nimport os\nimport csv\n\n\ncsvpath = os.path.join('.', 'PyPoll', 'Resources', 'election_data.csv')\nwith open(csvpath, newline='') as csvfile:\n csvreader = csv.reader(csvfile, delimiter=',')\n\n csv_header = next(csvfile) \n\n\n# Create a list to count the number of voters in voter ID row [0]\n voter_ID = []\n\n# Create a list for candidate names\n candidate_list = []\n\n# Create a list to count each of the candidate vote\n Khan_Vote = 0\n Correy_Vote = 0\n Li_Vote = 0\n OTooley_Vote = 0\n winning_candidate = []\n number_votes = 0\n \n \n\n\n# Why is this action different from PyBank - row = next(csvreader)\n for row in csvreader:\n\n# Count the number of votes\n number_votes += 1\n\n# Return a list of candidate votes \n if (row[2]) == (\"Khan\"):\n Khan_Vote += 1\n elif(row[2]) == (\"Correy\"):\n Correy_Vote += 1\n elif(row[2]) == (\"Li\"):\n Li_Vote += 1\n else:\n OTooley_Vote += 1\n\n \n # Calculate the percentage of each candidate votes\n #I need eac candidates individual total votes\n #This needs to be Total votes / \"Khan\" votes * 100\n #Total votes / \"Correy\" votes * 100 \n #Total votes / \"Li\" votes * 100 \n #Total votes / \"O'Tooley\" votes * 100\n # take off the *100\n \n khan_percent = (Khan_Vote / number_votes)\n correy_percent = (Correy_Vote / number_votes)\n li_percent = (Li_Vote / number_votes)\n otooley_percent = (OTooley_Vote / number_votes)\n\n#Calculte the winner \n winner = max(Khan_Vote, Correy_Vote, Li_Vote, OTooley_Vote)\n if winner == Khan_Vote:\n winning_candidate = \"Khan\"\n elif winner == Correy_Vote:\n winning_candidate = \"Correy\"\n elif winner == Li_Vote:\n winning_candidate = \"Li\"\n else:\n winning_candidate = \"O'Tooley\"\n\n print(f'Election Results')\n print(f'-------------------------------------------')\n print(f'Total Votes: {number_votes}')\n print(f'Khan: {khan_percent:.3%} ({Khan_Vote})')\n print(f'Correy: 
{correy_percent:.3%} ({Correy_Vote})')\n print(f'Li: {li_percent:.3%} ({Li_Vote})')\n print(f\"O'Tooley: {otooley_percent:.3%} ({OTooley_Vote})\")\n print(f'-------------------------------------------')\n print(f'Winner: {winning_candidate}')\n print(f'-------------------------------------------')\n\n\n # Output files\n\n output_file = os.path.join('.', 'PyPoll', 'Resources', 'election_data_revised.text')\n\n with open(output_file, 'w',) as txtfile:\n\n\n txtfile.write(f'Election Results')\n txtfile.write(f'-------------------------------------------')\n txtfile.write(f'Total Votes: {number_votes}')\n \n \n txtfile.write(f'Khan: {khan_percent:.3%} ({Khan_Vote})')\n txtfile.write(f'Correy: {correy_percent:.3%} ({Correy_Vote})')\n txtfile.write(f'Li: {li_percent:.3%} ({Li_Vote})')\n txtfile.write(f\"O'Tooley: {otooley_percent:.3%} ({OTooley_Vote})\")\n txtfile.write(f'-------------------------------------------')\n txtfile.write(f'Winner: {winning_candidate}')\n txtfile.write(f'-------------------------------------------')\n \n"
}
] | 2 |
Cuido/anton
|
https://github.com/Cuido/anton
|
ce6ec8411dd1c42e675a839d4a6f246a5e2dbc1d
|
ef0b95ef0179c25bfd25de6a66312324056b6632
|
fdf8ac5fdce3b6ad197f4685e772c8f766271c69
|
refs/heads/master
| 2021-09-26T05:24:57.643695 | 2019-12-17T19:10:03 | 2019-12-17T19:10:03 | 224,893,495 | 0 | 0 | null | 2019-11-29T17:01:28 | 2019-12-17T19:10:50 | 2021-09-22T18:07:15 |
Python
|
[
{
"alpha_fraction": 0.5064935088157654,
"alphanum_fraction": 0.5649350881576538,
"avg_line_length": 17.117647171020508,
"blob_id": "2fa311d2df76ced2903786a25ebf62af87d01e65",
"content_id": "415128c0a0daa9e57f9443e58802c2d15172cc9f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 308,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 17,
"path": "/cuido/migrations/0002_auto_20191215_1050.py",
"repo_name": "Cuido/anton",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.0 on 2019-12-15 10:50\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('cuido', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterModelTable(\n name='user',\n table='user',\n ),\n ]\n"
},
{
"alpha_fraction": 0.7456359267234802,
"alphanum_fraction": 0.7556109428405762,
"avg_line_length": 12.366666793823242,
"blob_id": "7e5c4ef7c312cc3fa823dc072972aae2d955a26a",
"content_id": "b33abcd799ae1d81ad6c270f9c31ed6e99189b71",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 401,
"license_type": "no_license",
"max_line_length": 32,
"num_lines": 30,
"path": "/README.md",
"repo_name": "Cuido/anton",
"src_encoding": "UTF-8",
"text": "# anton\nCuido Server\n\n## Download\nPython 3\n\nPostgresql 12.1\n\n## Virtual Environment\n### Make a virtual environment\n`virtualenv anton-venv`\n\n### To activate the virtualenv\n`source anton-venv/bin/activate`\n\n\n### To deactivate virtualenv\n`deactivate`\n\n\n## Database\nDownload postgres\n\n`brew install postgres`\n\n`initdb /usr/local/var/postgres`\n\nTo Start postgresql server\n\n`brew services start postgresql`\n"
},
{
"alpha_fraction": 0.4931506812572479,
"alphanum_fraction": 0.7054794430732727,
"avg_line_length": 15.333333015441895,
"blob_id": "e7861865a8dd6e7974c0bbbe6ddc03d2cb3310d6",
"content_id": "022cc131626c110ceed6c946d22c621e746bb3e1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 146,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 9,
"path": "/requirements.txt",
"repo_name": "Cuido/anton",
"src_encoding": "UTF-8",
"text": "Django==3.0\ndjangorestframework==3.10.3\nmock==3.0.5\nsimplejson==3.17.0\ngunicorn==20.0.4\nstatsd==3.3.0\nretrying==1.3.3\nboto==2.49.0\npsycopg2==2.8.4"
},
{
"alpha_fraction": 0.7441860437393188,
"alphanum_fraction": 0.7441860437393188,
"avg_line_length": 21,
"blob_id": "b31fc8a055aef515a686d907b1f898a2aab7d717",
"content_id": "54ef1c7cfa0a6605e3de94a4bf47ddbf5c8c0440",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 43,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 2,
"path": "/cuido/models/__init__.py",
"repo_name": "Cuido/anton",
"src_encoding": "UTF-8",
"text": "from .user import *\nfrom .services import *"
},
{
"alpha_fraction": 0.5420040488243103,
"alphanum_fraction": 0.5713562965393066,
"avg_line_length": 42.911109924316406,
"blob_id": "a57fbf7e5acd56ee03ee902c61fc0a8c373769aa",
"content_id": "530c55af31e4e61758e11a6c4acdca4958504670",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1976,
"license_type": "no_license",
"max_line_length": 130,
"num_lines": 45,
"path": "/cuido/migrations/0003_cla_usercredentials.py",
"repo_name": "Cuido/anton",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.0 on 2019-12-17 18:53\n\nimport django.core.validators\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('cuido', '0002_auto_20191215_1050'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='CLA',\n fields=[\n ('cla_id', models.AutoField(primary_key=True, serialize=False)),\n ('first_name', models.CharField(max_length=255)),\n ('last_name', models.CharField(blank=True, max_length=255, null=True)),\n ('phone_number', models.CharField(max_length=10, validators=[django.core.validators.MinLengthValidator(10)])),\n ('email_id', models.EmailField(max_length=254)),\n ('gender', models.SmallIntegerField(choices=[(1, 'MALE'), (2, 'FEMALE'), (3, 'OTHERS')])),\n ('primary_language', models.CharField(max_length=255)),\n ('seconday_language', models.CharField(max_length=255)),\n ('tertiary_language', models.CharField(blank=True, max_length=255, null=True)),\n ],\n options={\n 'db_table': 'cuido_lifestyle_aficionados',\n },\n ),\n migrations.CreateModel(\n name='UserCredentials',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('password', models.CharField(max_length=12, validators=[django.core.validators.MinLengthValidator(8)])),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('updated_at', models.DateTimeField(auto_now=True)),\n ('user_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user', to='cuido.User')),\n ],\n options={\n 'db_table': 'user_credentials',\n },\n ),\n ]\n"
},
{
"alpha_fraction": 0.5602779984474182,
"alphanum_fraction": 0.568297266960144,
"avg_line_length": 44.621952056884766,
"blob_id": "8b45512247ac76bf77a1ef9699fac409c9bc1866",
"content_id": "69909b50ed1302c11d8f89fdbf7b6d5cd478df4b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3741,
"license_type": "no_license",
"max_line_length": 159,
"num_lines": 82,
"path": "/cuido/migrations/0004_auto_20191217_1856.py",
"repo_name": "Cuido/anton",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.0 on 2019-12-17 18:56\n\nimport django.core.validators\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('cuido', '0003_cla_usercredentials'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Bookings',\n fields=[\n ('booking_id', models.AutoField(primary_key=True, serialize=False)),\n ('slot', models.CharField(max_length=100)),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('cla_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='cla', to='cuido.CLA')),\n ],\n options={\n 'db_table': 'bookings',\n },\n ),\n migrations.CreateModel(\n name='Payment',\n fields=[\n ('id', models.IntegerField(primary_key=True, serialize=False)),\n ('name', models.CharField(max_length=50)),\n ],\n ),\n migrations.CreateModel(\n name='Services',\n fields=[\n ('service_id', models.AutoField(primary_key=True, serialize=False)),\n ('name', models.CharField(max_length=255)),\n ],\n options={\n 'db_table': 'services',\n },\n ),\n migrations.CreateModel(\n name='UserFeedback',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('service_rating', models.IntegerField(validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(5)])),\n ('cla_rating', models.IntegerField(validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(5)])),\n ('review', models.TextField(blank=True, null=True)),\n ('booking_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='bookings', to='cuido.Bookings')),\n ('user_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='users', to='cuido.User')),\n ],\n ),\n migrations.CreateModel(\n name='Transactions',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, 
verbose_name='ID')),\n ('slot_start_time', models.DateTimeField()),\n ('slot_end_time', models.DateTimeField()),\n ('actual_payable', models.IntegerField()),\n ('amount_payable', models.IntegerField()),\n ('cuido_cash_used', models.IntegerField(blank=True, null=True)),\n ('discount_availed', models.IntegerField(blank=True, null=True)),\n ('booking_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='booking', to='cuido.Bookings')),\n ('payment_mode', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='payment', to='cuido.Payment')),\n ],\n options={\n 'db_table': 'transactions',\n },\n ),\n migrations.AddField(\n model_name='bookings',\n name='service_id',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='service', to='cuido.Services'),\n ),\n migrations.AddField(\n model_name='bookings',\n name='user_id',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='booked_user', to='cuido.User'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5862069129943848,
"alphanum_fraction": 0.6206896305084229,
"avg_line_length": 13.5,
"blob_id": "3824b3f143b3a720a7708795e320994f74d186d2",
"content_id": "c4c10091b697ea0b5c8217849769cd920b25428c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 116,
"license_type": "no_license",
"max_line_length": 25,
"num_lines": 8,
"path": "/cuido/enums/payments.py",
"repo_name": "Cuido/anton",
"src_encoding": "UTF-8",
"text": "from enum import Enum\n\n\nclass PaymentModes(Enum):\n PAYTM = 1\n PHONEPE = 2\n RAZORPAY = 3\n GOOGLE_PAY = 4\n"
},
{
"alpha_fraction": 0.7070446610450745,
"alphanum_fraction": 0.712199330329895,
"avg_line_length": 37.16393280029297,
"blob_id": "c6856331e828046c8f2596708c4ca7cb29f9aa8e",
"content_id": "7dad9fec05f8a2b8c9ffdd8639618dc146aa5d5b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2328,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 61,
"path": "/cuido/models/services.py",
"repo_name": "Cuido/anton",
"src_encoding": "UTF-8",
"text": "from django.db import models\nfrom django.core.validators import MinValueValidator, MaxValueValidator\nfrom .user import User, CLA\n\n\nclass Services(models.Model):\n service_id = models.AutoField(primary_key=True)\n name = models.CharField(null=False, blank=False, max_length=255)\n\n class Meta:\n app_label = 'cuido'\n db_table = 'services'\n\n\nclass Payment(models.Model):\n id = models.IntegerField(primary_key=True)\n name = models.CharField(max_length=50)\n\n class Meta:\n app_label = 'cuido'\n db_table = 'payments'\n\n\nclass Bookings(models.Model):\n booking_id = models.AutoField(primary_key=True)\n user_id = models.ForeignKey(User, related_name='booked_user', on_delete=models.CASCADE)\n cla_id = models.ForeignKey(CLA, related_name='cla', on_delete=models.CASCADE)\n slot = models.CharField(max_length=100)\n service_id = models.ForeignKey(Services, related_name='service', on_delete=models.CASCADE)\n created_at = models.DateTimeField(auto_now_add=True)\n\n class Meta:\n app_label = 'cuido'\n db_table = 'bookings'\n\n\nclass Transactions(models.Model):\n booking_id = models.ForeignKey(Bookings, related_name='booking', on_delete=models.CASCADE)\n slot_start_time = models.DateTimeField(null=False, blank=False)\n slot_end_time = models.DateTimeField(null=False, blank=False)\n payment_mode = models.ForeignKey(Payment, related_name='payment', on_delete=models.CASCADE)\n actual_payable = models.IntegerField(null=False, blank=False)\n amount_payable = models.IntegerField(null=False, blank=False)\n cuido_cash_used = models.IntegerField(null=True, blank=True)\n discount_availed = models.IntegerField(null=True, blank=True)\n\n class Meta:\n app_label = 'cuido'\n db_table = 'transactions'\n\n\nclass UserFeedback(models.Model):\n user_id = models.ForeignKey(User, related_name='users', on_delete=models.CASCADE)\n booking_id = models.ForeignKey(Bookings, related_name='bookings', on_delete=models.CASCADE)\n service_rating = 
models.IntegerField(validators=[MinValueValidator(1), MaxValueValidator(5)])\n cla_rating = models.IntegerField(validators=[MinValueValidator(1), MaxValueValidator(5)])\n review = models.TextField(null=True, blank=True)\n\n class Meta:\n app_label = 'cuido'\n db_table = 'user_feedback'\n"
},
{
"alpha_fraction": 0.4943310618400574,
"alphanum_fraction": 0.5623582601547241,
"avg_line_length": 20,
"blob_id": "686c9500cedb513e3be8162b5c1c9a71d4d8140a",
"content_id": "e64dea92a29b89090ed5c6722e1b286ec774321b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 441,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 21,
"path": "/cuido/migrations/0005_auto_20191217_1908.py",
"repo_name": "Cuido/anton",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.0 on 2019-12-17 19:08\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('cuido', '0004_auto_20191217_1856'),\n ]\n\n operations = [\n migrations.AlterModelTable(\n name='payment',\n table='payments',\n ),\n migrations.AlterModelTable(\n name='userfeedback',\n table='user_feedback',\n ),\n ]\n"
},
{
"alpha_fraction": 0.7878788113594055,
"alphanum_fraction": 0.7878788113594055,
"avg_line_length": 32,
"blob_id": "0346ab86b17f27f8293016d8ba6be0692a6dbb5f",
"content_id": "fba6f91715a4bc72deb772b96857f9f74cfec63d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 33,
"license_type": "no_license",
"max_line_length": 32,
"num_lines": 1,
"path": "/cuido/enums/__init__.py",
"repo_name": "Cuido/anton",
"src_encoding": "UTF-8",
"text": "from cuido.enums.gender import *\n"
},
{
"alpha_fraction": 0.7051517963409424,
"alphanum_fraction": 0.7212511301040649,
"avg_line_length": 41.6274528503418,
"blob_id": "71a17a46e5047235be432a3b4caff161256652b5",
"content_id": "b972d606b5bc70a0330df70e87ae3c166ed1ad66",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2174,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 51,
"path": "/cuido/models/user.py",
"repo_name": "Cuido/anton",
"src_encoding": "UTF-8",
"text": "from django.db import models\nfrom django.core.validators import MinLengthValidator\nfrom cuido.enums.gender import Gender\n\nGENDER_TYPES = [\n (Gender.MALE.value, Gender.MALE.name),\n (Gender.FEMALE.value, Gender.FEMALE.name),\n (Gender.OTHERS.value, Gender.OTHERS.name)\n]\n\n\nclass User(models.Model):\n first_name = models.CharField(null=False, blank=False, max_length=255)\n last_name = models.CharField(null=True, blank=True, max_length=255)\n phone_number = models.CharField(null=False, blank=False, max_length=10, validators=[MinLengthValidator(10)])\n email_id = models.EmailField(null=False, blank=False)\n gender = models.SmallIntegerField(choices=GENDER_TYPES, null=False, blank=False)\n persona = models.CharField(null=True, blank=True, max_length=255)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n class Meta:\n app_label = 'cuido'\n db_table = 'user'\n\n\nclass UserCredentials(models.Model):\n user_id = models.ForeignKey(User, related_name='user', on_delete=models.CASCADE)\n password = models.CharField(max_length=12, validators=[MinLengthValidator(8)])\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n class Meta:\n app_label = 'cuido'\n db_table = 'user_credentials'\n\n\nclass CLA(models.Model):\n cla_id = models.AutoField(primary_key=True)\n first_name = models.CharField(null=False, blank=False, max_length=255)\n last_name = models.CharField(null=True, blank=True, max_length=255)\n phone_number = models.CharField(null=False, blank=False, max_length=10, validators=[MinLengthValidator(10)])\n email_id = models.EmailField(null=False, blank=False)\n gender = models.SmallIntegerField(choices=GENDER_TYPES, null=False, blank=False)\n primary_language = models.CharField(max_length=255, null=False, blank=False)\n seconday_language = models.CharField(max_length=255, null=False, blank=False)\n tertiary_language = 
models.CharField(max_length=255, null=True, blank=True)\n\n class Meta:\n app_label = 'cuido'\n db_table = 'cuido_lifestyle_aficionados'\n"
}
] | 11 |
momentum-cohort-2019-05/w8-group-project-2-greg-sarah-benny-meagan
|
https://github.com/momentum-cohort-2019-05/w8-group-project-2-greg-sarah-benny-meagan
|
a942f8579a9f3145b5c34d9dee311885c055bbcd
|
207b8f712a6e27e0dc6d876cc05f94b150a89de6
|
773391a1ef62d5afffecbcd9276d42fb2dcb03d8
|
refs/heads/master
| 2022-12-16T17:34:01.481499 | 2019-07-18T18:27:39 | 2019-07-18T18:27:39 | 197,071,870 | 0 | 0 | null | 2019-07-15T21:01:14 | 2019-07-22T21:14:22 | 2022-12-04T03:31:21 |
Python
|
[
{
"alpha_fraction": 0.5671482086181641,
"alphanum_fraction": 0.5826645493507385,
"avg_line_length": 39.630435943603516,
"blob_id": "2abd5c47f21e2c304f68783fffe307244a08ac0e",
"content_id": "4bba35ddf11d182bfca9299858cb6076ac662c9b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1869,
"license_type": "no_license",
"max_line_length": 149,
"num_lines": 46,
"path": "/core/migrations/0001_initial.py",
"repo_name": "momentum-cohort-2019-05/w8-group-project-2-greg-sarah-benny-meagan",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.2.3 on 2019-07-16 21:47\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Category',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('category_name', models.CharField(max_length=200)),\n ],\n ),\n migrations.CreateModel(\n name='Question',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(help_text='Enter a title for your question', max_length=100)),\n ('body', models.TextField(help_text='Enter details about your question', max_length=1000)),\n ('timestamp', models.DateField(auto_now_add=True)),\n ('categories', models.ManyToManyField(to='core.Category')),\n ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='question_asker', to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'ordering': ['-timestamp'],\n },\n ),\n migrations.CreateModel(\n name='Answer',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('body', models.TextField(help_text='Enter your answer', max_length=1000)),\n ('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Question')),\n ],\n ),\n ]\n"
},
{
"alpha_fraction": 0.8128078579902649,
"alphanum_fraction": 0.8128078579902649,
"avg_line_length": 27.85714340209961,
"blob_id": "2cfa87f97338f360d16745eaf086d2a007c98131",
"content_id": "27902de744bffee7920c81c608df30123390adee",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 203,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 7,
"path": "/core/admin.py",
"repo_name": "momentum-cohort-2019-05/w8-group-project-2-greg-sarah-benny-meagan",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\nfrom core.models import Question, Answer, Category\n# Register your models here.\n\nadmin.site.register(Question)\nadmin.site.register(Answer)\nadmin.site.register(Category)\n\n"
},
{
"alpha_fraction": 0.7007575631141663,
"alphanum_fraction": 0.7007575631141663,
"avg_line_length": 21,
"blob_id": "183b0e5a56d989147d3d35eb7d35b4fe6cea4256",
"content_id": "79e5e3d91366550e438b179c7a983ce3b0a8bbbd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 264,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 12,
"path": "/core/forms.py",
"repo_name": "momentum-cohort-2019-05/w8-group-project-2-greg-sarah-benny-meagan",
"src_encoding": "UTF-8",
"text": "from django import forms\n\nfrom core.models import Question, Answer\n\nclass QuestionForm(forms.ModelForm):\n model = Question\n fields = ('title', 'body', 'categories')\n\n\nclass AnswerForm(forms.ModelForm):\n model = Answer\n fields = ('title', 'categories')\n"
},
{
"alpha_fraction": 0.7126530408859253,
"alphanum_fraction": 0.7200000286102295,
"avg_line_length": 33,
"blob_id": "f6f62320a528ba64a1c6e6c3a9e769d688f8c819",
"content_id": "fce3609d9f8f42890c466406b1bf9d79e74b87e0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1225,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 36,
"path": "/core/views.py",
"repo_name": "momentum-cohort-2019-05/w8-group-project-2-greg-sarah-benny-meagan",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render, get_object_or_404\nfrom django.views import generic\nfrom django.http import HttpResponseRedirect\nfrom core.models import Question, Answer\nfrom core.forms import QuestionForm, AnswerForm\nfrom core import views\n# import django.contrib.auth.decorators\n# from django.contrib.auth.decorators import login_required\n\ndef index(request):\n \"\"\"View function for home page of site, which includes a list of all questions.\"\"\"\n\n question_list = Question.objects.all()\n \n context = {\n 'question_list': question_list,\n } \n return render(request, 'index.html', context)\n\ndef question_detail(request, pk):\n \"\"\"Individual question pages, which includes all answers to a given question.\"\"\"\n\n question = get_object_or_404(Question, pk=pk)\n\n return render(request, 'detail.html', {'question':question})\n\ndef questions_by_category(request, category_pk):\n questions_by_category = Question.objects.filter(categories__pk=category_pk)\n category = get_object_or_404(Category, pk=category_pk)\n\n context = {\n 'questions_by_category': questions_by_category,\n 'category': category,\n }\n\n return render(request, 'questions_by_category.html', context)\n\n"
},
{
"alpha_fraction": 0.690119743347168,
"alphanum_fraction": 0.7005987763404846,
"avg_line_length": 33.28205108642578,
"blob_id": "1c3584e7abcf9ec4fdb67451970c4fb20363537a",
"content_id": "7a660136ef0fd1301ae7e6b18db8d85b32f69c43",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1336,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 39,
"path": "/core/models.py",
"repo_name": "momentum-cohort-2019-05/w8-group-project-2-greg-sarah-benny-meagan",
"src_encoding": "UTF-8",
"text": "from django.db import models\nfrom django.urls import reverse\nfrom datetime import date\nfrom django.contrib.auth.models import User\n\n# Create your models here.\n\nclass Question(models.Model):\n \"\"\"Model representing a posted questions.\"\"\"\n title = models.CharField(max_length=100, help_text='Enter a title for your question')\n body = models.TextField(max_length=1000, help_text='Enter details about your question')\n categories = models.ManyToManyField('Category')\n timestamp = models.DateField(auto_now_add=True)\n\n user = models.ForeignKey(to=User, on_delete=models.CASCADE, related_name='question_asker')\n\n def get_absolute_url(self):\n return reverse('questions', args=[str(self.pk)])\n\n class Meta:\n ordering = ['-timestamp']\n\n def __str__(self):\n \"\"\"String for representing the Model object.\"\"\"\n return self.title\n\nclass Answer(models.Model):\n \"\"\"Model representing an answer to a posted question.\"\"\"\n body = models.TextField(max_length=1000, help_text='Enter your answer')\n question = models.ForeignKey(to=Question, on_delete=models.CASCADE)\n\nclass Category(models.Model):\n category_name = models.CharField(max_length=200)\n\n def __str__(self):\n return self.category_name\n\n def get_absolute_url(self):\n return reverse('category', args=[str(self.pk)])"
}
] | 5 |
charles-abehsira/Data-Wrangling-with-Mongo-DB
|
https://github.com/charles-abehsira/Data-Wrangling-with-Mongo-DB
|
9386eb01177d788642de2dee557497202e2c68f3
|
19dd52ab98d4ba2a0b3c67f244999da8d8ad5d78
|
c70f38a6bb840d0e6d46acdbd5890e4bf89835a0
|
refs/heads/master
| 2018-01-05T04:22:18.684303 | 2016-10-16T14:21:00 | 2016-10-16T14:21:00 | 71,055,059 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.774193525314331,
"alphanum_fraction": 0.774193525314331,
"avg_line_length": 30,
"blob_id": "01b9493252018a1856523b01e39689bfd5f79fa5",
"content_id": "9825888bea34f9a7ec59610a8583a2c7686edd04",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 31,
"license_type": "no_license",
"max_line_length": 30,
"num_lines": 1,
"path": "/README.md",
"repo_name": "charles-abehsira/Data-Wrangling-with-Mongo-DB",
"src_encoding": "UTF-8",
"text": "# Data-Wrangling-with-Mongo-DB\n"
},
{
"alpha_fraction": 0.4618072509765625,
"alphanum_fraction": 0.46996021270751953,
"avg_line_length": 31.096572875976562,
"blob_id": "ed5809d1aa5a50b384cf0713029904cf2e7fdbfc",
"content_id": "c14b5052c80e6e6ed434bc79704ba6f16b9c14ba",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10303,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 321,
"path": "/data.py",
"repo_name": "charles-abehsira/Data-Wrangling-with-Mongo-DB",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom pprint import pprint\nimport codecs\nimport json\nimport phonenumbers\nimport re\nimport xml.etree.ElementTree as ET\n\nimport audit\n'''\nBuilds JSON file from OSM. Parses, cleans, and shapes data accordingly.\n'''\n\nDEBUG = False\n\nif DEBUG:\n OSMFILE = 'data/vegas-subset.osm'\nelse:\n OSMFILE = 'data/vegas.osm'\n\nlower = re.compile(r'^([a-z]|_)*$')\nlower_colon = re.compile(r'^([a-z]|_)*:([a-z]|_)*$')\nproblemchars = re.compile(r'[=\\+/&<>;\\'\"\\?%#$@\\,\\. \\t\\r\\n]')\naddress_regex = re.compile(r'^addr\\:')\nstreet_regex = re.compile(r'^street')\ngnis_regex = re.compile(r'^gnis\\:')\n\nCREATED_ATTRIBUTES = ['version', 'changeset', 'timestamp', 'user', 'uid']\nPOSITION_ATTRIBUTES = ['lat', 'lon']\nIGNORED_TAGS = ['name:gv', \n 'name:gl', \n 'name:gn', \n 'wifi', \n 'name:gd', \n 'name:ga', \n 'name:ne', \n 'name:na', \n 'name:no', \n 'name:nn', \n 'name:gu', \n 'name:nl', \n 'boundary:type', \n 'name:cbk-zam', \n 'alt_name:vi', \n 'wetap:statusnote', \n 'name:mwl', \n 'name:sco', \n 'name:ab', \n 'recycling_type', \n 'name:nds-nl', \n 'name:pms', \n 'name:stq', \n 'name:fiu-vro', \n 'drinking_water', \n 'name:ltg', \n 'name:pag', \n 'name:pam', \n 'name:zh-classical', \n 'name:pap', \n 'name:csb', \n 'name:nov', \n 'name:xh', \n 'name:mhr', \n 'contact:phone', \n 'unisex', \n 'sanitary_dump_station:round_drain', \n 'name:zh-min-nan', \n 'name:az', \n 'diet:vegetarian', \n 'name:ay', \n 'road', \n 'plant:output:electricity', \n 'name:pih', \n 'name:jbo', \n 'memorial:type', \n 'cycling', \n 'bitcoin:address', \n 'cycleway:left', \n 'name:zh-yue', \n 'county:right', \n 'name:ksh', \n 'name:nap', \n 'twitter', \n 'protection_title', \n 'vending', \n 'furniture', \n 'name:nds', \n 'name:dsb', \n 'name:chr', \n 'name:chy', \n 'name:dv', \n 'name:dz', \n 'name:mdf', \n 'name:da', \n 'name:kaa', \n 'name:kab', \n 'name:simple', \n 'name:nah', \n 'name:ca', \n 'name:ce', \n 'name:cs', \n 
'name:cu']\n\nALIAS_TAGS = ['name_1', 'old_name', 'alt_name', 'name_2', 'place_name', 'loc_name',\n 'official_name', 'name_3', 'short_name', 'bridge_name']\nZIPCODE_TAGS = ['addr:postcode', 'tiger:zip_left', 'tiger:zip_left_1', 'tiger:zip_left_2',\n 'tiger:zip_left_3', 'tiger:zip_left_4', 'tiger:zip_right', 'tiger:zip_right_1',\n 'tiger:zip_right_2', 'tiger:zip_right_3', 'tiger:zip_right_4']\nMAPPED_TAGS = {'olor': 'color'}\n\ndef shape_element(element):\n node = {}\n created_attributes = CREATED_ATTRIBUTES\n position_attributes = POSITION_ATTRIBUTES\n ignored_tags = IGNORED_TAGS\n alias_tags = ALIAS_TAGS\n zipcode_tags = ZIPCODE_TAGS\n mapped_tags = MAPPED_TAGS\n\n\n if element.tag == 'node' or element.tag == 'way':\n # populate tag type\n node['type'] = element.tag\n\n # initialize specialized combination fields\n address = {}\n zipcodes = set()\n\n # parse through attributes\n for attribute in element.attrib:\n if attribute in created_attributes:\n if 'created' not in node:\n node['created'] = {}\n node['created'][attribute] = element.get(attribute)\n elif attribute in position_attributes:\n continue\n else:\n node[attribute] = element.get(attribute)\n\n # populate position\n if 'lat' in element.attrib and 'lon' in element.attrib:\n node['pos'] = [float(element.get('lat')), float(element.get('lon'))]\n\n # parse second-level tags\n for child in element:\n # parse second-level tags for ways and populate `node_refs`\n if child.tag == 'nd':\n if 'node_refs' not in node:\n node['node_refs'] = []\n if 'ref' in child.attrib:\n node['node_refs'].append(child.get('ref'))\n\n # throw out not-tag elements and elements without `k` or `v`\n if child.tag != 'tag'\\\n or 'k' not in child.attrib\\\n or 'v' not in child.attrib:\n continue\n key = child.get('k').lower()\n val = child.get('v')\n\n # skip problematic characters\n if problemchars.search(key):\n continue\n\n # skip any gnis tags\n if gnis_regex.search(key):\n continue\n\n # skip ignored tags\n if key in 
ignored_tags:\n continue\n\n # swap keys for corrections\n if key in mapped_tags:\n key = mapped_tags[key]\n\n # extract any zip codes\n if key in zipcode_tags:\n for zipcode in process_zipcode(val):\n zipcodes.add(zipcode)\n\n # set all states to TX\n if key == 'addr:state':\n key = 'TX'\n\n # fix and standardize phone numbers using phonenumbers module and list comprehensions\n if key == 'contact:phone':\n phone_number_matches = [m.number for m in phonenumbers.PhoneNumberMatcher(val, \"US\")]\n val = ';'.join([phonenumbers.format_number(phone_number_match,\n phonenumbers.PhoneNumberFormat.NATIONAL)\n for phone_number_match in phone_number_matches])\n\n # parse address k-v pairs\n if address_regex.search(key):\n key = key.replace('addr:', '')\n address[key] = val\n continue\n\n # parse alias tags\n if key in alias_tags:\n if 'aliases' not in node:\n node['aliases'] = {}\n node['aliases'][key] = val\n continue\n\n # parse branched tags\n if ':' in key:\n add_branched_item(key, val, node)\n continue\n\n # catch-all\n if key not in node:\n node[key] = val\n\n # name fallback to aliases in priority order\n if 'name' not in node and 'aliases' in node:\n for alias in alias_tags:\n if alias in node['aliases']:\n node['name'] = alias\n break\n\n # add zipcodes field\n if zipcodes:\n node['zipcodes'] = list(zipcodes)\n\n # compile address\n if len(address) > 0:\n node['address'] = {}\n street_full = None\n street_dict = {}\n street_format = ['prefix', 'name', 'type']\n # parse through address objects\n for key in address:\n val = address[key]\n if street_regex.search(key):\n if key == 'street':\n street_full = audit.clean_street_address(val)\n elif 'street:' in key:\n street_dict[key.replace('street:', '')] = val\n else:\n node['address'][key] = val\n\n # assign street_full or fallback to compile street dict\n if street_full:\n node['address']['street'] = street_full\n elif len(street_dict) > 0:\n unclean_street = ' '.join([street_dict[key] for key in street_format])\n 
node['address']['street'] = audit.clean_street_address(unclean_street)\n\n return node\n else:\n return None\n\ndef add_branched_item(key, val, node):\n \"\"\" \"\"\"\n key_split = key.split(':')\n base = key_split.pop(0)\n remainder = ':'.join(key_split)\n if type(node) == dict:\n if len(key_split) == 0:\n node[base] = val\n else:\n if base not in node:\n node[base] = {}\n add_branched_item(remainder, val, node[base])\n\ndef process_zipcode(string):\n result = []\n groups = [group.strip() for group in string.split(';')]\n for group in groups:\n if re.match(r'\\d{5}\\:\\d{5}', group):\n group_range = map(int, group.split(':'))\n result += list(map(str, range(group_range[0], group_range[1]+1)))\n elif re.match(r'\\d{5}', group):\n result.append(group)\n return result\n\ndef process_map(file_in, pretty=False):\n file_out = '{0}.json'.format(file_in)\n data = []\n debug_counter = 0\n with codecs.open(file_out, 'w') as fo:\n fo.write('[\\n')\n for _, element in ET.iterparse(file_in):\n el = shape_element(element)\n # if el and len(el) > 4:\n if el:\n data.append(el)\n if pretty:\n fo.write(json.dumps(el, indent=2)+',\\n')\n else:\n fo.write(json.dumps(el) + ',\\n')\n debug_counter += 1\n if debug_counter >= 10 and DEBUG:\n break\n fo.write('{}]\\n')\n return data\n\ndef main():\n data = process_map(OSMFILE, pretty=DEBUG)\n # pprint(data)\n\ndef test_branched():\n node = {'tiger': {'zip_left': '43210'}}\n key = 'tiger:zip_right'\n val = '01234'\n add_branched_item(key, val, node)\n pprint(node)\n assert node == {'tiger': {'zip_left': '43210', 'zip_right': '01234'}}\n\ndef test_zipcode():\n string = \"78727; 78727:78729\"\n zipcodes = process_zipcode(string)\n print zipcodes\n assert zipcodes == ['78727', '78727', '78728', '78729']\n\nif __name__ == '__main__':\n # test_branched()\n # test_zipcode()\n main()\n"
},
{
"alpha_fraction": 0.2926079034805298,
"alphanum_fraction": 0.34052595496177673,
"avg_line_length": 30.892473220825195,
"blob_id": "d87fb95f714807a0d87fca82b553e626155b0cf6",
"content_id": "84df1c33b0a435830ae3388bda01811ed09c37f1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 23728,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 744,
"path": "/tags.py",
"repo_name": "charles-abehsira/Data-Wrangling-with-Mongo-DB",
"src_encoding": "UTF-8",
"text": "\"\"\"\nExplores different `tag` contents.\n\"\"\"\nfrom collections import defaultdict\nfrom pprint import pprint\nimport json\nimport xml.etree.cElementTree as ET\n\nfrom mapparser import count_tags\n\nOSMFILE = 'data/vegas.osm'\n\nTAG_KEYS = [('highway', 79791),\n ('name', 46414),\n ('tiger:county', 28184),\n ('tiger:cfcc', 28020),\n ('tiger:name_base', 27178),\n ('tiger:reviewed', 26460),\n ('source', 26303),\n ('tiger:name_type', 25706),\n ('tiger:zip_left', 23700),\n ('tiger:zip_right', 23394),\n ('tiger:tlid', 20628),\n ('tiger:source', 20590),\n ('tiger:separated', 19801),\n ('review', 15055),\n ('footway', 7901),\n ('power', 7870),\n ('natural', 7302),\n ('created_by', 6929),\n ('oneway', 5560),\n ('building', 5149),\n ('color', 4777),\n ('service', 4296),\n ('tiger:name_direction_prefix', 3820),\n ('amenity', 3453),\n ('landuse', 3340),\n ('tiger:upload_uuid', 2858),\n ('noexit', 2781),\n ('bicycle', 2504),\n ('name_1', 2187),\n ('tiger:name_base_1', 2127),\n ('ele', 1941),\n ('gnis:feature_id', 1835),\n ('leisure', 1688),\n ('foot', 1574),\n ('gnis:created', 1555),\n ('ref', 1522),\n ('golf', 1499),\n ('gnis:county_id', 1305),\n ('gnis:state_id', 1305),\n ('access', 1123),\n ('tiger:name_type_1', 1037),\n ('layer', 1032),\n ('lanes', 998),\n ('aeroway', 904),\n ('bridge', 865),\n ('barrier', 806),\n ('surface', 803),\n ('sport', 691),\n ('addr:state', 669),\n ('tiger:mtfcc', 571),\n ('tiger:name_full', 556),\n ('type', 526),\n ('addr:housenumber', 524),\n ('addr:postcode', 516),\n ('hgv', 495),\n ('gnis:county_name', 494),\n ('addr:street', 490),\n ('shop', 474),\n ('maxspeed', 470),\n ('tiger:zip_left_1', 465),\n ('FIXME', 444),\n ('tracktype', 425),\n ('waterway', 400),\n ('addr:city', 399),\n ('leaf_cycle', 386),\n ('leaf_type', 386),\n ('website', 369),\n ('parking', 366),\n ('religion', 359),\n ('segregated', 353),\n ('building:levels', 322),\n ('old_ref', 296),\n ('tourism', 289),\n ('man_made', 281),\n ('railway', 278),\n 
('tiger:zip_right_1', 272),\n ('admin_level', 259),\n ('phone', 259),\n ('boundary', 257),\n ('tiger:name_direction_prefix_1', 250),\n ('gnis:feature_type', 250),\n ('cables', 232),\n ('cuisine', 225),\n ('voltage', 222),\n ('construction', 220),\n ('tunnel', 195),\n ('source_ref', 189),\n ('note', 184),\n ('destination', 174),\n ('place', 173),\n ('junction', 168),\n ('crossing', 157),\n ('frequency', 152),\n ('NHS', 149),\n ('denomination', 148),\n ('water', 144),\n ('gnis:ftype', 141),\n ('gnis:fcode', 138),\n ('electrified', 135),\n ('gauge', 132),\n ('horse', 124),\n ('hgv:national_network', 122),\n ('source:hgv:national_network', 122),\n ('operator', 116),\n ('height', 112),\n ('area', 107),\n ('motor_vehicle', 106),\n ('indoor', 103),\n ('wikipedia', 100),\n ('tiger:zip_left_2', 97),\n ('wires', 92),\n ('opening_hours', 91),\n ('is_in', 89),\n ('nhd:fdate', 88),\n ('nhd:com_id', 88),\n ('nhd-shp:fdate', 85),\n ('nhd-shp:com_id', 85),\n ('tiger:name_base_2', 84),\n ('name_2', 80),\n ('entrance', 79),\n ('gnis:edited', 73),\n ('restriction', 72),\n ('description', 65),\n ('tiger:zip_right_2', 62),\n ('mtb:scale', 61),\n ('fcode', 61),\n ('addr:country', 60),\n ('start_date', 59),\n ('wheelchair', 58),\n ('gnis:id', 53),\n ('level', 53),\n ('shelter_type', 51),\n ('old_name', 49),\n ('designation', 48),\n ('lit', 47),\n ('golf:par', 46),\n ('golf:course', 46),\n ('gnis:Class', 43),\n ('intermittent', 43),\n ('gnis:ST_alpha', 43),\n ('gnis:County', 43),\n ('import_uuid', 43),\n ('payment:bitcoin', 43),\n ('gnis:County_num', 43),\n ('gnis:ST_num', 43),\n ('route', 42),\n ('network', 41),\n ('generator:source', 40),\n ('roof:shape', 40),\n ('tiger:name_type_2', 39),\n ('fee', 38),\n ('capacity', 37),\n ('capacity:disabled', 36),\n ('cycleway', 34),\n ('usage', 34),\n ('building:colour', 34),\n ('roof:colour', 34),\n ('exit_to', 34),\n ('sidewalk', 34),\n ('gnis:name', 33),\n ('source:name', 33),\n ('destination:ref', 32),\n ('building:material', 31),\n 
('building:part', 31),\n ('generator:output:electricity', 28),\n ('adot_name', 28),\n ('rock_type', 27),\n ('addr:housename', 27),\n ('fixme', 27),\n ('note:lanes', 27),\n ('is_in:state', 26),\n ('emergency', 24),\n ('tiger:zip_left_3', 24),\n ('generator:method', 24),\n ('email', 23),\n ('power_source', 23),\n ('historic', 22),\n ('min:height', 22),\n ('addr:county', 21),\n ('is_in:country', 21),\n ('atm', 21),\n ('public_transport', 20),\n ('office', 20),\n ('generator:type', 19),\n ('is_in:state_code', 18),\n ('is_in:country_code', 17),\n ('is_in:iso_3166_2', 17),\n ('tiger:LSAD', 17),\n ('tiger:PCINECTA', 17),\n ('tiger:CPI', 17),\n ('tiger:PLCIDFP', 17),\n ('tiger:PLACEFP', 17),\n ('tiger:STATEFP', 17),\n ('tiger:MTFCC', 17),\n ('tiger:PCICBSA', 17),\n ('tiger:name_direction_suffix', 17),\n ('tiger:NAMELSAD', 17),\n ('sac_scale', 17),\n ('tiger:PLACENS', 17),\n ('tiger:FUNCSTAT', 17),\n ('building:min_level', 17),\n ('tiger:NAME', 17),\n ('tiger:CLASSFP', 17),\n ('hov', 16),\n ('tower:type', 15),\n ('url', 14),\n ('motorcar', 13),\n ('tram', 13),\n ('time', 12),\n ('tiger:zip_right_3', 12),\n ('smoking', 12),\n ('name:de', 12),\n ('covered', 12),\n ('alt_name', 11),\n ('internet_access', 11),\n ('escalator', 11),\n ('wikipedia:en', 11),\n ('fut_ref', 10),\n ('crossing_ref', 10),\n ('tiger:name_base_3', 10),\n ('name:ru', 10),\n ('tiger:name_direction_suffix_1', 10),\n ('park_ride', 10),\n ('cutting', 10),\n ('traffic_calming', 10),\n ('fire_hydrant:type', 9),\n ('tiger:name_direction_prefix_2', 9),\n ('name:en', 9),\n ('closest_town', 8),\n ('brand', 8),\n ('colour', 8),\n ('drive_through', 8),\n ('population', 8),\n ('border_type', 8),\n ('tiger:name_type_3', 8),\n ('symbol', 7),\n ('olor', 7),\n ('width', 7),\n ('clothes', 7),\n ('motorcycle', 7),\n ('exit_to:left', 7),\n ('information', 7),\n ('voltage-high', 7),\n ('roof:material', 7),\n ('fax', 7),\n ('bus', 6),\n ('wetland', 6),\n ('tigis', 6),\n ('exit_to:right', 6),\n ('takeaway', 6),\n 
('place_numbers', 6),\n ('faa', 6),\n ('dispensing', 5),\n ('supervised', 5),\n ('fence_type', 5),\n ('internet_access:fee', 5),\n ('iata', 5),\n ('name:prefix', 5),\n ('name:he', 5),\n ('golf:designer', 5),\n ('name:nv', 4),\n ('FIXME:bicycle', 4),\n ('gambling', 4),\n ('smoothness', 4),\n ('location', 4),\n ('name:yi', 4),\n ('name:ko', 4),\n ('name:ka', 4),\n ('name:hy', 4),\n ('name:zh', 4),\n ('name:ar', 4),\n ('noref', 4),\n ('tower:construction', 4),\n ('name:pl', 4),\n ('name:ja', 4),\n ('is_in:continent', 4),\n ('icao', 4),\n ('variation', 3),\n ('mormone', 3),\n ('name:qu', 3),\n ('incline', 3),\n ('artwork_type', 3),\n ('disused:leisure', 3),\n ('fire_hydrant:position', 3),\n ('abandoned', 3),\n ('source:date', 3),\n ('sources', 3),\n ('name:eo', 3),\n ('name:lt', 3),\n ('tiger:zip_left_4', 3),\n ('wikidata', 3),\n ('name:szl', 3),\n ('name:hi', 3),\n ('name:sr', 3),\n ('mtb:scale:uphill', 3),\n ('name:el', 3),\n ('attraction', 3),\n ('name:ta', 3),\n ('outdoor_seating', 3),\n ('wetap:status', 3),\n ('name:fa', 3),\n ('name:fr', 3),\n ('name:ml', 3),\n ('name:mr', 3),\n ('history', 3),\n ('name1', 3),\n ('name:bn', 3),\n ('census:population', 3),\n ('name:uk', 3),\n ('name:ur', 3),\n ('attribution', 3),\n ('conveying', 2),\n ('shelter', 2),\n ('yelp', 2),\n ('FG:GPS_DATE', 2),\n ('voltage-low', 2),\n ('line', 2),\n ('delivery', 2),\n ('ford', 2),\n ('FG:RTE', 2),\n ('female', 2),\n ('payment:litecoin', 2),\n ('parking:condition:both', 2),\n ('ref:fips', 2),\n ('tiger:zip_right_6', 2),\n ('FG:photo', 2),\n ('ID', 2),\n ('FG:lane_miles', 2),\n ('short_name', 2),\n ('prop_description', 2),\n ('toilets:disposal', 2),\n ('direction', 2),\n ('basin', 2),\n ('name:bg', 2),\n ('name:os', 2),\n ('construction_date', 2),\n ('toilets', 2),\n ('y_coordinate', 2),\n ('FG:rte_description', 2),\n ('name:et', 2),\n ('FG:area', 2),\n ('rating', 2),\n ('x_coordinate', 2),\n ('FG:PROP_NO', 2),\n ('FG:datafile', 2),\n ('FG:route', 2),\n ('channel', 2),\n ('name:hak', 2),\n 
('name:haw', 2),\n ('FG:ORG_CODE', 2),\n ('parking:lane:both', 2),\n ('FG:visitors', 2),\n ('elevft', 2),\n ('level:usage', 2),\n ('name:arc', 2),\n ('FG:perimeter', 2),\n ('building:max_level', 2),\n ('FG:COND_INDEX', 2),\n ('drive_in', 2),\n ('name:te', 2),\n ('name:tl', 2),\n ('name:th', 2),\n ('roof:levels', 2),\n ('district', 2),\n ('male', 2),\n ('ISO3166-2', 2),\n ('site', 2),\n ('nist:state_fips', 2),\n ('nist:fips_code', 2),\n ('strorage', 2),\n ('military', 2),\n ('FG:station', 2),\n ('name:be', 2),\n ('toll', 2),\n ('roof:height', 2),\n ('condition', 2),\n ('housing_type', 2),\n ('name:gv', 1),\n ('name:gl', 1),\n ('name:gn', 1),\n ('wifi', 1),\n ('name:gd', 1),\n ('name:ga', 1),\n ('name:ne', 1),\n ('name:na', 1),\n ('name:no', 1),\n ('name:nn', 1),\n ('name:gu', 1),\n ('name:nl', 1),\n ('boundary:type', 1),\n ('name:cbk-zam', 1),\n ('alt_name:vi', 1),\n ('wetap:statusnote', 1),\n ('name:mwl', 1),\n ('name:sco', 1),\n ('name:ab', 1),\n ('recycling_type', 1),\n ('name:nds-nl', 1),\n ('name:pms', 1),\n ('name:stq', 1),\n ('name:fiu-vro', 1),\n ('drinking_water', 1),\n ('name:ltg', 1),\n ('name:pag', 1),\n ('name:pam', 1),\n ('name:zh-classical', 1),\n ('name:pap', 1),\n ('name:csb', 1),\n ('name:nov', 1),\n ('name:xh', 1),\n ('name:mhr', 1),\n ('contact:phone', 1),\n ('unisex', 1),\n ('sanitary_dump_station:round_drain', 1),\n ('name:zh-min-nan', 1),\n ('name:az', 1),\n ('diet:vegetarian', 1),\n ('name:ay', 1),\n ('road', 1),\n ('plant:output:electricity', 1),\n ('name:pih', 1),\n ('name:jbo', 1),\n ('memorial:type', 1),\n ('cycling', 1),\n ('bitcoin:address', 1),\n ('cycleway:left', 1),\n ('name:zh-yue', 1),\n ('county:right', 1),\n ('name:ksh', 1),\n ('name:nap', 1),\n ('twitter', 1),\n ('protection_title', 1),\n ('vending', 1),\n ('furniture', 1),\n ('name:nds', 1),\n ('name:dsb', 1),\n ('name:chr', 1),\n ('name:chy', 1),\n ('name:dv', 1),\n ('name:dz', 1),\n ('name:mdf', 1),\n ('name:da', 1),\n ('name:kaa', 1),\n ('name:kab', 1),\n ('name:simple', 
1),\n ('name:nah', 1),\n ('name:ca', 1),\n ('name:ce', 1),\n ('name:cs', 1),\n ('name:cu', 1),\n ('name:cv', 1),\n ('name:cy', 1),\n ('old_name:vi', 1),\n ('name:wuu', 1),\n ('name:udm', 1),\n ('name:pdc', 1),\n ('name:fur', 1),\n ('name:be-x-old', 1),\n ('protected', 1),\n ('ownership', 1),\n ('levels', 1),\n ('ISO3166-1:alpha2', 1),\n ('ISO3166-1:alpha3', 1),\n ('name:tzl', 1),\n ('surveillance', 1),\n ('name:ext', 1),\n ('name_alt', 1),\n ('name:vi', 1),\n ('social_facility:for', 1),\n ('beer_garden', 1),\n ('name:koi', 1),\n ('backrest', 1),\n ('name:yo', 1),\n ('name:bar', 1),\n ('name:new', 1),\n ('name:oc', 1),\n ('aerodrome:type', 1),\n ('bicycle_parking', 1),\n ('ref:right', 1),\n ('boat', 1),\n ('name:or', 1),\n ('name:om', 1),\n ('name:sah', 1),\n ('name:bm', 1),\n ('name:hsb', 1),\n ('name:mrj', 1),\n ('name:krc', 1),\n ('building:use', 1),\n ('protect_id', 1),\n ('name:tg', 1),\n ('name:ckb', 1),\n ('name:eml', 1),\n ('name:ro', 1),\n ('name:rn', 1),\n ('name:rm', 1),\n ('name:to', 1),\n ('name:es', 1),\n ('name:eu', 1),\n ('name:lij', 1),\n ('official_name:vi', 1),\n ('operating_days', 1),\n ('name:ee', 1),\n ('wheelchair:description', 1),\n ('contact:fax', 1),\n ('name:co', 1),\n ('name:ks', 1),\n ('name:ln', 1),\n ('name:li', 1),\n ('name:lg', 1),\n ('name:tk', 1),\n ('name:la', 1),\n ('name:lb', 1),\n ('mormon', 1),\n ('name:pfl', 1),\n ('addr:suite', 1),\n ('name:lv', 1),\n ('name:tet', 1),\n ('name:tr', 1),\n ('timezone', 1),\n ('is_in:city', 1),\n ('name:ky', 1),\n ('name:ku', 1),\n ('name:kv', 1),\n ('name:kw', 1),\n ('name:ki', 1),\n ('social_facility', 1),\n ('name:km', 1),\n ('name:kn', 1),\n ('name:ceb', 1),\n ('tiger:name_direction_suffix_2', 1),\n ('name:ang', 1),\n ('name:wo', 1),\n ('name:pcd', 1),\n ('ref:left', 1),\n ('name:wa', 1),\n ('name:xal', 1),\n ('name:min', 1),\n ('name:roa-tara', 1),\n ('name:rw', 1),\n ('addr:interpolation', 1),\n ('name:lmo', 1),\n ('name:srn', 1),\n ('name:hr', 1),\n ('name:hu', 1),\n ('name:ht', 1),\n 
('name:io', 1),\n ('name:bpy', 1),\n ('name:ha', 1),\n ('artist_name', 1),\n ('antiques', 1),\n ('short_name:vi', 1),\n ('name:id', 1),\n ('name:ie', 1),\n ('owner', 1),\n ('wetap:bottle', 1),\n ('name:diq', 1),\n ('name:myv', 1),\n ('name:tpi', 1),\n ('name:ia', 1),\n ('name:xmf', 1),\n ('organic', 1),\n ('name:vls', 1),\n ('name:sq', 1),\n ('name:ss', 1),\n ('name:su', 1),\n ('name:sv', 1),\n ('name:sw', 1),\n ('name:arz', 1),\n ('name:sa', 1),\n ('name:se', 1),\n ('name:sh', 1),\n ('name:war', 1),\n ('isced:level', 1),\n ('name:sl', 1),\n ('name:sm', 1),\n ('name:sn', 1),\n ('name:so', 1),\n ('name:ilo', 1),\n ('name:za', 1),\n ('name:zu', 1),\n ('name:af', 1),\n ('color_1', 1),\n ('payment:credit_cards', 1),\n ('name:an', 1),\n ('name:am', 1),\n ('name:av', 1),\n ('name:as', 1),\n ('name:gan', 1),\n ('name_3', 1),\n ('odbl', 1),\n ('addr:door', 1),\n ('bar', 1),\n ('name:fy', 1),\n ('name:lo', 1),\n ('name:tn', 1),\n ('name:tt', 1),\n ('name:tw', 1),\n ('name:ts', 1),\n ('shoes', 1),\n ('name:ty', 1),\n ('name:mzn', 1),\n ('name:glk', 1),\n ('name:zea', 1),\n ('name:vo', 1),\n ('name:nso', 1),\n ('name:fo', 1),\n ('name:hif', 1),\n ('name:fi', 1),\n ('name:ff', 1),\n ('name:mk', 1),\n ('name:mi', 1),\n ('name:mn', 1),\n ('name:mg', 1),\n ('name:lad', 1),\n ('name:my', 1),\n ('name:ms', 1),\n ('name:mt', 1),\n ('name:ace', 1),\n ('theatre:genre', 1),\n ('name:bxr', 1),\n ('room', 1),\n ('name:vec', 1),\n ('flag', 1),\n ('name:vep', 1),\n ('ISO3166-1', 1),\n ('name:scn', 1),\n ('name:rue', 1),\n ('name:cdo', 1),\n ('FIXME:name', 1),\n ('FIXME:ref', 1),\n ('source:gate', 1),\n ('name:pa', 1),\n ('ISO3166-1:numeric', 1),\n ('name:ps', 1),\n ('name:pt', 1),\n ('name:crh', 1),\n ('name:jv', 1),\n ('name:bcl', 1),\n ('name:frp', 1),\n ('name:kbd', 1),\n ('modifier', 1),\n ('microbrewery', 1),\n ('name:ba', 1),\n ('address', 1),\n ('name:bi', 1),\n ('name:bo', 1),\n ('name:bs', 1),\n ('name:br', 1),\n ('destination:ref:to', 1),\n ('name:pnb', 1),\n ('name:sc', 1),\n 
('name:sd', 1),\n ('store_number', 1),\n ('name:nrm', 1),\n ('name:sg', 1),\n ('name:lbe', 1),\n ('name:si', 1),\n ('name:sk', 1),\n ('name:it', 1),\n ('name:iu', 1),\n ('name:is', 1),\n ('name:ik', 1),\n ('name:ig', 1),\n ('name:lez', 1),\n ('dir', 1),\n ('bench', 1),\n ('name:map-bms', 1),\n ('name:ug', 1),\n ('name:als', 1),\n ('name:kk', 1),\n ('name:uz', 1),\n ('name:kl', 1),\n ('name:frr', 1),\n ('name:ast', 1),\n ('name:bat-smg', 1),\n ('name:gag', 1),\n ('Loading Docks 2F-2H', 1)]\n\n\ndef examine_tags(osmfile, tag_range, item_limit):\n assert len(tag_range) == 2\n # use pre-loaded tag_keys list of tuples, if exists\n if TAG_KEYS:\n tag_keys = TAG_KEYS\n # else call mapparser count_tags method to pull sorted list of top tags\n else:\n _, tag_keys = count_tags(osmfile)\n # list comprehension for producing a list of tag_keys in string format\n tag_keys = [tag_key[0] for tag_key in tag_keys][tag_range[0]:tag_range[1]]\n print \"Examining tag keys: {}\".format(tag_keys)\n\n # open osm file\n osm_file = open(osmfile, \"r\")\n\n # initialize data with default set data structure\n data = defaultdict(set)\n\n # iterate through elements\n for _, elem in ET.iterparse(osm_file, events=(\"start\",)):\n # check if the element is a node or way\n if elem.tag == \"node\" or elem.tag == \"way\":\n # iterate through children matching `tag`\n for tag in elem.iter(\"tag\"):\n # skip if does not contain key-value pair\n if 'k' not in tag.attrib or 'v' not in tag.attrib:\n continue\n key = tag.get('k')\n val = tag.get('v')\n # add to set if in tag keys of interest and is below the item limit\n if key in tag_keys and len(data[key]) < item_limit:\n data[key].add(val)\n return data\n\ndef main(tag_range=(0, 10), item_limit=10):\n # call examine_tags fucntion\n tag_data = dict(examine_tags(OSMFILE, tag_range, item_limit))\n\n # convert sets to JSON-read/writeable format (list)\n for key in tag_data:\n tag_data[key] = list(tag_data[key])\n\n # write to file\n json.dump(tag_data, 
open(OSMFILE + '-tag-data.json', 'w'))\n\n # pretty print\n pprint(tag_data)\n\n # return data\n return tag_data\n\nif __name__ == '__main__':\n main(tag_range=(0, -1), item_limit=20)\n"
}
] | 3 |
Weixx/python_exercise1
|
https://github.com/Weixx/python_exercise1
|
f1d9ae5659b32f67a010dcf8d02250a952f1d7f9
|
1501a68a1493acb690bafc5ad0ffb02bd561024c
|
66ef59e18d18a336464a54ed61b9731584055e37
|
refs/heads/master
| 2015-08-22T23:43:59 | 2015-04-11T11:30:05 | 2015-04-11T11:30:05 | 33,438,120 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6215469837188721,
"alphanum_fraction": 0.6215469837188721,
"avg_line_length": 18.052631378173828,
"blob_id": "ede16ed64a0f78c1218cc7ecea3ba930983114a4",
"content_id": "7782967e1ba53a7fe28aae625c046b16bfbbeeb2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 724,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 38,
"path": "/Bet.py",
"repo_name": "Weixx/python_exercise1",
"src_encoding": "UTF-8",
"text": "\"\"\"\n\nCommon base class for all bets\n\nThis class defines all attributes of a French-Roulette bet.\n\n\"\"\"\n\nclass Bet:\n\n type = None\n amount = None\n optional = None\n\n # Constructor\n def __init__(self, type, amount, optional=None):\n self.type = type\n self.amount = amount\n self.optional = optional\n\n # Getters, Setters and CRUD-Functionalities\n def getType(self):\n return self.type\n\n def getAmount(self):\n return self.amount\n\n def getOptional(self):\n return self.optional\n\n def setType(self, type):\n self.type = type\n\n def setAmount(self, amount):\n self.amount = amount\n\n def setOptional(self, optional):\n self.optional = optional\n"
},
{
"alpha_fraction": 0.7243902683258057,
"alphanum_fraction": 0.7243902683258057,
"avg_line_length": 18.4761905670166,
"blob_id": "850f75b5a5af64a001ef7ffce3508a2a8de47a6a",
"content_id": "f7d229710b31984ba20555600316ee7a21106f1e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 410,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 21,
"path": "/FrenchRoulette.py",
"repo_name": "Weixx/python_exercise1",
"src_encoding": "UTF-8",
"text": "\"\"\"\n\nMain-Module of the Game 'French-Roulette'\n\nInitializes the classes 'Croupier.py' and 'Roulette.py'\n\n\"\"\"\n\nfrom Roulette import *\nfrom Croupier import *\n\n# Header\nprint(\"\\n\\ \\-- French Roulette --/ /\")\nprint(\"Either follow instructions or type 'quit' to end the game\")\n\n\n# Register Roulette-Table\nrouletteTable = Roulette(\"frenchRoulette\")\n\n# Register Croupier\ncroupier = Croupier(\"Pierre\", rouletteTable)\n\n"
},
{
"alpha_fraction": 0.5106927752494812,
"alphanum_fraction": 0.5279805064201355,
"avg_line_length": 25.20469856262207,
"blob_id": "bba5315f8880627e4f9b7e7fa4bc79aaf733f5f9",
"content_id": "515e422dc60c69589847b131f96028f7d198c4a8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7809,
"license_type": "no_license",
"max_line_length": 151,
"num_lines": 298,
"path": "/Roulette.py",
"repo_name": "Weixx/python_exercise1",
"src_encoding": "UTF-8",
"text": "\"\"\"\n\nCommon base class for a new roulette table\n\nThis class holds all information regarding bet types and is responsible for the determination of a random winning number and evaluation of the results.\n\n\"\"\"\n\nimport random\n\nclass Roulette:\n\n winnigNumber = None\n users = []\n\n rouletteInformationList = [\"Straight (or Single) a bet on a single number [35 to 1]\",\n \"1 to 18 (Manque) a bet on one of the first 18 numbers. [1 to 1]\",\n \"19 to 36 (Passe) a bet on the high 18 numbers. [1 to 1]\",\n \"Red or Black (Rouge ou Noir) a bet on which color the roulette wheel will show. [1 to 1]\",\n \"Even or odd (Pair ou Impair) a bet on even or odd nonzero number. [1 to 1]\",\n \"Dozen Bets a bet on the first 12 (1-12), second 12 (13-24) or third 12 (25-36) numbers. [2 to 1]\",\n \"Column Bets a bet on one of the three vertical lines e.g.: 1-4-7-10 . . . [2 to 1]\"]\n\n rouletteInformationDict = {1: [\"Straight (or Single)\", 35],\n 2: [\"1 to 18 (Manque)\", 1],\n 3: [\"19 to 36 (Passe)\", 1],\n 4: [\"Red or Black (Rouge ou Noir)\", 1],\n 5: [\"Even or odd (Pair ou Impair)\", 1],\n 6: [\"Dozen Bets\", 2],\n 7: [\"Column Bets\", 2]}\n\n # Constructor\n def __init__(self, id):\n self.id = id\n\n # Getters, Setters and CRUD-Functionalities\n def getUsers(self):\n return self.users\n\n def addUser(self, user):\n self.users.append(user)\n\n def getUsers(self):\n return self.users\n\n def setUsers(self, users):\n self.users = users\n\n def deleteUser(self, name):\n self.users[self.users.index(name)]\n\n def getRouletteInformatonList(self):\n return self.rouletteInformationList\n\n def getRouletteInformatonDict(self):\n return self.rouletteInformationDict\n\n def getRandomWinningNumber(self):\n return self.winnigNumber\n\n\n def evaluateRandomWinningNumber(self):\n \"\"\"\n Evaluation of random winning number\n\n Parameters\n ----------\n None\n\n Returns\n -------\n None\n\n \"\"\"\n\n self.winnigNumber = random.randint(0, 36)\n\n def 
evaluateColorOfWinningNumber(self, number):\n \"\"\"\n Evaluation of the color of the winning number\n\n Parameters\n ----------\n number: int\n random winning number\n\n Returns\n -------\n \"black\" or \"white\": str\n\n \"\"\"\n\n if number % 2 == 0:\n return \"black\"\n else:\n return \"red\"\n\n def evaluateEvenOrOddOfWinningNumber(self, number):\n \"\"\"\n Evaluation either the winning number is even or odd\n\n Parameters\n ----------\n number: int\n random winning number\n\n Returns\n -------\n \"even\" or \"odd\": str\n\n \"\"\"\n\n if number % 2 == 0:\n return \"even\"\n else:\n return \"odd\"\n\n def evaluateDozenOfWinningNumber(self, number):\n \"\"\"\n Evaluation of the dozen the winning number is implied\n\n Parameters\n ----------\n number: int\n random winning number\n\n Returns\n -------\n \"first\" or \"second\" or \"third\": str\n\n \"\"\"\n\n if(number == range(1,12)):\n return \"first\"\n if(number == range(13,24)):\n return \"second\"\n if(number == range(25,26)):\n return \"third\"\n\n def evaluateColumnOfWinningNumber(self, number):\n \"\"\"\n Evaluation of the column the winning number is implied\n\n Parameters\n ----------\n number: int\n random winning number\n\n Returns\n -------\n \"first\" or \"second\" or \"third\": str\n\n \"\"\"\n\n if(number == range(1,34,3)):\n return \"first\"\n if(number == range(2,35,3)):\n return \"second\"\n if(number == range(3,36,3)):\n return \"third\"\n\n\n def checkBetsOfUsers(self, users):\n \"\"\"\n Checks the result of all bets of all users\n\n Parameters\n ----------\n users: list\n a list of all users with the registered bets\n\n Returns\n -------\n None\n\n \"\"\"\n\n for user in users:\n winOfBetsPerUser = self.checkBetsOfUser(user.getBets())\n user.setBudget(user.getBudget() + winOfBetsPerUser)\n print(\"User {} : total win/loss: {} [capital: {}]\".format(user.getName(), winOfBetsPerUser, user.getBudget()))\n\n def checkBetsOfUser(self, bets):\n \"\"\"\n Checks the result of all bets of a single 
user\n\n Parameters\n ----------\n bets: list\n a list of all bets per user\n\n Returns\n -------\n winOfBets: float\n total win of all bets\n\n \"\"\"\n\n winOfBets = float()\n for bet in bets:\n winOfBet = self.checkWinOfBet(bet)\n winOfBets += winOfBet\n\n return winOfBets\n\n def checkWinOfBet(self, bet):\n \"\"\"\n Checks the result of a single bet of a single user.\n Determines win/loss of each user\n\n Parameters\n ----------\n bet: bet\n a bet defined by Bet.py\n\n Returns\n -------\n winOfBet: float\n win of a single bet\n\n \"\"\"\n\n winOfBet = None\n winningNumber = self.getRandomWinningNumber()\n type = bet.getType()\n optional = bet.getOptional()\n amount = bet.getAmount()\n\n if type == self.rouletteInformationDict[1][0]:\n if winningNumber == optional:\n winOfBet = amount * self.rouletteInformationDict[1][1]\n else:\n winOfBet = -amount\n if type == self.rouletteInformationDict[2][0]:\n if 1 <= winningNumber <= 18:\n winOfBet = amount * self.rouletteInformationDict[2][1]\n else:\n winOfBet = -amount\n if type == self.rouletteInformationDict[3][0]:\n if 19 <= winningNumber <= 36:\n winOfBet = amount * self.rouletteInformationDict[3][1]\n else:\n winOfBet = -amount\n if type == self.rouletteInformationDict[4][0]:\n if self.evaluateColorOfWinningNumber(winningNumber) == optional:\n winOfBet = amount * self.rouletteInformationDict[4][1]\n else:\n winOfBet = -amount\n if type == self.rouletteInformationDict[5][0]:\n if self.evaluateEvenOrOddOfWinningNumber(winningNumber) == optional:\n winOfBet = amount * self.rouletteInformationDict[5][1]\n else:\n winOfBet = -amount\n if type == self.rouletteInformationDict[6][0]:\n if self.evaluateDozenOfWinningNumber(winningNumber) == optional:\n winOfBet = amount * self.rouletteInformationDict[6][1]\n else:\n winOfBet = -amount\n if type == self.rouletteInformationDict[7][0]:\n if self.evaluateColumnOfWinningNumber(winningNumber) == optional:\n winOfBet = amount * self.rouletteInformationDict[7][1]\n else:\n 
winOfBet = -amount\n\n return float(winOfBet)\n\n\n def spin(self):\n \"\"\"\n 'Spins' the table and evaluates a random winning number\n\n Parameters\n ----------\n None\n\n Returns\n -------\n None\n\n \"\"\"\n\n self.evaluateRandomWinningNumber()\n\n def getSpinResult(self):\n \"\"\"\n Checks the result of a 'spin'. Determines win/loss of all bets for all players\n\n Parameters\n ----------\n None\n\n Returns\n -------\n None\n\n \"\"\"\n\n self.checkBetsOfUsers(self.getUsers())\n"
},
{
"alpha_fraction": 0.5972644090652466,
"alphanum_fraction": 0.5972644090652466,
"avg_line_length": 18.264705657958984,
"blob_id": "214a687b695c007f3230d226f6e4df156e4d627e",
"content_id": "4bf271cdcbd6b03c951becffc1fb03b8e6e5690e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 658,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 34,
"path": "/User.py",
"repo_name": "Weixx/python_exercise1",
"src_encoding": "UTF-8",
"text": "\"\"\"\n\nCommon base class for all users\n\nThis class defines all attributes of a French-Roulette player.\n\n\"\"\"\n\nclass User:\n\n name = None\n budget = None\n\n # Constructor\n def __init__(self, name, budget):\n self.name = name\n self.budget = budget\n self.bets = []\n\n # Getters, Setters and CRUD-Functionalities\n def getName(self):\n return self.name\n\n def getBudget(self):\n return self.budget\n def setBudget(self, budget):\n self.budget = budget\n\n def getBets(self):\n return self.bets\n def addBet(self, bet):\n self.bets.append(bet)\n def deleteBets(self):\n self.bets = []\n\n\n\n"
},
{
"alpha_fraction": 0.4064277708530426,
"alphanum_fraction": 0.41019579768180847,
"avg_line_length": 36.18681335449219,
"blob_id": "701333b0963b7c43d371bac9c03a69ee0a46d798",
"content_id": "c6f706a00199d596738181dacd0a1143c1cbaf4f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13535,
"license_type": "no_license",
"max_line_length": 147,
"num_lines": 364,
"path": "/Croupier.py",
"repo_name": "Weixx/python_exercise1",
"src_encoding": "UTF-8",
"text": "\"\"\"\n\nCommon base class for a new croupier\n\nThis class is responsible for all user requests and responds\n\n\"\"\"\n\nimport time\nfrom User import *\nfrom Roulette import *\nfrom Bet import *\n\nclass Croupier:\n\n name = None\n rouletteTable = None\n usersList = None\n\n # Constructor\n def __init__(self, name, rouletteTable):\n self.name = name\n self.rouletteTable = rouletteTable\n self.usersList = self.registerUsers()\n\n countRound = 1\n while True:\n print(\"\\n\\n{}. Round - Lets play!\".format(countRound))\n self.setBetsProcess()\n self.spinTable()\n self.presentResult()\n self.cleanUpUsers()\n\n countRound += 1\n\n # Quit-Method\n def quit(self):\n print(\"\\nBye Bye\")\n exit()\n\n # Register Users\n def registerUsers(self):\n \"\"\"\n Responsible for user registration\n\n Parameters\n ----------\n None\n\n Returns\n -------\n usersList: list\n a list of all registered users\n \"\"\"\n\n usersList = []\n\n while True:\n try:\n input = raw_input(\"\\nRegister user-name and capital (e.g.: user1, 1000): \")\n input.strip()\n if input == \"quit\":\n self.quit()\n else:\n inputArr = input.split(\",\")\n if len(inputArr) == 2:\n name = str(inputArr[0].strip())\n budget = float(inputArr[1].strip())\n user = User(name, budget)\n usersList.append(user)\n else:\n print(\"\\nYou did not input the correct amount of parameters! (e.g.: user1, 1000)\")\n continue\n\n outOfLoop = False\n while True:\n another = raw_input(\"\\nRegister another user? 
(y or n): \")\n another.strip()\n if another == \"quit\":\n self.quit()\n else:\n if another == \"y\":\n break\n elif another == \"n\":\n outOfLoop = True\n break\n else:\n print(\"Could not read your input.\")\n continue\n\n if outOfLoop:\n break\n\n except ValueError:\n print(\"Could not read your input.\")\n\n return usersList\n\n\n # Register strategies\n def setBetsProcess(self):\n \"\"\"\n Responsible for the registration of all bets for the respective user\n\n Parameters\n ----------\n None\n\n Returns\n -------\n None\n\n \"\"\"\n\n # List registered Users\n print(\"\\nRegistered Users:\")\n for user in self.usersList:\n print(\"Name: {}, Capital: {}\".format(user.getName(), user.getBudget()))\n\n # List strategies\n print(\"\\nPlease select one of the following strategies:\")\n for i, strategyOutput in enumerate(self.rouletteTable.rouletteInformationList):\n print(\"{}. {}.\".format(i+1, strategyOutput))\n\n for user in self.usersList:\n totalAmountSet = 0\n while True:\n bet = Bet(None, None, None)\n try:\n # Register strategy\n input = raw_input(\"\\n{}, which strategy do you want to choose? 
[e.g.: 1]: \".format(user.getName()))\n input.strip()\n if input == \"quit\":\n self.quit()\n else:\n if 1 <= int(input) <= 7:\n strategy = self.rouletteTable.getRouletteInformatonDict()[int(input)][0]\n bet.setType(strategy)\n else:\n print(\"Please choose a number between >= 1 and <= 7\")\n continue\n\n # Register strategy 1: number\n if strategy == self.rouletteTable.getRouletteInformatonDict()[1][0]:\n input = raw_input(\"\\nPlease place your bet number (e.g.: 1): \")\n input.strip()\n if input == \"quit\":\n self.quit()\n else:\n if 0 <= int(input) <= 36:\n optional = int(input)\n bet.setOptional(optional)\n else:\n print(\"I do not understand your bet number.\")\n continue\n\n # Register strategy 4: color\n if strategy == self.rouletteTable.getRouletteInformatonDict()[4][0]:\n input = raw_input(\"\\nPlease place your bet color (red or black): \")\n input.strip()\n if input == \"quit\":\n self.quit()\n else:\n if input == \"red\" or input == \"black\":\n optional = input\n bet.setOptional(optional)\n else:\n print(\"I do not understand your bet color.\")\n continue\n\n # Register strategy 5: even or odd\n if strategy == self.rouletteTable.getRouletteInformatonDict()[5][0]:\n input = raw_input(\"\\nPlease place your bet (even or odd): \")\n input.strip()\n if input == \"quit\":\n self.quit()\n else:\n if input == \"even\" or input == \"odd\":\n optional = input\n bet.setOptional(optional)\n else:\n print(\"I do not understand your bet (even or odd).\")\n continue\n\n # Register strategy 6: dozen\n if strategy == self.rouletteTable.getRouletteInformatonDict()[6][0]:\n input = raw_input(\"\\nPlease place your dozen bet (first, second or third): \")\n input.strip()\n if input == \"quit\":\n self.quit()\n else:\n if input == \"first\" or input == \"second\" or input == \"third\":\n optional = input\n bet.setOptional(optional)\n else:\n print(\"I do not understand your dozen bet.\")\n continue\n\n # Register strategy 7: column\n if strategy == 
self.rouletteTable.getRouletteInformatonDict()[7][0]:\n input = raw_input(\"\\nPlease place your column bet (first, second or third): \")\n input.strip()\n if input == \"quit\":\n self.quit()\n else:\n if input == \"first\" or input == \"second\" or input == \"third\":\n optional = input\n bet.setOptional(optional)\n else:\n print(\"I do not understand your column bet.\")\n continue\n\n\n # Register amount\n input = raw_input(\"\\nPlease place your input amount |{} left to bet|: \".format(user.getBudget()-totalAmountSet))\n input.strip()\n if input == \"quit\":\n self.quit()\n else:\n amount = float(input)\n if amount > user.getBudget() or (totalAmountSet+amount) > user.getBudget():\n print(\"{}, you do not have enough budget to set this amount.\".format(user.getName()))\n continue\n else:\n amount = float(amount)\n bet.setAmount(amount)\n totalAmountSet += amount\n\n user.addBet(bet)\n\n # Determine if optional information (according to bet type) is necessary\n if bet.getOptional() is None:\n print(\"\\n{}, you have choosen strategy '{}' and set an amount of '{}'.\".format(\n user.getName(), strategy, amount)\n )\n else:\n print(\"\\n{}, you have choosen strategy '{}' and set an amount of '{}' on '{}'.\".format(\n user.getName(), strategy, amount, optional)\n )\n\n outOfLoop = False\n while True:\n another = raw_input(\"\\nRegister another bet? 
|{} left to bet| (y or n): \".format(user.getBudget()-totalAmountSet))\n another.strip()\n if another == \"quit\":\n self.quit()\n else:\n if another == \"y\":\n if user.getBudget()-totalAmountSet > 0:\n break\n else:\n print(\"{}, sorry - you do not have any more budget.\".format(user.getName()))\n outOfLoop = True\n break\n elif another == \"n\":\n outOfLoop = True\n break\n else:\n print(\"Could not read your input\")\n continue\n\n if outOfLoop:\n break\n\n except ValueError:\n print(\"Could not read your input.\")\n\n\n # List registered bets per user\n print(\"\\nThe following bets were set:\")\n for user in self.usersList:\n print(\"\\n{}:\".format(user.getName()))\n for bet in user.getBets():\n if bet.getOptional() == None:\n print(\"an amount of {} was set on strategy '{}'\".format(\n bet.getAmount(), bet.getType())\n )\n else:\n print(\"an amount of {} was set on strategy '{}' on {}\".format(\n bet.getAmount(), bet.getType(), bet.getOptional())\n )\n\n def spinTable(self):\n \"\"\"\n 'Spins' the table, registers all users (with bets) and determines random winning number\n\n Parameters\n ----------\n None\n\n Returns\n -------\n None\n\n \"\"\"\n\n self.rouletteTable.setUsers(self.usersList)\n self.rouletteTable.spin()\n print(\"\\nNo more bets please, the sphere is on the table...\")\n time.sleep(3)\n print(\"\\n.. 
sphere rolling ..\")\n time.sleep(3)\n winningNumber = self.rouletteTable.getRandomWinningNumber()\n if winningNumber != 0:\n print(\"\\nThe sphere stopped at: {} ({}).\".format(winningNumber, self.rouletteTable.evaluateColorOfWinningNumber(winningNumber)))\n else:\n print(\"\\nThe sphere stopped at: {}.\\n\".format(winningNumber))\n\n def presentResult(self):\n \"\"\"\n Determines the results and shows it to the user\n\n Parameters\n ----------\n None\n\n Returns\n -------\n None\n\n \"\"\"\n\n self.rouletteTable.getSpinResult()\n\n def cleanUpUsers(self):\n \"\"\"\n Checks the current state of the users and allows users to buy in again\n\n Parameters\n ----------\n None\n\n Returns\n -------\n None\n\n \"\"\"\n\n for user in self.usersList:\n user.deleteBets()\n if user.getBudget() == 0:\n while True:\n try:\n input = raw_input(\"\\n{}, sorry - you do not have any budget left! Do you want to buy in? (y or n) \".format(user.getName()))\n input.strip()\n if input == \"quit\":\n self.quit()\n else:\n if input == \"y\":\n input = raw_input(\"\\nWhat should your budget be? (e.g.: 5000)? \")\n budget = float(input)\n user.setBudget(budget)\n break\n elif input == \"n\":\n self.usersList.pop(self.usersList.index(user))\n if not self.usersList:\n print(\"\\nNo users left at the table.\")\n self.quit()\n break\n else:\n print(\"Could not read your input\")\n continue\n except ValueError:\n print(\"Could not read your input.\")"
}
] | 5 |
Sarahkel/dataRepresentation
|
https://github.com/Sarahkel/dataRepresentation
|
e5ac811c5c1ad2c1cb094758c39884413e5da536
|
2ad2c6922755dd06275a33e91878aae67fa81536
|
fa6b8156538bc48e7c0d6028e20d2c7f90c1f030
|
refs/heads/master
| 2020-08-07T14:06:04.008216 | 2019-10-20T16:29:19 | 2019-10-20T16:29:19 | 213,481,372 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6270096302032471,
"alphanum_fraction": 0.6334404945373535,
"avg_line_length": 26,
"blob_id": "4820661aeb4e12a196187b62cba09c0ee5dd2293",
"content_id": "4bd76c8b3f3ce206a2b12c382dff180fffd84b72",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 622,
"license_type": "permissive",
"max_line_length": 100,
"num_lines": 23,
"path": "/week03/PY05-readFileFinal.py",
"repo_name": "Sarahkel/dataRepresentation",
"src_encoding": "UTF-8",
"text": "#Lab 3, GMIT, Sarah Scholz\n\nimport requests\nimport csv\nfrom bs4 import BeautifulSoup\nwith open(\"../cars.html\") as fp:\n soup = BeautifulSoup(fp, 'html.parser')\n\nemployee_file = open('week02data.csv', mode = 'w')\nemployee_writer = csv.writer(employee_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n\nrows = soup.findAll(\"tr\")\nfor row in rows: \n cols = row.findAll(\"td\")\n dataList = []\n for col in cols: \n if (col.text == \"update\") or (col.text == \"delete\"):\n continue\n else: \n dataList.append(col.text)\n employee_writer.writerow(dataList)\n\nemployee_file.close()\n\n"
}
] | 1 |
valliappan617/Python
|
https://github.com/valliappan617/Python
|
4be433bcc0ba945ff5a2234778e7457e841e638d
|
162a1a3590bae179f435b79ed5a8f566d9a8088b
|
0deaefed2ca072330632934879a98e06cd124536
|
refs/heads/master
| 2020-04-04T12:10:40.435311 | 2019-02-17T01:34:28 | 2019-02-17T01:34:28 | 155,916,852 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6003643870353699,
"alphanum_fraction": 0.6352869868278503,
"avg_line_length": 34.021278381347656,
"blob_id": "57d23366bb18597556629286ab061953f743330e",
"content_id": "5e4ec04b799241bf80ab25a96a39a26259437180",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3299,
"license_type": "no_license",
"max_line_length": 580,
"num_lines": 94,
"path": "/val.py",
"repo_name": "valliappan617/Python",
"src_encoding": "UTF-8",
"text": "print(4+8)\na = [1, 5, 8]\nb = [2, 6, 9, 10]\nc = [100, 200]\n\nprint(max(a))\n\nprint(max([len(a), len(b), len(c)]))\nprint(min([len(a), len(b), len(c)]))\n\nnames = [\"Carol\", \"Albert\", \"Ben\", \"Donna\"]\nprint(\" & \".join(sorted(names)))\nprint(\"-\".join(sorted(names)))\n\nnames = [\"Carol\", \"Albert\", \"Ben\", \"Donna\"]\nnames.append(\"Eugenia\")\nprint(sorted(names))\n\nnumbers = [1, 2, 6, 3, 1, 1, 6]\nunique_nums = set(numbers)\nprint(unique_nums)\n\nfruit = {\"apple\", \"banana\", \"orange\", \"grapefruit\"} # define a set\n\nprint(\"watermelon\" in fruit) # check for element\n\nfruit.add(\"watermelon\") # add an element\nprint(fruit)\n\nprint(fruit.pop()) # remove a random element\nprint(fruit)\n\n# Define a Dictionary, population,\n# that provides information\n# on the world's largest cities.\n# The key is the name of a city\n# (a string), and the associated\n# value is its population in\n# millions of people.\n\n# Key | Value\n# Shanghai | 17.8\n# Istanbul | 13.3\n# Karachi | 13.0\n# Mumbai | 12.5\n\nCities = {'Shanghai': 17.8, 'Istanbul':13.3, 'Karachi':13.0, 'Mumbai':12.5}\nprint(Cities)\nprint(type(Cities))\n\nelements = {'hydrogen': {'number': 1, 'weight': 1.00794, 'symbol': 'H'},\n 'helium': {'number': 2, 'weight': 4.002602, 'symbol': 'He'}}\nelements ['hydrogen']['is_noble_gas'] = False\nelements ['helium']['is_noble_gas'] = True\nprint(elements['hydrogen']['is_noble_gas'])\nprint(elements['helium']['is_noble_gas'])\n\n# todo: Add an 'is_noble_gas' entry to the hydrogen and helium dictionaries\n# hint: helium is a noble gas, hydrogen isn't\n\nverse = \"if you can keep your head when all about you are losing theirs and blaming it on you if you can trust yourself when all men doubt you but make allowance for their doubting too if you can wait and not be tired by waiting or being lied about don’t deal in lies or being hated don’t give way to hating and yet don’t look too good nor talk too wise\"\nprint(verse, '\\n')\n\n# split verse 
into list of words\nverse_list = verse.split()\nprint(verse_list, '\\n')\n\n# convert list to a data structure that stores unique elements\nverse_set = set(verse_list)\nprint(verse_set, '\\n')\n\n# print the number of unique words\nnum_unique = len(verse_set)\nprint(num_unique, '\\n')\n\nverse_dict = {'if': 3, 'you': 6, 'can': 3, 'keep': 1, 'your': 1, 'head': 1, 'when': 2, 'all': 2, 'about': 2, 'are': 1, 'losing': 1, 'theirs': 1, 'and': 3, 'blaming': 1, 'it': 1, 'on': 1, 'trust': 1, 'yourself': 1, 'men': 1, 'doubt': 1, 'but': 1, 'make': 1, 'allowance': 1, 'for': 1, 'their': 1, 'doubting': 1, 'too': 3, 'wait': 1, 'not': 1, 'be': 1, 'tired': 1, 'by': 1, 'waiting': 1, 'or': 2, 'being': 2, 'lied': 1, 'don\\'t': 3, 'deal': 1, 'in': 1, 'lies': 1, 'hated': 1, 'give': 1, 'way': 1, 'to': 1, 'hating': 1, 'yet': 1, 'look': 1, 'good': 1, 'nor': 1, 'talk': 1, 'wise': 1}\nprint(verse_dict, '\\n')\n\n# find number of unique keys in the dictionary\nnum_keys = len(verse_dict)\nprint(num_keys)\n\n# find whether 'breathe' is a key in the dictionary\ncontains_breathe = \"breathe\" in verse_dict\nprint(contains_breathe)\n\n# create and sort a list of the dictionary's keys\nsorted_keys = sorted(verse_dict.keys())\n\n# get the first element in the sorted list of keys\nprint(sorted_keys[0])\n\n# find the element with the highest value in the list of keys\nprint(sorted_keys[-1]) \n"
},
{
"alpha_fraction": 0.7540902495384216,
"alphanum_fraction": 0.7642538547515869,
"avg_line_length": 42.3763427734375,
"blob_id": "d2ba15277515567c218a5ea5db1d52530adf8ddf",
"content_id": "33a174b8d5b1aba0700ad15c91f5d0c5ecb8960b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 16136,
"license_type": "no_license",
"max_line_length": 466,
"num_lines": 372,
"path": "/Python Knowledge Book.md",
"repo_name": "valliappan617/Python",
"src_encoding": "UTF-8",
"text": "Python is case sensitive and indented\n\nPython\n\nprint()\nCase sensitive and spacing is very important\n\nOperators\n+, *, -, /\nExponentiation **\n% Modulo \n^ carat Bitwise XOR \n// Integer division\n\nVariable name, assignment operator (=), variable\n\nx=y is not equal to y=x\n\nVariable names = No reserved names or identifiers\nTypical ones, all lower case letters and under_score letters\nWriting variable names that are descriptive\n\nKeywords: \n\nFalse\nclass\nFinally\nis\nreturn\nNone\ncontinue\nfor\nlambda\ntry\nTrue\nDef\nfrom\nnonlocal\nwhile\nand\ndel\nglobal\nnot\nwith\nas\nelif\nif\nor\nyield\nassert\nelse\nimport\npass\nbreak\nexcept\nin \nRaise\nVariables\nIn Python, Numbers are of 4 types:\nInteger.\nFloating Point or Real Numbers.\nComplex Numbers.\nBoolean.\nIntegers and Floats\nThere are two Python data types that could be used for numeric values:\nint - for integer values\nfloat - for decimal or floating point values\nhttps://www.python.org/dev/peps/pep-0008/\nhttps://softwareengineering.stackexchange.com/questions/148677/why-is-80-characters-the-standard-limit-for-code-width\nhttps://atom.io/packages/linter-python-pep8\n\n\n79 - 99 characters line\n\nIn general, there are two types of errors to look out for\nExceptions\nSyntax\nhttps://docs.python.org/3/tutorial/errors.html\n\nBooleans, Comparison Operators, and Logical Operators\nThe bool data type holds one of the values True or False, which are often encoded as 1 or 0, respectively.\nThere are 6 comparison operators that are common to see in order to obtain a boolvalue:\nComparison Operators\nSymbol Use Case\nBool\nOperation\n5 < 3\nFalse\nLess Than\n5 > 3\nTrue\nGreater Than\n3 <= 3\nTrue\nLess Than or Equal To\n3 >= 5\nFalse\nGreater Than or Equal To\n3 == 5\nFalse\nEqual To\n3 != 5\nTrue\nNot Equal To\nAnd there are three logical operators you need to be familiar with:\nLogical Use\nBool\nOperation\n5 < 3 and 5 == 5\nFalse\nand - Evaluates if all provided statements are True\n5 
< 3 or 5 == 5\nTrue\nor - Evaluates if at least one of many statements is True\nnot 5 < 3\nTrue\nnot - Flips the Bool Value\n\nStrings\nStrings in Python are shown as the variable type str. You can define a string with either double quotes \" or single quotes '. If the string you are creating actually has one of these two values in it, then you need to be careful to assure your code doesn't give an error.\nlen(udacity) -> Built in function\n\nTypeError: object of type 'int' has no len(), which alludes to the fact that len only works on a \"sequence (such as a string, bytes, tuple, list, or range) or a collection (such as a dictionary, set, or frozen set),\n\n\nBuilt-in functions:\n\nhttps://docs.python.org/2/library/functions.html#len\n\nint\nstring\nfloat\nstr()\n\nString Methods\nIn this video you were introduced to methods. Methods are like some of the functions you have already seen:\nlen(\"this\")\ntype(12)\nprint(\"Hello world\")\nThese three above are functions - notice they use parentheses, and accept one or more arguments. Functions will be studied in much more detail in a later lesson!\nA method in Python behaves similarly to a function. Methods actually are functions that are called using dot notation. For example, lower() is a string method that can be used like this, on a string called \"sample string\": sample_string.lower().\nMethods are specific to the data type for a particular variable. So there are some built-in methods that are available for all strings, different methods that are available for all integers, etc.\nBelow is an image that shows some methods that are possible with any string.\n\nEach of these methods accepts the string itself as the first argument of the method. However, they also could receive additional arguments, that are passed inside the parentheses. 
Let's look at the output for a few examples.\n>>> my_string.islower()\nTrue\n>>> my_string.count('a')\n2\n>>> my_string.find('a')\n3\n\n\nYou can see that the count and find methods both take another argument. However, the .islower() method does not accept another argument.\nNo professional has all the methods memorized, which is why understanding how to use documentation and find answers is so important. Gaining a strong grasp of the foundations of programming will allow you to use those foundations to use documentation to build so much more than someone who tries to memorize all the built-in methods in Python.\nOne important string method: format()\nWe will be using the format() string method a good bit in our future work in Python, and you will find it very valuable in your coding, especially with your printstatements.\nWe can best illustrate how to use format() by looking at some examples:\n# Example 1\nprint(\"Mohammed has {} balloons\".format(27))\n\n\nMohammed has 27 balloons\n\n\n# Example 2\nanimal = \"dog\"\naction = \"bite\"\nprint(\"Does your {} {}?\".format(animal, action))\n\n\nDoes your dog bite?\n\n\n# Example 3\nmaria_string = \"Maria loves {} and {}\"\nprint(maria_string.format(\"math\",\"statistics\"))\n\n\nMaria loves math and statistics\nNotice how in each example, the number of pairs of curly braces {} you use inside the string is the same as the number of replacements you want to make using the values inside format().\nMore advanced students can learn more about the formal syntax for using the format() string method here.\nPython Container - List contains other data types\n\nLists!\nData structures are containers that organize and group data types together in different ways. A list is one of the most common and basic data structures in Python.\nYou saw here that you can create a list with square brackets. 
Lists can contain any mix and match of the data types you have seen so far.\nlist_of_random_things = [1, 3.4, 'a string', True]\n\n\nThis is a list of 4 elements. All ordered containers (like lists) are indexed in python using a starting index of 0. Therefore, to pull the first value from the above list, we can write:\n>>> list_of_random_things[0]\n1\n\n\nIt might seem like you can pull the last element with the following code, but this actually won't work:\n>>> list_of_random_things[len(list_of_random_things)] \n---------------------------------------------------------------------------\nIndexError Traceback (most recent call last)\n<ipython-input-34-f88b03e5c60e> in <module>()\n----> 1 lst[len(lst)]\n\nIndexError: list index out of range\n\n\nHowever, you can retrieve the last element by reducing the index by 1. Therefore, you can do the following:\n>>> list_of_random_things[len(list_of_random_things) - 1] \nTrue\n\n\nAlternatively, you can index from the end of a list by using negative values, where -1 is the last element, -2 is the second to last element and so on.\n>>> list_of_random_things[-1] \nTrue\n>>> list_of_random_things[-2] \na string\n\n\nSlice and Dice with Lists\nYou saw that we can pull more than one value from a list at a time by using slicing. When using slicing, it is important to remember that the lower index is inclusiveand the upper index is exclusive.\nTherefore, this:\n>>> list_of_random_things = [1, 3.4, 'a string', True]\n>>> list_of_random_things[1:2]\n[3.4]\n\n\nwill only return 3.4 in a list. Notice this is still different than just indexing a single element, because you get a list back with this indexing. 
The colon tells us to go from the starting value on the left of the colon up to, but not including, the element on the right.\nIf you know that you want to start at the beginning, of the list you can also leave out this value.\n>>> list_of_random_things[:2]\n[1, 3.4]\n\n\nor to return all of the elements to the end of the list, we can leave off a final element.\n>>> list_of_random_things[1:]\n[3.4, 'a string', True]\n\n\nThis type of indexing works exactly the same on strings, where the returned value will be a string.\nAre you in OR not in?\nYou saw that we can also use in and not in to return a bool of whether an element exists within our list, or if one string is a substring of another.\n>>> 'this' in 'this is a string'\nTrue\n>>> 'in' in 'this is a string'\nTrue\n>>> 'isa' in 'this is a string'\nFalse\n>>> 5 not in [1, 2, 3, 4, 6]\nTrue\n>>> 5 in [1, 2, 3, 4, 6]\nFalse\nLists - Lower bound inclusive but upper bound exclusive\n\nLength of a string is the number of characters whereas the length of the list is the number of elements it holds.\n\nPython has 0 indexing\n\nMutability and Order\nMutability is about whether or not we can change an object once it has been created. If an object (like a list or string) can be changed (like a list can), then it is called mutable. However, if an object cannot be changed with creating a completely new object (like strings), then the object is considered immutable.\n>>> my_lst = [1, 2, 3, 4, 5]\n>>> my_lst[0] = 'one'\n>>> print(my_lst)\n['one', 2, 3, 4, 5]\n\n\nAs shown above, you are able to replace 1 with 'one' in the above list. This is because lists are mutable.\nHowever, the following does not work:\n>>> greeting = \"Hello there\"\n>>> greeting[0] = 'M'\n\n\nThis is because strings are immutable. 
This means to change this string, you will need to create a completely new string.\nThere are two things to keep in mind for each of the data types you are using:\nAre they mutable?\nAre they ordered?\nOrder is about whether the position of an element in the object can be used to access the element. Both strings and lists are ordered. We can use the order to access parts of a list and string.\nHowever, you will see some data types in the next sections that will be unordered. For each of the upcoming data structures you see, it is useful to understand how you index, are they mutable, and are they ordered. Knowing this about the data structure is really useful!\nAdditionally, you will see how these each have different methods, so why you would use one data structure vs. another is largely dependent on these properties, and what you can easily do with it!\nMutability and Order\n\nList Manipulation using indexing\n\nTuples\nA tuple is another useful container. It's a data type for immutable ordered sequences of elements. They are often used to store related pieces of information. Consider this example involving latitude and longitude:\nlocation = (13.4125, 103.866667)\nprint(\"Latitude:\", location[0])\nprint(\"Longitude:\", location[1])\n\n\nTuples are similar to lists in that they store an ordered collection of objects which can be accessed by their indices. Unlike lists, however, tuples are immutable - you can't add and remove items from tuples, or sort them in place.\nTuples can also be used to assign multiple variables in a compact way.\ndimensions = 52, 40, 100\nlength, width, height = dimensions\nprint(\"The dimensions are {} x {} x {}\".format(length, width, height))\n\n\nThe parentheses are optional when defining tuples, and programmers frequently omit them if parentheses don't clarify the code.\nIn the second line, three variables are assigned from the content of the tuple dimensions. This is called tuple unpacking. 
You can use tuple unpacking to assign the information from a tuple into multiple variables without having to access them one by one and make multiple assignment statements.\nIf we won't need to use dimensions directly, we could shorten those two lines of code into a single line that assigns three variables in one go!\nlength, width, height = 52, 40, 100\nprint(\"The dimensions are {} x {} x {}\".format(length, width, height))\n\n\nHave questions? Head to the forums for discussion with the Udacity Community.\n\n\n\nSets\nA set is a data type for mutable unordered collections of unique elements. One application of a set is to quickly remove duplicates from a list.\nnumbers = [1, 2, 6, 3, 1, 1, 6]\nunique_nums = set(numbers)\nprint(unique_nums)\n\n\nThis would output:\n{1, 2, 3, 6}\n\n\nSets support the in operator the same as lists do. You can add elements to sets using the add method, and remove elements using the pop method, similar to lists. Although, when you pop an element from a set, a random element is removed. Remember that sets, unlike lists, are unordered so there is no \"last element\".\nfruit = {\"apple\", \"banana\", \"orange\", \"grapefruit\"} # define a set\n\nprint(\"watermelon\" in fruit) # check for element\n\nfruit.add(\"watermelon\") # add an element\nprint(fruit)\n\nprint(fruit.pop()) # remove a random element\nprint(fruit)\n\n\nThis outputs:\nFalse\n{'grapefruit', 'orange', 'watermelon', 'banana', 'apple'}\ngrapefruit\n{'orange', 'watermelon', 'banana', 'apple'}\n\n\nOther operations you can perform with sets include those of mathematical sets. Methods like union, intersection, and difference are easy to perform with sets, and are much faster than such operators with other containers.\n \nMethods are functions that belong to an object. The object is always the first argument to a method.\n\n\nDebugging Code\nEveryone gets \"bugs,\" or unexpected errors, in their code, and this is a normal and expected part of software development. 
We all say at one time or another, \"Why isn't this computer doing what I want it to do?!\"\n\nSo an important part of coding is \"debugging\" your code, to remove these bugs. This can often take a long time, and cause you frustration, but developing effective coding habits and mental calmness will help you address these issues. With determined persistence, you can prevail over these bugs!\n\nHere are some tips on successful debugging that we'll discuss in more detail below:\n\nUnderstand common error messages you might receive and what to do about them.\nSearch for your error message, using the Web community.\nUse print statements.\n\nUnderstanding Common Error Messages\nThere are many different error messages that you can receive in Python, and learning how to interpret what they're telling you can be very helpful. Here are some common ones for starters:\n\n\"ZeroDivisionError: division by zero.\" This is an error message that you saw earlier in this lesson. What did this error message indicate to us? You can look back in the Quiz: Arithmetic Operators section to review it if needed.\n\"SyntaxError: unexpected EOF while parsing\" Take a look at the two lines of code below. Executing these lines produces this syntax error message - do you see why?\ngreeting = \"hello\"\nprint(greeting.upper\nThis message is often produced when you have accidentally left out something, like a parenthesis. The message is saying it has unexpectedly reached the end of file (\"EOF\") and it still didn't find that right parenthesis. This can easily happen with code syntax involving pairs, like beginning and ending quotes also.\n\"TypeError: len() takes exactly one argument (0 given)\" This kind of message could be given for many functions, like len in this case, if I accidentally do not include the required number of arguments when I'm calling a function, as below. This message tells me how many arguments the function requires (one in this case), compared with how many I gave it (0). 
I meant to use len(chars) to count the number of characters in this long word, but I forgot the argument.\nchars = \"supercalifragilisticexpialidocious\"\nlen()\nThere are other kinds of error messages that you'll certainly begin experiencing soon in your Python work. Learning what they mean and how to address them will help you debug your code. You might keep an ongoing page of notes on them.\n\nSearch for Your Error Message\nSoftware developers like to share their problems and solutions with each other on the web, so using Google search, or searching in StackOverflow, or searching in Udacity's Knowledge forum are all good ways to get ideas on how to address a particular error message you're getting.\n\nCopy and paste the error message into your web browser search tab, or in Knowledge, and see what others suggest about what might be causing it.\nYou can copy and paste the whole error message, with or without quotes around it.\nOr you can search using just key words from the error message or situation you're facing, along with some other helpful words that describe your context, like Python and Mac.\nUse Print Statements to Help Debugging\nAdding print statements temporarily into your code can help you see which code lines have been executed before the error occurs, and see the values of any variables that might be important. This approach to debugging can also be helpful even if you're not receiving an error message, but things just aren't working the way you want.\n"
}
] | 2 |
besteman/futurama-mining
|
https://github.com/besteman/futurama-mining
|
c6f414ff8d0e83d1c3c3fd44c77cc8420cf0e6c4
|
fcff46b8b88628d1f7ccdd201a1b83b09e56636a
|
68b98dddbe5ed7bcbdc439e9ea6652807481c0d8
|
refs/heads/master
| 2023-06-26T10:55:50.553383 | 2021-07-26T02:06:02 | 2021-07-26T02:06:02 | 361,137,012 | 0 | 1 | null | 2021-04-24T10:51:31 | 2021-07-25T13:30:21 | 2021-07-25T13:50:59 |
Python
|
[
{
"alpha_fraction": 0.6618374586105347,
"alphanum_fraction": 0.67420494556427,
"avg_line_length": 26.211538314819336,
"blob_id": "402732d8915c0b2c6779852b5a17a761c335aedc",
"content_id": "9dd6954577ebd13c0eb4cab6876ff3a3723810c6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2830,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 104,
"path": "/alerts/alerts.py",
"repo_name": "besteman/futurama-mining",
"src_encoding": "UTF-8",
"text": "import os\n\nfrom dotenv import load_dotenv\nimport psycopg2\nimport requests\nfrom twilio.rest import Client\n\n\nload_dotenv()\n\nACCOUNT_SID = os.environ['account_sid']\nAUTH_TOKEN = os.environ['auth_token']\n\nBASE_ETH_URL: str = 'https://api.nanopool.org/v1/eth/'\nETH_MINER_ADDRESS: str = '0x5d78c71912ea88c23c602c8e0d5363d1e3cba4be'\nPHONE_NUMBERS: list = [os.environ.get('besteman_number'), os.environ.get('stephen_number')]\n\nDATABASE_URL = os.environ['DATABASE_URL']\nconn = psycopg2.connect(DATABASE_URL, sslmode='require')\n\ncur = conn.cursor()\n\n\ndef get_enabled_miners_from_db():\n\n cur.execute(\"\"\"SELECT name FROM miner WHERE enabled = true\"\"\")\n rows = cur.fetchall()\n\n print(f'Miners found DB: {rows}')\n\n enabled_miners = []\n for miner in rows:\n enabled_miners.append(miner[0])\n\n print(f'enabled_miners are: {enabled_miners}')\n\n return enabled_miners\n\n\ndef get_workers_reported_hashrate() -> dict:\n \"\"\"Hits Nanopool API to get last reported hashrates\n\n Returns:\n dict: A dict of workers and their hashrates\n \"\"\"\n response = requests.get(f'{BASE_ETH_URL}reportedhashrates/{ETH_MINER_ADDRESS}')\n response: dict = response.json()\n\n return response['data']\n\n\ndef check_workers_hashrate(workers_hashrate: dict) -> list:\n \"\"\"Check if any workers' hashrate is at 0, if hashrate is at 0 it will all to list and return\n\n Args:\n workers_hashrate (dict): Workers and their hashrates\n\n Returns:\n list: list of workers that hashrate is equal 0\n \"\"\"\n offline_workers: list = []\n\n enabled_miners: list = get_enabled_miners_from_db()\n for worker in workers_hashrate:\n if worker['hashrate'] == 0 and worker['worker'] in enabled_miners:\n offline_workers.append(worker['worker'])\n\n return offline_workers\n\n\ndef send_text_message(offline_workers: list) -> None:\n \"\"\"If any workers' hashrate is equal to zero, this function will be called and send a text message\n\n Args:\n offline_workers (list): Workers that hashrate is 
equal to zero\n \"\"\"\n txt_body: str = f\"\"\"Panic! At the Hashrate! \\n{', '.join(offline_workers)} rigs are reporting 0 hashrate\"\"\"\n\n client = Client(ACCOUNT_SID, AUTH_TOKEN)\n\n for phone_number in PHONE_NUMBERS:\n client.api.account.messages.create(\n to=phone_number,\n from_=os.environ.get('twilio_number'),\n body=txt_body)\n\n\ndef main():\n \"\"\"Main function that start the process\n \"\"\"\n print(\"Starting Cronjob\")\n\n workers_hashrate: dict = get_workers_reported_hashrate()\n\n print(f'Workers hashrates {workers_hashrate}')\n\n offline_workers: list = check_workers_hashrate(workers_hashrate)\n\n print(f'Offline Workers: {offline_workers}')\n\n if offline_workers:\n send_text_message(offline_workers)\n else:\n print('No Workers are at 0')\n"
},
{
"alpha_fraction": 0.6146547198295593,
"alphanum_fraction": 0.618344783782959,
"avg_line_length": 27.313432693481445,
"blob_id": "e7bd13be8b94eb760eb8bc44ad9d030ac933850e",
"content_id": "aac0096c956ce04767973f32b4f5b43fbc952a7f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1897,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 67,
"path": "/flaskr/__init__.py",
"repo_name": "besteman/futurama-mining",
"src_encoding": "UTF-8",
"text": "import os\nimport logging\nfrom logging.handlers import RotatingFileHandler\n\nfrom flask import Flask\n\nfrom flaskr.extensions import db, migrate\nfrom flaskr.config import Config\n\n\ndef create_app(test_config=None):\n # create and configure the app\n app = Flask(__name__, instance_relative_config=True)\n\n app.config.from_object(Config)\n\n if test_config is None:\n # load the instance config, if it exists, when not testing\n app.config.from_pyfile('config.py', silent=True)\n else:\n # load the test config if passed in\n app.config.from_mapping(test_config)\n\n # ensure the instance folder exists\n try:\n os.makedirs(app.instance_path)\n except OSError:\n pass\n\n if not app.debug and not app.testing:\n\n if app.config['LOG_TO_STDOUT']:\n stream_handler = logging.StreamHandler()\n stream_handler.setLevel(logging.INFO)\n app.logger.addHandler(stream_handler)\n else:\n if not os.path.exists('logs'):\n os.mkdir('logs')\n file_handler = RotatingFileHandler('logs/microblog.log',\n maxBytes=10240, backupCount=10)\n file_handler.setFormatter(logging.Formatter(\n '%(asctime)s %(levelname)s: %(message)s '\n '[in %(pathname)s:%(lineno)d]'))\n file_handler.setLevel(logging.INFO)\n app.logger.addHandler(file_handler)\n\n app.logger.setLevel(logging.INFO)\n app.logger.info('Miner Web startup')\n\n db.init_app(app)\n app.app_context().push()\n migrate.init_app(app, db)\n with app.app_context():\n db.create_all()\n\n from . import auth\n app.register_blueprint(auth.bp)\n\n from . import index\n app.register_blueprint(index.bp)\n app.add_url_rule('/', endpoint='index')\n\n return app\n\n\napp = create_app()\napp.app_context().push()\n"
},
{
"alpha_fraction": 0.6716417670249939,
"alphanum_fraction": 0.7910447716712952,
"avg_line_length": 14,
"blob_id": "7166a5889658dad4b97d46d6a708ec9b452402af",
"content_id": "ca3cc356ee926d9fbd5753d846655270ec72d159",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 134,
"license_type": "no_license",
"max_line_length": 21,
"num_lines": 9,
"path": "/requirements.txt",
"repo_name": "besteman/futurama-mining",
"src_encoding": "UTF-8",
"text": "requests==2.25.0\npython-dotenv==0.17.0\ntwilio==6.57.0\napscheduler==3.7.0\nflask\ngunicorn\nFlask-SQLAlchemy\npsycopg2-binary\nflask-migrate"
},
{
"alpha_fraction": 0.6203840374946594,
"alphanum_fraction": 0.6223534941673279,
"avg_line_length": 26.445945739746094,
"blob_id": "83244543a37cb3003f648b75ff7db2a871ae8eef",
"content_id": "14a5b425c444a513141993cf101513bf2898fc16",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2031,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 74,
"path": "/flaskr/auth.py",
"repo_name": "besteman/futurama-mining",
"src_encoding": "UTF-8",
"text": "import functools\n\nfrom flask import (\n Blueprint, flash, g, redirect, render_template, request, session, url_for, current_app\n)\nfrom werkzeug.security import check_password_hash, generate_password_hash\n\nfrom flaskr.extensions import db, User\n\n\nbp = Blueprint('auth', __name__, url_prefix='/auth')\n\n\[email protected]('/login', methods=('GET', 'POST'))\ndef login():\n users = User.query.all()\n\n current_app.logger.info(f'Users: {users}')\n\n if len(users) == 0:\n stephen_user = User(username='stephen', password=generate_password_hash('1234'))\n besteman_user = User(username='besteman', password=generate_password_hash('123'))\n db.session.add(stephen_user)\n db.session.add(besteman_user)\n db.session.commit()\n\n if request.method == 'POST':\n username = request.form['username']\n password = request.form['password']\n error = None\n current_app.logger.info(f'Logging in for: {username}')\n user = User.query.filter_by(username=username).first()\n\n if user is None:\n current_app.logger.error(f'Username not right for {user}')\n error = 'Incorrect username.'\n elif not check_password_hash(user.password, password):\n current_app.logger.error(f'Password not right for {user}')\n error = 'Incorrect password.'\n\n if error is None:\n session.clear()\n session['user_id'] = user.id\n return redirect(url_for('index'))\n\n flash(error)\n\n return render_template('auth/login.html')\n\n\[email protected]_app_request\ndef load_logged_in_user():\n user_id = session.get('user_id')\n if user_id is None:\n g.user = None\n else:\n g.user = user_id\n\n\[email protected]('/logout')\ndef logout():\n session.clear()\n return redirect(url_for('index'))\n\n\ndef login_required(view):\n @functools.wraps(view)\n def wrapped_view(**kwargs):\n if g.user is None:\n return redirect(url_for('auth.login'))\n\n return view(**kwargs)\n\n return wrapped_view\n"
},
{
"alpha_fraction": 0.7925311326980591,
"alphanum_fraction": 0.8091286420822144,
"avg_line_length": 25.77777862548828,
"blob_id": "bb12ea1dd5205c5c54e7d3cbbe6ee4289bc5b91a",
"content_id": "4a217eac325573b3b25ccdb17e9944c12dd651de",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 241,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 9,
"path": "/alerts/cronjob.py",
"repo_name": "besteman/futurama-mining",
"src_encoding": "UTF-8",
"text": "from apscheduler.schedulers.blocking import BlockingScheduler\n\nfrom alerts import main\n\n# Create an instance of scheduler and add function.\nscheduler = BlockingScheduler()\nscheduler.add_job(main, \"interval\", seconds=3600)\n\nscheduler.start()\n"
},
{
"alpha_fraction": 0.6338383555412292,
"alphanum_fraction": 0.6338383555412292,
"avg_line_length": 32,
"blob_id": "8c5ea81449c6a63bc869170fda5cef682d806ec3",
"content_id": "53b5af5fc33c04e5c7f47fd1a3ff0a0ae895229c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 396,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 12,
"path": "/flaskr/config.py",
"repo_name": "besteman/futurama-mining",
"src_encoding": "UTF-8",
"text": "import os\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\n\nclass Config(object):\n SECRET_KEY = 'dev'\n SQLALCHEMY_DATABASE_URI = (\n os.environ.get('DATABASE_URL', '').replace('postgres://', 'postgresql://') or\n 'sqlite:///' + os.path.join(basedir, 'instance/flaskr.db')\n )\n SQLALCHEMY_TRACK_MODIFICATIONS = False\n LOG_TO_STDOUT = os.environ.get('LOG_TO_STDOUT')\n"
},
{
"alpha_fraction": 0.5879473686218262,
"alphanum_fraction": 0.5891709923744202,
"avg_line_length": 26.47058868408203,
"blob_id": "917d8a5fe6f0b76dd2b0e8c858cb0ee5f03eabc2",
"content_id": "ac155f67851397897655a21a3e5daba76effab6f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3269,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 119,
"path": "/flaskr/index.py",
"repo_name": "besteman/futurama-mining",
"src_encoding": "UTF-8",
"text": "from flask import (\n Blueprint, flash, g, redirect, render_template, request, url_for, current_app\n)\nfrom sqlite3 import IntegrityError\nfrom werkzeug.exceptions import abort\n\nfrom flaskr.auth import login_required\nfrom flaskr.extensions import db, Miner\n\nbp = Blueprint('index', __name__)\n\n\[email protected]('/')\ndef index():\n\n miners = Miner.query.all()\n\n current_app.logger.info(f'Miners found: {miners}')\n\n return render_template('miner/index.html', miners=miners)\n\n\[email protected]('/create', methods=('GET', 'POST'))\n@login_required\ndef create():\n current_app.logger.info('Starting create route')\n if request.method == 'POST':\n name = request.form['name']\n user_enabled = request.form['enabled']\n error = None\n\n if user_enabled == 'True':\n enabled = True\n else:\n enabled = False\n\n if not name:\n error = 'Name is required.'\n\n if error is not None:\n flash(error)\n else:\n\n try:\n current_app.logger.info(f'Creating miner: {name}, {enabled}, {g.user}')\n user_created_miner = Miner(name=name, enabled=enabled, created_user_id=g.user)\n\n db.session.add(user_created_miner)\n db.session.commit()\n\n current_app.logger.info(f'Created miner: {name}, {enabled}, {g.user}')\n except IntegrityError as err:\n db.session.rollback()\n return render_template('miner/dup_name.html', err=err)\n except Exception as err:\n db.session.rollback()\n current_app.logger.error(f'Erroring creating miner: {name}, {enabled}, {g.user}, {err}')\n return redirect(url_for('index.index'))\n\n return render_template('miner/create.html')\n\n\[email protected]('/<int:id>/update', methods=('GET', 'POST'))\n@login_required\ndef update(id):\n miner = get_miner(id)\n\n if request.method == 'POST':\n name = request.form['name']\n user_enabled = request.form['enabled']\n error = None\n\n if user_enabled == 'True':\n enabled = True\n else:\n enabled = False\n\n if not name:\n error = 'Name is required.'\n\n if error is not None:\n flash(error)\n else:\n 
current_app.logger.info(f'Updating miner: {name}, {enabled}, {id}')\n\n miner.name = name\n miner.enabled = enabled\n\n db.session.commit()\n\n current_app.logger.info(f'Updated miner: {name}, {enabled}, {id}')\n return redirect(url_for('index.index'))\n\n return render_template('miner/update.html', miner=miner)\n\n\[email protected]('/<int:id>/delete', methods=('POST',))\n@login_required\ndef delete(id):\n miner = get_miner(id)\n\n current_app.logger.info(f'Deleting miner: {miner.name}, {miner.enabled}, {id}')\n\n db.session.delete(miner)\n db.session.commit()\n\n current_app.logger.info(f'Deleted miner: {miner.name}, {miner.enabled}, {id}')\n return redirect(url_for('index.index'))\n\n\ndef get_miner(id, check_author=True):\n miner = Miner.query.get(id)\n\n current_app.logger.info(f'Found miner for get_miner with {id}: {miner}')\n\n if miner is None:\n abort(404, f\"Post id {id} doesn't exist.\")\n\n return miner\n"
},
{
"alpha_fraction": 0.7068965435028076,
"alphanum_fraction": 0.7413793206214905,
"avg_line_length": 24.77777862548828,
"blob_id": "31671ad30db6c0180354e321272047a78d432a5c",
"content_id": "74b90ccf6b843e8d3fc0629758be47182754d4e5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 232,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 9,
"path": "/alerts/scripts/miner-reboot.sh",
"repo_name": "besteman/futurama-mining",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/expect -f\nset miner [lindex $argv 0]\nset timeout 60\nspawn sshpass -f switch_password.txt ssh switch.home.schrauger.com:10022\nexpect \"Switched CDU: \"\nsend \"reboot $miner\\r\"\nexpect \"Switched CDU: \"\nsend \"exit\\r\"\nexpect eof\n"
},
{
"alpha_fraction": 0.7289807200431824,
"alphanum_fraction": 0.7346171736717224,
"avg_line_length": 19.285715103149414,
"blob_id": "04d2c631ab25e9765e0dd735d337c584b53c640a",
"content_id": "1b2a972ebacc29744ef3cd2bc2ae8de967b6e754",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2129,
"license_type": "no_license",
"max_line_length": 172,
"num_lines": 105,
"path": "/README.md",
"repo_name": "besteman/futurama-mining",
"src_encoding": "UTF-8",
"text": "# futurama-mining\n\n## Setup Alerts\n\n### Virtual Env\n\n* Install Package for Envs\n\n```sh\npython3 -m pip install --user virtualenv\n```\n\n* Create virtual environment\n\n```sh\npython3 -m venv futurama-mining-env\n```\n\n* Activate virtual environment\n\n```sh\n. futurama-mining-env/bin/activate\n```\n\n* Upgrade Pip\n\n```sh\npip install --upgrade pip\n```\n\n* Install packages to environment\n\n```sh\npip install -r requirements.txt\n```\n\n> Note: You have to do this every time you add a package, remove a package, and upgrade package\n\n* Leave environment\n\n```sh\ndeactivate\n```\n\n### Docker\n\n**TODO update this to work with PostgreSQL**\n\nTo run alerts, you need to download and [install docker](https://docs.docker.com/get-docker/).\n\nYou will also need to get `.env` file that stores creds for login into Gmail server.\n\nOnce you have downloaded docker you can run:\n\n`docker build --tag alerts .`\n\nAnd to run the docker container:\n\n`docker run alerts`\n\n## Run Miner Web App\n\n> Note, you have to be in the virtual env!\n\n### Packages\n\nAll packages should be included in the requirements.txt file.\n\n### Set Bash Variables\n\n```sh\n$ export FLASK_APP=flaskr\n$ export FLASK_ENV=development\nflask run\n```\n\nThe miner app should now be running on `http://127.0.0.1:5000/` and has hot reloading\n\n## Prod\n\nEverything is running on Heroku\n\n### Database\n\n#### Dev\n\nOn dev, miner app will use SQLite to mimic prod's database\n\n#### Prod\n\nProd is running on Heroku PostgreSQL Addon. 
Everything should be automatic in terms of setup and connection\n\n#### Migrations\n\nMigrations are handled through [flask-migrate](https://flask-migrate.readthedocs.io/en/latest/), which is a wrapper for [Alembic](https://alembic.sqlalchemy.org/en/latest/)\n\nAll migrations are stored in [version folder](migrations/versions)\n\nIf you update a SQLAlchemy class, add column, change column size, delete column, etc, flask-migrate should be able to detect that change with the following:\n\n```sh\nflask db migrate -m \"MIGRATIONS MESSAGE\"\n```\n\nThis will create a migration file in the [version folder](migrations/versions). When you deploy to Heroku, the migration will run automatically for you."
},
{
"alpha_fraction": 0.591478705406189,
"alphanum_fraction": 0.5939849615097046,
"avg_line_length": 35.318180084228516,
"blob_id": "2ac70e6d283e55a079a261d7a235b9dfa08b70ce",
"content_id": "5cff80b142c2c47d17cac4567f90b5e22fc3b0e5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 798,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 22,
"path": "/flaskr/templates/miner/update.html",
"repo_name": "besteman/futurama-mining",
"src_encoding": "UTF-8",
"text": "{% extends 'base.html' %}\n\n{% block header %}\n <h1>{% block title %}Edit \"{{ miner['name'] }}\"{% endblock %}</h1>\n{% endblock %}\n\n{% block content %}\n <form method=\"post\">\n <label for=\"name\">Miner</label>\n <input name=\"name\" id=\"name\"value=\"{{ request.form['name'] or miner['name'] }}\" required>\n <label for=\"enable\">Enabled?</label>\n <input type=\"radio\" id=\"yes\" name=\"enabled\" value=\"True\">\n <label for=\"yes\">Yes</label><br>\n <input type=\"radio\" id=\"no\" name=\"enabled\" value=\"False\">\n <label for=\"no\">No</label><br>\n <input type=\"submit\" value=\"Save\">\n </form>\n <hr>\n <form action=\"{{ url_for('index.delete', id=miner['id']) }}\" method=\"post\">\n <input class=\"danger\" type=\"submit\" value=\"Delete\" onclick=\"return confirm('Are you sure?');\">\n </form>\n{% endblock %}"
},
{
"alpha_fraction": 0.5537874102592468,
"alphanum_fraction": 0.587523877620697,
"avg_line_length": 30.420000076293945,
"blob_id": "4a9689e64a01ff3b429039a3049a831fcdcc21fa",
"content_id": "89619268bcfd85573097973280cbddc6d89165f3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1571,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 50,
"path": "/migrations/versions/b3e274d2b63f_change_text_columns_values.py",
"repo_name": "besteman/futurama-mining",
"src_encoding": "UTF-8",
"text": "\"\"\"change text columns values\n\nRevision ID: b3e274d2b63f\nRevises: \nCreate Date: 2021-07-24 17:43:21.444137\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'b3e274d2b63f'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('miner', 'name',\n existing_type=sa.VARCHAR(length=80),\n type_=sa.String(length=50),\n existing_nullable=False)\n op.alter_column('user', 'username',\n existing_type=sa.VARCHAR(length=80),\n type_=sa.String(length=20),\n existing_nullable=False)\n op.alter_column('user', 'password',\n existing_type=sa.VARCHAR(length=20),\n type_=sa.String(length=150),\n existing_nullable=False)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('user', 'password',\n existing_type=sa.String(length=150),\n type_=sa.VARCHAR(length=20),\n existing_nullable=False)\n op.alter_column('user', 'username',\n existing_type=sa.String(length=20),\n type_=sa.VARCHAR(length=80),\n existing_nullable=False)\n op.alter_column('miner', 'name',\n existing_type=sa.String(length=50),\n type_=sa.VARCHAR(length=80),\n existing_nullable=False)\n # ### end Alembic commands ###\n"
},
{
"alpha_fraction": 0.6342794895172119,
"alphanum_fraction": 0.635371208190918,
"avg_line_length": 21.924999237060547,
"blob_id": "c9bf9c9fc1bfec6f981c8a532aa861704217e5b7",
"content_id": "e4d6aa3e8f95b712d6e8de07240d69445936fb54",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 916,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 40,
"path": "/monitoring-scripts/check_nanominer.py",
"repo_name": "besteman/futurama-mining",
"src_encoding": "UTF-8",
"text": "from subprocess import check_output\nfrom os import system\nimport typing\n\n\ndef process_exists(process_name: str) -> bool:\n \"\"\"Checks to see if a process exists or is running\n\n Args:\n process_name (str): Process/App to check if running\n\n Returns:\n bool: boolean base on if Process/App is running\n \"\"\"\n programs: str = str(check_output('tasklist'))\n\n if process_name in programs:\n return True\n else:\n return False\n\n\ndef restart_system(program: str) -> None:\n \"\"\"Will restart machine if program is not running\n\n Args:\n program (str): Process/App to check if running\n \"\"\"\n is_program_running: bool = process_exists(program)\n\n if is_program_running:\n print(f\"{program} is Running\")\n return\n else:\n print(f\"{program} is not Running\\n\")\n print(\"Restarting\")\n system(\"shutdown /r /t 1\")\n\n\nrestart_system('nanominer')"
},
{
"alpha_fraction": 0.643203854560852,
"alphanum_fraction": 0.6488673090934753,
"avg_line_length": 31.526315689086914,
"blob_id": "ec7a4c94486cc6958a128195a13e5e54736ce544",
"content_id": "29a2c141d11cb1254a61a3e39cc6e4ec48b14733",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1236,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 38,
"path": "/flaskr/extensions.py",
"repo_name": "besteman/futurama-mining",
"src_encoding": "UTF-8",
"text": "from flask_migrate import Migrate\nfrom flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy.sql import func\n\ndb = SQLAlchemy()\nmigrate = Migrate(compare_type=True)\n\n\nclass User(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n username = db.Column(db.String(20), unique=True, nullable=False)\n password = db.Column(db.String(150), unique=True, nullable=False)\n created_at = db.Column(db.DateTime(timezone=True), default=func.now())\n miner = db.relationship('Miner', backref='user', lazy=True)\n\n def __repr__(self):\n return f\"\"\"\n id: {self.id}\n username: {self.username}\n created_date: {self.created_at}\n \"\"\"\n\n\nclass Miner(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(50), unique=True, nullable=False)\n enabled = db.Column(db.Boolean, nullable=False)\n created_at = db.Column(db.DateTime(timezone=True), default=func.now())\n created_user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)\n\n def __repr__(self):\n return f\"\"\"\n id: {self.id}\n name: {self.name}\n enabled: {self.enabled}\n created_date: {self.created_at}\n created_user_id: {self.created_user_id}\n \"\"\"\n"
}
] | 13 |
Chincoya/Practica09-Modelado20171
|
https://github.com/Chincoya/Practica09-Modelado20171
|
ffd4e94cbefd95cd3defeb5808f05cfbedacce11
|
a7a2b92e39b80d8098d371adce7079af7d693ed0
|
1d5d0418aeeccb9b6cc4731bcdbda8d6d24171e2
|
refs/heads/master
| 2018-01-02T12:13:24.897055 | 2016-10-20T22:58:49 | 2016-10-20T22:58:49 | 71,510,833 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5591807961463928,
"alphanum_fraction": 0.5799434781074524,
"avg_line_length": 39,
"blob_id": "23886046a3a61edbccd9ab90e07aa357545d79f2",
"content_id": "cafe6d7fc1640b2d9f968a1ea99e99a04e944841",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7080,
"license_type": "no_license",
"max_line_length": 164,
"num_lines": 177,
"path": "/servidor.py",
"repo_name": "Chincoya/Practica09-Modelado20171",
"src_encoding": "UTF-8",
"text": "import sys\nfrom random import randint\nfrom PyQt4 import QtCore, QtGui, uic\n\n#stuntData= [[],QtCore.QTimer()]\n## This is a remindr of a previous version. All these variables are declared and assigned in the class __init__\npaused = False\nnotStarted = True\nwait = 100\ntimer = QtCore.QTimer()\napp = QtGui.QApplication(sys.argv)\nsnakes=[]\nfacing = []\n##This next list has the values [game pauses, game started, wait, timer, snakes(list), the directions of the snakes(list)]\npreferences = [paused,notStarted, wait, timer,snakes,facing]\n#mainWindow = uic.loadUi(\"servidor.ui\")\n\nclass Board(QtGui.QMainWindow):\n \n def randCoor(self):\n x= randint(0,self.board.rowCount()-1)\n y= randint(0,self.board.columnCount()-1)\n return [x,y]\n \n def moveSnakes(self):\n yesFood = randint(0,100)\n if (not self.thereIsFood) and yesFood>90:\n self.food = self.randCoor()\n while(self.numbers[self.food[0]][self.food[1]]!=0):\n self.food = self.randCoor()\n self.board.item(self.food[0],self.food[1]).setBackground(QtGui.QColor('lime'))\n self.numbers[self.food[0]][self.food[1]] = 1\n print(self.food)\n self.thereIsFood = True\n #self.app.processEvents()\n cols= self.board.columnCount()\n rows= self.board.rowCount()\n \n for snake in self.snakes:\n last = [snake[len(snake)-1][0],snake[len(snake)-1][1]]\n #self.board.item(snake[len(snake)-1][0],snake[len(snake)-1][1]).setBackground(QtGui.QColor('white'))\n #self.numbers[snake[len(snake)-1][0]][snake[len(snake)-1][1]]=0\n for j in reversed(range(1, len(snake))):\n snake[j][0],snake[j][1]=snake[j-1][0],snake[j-1][1]\n if self.facing[0]==1:\n snake[0]=((snake[0][0]+1)%cols,snake[0][1])\n elif self.facing[0]==2:\n snake[0]=(snake[0][0],(snake[0][1]+1)%rows)\n elif self.facing[0]==3:\n snake[0]=((snake[0][0]-1)%cols,snake[0][1])\n else:\n snake[0]=(snake[0][0],(snake[0][1]-1)%rows)\n if self.food[0]!=snake[0][0] or self.food[1]!=snake[0][1]:\n self.board.item(last[0],last[1]).setBackground(QtGui.QColor('white'))\n 
self.numbers[last[0]][last[1]]= 0\n else:\n print(\"EATEN!\")\n snake.append(last)\n self.thereIsFood = False\n self.paintSnakes()\n \n def paintSnakes(self):\n color = QtGui.QColor(100,100,100)\n for snake in self.snakes:\n if self.numbers[snake[0][0]][snake[0][1]]!=0 and self.numbers[snake[0][0]][snake[0][1]]!=1:\n self.board.item(snake[0][0],snake[0][1]).setBackground(QtGui.QColor(100,0,0))\n self.endGameButton()\n return\n for coor in snake:\n self.board.item(coor[0],coor[1]).setBackground(color)\n self.numbers[coor[0]][coor[1]]=2\n\n def connectEvents(self,app):\n self.connect(self.colSpin, QtCore.SIGNAL(\"valueChanged(int)\"),\n lambda:self.board.setRowCount(self.colSpin.value()))\n self.connect(self.rowSpin, QtCore.SIGNAL(\"valueChanged(int)\"),\n lambda:self.board.setColumnCount(self.rowSpin.value()))\n self.connect(self.waitSpin, QtCore.SIGNAL(\"valueChanged(int)\"),lambda:self.setWait(self.waitSpin.value()))\n self.connect(self.initButton, QtCore.SIGNAL('clicked()'),lambda:self.initGameButtonFunc(app))\n self.connect(self.endButton, QtCore.SIGNAL('clicked()'), self.endGameButton)\n self.connect(self.timer,QtCore.SIGNAL('timeout()'),lambda:self.moveSnakes())\n\n def initGameButtonFunc(self,app):\n self.setFocus()\n if self.started:\n self.initGame(app)\n if self.paused:\n self.initButton.setText(\"Reanudar Juego\")\n self.timer.stop()\n else:\n self.initButton.setText(\"Pausar Juego\")\n self.endButton.show()\n self.timer.start(self.wait)\n print(self.paused)\n self.paused = not self.paused\n\n def endGameButton(self):\n self.snakes = []\n self.facing = []\n self.timer.stop()\n self.numbers = []\n self.endButton.hide()\n self.started = True\n self.paused = False\n self.thereIsFood = False\n self.food = [-1,-1]\n self.initButton.setText(\"Iniciar Juego\")\n \n def setWait(self,newWait):\n if not self.paused:\n self.wait = newWait\n print(self.wait)\n\n def initGame(self,app):\n for i in range(self.board.rowCount()):\n for j in 
range(self.board.columnCount()):\n self.board.setItem(i,j,QtGui.QTableWidgetItem())\n self.snakes.append([[0,j]for j in range((self.board.columnCount()//4) if (self.board.columnCount()<self.board.rowCount()) else (self.board.rowCount()//4))])\n self.facing.append(3)\n print(self.facing)\n self.numbers = [[0 for i in range(self.board.rowCount())] for j in range(self.board.columnCount())]\n self.started = False\n #self.connect(timer,QtCore.SIGNAL('timeout()'),lambda:self.moveSnakes())\n self.timer.start(self.wait)\n \n def keyPressEvent(self, event):\n print('EVENT!')\n if event.key() == QtCore.Qt.Key_Down:\n print('DOWN!')\n if self.facing[0]!=3:\n self.facing[0]=1\n print (self.facing)\n elif event.key() == QtCore.Qt.Key_Right:\n if self.facing[0]!=4:\n self.facing[0]=2\n elif event.key() == QtCore.Qt.Key_Up:\n if self.facing[0]!=1:\n self.facing[0]=3\n elif event.key() == QtCore.Qt.Key_Left:\n if self.facing[0]!=2:\n self.facing[0]=4\n #self.app.processEvents()\n\n def __init__(self,app,preferences):\n QtGui.QMainWindow.__init__(self)\n self.ui = uic.loadUi(\"servidor.ui\", self)\n self.initButton= self.initGameButton\n self.btnGroup = self.gridLayout\n self.endButton = QtGui.QPushButton('Terminar Juego',self)\n self.colSpin = self.colSpin\n self.rowSpin = self.filasspin\n self.waitSpin = self.esperaSpin\n self.board = self.boardTable\n self.app = app\n self.paused = preferences[0]\n self.started = preferences[1]\n self.wait = preferences[2]\n self.facing = preferences[5]\n self.snakes = preferences[4]\n self.timer = preferences[3]\n self.thereIsFood = False\n self.food = [-1,-1]\n\n self.board.keyPressEvent = self.keyPressEvent\n\n header = self.board.horizontalHeader()\n header.setResizeMode(QtGui.QHeaderView.Stretch)\n header = self.board.verticalHeader()\n header.setResizeMode(QtGui.QHeaderView.Stretch)\n self.connectEvents(app) \n self.btnGroup.addWidget(self.endButton)\n self.show()\n self.endButton.hide()\n\n\nboard = 
Board(app,preferences)\nsys.exit(app.exec_())\n"
}
] | 1 |
hazxel/NJU-Lab136-HMM-Project
|
https://github.com/hazxel/NJU-Lab136-HMM-Project
|
9f61deeef9035c6f04959099cc390dc47d32f9d3
|
296f25b2ed00297e677e1203800cbb1ea7f42ed9
|
106bb2ca12ba0ea632bdbd92aeb60427a5578373
|
refs/heads/master
| 2022-01-10T13:15:37.467586 | 2019-07-26T05:03:56 | 2019-07-26T05:03:56 | 198,345,371 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6304348111152649,
"alphanum_fraction": 0.6612318754196167,
"avg_line_length": 28.105262756347656,
"blob_id": "5c912ddf5118fafba37efac617bfeb9321483c72",
"content_id": "cbaf6b3093f84231cfee22650c52a57e164c1c15",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 552,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 19,
"path": "/try.py",
"repo_name": "hazxel/NJU-Lab136-HMM-Project",
"src_encoding": "UTF-8",
"text": "import os\nimport re\n\n\ninputPath = \"Z:/AchivedWorkbyName/LuSheng/MitoMoveData/trajectory\"\noutputPath = \"C:/Users/stevenzhou/Desktop\"\n\npattern = r\"^(.*Div7MitoMove.*C - Series.*)\\.xml$\"\n\n# print(re.findall(pattern, '20140711 Div7MitoMove27C - Series004.xml'))\n\nfor dirpath, _, filenames in os.walk(inputPath):\n for filename in filenames:\n if len(re.findall(pattern, filename)) > 0:\n # print(filename)\n filenames = [(\"{}/{}\".format(dirpath, filename), \n re.search(pattern, filename).group(2) )]\n\nprint(filenames)"
},
{
"alpha_fraction": 0.6684394478797913,
"alphanum_fraction": 0.6789097785949707,
"avg_line_length": 31.176469802856445,
"blob_id": "ead40e784b64778632f4e585599efed235d478a4",
"content_id": "d017c47f6dd80cb048ffb02d5d46e44978f117c0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6017,
"license_type": "no_license",
"max_line_length": 231,
"num_lines": 187,
"path": "/MitoMoveData/trackmate.ij.py",
"repo_name": "hazxel/NJU-Lab136-HMM-Project",
"src_encoding": "UTF-8",
"text": "import fiji.plugin.trackmate as trackmate\nimport fiji.plugin.trackmate.detection\nimport fiji.plugin.trackmate.tracking\nimport fiji.plugin.trackmate.tracking.sparselap\nimport fiji.plugin.trackmate.features\nimport fiji.plugin.trackmate.features.track\nimport fiji.plugin.trackmate.visualization.hyperstack\nimport fiji.plugin.trackmate.io.TmXmlWriter\nimport ij\n\nimport java.awt\nimport java.io\n\nimport os\nimport sys\nimport time\n\nimport logging\n\nimport re\n\n\n\ndef openImage(filename):\n filenames = {\"{}{}\".format(fileInfo.directory.replace(\"\\\\\", \"/\"), fileInfo.fileName): imp for i in range(ij.WindowManager.getImageCount()) for imp in [ij.WindowManager.getImage(i + 1)] for fileInfo in [imp.getOriginalFileInfo()]}\n logging.debug(\"imageFilenames: {}\".format(filenames))\n if filename not in filenames:\n imp = ij.IJ.openImage(filename)\n imp.show()\n filenames[filename] = imp\n return filenames[filename]\n\n\ndef main():\n inputPath = \"X:/AchivedWorkbyName/LuSheng/MitoMoveData/tif\"\n outputPath = \"X:/AchivedWorkbyName/ChenXudong/MitochondrialTransport/MitoMoveData/trajectory\"\n pattern = r\"^(.*Div7MitoMove.*)\\.tif$\"\n repl = r\"\\1.xml\"\n filenames = [(\"{}/{}\".format(dirpath, filename), \"{}/{}\".format(outputPath, re.sub(pattern, repl, filename))) for dirpath, _, filenames in os.walk(inputPath) for filename in filenames if len(re.findall(pattern, filename)) > 0]\n logging.debug(\"filenames: {}\".format(filenames))\n\n for inputFilename, outputFilename in filenames:\n logging.info(\"{} -> {}\".format(inputFilename, outputFilename))\n\n # Get currently selected image\n imp = openImage(inputFilename)\n filename = imp.getOriginalFileInfo().fileName\n\n\n # Create the trackmate object\n settings = trackmate.Settings()\n settings.setFrom(imp)\n tm = trackmate.TrackMate(settings)\n\n # Configure displayer\n displayer = trackmate.visualization.hyperstack.HyperStackDisplayer(tm.getModel(), trackmate.SelectionModel(tm.getModel()), imp)\n 
displayerSettings = {\n \"Color\": java.awt.Color(1.0, 0, 1.0, 1.0),\n }\n for key, value in displayerSettings.items():\n displayer.setDisplaySettings(key, value)\n\n\n # Configure detector\n settings.detectorFactory = trackmate.detection.DogDetectorFactory()\n settings.detectorSettings = {\n \"TARGET_CHANNEL\": 1,\n \"RADIUS\": 1.05 / 2,\n \"THRESHOLD\": 40.0,\n \"DO_MEDIAN_FILTERING\": False,\n \"DO_SUBPIXEL_LOCALIZATION\": True,\n }\n if not tm.execDetection():\n return\n logging.info(\"{} detection\".format(filename))\n\n # Configure initial spot filter\n #settings.initialSpotFilterValue = 0.0\n if not tm.execInitialSpotFiltering():\n return\n logging.info(\"{} initial spot filtering\".format(filename))\n\n # Compute spot features\n if not tm.computeSpotFeatures(True):\n return\n logging.info(\"{} spot features\".format(filename))\n displayer.render()\n\n # Configure spot filters\n spotFilterSettings = {\n # \"QUALITY\": (50, None),\n \"POSITION_Z\": (0.3, 3.0),\n }\n for key, (lowerBound, upperBound) in spotFilterSettings.items():\n if lowerBound is not None:\n settings.addSpotFilter(trackmate.features.FeatureFilter(key, lowerBound, True))\n if upperBound is not None:\n settings.addSpotFilter(trackmate.features.FeatureFilter(key, upperBound, False))\n if not tm.execSpotFiltering(True):\n return\n logging.info(\"{} spot filtering\".format(filename))\n displayer.render()\n\n\n # Configure tracker\n settings.trackerFactory = trackmate.tracking.sparselap.SparseLAPTrackerFactory()\n settings.trackerSettings = {\n \"ALTERNATIVE_LINKING_COST_FACTOR\": 1.05,\n \"BLOCKING_VALUE\": float(\"inf\"),\n \"CUTOFF_PERCENTILE\": 0.9,\n\n # Frame to frame linking\n \"LINKING_MAX_DISTANCE\": 2.1,\n \"LINKING_FEATURE_PENALTIES\": {\n \"POSITION_X\": 0.1,\n \"POSITION_Y\": 0.2,\n \"POSITION_Z\": 0.2,\n },\n\n # Track segment gap closing\n \"ALLOW_GAP_CLOSING\": True,\n \"GAP_CLOSING_MAX_DISTANCE\": 2.1,\n \"MAX_FRAME_GAP\": 2,\n \"GAP_CLOSING_FEATURE_PENALTIES\": {\n 
\"POSITION_X\": 0.2,\n \"POSITION_Y\": 0.4,\n \"POSITION_Z\": 0.4,\n \"FRAME\": 0.8,\n },\n\n # Track segment splitting\n \"ALLOW_TRACK_SPLITTING\": False,\n \"SPLITTING_MAX_DISTANCE\": 15.0,\n \"SPLITTING_FEATURE_PENALTIES\": {},\n\n # Track segment merging\n \"ALLOW_TRACK_MERGING\": False,\n \"MERGING_MAX_DISTANCE\": 15.0,\n \"MERGING_FEATURE_PENALTIES\": {},\n }\n if not tm.execTracking():\n return\n logging.info(\"{} tracking\".format(filename))\n displayer.render()\n\n # Configure track analyzers\n settings.addTrackAnalyzer(trackmate.features.track.TrackDurationAnalyzer())\n if not tm.computeTrackFeatures(True):\n return\n logging.info(\"{} track features\".format(filename))\n\n # Configure track filters\n trackFilterSettings = {\n \"NUMBER_SPOTS\": (9.5, None),\n }\n for key, (lowerBound, upperBound) in trackFilterSettings.items():\n if lowerBound is not None:\n settings.addTrackFilter(trackmate.features.FeatureFilter(key, lowerBound, True))\n if upperBound is not None:\n settings.addTrackFilter(trackmate.features.FeatureFilter(key, upperBound, False))\n if not tm.execTrackFiltering(True):\n return\n logging.info(\"{} track filtering\".format(filename))\n displayer.render()\n\n if not tm.computeEdgeFeatures(True):\n return\n logging.info(\"{} edge features\".format(filename))\n displayer.render()\n\n\n # Export tracks\n outputFile = java.io.File(outputFilename)\n writer = trackmate.io.TmXmlWriter(outputFile)\n writer.appendModel(tm.getModel())\n writer.appendSettings(settings)\n writer.writeToFile()\n logging.info(\"{} export tracks to xml\".format(filename))\n\n imp.changes = False\n imp.close()\n\n\n\nif __name__ == \"__builtin__\":\n logging.basicConfig(level=logging.INFO, format=\"%(asctime)s.%(msecs)03d %(levelname)8s %(filename)s:%(lineno)3s - %(msg)s\", datefmt=\"%Y-%m-%d %H:%M:%S\")\n main()\n"
},
{
"alpha_fraction": 0.5682851076126099,
"alphanum_fraction": 0.5917244553565979,
"avg_line_length": 40.810001373291016,
"blob_id": "51fcb9ec6d7e3bcb913682eb4cbe4e874fc20774",
"content_id": "b479f3192f5ac746905db3d0d162e71657234bb6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4181,
"license_type": "no_license",
"max_line_length": 215,
"num_lines": 100,
"path": "/trackmate.py",
"repo_name": "hazxel/NJU-Lab136-HMM-Project",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\nimport os\n\nimport logging\n\nimport argparse\nimport bs4\nimport scipy.io\n\n\n\nclass color:\n _colors = dict(zip([\"black\", \"red\", \"green\", \"yellow\", \"blue\", \"magenta\", \"cyan\", \"white\"], range(8)))\n def __init__(self, bold=True, fg=None, bg=None):\n self._bold, self._fg, self._bg = bold, fg, bg\n def __getattr__(self, c):\n if c.lower() == \"default\":\n return __class__()\n if c.lower() == \"bold\":\n return __class__(not self._bold, self._fg, self._bg)\n if c.lower() in self._colors:\n return __class__(self._bold, **dict({\"fg\": self._fg, \"bg\": self._bg}, **{\"fg\" if c == c.lower() else \"bg\": self._colors[c.lower()]}))\n if c.lower() == \"light\":\n g = self._fg if c == c.lower() else self._bg\n if g is not None and g < 16:\n return __class__(self._bold, **dict({\"fg\": self._fg, \"bg\": self._bg}, **{\"fg\" if c == c.lower() else \"bg\": g + 8 if g < 8 else g - 8}))\n raise KeyError(\"not system color can not be light\")\n if c.lower() == \"gray\":\n def gray(i):\n if 0 <= i < 26:\n return __class__(self._bold, **dict({\"fg\": self._fg, \"bg\": self._bg}, **{\"fg\" if c == c.lower() else \"bg\": 16 if i == 0 else i + 231 if i < 25 else 231}))\n raise KeyError(\"gray should be in [0, 26)\")\n return gray\n if c.lower() == \"rgb\":\n def rgb(r, g, b):\n if 0 <= r < 6 and 0 <= g < 6 and 0 <= b < 6:\n return __class__(self._bold, **dict({\"fg\": self._fg, \"bg\": self._bg}, **{\"fg\" if c == c.lower() else \"bg\": r * 36 + g * 6 + b + 16}))\n raise KeyError(\"rgb should be in [0, 6)\")\n return rgb\n raise KeyError(\"color is not correct\")\n __getitem__ = __getattr__\n def __call__(self, s=\"\"):\n fg = \"\" if self._fg is None else f';{self._fg + 30}' if self._fg < 8 else f';{self._fg + 82}' if self._fg < 16 else f';38;5;{self._fg:>03}'\n bg = \"\" if self._bg is None else f';{self._bg + 40}' if self._bg < 8 else f';{self._bg + 92}' if self._bg < 16 else f';48;5;{self._bg:>03}'\n return \"\" if s == \"\" 
else f'\\033[{\"1\" if self._bold else \"0\"}{fg}{bg}m{s}\\033[0m'\ncolor = color()\ndef initial():\n # os.environ.update(zip((\"LINES\", \"COLUMNS\"), os.popen('stty size', 'r').read().split()))\n for no, c in [(logging.CRITICAL, color.magenta), (logging.ERROR, color.red), (logging.WARNING, color.yellow), (logging.INFO, color.green), (logging.DEBUG, color.white)]:\n logging.addLevelName(no, c(logging.getLevelName(no)))\n\n\n\ndef trackmate(filename, **kargs):\n with open(filename, encoding=\"utf-8\") as f:\n soup = bs4.BeautifulSoup(f.read(), features=\"xml\")\n\n tracks = soup.Tracks(\"particle\")\n trajectorys = []\n for track in tracks:\n particles = []\n tPrev = int(track.detection[\"t\"]) - 1\n for particle in track(\"detection\"):\n xCurrent, tCurrent = float(particle[\"x\"]), int(particle[\"t\"])\n particles.extend([(t - tPrev) * (xCurrent - particles[-1]) / (tCurrent - tPrev) + particles[-1] for t in range(tPrev + 1, tCurrent)])\n particles.append(xCurrent)\n tPrev = tCurrent\n\n particles = list(map(float.__sub__, particles[1:], particles[:-1]))\n trajectorys.append(particles)\n\n logging.info(f\"keep {len(trajectorys)} out of {len(tracks)} trajectories\")\n\n # import pdb; pdb.set_trace()\n if kargs[\"mat\"] is not None:\n scipy.io.savemat(kargs[\"mat\"], {\"T\": trajectorys})\n\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Extract tracks from trackmate xml\")\n parser.add_argument(\"-V\", \"--version\", action=\"version\", version=\"%(prog)s 0.1\")\n parser.add_argument(\"-v\", \"--verbose\", help=f\"set log level to {logging.getLevelName(30)}, {logging.getLevelName(20)} or {logging.getLevelName(10)} (default {logging.getLevelName(40)})\", action=\"count\", default=0)\n\n parser.add_argument(\"filename\", help=\"xml filename\")\n parser.add_argument(\"--mat\", help=\"save to FILENAME as mat\", metavar=\"FILENAME\")\n\n args = parser.parse_args()\n\n logging.basicConfig(level=max(0, logging.ERROR - args.verbose * 10), 
format=\"%(asctime)s.%(msecs)03d %(levelname)19s %(filename)s:%(lineno)3s - %(msg)s\", datefmt=\"%Y-%m-%d %H:%M:%S\")\n logging.info(f\"Arguments: {args}\")\n\n trackmate(**args.__dict__)\n\n\n\nif __name__ == \"__main__\":\n initial()\n main()\n"
},
{
"alpha_fraction": 0.6726190447807312,
"alphanum_fraction": 0.6845238208770752,
"avg_line_length": 15.899999618530273,
"blob_id": "27225c465e6b22ee9d8114e2bc208c3a4ca1f9bb",
"content_id": "cb81b806d34a357659e73802ccdbbbaab00b0b71",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 168,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 10,
"path": "/test.py",
"repo_name": "hazxel/NJU-Lab136-HMM-Project",
"src_encoding": "UTF-8",
"text": "import imagej\n\nprint(1)\n\nij = imagej.init(ij_dir_or_version_or_endpoint = 'D:/Fiji.app', headless = True)\n\n#ij = imagej.init(headless=False)\n#ij.ui().showUI()\n\nprint(2)"
},
{
"alpha_fraction": 0.6876502633094788,
"alphanum_fraction": 0.6955191493034363,
"avg_line_length": 28.14649772644043,
"blob_id": "1eccbe5f7b9a14ac2277950caf4dd9ceec515166",
"content_id": "ec68791c0602142bcdf3993166be1cf502cf812c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4575,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 157,
"path": "/New_.py",
"repo_name": "hazxel/NJU-Lab136-HMM-Project",
"src_encoding": "UTF-8",
"text": "from fiji.plugin.trackmate import Model\nfrom fiji.plugin.trackmate import Settings\nfrom fiji.plugin.trackmate import TrackMate\n\nfrom fiji.plugin.trackmate import SelectionModel\nfrom fiji.plugin.trackmate import Logger\nfrom fiji.plugin.trackmate.detection import DogDetectorFactory\nfrom fiji.plugin.trackmate.tracking.sparselap import SparseLAPTrackerFactory\nfrom fiji.plugin.trackmate.tracking import LAPUtils\nfrom ij import IJ\nimport fiji.plugin.trackmate.visualization.hyperstack.HyperStackDisplayer as HyperStackDisplayer\nimport fiji.plugin.trackmate.features.FeatureFilter as FeatureFilter\nimport sys\nimport fiji.plugin.trackmate.features.track.TrackDurationAnalyzer as TrackDurationAnalyzer\n\nfrom java.io import File\nfrom fiji.plugin.trackmate.io import TmXmlWriter\n\nimport os\n\n\n\nfileDir = 'C:/Users/stevenzhou/Desktop'\n\ndata = []\n\nfor root, dirs, files in os.walk(fileDir):\n\tfor file in files:\n\t\tif os.path.splitext(file)[1] == '.tif':\n\t\t\ti = root + '/' + file\n\t\t\t# Output will be in the same directory\n\t\t\to = root + '/' + os.path.splitext(file)[0] + '.xml'\n\t\t\tdata.append([i, o])\n\n\nfor inputFile,outputFile in data:\n\n\n\t# Get currently selected image\n\t#imp = WindowManager.getCurrentImage()\n\timp = IJ.openImage(inputFile)\n\t#imp.show()\n\t\n\t \n\t#----------------------------\n\t# Create the model object now\n\t#----------------------------\n\t \n\t# Some of the parameters we configure below need to have\n\t# a reference to the model at creation. 
So we create an\n\t# empty model now.\n\t \n\tmodel = Model()\n\t \n\t# Send all messages to ImageJ log window.\n\tmodel.setLogger(Logger.IJ_LOGGER)\n\t \n\t \n\t#------------------------\n\t# Prepare settings object\n\t#------------------------\n\t \n\tsettings = Settings()\n\tsettings.setFrom(imp)\n\t \n\t# Configure detector - We use the Strings for the keys\n\tsettings.detectorFactory = DogDetectorFactory()\n\tsettings.detectorSettings = { \n\t 'DO_SUBPIXEL_LOCALIZATION' : True,\n\t 'RADIUS' : 0.6,\n\t 'TARGET_CHANNEL' : 1,\n\t 'THRESHOLD' : 50.0,\n\t 'DO_MEDIAN_FILTERING' : False,\n\t} \n\t \n\t# Configure spot filters - Classical filter on quality\n\tfilter1 = FeatureFilter('QUALITY', 50, True)\n\tsettings.addSpotFilter(filter1)\n\t\n\tfilter2 = FeatureFilter('POSITION_Z', 0.3, True)\n\tsettings.addSpotFilter(filter2)\n\t\n\tfilter3 = FeatureFilter('POSITION_Z', 3.0, False)\n\tsettings.addSpotFilter(filter3)\n\t\n\t\n\t# Configure tracker - We want to allow merges and fusions\n\tsettings.trackerFactory = SparseLAPTrackerFactory()\n\tsettings.trackerSettings = LAPUtils.getDefaultLAPSettingsMap() # almost good enough\n\t\n\t# Frame to frame linking\n\tsettings.trackerSettings['LINKING_MAX_DISTANCE'] = 2.0\n\tsettings.trackerSettings['LINKING_FEATURE_PENALTIES'] = {'POSITION_Y': 8.0}\n\t# Track segment gap closing\n\tsettings.trackerSettings['ALLOW_GAP_CLOSING'] = True\n\tsettings.trackerSettings['GAP_CLOSING_MAX_DISTANCE'] = 4.0\n\tsettings.trackerSettings['MAX_FRAME_GAP'] = 2\n\tsettings.trackerSettings['GAP_CLOSING_FEATURE_PENALTIES'] = {'POSITION_Y': 8.0}\n\t# Track segment splitting\n\tsettings.trackerSettings['ALLOW_TRACK_SPLITTING'] = False\n\t# Track segment merging\n\tsettings.trackerSettings['ALLOW_TRACK_MERGING'] = False\n\t\n\t\n\t\n\t# Configure track analyzers - Later on we want to filter out tracks \n\t# based on their displacement, so we need to state that we want \n\t# track displacement to be calculated. 
By default, out of the GUI, \n\t# not features are calculated. \n\t \n\t# The displacement feature is provided by the TrackDurationAnalyzer.\n\t \n\tsettings.addTrackAnalyzer(TrackDurationAnalyzer())\n\t \n\t# Configure track filters - We want to get rid of the two immobile spots at \n\t# the bottom right of the image. Track displacement must be above 10 pixels.\n\t \n\tfilter4 = FeatureFilter('NUMBER_SPOTS', 9.03, True)\n\tsettings.addTrackFilter(filter4)\n\t \n\t \n\t#-------------------\n\t# Instantiate plugin\n\t#-------------------\n\t \n\ttrackmate = TrackMate(model, settings)\n\t \n\t#--------\n\t# Process\n\t#--------\n\t \n\tok = trackmate.checkInput()\n\tif not ok:\n\t sys.exit(str(trackmate.getErrorMessage()))\n\t \n\tok = trackmate.process()\n\tif not ok:\n\t sys.exit(str(trackmate.getErrorMessage()))\n\t \n\t \n\t#----------------\n\t# Display results\n\t#----------------\n\t''' \n\tselectionModel = SelectionModel(model)\n\tdisplayer = HyperStackDisplayer(model, selectionModel, imp)\n\tdisplayer.render()\n\tdisplayer.refresh()\n\t \n\t# Echo results with the logger we set at start:\n\tmodel.getLogger().log(str(model))\n\t'''\n\t\n\toutputFile = File(outputFile)\n\twriter = TmXmlWriter(outputFile)\n\twriter.appendModel(model)\n\twriter.writeToFile()"
},
{
"alpha_fraction": 0.6399999856948853,
"alphanum_fraction": 0.6763636469841003,
"avg_line_length": 24,
"blob_id": "2c1b19ef09cc6b985bdc077b39b015c0b5e44cae",
"content_id": "8b7d34f12a76c39419c521f6798375379017f030",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 275,
"license_type": "no_license",
"max_line_length": 131,
"num_lines": 11,
"path": "/MitoMoveData/xml2mat.sh",
"repo_name": "hazxel/NJU-Lab136-HMM-Project",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env bash\n\nDIR=$(cd $(dirname $0); pwd)\n\nSOURCEDIR=${DIR}/trajectory\nTARGETDIR=${DIR}/../HMM\n\nfor TEMPERATURE in 27 30 32 37\ndo\n ${DIR}/trackmate -vv --mat ${TARGETDIR}/mito${TEMPERATURE}/trajectory.mat ${SOURCEDIR} '^.*Div7MitoMove'${TEMPERATURE}'C.*\\.xml$'\ndone\n"
},
{
"alpha_fraction": 0.558772623538971,
"alphanum_fraction": 0.568843424320221,
"avg_line_length": 29.401914596557617,
"blob_id": "e8caa7a1b2feccdbd201318f0068a9b3530cbce0",
"content_id": "53d17e822555c3855ce2fcd1fe9d2ce96e72942a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 6355,
"license_type": "no_license",
"max_line_length": 163,
"num_lines": 209,
"path": "/HMM/+HiddenMarkov/calcTrajectoryOne.bak.cpp",
"repo_name": "hazxel/NJU-Lab136-HMM-Project",
"src_encoding": "UTF-8",
"text": "\n#include <algorithm>\n#include <bitset>\n#include <chrono>\n#include <complex>\n#include <functional>\n#include <initializer_list>\n#include <iterator>\n#include <limits>\n#include <memory>\n#include <new>\n#include <numeric>\n#include <random>\n#include <ratio>\n#include <tuple>\n#include <utility>\n#include <valarray>\n\n#include <array>\n#include <deque>\n#include <forward_list>\n#include <list>\n#include <map>\n#include <queue>\n#include <set>\n#include <stack>\n#include <unordered_map>\n#include <unordered_set>\n#include <vector>\n\n#include <fstream>\n#include <iomanip>\n#include <iostream>\n#include <sstream>\n\n#include <atomic>\n#include <condition_variable>\n#include <future>\n#include <mutex>\n#include <thread>\n\n#include \"mex.hpp\"\n#include \"mexAdapter.hpp\"\n\nnamespace std {\n\ntemplate <typename _Elem, typename _Traits, typename _Ty, typename _Alloc, template <typename, typename> typename _Container>\nbasic_ostream<_Elem, _Traits> & operator<<(basic_ostream<_Elem, _Traits> & _Ostr, _Container<_Ty, _Alloc> const & _Right) {\n _Ostr << \"[\";\n for (auto const & _Val : _Right) { _Ostr << \" \" << _Val; }\n _Ostr << \" ]\";\n return _Ostr;\n}\n\n} // namespace std\n\nvoid throw_matlab_exception(std::string const & file, size_t line, std::string const & func, std::string const & msg) {\n std::stringstream ss;\n ss << \"file \" << file << \" line \" << line << std::endl;\n ss << func << \": \" << msg;\n throw matlab::engine::MATLABException(ss.str());\n}\n\nvoid log_matlab_message(std::string const & file, size_t line, std::string const & func, std::string const & msg) {\n if (true) {\n std::cout << func;\n if (msg != \"\") std::cout << \": \" << msg;\n std::cout << std::endl;\n }\n}\n\n#define THROW_MATLAB(msg) throw_matlab_exception(__FILE__, __LINE__, __FUNCTION__, msg)\n// #define LOG_MATLAB(msg) log_matlab_message(__FILE__, __LINE__, __FUNCTION__, msg)\n#if 1\n#define mout (std::cout << __FUNCTION__ << \" \")\n#else\nclass nstream {\n 
using _Myt = nstream;\n\npublic:\n template <typename _Ty>\n _Myt & operator<<(_Ty const & _Val) { return *this; }\n _Myt & operator<<(std::ostream &(__cdecl * _Pfn)(std::ostream &)) { return *this; }\n} nout;\n\n#define mout nout\n#endif\n\nusing namespace std;\nusing namespace matlab::engine;\nusing namespace matlab::data;\nusing namespace matlab::mex;\n\nclass MexFunction : public Function {\npublic:\n void operator()(ArgumentList outputs, ArgumentList inputs) {\n cout << boolalpha << endl;\n\n checkArguments(outputs, inputs);\n inputArguments(inputs);\n\n run();\n\n outputArguments(outputs);\n\n cout << endl;\n }\n\n void run_thread(size_t s_min, size_t s_max, size_t seed) {\n uniform_real_distribution<double> rd_d;\n mt19937_64 rd_g(seed);\n auto rd = bind(rd_d, rd_g);\n\n vector<double> dx(s_max - s_min, 0.0);\n for (size_t i = 0; i < ntimes; ++i) {\n for (size_t j = s_min; j < s_max; ++j) {\n auto r = rd();\n for (size_t k = 0; k < step; ++k) {\n if (r < F[s[j] * step + k]) {\n s[j] = FI[s[j] * step + k];\n dxs[j] += vc[s[j]];\n break;\n }\n }\n }\n }\n }\n\n void run() {\n mout << endl;\n\n size_t const run_th_num = 16;\n size_t run_th_step = (ntrajs - 1) / run_th_num + 1;\n thread run_th[run_th_num];\n for (size_t i = 0; i < run_th_num; ++i) {\n mout << \"thread: \" << i << \" \" << endl;\n run_th[i] = thread(&MexFunction::run_thread, this, i * run_th_step, min((i + 1) * run_th_step, ntrajs), i);\n }\n for (size_t i = 0; i < run_th_num; ++i) run_th[i].join();\n }\n\n void checkArguments(ArgumentList outputs, ArgumentList inputs) {\n if (inputs.size() != 6) THROW_MATLAB(\"inputs requires 6 arguments\");\n if (outputs.size() != 1) THROW_MATLAB(\"outputs requires 1 arguments\");\n\n for (auto & input : inputs) {\n if (input.getType() != ArrayType::DOUBLE) THROW_MATLAB(\"input arguments has to be double\");\n }\n\n if (inputs[0].getDimensions().size() != 2) THROW_MATLAB(\"1st arguments has to be matrix\"); // F\n if (inputs[1].getDimensions().size() != 2) 
THROW_MATLAB(\"2nd arguments has to be matrix\"); // FI\n if (inputs[0].getDimensions() != inputs[1].getDimensions()) THROW_MATLAB(\"1st & 2nd arguments have to be the same size\"); // F & FI\n if (inputs[0].getDimensions()[1] != inputs[2].getNumberOfElements()) THROW_MATLAB(\"col of 1st argument and number of 3rd argument have to be equal\"); // F & vc\n if (inputs[3].getDimensions().size() != 2) THROW_MATLAB(\"4th arguments has to be matrix\"); // i\n if (inputs[4].getNumberOfElements() != 1) THROW_MATLAB(\"5th argument has to be scalar\"); // ntrajs\n if (inputs[5].getNumberOfElements() != 1) THROW_MATLAB(\"6th argument has to be scalar\"); // ntimes\n }\n\n void inputArguments(ArgumentList inputs) {\n mout << endl;\n\n step = inputs[0].getDimensions()[0];\n mout << \"get step: \" << step << endl;\n\n TypedArray<double> tmp = move(inputs[0]);\n F = move(vector<double>(tmp.begin(), tmp.end()));\n mout << \"get F : \" << F.size() << endl;\n\n tmp = move(inputs[1]);\n FI = move(vector<size_t>(tmp.begin(), tmp.end()));\n for (auto & FI : FI) --FI;\n mout << \"get FI : \" << FI.size() << endl;\n\n tmp = move(inputs[2]);\n vc = move(vector<double>(tmp.begin(), tmp.end()));\n mout << \"get vc : \" << vc.size() << endl;\n\n ntrajs = inputs[4][0];\n ntimes = inputs[5][0];\n\n s = move(vector<size_t>(ntrajs, size_t(inputs[3][0]) - 1));\n mout << \"get s : \" << s.size() << endl;\n\n dxs = move(vector<double>(ntrajs, 0.0));\n mout << \"get dxs : \" << dxs.size() << endl;\n }\n\n void outputArguments(ArgumentList outputs) {\n mout << endl;\n\n mout << \"ntrajs: \" << ntrajs << endl;\n mout << \"dxs size: \" << dxs.size() << endl;\n\n ArrayFactory factory;\n outputs[0] = factory.createArray({ ntrajs, 1 }, dxs.begin(), dxs.end());\n mout << \"set dxs : \" << ArrayDimensions{ ntrajs, 1 } << endl;\n }\n\nprivate:\n vector<double> F;\n vector<size_t> FI;\n vector<double> vc;\n size_t ntrajs, ntimes;\n\n vector<size_t> s;\n vector<double> dxs;\n size_t step;\n\n vector<double> 
rs;\n};\n"
}
] | 7 |
blankenberg/galaxy-tools-blankenberg
|
https://github.com/blankenberg/galaxy-tools-blankenberg
|
219b2f403497a8cbc053ec93cfb46aeb9fdf68f3
|
2b81f6b9cadf15796ef7f1f6a0d12f05d3f405d7
|
d0fcdf8144be5ff2b4ca1ddcfd7abaff20ad1761
|
refs/heads/main
| 2022-05-16T14:31:41.561605 | 2021-10-01T14:17:26 | 2021-10-01T14:17:26 | 157,772,588 | 0 | 1 | null | 2018-11-15T21:05:20 | 2018-11-15T21:02:11 | 2018-11-15T21:02:09 | null |
[
{
"alpha_fraction": 0.6234666705131531,
"alphanum_fraction": 0.6309333443641663,
"avg_line_length": 40.66666793823242,
"blob_id": "15ff8b10048b7685d787af336cd7824d4cd275fc",
"content_id": "6fb2f215bd2ade0940ca921937dccdeb8e688678",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1875,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 45,
"path": "/data_managers/data_manager_refgenie_pull/data_manager/data_manager_refgenie_pull.py",
"repo_name": "blankenberg/galaxy-tools-blankenberg",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\n\nimport argparse\nimport logging\nfrom base64 import urlsafe_b64encode\nfrom urllib.parse import urljoin\n\nimport refgenconf\nimport requests\n\n\nlog = logging.getLogger(\"tools.iuc.data_managers.data_manager_refgenie_pull\")\n\n\ndef galaxy_code_get_refgenie_assets(refgenie_config_file):\n try:\n rgc = refgenconf.RefGenConf(refgenie_config_file, writable=False, skip_read_lock=True)\n except refgenconf.exceptions.RefgenconfError as e:\n return[{'name': str(e), 'value': 'ERROR', 'options': [], 'selected': False}]\n rval = []\n for urlname, genomes in rgc.listr().items():\n urlname_64 = urlsafe_b64encode(bytes(urlname, 'utf8')).decode('utf8')\n ul = []\n for genome, assets in genomes.items():\n al = []\n for name in assets:\n al.append({'name': name, 'value': '%s/%s/%s' % (urlname_64, genome, name), 'options': [], 'selected': False})\n ul.append({'name': genome, 'value': genome, 'options': al, 'selected': False})\n rval.append({'name': urlname, 'value': urlname_64, 'options': ul, 'selected': False})\n return rval\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-n', '--names', dest='names', action='store', default=None, help='Table names to reload')\n parser.add_argument('-u', '--url', dest='url', action='store', default=None, help='Base url for reload')\n parser.add_argument('-k', '--key', dest='key', action='store', default=None, help='Galaxy API Key')\n\n args = parser.parse_args()\n if not args.names:\n tables = requests.get(urljoin(args.url, \"api/tool_data\"), params={'key': args.key}).json()\n args.names = [d.get('name') for d in tables]\n for name in args.names:\n print(requests.get(urljoin(args.url, \"api/tool_data/%s/reload\" % (name)), params={'key': args.key}).json())\n"
}
] | 1 |
AbidelLux/Proyecto02_1S2021IPC2_JAMES
|
https://github.com/AbidelLux/Proyecto02_1S2021IPC2_JAMES
|
0822be98f733a738e35f2045d4b278b1316935ad
|
2f40cd2bcdb0c12bb4f3058d219e4b584d147bab
|
09f80bcbe1b8368bfd5bc3431ebfaba6e9b17880
|
refs/heads/main
| 2023-05-12T10:29:22.148030 | 2021-04-05T01:26:50 | 2021-04-05T01:26:50 | 373,933,418 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5603898167610168,
"alphanum_fraction": 0.5892794728279114,
"avg_line_length": 37.9322509765625,
"blob_id": "d146ae8b015b43541625082f19f38e928d2beaa9",
"content_id": "39b1022db6342273358c7cee215930095a705542",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14366,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 369,
"path": "/operacionalMatriz.py",
"repo_name": "AbidelLux/Proyecto02_1S2021IPC2_JAMES",
"src_encoding": "UTF-8",
"text": "from archivoLectura import lista,fechaHora\nfrom listaSimpleAuxiliar import listaEnlazadaMatriz\nfrom matrizOctagonal import matriz_Ortogonal\nfrom matrizOctagonal import graficar_matriz\nfrom graficoGraphviz import crearDot, crearDot3,crearDot6\nfrom graficoGraphviz import crearDot2,crearDot4,crearDot5\nfrom inicio import report\nname=\"\"\nfila=0\ncolumna=0\nsizeMatriz=0\nresultado=\"\"\ndef matriz(nombre,bandera):\n global fila,columna,name,sizeMatriz\n matrizOriginal=matriz_Ortogonal()\n picture=\"\"\n for dato in lista.iterar():\n if dato[0].upper() == nombre.upper():\n name=dato[0]\n fila=int(dato[1])\n columna=int(dato[2])\n picture=dato[3]\n #picture=picture.replace(\"-\\n\",\"- \\n\")\n #picture=picture.replace(\"*\\n\",\"* \\n\")\n #picture=picture.replace(\"\\n\",\"\")\n picture= picture.split(\"\\n\")\n picture.pop(0)\n numero=int(len(picture))\n picture.pop(numero-1)\n #picture=picture.remove(\"\")\n for x in range(fila):\n for y in range(columna):\n if picture[x][y]==\"*\":\n matrizOriginal.insertar(x+1,y+1,picture[x][y])\n sizeMatriz=int(len(picture))+2 \n #graficar_matriz(matrizOriginal,\"Matriz_Original\")\n if bandera==True:\n report.add(''+str(fechaHora())+'Generanado matriz Original - matriz Usada: '+nombre+'')\n crearDot(matrizOriginal,fila,columna,sizeMatriz,\"Original\")\n return matrizOriginal\n\ndef horizontal(dato):\n global fila,columna,name,sizeMatriz,resultado\n Original=matriz(dato,True)\n report.add(''+str(fechaHora())+'Rotacion Horizontal - matriz Usada: '+dato+'')\n prueba0=voltearMatriz(Original)\n prueba1=rotarMatriz(prueba0)\n prueba2=rotarMatriz(prueba1)\n resultado=prueba2\n #crearDot(prueba0,fila,columna,sizeMatriz,\"Resultado1\")\n crearDot(prueba2,fila,columna,sizeMatriz,\"Resultado\")\n\ndef traspuesta(dato):\n global fila,columna,name,sizeMatriz,resultado\n Original=matriz(dato,True)\n report.add(''+str(fechaHora())+'Traspuesta - matriz Usada: '+dato+'')\n prueba0=voltearMatriz(Original)\n 
prueba1=rotarMatriz(prueba0)\n resultado=prueba1\n #crearDot(prueba0,fila,columna,sizeMatriz,\"Resultado1\")\n crearDot(prueba1,fila,columna,sizeMatriz,\"Resultado\") \n\ndef vertical(dato):\n global fila,columna,name,sizeMatriz,resultado\n Original=matriz(dato,True)\n report.add(''+str(fechaHora())+'Rotacion vertical - matriz Usada: '+dato+'')\n prueba0=voltearMatriz(Original) \n resultado=prueba0 \n crearDot(prueba0,fila,columna,sizeMatriz,\"Resultado\") \ndef deletMatriz(dato,x1,y1,x2,y2):\n global fila,columna,name,sizeMatriz,resultado\n listaAux2=listaEnlazadaMatriz()\n matrizPrueba=matriz_Ortogonal()\n Original=matriz(dato,True)\n report.add(''+str(fechaHora())+'Limpiar Area - matriz Usada: '+dato+'')\n for i in range(1,fila+1):\n for j in range(1,columna+1):\n if Original.buscar(i,j)==True:\n if i>=int(x1) and i<=int(x2) and j>=int(y1) and j<=int(y2):\n continue\n #elif i<=int(x2)\n else:\n listaAux2.add(i,j,\"*\")\n for x in range(1,fila+1):\n for y in range(1,columna+1):\n if listaAux2.buscar2(x,y)==True:\n matrizPrueba.insertar(x,y,\"*\") \n resultado=matrizPrueba\n crearDot2(matrizPrueba,fila,columna,sizeMatriz,\"Resultado\",x1,y1,x2,y2) \n\ndef agregar_H(dato,x1,y1,x2,y2):\n global fila,columna,name,sizeMatriz,resultado\n listaAux2=listaEnlazadaMatriz()\n matrizPrueba=matriz_Ortogonal()\n Original=matriz(dato,True)\n report.add(''+str(fechaHora())+'Agregar Linea Horizontal - matriz Usada: '+dato+'')\n for i in range(1,fila+1):\n for j in range(1,columna+1):\n if Original.buscar(i,j)==True:\n listaAux2.add(i,j,\"*\")\n else:\n if i>=int(x1) and i<=int(x2) and j>=int(y1) and j<=int(y2):\n listaAux2.add(i,j,\"*\")\n else:\n continue\n for x in range(1,fila+1):\n for y in range(1,columna+1):\n if listaAux2.buscar2(x,y)==True:\n matrizPrueba.insertar(x,y,\"*\") \n resultado=matrizPrueba\n #crearDot(matrizPrueba,fila,columna,sizeMatriz,\"Resultado\") \n crearDot3(matrizPrueba,fila,columna,sizeMatriz,\"Resultado\",x1,y1,x2,y2) \ndef 
agregar_V(dato,x1,y1,x2,y2):\n global fila,columna,name,sizeMatriz,resultado\n listaAux2=listaEnlazadaMatriz()\n matrizPrueba=matriz_Ortogonal()\n Original=matriz(dato,True)\n report.add(''+str(fechaHora())+'Agregar Linea Vertical - matriz Usada: '+dato+'')\n for i in range(1,fila+1):\n for j in range(1,columna+1):\n if Original.buscar(i,j)==True:\n listaAux2.add(i,j,\"*\")\n else:\n if i>=int(x1) and i<=int(x2) and j>=int(y1) and j<=int(y2):\n listaAux2.add(i,j,\"*\")\n else:\n continue\n for x in range(1,fila+1):\n for y in range(1,columna+1):\n if listaAux2.buscar2(x,y)==True:\n matrizPrueba.insertar(x,y,\"*\") \n resultado=matrizPrueba\n #crearDot(matrizPrueba,fila,columna,sizeMatriz,\"Resultado\") \n crearDot3(matrizPrueba,fila,columna,sizeMatriz,\"Resultado\",x1,y1,x2,y2)\ndef agregar_R(dato,x1,y1,x2,y2):\n global fila,columna,name,sizeMatriz,resultado\n listaAux2=listaEnlazadaMatriz()\n matrizPrueba=matriz_Ortogonal()\n Original=matriz(dato,True)\n report.add(''+str(fechaHora())+'Agregar Rectangulo - matriz Usada: '+dato+'')\n for i in range(1,fila+1):\n for j in range(1,columna+1):\n if Original.buscar(i,j)==True:\n listaAux2.add(i,j,\"*\")\n else:\n if i==int(x1) and j>=int(y1) and j<=int(y2):\n listaAux2.add(i,j,\"*\")\n elif j==int(y1) and i>=int(x1) and i<=int(x2):\n listaAux2.add(i,j,\"*\")\n elif j==int(y2) and i>=int(x1) and i<=int(x2):\n listaAux2.add(i,j,\"*\")\n elif i==int(x2) and j>=int(y1) and j<=int(y2):\n listaAux2.add(i,j,\"*\")\n else:\n continue\n for x in range(1,fila+1):\n for y in range(1,columna+1):\n if listaAux2.buscar2(x,y)==True:\n matrizPrueba.insertar(x,y,\"*\") \n resultado=matrizPrueba\n #crearDot(matrizPrueba,fila,columna,sizeMatriz,\"Resultado\") \n crearDot4(matrizPrueba,fila,columna,sizeMatriz,\"Resultado\",x1,y1,x2,y2) \n \ndef agregar_T(dato,x1,y1,x2,y2):\n global fila,columna,name,sizeMatriz,resultado\n listaAux2=listaEnlazadaMatriz()\n matrizPrueba=matriz_Ortogonal()\n Original=matriz(dato,True)\n 
report.add(''+str(fechaHora())+'Agregar Triangulo Rectangulo - matriz Usada: '+dato+'')\n n=1\n for i in range(1,fila+1):\n bandera=False\n for j in range(1,columna+1):\n if Original.buscar(i,j)==True:\n if j==(int(y1)+n) and j<int(y2) and i>int(x1) and i<int(x2) and bandera==False:\n listaAux2.add(i,j,\"*\")\n bandera=True\n n+=1\n else: \n listaAux2.add(i,j,\"*\")\n else:\n if i==int(x1) and j==int(y1):\n listaAux2.add(i,j,\"*\")\n elif j==int(y1) and i>int(x1) and i<int(x2):\n listaAux2.add(i,j,\"*\")\n elif j==(int(y1)+n) and j<int(y2) and i>int(x1) and i<int(x2) and bandera == False:\n listaAux2.add(i,j,\"*\")\n bandera=True\n n+=1\n elif i==int(x2) and j>=int(y1) and j<=int(y2):\n listaAux2.add(i,j,\"*\")\n else:\n continue \n for x in range(1,fila+1):\n for y in range(1,columna+1):\n if listaAux2.buscar2(x,y)==True:\n matrizPrueba.insertar(x,y,\"*\") \n resultado=matrizPrueba\n #crearDot(matrizPrueba,fila,columna,sizeMatriz,\"Resultado\") \n crearDot5(matrizPrueba,fila,columna,sizeMatriz,\"Resultado\",x1,y1,x2,y2) \n\ndef Union(dato1, dato2, fila1,columna1,x1,y1,x2,y2):\n global sizeMatriz,resultado,fila,columna\n listaAux2=listaEnlazadaMatriz()\n matrizPrueba=matriz_Ortogonal()\n Original1=matriz(dato1,False) \n Original2=matriz(dato2,False)\n report.add(''+str(fechaHora())+'Union - matrices Usadas: ('+dato1+','+dato2+')')\n name=''+str(dato1)+' Union '+str(dato2)\n crearDot6(Original1,Original2,x1,y1,x2,y2,name,\"Original\") \n for i in range(1,fila1+1):\n for j in range(1,columna1+1):\n if Original1.buscar(i,j)==False and Original2.buscar(i,j)==False:\n continue\n else:\n listaAux2.add(i,j,\"*\")\n for x in range(1,fila1+1):\n for y in range(1,columna1+1):\n if listaAux2.buscar2(x,y)==True:\n matrizPrueba.insertar(x,y,\"*\") \n \n resultado=matrizPrueba\n fila=fila1\n columna=columna1\n crearDot(matrizPrueba,fila1,columna1,sizeMatriz,\"Resultado\") \n\ndef intersection(dato1, dato2, fila1,columna1,x1,y1,x2,y2):\n global sizeMatriz,resultado,fila,columna\n 
listaAux2=listaEnlazadaMatriz()\n matrizPrueba=matriz_Ortogonal()\n Original1=matriz(dato1,False) \n Original2=matriz(dato2,False)\n report.add(''+str(fechaHora())+'Interseccion - matrices Usadas: ('+dato1+','+dato2+')')\n name=''+str(dato1)+' Intersección '+str(dato2)\n crearDot6(Original1,Original2,x1,y1,x2,y2,name,\"Original\") \n for i in range(1,fila1+1):\n for j in range(1,columna1+1):\n if Original1.buscar(i,j)==True and Original2.buscar(i,j)==True:\n listaAux2.add(i,j,\"*\")\n else:\n continue\n for x in range(1,fila1+1):\n for y in range(1,columna1+1):\n if listaAux2.buscar2(x,y)==True:\n matrizPrueba.insertar(x,y,\"*\") \n resultado=matrizPrueba\n fila=fila1\n columna=columna1\n crearDot(matrizPrueba,fila1,columna1,sizeMatriz,\"Resultado\") \n\ndef dif(dato1, dato2, fila1,columna1,x1,y1,x2,y2):\n global sizeMatriz,resultado,fila,columna\n listaAux2=listaEnlazadaMatriz()\n matrizPrueba=matriz_Ortogonal()\n Original1=matriz(dato1,False) \n Original2=matriz(dato2,False)\n report.add(''+str(fechaHora())+'Diferencia - matrices Usadas: ('+dato1+','+dato2+')')\n name=''+str(dato1)+' - '+str(dato2)\n crearDot6(Original1,Original2,x1,y1,x2,y2,name,\"Original\") \n for i in range(1,fila1+1):\n for j in range(1,columna1+1):\n if Original1.buscar(i,j)==True and Original2.buscar(i,j)==True:\n continue\n else:\n if Original1.buscar(i,j)==True and Original2.buscar(i,j)==False:\n listaAux2.add(i,j,\"*\")\n else:\n continue\n for x in range(1,fila1+1):\n for y in range(1,columna1+1):\n if listaAux2.buscar2(x,y)==True:\n matrizPrueba.insertar(x,y,\"*\") \n resultado=matrizPrueba\n fila=fila1\n columna=columna1 \n crearDot(matrizPrueba,fila1,columna1,sizeMatriz,\"Resultado\") \n \ndef simetria(dato1, dato2, fila1,columna1,x1,y1,x2,y2):\n global sizeMatriz,resultado,fila,columna\n listaAux2=listaEnlazadaMatriz()\n matrizPrueba=matriz_Ortogonal()\n Original1=matriz(dato1,False) \n Original2=matriz(dato2,False)\n report.add(''+str(fechaHora())+'Diferencia Simetrica - matrices 
Usadas: ('+dato1+','+dato2+')')\n name=''+str(dato1)+' - '+str(dato2)\n crearDot6(Original1,Original2,x1,y1,x2,y2,name,\"Original\") \n for i in range(1,fila1+1):\n for j in range(1,columna1+1):\n if Original1.buscar(i,j)==True and Original2.buscar(i,j)==True:\n continue\n else:\n if Original1.buscar(i,j)==True and Original2.buscar(i,j)==False:\n listaAux2.add(i,j,\"*\")\n elif Original1.buscar(i,j)==False and Original2.buscar(i,j)==True:\n listaAux2.add(i,j,\"*\")\n else:\n continue\n for x in range(1,fila1+1):\n for y in range(1,columna1+1):\n if listaAux2.buscar2(x,y)==True:\n matrizPrueba.insertar(x,y,\"*\") \n resultado=matrizPrueba\n fila=fila1\n columna=columna1\n crearDot(matrizPrueba,fila1,columna1,sizeMatriz,\"Resultado\") \n \ndef voltearMatriz(lista):\n global fila,columna,sizeMatriz\n listaAux2=listaEnlazadaMatriz()\n matrizPrueba=matriz_Ortogonal() \n for i in range(1,fila+1):\n for j in range(1,columna+1):\n if lista.buscar(i,j)==True:\n listaAux2.add(i,((sizeMatriz-1)-j),\"*\")\n else:\n continue\n \n for x in range(1,fila+1):\n for y in range(1,columna+1):\n if listaAux2.buscar2(x,y)==True:\n matrizPrueba.insertar(x,y,\"*\")\n #graficar_matriz(matrizPrueba,\"matrizPrueba\") \n #crearDot(matrizPrueba,fila,columna,sizeMatriz,\"vuelta\") \n return matrizPrueba\n \ndef rotarMatriz(lista): \n global fila,columna,sizeMatriz\n listaAux=listaEnlazadaMatriz()\n matrizPrueba=matriz_Ortogonal() \n for i in range(1,fila+1):\n for j in range(1,columna+1):\n if lista.buscar(i,j)==True:\n listaAux.add(((sizeMatriz-1)-j),(i),\"*\")\n else:\n continue\n \n for x in range(1,fila+1):\n for y in range(1,columna+1):\n if listaAux.buscar2(x,y)==True:\n matrizPrueba.insertar(x,y,\"*\")\n #graficar_matriz(matrizPrueba,\"matrizPrueba\") \n return matrizPrueba\n\ndef modificar(nombre):\n global fila,columna,resultado\n image=\"\"\n for i in range(1,fila+1):\n image+='\\n'\n for j in range(1,columna+1):\n if resultado.buscar(i,j)==True:\n image+=\"*\"\n else:\n 
image+=\"-\"\n image+=\"\\n\"\n print(image)\n lista.modificar(nombre,image)\ndef guardar(nombre):\n global fila,columna,resultado\n image=\"\"\n for i in range(1,fila+1):\n image+='\\n'\n for j in range(1,columna+1):\n if resultado.buscar(i,j)==True:\n image+=\"*\"\n else:\n image+=\"-\"\n image+=\"\\n\"\n print(image)\n lista.add(nombre,fila,columna,image)"
},
{
"alpha_fraction": 0.40332457423210144,
"alphanum_fraction": 0.4160104990005493,
"avg_line_length": 28.320512771606445,
"blob_id": "0298dfd9575b3a4391b08f66598de530942de49a",
"content_id": "b8bbad0072189e91a549518f2bb14b8fd21a5653",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2289,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 78,
"path": "/ListaReport.py",
"repo_name": "AbidelLux/Proyecto02_1S2021IPC2_JAMES",
"src_encoding": "UTF-8",
"text": "class Nodo:\n def __init__ (self,dato):\n #self.nombre=nombre\n self.dato=dato\n self.Siguiente=None\n\n\nclass listaEnlazadaMatriz:\n def __init__(self):\n self.inicio=None\n self.cola=None\n self.tamaño=0\n \n def add(self,dato):\n nodo=Nodo(dato)\n self.tamaño +=1\n \n if self.inicio:\n self.inicio.Siguiente=nodo\n self.inicio=nodo\n else:\n self.inicio=nodo\n self.cola=nodo\n \n def iterar(self):\n actual = self.cola\n\n while actual:\n #nombre = actual.nombre\n #x=actual.CoorX\n # y=actual.CoorY\n dato=actual.dato\n actual = actual.Siguiente\n yield dato\n '''\n def crearlista(self,b,v):\n dato=\"\"\n for x in range(b):\n for y in range(v):\n for n in self.iterar():\n if x+1==int(n[0]) and y+1==int(n[1]): \n if y+1==1:\n dato+=\"[\"+str(n[2])+\",\"\n elif y+1==v and (x+1)!=b:\n dato+=str(n[2])+\"]/\"\n elif x+1==b and y+1==v:\n dato+=str(n[2])+\"]\"\n else:\n dato+=str(n[2])+\",\"\n dato=dato.split(\"/\")\n return dato \n ''' \n def buscar(self,x,y):\n for n in self.iterar():\n if x==int(n[0]) and y==int(n[1]):\n return n[2]\n def buscar2(self,x,y):\n for n in self.iterar():\n if x==n[0] and y==n[1]:\n return True\n return False\n def eliminar(self,x,y):\n actual=self.cola\n anterior=self.cola\n \n while actual:\n if int(actual.CoorX)==x and int(actual.CoorY)==y:\n if actual==self.cola:\n self.cola=actual.Siguiente\n else:\n # suponermos que tengo [1]->[2]->[3]\n #ahora quiero eliminar [2]\n #mi resultado [1]->[3]\n anterior.Siguiente=actual.Siguiente\n self.tamaño-=1\n return\n anterior=actual\n actual=actual.Siguiente"
},
{
"alpha_fraction": 0.44054505228996277,
"alphanum_fraction": 0.4653465449810028,
"avg_line_length": 42.04641342163086,
"blob_id": "1e79ae0b04ccf3cc15a8309378a47a5435dd6eab",
"content_id": "e56e8bb4a218afa85d416ae92414c6f57c26479f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10201,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 237,
"path": "/graficoGraphviz.py",
"repo_name": "AbidelLux/Proyecto02_1S2021IPC2_JAMES",
"src_encoding": "UTF-8",
"text": "import graphviz\nfrom graphviz import Digraph\nfrom graphviz import Source\nfrom graphviz import render\nimport os.path as path\nimport os\ndef crearDot(lista,fila,columna,sizeMatriz,name):\n graph='digraph {\\n\\ttbl [\\n\\tsize=\"4,4\"\\n\\tshape=plaintext\\n\\tlabel=<\\n\\n\\t\\t'\n graph+='<table border=\"0\" color=\"green\" cellpadding=\"10\" cellborder=\"1\" cellspacing=\"0\">\\n\\t'\n graph1=''\n for x in range(fila+1):\n graph1+='<tr>'\n for y in range(columna+1):\n if x==0 and y==0:\n graph1+='<td bgcolor = \"red\" color=\"black\"> A </td>'\n elif x==0 and y>0:\n graph1+='<td color=\"green\">'+str(y)+'</td>'\n elif x>0 and y==0:\n graph1+='<td color=\"green\"> '+str(x)+' </td>'\n else:\n if lista.buscar(x,y)==True:\n graph1+='<td color=\"blue\"> * </td>'\n else:\n graph1+='<td color=\"blue\"> </td>'\n if y==columna:\n graph1+='</tr>\\n\\t\\t\\t'\n graph2='</table>\\n\\t\\t>];\\n}' \n imagen=graph+graph1+graph2\n #f= open(name+\".dot\",\"w+\")\n #f.write(imagen)\n #f.close() \n #os.system(\"fdp -Tpng -o \"+name+\".png \"+name+\".dot\")\n Grafico(imagen,name) \ndef crearDot2(lista,fila,columna,sizeMatriz,name,x1,y1,x2,y2):\n graph='digraph {\\n\\ttbl [\\n\\tsize=\"4,4\"\\n\\tshape=plaintext\\n\\tlabel=<\\n\\n\\t\\t'\n graph+='<table border=\"0\" color=\"green\" cellpadding=\"10\" cellborder=\"1\" cellspacing=\"0\">\\n\\t'\n graph1=''\n for x in range(fila+1):\n graph1+='<tr>'\n for y in range(columna+1):\n if x==0 and y==0:\n graph1+='<td bgcolor = \"red\" color=\"black\"> A </td>'\n elif x==0 and y>0:\n graph1+='<td color=\"green\">'+str(y)+'</td>'\n elif x>0 and y==0:\n graph1+='<td color=\"green\"> '+str(x)+' </td>'\n else:\n if lista.buscar(x,y)==True:\n graph1+='<td color=\"blue\"> * </td>'\n else:\n if x>=int(x1) and x<=int(x2) and y>=int(y1) and y<=int(y2):\n graph1+='<td color=\"blue\" bgcolor=\"#eeeeee\"> </td>'\n else: \n graph1+='<td color=\"blue\"> </td>'\n if y==columna:\n graph1+='</tr>\\n\\t\\t\\t'\n graph2='</table>\\n\\t\\t>];\\n}' 
\n imagen=graph+graph1+graph2\n #f= open(name+\".dot\",\"w+\")\n #f.write(imagen)\n #f.close() \n #os.system(\"fdp -Tpng -o \"+name+\".png \"+name+\".dot\")\n Grafico(imagen,name) \ndef crearDot3(lista,fila,columna,sizeMatriz,name,x1,y1,x2,y2):\n graph='digraph {\\n\\ttbl [\\n\\tsize=\"4,4\"\\n\\tshape=plaintext\\n\\tlabel=<\\n\\n\\t\\t'\n graph+='<table border=\"0\" color=\"green\" cellpadding=\"10\" cellborder=\"1\" cellspacing=\"0\">\\n\\t'\n graph1=''\n for x in range(fila+1):\n graph1+='<tr>'\n for y in range(columna+1):\n if x==0 and y==0:\n graph1+='<td bgcolor = \"red\" color=\"black\"> A </td>'\n elif x==0 and y>0:\n graph1+='<td color=\"green\">'+str(y)+'</td>'\n elif x>0 and y==0:\n graph1+='<td color=\"green\"> '+str(x)+' </td>'\n else:\n if lista.buscar(x,y)==True:\n if x>=int(x1) and x<=int(x2) and y>=int(y1) and y<=int(y2):\n graph1+='<td color=\"blue\" bgcolor=\"#eeeeee\"> * </td>'\n else:\n graph1+='<td color=\"blue\"> * </td>'\n else:\n graph1+='<td color=\"blue\"> </td>'\n if y==columna:\n graph1+='</tr>\\n\\t\\t\\t'\n graph2='</table>\\n\\t\\t>];\\n}' \n imagen=graph+graph1+graph2\n #f= open(name+\".dot\",\"w+\")\n #f.write(imagen)\n #f.close() \n #os.system(\"fdp -Tpng -o \"+name+\".png \"+name+\".dot\") \n Grafico(imagen,name) \ndef crearDot4(lista,fila,columna,sizeMatriz,name,x1,y1,x2,y2):\n graph='digraph {\\n\\ttbl [\\n\\tsize=\"4,4\"\\n\\tshape=plaintext\\n\\tlabel=<\\n\\n\\t\\t'\n graph+='<table border=\"0\" color=\"green\" cellpadding=\"10\" cellborder=\"1\" cellspacing=\"0\">\\n\\t'\n graph1=''\n for x in range(fila+1):\n graph1+='<tr>'\n for y in range(columna+1):\n if x==0 and y==0:\n graph1+='<td bgcolor = \"red\" color=\"black\"> A </td>'\n elif x==0 and y>0:\n graph1+='<td color=\"green\">'+str(y)+'</td>'\n elif x>0 and y==0:\n graph1+='<td color=\"green\"> '+str(x)+' </td>'\n else:\n if lista.buscar(x,y)==True:\n if x==int(x1) and y>=int(y1) and y<=int(y2):\n graph1+='<td color=\"blue\" bgcolor=\"#eeeeee\"> * </td>'\n elif 
y==int(y1) and x>=int(x1) and x<=int(x2):\n graph1+='<td color=\"blue\" bgcolor=\"#eeeeee\"> * </td>'\n elif y==int(y2) and x>=int(x1) and x<=int(x2):\n graph1+='<td color=\"blue\" bgcolor=\"#eeeeee\"> * </td>'\n elif x==int(x2) and y>=int(y1) and y<=int(y2):\n graph1+='<td color=\"blue\" bgcolor=\"#eeeeee\"> * </td>'\n else: \n graph1+='<td color=\"blue\"> * </td>'\n else:\n graph1+='<td color=\"blue\"> </td>'\n if y==columna:\n graph1+='</tr>\\n\\t\\t\\t'\n graph2='</table>\\n\\t\\t>];\\n}' \n imagen=graph+graph1+graph2\n #f= open(name+\".dot\",\"w+\")\n #f.write(imagen)\n #f.close() \n #os.system(\"fdp -Tpng -o \"+name+\".png \"+name+\".dot\")\n Grafico(imagen,name) \ndef crearDot5(lista,fila,columna,sizeMatriz,name,x1,y1,x2,y2):\n graph='digraph {\\n\\ttbl [\\n\\tsize=\"4,4\"\\n\\tshape=plaintext\\n\\tlabel=<\\n\\n\\t\\t'\n graph+='<table border=\"0\" color=\"green\" cellpadding=\"10\" cellborder=\"1\" cellspacing=\"0\">\\n\\t'\n graph1=''\n n=1\n for x in range(fila+1):\n graph1+='<tr>'\n bandera=False\n for y in range(columna+1):\n if x==0 and y==0:\n graph1+='<td bgcolor = \"red\" color=\"black\"> A </td>'\n elif x==0 and y>0:\n graph1+='<td color=\"green\">'+str(y)+'</td>'\n elif x>0 and y==0:\n graph1+='<td color=\"green\"> '+str(x)+' </td>'\n else:\n if lista.buscar(x,y)==True:\n if x==int(x1) and y==int(y1): \n graph1+='<td color=\"blue\" bgcolor=\"#eeeeee\"> * </td>'\n elif y==int(y1) and x>int(x1) and x<int(x2):\n graph1+='<td color=\"blue\" bgcolor=\"#eeeeee\"> * </td>'\n elif y==(int(y1)+n) and y<int(y2) and x>int(x1) and x<int(x2) and bandera == False:\n graph1+='<td color=\"blue\" bgcolor=\"#eeeeee\"> * </td>'\n bandera=True\n n+=1\n elif x==int(x2) and y>=int(y1) and y<=int(y2):\n graph1+='<td color=\"blue\" bgcolor=\"#eeeeee\"> * </td>'\n else: \n graph1+='<td color=\"blue\"> * </td>'\n else:\n graph1+='<td color=\"blue\"> </td>'\n if y==columna:\n graph1+='</tr>\\n\\t\\t\\t'\n graph2='</table>\\n\\t\\t>];\\n}' \n imagen=graph+graph1+graph2\n #f= 
open(name+\".dot\",\"w+\")\n #f.write(imagen)\n #f.close() \n #os.system(\"fdp -Tpng -o \"+name+\".png \"+name+\".dot\")\n Grafico(imagen,name) \ndef crearDot6(lista,lista1,fila,columna,x1,y1,sizeMatriz,name):\n graph='digraph {\\n\\tsize=\"8,8\"\\n\\tlabel=\"'+str(sizeMatriz)+'\"\\n\\ttbl [\\n\\tshape=plaintext\\n\\tlabel=<\\n\\n\\t\\t'\n graph+='<table border=\"0\" color=\"green\" cellpadding=\"10\" cellborder=\"1\" cellspacing=\"0\">\\n\\t'\n graph1=''\n for x in range(int(fila)+1):\n graph1+='<tr>'\n for y in range(int(columna)+1):\n if x==0 and y==0:\n graph1+='<td bgcolor = \"red\" color=\"black\"> A </td>'\n elif x==0 and y>0:\n graph1+='<td color=\"green\">'+str(y)+'</td>'\n elif x>0 and y==0:\n graph1+='<td color=\"green\"> '+str(x)+' </td>'\n else:\n if lista.buscar(x,y)==True:\n graph1+='<td color=\"blue\"> * </td>'\n else:\n graph1+='<td color=\"blue\"> </td>'\n if y==int(columna):\n graph1+='</tr>\\n\\t\\t\\t'\n graph2='</table>\\n\\t\\t>];' \n imagen=graph+graph1+graph2\n graph='\\n\\ttbl1 [\\n\\tshape=plaintext\\n\\tlabel=<\\n\\n\\t\\t'\n graph+='<table border=\"0\" color=\"green\" cellpadding=\"10\" cellborder=\"1\" cellspacing=\"0\">\\n\\t'\n graph1=''\n for i in range(int(x1)+1):\n graph1+='<tr>'\n for j in range(int(y1)+1):\n if i==0 and j==0:\n graph1+='<td bgcolor = \"red\" color=\"black\"> A </td>'\n elif i==0 and j>0:\n graph1+='<td color=\"green\">'+str(j)+'</td>'\n elif i>0 and j==0:\n graph1+='<td color=\"green\"> '+str(i)+' </td>'\n else:\n if lista1.buscar(i,j)==True:\n graph1+='<td color=\"blue\"> * </td>'\n else:\n graph1+='<td color=\"blue\"> </td>'\n if j==int(y1):\n graph1+='</tr>\\n\\t\\t\\t' \n graph2='</table>\\n\\t\\t>];\\n}' \n imagen+=graph+graph1+graph2 \n #f= open(name+\".dot\",\"w+\")\n #f.write(imagen)\n #f.close() \n #os.system(\"fdp -Tpng -o \"+name+\".png \"+name+\".dot\")\n Grafico(imagen,name) \ndef Grafico(graphi,name):\n \n if path.exists('\\\\grafo\\\\'+name+'.dot') and path.exists('\\\\grafo\\\\'+name+'.dot.png'): 
\n os.remove('\\\\grafo\\\\'+name+'.dot.png')\n os.remove('\\\\grafo\\\\'+name+'.dot') \n #d = Digraph(format='png')\n d=Source(graphi)\n #d.source(grafo)\n #d.format='png'\n d.render(name+'.dot',format='png',view=False) \n #d.render(''+name+'.gv',ruta,format='svg',view=False)\n #render('dot', 'png',name+'.gv') \n else:\n #d = Digraph(format='png')\n d=Source(graphi)\n #d.source(grafo)\n #d.format='png'\n #d.format='pdf'\n d.render(name+'.dot',format='png',view=False)\n #d.render(''+name+'.gv',ruta,format='svg',view=False)"
},
{
"alpha_fraction": 0.4545454680919647,
"alphanum_fraction": 0.4753246605396271,
"avg_line_length": 22.192771911621094,
"blob_id": "ba0b514721e2443ce0ee80cc7fa0af6e1c996e32",
"content_id": "a4cb10b622d0d718a0db2b1e1faa72c83c5c1f31",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1926,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 83,
"path": "/HTML.py",
"repo_name": "AbidelLux/Proyecto02_1S2021IPC2_JAMES",
"src_encoding": "UTF-8",
"text": "\nimport webbrowser\ndef pageweb():\n from inicio import report\n #from lista import evalue\n #from lista import tipo\n #aux=\"\"\n mensaje=\"\"\n f = open('Reporte.html','w')\n mensaje =\"\"\"\n <!DOCTYPE html>\n <html lang=\"en\">\n <head>\n <meta charset=\"UTF-8\">\n <title>Document</title>\n <style>\n body\n {\n background-color: #fff6f1;\n font-family: \"helvetica\",Arial;\n }\n #contenido\n {\n width:960px;\n margin: 0 auto;\n text-align:center;\n }\n h1\n {\n color:#e44e2d;\n }\n #textPr\n {\n width:800px;\n height:auto;\n background-color:#1d1d1d;\n color:#fff;\n margin: 0 auto;\n text-align:left;\n border-radius: 20px;\n padding: 20px;\n box-shadow: 0px 0px 20px rgba(0,0,0,0.5)\n }\n </style>\n </head>\n <body>\n <div id=\"contenido\">\n <header>\n <hgroup>\n <h1>Reporte Del Proyecto</h1>\n </hgroup>\n </header>\n <section>\n <div id=\"textPr\">\n <article><br>\n \"\"\"\n\n \n \n mensaje2=\"\\n<p>\"\n for n in report.iterar():\n mensaje2+=n+'<br>'\n \n mensaje3=\"\"\"\n </p>\n </article>\n </div> \n </section>\n </div>\n </body>\n </html>\n \"\"\" \n \n unir=mensaje+mensaje2+mensaje3\n f.write(unir)\n f.close() \n\n\n #Cambia la ruta para indicar la localización del archivo\n #nombreArchivo = 'file:///Users/username/Desktop/programming-historian/' + 'holamundo.html'\n #webbrowser.open_new_tab(nombreArchivo)\n #d=webbrowser.get('C:/Program Files (x86)/Google/Chrome/Application/chrome.exe %s')\n #d.open_new_tab('file:/Reporte.html')\n webbrowser.open_new_tab('Reporte.html')"
},
{
"alpha_fraction": 0.43977591395378113,
"alphanum_fraction": 0.4518207311630249,
"avg_line_length": 28.270492553710938,
"blob_id": "30a0a687a820966acc7fb71ee2f3f5ad611f5a31",
"content_id": "711e9f370b25ecc3825b796c6ec1e648e1b05030",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3575,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 122,
"path": "/listaSimpleEnlazada.py",
"repo_name": "AbidelLux/Proyecto02_1S2021IPC2_JAMES",
"src_encoding": "UTF-8",
"text": "class Nodo:\n def __init__ (self,nombre,fila,columna,image):\n self.nombre=nombre\n self.CoorX=fila\n self.CoorY=columna\n self.image=image\n self.Siguiente=None\n\n\nclass listaEnlazadaMatriz:\n def __init__(self):\n self.inicio=None\n self.cola=None\n self.tamaño=0\n \n def add(self,nombre,fila,columna,image):\n nodo=Nodo(nombre,fila,columna,image)\n self.tamaño +=1\n \n if self.inicio:\n self.inicio.Siguiente=nodo\n self.inicio=nodo\n else:\n self.inicio=nodo\n self.cola=nodo\n \n def iterar(self):\n actual = self.cola\n\n while actual:\n nombre = actual.nombre\n x=actual.CoorX\n y=actual.CoorY\n pic=actual.image\n actual = actual.Siguiente\n yield nombre,x,y,pic\n def modificar(self,nombre,imagen):\n actual=self.cola\n while actual:\n if nombre==actual.nombre:\n actual.image=imagen\n actual=actual.Siguiente\n def crearlista(self,b,v):\n dato=\"\"\n for x in range(b):\n for y in range(v):\n for n in self.iterar():\n if x+1==int(n[0]) and y+1==int(n[1]): \n if y+1==1:\n dato+=\"[\"+str(n[2])+\",\"\n elif y+1==v and (x+1)!=b:\n dato+=str(n[2])+\"]/\"\n elif x+1==b and y+1==v:\n dato+=str(n[2])+\"]\"\n else:\n dato+=str(n[2])+\",\"\n \n dato=dato.split(\"/\")\n return dato \n def crearlist(self):\n names=\"\"\n for count,n in enumerate(self.iterar()):\n if count==0:\n names+=''+str(n[0])+','\n elif (self.tamaño-1)==count:\n names+=str(n[0])+''\n else:\n names+=str(n[0])+',' \n names=names.split(\",\") \n return names \n def buscar(self,x,y):\n for n in self.iterar():\n if x==int(n[0]) and y==int(n[1]):\n return n[2]\n def buscar2(self,name):\n for n in self.iterar():\n if n[0].upper()==name.upper():\n return True\n return False\n def buscar3(self,name):\n for n in self.iterar():\n if n[0].upper()==name.upper():\n return n\n\n def eliminar(self,x,y):\n actual=self.cola\n anterior=self.cola\n \n while actual:\n if int(actual.CoorX)==x and int(actual.CoorY)==y:\n if actual==self.cola:\n self.cola=actual.Siguiente\n else:\n # suponermos que tengo 
[1]->[2]->[3]\n #ahora quiero eliminar [2]\n #mi resultado [1]->[3]\n anterior.Siguiente=actual.Siguiente\n self.tamaño-=1\n return\n anterior=actual\n actual=actual.Siguiente\n# def deletCola(self):\n# actual=self.cola\n \n# if not actual:\n# anterior=actual.Siguiente\n# actual=actual.Siguiente\n# self.tamaño-=1\n# return True\n# else:\n# return False\n# return False \n#def prueba(x,y,dato):\n # lista=listaEnlazadaMatriz()\n # lista.add(0,0,1)\n # lista.add(0,1,5)\n\n\n#def imprimir():\n #lista=listaEnlazadaMatriz()\n # for d in lista.iterar():\n # print(d[2])"
},
{
"alpha_fraction": 0.5329528450965881,
"alphanum_fraction": 0.56725013256073,
"avg_line_length": 39.02132034301758,
"blob_id": "43f2bc110140adb9832b38da9b8fb1adb30c10e5",
"content_id": "a88d0e2eea6e43dd28303f568f0a02909ea26265",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 39428,
"license_type": "no_license",
"max_line_length": 172,
"num_lines": 985,
"path": "/menuGraphic.pyw",
"repo_name": "AbidelLux/Proyecto02_1S2021IPC2_JAMES",
"src_encoding": "UTF-8",
"text": "from tkinter import * \nfrom tkinter import messagebox\nfrom tkinter import ttk\nfrom inicio import report\nfrom archivoLectura import fechaHora\nfrom tkPDFViewer import tkPDFViewer as pdf\n#REPORT=\"\"\nrespuesta=\"\"\nletra=\"\"\nbandera=False\nArchivo=False\ntipoL=\"\"\ndef menus():\n global letra, bandera, Archivo\n def lectura():\n global bandera,Archivo\n from archivoLectura import lecturaM\n root.destroy()\n lecturaM()\n Archivo=True\n bandera=False\n menus()\n #print(\"hola mundo\")\n def rotacionH():\n global bandera,letra\n from archivoLectura import lista\n from operacionalMatriz import horizontal\n root.destroy()\n busca()\n \n horizontal(respuesta)\n bandera=True\n letra=\"Rotacion Horizontal\"\n menus()\n def rotacionV():\n global bandera,letra\n from archivoLectura import lista\n from operacionalMatriz import vertical\n root.destroy()\n busca()\n vertical(respuesta)\n bandera=True\n letra=\"Rotacion Vertical\"\n menus()\n def transpuesta():\n global bandera,letra\n from archivoLectura import lista\n from operacionalMatriz import traspuesta\n root.destroy()\n busca()\n traspuesta(respuesta)\n bandera=True\n letra=\"Traspuesta\"\n menus()\n def agregarH():\n global tipoL,bandera\n tipoL=\"horizontal\"\n bandera=True\n agregarL_Ven()\n #print(\"Agregar Linea Horizontal\")\n def agregarV():\n global tipoL,bandera\n tipoL=\"vertical\"\n bandera=True\n agregarL_Ven()\n #print(\"Agregar Linea Vertical\")\n def agregarRec():\n global tipoL,bandera\n #tipoL=\"Rectangulo\"\n bandera=True\n venCuadraTri()\n #print(\"Agregar Linea Vertical\")\n #print(\"Agregar Rectangulo\")\n def agregarTRec():\n global tipoL,bandera\n #tipoL=\"Triangulo\"\n bandera=True\n agregarT_Ven()\n #print(\"Agregar Triangulo Rectangulo\")\n def imagenSize(dato):\n imagen=PhotoImage(file=dato)\n imagen=imagen.zoom(1)\n imagen=imagen.subsample(1)\n return imagen\n #labelImage1['image']=imagen\n def busca():\n global Archivo\n from archivoLectura import lista\n venP=Tk()\n def 
cerrar_app():\n venP.destroy()\n menus()\n \n venP.protocol(\"WM_DELETE_WINDOW\", cerrar_app) \n label=Label(venP,text=\"Ingrese el nombre de la Matriz\")\n label.grid(row=0, column=0, sticky=\"w\", padx=5, pady=5)\n label.config(justify=\"center\" , state=\"normal\",font=(\"Verdana\",12))\n\n if Archivo==True: \n dato=lista.crearlist()\n else:\n dato=\"\"\n\n #creando la caja de texto \n entry=ttk.Combobox(venP,state=\"readonly\",values=dato)\n entry.grid(row=3,column=0,padx=5,pady=10)\n entry.config(font=(\"Verdana\",12))\n\n def ok():\n from archivoLectura import lista\n global respuesta\n #entry.focus_set()\n respuesta=entry.get()\n if lista.buscar2(respuesta)==True:\n venP.destroy() \n elif respuesta==\"\":\n messagebox.showerror(message=\"Por favor llene el Cuadro de Texto\")\n report.add(''+str(fechaHora())+'Error: No lleno el cuadro de texto')\n else:\n messagebox.showerror(message=\"El nombre de la matriz no existe\")\n report.add(''+str(fechaHora())+'Error: la Matriz buscada no existe')\n #\n boton=Button(venP,text=\"Buscar\", command=ok)\n boton.grid(row=4, column=0, padx=5,pady=15)\n #venP.destroy() \n venP.mainloop()\n \n #for a in lista.iterar():\n #print(\"rotacion Horizontal\")\n def limpiarVen():\n from archivoLectura import lista\n root.destroy()\n VenLimpiar=Tk()\n VenLimpiar.title(\"Ingrese Coordenada\")\n VenLimpiar.geometry(\"320x320\")\n \n def cerrar_app():\n VenLimpiar.destroy()\n menus()\n \n VenLimpiar.protocol(\"WM_DELETE_WINDOW\", cerrar_app) \n #VenLimpiar.config(bg=\"#ffffff\")\n #ingresando los label de las coordenadas\n label=Label(VenLimpiar,text=\"Buscar Matriz:\")\n label.grid(row=1, column=0, sticky=\"w\", padx=5, pady=5)\n label.config(justify=\"center\" , state=\"normal\",font=(\"Verdana\",12))\n \n if Archivo==True: \n dato=lista.crearlist()\n else:\n dato=\"\" \n \n entry=ttk.Combobox(VenLimpiar,state=\"readonly\",values=dato)\n entry.grid(row=0,column=1,padx=\"5\",pady=\"3\")\n 
entry.config(justify=\"center\",state=\"normal\",font=(\"Verdana\",12)) \n entry.place(relx=0.4,relwidth=0.5, relheight=0.10) \n \n labelX1=Label(VenLimpiar,text=\"X1\")\n labelX1.config(font=(\"verdana\",12))\n labelX1.place(relx=0.25,rely=0.15)\n \n dato1=Entry(VenLimpiar)\n dato1.place(relx=0.2,rely=0.25,relwidth=0.2,relheight=0.10)\n dato1.config(justify=\"center\",state=\"normal\",font=(\"Verdana\",12))\n \n coma1=Label(VenLimpiar,text=\",\")\n coma1.config(font=(\"verdana\",12))\n coma1.place(relx=0.48,rely=0.30)\n \n parentesis1=Label(VenLimpiar,text=\"(\")\n parentesis1.config(font=(\"verdana\",32))\n parentesis1.place(relx=0.10,rely=0.20)\n\n parentesis2=Label(VenLimpiar,text=\")\")\n parentesis2.config(font=(\"verdana\",32))\n parentesis2.place(relx=0.83,rely=0.20)\n \n vocalM=Label(VenLimpiar,text=\"A\")\n vocalM.config(font=(\"verdana\",18))\n vocalM.place(relx=0.45,rely=0.38)\n \n dato2=Entry(VenLimpiar)\n dato2.place(relx=0.6,rely=0.25,relwidth=0.2,relheight=0.10)\n dato2.config(justify=\"center\",state=\"normal\",font=(\"Verdana\",12))\n \n labelY1=Label(VenLimpiar,text=\"Y1\")\n labelY1.config(font=(\"verdana\",12))\n labelY1.place(relx=0.65,rely=0.15)\n \n labelX2=Label(VenLimpiar,text=\"X2\")\n labelX2.config(font=(\"verdana\",12))\n labelX2.place(relx=0.25,rely=0.5)\n\n coma2=Label(VenLimpiar,text=\",\")\n coma2.config(font=(\"verdana\",12))\n coma2.place(relx=0.48,rely=0.6)\n \n labelY2=Label(VenLimpiar,text=\"Y2\")\n labelY2.config(font=(\"verdana\",12))\n labelY2.place(relx=0.65,rely=0.5)\n \n parentesis3=Label(VenLimpiar,text=\"(\")\n parentesis3.config(font=(\"verdana\",32))\n parentesis3.place(relx=0.10,rely=0.55)\n\n parentesis4=Label(VenLimpiar,text=\")\")\n parentesis4.config(font=(\"verdana\",32))\n parentesis4.place(relx=0.83,rely=0.55)\n \n dato3=Entry(VenLimpiar)\n dato3.place(relx=0.2,rely=0.6,relwidth=0.2,relheight=0.10)\n dato3.config(justify=\"center\",state=\"normal\",font=(\"Verdana\",12))\n \n dato4=Entry(VenLimpiar)\n 
dato4.place(relx=0.6,rely=0.6,relwidth=0.2,relheight=0.10)\n dato4.config(justify=\"center\",state=\"normal\",font=(\"Verdana\",12)) \n \n def ok2():\n global bandera,letra\n from operacionalMatriz import deletMatriz\n from archivoLectura import lista\n #entry.focus_set()\n dato1.focus_set()\n dato2.focus_set()\n dato3.focus_set()\n dato4.focus_set()\n nombre=entry.get()#respuesta nombre \n respuesta=dato1.get()#respuesta de X1\n respuesta1=dato2.get()#respuesta de Y1\n respuesta2=dato3.get()#respuesta de X2\n respuesta3=dato4.get()#respuesta de Y2\n \n if lista.buscar2(nombre)==True:\n if respuesta !=\"\" and respuesta1 !=\"\" and respuesta2 !=\"\" and respuesta3 !=\"\" and nombre !=\"\":\n VenLimpiar.destroy() \n deletMatriz(nombre,respuesta,respuesta1,respuesta2,respuesta3)\n bandera=True\n letra=\"Limpiar Zona \"+str(respuesta)+\",\"+str(respuesta1)+\" \"\n letra+=\"\"+str(respuesta2)+\",\"+str(respuesta3)\n menus()\n else:\n messagebox.showerror(message=\"Por favor llene todos los cuadros de texto\")\n report.add(''+str(fechaHora())+'Error: No se lleno el cuadro de texto')\n #limpiarVen()\n else:\n messagebox.showerror(message=\"El nombre la matriz no existe\")\n report.add(''+str(fechaHora())+'Error: El nombre de la matriz no existe')\n boton=Button(VenLimpiar,text=\"Eliminar\", command=ok2)\n boton.place(relx=0.35,rely=0.8,relwidth=0.3,relheight=0.1)\n boton.config(font=(\"verdana\",12)) \n def venCuadraTri():\n from archivoLectura import lista\n root.destroy()\n VenLimpiar=Tk()\n VenLimpiar.title(\"Ingrese Los datos\")\n VenLimpiar.geometry(\"320x320\")\n\n def cerrar_app():\n VenLimpiar.destroy()\n menus()\n \n VenLimpiar.protocol(\"WM_DELETE_WINDOW\", cerrar_app) \n \n #VenLimpiar.config(bg=\"#ffffff\")\n #ingresando los label de las coordenadas\n label=Label(VenLimpiar,text=\"Buscar Matriz:\")\n label.grid(row=1, column=0, sticky=\"w\", padx=5, pady=5)\n label.config(justify=\"center\" , state=\"normal\",font=(\"Verdana\",12))\n\n if Archivo==True: \n 
dato=lista.crearlist()\n else:\n dato=\"\"\n \n entry=ttk.Combobox(VenLimpiar,state=\"readonly\",values=dato)\n entry.grid(row=0,column=1,padx=\"5\",pady=\"3\")\n entry.config(justify=\"center\",state=\"normal\",font=(\"Verdana\",12)) \n entry.place(relx=0.4,relwidth=0.5, relheight=0.10) \n \n labelX1=Label(VenLimpiar,text=\"X1\")\n labelX1.config(font=(\"verdana\",12))\n labelX1.place(relx=0.25,rely=0.15)\n \n dato1=Entry(VenLimpiar)\n dato1.place(relx=0.2,rely=0.25,relwidth=0.2,relheight=0.10)\n dato1.config(justify=\"center\",state=\"normal\",font=(\"Verdana\",12))\n \n coma1=Label(VenLimpiar,text=\",\")\n coma1.config(font=(\"verdana\",12))\n coma1.place(relx=0.48,rely=0.30)\n \n parentesis1=Label(VenLimpiar,text=\"(\")\n parentesis1.config(font=(\"verdana\",32))\n parentesis1.place(relx=0.10,rely=0.20)\n\n parentesis2=Label(VenLimpiar,text=\")\")\n parentesis2.config(font=(\"verdana\",32))\n parentesis2.place(relx=0.83,rely=0.20)\n \n #vocalM=Label(VenLimpiar,text=\"A\")\n #vocalM.config(font=(\"verdana\",18))\n #vocalM.place(relx=0.45,rely=0.38)\n \n dato2=Entry(VenLimpiar)\n dato2.place(relx=0.6,rely=0.25,relwidth=0.2,relheight=0.10)\n dato2.config(justify=\"center\",state=\"normal\",font=(\"Verdana\",12))\n \n labelY1=Label(VenLimpiar,text=\"Y1\")\n labelY1.config(font=(\"verdana\",12))\n labelY1.place(relx=0.65,rely=0.15)\n \n labelX2=Label(VenLimpiar,text=\"Alto\")\n labelX2.config(font=(\"verdana\",12))\n labelX2.place(relx=0.25,rely=0.5)\n \n labelY2=Label(VenLimpiar,text=\"Ancho\")\n labelY2.config(font=(\"verdana\",12))\n labelY2.place(relx=0.65,rely=0.5)\n\n coma2=Label(VenLimpiar,text=\"X\")\n coma2.config(font=(\"verdana\",12))\n coma2.place(relx=0.48,rely=0.6)\n \n dato3=Entry(VenLimpiar)\n dato3.place(relx=0.2,rely=0.6,relwidth=0.2,relheight=0.10)\n dato3.config(justify=\"center\",state=\"normal\",font=(\"Verdana\",12))\n \n dato4=Entry(VenLimpiar)\n dato4.place(relx=0.6,rely=0.6,relwidth=0.2,relheight=0.10)\n 
dato4.config(justify=\"center\",state=\"normal\",font=(\"Verdana\",12)) \n \n def ok4():\n global bandera,letra,tipoL\n from archivoLectura import lista\n from operacionalMatriz import agregar_R\n from operacionalMatriz import matriz\n \n #entry.focus_set()\n dato1.focus_set()\n dato2.focus_set()\n dato3.focus_set()\n nombre=entry.get()#respuesta nombre \n respuesta=dato1.get()#respuesta de X1\n respuesta1=dato2.get()#respuesta de Y1\n respuesta2=dato3.get()#respuesta de Alto\n respuesta3=dato4.get()#respuesta de Ancho\n #matriz(nombre)\n if lista.buscar2(nombre)==True:\n dato=lista.buscar3(nombre)\n fila=(int(respuesta)+int(respuesta3))-1\n columna=(int(respuesta1)+int(respuesta2))-1\n if fila !=columna:\n if columna<= int(dato[2]) and fila<=int(dato[1]):\n if respuesta !=\"\" and respuesta1 !=\"\" and respuesta2 !=\"\" and nombre !=\"\":\n VenLimpiar.destroy() \n agregar_R(nombre,respuesta,respuesta1,str(columna),str(fila))\n bandera=True\n letra=\"Agregar Rectangulo \"+str(respuesta)+\",\"+str(respuesta1)+\" \"\n letra+=\"\"+str(respuesta2)+\"x\"+str(respuesta3)\n menus()\n else:\n messagebox.showerror(message=\"Por favor llene todos los cuadros de texto\")\n report.add(''+str(fechaHora())+'Error: No se ha llenado todos los cuadro de texto')\n #limpiarVen() \n else:\n messagebox.showerror(message=\"La fila o Columuna es mayor de la matriz\") \n report.add(''+str(fechaHora())+'Error: La fila o Columuna es mayor de la matriz')\n else:\n messagebox.showerror(message=\"Lo sentimos pero esto es un cuadrado\") \n report.add(''+str(fechaHora())+'Error: Esta intentando crear un rectangulo pero es un cuadrado') \n else: \n messagebox.showerror(message=\"El nombre de la matriz no existe\") \n report.add(''+str(fechaHora())+'Error: la Columna de la matriz '+nombre+' no es un numero') \n boton=Button(VenLimpiar,text=\"Agregar\", command=ok4)\n boton.place(relx=0.35,rely=0.8,relwidth=0.3,relheight=0.1)\n boton.config(font=(\"verdana\",12))\n #venP.destroy() \n 
VenLimpiar.mainloop() \n def agregarT_Ven():\n from archivoLectura import lista\n root.destroy()\n VenLimpiar=Tk()\n VenLimpiar.title(\"Ingrese Coordenada\")\n VenLimpiar.geometry(\"320x320\")\n\n def cerrar_app():\n VenLimpiar.destroy()\n menus()\n \n VenLimpiar.protocol(\"WM_DELETE_WINDOW\", cerrar_app) \n \n #VenLimpiar.config(bg=\"#ffffff\")\n #ingresando los label de las coordenadas\n label=Label(VenLimpiar,text=\"Buscar Matriz:\")\n label.grid(row=1, column=0, sticky=\"w\", padx=5, pady=5)\n label.config(justify=\"center\" , state=\"normal\",font=(\"Verdana\",12))\n label.place(relx=0.03,rely=0.1,)\n \n if Archivo==True: \n dato=lista.crearlist()\n else:\n dato=\"\"\n \n entry=ttk.Combobox(VenLimpiar,state=\"readonly\",values=dato)\n entry.grid(row=0,column=1,padx=\"5\",pady=\"3\")\n entry.config(justify=\"center\",state=\"normal\",font=(\"Verdana\",12)) \n entry.place(relx=0.45,rely=0.1,relwidth=0.5, relheight=0.10) \n \n labelX1=Label(VenLimpiar,text=\"X1\")\n labelX1.config(font=(\"verdana\",12))\n labelX1.place(relx=0.25,rely=0.25)\n \n dato1=Entry(VenLimpiar)\n dato1.place(relx=0.2,rely=0.35,relwidth=0.2,relheight=0.10)\n dato1.config(justify=\"center\",state=\"normal\",font=(\"Verdana\",12))\n \n coma1=Label(VenLimpiar,text=\",\")\n coma1.config(font=(\"verdana\",12))\n coma1.place(relx=0.48,rely=0.4)\n \n parentesis1=Label(VenLimpiar,text=\"(\")\n parentesis1.config(font=(\"verdana\",32))\n parentesis1.place(relx=0.10,rely=0.30)\n\n parentesis2=Label(VenLimpiar,text=\")\")\n parentesis2.config(font=(\"verdana\",32))\n parentesis2.place(relx=0.83,rely=0.30)\n \n labelY1=Label(VenLimpiar,text=\"Y1\")\n labelY1.config(font=(\"verdana\",12))\n labelY1.place(relx=0.65,rely=0.25)\n \n vocalM=Label(VenLimpiar,text=\"Longuitud de Elementos\")\n vocalM.config(font=(\"verdana\",12))\n vocalM.place(relx=0.20,rely=0.50)\n \n dato2=Entry(VenLimpiar)\n dato2.place(relx=0.6,rely=0.35,relwidth=0.2,relheight=0.10)\n 
dato2.config(justify=\"center\",state=\"normal\",font=(\"Verdana\",12))\n \n dato3=Entry(VenLimpiar)\n dato3.place(relx=0.4,rely=0.6,relwidth=0.2,relheight=0.10)\n dato3.config(justify=\"center\",state=\"normal\",font=(\"Verdana\",12)) \n def ok5():\n from archivoLectura import lista\n from operacionalMatriz import agregar_T\n #entry.focus_set()\n dato1.focus_set()\n dato2.focus_set()\n dato3.focus_set()\n nombre=entry.get()#respuesta nombre \n respuesta=dato1.get()#respuesta de X1\n respuesta1=dato2.get()#respuesta de Y1\n respuesta2=dato3.get()#respuesta de X2\n if lista.buscar2(nombre)==True:\n dato=lista.buscar3(nombre)\n fila=(int(respuesta)+int(respuesta2))-1\n columna=(int(respuesta1)+int(respuesta2))-1\n if columna<= int(dato[2]) and fila<=int(dato[1]):\n if respuesta !=\"\" and respuesta1 !=\"\" and respuesta2 !=\"\" and nombre !=\"\":\n VenLimpiar.destroy() \n agregar_T(nombre,respuesta,respuesta1,str(fila),str(columna))\n bandera=True\n letra=\"Agregar Triangulo Rectangulo \"+str(respuesta)+\",\"+str(respuesta1)+\" \"\n letra+=\"\"+str(respuesta2)\n menus()\n else:\n messagebox.showerror(message=\"Por favor llene todos los cuadros de texto\")\n report.add(''+str(fechaHora())+'Error: No se ha llenado todos los cuadros de texto')\n #limpiarVen() \n else:\n messagebox.showerror(message=\"La fila o Columuna es mayor de la matriz\") \n report.add(''+str(fechaHora())+'Error: La fila o Columuna es mayor de la matriz')\n else: \n messagebox.showerror(message=\"El nombre de la matriz no existe\") \n report.add(''+str(fechaHora())+'Error: El nombre de la matriz no existe')\n print()\n boton=Button(VenLimpiar,text=\"Agregar\", command=ok5)\n boton.place(relx=0.35,rely=0.8,relwidth=0.3,relheight=0.1)\n boton.config(font=(\"verdana\",12))\n #venP.destroy() \n VenLimpiar.mainloop() \n def agregarL_Ven():\n from archivoLectura import lista\n root.destroy()\n VenLimpiar=Tk()\n VenLimpiar.title(\"Ingrese Coordenada\")\n VenLimpiar.geometry(\"320x320\")\n\n def 
cerrar_app():\n VenLimpiar.destroy()\n menus()\n \n VenLimpiar.protocol(\"WM_DELETE_WINDOW\", cerrar_app) \n #VenLimpiar.config(bg=\"#ffffff\")\n #ingresando los label de las coordenadas\n label=Label(VenLimpiar,text=\"Buscar Matriz:\")\n label.grid(row=1, column=0, sticky=\"w\", padx=5, pady=5)\n label.config(justify=\"center\" , state=\"normal\",font=(\"Verdana\",12))\n label.place(relx=0.03,rely=0.1,)\n \n if Archivo==True: \n dato=lista.crearlist()\n else:\n dato=\"\"\n \n entry=ttk.Combobox(VenLimpiar,state=\"readonly\",values=dato)\n entry.grid(row=0,column=1,padx=\"5\",pady=\"3\")\n entry.config(justify=\"center\",state=\"normal\",font=(\"Verdana\",12)) \n entry.place(relx=0.45,rely=0.1,relwidth=0.5, relheight=0.10) \n \n labelX1=Label(VenLimpiar,text=\"X1\")\n labelX1.config(font=(\"verdana\",12))\n labelX1.place(relx=0.25,rely=0.25)\n \n dato1=Entry(VenLimpiar)\n dato1.place(relx=0.2,rely=0.35,relwidth=0.2,relheight=0.10)\n dato1.config(justify=\"center\",state=\"normal\",font=(\"Verdana\",12))\n \n coma1=Label(VenLimpiar,text=\",\")\n coma1.config(font=(\"verdana\",12))\n coma1.place(relx=0.48,rely=0.4)\n \n parentesis1=Label(VenLimpiar,text=\"(\")\n parentesis1.config(font=(\"verdana\",32))\n parentesis1.place(relx=0.10,rely=0.30)\n\n parentesis2=Label(VenLimpiar,text=\")\")\n parentesis2.config(font=(\"verdana\",32))\n parentesis2.place(relx=0.83,rely=0.30)\n \n labelY1=Label(VenLimpiar,text=\"Y1\")\n labelY1.config(font=(\"verdana\",12))\n labelY1.place(relx=0.65,rely=0.25)\n \n vocalM=Label(VenLimpiar,text=\"cantidad de Elementos\")\n vocalM.config(font=(\"verdana\",12))\n vocalM.place(relx=0.20,rely=0.50)\n \n dato2=Entry(VenLimpiar)\n dato2.place(relx=0.6,rely=0.35,relwidth=0.2,relheight=0.10)\n dato2.config(justify=\"center\",state=\"normal\",font=(\"Verdana\",12))\n \n dato3=Entry(VenLimpiar)\n dato3.place(relx=0.4,rely=0.6,relwidth=0.2,relheight=0.10)\n dato3.config(justify=\"center\",state=\"normal\",font=(\"Verdana\",12))\n \n def ok3(): \n 
from archivoLectura import lista\n from operacionalMatriz import agregar_H\n from operacionalMatriz import matriz\n global bandera,tipoL,letra\n #entry.focus_set()\n dato1.focus_set()\n dato2.focus_set()\n dato3.focus_set()\n nombre=entry.get()#respuesta nombre \n respuesta=dato1.get()#respuesta de X1\n respuesta1=dato2.get()#respuesta de Y1\n respuesta2=dato3.get()#respuesta de X2\n #matriz(nombre)\n if lista.buscar2(nombre)==True:\n dato=lista.buscar3(nombre)\n if tipoL==\"horizontal\":\n tamano=(int(respuesta1)+int(respuesta2))-1\n if tamano <= int(dato[2]):\n if respuesta !=\"\" and respuesta1 !=\"\" and respuesta2 !=\"\" and nombre !=\"\":\n VenLimpiar.destroy() \n agregar_H(nombre,respuesta,respuesta1,respuesta,str(tamano))\n bandera=True\n letra=\"Agregar Linea Horizontal \"+str(respuesta)+\",\"+str(respuesta1)+\" \"\n letra+=\"\"+str(respuesta2)\n menus()\n else:\n messagebox.showerror(message=\"Por favor llene todos los cuadros de texto\")\n report.add(''+str(fechaHora())+'Error: No se ha llenado todos los cuadros de texto')\n #limpiarVen() \n else:\n messagebox.showerror(message=\"La cantidad de elementos supera el tamaño de la fila de la matriz\") \n report.add(''+str(fechaHora())+'Error: La cantidad de elementos supera el tamaño de la fila de la matriz') \n elif tipoL==\"vertical\":\n tamano=(int(respuesta2)+int(respuesta))-1\n if tamano <= int(dato[1]):\n if respuesta !=\"\" and respuesta1 !=\"\" and respuesta2 !=\"\" and nombre !=\"\":\n VenLimpiar.destroy() \n agregar_H(nombre,respuesta,respuesta1,str(tamano),respuesta1)\n bandera=True\n letra=\"Agregar Linea Horizontal \"+str(respuesta)+\",\"+str(respuesta1)+\" \"\n letra+=\"\"+str(respuesta2)\n menus()\n else:\n messagebox.showerror(message=\"Por favor llene todos los cuadros de texto\")\n report.add(''+str(fechaHora())+'Error: No se ha llenado todos los cuadros de texto')\n #limpiarVen() \n else:\n messagebox.showerror(message=\"La cantidad de elementos supera el tamaño de la fila de la matriz\") 
\n report.add(''+str(fechaHora())+'Error: La cantidad de elementos supera el tamaño de la fila de la matriz') \n else: \n messagebox.showerror(message=\"El nombre de la matriz no existe\") \n report.add(''+str(fechaHora())+'Error: El nombre de la matriz no existe') \n boton=Button(VenLimpiar,text=\"Agregar\", command=ok3)\n boton.place(relx=0.35,rely=0.8,relwidth=0.3,relheight=0.1)\n boton.config(font=(\"verdana\",12))\n #venP.destroy() \n VenLimpiar.mainloop()\n def unioAB():\n global tipoL,bandera,letra\n root.destroy()\n tipoL=\"union\"\n bandera=True\n letra=\"Resultado Union A,B\"\n ventana2()\n def interseccionAB():\n global tipoL,bandera,letra\n root.destroy()\n tipoL=\"interseccion\"\n bandera=True\n letra=\"Resultado Interseccion A,B\"\n ventana2()\n def diferencia():\n global tipoL,bandera,letra\n root.destroy()\n tipoL=\"diferencia\"\n bandera=True\n letra=\"Resultado Diferencia A,B\"\n ventana2() \n def simetrica():\n global tipoL,bandera,letra\n root.destroy()\n tipoL=\"simetrica\"\n bandera=True\n letra=\"Resultado Diferencia Simetrica A,B\"\n ventana2() \n def saberBandera():\n global Archivo\n Archivo=True\n def ventana2():\n global Archivo\n from archivoLectura import lista\n ven=Tk()\n ven.geometry(\"500x250\") \n ven.title(\"selecciones Dato\")\n\n def cerrar_app():\n ven.destroy()\n menus()\n \n ven.protocol(\"WM_DELETE_WINDOW\", cerrar_app) \n \n if Archivo==True:\n dato=lista.crearlist()\n else:\n dato=\"\"\n \n labelA1= Label(ven,text=\"Elija la Primera Matriz:\")\n labelA1.config(font=(\"verdana\",16))\n labelA1.place(relx=0.05,rely=0.10)\n \n combo1= ttk.Combobox(ven,state=\"readonly\", values=dato)\n combo1.config(font=(\"verdana\",12))\n combo1.place(relx=0.07,rely=0.25,relwidth=0.6)\n \n labelA2=Label(ven,text=\"Elija la Segunda Matriz:\")\n labelA2.config(font=(\"verdana\",16))\n labelA2.place(relx=0.05,rely=0.35)\n \n combo2= ttk.Combobox(ven,state=\"readonly\",values=dato)\n combo2.config(font=(\"verdana\",12))\n 
combo2.place(relx=0.07,rely=0.5,relwidth=0.6) \n \n def archivoAgregar():\n global Archivo\n from archivoLectura2 import lecturaM\n from archivoLectura import lista\n ven.destroy()\n lecturaM()\n saberBandera()\n ventana2()\n '''\n combo1.set(\"\")\n combo2.set(\"\")\n data=lista.crearlist()\n combo1[\"values\"]=data\n combo2[\"values\"]=data\n '''\n print()\n \n boton=Button(ven,text=\"Agregar\\nnueva\\nMatriz\", command=archivoAgregar)\n boton.place(relx=0.7,rely=0.30,relwidth=0.25,relheight=0.25)\n boton.config(font=(\"verdana\",12),bg=\"#b8daba\")\n def generarMatriz():\n #global bandera\n from archivoLectura import lista\n from operacionalMatriz import Union,intersection,dif,simetria\n if combo1.get() !=\"\" and combo2.get() !=\"\":\n matriz1=combo1.get()\n matriz2=combo2.get()\n dato1=lista.buscar3(matriz1)\n dato3=int(dato1[1])+int(dato1[2])\n dato2=lista.buscar3(matriz2)\n dato4=int(dato2[1])+int(dato2[2])\n if dato1[1]==dato2[1] and dato1[2]==dato2[2]:\n fila=int(dato1[1])\n columna=int(dato1[2])\n elif dato3>dato4:\n fila=int(dato1[1])\n columna=int(dato1[2])\n elif dato3<dato4:\n fila=int(dato2[1])\n columna=int(dato2[2])\n \n if tipoL== \"union\":\n Union(matriz1,matriz2,fila,columna,dato1[1],dato1[2],dato2[1],dato2[2])\n ven.destroy()\n menus()\n elif tipoL==\"interseccion\":\n intersection(matriz1,matriz2,fila,columna,dato1[1],dato1[2],dato2[1],dato2[2])\n ven.destroy()\n menus()\n elif tipoL==\"diferencia\":\n dif(matriz1,matriz2,fila,columna,dato1[1],dato1[2],dato2[1],dato2[2])\n ven.destroy()\n menus() \n elif tipoL==\"simetrica\": \n simetria(matriz1,matriz2,fila,columna,dato1[1],dato1[2],dato2[1],dato2[2])\n ven.destroy()\n menus() \n else:\n messagebox.showerror(message=\"aun no has selecionado el nombre de las Matrices a Comparar\")\n boton1=Button(ven,text=\"Generar matriz\", command=generarMatriz)\n boton1.place(relx=0.17,rely=0.75,relwidth=0.3,relheight=0.15)\n boton1.config(font=(\"verdana\",12),bg=\"#b8daba\")\n \n def cancel():\n 
ven.destroy()\n menus() \n boton2=Button(ven,text=\"Cancel\", command=cancel)\n boton2.place(relx=0.50,rely=0.75,relwidth=0.3,relheight=0.15)\n boton2.config(font=(\"verdana\",12),bg=\"#b8daba\")\n ven.mainloop()\n def docPdf():\n root.destroy()\n lector=Tk()\n lector.geometry(\"1000x700\")\n lector.title(\"Lector de PDF\")\n \n def cerrar_app():\n lector.destroy()\n menus()\n \n lector.protocol(\"WM_DELETE_WINDOW\", cerrar_app)\n v1=pdf.ShowPdf().pdf_view(lector,pdf_location=\"Documentacion/Documentacion_201602983.pdf\",width=100,height=100)\n v1.pack()\n v1.place(relx=0.1,rely=0,relwidth=0.75,relheight=1)\n #print() \n lector.mainloop() \n def Info():\n panel= Tk()\n panel.geometry(\"750x320\")\n panel.title(\"Informacion del Estudiante\")\n def cerrar_app():\n panel.destroy()\n \n panel.protocol(\"WM_DELETE_WINDOW\", cerrar_app) \n tile=Label(panel,text=\"Jaime Alejandro Armira Us\\n201602983\\nIntroduccion a la Programacion y Computacion 2 Seccion \\\"D\\\"\\nIngenieria en Ciencias y Sistemas\\nPrimer Semestre 2021\")\n tile.pack()\n tile.config(font=(\"Arial\",20))\n tile.place(relx=0,rely=0.05,relwidth=1, relheight=1)\n panel.mainloop()\n print()\n def report():\n from HTML import pageweb\n pageweb()\n def modificar():\n global Archivo\n from archivoLectura import lista\n venP=Tk()\n def cerrar_app():\n venP.destroy()\n menus()\n \n venP.protocol(\"WM_DELETE_WINDOW\", cerrar_app) \n label=Label(venP,text=\"Ingrese el nombre de la Matriz\")\n label.grid(row=0, column=0, sticky=\"w\", padx=5, pady=5)\n label.config(justify=\"center\" , state=\"normal\",font=(\"Verdana\",12))\n\n if Archivo==True: \n dato=lista.crearlist()\n else:\n dato=\"\"\n\n #creando la caja de texto \n entry=ttk.Combobox(venP,state=\"readonly\",values=dato)\n entry.grid(row=3,column=0,padx=5,pady=10)\n entry.config(font=(\"Verdana\",12))\n\n def ok():\n from operacionalMatriz import modificar\n from archivoLectura import lista\n global respuesta\n #entry.focus_set()\n 
respuesta=entry.get()\n if lista.buscar2(respuesta)==True:\n modificar(respuesta)\n messagebox.showinfo(message=\"Se ha modificado el Archivo correctamente\")\n venP.destroy() \n #menus()\n #\n boton=Button(venP,text=\"Modificar\", command=ok)\n boton.grid(row=4, column=0, padx=5,pady=15)\n #venP.destroy() \n venP.mainloop()\n def guardar():\n global Archivo\n from archivoLectura import lista\n venP=Tk()\n def cerrar_app():\n venP.destroy()\n #menus()\n \n venP.protocol(\"WM_DELETE_WINDOW\", cerrar_app) \n label=Label(venP,text=\"Ingrese el nombre de la Matriz\")\n label.grid(row=0, column=0, sticky=\"w\", padx=5, pady=5)\n label.config(justify=\"center\" , state=\"normal\",font=(\"Verdana\",12))\n\n if Archivo==True: \n dato=lista.crearlist()\n else:\n dato=\"\"\n\n #creando la caja de texto \n entry=Entry(venP)\n entry.grid(row=3,column=0,padx=5,pady=10)\n entry.config(font=(\"Verdana\",12))\n\n def ok():\n from operacionalMatriz import guardar\n from archivoLectura import lista\n global respuesta\n #entry.focus_set()\n respuesta=entry.get()\n if lista.buscar2(respuesta)==False:\n guardar(respuesta)\n messagebox.showinfo(message=\"Se ha modificado el Archivo correctamente\")\n venP.destroy() \n #menus()\n else:\n messagebox.showerror(message=\"La matriz no existe\")\n report.add(''+str(fechaHora())+'Error: la matriz '+respuesta+' no existe')\n boton=Button(venP,text=\"guardar\", command=ok)\n boton.grid(row=4, column=0, padx=5,pady=15)\n #venP.destroy() \n venP.mainloop() \n root=Tk()\n root.geometry('1200x600')\n root.title(\"Menu Principal\")\n menubar=Menu(root)\n menubar.config(font=(\"verdana\",48))\n root.configure(menu=menubar) \n\n #creando un frame\n ventana1=Frame(root)\n ventana1.pack()\n ventana1.config(bg=\"#ffffff\",bd=5,borderwidth=1)\n ventana1.config(highlightbackground=\"black\", highlightcolor=\"black\", highlightthickness=1)\n ventana1.place(relx=0.02, rely=0.04,relwidth=0.96,relheight=0.92 )\n \n tile=Label(root,text=\"Panel\")\n 
tile.pack()\n tile.config(font=(\"Arial\",24),bg=\"#6fa780\")\n tile.place(relx=0.05,rely=0.02)\n #crear contenedor de imagen 1\n imageVen1=Frame(ventana1)\n imageVen1.pack()\n imageVen1.config(bg=\"#646464\")\n imageVen1.config(highlightbackground=\"black\", highlightcolor=\"black\", highlightthickness=1)\n imageVen1.place(relx=0.05,rely=0.10, relwidth=0.4, relheight=0.8)\n\n #crear contenedor de imagen 2\n imageVen2=Frame(ventana1)\n imageVen2.pack()\n imageVen2.config(bg=\"#646464\",width=400,height=400)\n imageVen2.config(highlightbackground=\"black\", highlightcolor=\"black\", highlightthickness=1)\n imageVen2.place(relx=0.55,rely=0.10, relwidth=0.4, relheight=0.8) \n \n scroll=Scrollbar(imageVen1,orient='horizontal')\n scroll.pack(side=BOTTOM,fill=X,expand=True)\n scroll.place(relx=0,rely=0.96,relwidth=1,relheight=0.05)\n\n scroll1=Scrollbar(imageVen2,orient='horizontal')\n scroll1.pack()\n scroll1.place(relx=0,rely=0.96,relwidth=1,relheight=0.05) \n\n \n if bandera==True:\n imagen=imagenSize(\"Original.dot.png\")\n labelImage1=Label(imageVen1,image=imagen,bd=0) \n labelImage1.pack()\n labelImage1.config(bg=\"#646464\")\n labelImage1.place(relx=0,rely=0)\n #width=57, height=27\n \n imagen2=imagenSize(\"Resultado.dot.png\")\n labelImage2=Label(imageVen2,image=imagen2,bd=0) \n labelImage2.pack()\n labelImage2.config(bg=\"#646464\")\n labelImage2.place(relx=0,rely=0)\n #width=57, height=27\n #label2['text']=letra\n\n #label para el frame ventana1\n label1=Label(ventana1,text=\"=\")\n label1.pack()\n label1.config(font=(\"verdana\",48),bg=\"#ffffff\")\n label1.place(relx=0.48,rely=0.45)\n \n label2=Label(ventana1,text=\"Imagen Matriz Original\")\n label2.pack()\n label2.config(font=(\"verdana\",18),bg=\"#ffffff\")\n label2.place(relx=0.1,rely=0.9)\n \n labelM2=Label(ventana1,text=\"Imagen Matriz Resultado\")\n labelM2.pack()\n labelM2.config(font=(\"verdana\",18),bg=\"#ffffff\")\n if bandera ==True:\n labelM2['text']=letra\n labelM2.place(relx=0.65,rely=0.9)\n 
#Creando el menu de operaciones \n menuArchivo=Menu(menubar, tearoff=0)\n menuArchivo.add_command(label=\"Rotacion Horizontal de una Imagen\",command=rotacionH)\n menuArchivo.add_separator()\n menuArchivo.add_command(label=\"Rotación Vertical de una Imagen\",command=rotacionV)\n menuArchivo.add_separator()\n menuArchivo.add_command(label=\"Transpuesta de una Imagen\",command=transpuesta)\n menuArchivo.add_separator()\n menuArchivo.add_command(label=\"Limpiar la Zona de una Imagen\", command=limpiarVen)\n menuArchivo.add_separator()\n menuArchivo.add_command(label=\"Agregar Línea Horizontal a una Imagen\",command=agregarH)\n menuArchivo.add_separator()\n menuArchivo.add_command(label=\"Agregar Línea Vertical a una Imagen\",command=agregarV)\n menuArchivo.add_separator()\n menuArchivo.add_command(label=\"Agregar rectangulo\",command=agregarRec)\n menuArchivo.add_separator()\n menuArchivo.add_command(label=\"Agregar Triángulo Rectangulo\",command=agregarTRec)\n #menuArchivo.geometry(\"10x300\")\n menuArchivo2=Menu(menubar,tearoff=0)\n menuArchivo2.add_command(label=\"Union A,B\",command=unioAB)\n menuArchivo2.add_separator()\n menuArchivo2.add_command(label=\"Interseccion A,B\",command=interseccionAB)\n menuArchivo2.add_separator()\n menuArchivo2.add_command(label=\"Diferencia A,B\",command=diferencia)\n menuArchivo2.add_separator()\n menuArchivo2.add_command(label=\"Diferencia Simetrica A,B\",command=simetrica)\n \n menuCarga= Menu(menubar, tearoff=0)\n menuCarga.add_cascade(label=\"Abrir\",command=lectura)\n menuCarga.add_separator()\n menuCarga.add_cascade(label=\"Modificar\",command=modificar)\n menuCarga.add_separator()\n menuCarga.add_cascade(label=\"Guardar Como\",command=guardar)\n \n \n menuAyuda=Menu(menubar, tearoff=0)\n menuAyuda.add_cascade(label=\"Formacion del Estudiante\",command=Info)\n menuAyuda.add_separator()\n menuAyuda.add_cascade(label=\"Documentacion del Programa\",command=docPdf)\n \n menuOperacional=Menu(menubar,tearoff=0)\n 
menuOperacional.add_cascade(label=\"Operacion a Una imagen\",menu=menuArchivo)\n menuOperacional.add_separator()\n menuOperacional.add_cascade(label=\"Operacion a Dos Imagenes\",menu=menuArchivo2)\n \n # Creando los titulos del menu\n menubar.add_cascade(label=\"Cargar Archivo\", menu=menuCarga)\n menubar.add_cascade(label=\"Operaciones\",menu=menuOperacional)\n menubar.add_cascade(label=\"Reporte\", command=report)\n menubar.add_cascade(label=\"Ayuda\", menu=menuAyuda)\n root.mainloop() \n\ndef principal():\n #global REPORT\n from inicio import reporte\n reporte()\n menus()\nprincipal()"
},
{
"alpha_fraction": 0.4457862675189972,
"alphanum_fraction": 0.44799375534057617,
"avg_line_length": 48.05095672607422,
"blob_id": "f751084c57ca2e9661f4920d7dbed7d3391543b9",
"content_id": "e81024f471809e229133fffe80fd6baaef149114",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7705,
"license_type": "no_license",
"max_line_length": 154,
"num_lines": 157,
"path": "/archivoLectura2.py",
"repo_name": "AbidelLux/Proyecto02_1S2021IPC2_JAMES",
"src_encoding": "UTF-8",
"text": "from tkinter import *\nfrom tkinter import filedialog\nfrom tkinter import messagebox\n\ndef lecturaM():\n import xml.etree.ElementTree as ET\n from archivoLectura import lista\n from archivoLectura import fechaHora\n from inicio import report\n #from menuGraphic import bandera\n leer=Tk()\n leer.title(\"Abrir Archivo\")\n leer.withdraw()\n leer.filename=filedialog.askopenfilename(initialdir=\"c:/Desktop\", title=\"Selelcionar Archivo\",filetypes=((\"Archivo xml\",\"*.xml\"),(\"all files\",\"*.*\")))\n #leer.destroy()\n def cerrar_app():\n leer.destroy()\n #menus()\n \n leer.protocol(\"WM_DELETE_WINDOW\", cerrar_app) \n if leer.filename == \"\":\n messagebox.showerror(message=\"Archivo no seleccionado\")\n leer.destroy()\n else:\n leer.destroy()\n Archivo=open(leer.filename,\"r\")\n tree=ET.parse(Archivo)\n raiz=tree.getroot()\n nombre=\"\"\n columna=\"\"\n filas=\"\"\n image=\"\"\n for hijo in raiz:\n if hijo.tag == \"matriz\":\n for nieto in hijo:\n if nieto.tag==\"nombre\":\n if nieto.text!=\"\":\n nombre=nieto.text\n print(nieto.text)\n else:\n report.add(''+str(fechaHora())+'Error: una matriz no contiene nombre')\n elif nieto.tag==\"filas\":\n if (nieto.text).isdigit():\n filas=nieto.text\n #print(nieto.text)\n elif nieto.text==\"\":\n report.add(''+str(fechaHora())+'Error: la fila de la matriz '+nombre+' no contiene un numero')\n else:\n report.add(''+str(fechaHora())+'Error: la fila de la matriz '+nombre+' no es un numero')\n elif nieto.tag==\"columnas\":\n if (nieto.text).isdigit():\n columna=nieto.text\n #print(nieto.text)\n elif nieto.text==\"\":\n report.add(''+str(fechaHora())+'Error: la Columna de la matriz '+nombre+' no contiene un numero')\n else:\n report.add(''+str(fechaHora())+'Error: la Columna de la matriz '+nombre+' no es un numero')\n #print(\"no es un numero\")\n elif nieto.tag==\"imagen\":\n if nieto.text!=\"\":\n image=nieto.text\n image=image.replace(\" \", \"\")\n image=image.replace(\"\\t\",\"\")\n print(image)\n else:\n 
report.add(''+str(fechaHora())+'Error: la matriz no contiene una imagen para procesar')\n else: continue\n if nombre!=\"\" and filas!=\"\" and columna!=\"\" and image!=\"\":\n dato=verific(image)\n if int(filas)==dato[0] and int(columna)==dato[1]:\n if lista.buscar2(nombre)==False:\n lista.add(nombre,filas,columna,image)\n mensje=''+str(fechaHora())+''+nombre+' - Espacios LLenos:'+str(dato[2])+' - Espacios Vacios:'+str(dato[3])\n report.add(mensje) \n print(report.tamaño)\n nombre=\"\"\n filas=\"\"\n columna=\"\"\n image=\"\"\n else:\n report.add(''+str(fechaHora())+'Error: No se pudo guardar La matriz '+str(nombre)+' porque ya existe')\n continue\n else:\n report.add(''+str(fechaHora())+'Error: No se pudo guardar la matriz hace falta un elemento')\n continue\n else:\n report.add(''+str(fechaHora())+'Error: No contiene tag \"matriz\" una de de las matrices archivo .xml')\n for nieto in hijo:\n if nieto.tag==\"nombre\":\n if nieto.text!=\"\":\n nombre=nieto.text\n print(nieto.text)\n else:\n report.add(''+str(fechaHora())+'Error: una matriz no contiene nombre')\n elif nieto.tag==\"filas\":\n if (nieto.text).isdigit():\n filas=nieto.text\n #print(nieto.text)\n elif nieto.text==\"\":\n report.add(''+str(fechaHora())+'Error: la fila de la matriz '+nombre+' no contiene un numero')\n else:\n report.add(''+str(fechaHora())+'Error: la fila de la matriz '+nombre+' no es un numero')\n elif nieto.tag==\"columnas\":\n if (nieto.text).isdigit():\n columna=nieto.text\n #print(nieto.text)\n elif nieto.text==\"\":\n report.add(''+str(fechaHora())+'Error: la Columna de la matriz '+nombre+' no contiene un numero')\n else:\n report.add(''+str(fechaHora())+'Error: la Columna de la matriz '+nombre+' no es un numero')\n #print(\"no es un numero\")\n elif nieto.tag==\"imagen\":\n if nieto.text!=\"\":\n image=nieto.text\n image=image.replace(\" \", \"\")\n image=image.replace(\"\\t\",\"\")\n print(image)\n else:\n report.add(''+str(fechaHora())+'Error: la matriz no contiene una imagen 
para procesar')\n else: continue\n if nombre!=\"\" and filas!=\"\" and columna!=\"\" and image!=\"\":\n dato=verific(image)\n if int(filas)==dato[0] and int(columna)==dato[1]:\n if lista.buscar2(nombre)==False:\n lista.add(nombre,filas,columna,image)\n mensje=''+str(fechaHora())+''+nombre+' - Espacios LLenos:'+str(dato[2])+' - Espacios Vacios:'+str(dato[3])\n report.add(mensje) \n print(report.tamaño)\n nombre=\"\"\n filas=\"\"\n columna=\"\"\n image=\"\"\n else:\n report.add(''+str(fechaHora())+'Error: No se pudo guardar La matriz '+str(nombre)+' porque ya existe')\n continue\n else:\n report.add(''+str(fechaHora())+'Error: No se pudo guardar la matriz hace falta un elemento')\n continue\n Archivo.close()\n leer.mainloop()\ndef verific(picture):\n from listaSimpleAuxiliar import listaEnlazadaMatriz as ListaAux\n matrizOriginal=ListaAux()\n picture= picture.split(\"\\n\")\n picture.pop(0)\n numero=int(len(picture))\n picture.pop(numero-1)\n columna=int(len(picture[0]))\n fila=int(len(picture))\n #picture=picture.remove(\"\")\n for x in range(fila-1):\n for y in range(columna-1):\n if picture[x][y]==\"*\":\n matrizOriginal.add(x+1,y+1,picture[x][y]) \n llenos=matrizOriginal.tamaño\n vacios=(fila*columna)-matrizOriginal.tamaño \n return fila,columna,llenos,vacios "
},
{
"alpha_fraction": 0.597758948802948,
"alphanum_fraction": 0.6068093776702881,
"avg_line_length": 35.64210510253906,
"blob_id": "49801ee50dbbad9e456c4d89b0f14e75b4011256",
"content_id": "d3ec8571fbfcba8e04cd6b2d1249d6781c68c7f3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6961,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 190,
"path": "/matrizOctagonal.py",
"repo_name": "AbidelLux/Proyecto02_1S2021IPC2_JAMES",
"src_encoding": "UTF-8",
"text": "class NodoMatrizOcta:\n def __init__(self,x,y,dato):\n self.x=x\n self.y=y\n self.dato=dato\n self.arriba=None\n self.abajo=None\n self.izquierda=None\n self.derecha=None\nclass NodoCabecera:\n def __init__(self,tipo=None,indice=None,siguiente=None,derecha=None,abajo=None):\n self.tipo=tipo\n self.indice=indice\n self.siguiente=siguiente\n self.derecha=derecha\n self.abajo=abajo\nclass NodoRaiz:\n def __init__(self):\n self.NodoFilas=None\n self.NodoColumnas=None\nclass matriz_Ortogonal:\n def __init__(self):\n self.NodoRaiz=None\n def insertar_nodoFila(self,nodo):\n filaTemporal=self.NodoRaiz.NodoFilas\n while(filaTemporal.indice!=nodo.y):\n filaTemporal=filaTemporal.siguiente\n if filaTemporal.derecha is None:\n nodo.derecha=filaTemporal.derecha\n filaTemporal.derecha=nodo\n elif filaTemporal.derecha.x >= nodo.x:\n nodo.derecha=filaTemporal.derecha\n filaTemporal.derecha=nodo\n else:\n actual=filaTemporal.derecha\n while actual.derecha is not None and actual.derecha.x < nodo.x:\n actual=actual.derecha\n nodo.derecha=actual.derecha\n actual.derecha=nodo\n def insert_nodoColumna(self,nodo):\n columnaTemporal=self.NodoRaiz.NodoColumnas\n while columnaTemporal.indice!=nodo.x:\n columnaTemporal=columnaTemporal.siguiente\n if columnaTemporal.abajo is None:\n nodo.abajo=columnaTemporal.abajo\n columnaTemporal.abajo=nodo\n elif columnaTemporal.abajo.y >= nodo.y:\n nodo.abajo=columnaTemporal.abajo\n columnaTemporal.abajo=nodo\n else:\n actual=columnaTemporal.abajo\n while actual.abajo is not None and actual.abajo.y < nodo.y:\n actual=actual.abajo\n nodo.abajo=actual.abajo\n actual.abajo=nodo\n def insert_cabecera(self,nodo,indice,tipo):\n #NodoRaiz=NodoRaiz()\n filaTemporal=nodo\n if filaTemporal.indice > indice:\n newCabeza=NodoCabecera(tipo=tipo,indice=indice)\n newCabeza.siguiente=self.NodoRaiz.NodoFilas\n self.NodoRaiz.NodoFilas=newCabeza\n else:\n actual=filaTemporal\n while actual.siguiente is not None and actual.siguiente.indice <= indice:\n 
actual=actual.siguiente\n if actual.indice != indice:\n newCabeza=NodoCabecera(tipo=tipo, indice=indice)\n newCabeza.siguiente=actual.siguiente\n actual.siguiente=newCabeza\n def insertar(self,x,y,dato):\n Nodo=NodoMatrizOcta(x,y,dato)\n if self.NodoRaiz is None:\n self.NodoRaiz=NodoRaiz()\n self.NodoRaiz.NodoColumnas=NodoCabecera(tipo=\"Columna\",indice=x)\n self.NodoRaiz.NodoFilas=NodoCabecera(tipo=\"Fila\",indice=y)\n self.NodoRaiz.NodoColumnas.siguiente=None\n self.NodoRaiz.NodoFilas.siguiente=None\n self.NodoRaiz.NodoColumnas.abajo=Nodo\n self.NodoRaiz.NodoFilas.derecha=Nodo\n else:\n NodoAuxiliar=self.NodoRaiz\n self.insert_cabecera(NodoAuxiliar.NodoFilas,y,\"Filas\")\n NodoAuxiliar=self.NodoRaiz\n self.insert_cabecera(NodoAuxiliar.NodoColumnas,x,\"Columna\")\n self.insertar_nodoFila(nodo=Nodo)\n self.insert_nodoColumna(nodo=Nodo)\n def buscar(self,x,y):\n nodo=self.NodoRaiz.NodoColumnas\n while nodo is not None:\n NodoAuxiliar=nodo.abajo\n while NodoAuxiliar is not None:\n if NodoAuxiliar.x==x and NodoAuxiliar.y==y:\n return True\n NodoAuxiliar=NodoAuxiliar.abajo\n nodo=nodo.siguiente\n return False\n \n''' \nimport os\n\n\nnueva_matriz = matriz_Ortogonal()\n\nnueva_matriz.insertar(2,5,\"nuevo nodo\")\nnueva_matriz.insertar(2,3,\"nuevo nodo\")\nnueva_matriz.insertar(2,4,\"nuevo nodo\")\nnueva_matriz.insertar(2,9,\"nuevo nodo\")\nnueva_matriz.insertar(3,7,\"nuevo nodo\")\nnueva_matriz.insertar(3,1,\"nuevo nodo\")\nnueva_matriz.insertar(3,8,\"nuevo nodo\")\nnueva_matriz.insertar(3,9,\"nuevo nodo\")\nnueva_matriz.insertar(6,6,\"nuevo nodo\")\nnueva_matriz.insertar(6,1,\"nuevo nodo\")\nnueva_matriz.insertar(6,5,\"nuevo nodo\")\nnueva_matriz.insertar(6,9,\"nuevo nodo\")\nnueva_matriz.insertar(8,3,\"nuevo nodo\")\nnueva_matriz.insertar(8,4,\"nuevo nodo\")\nnueva_matriz.insertar(9,8,\"nuevo nodo\")\nnueva_matriz.insertar(9,9,\"nuevo nodo\")\nnueva_matriz.insertar(8,1,\"nuevo nodo\")\nnueva_matriz.insertar(10,10,\"nuevo nodo\")\nnueva_matriz.insertar(10,1,\"nuevo 
nodo\")\nnodo = nueva_matriz.NodoRaiz.NodoColumnas\n\n\nnodo = nueva_matriz.NodoRaiz.NodoFilas\nwhile(nodo is not None):\n nodo_temp = nodo.derecha\n while(nodo_temp is not None):\n print(str(nodo_temp.x)+str(nodo_temp.y))\n nodo_temp=nodo_temp.derecha\n nodo=nodo.siguiente\n \nprint(\"FIN\")\nnodo = nueva_matriz.NodoRaiz.NodoColumnas\nwhile(nodo is not None):\n nodo_temp = nodo.abajo\n while(nodo_temp is not None):\n print(str(nodo_temp.x)+str(nodo_temp.y))\n nodo_temp=nodo_temp.abajo\n nodo=nodo.siguiente\n \nprint(\"FIN\")\n'''\ndef graficar_matriz(nueva_matriz,name):\n import os\n grafo = \"digraph\"\n grafo+=str(\"{\\nnode[shape=record];\\n\")\n grafo+=str(\"graph[pencolor=transparent];\\n\")\n #grafo+=str(\"rankdir=LR;\\n\")\n grafo+=str(\"node [style=filled];\\n\")\n nodo = nueva_matriz.NodoRaiz.NodoFilas\n\n for y in range(1, 11):\n nodo_temp = nodo.derecha\n for x in range(1, 11):\n if(nueva_matriz.buscar(x,y)):\n grafo+=str(\"p\"+str(x)+str(y)+\"[label=\\\"{<data>\"+str(x)+\",\"+str(y)+\"|<next>}\\\" pos=\\\"\"+str(x)+\",\"+str(10-y)+\"!\\\"];\\n\")\n if(nodo_temp.derecha!=None): \n nodo_2=nodo_temp\n nodo_temp=nodo_temp.derecha\n grafo+=str(\"p\"+str(nodo_2.x)+str(nodo_2.y)+\"->\"+\"p\"+str(nodo_temp.x)+str(nodo_temp.y)+\"[dir=both];\\n\")\n else:\n pass\n if nodo.siguiente!=None:\n if nodo.siguiente.indice==y+1:\n nodo=nodo.siguiente \n nodo = nueva_matriz.NodoRaiz.NodoColumnas\n for x in range(1, 11):\n nodo_temp = nodo.abajo\n for y in range(1, 11):\n if(nueva_matriz.buscar(x,y)):\n if(nodo_temp.abajo!=None):\n nodo_2=nodo_temp\n nodo_temp=nodo_temp.abajo\n grafo+=str(\"p\"+str(nodo_2.x)+str(nodo_2.y)+\"->\"+\"p\"+str(nodo_temp.x)+str(nodo_temp.y)+\"[dir=both];\\n\")\n else:\n pass\n if nodo.siguiente!=None:\n if nodo.siguiente.indice==x+1:\n nodo=nodo.siguiente \n grafo+=str(\"}\\n\")\n f= open(name+\".dot\",\"w+\")\n f.write(grafo)\n f.close() \n os.system(\"fdp -Tpng -o \"+name+\".png \"+name+\".dot\")\n\n\n#graficar_matriz()"
},
{
"alpha_fraction": 0.7265625,
"alphanum_fraction": 0.7265625,
"avg_line_length": 23.799999237060547,
"blob_id": "7945b464b160fc80e88ed35c741a296569d8b2e3",
"content_id": "9572f59855628e46a36c35fc520e52e3f5683b6d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 128,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 5,
"path": "/inicio.py",
"repo_name": "AbidelLux/Proyecto02_1S2021IPC2_JAMES",
"src_encoding": "UTF-8",
"text": "report=\"\"\ndef reporte():\n global report\n from ListaReport import listaEnlazadaMatriz\n report=listaEnlazadaMatriz()\n "
}
] | 9 |
Herosoumen/RestFrameworkCRUD
|
https://github.com/Herosoumen/RestFrameworkCRUD
|
8fcec1ae78050a6fa4340b5c31dd1ed701b0e19d
|
d07397426a4031134db7b966506db6bd7bc74558
|
e4496ef7b95a024b86661dc64009b4a1b52f7cdf
|
refs/heads/master
| 2020-07-03T09:50:24.212510 | 2019-08-12T06:29:29 | 2019-08-12T06:29:29 | 201,870,794 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6666666865348816,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 58.70000076293945,
"blob_id": "5476722584801b99c0b0fbf281863b2684bb0552",
"content_id": "631fcdd13e75ae136b55d4c5b700947092cbd8ce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 597,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 10,
"path": "/employeeproject/employeeapp/api/urls.py",
"repo_name": "Herosoumen/RestFrameworkCRUD",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import url\nfrom . import views\nurlpatterns = [\n url(r'^user/$',views.UserListCreateView.as_view(),name = 'get_create_user'),\n url(r'^user/(?P<pk>\\d+)/$',views.UserUpdateView.as_view(),name = 'update_user'),\n url(r'^address/$',views.AddressListCreateView.as_view(),name = 'address_create'),\n url(r'^address/(?P<pk>\\d+)/$',views.AddressUpdateView.as_view(),name = 'address_update'),\n url(r'^profile/$',views.ProfileListCreateView.as_view(),name = 'profile_create'),\n url(r'^profile/(?P<pk>\\d+)/$',views.ProfileUpdateView.as_view(),name = 'profile_update'),\n]\n"
},
{
"alpha_fraction": 0.6843017935752869,
"alphanum_fraction": 0.6938421726226807,
"avg_line_length": 40.17856979370117,
"blob_id": "4dff383be4413e3cf81aadecd29eed02cd9ce967",
"content_id": "5e191eac91a955df1c398804947fc6126d5b20f9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1153,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 28,
"path": "/employeeproject/employeeapp/models.py",
"repo_name": "Herosoumen/RestFrameworkCRUD",
"src_encoding": "UTF-8",
"text": "from django.db import models\nfrom django.contrib.auth.models import User\n# Create your models here.\nclass Address(models.Model):\n street_no = models.CharField(max_length = 65)\n city = models.CharField(max_length = 65)\n state = models.CharField(max_length = 65)\n pincode = models.CharField(max_length = 65)\n country = models.CharField(max_length = 65)\n created_at = models.DateField(auto_now_add = True)\n updated_at = models.DateField(auto_now = True)\n\n def __str__(self):\n return self.city\n\n\nclass Profile(models.Model):\n GENDER_CHOICES = (\n ('male', 'Male'),\n ('female', 'Female')\n )\n user = models.ForeignKey(User,on_delete=models.CASCADE,related_name = 'usermodel')\n permanent_address_city = models.ForeignKey(Address,on_delete=models.CASCADE,related_name = 'permanent_address_city')\n phone_number = models.PositiveIntegerField()\n gender = models.CharField(max_length = 5,choices = GENDER_CHOICES)\n # profile_pic = models.ImageField(upload_to='profile_pic',default = 'default.jpg')\n created_at = models.DateField(auto_now_add = True)\n updated_at = models.DateField(auto_now = True)\n"
},
{
"alpha_fraction": 0.7505422830581665,
"alphanum_fraction": 0.7505422830581665,
"avg_line_length": 29.733333587646484,
"blob_id": "0bf31d5869469a0cc5598717800a39db9ee571c6",
"content_id": "8c712c4c4bc3456a8184fe6538a9d5944a8e8ff5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 461,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 15,
"path": "/employeeproject/employeeapp/admin.py",
"repo_name": "Herosoumen/RestFrameworkCRUD",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\nfrom .models import Address,Profile\n# Register your models here.\n\n\nclass AddressAdmin(admin.ModelAdmin):\n list_display = ['street_no','city','state','pincode','country','created_at','updated_at']\n\n\nclass ProfileAdmin(admin.ModelAdmin):\n list_display = ['user','permanent_address_city','phone_number','gender','created_at','updated_at']\n\n\nadmin.site.register(Address,AddressAdmin)\nadmin.site.register(Profile,ProfileAdmin)\n"
},
{
"alpha_fraction": 0.7947883009910583,
"alphanum_fraction": 0.7947883009910583,
"avg_line_length": 34.08571243286133,
"blob_id": "a603e9a8519c5466791bd4d6d51db29c50c5978d",
"content_id": "350178151fdf95ccf37c8482ec892d758de5d873",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1228,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 35,
"path": "/employeeproject/employeeapp/api/views.py",
"repo_name": "Herosoumen/RestFrameworkCRUD",
"src_encoding": "UTF-8",
"text": "from rest_framework import generics\nfrom ..models import Address,Profile\nfrom django.contrib.auth.models import User\nfrom .serializers import UserSerializer,AddressSerializer,ProfileSerializer\n\nclass UserListCreateView(generics.ListCreateAPIView):\n queryset = User.objects.all()\n serializer_class = UserSerializer\n\n\nclass UserUpdateView(generics.RetrieveUpdateDestroyAPIView):\n queryset = User.objects.all()\n serializer_class = UserSerializer\n\n\nclass AddressListCreateView(generics.ListCreateAPIView):\n queryset = Address.objects.all()\n serializer_class = AddressSerializer\n\n\nclass AddressUpdateView(generics.RetrieveUpdateDestroyAPIView):\n queryset = Address.objects.all()\n serializer_class = AddressSerializer\n\n\nclass ProfileListCreateView(generics.ListCreateAPIView):\n queryset = Profile.objects.all()\n serializer_class = ProfileSerializer\n def perform_create(self, serializer_class):\n serializer_class.validated_data['username'] = self.request.user.username\n return super(ProfileListCreateView, self).perform_create(serializer_class)\n\nclass ProfileUpdateView(generics.RetrieveUpdateDestroyAPIView):\n queryset = Profile.objects.all()\n serializer_class = ProfileSerializer\n"
},
{
"alpha_fraction": 0.5771712064743042,
"alphanum_fraction": 0.5910670161247253,
"avg_line_length": 43.77777862548828,
"blob_id": "1b20939651e39590d40a0b14f91fe69a83bdba57",
"content_id": "de89efc86f2699dd84b54928760747965ca826e0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2015,
"license_type": "no_license",
"max_line_length": 172,
"num_lines": 45,
"path": "/employeeproject/employeeapp/migrations/0001_initial.py",
"repo_name": "Herosoumen/RestFrameworkCRUD",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.6 on 2019-08-10 16:19\nfrom __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Address',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('street_no', models.CharField(max_length=65)),\n ('city', models.CharField(max_length=65)),\n ('state', models.CharField(max_length=65)),\n ('pincode', models.CharField(max_length=65)),\n ('country', models.CharField(max_length=65)),\n ('created_at', models.DateField(auto_now_add=True)),\n ('updated_at', models.DateField(auto_now=True)),\n ],\n ),\n migrations.CreateModel(\n name='Profile',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('phone_number', models.PositiveIntegerField()),\n ('gender', models.CharField(choices=[('male', 'Male'), ('female', 'Female')], max_length=5)),\n ('profile_pic', models.ImageField(default='default.jpg', upload_to='profile_pic')),\n ('created_at', models.DateField(auto_now_add=True)),\n ('updated_at', models.DateField(auto_now=True)),\n ('permanent_address_city', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='permanent_address_city', to='employeeapp.Address')),\n ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='usermodel', to=settings.AUTH_USER_MODEL)),\n ],\n ),\n ]\n"
},
{
"alpha_fraction": 0.692644476890564,
"alphanum_fraction": 0.692644476890564,
"avg_line_length": 29.864864349365234,
"blob_id": "5e6de560ad8b871ccd4c106b5305bd02c2425b92",
"content_id": "309e2e0f799f35166328436097a6608346c8c553",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1142,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 37,
"path": "/employeeproject/employeeapp/api/serializers.py",
"repo_name": "Herosoumen/RestFrameworkCRUD",
"src_encoding": "UTF-8",
"text": "from rest_framework import serializers,fields\nfrom ..models import Address,Profile\nfrom django.contrib.auth.models import User\n\nclass AddressSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Address\n fields = '__all__'\n\n\n\n\n\nclass UserSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = User\n fields = ('username','email')\n\n\n\nclass ProfileSerializer(serializers.ModelSerializer):\n usermodel = UserSerializer(read_only = True)\n permanent_address_city = AddressSerializer(many = True)\n class Meta:\n model = Profile\n fields = ['phone_number','gender','created_at','usermodel','permanent_address_city']\n\n def create(self,validated_data):\n addresses = validated_data.pop('permanent_address_city')\n address_create = Address.objects.create(**addresses)\n username = validated_data.pop('username')\n user_details = User.objects.get(username = username)\n profile = Profile.objects.create(user = user_details,permanent_address_city = address_create,**validated_data)\n return profile\n # for validated in validated_data:\n"
},
{
"alpha_fraction": 0.5382652878761292,
"alphanum_fraction": 0.5918367505073547,
"avg_line_length": 19.63157844543457,
"blob_id": "f6584e457efcbac3b0ec65ad56e0e697c13d3f35",
"content_id": "8b63312f23c5e39e37f4df63bb9128b3cf323f28",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 392,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 19,
"path": "/employeeproject/employeeapp/migrations/0002_remove_profile_profile_pic.py",
"repo_name": "Herosoumen/RestFrameworkCRUD",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.6 on 2019-08-10 19:43\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('employeeapp', '0001_initial'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='profile',\n name='profile_pic',\n ),\n ]\n"
}
] | 7 |
jnsp/casting-agency
|
https://github.com/jnsp/casting-agency
|
e0a1e40dd5df344d1a04ef20392731f3fd7428bb
|
79d46cc94de91a2c04efe6a417d02153cf1b330f
|
2bd14e870b1215b9c88ce0127c8ed518463ced2a
|
refs/heads/main
| 2023-03-04T17:03:00.205680 | 2021-02-20T00:58:23 | 2021-02-20T00:58:23 | 338,344,786 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5578333139419556,
"alphanum_fraction": 0.5711506605148315,
"avg_line_length": 35.124324798583984,
"blob_id": "0e0c19362fcf4064a44672d09fa8350012769fb4",
"content_id": "36508c85338b847da6f3d9c687eea37de907b771",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6683,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 185,
"path": "/tests/test_api.py",
"repo_name": "jnsp/casting-agency",
"src_encoding": "UTF-8",
"text": "from datetime import date\n\nimport pytest\n\nfrom app.api import convert_str_to_date\nfrom app import create_app, db\nfrom app.fake_jwt import get_fake_token\nfrom app.models import Movie, Actor\n\n\[email protected]\ndef client():\n app = create_app('testing')\n with app.test_client() as client:\n app.config['USE_FAKE_JWKS'] = True\n with app.app_context():\n db.create_all()\n yield client\n db.drop_all()\n\n\nclass TestMovie:\n @pytest.fixture\n def test_movie(self):\n movie = Movie(title='MOVIE', release_date=date(2021, 1, 1))\n movie.save()\n return movie\n\n @pytest.fixture\n def new_movie_info(self):\n return {'title': 'NEW_MOVIE', 'release_date': '2021-02-01'}\n\n def test_get_movies(self, client, test_movie):\n res = client.get('/movies', headers=header('view:movies'))\n\n expected = {'success': True, 'movies': [test_movie.to_dict()]}\n assert res.get_json() == expected\n\n def test_get_empty_movies_when_no_data(self, client):\n res = client.get('/movies', headers=header('view:movies'))\n\n expected = {'success': True, 'movies': []}\n assert res.get_json() == expected\n\n def test_make_movie(self, client, new_movie_info):\n res = client.post('/movies',\n json=new_movie_info,\n headers=header('add:movies'))\n expected = {'success': True, 'movie': {**new_movie_info, 'id': 1}}\n assert res.get_json() == expected\n\n movie = Movie.query.get(1)\n assert movie.to_dict() == {**new_movie_info, 'id': 1}\n\n def test_date_format(self, client, new_movie_info):\n new_movie_info['release_date'] = 'Tue Aug 16 1988'\n res = client.post('/movies',\n json=new_movie_info,\n headers=header('add:movies'))\n assert res.status_code == 400\n assert res.get_json() == {\n 'success': False,\n 'error': 'Wrong date format: YYYY-MM-DD',\n }\n\n def test_modify_movie(self, client, test_movie, new_movie_info):\n res = client.patch('/movies/1',\n json=new_movie_info,\n headers=header('modify:movies'))\n expected = {'success': True, 'movie': {**new_movie_info, 'id': 1}}\n assert 
res.get_json() == expected\n\n movie = Movie.query.get(1)\n assert movie.to_dict() == {**new_movie_info, 'id': 1}\n\n def test_not_found_error_when_modify(self, client):\n res = client.patch('/movies/1', headers=header('modify:movies'))\n assert res.status_code == 404\n assert res.get_json() == {'success': False, 'error': 'Not found'}\n\n def test_remove_movie(self, client, test_movie):\n res = client.delete('/movies/1', headers=header('delete:movies'))\n expected = {'success': True, 'deleted': test_movie.to_dict()}\n\n assert res.get_json() == expected\n assert Movie.query.get(1) is None\n\n def test_not_found_error_when_remove(self, client):\n res = client.delete('/movies/1', headers=header('delete:movies'))\n assert res.status_code == 404\n assert res.get_json() == {'success': False, 'error': 'Not found'}\n\n def test_autherror_not_has_auth_header(self, client):\n res = client.get('/movies')\n assert res.status_code == 401\n assert res.get_json() == {\n 'success': False,\n 'error': 'No Authorization header'\n }\n\n def test_autherror_not_has_proper_permission(self, client, test_movie):\n res = client.delete('/movies/1', headers=header('view:movies'))\n assert res.status_code == 403\n assert res.get_json() == {\n 'success': False,\n 'error': 'Permission not found'\n }\n\n\nclass TestActor:\n @pytest.fixture\n def test_actor(self):\n actor = Actor(name='ACTOR', age=10, gender='F')\n actor.save()\n return actor\n\n @pytest.fixture\n def new_actor_info(self):\n return {'name': 'NEW_ACTOR', 'age': 20, 'gender': 'X'}\n\n def test_get_actors(self, client, test_actor):\n res = client.get('/actors', headers=header('view:actors'))\n expected = {'success': True, 'actors': [test_actor.to_dict()]}\n assert res.get_json() == expected\n\n def test_make_actor(self, client, new_actor_info):\n res = client.post('/actors',\n json=new_actor_info,\n headers=header('add:actors'))\n expected = {'success': True, 'actor': {**new_actor_info, 'id': 1}}\n assert res.get_json() == 
expected\n\n actor = Actor.query.get(1)\n assert actor.to_dict() == {**new_actor_info, 'id': 1}\n\n def test_modify_actor(self, client, test_actor, new_actor_info):\n res = client.patch('/actors/1',\n json=new_actor_info,\n headers=header('modify:actors'))\n expected = {'success': True, 'actor': {**new_actor_info, 'id': 1}}\n assert res.get_json() == expected\n\n actor = Actor.query.get(1)\n assert actor.to_dict() == {**new_actor_info, 'id': 1}\n\n def test_not_found_error_when_modify(self, client):\n res = client.patch('/actors/1', headers=header('modify:actors'))\n assert res.status_code == 404\n assert res.get_json() == {'success': False, 'error': 'Not found'}\n\n def test_remove_actor(self, client, test_actor):\n res = client.delete('/actors/1', headers=header('delete:actors'))\n expected = {'success': True, 'deleted': test_actor.to_dict()}\n assert res.get_json() == expected\n assert Actor.query.get(1) is None\n\n def test_not_found_error_when_remove(self, client):\n res = client.delete('/actors/1', headers=header('delete:actors'))\n assert res.status_code == 404\n assert res.get_json() == {'success': False, 'error': 'Not found'}\n\n def test_autherror_not_has_auth_header(self, client):\n res = client.get('/actors')\n assert res.status_code == 401\n assert res.get_json() == {\n 'success': False,\n 'error': 'No Authorization header'\n }\n\n def test_autherror_not_has_proper_permission(self, client, test_actor):\n res = client.delete('/actors/1', headers=header('view:actors'))\n assert res.status_code == 403\n assert res.get_json() == {\n 'success': False,\n 'error': 'Permission not found'\n }\n\n\ndef test_convert_str_to_date():\n assert convert_str_to_date('2021-01-01') == date(2021, 1, 1)\n\n\ndef header(permission):\n token = get_fake_token([permission])\n return {'Authorization': f'bearer {token}'}\n"
},
{
"alpha_fraction": 0.5754231214523315,
"alphanum_fraction": 0.5864606499671936,
"avg_line_length": 22.297142028808594,
"blob_id": "490e508134b3c2dba8d74d86d51a7074e7eaa719",
"content_id": "6b435cc137ea11064606541068d83a81d0c57b5c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4077,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 175,
"path": "/app/api.py",
"repo_name": "jnsp/casting-agency",
"src_encoding": "UTF-8",
"text": "from datetime import datetime\nimport os\n\nfrom dotenv import load_dotenv\nfrom flask import Blueprint, jsonify, request, abort, redirect\n\nfrom .auth import require_auth, AuthError\nfrom .models import Movie, Actor\n\napi = Blueprint('api', __name__)\nload_dotenv()\n\n\[email protected]('/login')\ndef login():\n auth0_login_url = (f'https://{os.getenv(\"AUTH0_DOMAIN\")}/authorize?'\n f'audience={os.getenv(\"API_AUDIENCE\")}&'\n 'response_type=token&'\n f'client_id={os.getenv(\"CLIENT_ID\")}&'\n f'redirect_uri={os.getenv(\"APP_URL\")}/callback&'\n 'state=STATE')\n return redirect(auth0_login_url, 302)\n\n\[email protected]('/callback')\ndef callback():\n return 'Logged-in!'\n\n\[email protected]('/movies')\n@require_auth(permission='view:movies')\ndef get_movies():\n movies = [m.to_dict() for m in Movie.query.all()]\n return jsonify({\n 'success': True,\n 'movies': movies,\n }), 200\n\n\[email protected]('/movies', methods=['POST'])\n@require_auth(permission='add:movies')\ndef make_movie():\n body = request.get_json()\n try:\n movie = Movie(title=body['title'],\n release_date=convert_str_to_date(body['release_date']))\n except ValueError:\n abort(400, 'Wrong date format: YYYY-MM-DD')\n else:\n movie.save()\n\n return jsonify({\n 'success': True,\n 'movie': movie.to_dict(),\n }), 200\n\n\[email protected]('/movies/<int:id>', methods=['PATCH'])\n@require_auth(permission='modify:movies')\ndef modify_movie(id):\n body = request.get_json()\n movie = Movie.query.get_or_404(id)\n\n if (title := body.get('title')):\n movie.title = title\n if (release_date := body.get('release_date')):\n release_date = convert_str_to_date(release_date)\n movie.release_date = release_date\n\n movie.save()\n\n return jsonify({\n 'success': True,\n 'movie': movie.to_dict(),\n })\n\n\[email protected]('/movies/<int:id>', methods=['DELETE'])\n@require_auth(permission='delete:movies')\ndef remove_movie(id):\n movie = Movie.query.get_or_404(id)\n movie.remove()\n return jsonify({\n 
'success': True,\n 'deleted': movie.to_dict(),\n })\n\n\[email protected]('/actors')\n@require_auth(permission='view:actors')\ndef get_actors():\n actors = [a.to_dict() for a in Actor.query.all()]\n return jsonify({\n 'success': True,\n 'actors': actors,\n }), 200\n\n\[email protected]('/actors', methods=['POST'])\n@require_auth(permission='add:actors')\ndef make_actor():\n body = request.get_json()\n actor = Actor(**body)\n actor.save()\n\n return jsonify({'success': True, 'actor': actor.to_dict()})\n\n\[email protected]('/actors/<int:id>', methods=['PATCH'])\n@require_auth(permission='modify:actors')\ndef modify_actor(id):\n body = request.get_json()\n actor = Actor.query.get_or_404(id)\n\n if (name := body.get('name')):\n actor.name = name\n if (age := body.get('age')):\n actor.age = age\n if (gender := body.get('gender')):\n actor.gender = gender\n\n actor.save()\n\n return jsonify({\n 'success': True,\n 'actor': actor.to_dict(),\n })\n\n\[email protected]('/actors/<int:id>', methods=['DELETE'])\n@require_auth(permission='delete:actors')\ndef remove_actor(id):\n actor = Actor.query.get_or_404(id)\n actor.remove()\n\n return jsonify({\n 'success': True,\n 'deleted': actor.to_dict(),\n })\n\n\[email protected]_errorhandler(400)\ndef bad_request(e):\n return jsonify({\n 'success': False,\n 'error': e.description,\n }), 400\n\n\[email protected]_errorhandler(404)\ndef not_found(e):\n return jsonify({\n 'success': False,\n 'error': 'Not found',\n }), 404\n\n\[email protected]_errorhandler(405)\ndef methoed_not_allowed(e):\n return jsonify({\n 'success': False,\n 'error': 'Method now allowed',\n })\n\n\[email protected]_errorhandler(AuthError)\ndef forbidden(e):\n return jsonify({\n 'success': False,\n 'error': e.error,\n }), e.status_code\n\n\ndef convert_str_to_date(date_str):\n return datetime.strptime(date_str, '%Y-%m-%d').date()\n"
},
{
"alpha_fraction": 0.6614583134651184,
"alphanum_fraction": 0.6614583134651184,
"avg_line_length": 17.285715103149414,
"blob_id": "b71a7ac5b34d30eded05d8d2e8a7035487a8ff23",
"content_id": "b70daac6394e1b4dffdbfad02e7bae28d4455bb8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 384,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 21,
"path": "/tests/test_basic.py",
"repo_name": "jnsp/casting-agency",
"src_encoding": "UTF-8",
"text": "from flask import current_app\nimport pytest\n\nfrom app import create_app, db\n\n\[email protected]\ndef init_app():\n app = create_app('testing')\n with app.app_context():\n db.create_all()\n yield\n db.drop_all()\n\n\ndef test_app_exists(init_app):\n assert current_app is not None\n\n\ndef test_app_is_testing(init_app):\n assert current_app.config['TESTING'] is True\n"
},
{
"alpha_fraction": 0.601763129234314,
"alphanum_fraction": 0.6159448027610779,
"avg_line_length": 27.670330047607422,
"blob_id": "335c81e932e28cf31ddc40add5a0a38b2ecffd8d",
"content_id": "b520f4246d7ab42b0a8638e3d33158732482cc20",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2609,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 91,
"path": "/app/auth.py",
"repo_name": "jnsp/casting-agency",
"src_encoding": "UTF-8",
"text": "from functools import wraps\nimport os\n\nfrom dotenv import load_dotenv\nfrom flask import request, current_app\nfrom jose import jwt\nimport requests\n\nfrom . import fake_jwt\n\nload_dotenv()\n\n\ndef get_auth_token():\n if (auth_header := request.headers.get('Authorization')) is None:\n raise AuthError('No Authorization header', 401)\n\n auth_parts = auth_header.split(' ')\n if len(auth_parts) != 2:\n raise AuthError('Token splited too few or many', 401)\n\n auth_type, token = auth_parts\n if auth_type.lower() != 'bearer':\n raise AuthError('Not bearer auth type', 401)\n\n return token\n\n\ndef get_jwks():\n jwks_url = f\"https://{os.getenv('AUTH0_DOMAIN')}/.well-known/jwks.json\"\n return requests.get(jwks_url).json()\n\n\ndef validate_jwt(token, jwks):\n try:\n token_header = jwt.get_unverified_header(token)\n except jwt.JWTError:\n raise AuthError('Unable decoding token headers', 401)\n\n if not (kid := token_header.get('kid')):\n raise AuthError('Token has no kid', 401)\n\n if not (signing_key := [k for k in jwks['keys'] if k['kid'] == kid]):\n raise AuthError('No matched kid', 401)\n\n try:\n payload = jwt.decode(token,\n signing_key[0],\n algorithms=os.getenv('ALGORITHM'),\n audience=os.getenv('API_AUDIENCE'),\n issuer=f\"https://{os.getenv('AUTH0_DOMAIN')}/\")\n except jwt.ExpiredSignatureError:\n raise AuthError('Token is expired', 401)\n except jwt.JWTClaimsError:\n raise AuthError('Invalid claim', 401)\n except Exception:\n raise AuthError('Unable validate token', 401)\n\n return payload\n\n\ndef check_permission(permission, payload):\n if (permissions := payload.get('permissions')) is None:\n raise AuthError('Payload has NO permissions', 403)\n\n if permission and permission not in permissions:\n raise AuthError('Permission not found', 403)\n\n return True\n\n\ndef require_auth(permission=None):\n def _require_auth(f):\n @wraps(f)\n def wrapper(*args, **kwargs):\n jwt = get_auth_token()\n use_test_jwks = 
current_app.config.get('USE_FAKE_JWKS')\n jwks = get_jwks() if not use_test_jwks else fake_jwt.jwks\n payload = validate_jwt(jwt, jwks)\n check_permission(permission, payload)\n return f(*args, **kwargs)\n\n return wrapper\n\n return _require_auth\n\n\nclass AuthError(Exception):\n def __init__(self, error, status_code):\n self.error = error\n self.status_code = status_code\n"
},
{
"alpha_fraction": 0.5393258333206177,
"alphanum_fraction": 0.5808124542236328,
"avg_line_length": 23.10416603088379,
"blob_id": "4cf14c85d0bdb23811bf5b2c6610e4471d7d983e",
"content_id": "f2ace02724acf7106f9d1d5b08da67a6607209a2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1157,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 48,
"path": "/app/fake_jwt.py",
"repo_name": "jnsp/casting-agency",
"src_encoding": "UTF-8",
"text": "\"\"\"\nThis is only used for tests.\nEvery keys here are NOT used in production env.\n\"\"\"\nfrom jose import jwt\n\nprivate_key = (\n '-----BEGIN PRIVATE KEY-----\\n'\n 'MIIBUwIBADANBgkqhkiG9w0BAQEFAASCAT0wggE5AgEAAkEAiRTQJ4g8GcKnRQMz'\n 'tEWE2NxU2HJerjMlIhbaVfztJpMWZ70JvB1sG8JzVMc5mgaPEOQbBAHxz5EK9fOd'\n 'W333YwIDAQABAkBRoRU7FUNEy8czr25woR00zi+wHJsI/OfV3unxXoYR+5GpR185'\n '4RVKSUw7aeDvLfBh2P32hqZ3fTNcniIoiexxAiEAv2wdlscUI572ChSsLno858uZ'\n '1rLnd+xKI6Ic7mWwALUCIQC3U58UEayMEekPhZKL28VM9Sx18BFguUiklwovcp9e'\n 'twIgFqOr0DRVXm0jfke5oXmVkHiVBj58f8NzdUlsEIn4Se0CICZTaA1lCIKb9/JT'\n 'xWhRwLSvCOV7E9b5xVMLdIio2OKPAiBqWSbsCQ3pXeBlPpfkbNNLeOt4CQYR7pJ9'\n 'P792/e675A==\\n'\n '-----END PRIVATE KEY-----')\n\npublic_key = (\n '-----BEGIN PUBLIC KEY-----\\n'\n 'MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAIkU0CeIPBnCp0UDM7RFhNjcVNhyXq4z'\n 'JSIW2lX87SaTFme9CbwdbBvCc1THOZoGjxDkGwQB8c+RCvXznVt992MCAwEAAQ==\\n'\n '-----END PUBLIC KEY-----')\n\njwks = {\n 'keys': [{\n 'alg': 'RS256',\n 'kty': 'RSA',\n 'use': 'sig',\n 'n': 'iRTQJ4g8GcKnRQMztEWE2NxU2HJerjMlIhbaVfztJpMWZ70JvB1sG8' \\\n 'JzVMc5mgaPEOQbBAHxz5EK9fOdW333Yw',\n 'e': 'AQAB',\n 'kid': 'LeE4htANOADB4/QY2qD45+oukIY=',\n 'x5c': public_key,\n }]\n}\n\nheaders = {'kid': 'LeE4htANOADB4/QY2qD45+oukIY='}\npayload = {\n 'aud': 'casting-agency-api',\n 'iss': 'https://jnsp-casting-agency.us.auth0.com/',\n 'permissions': []\n}\n\n\ndef get_fake_token(permissions):\n payload['permissions'] = permissions\n return jwt.encode(payload, private_key, algorithm='RS256', headers=headers)\n"
},
{
"alpha_fraction": 0.5972602963447571,
"alphanum_fraction": 0.6278538703918457,
"avg_line_length": 25.385541915893555,
"blob_id": "0a5a4455756c02dee8162fe2e2205b63cbfb60a2",
"content_id": "eeaf16e9b5113c6a14c61c205775070040887a71",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2190,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 83,
"path": "/tests/test_models.py",
"repo_name": "jnsp/casting-agency",
"src_encoding": "UTF-8",
"text": "from datetime import date\n\nimport pytest\n\nfrom app import create_app, db\nfrom app.models import Movie, Actor, ValidationError\n\n\[email protected](autouse=True)\ndef init_test_db():\n app = create_app('testing')\n with app.app_context():\n db.create_all()\n yield\n db.drop_all()\n\n\ndef test_movie_model():\n first_movie = Movie(title='TITLE1', release_date=date(2020, 1, 1))\n first_movie.save()\n second_movie = Movie(title='TITLE2', release_date=date(2020, 1, 2))\n second_movie.save()\n\n saved_movies = Movie.query.all()\n assert len(saved_movies) == 2\n\n first_saved_movie = Movie.query.get(1)\n assert first_saved_movie.title == 'TITLE1'\n assert first_saved_movie.release_date == date(2020, 1, 1)\n\n second_saved_movie = Movie.query.get(2)\n assert second_saved_movie.title == 'TITLE2'\n assert second_saved_movie.release_date == date(2020, 1, 2)\n\n first_saved_movie.remove()\n assert Movie.query.get(1) is None\n\n\ndef test_movie_to_dict():\n movie = Movie(title='TITLE', release_date=date(2020, 1, 1))\n assert movie.to_dict() == {\n 'id': None,\n 'title': 'TITLE',\n 'release_date': '2020-01-01',\n }\n\n\ndef test_actor_model():\n first_actor = Actor(name='ACTOR1', age=10, gender='F')\n first_actor.save()\n second_actor = Actor(name='ACTOR2', age=20, gender='M')\n second_actor.save()\n\n saved_actors = Actor.query.all()\n assert len(saved_actors) == 2\n\n first_saved_actor = Actor.query.get(1)\n assert first_saved_actor.name == 'ACTOR1'\n assert first_saved_actor.age == 10\n assert first_saved_actor.gender == 'F'\n\n second_saved_actor = Actor.query.get(2)\n assert second_saved_actor.name == 'ACTOR2'\n assert second_saved_actor.age == 20\n assert second_saved_actor.gender == 'M'\n\n first_saved_actor.remove()\n assert Actor.query.get(1) is None\n\n\ndef test_actor_non_negative_age():\n with pytest.raises(ValidationError, match='Age is negative'):\n Actor(age=-1)\n\n\ndef test_actor_to_dict():\n actor = Actor(name='Actor', age=10, gender='F')\n 
assert actor.to_dict() == {\n 'id': None,\n 'name': 'Actor',\n 'age': 10,\n 'gender': 'F'\n }\n"
},
{
"alpha_fraction": 0.611857533454895,
"alphanum_fraction": 0.6206014156341553,
"avg_line_length": 33.99253845214844,
"blob_id": "e6424a11488092e4a8f5c00408bf69dde1236c3b",
"content_id": "94ff0f4d65ee8b1268d7c03a6fe8510ee26ee99b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4689,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 134,
"path": "/tests/test_auth.py",
"repo_name": "jnsp/casting-agency",
"src_encoding": "UTF-8",
"text": "from base64 import b64encode, b64decode\nimport json\nimport os\n\nfrom dotenv import load_dotenv\nimport pytest\nimport requests\n\nfrom app import create_app\nfrom app.auth import get_auth_token, get_jwks, validate_jwt, \\\n check_permission, AuthError\n\n\nclass TESTAuthToken:\n @pytest.fixture()\n def app(self):\n return create_app('testing')\n\n def test_get_auth_token(self, app):\n token_header = {'Authorization': 'bearer TOKEN'}\n\n with app.test_request_context(headers=token_header):\n assert get_auth_token() == 'TOKEN'\n\n def test_autherror_without_auth_header(self, app):\n no_header = {}\n\n with app.test_request_context(headers=no_header):\n with pytest.raises(AuthError) as e:\n get_auth_token()\n assert e.value.error == 'No Authorization header'\n assert e.value.status_code == 401\n\n def test_autherror_when_auth_splited_improperly(self, app):\n too_many_splited_header = {'Authorization': 'A B C'}\n\n with app.test_request_context(headers=too_many_splited_header):\n with pytest.raises(AuthError) as e:\n get_auth_token()\n assert e.value.error == 'Token splited too few or many'\n assert e.status_code == 401\n\n def test_autherror_when_auth_type_is_not_bearer(self, app):\n not_bearer_token_header = {'Authorization': 'basic TOKEN'}\n\n with app.test_request_context(headers=not_bearer_token_header):\n with pytest.raises(AuthError) as e:\n get_auth_token()\n assert e.value.error == 'Not bearer auth type'\n assert e.status_code == 401\n\n\nclass TestValidateJWT:\n @pytest.fixture(scope='class')\n def jwks(self):\n return get_jwks()\n\n def test_validate_jwt(self, jwks):\n test_token = self.get_test_jwt()\n expected_payload = json.loads(\n b64decode(test_token.split('.')[1]).decode('utf-8'))\n assert validate_jwt(test_token, jwks) == expected_payload\n\n def test_autherror_when_token_has_no_header(self, jwks):\n no_header_token = 'TOKEN'\n\n with pytest.raises(AuthError) as e:\n validate_jwt(no_header_token, jwks)\n assert e.value.error == 'Unable 
decoding token headers'\n assert e.value.status_code == 401\n\n def test_autherror_when_token_has_no_kid(self, jwks):\n no_kid_header = b64encode(b'{\"no_kid\": \"KID\"}').decode('utf-8')\n no_kid_token = no_kid_header + '..'\n\n with pytest.raises(AuthError) as e:\n validate_jwt(no_kid_token, jwks)\n assert e.value.error == 'Token has no kid'\n assert e.value.status_code == 401\n\n def test_autherror_when_no_matched_kid(self, jwks):\n unmatched_header = b64encode(b'{\"kid\": \"KID\"}').decode('utf-8')\n unmatched_token = unmatched_header + '..'\n\n with pytest.raises(AuthError) as e:\n validate_jwt(unmatched_token, jwks)\n assert e.value.error == 'No matched kid'\n assert e.value.status_code == 401\n\n def get_test_jwt(self):\n load_dotenv()\n url = f\"https://{os.getenv('AUTH0_DOMAIN')}/oauth/token\"\n headers = {'content-type': 'application/json'}\n data = {\n 'client_id': os.getenv('TEST_CLIENT_ID'),\n 'client_secret': os.getenv('TEST_CLIENT_SECRET'),\n 'audience': os.getenv('API_AUDIENCE'),\n 'grant_type': 'client_credentials',\n }\n res = requests.post(url, headers=headers, json=data)\n\n return res.json()['access_token']\n\n\nclass TestPermmision:\n def test_check_permission(self):\n permission = 'run:test'\n payload = {'permissions': ['run:test']}\n\n assert check_permission(permission, payload) is True\n\n def test_check_empty_permission(self):\n empty_permission = None\n payload = {'permissions': ['run:test']}\n\n assert check_permission(empty_permission, payload) is True\n\n def test_check_permission_not_permitted(self):\n permission = 'creat:test'\n not_permitted_payload = {'permissions': ['run:test']}\n\n with pytest.raises(AuthError) as e:\n check_permission(permission, not_permitted_payload)\n assert e.value.error == 'Permission not found'\n assert e.value.status_code == 403\n\n def test_autherror_when_payload_has_no_permissions(self):\n any_permission = 'any permission'\n no_permissions_payload = {'no_permissions': ['any permissions']}\n\n with 
pytest.raises(AuthError) as e:\n check_permission(any_permission, no_permissions_payload)\n assert e.value.error == 'Payload has NO permissions'\n assert e.value.status_code == 403\n"
},
{
"alpha_fraction": 0.727830171585083,
"alphanum_fraction": 0.7327830195426941,
"avg_line_length": 25.5,
"blob_id": "454a244f7790d3ea697b7dc8c191e38e16f81dec",
"content_id": "0b242b86d282689ed3d12c990b5bec87b6b9dcfb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 4240,
"license_type": "no_license",
"max_line_length": 198,
"num_lines": 160,
"path": "/README.md",
"repo_name": "jnsp/casting-agency",
"src_encoding": "UTF-8",
"text": "# casting-agency\n\nThis is casting agency's API server.\nTemporaily, this is deployed on 'http://casting-agency-jnsp.herokuapp.com/'.\n\n## Endpoints\n\n* GET `/login`: Redirect to login page of Auth0.com.\n* GET `/movies`: Get movies. Required permission `view:movies`.\n* POST `/movies`: Add new movie. Required permission `add:movies`. New movie data should be json body.\n* PATCH `/movies/<id>`: Modify movie of `<id>`. Required permission `modify:movies`. Modified movie data should be json body.\n* DELETE `/movies/<id>`: Remove movie of `<id>`. Required permission `delete:movies`.\n* GET `/actors`: Get actors. Required permission `view:actors`.\n* POST `/actors`: Add new actor. Required permission `add:actors`. New actor data should be json body.\n* PATCH `/actors/<id>`: Modify actor of `<id>`. Required permission `modify:actors`. Modified actor data should be json body.\n* DELETE `/actors/<id>`: Remove actor of `<id>`. Required permission `delete:actors`.\n\n### Role and Permissions\n\nThere are 3 Roles, and each roles has its own permissions.\n\n* Casting Assistant: `view:movies`, `view:actors`\n* Casting Director: Casting Assistant's + `add:actors`, `delete:actors`, `modify:actors`, `modify:movies`\n* Executive Producer: Casting Director's + `add:movies`, `delete:movies`\n\n## How to install\n\nThis project is based on `Python 3.9.1`. 
Just clone this to use it.\n\n```bash\ngit clone https://github.com/jnsp/casting-agency.git\n```\n\n### Requirements\n\n#### For development\n\nNote: If you want to do pytest, install `requirements/dev.txt` requirements like below.\n\n```bash\npip install -r requirements/dev.txt\n```\n\n#### For production\n\n```bash\npip install -r requirements/prod.txt\n```\n\n#### For Heroku deployment\n\n```bash\npip install -r requirements/heroku.txt\n```\n\nOR\n\n```bash\npip install -r requirements.txt\n```\n\n### Environment variables\n\nThis project uses `.env` file for sensitive inforamation.\n`.env` file has to be in the root directory of this project.\n`.env` file includes variables like below\n\n```\nAUTH0_DOMAIN=[auth0 domain]\nAPI_AUDIENCE=[auth0 api audience]\nALGORITHM=[auth algorithm]\nAPP_URL=[app url after deployment]\nCLIENT_ID=[auth0 client id]\nTEST_CLIENT_ID=[auth0 test client id]\nTEST_CLIENT_SECRET=[auth0 test client secret]\nCASTING_ASSISTANT_TOKEN=[casting assitant's jwt for test]\nCASTING_DIRECTOR_TOKEN=[casting director's jwt for test]\nEXECUTIVE_PRODUCER_TOKEN=[executive producer's jwt for test]\n```\n\n## How to run server\n\nIn development mode, you can run server with localhost.\n\n```bash\nexport FLASK_APP=ca.py\nexport FLASK_ENV=development\nflask run\n```\n\n\n## How to deploy to Heroku\n\n### Add DB server to Heroku\n\n```bash\nheroku addons:create heroku-postgresql:hobby-dev\n```\n\nThis command adds database in Heroku and env variable `DATABASE_URL`. You can check the `DATABASE_URL` with the commaind like below.\n\n```bash\nheroku config\n```\n\n### Set environment variable\n\nHeroku needs env variable but `.env` can't be pushed. 
Set env variable with heroku cli.\n\n```bash\nheroku config:set AUTH_DOMANI=[auth0 domain]\n```\n\nYou need to set 7 variables manually.\n\n```\nFLASK_APP=ca.py\nFLASK_CONFIG=heroku\nAUTH0_DOMAIN=[auth0 domain]\nAPI_AUDIENCE=[auth0 api audience]\nALGORITHM=[auth algorithm]\nAPP_URL=[app url after deployment]\nCLIENT_ID=[auth0 client id]\n```\n\n### Push repository to Heroku\n\nAfter Heroku cli settings, just git push to the heroku repository.\n\n```bash\ngit push heroku\n```\n\n### DB upgrade\n\n```bash\nheroku run flask db upgrade\n```\n\n## How to test\n\nYou can test this project with `pytest`.\n\n```bash\npytest -v\n```\n\nThere are five test files here.\n\n* `test_api.py`: unit tests for api endpoints\n* `test_auth.py`: unit tests for authentication and authorization with fake JWT\n* `test_basic.py`: unit tests for app factory\n* `test_heroku.py`: functional tests for real endpoints after Heroku deployment\n* `test_models.py`: unit tests for data models\n\nNote: `test_heroku` is only able to be tested after deployment and `.env` file has `CASTING_ASISTANT_TOKEN`, `CASTING_DIRECTOR_TOKEN`, and `EXECUTIVE_PRODUCER_TOKEN`.\n\n## Note\n\nThis is the Capstone Project of '[Full Stack Web Developer Nanodegree Program](https://www.udacity.com/course/full-stack-web-developer-nanodegree--nd0044)' on [Udacity.com](https://www.udacity.com).\n"
},
{
"alpha_fraction": 0.5516803860664368,
"alphanum_fraction": 0.5561192035675049,
"avg_line_length": 24.03174591064453,
"blob_id": "1ba236b1c86558e7fdd509d02fa22e2f1e8c94fd",
"content_id": "535bfc23fb175c8d5de35c0620387eb840636e90",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1577,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 63,
"path": "/app/models.py",
"repo_name": "jnsp/casting-agency",
"src_encoding": "UTF-8",
"text": "from app import db\n\n\nclass Movie(db.Model):\n __tablename__ = 'movies'\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String(64), nullable=False)\n release_date = db.Column(db.Date, nullable=False)\n\n def __str__(self):\n return f'<Movie(title={self.title}, release_date={self.release_date}>'\n\n def save(self):\n db.session.add(self)\n db.session.commit()\n\n def remove(self):\n db.session.delete(self)\n db.session.commit()\n\n def to_dict(self):\n return {\n 'id': self.id,\n 'title': self.title,\n 'release_date': str(self.release_date)\n }\n\n\nclass Actor(db.Model):\n __tablename__ = 'actors'\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(64), nullable=False)\n age = db.Column(db.Integer, nullable=False)\n gender = db.Column(db.String(10), nullable=False)\n\n def __init__(self, **kwargs):\n super(Actor, self).__init__(**kwargs)\n if self.age < 0:\n raise ValidationError('Age is negative')\n\n def __str__(self):\n return f'<Actor(name={self.name}, age={self.age}, ' \\\n 'gender={self.gender}>'\n\n def save(self):\n db.session.add(self)\n db.session.commit()\n\n def remove(self):\n db.session.delete(self)\n db.session.commit()\n\n def to_dict(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'age': self.age,\n 'gender': self.gender,\n }\n\n\nclass ValidationError(ValueError):\n pass\n"
},
{
"alpha_fraction": 0.6059168577194214,
"alphanum_fraction": 0.6145687699317932,
"avg_line_length": 35.56122589111328,
"blob_id": "d2557750cc669bf71ad0630d468610554a460676",
"content_id": "f1b83945f86af2e2b89b99d5d6c7a29af8422a02",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3583,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 98,
"path": "/tests/test_heroku.py",
"repo_name": "jnsp/casting-agency",
"src_encoding": "UTF-8",
"text": "from collections import namedtuple\nimport os\n\nfrom dotenv import load_dotenv\nimport pytest\nimport requests\n\nload_dotenv()\nAPP_URL = os.getenv('APP_URL')\nToken = namedtuple('Token', ['name', 'body'])\nCA_TOKEN = Token('Casting Assistant', os.getenv('CASTING_ASSISTANT_TOKEN'))\nCD_TOKEN = Token('Casting Director', os.getenv('CASTING_DIRECTOR_TOKEN'))\nEP_TOKEN = Token('Executive Producer', os.getenv('EXECUTIVE_PRODUCER_TOKEN'))\n\n\[email protected]\ndef new_actor_info():\n return {'name': 'NEW_ACTOR', 'age': 20, 'gender': 'X'}\n\n\[email protected]\ndef new_movie_info():\n return {'title': 'NEW_MOVIE', 'release_date': '2021-02-01'}\n\n\[email protected]('token', (CA_TOKEN, CD_TOKEN, EP_TOKEN),\n ids=lambda t: t.name)\[email protected]('resource', ('movies', 'actors'))\ndef test_get(token, resource):\n res = requests.get(f'{APP_URL}/{resource}',\n headers={'Authorization': f'bearer {token.body}'})\n assert res.status_code == 200\n\n\[email protected]('token', (CD_TOKEN, EP_TOKEN), ids=lambda t: t.name)\ndef test_add_delete_actors(token, new_actor_info):\n res = requests.post(f'{APP_URL}/actors',\n headers={'Authorization': f'bearer {token.body}'},\n json=new_actor_info)\n assert res.status_code == 200\n\n actor_id = res.json()['actor']['id']\n res = requests.delete(f'{APP_URL}/actors/{actor_id}',\n headers={'Authorization': f'bearer {token.body}'})\n assert res.status_code == 200\n\n\[email protected]('token', (EP_TOKEN, ), ids=lambda t: t.name)\ndef test_add_delete_movies(token, new_movie_info):\n res = requests.post(f'{APP_URL}/movies',\n headers={'Authorization': f'bearer {token.body}'},\n json=new_movie_info)\n assert res.status_code == 200\n\n movie_id = res.json()['movie']['id']\n res = requests.delete(f'{APP_URL}/movies/{movie_id}',\n headers={'Authorization': f'bearer {token.body}'})\n assert res.status_code == 200\n\n\[email protected]('token', (CD_TOKEN, EP_TOKEN), ids=lambda t: t.name)\ndef test_modify_actor(token, new_actor_info):\n 
res_add_actor = requests.post(\n f'{APP_URL}/actors',\n headers={'Authorization': f'bearer {EP_TOKEN.body}'},\n json=new_actor_info)\n actor_id = res_add_actor.json()['actor']['id']\n\n res_modify_actor = requests.patch(\n f'{APP_URL}/actors/{actor_id}',\n headers={'Authorization': f'bearer {token.body}'},\n json={'name': 'MODIFIED_NAME'})\n\n assert res_modify_actor.status_code == 200\n assert res_modify_actor.json()['actor']['name'] == 'MODIFIED_NAME'\n\n requests.delete(f'{APP_URL}/actors/{actor_id}',\n headers={'Authorization': f'bearer {EP_TOKEN.body}'})\n\n\[email protected]('token', (CD_TOKEN, EP_TOKEN), ids=lambda t: t.name)\ndef test_modify_movie(token, new_movie_info):\n res_add_movie = requests.post(\n f'{APP_URL}/movies',\n headers={'Authorization': f'bearer {EP_TOKEN.body}'},\n json=new_movie_info)\n movie_id = res_add_movie.json()['movie']['id']\n\n res_modify_movie = requests.patch(\n f'{APP_URL}/movies/{movie_id}',\n headers={'Authorization': f'bearer {token.body}'},\n json={'title': 'MODIFIED_TITLE'})\n\n assert res_modify_movie.status_code == 200\n assert res_modify_movie.json()['movie']['title'] == 'MODIFIED_TITLE'\n\n requests.delete(f'{APP_URL}/movies/{movie_id}',\n headers={'Authorization': f'bearer {EP_TOKEN.body}'})\n"
},
{
"alpha_fraction": 0.6684350371360779,
"alphanum_fraction": 0.6684350371360779,
"avg_line_length": 19.94444465637207,
"blob_id": "0464354ceb364f6b85178d1d328bb1f1146c5639",
"content_id": "17a6ee885f43431567f136e7e10e1535201dccbe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 754,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 36,
"path": "/config.py",
"repo_name": "jnsp/casting-agency",
"src_encoding": "UTF-8",
"text": "import os\n\nbase_dir = os.path.abspath(os.path.dirname(__file__))\n\n\nclass Config:\n SQLALCHEMY_TRACK_MODIFICATIONS = False\n\n\nclass DevelopmentConfig(Config):\n DEBUG = True\n SQLALCHEMY_DATABASE_URI = \\\n 'sqlite:///' + os.path.join(base_dir, 'dev_data.sqlite')\n\n\nclass TestConfig(Config):\n TESTING = True\n SQLALCHEMY_DATABASE_URI = 'sqlite://'\n\n\nclass ProductionConfig(Config):\n SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \\\n 'sqlite:///' + os.path.join(base_dir, 'data.sqlite')\n\n \nclass HerokuConfig(ProductionConfig):\n pass\n\n\nconfig = {\n 'testing': TestConfig,\n 'development': DevelopmentConfig,\n 'production': ProductionConfig,\n 'heroku': HerokuConfig,\n 'default': DevelopmentConfig,\n}\n"
}
] | 11 |
rasa97/grover-sim
|
https://github.com/rasa97/grover-sim
|
7192289dfcaa30df2d1f5fea9b9be7a44327f39d
|
bc6fab248360d447fdbf7b6382c8f716091458cc
|
ac60e6ed593da4beaa8e1877233ad80a6d5294ff
|
refs/heads/master
| 2021-08-31T11:16:19.971854 | 2017-12-21T05:31:46 | 2017-12-21T05:31:46 | 114,966,395 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.49916619062423706,
"alphanum_fraction": 0.5052807331085205,
"avg_line_length": 22.064102172851562,
"blob_id": "d498c57b8e2580b82f3c5c255cf3db324cf6d0ff",
"content_id": "4b11fb1540d499e150b51f9f212204267678f803",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1799,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 78,
"path": "/server.py",
"repo_name": "rasa97/grover-sim",
"src_encoding": "UTF-8",
"text": "import json\nimport urllib\n\nimport pyquil.quil as pq\nimport pyquil.api as api\nfrom pyquil.gates import *\nimport numpy as np\n\nqvm = api.SyncConnection()\n\nfrom flask import Flask, request, redirect, url_for, render_template, jsonify\napp = Flask(__name__)\n\np = pq.Program()\n\ndef orcl(n, bst):\n q = pq.Program()\n N=2**n\n gate = np.identity(N)\n gate[bst][bst] = -1.0\n q.defgate(\"oracle\", gate)\n oc = \"oracle\"\n for i in range(n):\n oc+=\" \"+str(i)\n return q.inst(oc)\n\n\ndef diff(n,N):\n q = pq.Program()\n diff_gate = np.full((N, N), 2.0/N)\n for i in range(N):\n diff_gate[i][i] -= 1\n q.defgate(\"diffop\", diff_gate)\n dc = \"diffop\"\n for i in range(n):\n dc+=\" \"+str(i)\n return q.inst(dc)\n\ndef newop(n):\n q=pq.Program()\n cmd=\"\"\n for i in range(n):\n cmd=cmd+\"H \"+str(i)+\"\\n\"\n return q.inst(cmd)\n\[email protected]('/', methods = ['POST', 'GET'])\ndef home():\n if request.method == 'POST':\n jsdata = request.get_json()\n if(jsdata['type']==0):\n num = jsdata['n']\n p = newop(int(num))\n wvf, _ = qvm.wavefunction(p)\n prob = wvf.get_outcome_probs()\n print prob\n return jsonify(prob)\n elif(jsdata['type']==1):\n string = jsdata['bits']\n bit_string = int(string, 2)\n n = int(jsdata['numq'])\n global p\n p+=orcl(n, bit_string)\n p+=diff(n, 2**n)\n print p\n\n wvf, _ = qvm.wavefunction(p)\n prob = wvf.get_outcome_probs()\n print prob\n return jsonify(prob)\n elif(jsdata['type']==2):\n global p\n while(p.pop()):\n p.pop()\n\n return render_template('index.html')\n\nif __name__ == \"__main__\":\n\tapp.run()\n"
},
{
"alpha_fraction": 0.4724719226360321,
"alphanum_fraction": 0.5044943690299988,
"avg_line_length": 24.61151123046875,
"blob_id": "af6adae0d3e3ad180bbf26ce9685a9f90d72e5b0",
"content_id": "2fe085d73c5ec81583c89c703db32ffca9b3e146",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 3560,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 139,
"path": "/static/js/script.js",
"repo_name": "rasa97/grover-sim",
"src_encoding": "UTF-8",
"text": "function reverse(s){\n return s.split(\"\").reverse().join(\"\");\n}\n\nmn=0\n\nfunction reline(n){\n N=2**n;\n sqn = Math.sqrt(N);\n theta = Math.asin(1/sqn);\n var ctx = document.getElementById(\"myvec\").getContext('2d');\n ctx.beginPath();\n ctx.moveTo(175,175);\n ctx.lineTo(175+120*Math.cos((mn+1)*theta) , 175-120*Math.sin((mn+1)*theta));\n ctx.font=\"13pt Calibri\";\n ctx.fillText(\"|s>\",175+120*Math.cos((mn+1)*theta),175-120*Math.sin((mn+1)*theta));\n ctx.lineWidth = 6;\n ctx.strokeStyle = '#6CAFB7';\n ctx.stroke();\n mn=mn+2;\n}\n\nfunction redraw(){\n var ax = document.getElementById(\"myvec\").getContext('2d');\n ax.clearRect(0, 0, 350, 350);\n ax.font=\"13pt Calibri\";\n ax.fillText(\"|w>\",145,25);\n ax.fillText(\"|s'>\", 326,200);\n ax.beginPath();\n ax.strokeStyle = '#000000';\n ax.moveTo(0,175);\n ax.lineTo(350,175);\n ax.lineWidth = 3;\n ax.stroke();\n\n ax.beginPath();\n ax.moveTo(175,0);\n ax.lineTo(175,350);\n ax.lineWidth = 3;\n ax.stroke();\n}\n\nfunction successCallBack(returnData) {\n lbl=[];\n val=[];\n for (var key in returnData){\n lbl.push(reverse(String(key)));\n val.push(returnData[key]);\n }\n var ctx = document.getElementById('myChart').getContext('2d');\n ctx.clearRect(0, 0, ctx.width, ctx.height);\n var chart = new Chart(ctx, {\n type: 'bar',\n data: {\n labels: lbl,\n datasets: [{\n label: \"Probability\",\n backgroundColor: '#6CAFB7',\n borderColor: '#6CAFB7',\n data: val,\n }]\n }\n });\n\n redraw();\n reline($('#noq').val());\n}\n\nfunction resetCallBack() {\n document.getElementById('myChart').getContext('2d').destroy();\n mn=0;\n}\n\n$(document).ready(function() {\n\n var slider = document.getElementById(\"br\");\n var output = document.getElementById(\"bts\");\n var now,smanx;\n $('#oprs').hide();\n $('#resrow').hide();\n\n $(\"#set\").click(function() {\n if($('#noq').val()){\n now = $('#noq').val();\n smax = 2**now - 1;\n var qn = {};\n qn['n']=now;\n qn['type']=0;\n $('#oprs').show()\n $('#resrow').show();\n 
$('#br').attr('max', String(smax));\n $('#noq').val(String(now));\n $.ajax({\n \t\t\ttype: 'POST',\n \t\t\turl: window.location.href,\n \t\t\tdata: JSON.stringify(qn),\n \t\t\tcontentType: 'application/json;charset=UTF-8',\n success: successCallBack\n \t\t});\n }\n });\n\n $('#br').on(\"change mousemove\", function() {\n var x = (parseInt(this.value)).toString(2);\n var str = \"0\".repeat(now-x.length) + x;\n output.innerHTML = str;\n });\n\n $(\"#grover\").click(function() {\n var response={};\n response['numq']=$('#noq').val();\n response['bits']=$('#bts').text();\n response['type']=1;\n $.ajax({\n type: 'POST',\n url: window.location.href,\n data: JSON.stringify(response),\n contentType: 'application/json;charset=UTF-8',\n success: successCallBack\n });\n });\n\n $(\"#reset\").click(function() {\n $('#noq').val(\"\");\n $('#oprs').hide();\n $('#resrow').hide();\n redraw();\n mn=0;\n var response={};\n response['type']=2;\n $.ajax({\n type: 'POST',\n url: window.location.href,\n data: JSON.stringify(response),\n contentType: 'application/json;charset=UTF-8',\n success: resetCallBack\n });\n });\n});\n"
}
] | 2 |
bembid668/GoogleHashCode
|
https://github.com/bembid668/GoogleHashCode
|
8f91b71ab8da3340ab77ac82ad03c7e5a3d3e5d4
|
61fb4ea0f4524b3f7286e58171c1737d281d5658
|
fc3f8d8d1055f7c41c5e67a1671cb1beff1efa88
|
refs/heads/master
| 2021-01-25T12:29:59.367963 | 2018-03-01T21:23:55 | 2018-03-01T21:23:55 | 123,472,257 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.8387096524238586,
"alphanum_fraction": 0.8387096524238586,
"avg_line_length": 30,
"blob_id": "6e5545db5eef276a97c3d924c03ae77369965de7",
"content_id": "543da6d80d5898729809c1747c62fca1c2fb7877",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 62,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 2,
"path": "/README.md",
"repo_name": "bembid668/GoogleHashCode",
"src_encoding": "UTF-8",
"text": "# GoogleHashCode\nProblem set and solution for Google HashCode\n"
},
{
"alpha_fraction": 0.6124660968780518,
"alphanum_fraction": 0.6303523182868958,
"avg_line_length": 24.27397346496582,
"blob_id": "09c15a66e6273036443da59f3eedc8dad784addb",
"content_id": "e926fe21f0cbc6d76bea8981a59db052c7b907f2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1845,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 73,
"path": "/runtime.py",
"repo_name": "bembid668/GoogleHashCode",
"src_encoding": "UTF-8",
"text": "import Maths\nimport numpy as np\nimport sys\n\ndef process(filename):\n\tqueue = []\n\toutput = \"\"\n\twith open(filename,'r') as file:\n\t\tcount = 0\n\t\tfor line in file:\n\t\t\tif count == 0:\n\t\t\t\t[numRows,nuymCols,numVehicles,numRides,perRideBonus,numSteps] = line.strip().split(\" \")\n\t\t\t\tcount += 1\n\t\t\telse:\n\t\t\t\t[xCoordStrt,yCoordStrt,xCoordEnd,yCoordEnd,strtTm,endTm] = line.strip().split(\" \")\n\t\t\t\tqueue.append({\n\t\t\t\t\t\"start\":[xCoordStrt,yCoordStrt],\n\t\t\t\t\t\"end\":[xCoordStrt,yCoordStrt],\n\t\t\t\t\t\"startTime\":[xCoordStrt,yCoordStrt],\n\t\t\t\t\t\"endTime\":[xCoordStrt,yCoordStrt],\n\t\t\t\t})\n\tcars = []\n\tfor x in range(0, numVehicles):\n\t\tcars.append([0, 0, None])\n\nfor x in range(0, numSteps):\n\t\texcTrips = []\n\t\tfor car in cars:\n\t\t\ttripIndex = None\n\t\t\tdistance = None\n\n\t\t\tfor index, trip in enumerate(queue[]:\n\t\t\t\ttemp_distance = Math.abs(cars[0]-xCoordStrt)+Maths.abs(cars[1]-yCoordStrt)\n\t\t\t\tif temp_distance < distance:\n\t\t\t\t\tdistance = temp_distance\n\t\t\t\t\ttripIndex = index\n\t\t\t\t\tmove_intersection()\n\n\t\t\t\telif(temp_distance == 0){\n\t\t\t\t\tcar[3] = tripIndex\n\t\t\t\t}\n\n\t\t\t\ttemp_end_distance = Math.abs(cars[0]-xCoordEnd)+Maths.abs(cars[1]-yCoordEnd)\n\t\t\t\tif temp_end_distance < distance:\n\t\t\t\t\tdistance = temp_end_distance\n\t\t\t\t\ttripIndex = tripIndex\n\t\t\t\t\tmove_intersection()\n\t\t\t\telif(temp_distance == 0){\n\t\t\t\t\tcar[3] = None;\n\t\t\t\t\toutput += \n\t\t\t\t\t}\n\n\ndef move_intersection(self, carsID, tripIndex):\n\t\t\t\tif(tripIndex != None):\n\t\t\t\t\txDif = cars[carsID][0] - xCoordStrt\n\t\t\t\t\tyDif = cars[carsID][1] - yCoordStrt\n\t\t\t\t\tif(xDif>0){cars[carsID][0] = cars[carsID][0] + 1}\n\t\t\t\t\telif(xDif<0){cars[carsID][0] = cars[carsID][0] - 1}\n\t\t\t\t\telif(yDif>0){cars[carsID][1] = cars[carsID][1] + 1}\n\t\t\t\t\telif(yDif<0){cars[carsID][1] = cars[carsID][1] - 
1}\n\t\t\t\t\texcTrips.append(tripIndex)\n\n\noutput = \"\"\ndef write_file(, filename):\n\toutput +=\n\twith open(filename) as f:\n\t\tstring s =\n\n\n\nprocess(\"a_example.in\")\n"
}
] | 2 |
anandksrao/BUSCO_phylogenomics
|
https://github.com/anandksrao/BUSCO_phylogenomics
|
63158e0d34fb69031bcd0625c58a29dd656ac31a
|
d6ca95b153da97892b1d447cf0d168066053b5fa
|
6dd6bf2a97868034556c2254de19dacc54655300
|
refs/heads/master
| 2023-03-25T22:31:30.666229 | 2021-03-28T19:05:45 | 2021-03-28T19:05:45 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5767203569412231,
"alphanum_fraction": 0.5828579664230347,
"avg_line_length": 36.228492736816406,
"blob_id": "b676e4eaed1b7602a44f935ce61edfdae2addc25",
"content_id": "9eb277df888f0f6459e3453910a02b67aa4fcc2d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13849,
"license_type": "permissive",
"max_line_length": 124,
"num_lines": 372,
"path": "/BUSCO_phylogenomics.py",
"repo_name": "anandksrao/BUSCO_phylogenomics",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\n# BUSCO_phylogenomics.py\n# 2019 Jamie McGowan <[email protected]>\n#\n# Utility script to construct species phylogenies using BUSCO results.\n# Can perform ML supermatrix or generate datasets for supertree methods.\n# Works directly from BUSCO output, as long as the same BUSCO dataset\n# has been used for each genome\n#\n# Dependencies:\n# - BioPython\n# - MUSCLE\n# - trimAL\n# - IQ-TREE\n#\n\nimport argparse\nimport multiprocessing as mp\nimport os\nimport sys\nfrom time import gmtime, strftime\n\nfrom Bio import SeqIO\nfrom Bio.Seq import Seq\nfrom Bio.SeqRecord import SeqRecord\n\n# If these programs aren't in $PATH, replace the string below with full\n# paths to the programs, including the program name\nmuscle = \"muscle\"\niqtree = \"iqtree\"\ntrimal = \"trimal\"\n\n\n# astral = \"astral.jar\"\n\n# TODO Add FastTree support\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Perform phylogenomic reconstruction using BUSCOs\")\n parser.add_argument(\"--supermatrix\",\n help=\"Concatenate alignments of ubuquitious single copy BUSCOs and perform supermatrix \"\n \"species phylogeny reconstruction using IQTREE/ML\",\n action=\"store_true\")\n parser.add_argument(\"--supertree\",\n help=\"Generate individual ML phylogenies of each BUSCO persent in at least 4 genomes for \"\n \"supertree species phylogeny reconstruction with ASTRAL\",\n action=\"store_true\")\n parser.add_argument(\"-t\", \"--threads\", type=int, help=\"Number of threads to use\", required=True)\n parser.add_argument(\"-d\", \"--directory\", type=str, help=\"Directory containing completed BUSCO runs\", required=True)\n parser.add_argument(\"-o\", \"--output\", type=str, help=\"Output directory to store results\", required=True)\n parser.add_argument(\"-l\", \"--lineage\", type=str, help=\"Name of lineage used to run BUSCO\", required=False)\n parser.add_argument(\"-psc\", \"--percent_single_copy\", type=float, action=\"store\", dest=\"psc\",\n 
help=\"BUSCOs that are present and single copy in N percent of species will be included in the \"\n \"concatenated alignment\")\n parser.add_argument(\"--stop_early\",\n help=\"Stop pipeline early after generating datasets (before phylogeny inference)\",\n action=\"store_true\")\n args = parser.parse_args()\n\n start_directory = os.path.abspath(args.directory)\n working_directory = os.path.abspath(args.output)\n threads = int(args.threads)\n supermatrix = args.supermatrix\n supertree = args.supertree\n stop_early = args.stop_early\n lineage = args.lineage\n\n if args.psc is None:\n percent_single_copy = 100\n print(percent_single_copy)\n else:\n percent_single_copy = float(args.psc)\n print(percent_single_copy)\n\n if not supermatrix and not supertree:\n print(\"Error! Please select at least one of '--supermatrix' or '--supertree'\")\n sys.exit(1)\n\n # Check input directory exists\n if os.path.isdir(start_directory):\n os.chdir(start_directory)\n else:\n print(\"Error! \" + start_directory + \" is not a directory!\")\n\n # Check if output directory already exists\n if os.path.isdir(working_directory):\n print(\"Error! 
\" + working_directory + \" already exists\")\n sys.exit(1)\n else:\n os.mkdir(working_directory)\n\n if lineage == None:\n lineage = \"\"\n\n # TODO check dependencies are installed\n\n print_message(\"Starting BUSCO Phylogenomics Pipeline\")\n\n # Scan start directory to identify BUSCO runs (begin with 'run_')\n busco_dirs = []\n\n for item in os.listdir(\".\"):\n if item[0:4] == \"run_\":\n if os.path.isdir(item):\n busco_dirs.append(item)\n\n print(\"Found \" + str(len(busco_dirs)) + \" BUSCO runs:\")\n\n for directory in busco_dirs:\n print(\"\\t\" + directory)\n\n print(\"\")\n\n buscos = {}\n all_species = []\n\n for directory in busco_dirs:\n os.chdir(start_directory)\n\n species = directory.split(\"run_\")[1]\n all_species.append(species)\n\n os.chdir(directory)\n # os.chdir(\"run_\" + lineage) # Issue with BUSCO version >= 4?\n os.chdir(\"busco_sequences\")\n os.chdir(\"single_copy_busco_sequences\")\n \n print(species)\n\n for busco in os.listdir(\".\"):\n if busco.endswith(\".faa\"):\n #print(busco)\n busco_name = busco[0:len(busco) - 4]\n record = SeqIO.read(busco, \"fasta\")\n new_record = SeqRecord(Seq(str(record.seq)), id=species, description=\"\")\n\n if busco_name not in buscos:\n buscos[busco_name] = []\n\n buscos[busco_name].append(new_record)\n\n print(\"BUSCO\\t # Species Single Copy\")\n for busco in buscos:\n print(busco + \" \" + str(len(buscos[busco])))\n\n print_message((str(len(buscos))) + \" BUSCOs were found\")\n print(\"\")\n\n if supertree:\n print_message(\"Beginning SUPERTREE Analysis\")\n print(\"\")\n\n # Identify BUSCOs that are present (single copy) in at least 4 species\n four_single_copy = []\n\n for busco in buscos:\n if len(buscos[busco]) >= 4:\n four_single_copy.append(busco)\n\n if len(four_single_copy) == 0:\n print_message(\"0 BUSCOs are present and single copy in at least 4 species\")\n # Should break out or quit here\n else:\n print_message(str(len(four_single_copy)) + \" BUSCOs are single copy and present in at least 
4 species\")\n\n os.chdir(working_directory)\n os.mkdir(\"proteins_4\")\n os.mkdir(\"alignments_4\")\n os.mkdir(\"trimmed_alignments_4\")\n os.mkdir(\"trees_4\")\n os.mkdir(\"trees_4/iqtree_files\")\n\n print(\"\")\n\n print_message(\"Writing protein sequences to: \" + os.path.join(working_directory, \"proteins_4\"))\n\n for busco in four_single_copy:\n busco_seqs = buscos[busco]\n\n SeqIO.write(busco_seqs, os.path.join(\"proteins_4\", busco + \".faa\"), \"fasta\")\n\n print(\"\")\n print_message(\"Aligning protein sequences using MUSCLE with\", threads, \"threads to:\",\n os.path.join(working_directory, \"alignments_4\"))\n\n mp_commands = []\n for busco in four_single_copy:\n mp_commands.append(\n [os.path.join(\"proteins_4\", busco + \".faa\"), os.path.join(\"alignments_4\", busco + \".aln\")])\n\n pool = mp.Pool(processes=threads)\n results = pool.map(run_muscle, mp_commands)\n\n print(\"\")\n print_message(\"Trimming alignments using trimAl (-automated1) with\", threads, \"threads to: \",\n os.path.join(working_directory, \"trimmed_alignments_4\"))\n\n mp_commands = []\n\n for busco in four_single_copy:\n mp_commands.append([os.path.join(\"alignments_4\", busco + \".aln\"),\n os.path.join(\"trimmed_alignments_4\", busco + \".trimmed.aln\")])\n\n pool = mp.Pool(processes=threads)\n results = pool.map(run_trimal, mp_commands)\n\n print(\"\")\n print_message(\"Generating phylogenies using IQ-TREE (with model testing) for each BUSCO family with\",\n threads, \"threads to:\", os.path.join(working_directory, \"trees_4\"))\n\n mp_commands = []\n\n for busco in four_single_copy:\n mp_commands.append([os.path.join(\"trimmed_alignments_4\", busco + \".trimmed.aln\")])\n\n pool = mp.Pool(processes=threads)\n results = pool.map(run_iqtree, mp_commands)\n\n # Move all IQ-TREE generated files to trees_4 folder\n os.system(\"mv trimmed_alignments_4/*.treefile trees_4\")\n os.system(\"mv trimmed_alignments_4/*.trimmed.aln.* trees_4/iqtree_files\")\n\n print(\"\")\n 
print_message(\"Concatenating all TREEs to: \", os.path.join(working_directory, \"ALL.trees\"))\n\n os.chdir(working_directory)\n os.system(\"cat trees_4/*.treefile > ALL.trees\")\n\n print(\"\")\n\n print_message(\"Finished generating dataset for supertree analysis. Use programs such as Astral or CLANN \"\n \"to infer species tree from trees_4/ALL.trees\")\n\n print(\"\")\n\n if supermatrix:\n single_copy_buscos = []\n if args.psc is None:\n print_message(\"Identifying BUSCOs that are single copy in all \" + str(len(all_species)) + \" species\")\n\n for busco in buscos:\n if len(buscos[busco]) == len(all_species):\n single_copy_buscos.append(busco)\n\n if len(single_copy_buscos) == 0:\n print_message(\"0 BUSCO families were present and single copy in all species\")\n print_message(\"Exiting\")\n sys.exit(0)\n else:\n print(str(len(single_copy_buscos)) + \" BUSCOs are single copy in all \" + str(len(all_species)) + \" species\")\n else:\n psc = args.psc\n # Identify BUSCOs that are single copy and present in psc% of species\n\n for busco in buscos:\n percent_species_with_single_copy = (len(buscos[busco]) / (len(all_species) * 1.0)) * 100\n\n if percent_species_with_single_copy >= psc:\n single_copy_buscos.append(busco)\n\n print(str(len(single_copy_buscos)) + \" BUSCOs are single copy in >= \" + str(psc) + \" of species\")\n\n os.chdir(working_directory)\n os.mkdir(\"proteins\")\n os.mkdir(\"alignments\")\n os.mkdir(\"trimmed_alignments\")\n\n print(\"\")\n\n print_message(\"Writing protein sequences to: \" + os.path.join(working_directory, \"proteins\"))\n\n for busco in single_copy_buscos:\n busco_seqs = buscos[busco]\n\n SeqIO.write(busco_seqs, os.path.join(working_directory, \"proteins\", busco + \".faa\"), \"fasta\")\n\n print(\"\")\n print_message(\"Aligning protein sequences using MUSCLE with\", threads, \"threads to: \",\n os.path.join(working_directory))\n\n mp_commands = []\n\n for busco in single_copy_buscos:\n 
mp_commands.append([os.path.join(working_directory, \"proteins\", busco + \".faa\"),\n os.path.join(working_directory, \"alignments\", busco + \".aln\")])\n\n pool = mp.Pool(processes=threads)\n results = pool.map(run_muscle, mp_commands)\n\n print(\"\")\n print_message(\"Trimming alignments using trimAl (-automated1) with\", threads, \"threads to: \",\n os.path.join(working_directory, \"trimmed_alignments\"))\n\n mp_commands = []\n\n for busco in single_copy_buscos:\n mp_commands.append([os.path.join(working_directory, \"alignments\", busco + \".aln\"),\n os.path.join(working_directory, \"trimmed_alignments\", busco + \".trimmed.aln\")])\n\n pool = mp.Pool(processes=threads)\n results = pool.map(run_trimal, mp_commands)\n\n print(\"\")\n print_message(\"Concatenating all trimmed alignments for SUPERMATRIX analysis\")\n\n os.chdir(os.path.join(working_directory, \"trimmed_alignments\"))\n alignments = {}\n\n for species in all_species:\n alignments[species] = \"\"\n\n # if psc isn't set, or is == 100, we can simple just concatenate alignments\n if args.psc is None:\n for alignment in os.listdir(\".\"):\n for record in SeqIO.parse(alignment, \"fasta\"):\n alignments[str(record.id)] += str(record.seq)\n else:\n # We need to check if a species is missing from a family, if so append with \"-\" to represent missing data\n for alignment in os.listdir(\".\"):\n # Keep track of which species are present and missing\n check_species = all_species[:]\n\n for record in SeqIO.parse(alignment, \"fasta\"):\n alignments[str(record.id)] += str(record.seq)\n check_species.remove(str(record.id))\n\n if len(check_species) > 0:\n # There are missing species, fill with N * \"?\"\n seq_len = len(str(record.seq))\n for species in check_species:\n alignments[species] += (\"?\" * seq_len)\n\n os.chdir(working_directory)\n fo = open(\"SUPERMATRIX.aln\", \"w\")\n\n for species in alignments:\n fo.write(\">\" + species + \"\\n\")\n fo.write(alignments[species] + \"\\n\")\n\n fo.close()\n\n 
print_message(\"Supermatrix alignment is \" + str(len(alignments[species])) + \" amino acids in length\")\n\n if stop_early:\n print_message(\"Stopping early\")\n sys.exit(0)\n\n print_message(\"Reconstructing species phylogeny using IQ-TREE with model selection from ModelFinder, \"\n \"1000 ultrafast bootstrap approximations and 1000 SH-aLRTs: SUPERMATRIX.aln.treefile\")\n print(\"\")\n\n os.system(\"iqtree -s SUPERMATRIX.aln -bb 1000 -alrt 1000 -nt AUTO -ntmax \" + str(threads) + \" > /dev/null\")\n\n print(\"\")\n print_message(\"SUPERMATRIX phylogeny construction complete! See treefile: SUPERMATRIX.aln.treefile\")\n\n\ndef run_muscle(io):\n os.system(\"muscle -in \" + io[0] + \" -out \" + io[1] + \" > /dev/null 2>&1\")\n\ndef run_trimal(io):\n os.system(\"trimal -in \" + io[0] + \" -out \" + io[1] + \" -automated1 \")\n\ndef run_iqtree(io):\n os.system(\"iqtree -s \" + io[0] + \" > /dev/null 2>&1\")\n\ndef print_message(*message):\n print(strftime(\"%d-%m-%Y %H:%M:%S\", gmtime()) + \"\\t\" + \" \".join(map(str, message)))\n\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.6724386811256409,
"alphanum_fraction": 0.6810966730117798,
"avg_line_length": 22.88505744934082,
"blob_id": "c0ec38185b061b63bff39fcf443654294bf550e4",
"content_id": "1d71ba85d4fbc610b22665abb71ca1aa15dd3a98",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2079,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 87,
"path": "/count_frequency_busco_family.py",
"repo_name": "anandksrao/BUSCO_phylogenomics",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\n# count_frequence_busco_family.py\n# 2020 Jamie McGowan <[email protected]>\n# \n# Reports how many species a BUSCO family was found to be single copy in\n# ONLY LOOKS AT SINGLE COPIES, IGNORES IF PRESENT AS MULTI COPY\n# Usage: python count_frequency_busco_family.py busco_working_directory\n\nimport os, sys\n\t\nif len(sys.argv) < 2:\n\tprint(\"Usage: python count_frequency_busco_family.py busco_working_directory\")\n\tsys.exit(0)\n\nworking_directory = os.path.abspath(sys.argv[1])\nos.chdir(working_directory)\nbusco_dirs = []\n\nfor item in os.listdir(\".\"):\n\tif item[0:4] == \"run_\":\n\t\tif os.path.isdir(item):\n\t\t\tbusco_dirs.append(item)\n\nn_busco_runs = len(busco_dirs)\n\nprint(\"Found \" + str(n_busco_runs) + \" BUSCO runs\")\nprint(\"\")\nprint(\"BUSCO Run\\t#Single Copy BUSCOs\")\n\nall_species = []\nall_buscos = set()\nbusco_per_species = {}\n\nfor directory in busco_dirs:\n\tos.chdir(working_directory)\n\tspecies = directory.split(\"run_\")[1]\n\tall_species.append(species)\n\tbuscos = []\n\n\tos.chdir(directory)\n\t# os.chdir(\"run_\" + lineage)\n\tos.chdir(\"busco_sequences\")\n\tos.chdir(\"single_copy_busco_sequences\")\n\n\n\tfor busco in os.listdir(\".\"):\n\t\tif busco.endswith(\".faa\"):\n\t\t\tbusco_name = busco[0:len(busco) - 4]\n\t\t\tbuscos.append(busco_name)\n\t\t\tall_buscos.add(busco_name)\n\n\tprint(species + \"\\t\" + str(len(buscos)))\n\tbusco_per_species[species] = buscos\n\nprint(\"\\n\\n\")\n\nall_buscos_count = {}\nfor busco in all_buscos:\n\tall_buscos_count[busco] = 0\n\t\n\tfor species in all_species:\n\t\tif busco in busco_per_species[species]:\n\t\t\tall_buscos_count[busco] += 1\n\nprint(\"BUSCO\\t#Species\\t%Species\")\nfor busco in all_buscos:\n\tpercent = ((all_buscos_count[busco] / float(n_busco_runs)) * 100)\n\tpercent = \"{:.2f}\".format(percent)\n\tprint(busco + \"\\t\" + str(all_buscos_count[busco]) + '\\t' + percent)\n\n\nprint(\"\\n\\n\")\n\n# Print presence/absence matrix of 
all found buscos per species\nline = [\"BUSCO\"] + all_species\nprint (\"\\t\".join(line))\n\nfor busco in all_buscos:\n\tline = [busco]\n\tfor species in all_species:\n\t\tif busco in busco_per_species[species]:\n\t\t\tline.append(\"Y\")\n\t\telse:\n\t\t\tline.append(\"N\")\n\n\tprint (\"\\t\".join(line))\n\n"
},
{
"alpha_fraction": 0.7064834237098694,
"alphanum_fraction": 0.7459478378295898,
"avg_line_length": 43.34375,
"blob_id": "eea76481df21038d2e03c8cad8b47c0f97613e32",
"content_id": "f71662c92d5e86d39f6e91d6275eb5dccddd8738",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2838,
"license_type": "permissive",
"max_line_length": 364,
"num_lines": 64,
"path": "/README.md",
"repo_name": "anandksrao/BUSCO_phylogenomics",
"src_encoding": "UTF-8",
"text": "# BUSCO_Phylogenomics\n\n\n<a href=\"https://jamiemcgowan.ie\" target=\"_blank\">Jamie McGowan - 2020</a>\n\nUtility script to construct species phylogenies using BUSCOs. Works directly from BUSCO output and can be used for supermatrix or supertree/coalescent methods.\n\n\n\nThis pipeline runs directly on the output from BUSCO. Move results directories from each BUSCO run (begins with \"run_\" by default) into the same directory. Example structure, where `INPUT_DIRECTORY` is passed to the `-d` parameter of the pipeline:\n\n```\n* INPUT_DIRECTORY\n\t* run_species1\n\t* run_species2\n\t* run_species3\n\t* run_species4\n\t* run_species5\n\t* run_species6\n\t* ........\n\n```\n\n\nThe majority of steps are parallelizable (e.g. family alignments) so running the pipeline with multiple threads leads to a dramatic decrease in runtime.\n\n### Usage\n\tpython BUSCO_Phylogenomics.py -d INPUT_DIRECTORY -o OUTPUT_DIRECTORY --supermatrix --threads 20\n\t\n\t\n\t\n### Required parameters\n* `-d --directory` input directory containing BUSCO runs\n* `-o --output` output directory\n* `-t --threads` number of threads to use\n* `--supermatrix` and/or `--supertree` choose to run supermatrix and/or supertree methods\n\n\n### Optional parameters\n* `-psc` BUSCO families that are present and single-copy in N% of species will be included in supermatrix analysis (default = 100%). Families that are missing for a species will be replaced with missing characters (\"?\").\n* `--stop_early` stop pipeline early before phylogenetic inference (i.e., for the supermatrix approach this will stop after generating the concatenated alignment). 
This is **recommended** so you can manually choose your own parameters (e.g., bootstrapping/model selection methods) or manually processing/filtering the alignments further when running IQ-Tree, etc..\n\n\n\n### Requirements\n* [Python](https://www.python.org/)\n* [BioPython](https://biopython.org/)\n* [MUSCLE](https://www.drive5.com/muscle/)\n* [trimAl](http://trimal.cgenomics.org/)\n* [IQ-TREE](http://www.iqtree.org/)\n\n\n`muscle`, `trimal` and `iqtree` should be in `$PATH`\n\n\n### Citation\n\nThese scripts were initially written to generate species phylogenies for the following publications:\n\n- [McGowan, J., & Fitzpatrick, D. A. (2020). Recent advances in oomycete genomics. Advances in Genetics. **DOI: 10.1016/bs.adgen.2020.03.001**](https://www.sciencedirect.com/science/article/abs/pii/S0065266020300043)\n- [McGowan, J., O’Hanlon, R., Owens, R. A., & Fitzpatrick, D. A. (2020). Comparative Genomic and Proteomic Analyses of Three Widespread Phytophthora Species: Phytophthora chlamydospora, Phytophthora gonapodyides and Phytophthora pseudosyringae. Microorganisms, 8(5), 653. **DOI: 10.3390/microorganisms8050653**](https://www.mdpi.com/2076-2607/8/5/653)\n\n\n[](https://doi.org/10.5281/zenodo.4320788)\n"
}
] | 3 |
nguyenvantan0125/Sentence_Classification
|
https://github.com/nguyenvantan0125/Sentence_Classification
|
4dd54cb6db1a79b8a1f02eded27cb444f3eac62c
|
a0b87e3db8080e387cc8d40af84e862a504d4618
|
0b99ac0264ff6c5a755f12feab78793261ab6c34
|
refs/heads/master
| 2022-12-07T08:09:27.064639 | 2020-08-29T10:27:02 | 2020-08-29T10:27:02 | 291,084,651 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7038152813911438,
"alphanum_fraction": 0.7058233022689819,
"avg_line_length": 33.2068977355957,
"blob_id": "e9ef6eb0e185194b152c876984de4744b6a7de68",
"content_id": "d4b7bc16d43cf8e1eb42bdb408d2db79449ac710",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1072,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 29,
"path": "/Sentence_Classification.py",
"repo_name": "nguyenvantan0125/Sentence_Classification",
"src_encoding": "UTF-8",
"text": " # Import module was created\nimport module as tx\n # Read data from CSV - đọc data từ file csv\ndf = tx.readcsv(\"data_train.csv\")\ndf.rename(columns={0:'feature',1:'label'},inplace=True)\n\n # create corpus - tạo bộ văn bản\ncorpus = df['feature'].values.tolist()\n\n # create label - tạo label \ny = df['label'].values.tolist()\n \n # Entry sentence relate to greeting or asking weather(Vietnamese requirement)\n # Nhập nhập những câu về chào hỏi hoặc hỏi về thời tiết (yêu cầu nhập tiếng việt)\nprint(\"\\nEntry sentence relate to greeting OR asking weather(Vietnamese requirement)\")\nprint(\"\\nNhập nhập những câu về chào hỏi HOẶC hỏi về thời tiết (yêu cầu nhập tiếng việt)\")\ntest_data = [str(input('entry: '))]\n\n # Model SVC\nmodel_SVC = tx.SVC_linear(corpus,y,test_data)\nmodel_SVC.processing()\n\n # Model Navie Bayes \nmodel_NB = tx.NavieBayes(corpus,y,test_data)\nmodel_NB.processing()\n\n # Compute Cosine Similarity\ncon_sim = tx.Cosine_Sim(corpus,y,test_data)\ncon_sim.processing()\n"
},
{
"alpha_fraction": 0.5904706120491028,
"alphanum_fraction": 0.5957322716712952,
"avg_line_length": 34.164947509765625,
"blob_id": "9e2435aa24e13eaec7d6de4275b1bc1771ba79c2",
"content_id": "b7e1c47ada1e6bfef5924bfa323e7dffa73bd3c8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3486,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 97,
"path": "/module.py",
"repo_name": "nguyenvantan0125/Sentence_Classification",
"src_encoding": "UTF-8",
"text": "\nfrom pyvi import ViTokenizer\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn import svm\nfrom sklearn.pipeline import Pipeline\nimport pandas \n \n # read file CSV\ndef readcsv(filename):\n a = pandas.read_csv(filename,header = None)\n return a \n\n\n # Tokenizer - tách từ tiếng việt\nclass tokenizer():\n def __init__(self,corpus):\n self.corpus = corpus\n def vi_tokenizer(self):\n for idx, txt in enumerate(self.corpus):\n self.corpus[idx] = ViTokenizer.tokenize(txt)\n return self.corpus\n\n \nclass SVC_linear():\n \"\"\"\n 1. Khởi tạo và thực hiện tách từ tiếng việt\n 2. Tạo pipeline từ scikit-learn \n 2.1. Encoder bằng CountVectorizer\n 2.2. Chọn model SVC-learn\n 3. Fit và Predict \n 4. Print kết quả \n \"\"\"\n \n def __init__(self, raw_corpus,y, sentence):\n self.corpus = tokenizer(raw_corpus).vi_tokenizer()\n self.y = y\n self.sentence = tokenizer(sentence).vi_tokenizer() \n def processing(self):\n pipe_line = Pipeline([\n (\"vect\", CountVectorizer(token_pattern=u\"(?u)\\\\b\\\\w+\\\\b\")),#bag-of-words\n (\"clf\", svm.SVC(probability = True, kernel='linear')) #model\n ])\n pipe_line.fit(self.corpus,self.y)\n predicted = pipe_line.predict(self.sentence)\n predict_proba = pipe_line.predict_proba(self.sentence)\n print('\\n\\t SVC-linear:')\n print (\"\\nPredicted Value:\", *predicted)\n print (\"\\nPredict_Proba: \", max(*predict_proba))\n\nclass NavieBayes():\n \"\"\"\n 1. Khởi tạo và thực hiện tách từ tiếng việt\n 2. Tạo pipeline từ scikit-learn \n 2.1. Encoder bằng CountVectorizer\n 2.2. Chọn model Navie Bayes\n 3. Fit và Predict \n 4. 
Print kết quả \n \"\"\"\n def __init__(self, raw_corpus,y, sentence):\n self.corpus = tokenizer(raw_corpus).vi_tokenizer()\n self.y = y\n self.sentence = tokenizer(sentence).vi_tokenizer() \n def processing(self):\n pipe_line = Pipeline([\n (\"vect\", CountVectorizer(token_pattern=u\"(?u)\\\\b\\\\w+\\\\b\")),\n (\"clf\", MultinomialNB())\n ])\n pipe_line.fit(self.corpus,self.y)\n predicted = pipe_line.predict(self.sentence)\n predict_proba = pipe_line.predict_proba(self.sentence)\n print('\\n\\t Naive Bayer:')\n print (\"\\nPredicted Value:\", *predicted)\n print (\"\\nPredict_Proba: \", max(*predict_proba))\n\nclass Cosine_Sim():\n def __init__(self,raw_corpus, y, sentence):\n self.corpus = tokenizer(raw_corpus).vi_tokenizer()\n self.y = y\n self.sentence = tokenizer(sentence).vi_tokenizer() \n def processing(self):\n tfidf = TfidfVectorizer(token_pattern=u\"(?u)\\\\b\\\\w+\\\\b\")\n X_train = tfidf.fit_transform(self.corpus).toarray()\n X_test = tfidf.transform(self.sentence).toarray()\n result = []\n for idx in range(len(X_train)):\n x = X_train[idx].reshape(1,len(X_train[idx]))\n a = cosine_similarity(x,X_test)\n result.append(a[0])\n m = max(result)\n for i, j in enumerate(result): \n if j == m:\n idx = i\n break\n # show result\n print('\\n\\t Consine Similarity:')\n print('\\nPredict Values: ',self.y[idx])\n \n"
}
] | 2 |
SharmaAjay19/CNNBasics
|
https://github.com/SharmaAjay19/CNNBasics
|
36803bdf8569dc14a5cbe37c9687f31be12e52ba
|
09e7f227d096cbf59fe446bf7a38b95e20bafa5f
|
3ab4579454bd8fb66b7fa89f262d4b3d4e27e74f
|
refs/heads/master
| 2021-04-28T20:21:43.605098 | 2018-03-08T02:44:45 | 2018-03-08T02:44:45 | 121,922,556 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6543415188789368,
"alphanum_fraction": 0.7008724808692932,
"avg_line_length": 29.871795654296875,
"blob_id": "5a1a7305b7a3acdfadaae62f8b92303ded67bcc3",
"content_id": "346edfa4de2fee0461206b79af057a92031610de",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2407,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 78,
"path": "/Predict.py",
"repo_name": "SharmaAjay19/CNNBasics",
"src_encoding": "UTF-8",
"text": "import cv2\nfrom keras.models import model_from_json\nimport sys\ntry:\n\timgname = sys.argv[1]\nexcept:\n\tprint(\"No image provided\")\n\tsys.exit()\nimg = cv2.imread('digits/'+imgname, cv2.IMREAD_GRAYSCALE)\nresized = cv2.resize(img, (28, 28))\ncv2.imshow(\"resized\", resized)\ncv2.waitKey(0)\nresized = resized.reshape(1, 1, 28, 28).astype('float32')\nresized = resized / 255\njson_file = open('model.json', 'r')\nloaded_model_json = json_file.read()\njson_file.close()\nmodel = model_from_json(loaded_model_json)\n# load weights into new model\nmodel.load_weights(\"model.h5\")\nprint(\"Loaded model from disk\")\n'''with open('ComplexCNN.pkl', 'rb') as f:\n\tmodel = pickle.load(f)\n\tf.close()'''\npred = model.predict(resized)\ni = 0\nfor cp in list(map(lambda x: str(float(x)*100), list(pred)[0])):\n\tprint(i, '-->', cp)\n\ti += 1\nprint(pred.argmax(axis=-1))\n\n'''from keras.utils import np_utils\nfrom keras import backend as K\nK.set_image_dim_ordering('th')\n# fix random seed for reproducibility\nseed = 7\nnumpy.random.seed(seed)\n#Convert 784 vector into 1x28x28 shape for Convolutional Neural Network\nX_train = X_train.reshape(X_train.shape[0], 1, 28, 28).astype('float32')\nX_test = X_test.reshape(X_test.shape[0], 1, 28, 28).astype('float32')\n\n# normalize inputs from 0-255 to 0-1\nX_train = X_train / 255\nX_test = X_test / 255\n# one hot encode outputs\ny_train = np_utils.to_categorical(y_train)\ny_test = np_utils.to_categorical(y_test)\nnum_classes = y_test.shape[1]\n\n# define the larger model\ndef larger_model():\n\t# create model\n\tmodel = Sequential()\n\tmodel.add(Conv2D(30, (5, 5), input_shape=(1, 28, 28), activation='relu'))\n\tmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\tmodel.add(Conv2D(15, (3, 3), activation='relu'))\n\tmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\tmodel.add(Dropout(0.2))\n\tmodel.add(Flatten())\n\tmodel.add(Dense(128, activation='relu'))\n\tmodel.add(Dense(50, activation='relu'))\n\tmodel.add(Dense(num_classes, 
activation='softmax'))\n\t# Compile model\n\tmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\treturn model\n\n\n\n# build the model\nmodel = larger_model()\n# Fit the model\nmodel.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=200, verbose=2)\nwith open('ComplexCNN.pkl', 'wb') as f:\n\tpickle.dump(model, f)\n\tf.close()\n# Final evaluation of the model\nscores = model.evaluate(X_test, y_test, verbose=0)\nprint(\"Large CNN Error: %.2f%%\" % (100-scores[1]*100))'''"
},
{
"alpha_fraction": 0.6776527166366577,
"alphanum_fraction": 0.7118167281150818,
"avg_line_length": 33.56944274902344,
"blob_id": "cb2edfa549450ac2fd7a7fd0653f9559c28e8b1f",
"content_id": "e47fff022b918bf7224cec6f4db09ce621206b82",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2488,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 72,
"path": "/ComplexCNN.py",
"repo_name": "SharmaAjay19/CNNBasics",
"src_encoding": "UTF-8",
"text": "import pickle\n# Plot ad hoc mnist instances\nload = True\nif load:\n\tfrom keras.datasets import mnist\n\t(X_train, y_train), (X_test, y_test) = mnist.load_data()\n\tpickle.dump(X_train, open('x_train.pkl', 'wb'))\n\tpickle.dump(y_train, open('y_train.pkl', 'wb'))\n\tpickle.dump(X_test, open('x_test.pkl', 'wb'))\n\tpickle.dump(y_test, open('y_test.pkl', 'wb'))\nelse:\n\tX_train = pickle.load(open('x_train.pkl', 'rb'))\n\ty_train = pickle.load(open('y_train.pkl', 'rb'))\n\tX_test = pickle.load(open('x_test.pkl', 'rb'))\n\ty_test = pickle.load(open('y_test.pkl', 'rb'))\nimport numpy\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Dropout\nfrom keras.layers import Flatten\nfrom keras.layers.convolutional import Conv2D\nfrom keras.layers.convolutional import MaxPooling2D\nfrom keras.utils import np_utils\nfrom keras import backend as K\nK.set_image_dim_ordering('th')\n# fix random seed for reproducibility\nseed = 7\nnumpy.random.seed(seed)\n#Convert 784 vector into 1x28x28 shape for Convolutional Neural Network\nX_train = X_train.reshape(X_train.shape[0], 1, 28, 28).astype('float32')\nX_test = X_test.reshape(X_test.shape[0], 1, 28, 28).astype('float32')\n\n# normalize inputs from 0-255 to 0-1\nX_train = X_train / 255\nX_test = X_test / 255\n# one hot encode outputs\ny_train = np_utils.to_categorical(y_train)\ny_test = np_utils.to_categorical(y_test)\nnum_classes = y_test.shape[1]\n\n# define the larger model\ndef larger_model():\n\t# create model\n\tmodel = Sequential()\n\tmodel.add(Conv2D(30, (5, 5), input_shape=(1, 28, 28), activation='relu'))\n\tmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\tmodel.add(Conv2D(15, (3, 3), activation='relu'))\n\tmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\tmodel.add(Dropout(0.2))\n\tmodel.add(Flatten())\n\tmodel.add(Dense(128, activation='relu'))\n\tmodel.add(Dense(50, activation='relu'))\n\tmodel.add(Dense(num_classes, activation='softmax'))\n\t# Compile 
model\n\tmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\treturn model\n\n\n\n# build the model\nmodel = larger_model()\n# Fit the model\nmodel.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=20, batch_size=200, verbose=2)\n# Final evaluation of the model\nscores = model.evaluate(X_test, y_test, verbose=0)\nprint(\"Large CNN Error: %.2f%%\" % (100-scores[1]*100))\nmodel_json = model.to_json()\nwith open(\"model.json\", \"w\") as json_file:\n json_file.write(model_json)\n# serialize weights to HDF5\nmodel.save_weights(\"model.h5\")\nprint(\"Saved model to disk\")"
},
{
"alpha_fraction": 0.6706255078315735,
"alphanum_fraction": 0.7110055685043335,
"avg_line_length": 33.14864730834961,
"blob_id": "17376f79fec04b672156694b81c1ccb77485b1d1",
"content_id": "d641e4d2adfdc0ff0eeba554de65f81e93e6f85e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2526,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 74,
"path": "/SimpleCNN.py",
"repo_name": "SharmaAjay19/CNNBasics",
"src_encoding": "UTF-8",
"text": "import cv2\n\nimg = cv2.imread('digits/demo.png', cv2.IMREAD_GRAYSCALE)\nresized = cv2.resize(img, (28, 28))\n#cv2.imshow(\"resized\", resized)\n#cv2.waitKey(0)\nresized = resized.reshape(1, 1, 28, 28).astype('float32')\nresized = resized / 255\nimport pickle\n# Plot ad hoc mnist instances\nload = True\nif load:\n\tfrom keras.datasets import mnist\n\t(X_train, y_train), (X_test, y_test) = mnist.load_data()\n\tpickle.dump(X_train, open('x_train.pkl', 'wb'))\n\tpickle.dump(y_train, open('y_train.pkl', 'wb'))\n\tpickle.dump(X_test, open('x_test.pkl', 'wb'))\n\tpickle.dump(y_test, open('y_test.pkl', 'wb'))\nelse:\n\tX_train = pickle.load(open('x_train.pkl', 'rb'))\n\ty_train = pickle.load(open('y_train.pkl', 'rb'))\n\tX_test = pickle.load(open('x_test.pkl', 'rb'))\n\ty_test = pickle.load(open('y_test.pkl', 'rb'))\nimport numpy\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Dropout\nfrom keras.layers import Flatten\nfrom keras.layers.convolutional import Conv2D\nfrom keras.layers.convolutional import MaxPooling2D\nfrom keras.utils import np_utils\nfrom keras import backend as K\nK.set_image_dim_ordering('th')\n# fix random seed for reproducibility\nseed = 7\nnumpy.random.seed(seed)\n#Convert 784 vector into 1x28x28 shape for Convolutional Neural Network\nX_train = X_train.reshape(X_train.shape[0], 1, 28, 28).astype('float32')\nX_test = X_test.reshape(X_test.shape[0], 1, 28, 28).astype('float32')\n\n# normalize inputs from 0-255 to 0-1\nX_train = X_train / 255\nX_test = X_test / 255\n# one hot encode outputs\ny_resized = numpy.array([4])\ny_resized = np_utils.to_categorical(y_resized)\ny_train = np_utils.to_categorical(y_train)\ny_test = np_utils.to_categorical(y_test)\nnum_classes = y_test.shape[1]\n\ndef baseline_model():\n\t# create model\n\tmodel = Sequential()\n\tmodel.add(Conv2D(32, (5, 5), input_shape=(1, 28, 28), activation='relu'))\n\tmodel.add(MaxPooling2D(pool_size=(2, 
2)))\n\tmodel.add(Dropout(0.2))\n\tmodel.add(Flatten())\n\tmodel.add(Dense(128, activation='relu'))\n\tmodel.add(Dense(num_classes, activation='softmax'))\n\t# Compile model\n\tmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\treturn model\n\n# build the model\nmodel = baseline_model()\n# Fit the model\nmodel.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=1, batch_size=500)\n# Final evaluation of the model\nscores = model.evaluate(X_test, y_test, verbose=0)\nprint(\"CNN Error: %.2f%%\" % (100-scores[1]*100))\n\n\nscore = model.evaluate(resized, y_resized)\nprint(\"CNN Error: %.2f%%\" % (100-scores[1]*100))"
},
{
"alpha_fraction": 0.8067484498023987,
"alphanum_fraction": 0.8067484498023987,
"avg_line_length": 45.57143020629883,
"blob_id": "9d486892fd9180cba0880063dcec5ae03d2a3beb",
"content_id": "8e730a43281d16777f087d5dab49ab06bacbe6af",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 326,
"license_type": "no_license",
"max_line_length": 181,
"num_lines": 7,
"path": "/README.md",
"repo_name": "SharmaAjay19/CNNBasics",
"src_encoding": "UTF-8",
"text": "# CNNBasics\nA demonstration of using Keras to create Simple as well as Complex Convolutional Networks to learn from images. In the given sample, the dataset used is the MNIST digit recognition.\nAlso the file Predict.py shows how to use a learnt model to recognize a digit in an image.\n\nRequired libraries:\nkeras\nopencv-python\n"
}
] | 4 |
ITU-Photogrammetry-Lab/satrap
|
https://github.com/ITU-Photogrammetry-Lab/satrap
|
7827e8c30ee04d12b1d8b1b2907459b161980fe1
|
64cc33f3127006b1f1e592a719c08d31863d2218
|
635310e10371fdea7eead026dd409b7194d54a5e
|
refs/heads/master
| 2021-02-08T20:35:43.285155 | 2020-03-01T16:13:53 | 2020-03-01T16:13:53 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.8200562596321106,
"alphanum_fraction": 0.8200562596321106,
"avg_line_length": 70.13333129882812,
"blob_id": "6ca2861bf0cdef29ebe5ad06887e104f08c2fdfb",
"content_id": "a49a70156bc7999d589fbe8a29d6027d9f2049c7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1067,
"license_type": "no_license",
"max_line_length": 506,
"num_lines": 15,
"path": "/README.md",
"repo_name": "ITU-Photogrammetry-Lab/satrap",
"src_encoding": "UTF-8",
"text": "# Spatial Analyst of Transportation Performance -- SATRAP\n\nSATRAP quantifies and illustrates centrality and accessibility metrics of a road network. The software is designed as an independent GIS software that can calculate transportation performance metrics with an all-round open source approach. Users can obtain, analyze and visualize open source spatial data without handling any technical difficulties with SATRAP. It targets policy makers and researchers who want to analyze a road network's geometric and topologic properties with its easy-to-use interface.\n\n# To Start SATRAP\n\nIn this version of the software, it is opened from a conda environment that has the required libraries. Environment file (pygis.yml) in this repository can easily be created with the following command on Anaconda Command Prompt or cmd.\n\n>conda env create -f pygis.yml\n\nAfter the environment is created, the following command should be executed to use the environment.\n\n>conda activate pygis\n\nAfter these processes, satrap.py can be opened via prompt screen from pygis environment.\n"
},
{
"alpha_fraction": 0.6545171737670898,
"alphanum_fraction": 0.6752417087554932,
"avg_line_length": 47.124332427978516,
"blob_id": "0ae669df5258fa4ddf1938d4e74d983a7665ca42",
"content_id": "fd89bf7776ce93072bfda724ce49e911c2057d66",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 35996,
"license_type": "no_license",
"max_line_length": 159,
"num_lines": 748,
"path": "/OldVersions/v1.0--Qt/frontend.py",
"repo_name": "ITU-Photogrammetry-Lab/satrap",
"src_encoding": "UTF-8",
"text": "import sys\nimport backend as bk\nfrom PyQt5 import QtGui, QtCore, QtWidgets\n\nclass Controller:\n \n def __init__(self):\n self.mainwindow = MainAppWindow()\n self.getstudyarea = GsaWindow()\n self.centrality = CentralityWindow()\n self.accessibility = AccessibilityWindow()\n \n def open_main(self):\n self.mainwindow.switch_window1.connect(self.open_gsa)\n self.mainwindow.switch_window2.connect(self.open_cent)\n self.mainwindow.switch_window3.connect(self.open_access)\n self.getstudyarea.hide()\n self.centrality.hide()\n self.accessibility.hide()\n self.mainwindow.show()\n \n def open_gsa(self):\n self.getstudyarea.switch_window.connect(self.open_main)\n self.mainwindow.hide()\n self.getstudyarea.show()\n \n def open_cent(self):\n self.centrality.switch_window.connect(self.open_main)\n self.mainwindow.hide()\n self.centrality.show()\n \n def open_access(self):\n self.accessibility.switch_window.connect(self.open_main)\n self.mainwindow.hide()\n self.accessibility.show()\n \nclass MainAppWindow(QtWidgets.QFrame):\n \n switch_window1 = QtCore.pyqtSignal()\n switch_window2 = QtCore.pyqtSignal()\n switch_window3 = QtCore.pyqtSignal()\n \n def __init__(self):\n super(MainAppWindow, self).__init__()\n \n self.mainwindow = MainWindow()\n self.mainwindow.setupUi(self)\n self.mainwindow.GetStudyAreaButton.clicked.connect(self.open_gsa_handler)\n self.mainwindow.CentralityAnalysisButton.clicked.connect(self.open_cent_handler)\n self.mainwindow.AccessibilityAnalysisButton.clicked.connect(self.open_access_handler)\n \n def open_gsa_handler(self):\n self.switch_window1.emit()\n \n def open_cent_handler(self):\n self.switch_window2.emit()\n \n def open_access_handler(self):\n self.switch_window3.emit()\n \nclass GsaWindow(QtWidgets.QFrame):\n \n switch_window = QtCore.pyqtSignal()\n \n def __init__(self):\n super(GsaWindow,self).__init__()\n self.getstudyarea = GetStudyArea()\n self.getstudyarea.setupUi(self)\n 
self.getstudyarea.BackButton.clicked.connect(self.open_main_handler)\n\n def open_main_handler(self):\n self.switch_window.emit()\n\nclass CentralityWindow(QtWidgets.QFrame):\n \n switch_window = QtCore.pyqtSignal()\n \n def __init__(self):\n super(CentralityWindow,self).__init__()\n self.centrality = Centrality()\n self.centrality.setupUi(self)\n self.centrality.BackButton.clicked.connect(self.open_main_handler)\n\n def open_main_handler(self):\n self.switch_window.emit()\n \nclass AccessibilityWindow(QtWidgets.QFrame):\n \n switch_window = QtCore.pyqtSignal()\n \n def __init__(self):\n super(AccessibilityWindow,self).__init__()\n self.accessibility = Accessibility()\n self.accessibility.setupUi(self)\n self.accessibility.BackButton.clicked.connect(self.open_main_handler)\n\n def open_main_handler(self):\n self.switch_window.emit()\n\nclass MainWindow(object):\n def setupUi(self, MainFrame):\n MainFrame.setObjectName(\"MainFrame\")\n MainFrame.setFixedSize(870, 230)\n MainFrame.setWindowIcon(QtGui.QIcon(\"interface.ico\"))\n MainFrame.setFrameShape(QtWidgets.QFrame.Box)\n \n # Fonts\n font = QtGui.QFont()\n font.setFamily(\"Palatino Linotype\")\n font.setPointSize(10)\n \n font2 = QtGui.QFont()\n font2.setFamily(\"Palatino Linotype\")\n font2.setPointSize(20)\n font2.setBold(True)\n font2.setWeight(75)\n \n font3 = QtGui.QFont()\n font3.setFamily(\"Palatino Linotype\")\n font3.setPointSize(11)\n font3.setBold(True)\n font3.setWeight(75)\n \n # Labels\n self.ChooseAnalysisLabel = QtWidgets.QLabel(MainFrame)\n self.ChooseAnalysisLabel.setGeometry(QtCore.QRect(310, 20, 251, 101))\n palette = QtGui.QPalette()\n brush = QtGui.QBrush(QtGui.QColor(0, 0, 127))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)\n brush = QtGui.QBrush(QtGui.QColor(0, 0, 127))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)\n brush = 
QtGui.QBrush(QtGui.QColor(120, 120, 120))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)\n self.ChooseAnalysisLabel.setPalette(palette)\n self.ChooseAnalysisLabel.setFont(font2)\n self.ChooseAnalysisLabel.setAutoFillBackground(False)\n self.ChooseAnalysisLabel.setAlignment(QtCore.Qt.AlignCenter)\n\n # Buttons\n self.GetStudyAreaButton = QtWidgets.QPushButton(MainFrame)\n self.GetStudyAreaButton.setGeometry(QtCore.QRect(0, 0, 161, 31))\n self.GetStudyAreaButton.setFont(font)\n \n self.CentralityAnalysisButton = QtWidgets.QPushButton(MainFrame)\n self.CentralityAnalysisButton.setGeometry(QtCore.QRect(350, 100, 181, 31))\n self.CentralityAnalysisButton.setFont(font3)\n \n self.AccessibilityAnalysisButton = QtWidgets.QPushButton(MainFrame)\n self.AccessibilityAnalysisButton.setGeometry(QtCore.QRect(350, 140, 181, 31))\n self.AccessibilityAnalysisButton.setFont(font3)\n \n self.retranslateUi(MainFrame)\n QtCore.QMetaObject.connectSlotsByName(MainFrame)\n\n def retranslateUi(self, MainFrame):\n _translate = QtCore.QCoreApplication.translate\n MainFrame.setWindowTitle(_translate(\"MainFrame\", \"SATRAP\"))\n self.ChooseAnalysisLabel.setText(_translate(\"MainFrame\", \"Choose Analysis\"))\n self.GetStudyAreaButton.setText(_translate(\"MainFrame\", \"Get Study Area\"))\n self.CentralityAnalysisButton.setText(_translate(\"MainFrame\", \"Centrality Analysis\"))\n self.AccessibilityAnalysisButton.setText(_translate(\"MainFrame\", \"Accessibility Analysis\"))\n \nclass GetStudyArea(object):\n def setupUi(self, Frame):\n Frame.setObjectName(\"Frame\")\n Frame.setFixedSize(870, 230)\n Frame.setWindowIcon(QtGui.QIcon(\"interface.ico\"))\n \n # Fonts\n font = QtGui.QFont()\n font.setFamily(\"Palatino Linotype\")\n font.setPointSize(9)\n font.setUnderline(False)\n \n font2 = QtGui.QFont()\n font2.setFamily(\"Palatino Linotype\")\n font2.setPointSize(11)\n font2.setBold(True)\n font2.setWeight(75)\n \n # Labels\n 
self.RegionNameLabel = QtWidgets.QLabel(Frame)\n self.RegionNameLabel.setGeometry(QtCore.QRect(20, 80, 181, 21))\n self.RegionNameLabel.setFont(font)\n self.RegionNameLabel.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)\n \n self.ShpOutputLabel = QtWidgets.QLabel(Frame)\n self.ShpOutputLabel.setGeometry(QtCore.QRect(20, 110, 181, 21))\n self.ShpOutputLabel.setFont(font)\n self.ShpOutputLabel.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)\n \n self.ShpOutputLabel2 = QtWidgets.QLabel(Frame)\n self.ShpOutputLabel2.setGeometry(QtCore.QRect(20, 140, 181, 21))\n self.ShpOutputLabel2.setFont(font)\n self.ShpOutputLabel2.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)\n \n # TextBoxes\n self.RegionNameBox = QtWidgets.QLineEdit(Frame)\n self.RegionNameBox.setGeometry(QtCore.QRect(210, 80, 221, 20))\n\n self.RegionNumberBox = QtWidgets.QLineEdit(Frame)\n self.RegionNumberBox.setGeometry(QtCore.QRect(440, 80, 41, 20))\n\n self.ShpOutputBox1 = QtWidgets.QLineEdit(Frame)\n self.ShpOutputBox1.setGeometry(QtCore.QRect(210, 110, 271, 20))\n \n self.ShpOutputBox2 = QtWidgets.QLineEdit(Frame)\n self.ShpOutputBox2.setGeometry(QtCore.QRect(210, 140, 271, 20))\n\n # Buttons\n self.BackButton = QtWidgets.QPushButton(Frame)\n self.BackButton.setGeometry(QtCore.QRect(0, 0, 149, 24))\n self.BackButton.setFont(font)\n \n self.HelpButton = QtWidgets.QPushButton(Frame)\n self.HelpButton.setGeometry(QtCore.QRect(720, 0, 149, 24))\n self.HelpButton.setFont(font)\n self.HelpButton.clicked.connect(self.getHelp)\n \n self.ShpOutputBrowse = QtWidgets.QPushButton(Frame)\n self.ShpOutputBrowse.setGeometry(QtCore.QRect(490, 110, 75, 21))\n self.ShpOutputBrowse.setFont(font)\n self.ShpOutputBrowse.clicked.connect(self.saveShapefile)\n \n self.ExecuteButton = QtWidgets.QPushButton(Frame)\n self.ExecuteButton.setGeometry(QtCore.QRect(730, 180, 121, 41))\n self.ExecuteButton.setFont(font2)\n 
self.ExecuteButton.clicked.connect(self.getPoly)\n \n self.retranslateUi(Frame)\n QtCore.QMetaObject.connectSlotsByName(Frame)\n \n def getHelp(self):\n QtWidgets.QMessageBox.about(None, \"About\", \"* Region name should be checked first on https://nominatim.openstreetmap.org. \\n\\\n* If there is no region name on the website search, region's network is unaccessible.\")\n \n def getPoly(self):\n try:\n if len(self.RegionNumberBox.text()) > 0:\n bk.getPolyData(self.RegionNameBox.text(), self.ShpOutputBox1.text(), self.ShpOutputBox2.text(), whichResult=self.RegionNumberBox.text())\n elif len(self.RegionNumberBox.text()) == 0:\n bk.getPolyData(self.RegionNameBox.text(), self.ShpOutputBox1.text(), self.ShpOutputBox2.text())\n except FileNotFoundError:\n QtWidgets.QMessageBox.critical(None, \"Error\", \"ERROR: Valid Output Location should be entered\")\n except ValueError:\n QtWidgets.QMessageBox.critical(None, \"Error\", \"ERROR: Valid Region Name/Number should be entered\")\n \n def saveShapefile(self):\n path = QtWidgets.QFileDialog.getExistingDirectory(None, \"Select Directory\")\n self.ShpOutputBox1.setText(path)\n \n def retranslateUi(self, Frame):\n _translate = QtCore.QCoreApplication.translate\n Frame.setWindowTitle(_translate(\"Frame\", \"Get Study Area\"))\n self.ExecuteButton.setText(_translate(\"Frame\", \"Execute\"))\n self.ShpOutputLabel2.setText(_translate(\"Frame\", \"Shapefile Output Name\"))\n self.RegionNameLabel.setText(_translate(\"Frame\", \"Region Name/Result Number\"))\n self.HelpButton.setText(_translate(\"Frame\", \"Help\"))\n self.BackButton.setText(_translate(\"Frame\", \"Back\"))\n self.ShpOutputLabel.setText(_translate(\"Frame\", \"Shapefile Output Folder\"))\n self.ShpOutputBrowse.setText(_translate(\"Frame\", \"Browse\"))\n\nclass Centrality(object):\n def setupUi(self, Frame):\n Frame.setObjectName(\"Frame\")\n Frame.setFixedSize(870, 230)\n Frame.setWindowIcon(QtGui.QIcon(\"interface.ico\"))\n \n # Fonts\n font = QtGui.QFont()\n 
font.setFamily(\"Palatino Linotype\")\n font.setPointSize(9)\n font.setUnderline(False)\n \n font2 = QtGui.QFont()\n font2.setFamily(\"Palatino Linotype\")\n font2.setPointSize(11)\n font2.setBold(True)\n font2.setWeight(75)\n \n self.verticalline = QtWidgets.QFrame(Frame)\n self.verticalline.setGeometry(QtCore.QRect(600, 0, 20, 231))\n self.verticalline.setFrameShape(QtWidgets.QFrame.VLine)\n self.verticalline.setFrameShadow(QtWidgets.QFrame.Sunken)\n \n # Labels\n self.PolyBoundaryLabel = QtWidgets.QLabel(Frame)\n self.PolyBoundaryLabel.setGeometry(QtCore.QRect(20, 50, 181, 21))\n self.PolyBoundaryLabel.setFont(font)\n self.PolyBoundaryLabel.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)\n\n self.RegionNameLabel = QtWidgets.QLabel(Frame)\n self.RegionNameLabel.setGeometry(QtCore.QRect(20, 80, 181, 21))\n self.RegionNameLabel.setFont(font)\n self.RegionNameLabel.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)\n\n self.TransportationModeLabel = QtWidgets.QLabel(Frame)\n self.TransportationModeLabel.setGeometry(QtCore.QRect(20, 110, 181, 21))\n self.TransportationModeLabel.setFont(font)\n self.TransportationModeLabel.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)\n \n self.WebMapLabel = QtWidgets.QLabel(Frame)\n self.WebMapLabel.setGeometry(QtCore.QRect(20, 140, 181, 21))\n self.WebMapLabel.setFont(font)\n self.WebMapLabel.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)\n \n self.ShpOutputLabel = QtWidgets.QLabel(Frame)\n self.ShpOutputLabel.setGeometry(QtCore.QRect(20, 170, 181, 21))\n self.ShpOutputLabel.setFont(font)\n self.ShpOutputLabel.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)\n\n self.DataInputLabel = QtWidgets.QLabel(Frame)\n self.DataInputLabel.setGeometry(QtCore.QRect(640, 80, 111, 21))\n self.DataInputLabel.setFont(font)\n 
self.DataInputLabel.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)\n\n self.AnalysisMethodLabel = QtWidgets.QLabel(Frame)\n self.AnalysisMethodLabel.setGeometry(QtCore.QRect(640, 110, 111, 21))\n self.AnalysisMethodLabel.setFont(font)\n self.AnalysisMethodLabel.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)\n \n # TextBoxes\n self.PolyBoundaryBox = QtWidgets.QLineEdit(Frame)\n self.PolyBoundaryBox.setGeometry(QtCore.QRect(210, 50, 271, 20))\n\n self.RegionNameBox = QtWidgets.QLineEdit(Frame)\n self.RegionNameBox.setGeometry(QtCore.QRect(210, 80, 221, 20))\n self.RegionNameBox.setEnabled(False)\n \n self.RegionNumberBox = QtWidgets.QLineEdit(Frame)\n self.RegionNumberBox.setGeometry(QtCore.QRect(440, 80, 41, 20))\n self.RegionNumberBox.setEnabled(False)\n \n self.TransportationModeBox = QtWidgets.QLineEdit(Frame)\n self.TransportationModeBox.setGeometry(QtCore.QRect(210, 110, 271, 20))\n \n self.WebMapBox = QtWidgets.QLineEdit(Frame)\n self.WebMapBox.setGeometry(QtCore.QRect(210, 140, 271, 20))\n\n self.ShpOutputBox = QtWidgets.QLineEdit(Frame)\n self.ShpOutputBox.setGeometry(QtCore.QRect(210, 170, 271, 20))\n\n # Buttons\n self.BackButton = QtWidgets.QPushButton(Frame)\n self.BackButton.setGeometry(QtCore.QRect(0, 0, 149, 24))\n self.BackButton.setFont(font)\n\n self.HelpButton = QtWidgets.QPushButton(Frame)\n self.HelpButton.setGeometry(QtCore.QRect(720, 0, 149, 24))\n self.HelpButton.setFont(font)\n self.HelpButton.clicked.connect(self.getHelp)\n\n self.PolyBrowse = QtWidgets.QPushButton(Frame)\n self.PolyBrowse.setGeometry(QtCore.QRect(490, 50, 75, 21))\n self.PolyBrowse.setFont(font)\n self.PolyBrowse.clicked.connect(self.openFile)\n \n self.WebMapOutputBrowse = QtWidgets.QPushButton(Frame)\n self.WebMapOutputBrowse.setGeometry(QtCore.QRect(490, 140, 75, 21))\n self.WebMapOutputBrowse.setFont(font)\n self.WebMapOutputBrowse.clicked.connect(self.saveWebmap)\n \n self.ShpOutputBrowse = 
QtWidgets.QPushButton(Frame)\n self.ShpOutputBrowse.setGeometry(QtCore.QRect(490, 170, 75, 21))\n self.ShpOutputBrowse.setFont(font)\n self.ShpOutputBrowse.clicked.connect(self.saveShapefile)\n \n self.Execute = QtWidgets.QPushButton(Frame)\n self.Execute.setGeometry(QtCore.QRect(730, 180, 121, 41))\n self.Execute.setFont(font2)\n self.Execute.clicked.connect(self.returnedFunction)\n \n # Comboboxes\n self.DataInputSelection = QtWidgets.QComboBox(Frame)\n self.DataInputSelection.setGeometry(QtCore.QRect(760, 80, 91, 22))\n self.DataInputSelection.setFont(font)\n self.DataInputSelection.setLayoutDirection(QtCore.Qt.LeftToRight)\n self.DataInputSelection.setSizeAdjustPolicy(QtWidgets.QComboBox.AdjustToContentsOnFirstShow)\n self.DataInputSelection.addItem(\"Boundary\")\n self.DataInputSelection.addItem(\"Region Name\")\n self.DataInputSelection.currentIndexChanged.connect(self.methodSelection)\n \n self.AnalysisMethodSelection = QtWidgets.QComboBox(Frame)\n self.AnalysisMethodSelection.setGeometry(QtCore.QRect(760, 110, 91, 22))\n self.AnalysisMethodSelection.setFont(font)\n self.AnalysisMethodSelection.setLayoutDirection(QtCore.Qt.LeftToRight)\n self.AnalysisMethodSelection.setSizeAdjustPolicy(QtWidgets.QComboBox.AdjustToContentsOnFirstShow)\n self.AnalysisMethodSelection.addItem(\"Degree\")\n self.AnalysisMethodSelection.addItem(\"Betweenness\")\n self.AnalysisMethodSelection.addItem(\"Closeness\")\n \n self.retranslateUi(Frame)\n QtCore.QMetaObject.connectSlotsByName(Frame)\n\n def getHelp(self):\n QtWidgets.QMessageBox.about(None, \"About\", \"* Input polygon boundary data should be in polygon type shapefile format \\n\\\n* Region name should be checked first on https://nominatim.openstreetmap.org. \\n\\\n* If there is no region name on the website search, region's network is unaccessible. 
\\n\\\n* Choose the path where the road network of the area of interest and the result of the analysis is stored in shapefile format via 'Shapefile Output Folder' \\n\\\n* Choose the path where the webmap that is generated by the result of analysis is stored via 'Webmap Output Path'. \\n\\\n* Available transportation modes on OSM database are;\\n\\\n 'drive' - get drivable public streets (but not service roads)\\n\\\n 'drive_service' - get drivable public streets, including service roads\\n\\\n 'walk' - get all streets and paths that pedestrians can use (this network type ignores one-way directionality)\\n\\\n 'bike' - get all streets and paths that cyclists can use\\n\\\n 'all' - download all (non-private) OSM streets and paths\\n\\\n 'all_private' - download all OSM streets and paths, including private-access ones\")\n \n def methodSelection(self):\n if self.DataInputSelection.currentText() == \"Boundary\":\n self.PolyBoundaryBox.setEnabled(True)\n self.PolyBrowse.setEnabled(True)\n self.RegionNameBox.setEnabled(False)\n self.RegionNumberBox.setEnabled(False)\n elif self.DataInputSelection.currentText() == \"Region Name\":\n self.PolyBoundaryBox.setEnabled(False)\n self.PolyBrowse.setEnabled(False) \n self.RegionNameBox.setEnabled(True)\n self.RegionNumberBox.setEnabled(True)\n \n def returnedFunction(self):\n try:\n funcs = {\"Degree\": self.degreeCentrality,\n \"Betweenness\": self.betweennessCentrality,\n \"Closeness\": self.closenessCentrality}\n function = funcs[self.AnalysisMethodSelection.currentText()]\n return function()\n except FileNotFoundError:\n QtWidgets.QMessageBox.critical(None, \"Error\", \"ERROR: Valid output path should be entered\")\n except:\n if len(self.ShpOutputBox.text()) == 0:\n QtWidgets.QMessageBox.critical(None, \"Error\", \"ERROR: Enter output path.\")\n else:\n QtWidgets.QMessageBox.critical(None, \"Error\", \"ERROR: Region name / Polygon boundary should be entered in valid format.\")\n\n def degreeCentrality(self):\n 
print(\"Calculating Degree Centrality..\")\n G = self.findG()\n if len(self.WebMapBox.text()) > 0:\n bk.degreeCentrality(G, self.ShpOutputBox.text(), self.WebMapBox.text())\n else:\n bk.degreeCentrality(G, self.ShpOutputBox.text())\n \n def betweennessCentrality(self):\n print(\"Calculating Betweenness..\")\n G = self.findG()\n if len(self.WebMapBox.text()) > 0:\n bk.betweennessCentrality(G, self.ShpOutputBox.text(), self.WebMapBox.text())\n else:\n bk.betweennessCentrality(G, self.ShpOutputBox.text())\n \n def closenessCentrality(self):\n print(\"Calculating Closeness..\")\n G = self.findG()\n if len(self.WebMapBox.text()) > 0:\n bk.closenessCentrality(G, self.ShpOutputBox.text(), self.WebMapBox.text())\n else:\n bk.closenessCentrality(G, self.ShpOutputBox.text())\n\n def openFile(self):\n name = QtWidgets.QFileDialog.getOpenFileName()\n self.PolyBoundaryBox.setText(str(name[0]))\n\n def saveWebmap(self):\n path = QtWidgets.QFileDialog.getSaveFileName(None, \"Select Directory\", \"webmap.html\", \"HTML Files (*.html)\")\n self.WebMapBox.setText(str(path[0]))\n \n def saveShapefile(self):\n path2 = QtWidgets.QFileDialog.getExistingDirectory(None, \"Select Directory\")\n self.ShpOutputBox.setText(path2)\n\n def findG(self):\n if self.DataInputSelection.currentText() == \"Boundary\":\n if len(self.TransportationModeBox.text()) > 0:\n G = bk.networkFromPolygon(self.PolyBoundaryBox.text(), self.TransportationModeBox.text())\n else:\n G = bk.networkFromPolygon(self.PolyBoundaryBox.text())\n elif self.DataInputSelection.currentText() == \"Region Name\":\n if len(self.TransportationModeBox.text()) > 0 and len(self.RegionNumberBox.text()) > 0:\n G = bk.networkFromPlaceName(self.RegionNameBox.text(), networkType=self.TransportationModeBox.text(),\n whichResult=self.RegionNumberBox.text())\n elif len(self.TransportationModeBox.text()) > 0 and len(self.RegionNumberBox.text()) == 0:\n G = bk.networkFromPlaceName(self.RegionNameBox.text(), 
networkType=self.TransportationModeBox.text())\n elif len(self.TransportationModeBox.text()) == 0 and len(self.RegionNumberBox.text()) > 0:\n G = bk.networkFromPlaceName(self.RegionNameBox.text(), whichResult=self.RegionNumberBox.text())\n else:\n G = bk.networkFromPlaceName(self.RegionNameBox.text())\n return G\n \n def retranslateUi(self, Frame):\n _translate = QtCore.QCoreApplication.translate\n Frame.setWindowTitle(_translate(\"Frame\", \"Centrality\"))\n self.TransportationModeLabel.setText(_translate(\"Frame\", \"Transportation Mode (Optional)\"))\n self.ShpOutputBrowse.setText(_translate(\"Frame\", \"Browse\"))\n self.WebMapLabel.setText(_translate(\"Frame\", \"Webmap Output Path (Optional)\"))\n self.Execute.setText(_translate(\"Frame\", \"Execute\"))\n self.PolyBrowse.setText(_translate(\"Frame\", \"Browse\"))\n self.DataInputSelection.setItemText(0, _translate(\"Frame\", \"Boundary\"))\n self.DataInputSelection.setItemText(1, _translate(\"Frame\", \"Region Name\"))\n self.HelpButton.setText(_translate(\"Frame\", \"Help\"))\n self.AnalysisMethodSelection.setItemText(0, _translate(\"Frame\", \"Degree\"))\n self.AnalysisMethodSelection.setItemText(1, _translate(\"Frame\", \"Betweenness\"))\n self.AnalysisMethodSelection.setItemText(2, _translate(\"Frame\", \"Closeness\"))\n self.PolyBoundaryLabel.setText(_translate(\"Frame\", \"Polygon Boundary of Area\"))\n self.WebMapOutputBrowse.setText(_translate(\"Frame\", \"Browse\"))\n self.ShpOutputLabel.setText(_translate(\"Frame\", \"Shapefile Output Folder\"))\n self.AnalysisMethodLabel.setText(_translate(\"Frame\", \"Analysis Method\"))\n self.DataInputLabel.setText(_translate(\"Frame\", \"Data Input Method\"))\n self.RegionNameLabel.setText(_translate(\"Frame\", \"Region Name/Result Number\"))\n self.BackButton.setText(_translate(\"Frame\", \"Back\"))\n \nclass Accessibility(object):\n def setupUi(self, Frame):\n Frame.setObjectName(\"Frame\")\n Frame.setFixedSize(870, 230)\n 
Frame.setWindowIcon(QtGui.QIcon(\"interface.ico\"))\n \n font = QtGui.QFont()\n font.setFamily(\"Palatino Linotype\")\n font.setPointSize(9)\n font.setUnderline(False)\n \n font2 = QtGui.QFont()\n font2.setFamily(\"Palatino Linotype\")\n font2.setPointSize(11)\n font2.setBold(True)\n font2.setWeight(75)\n\n self.verticalline = QtWidgets.QFrame(Frame)\n self.verticalline.setGeometry(QtCore.QRect(600, 0, 20, 231))\n self.verticalline.setFrameShape(QtWidgets.QFrame.VLine)\n self.verticalline.setFrameShadow(QtWidgets.QFrame.Sunken)\n \n # Labels\n self.OriginsLabel = QtWidgets.QLabel(Frame)\n self.OriginsLabel.setGeometry(QtCore.QRect(20, 40, 181, 21))\n self.OriginsLabel.setFont(font)\n self.OriginsLabel.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)\n \n self.DestinationsLabel = QtWidgets.QLabel(Frame)\n self.DestinationsLabel.setGeometry(QtCore.QRect(20, 70, 181, 21))\n self.DestinationsLabel.setFont(font)\n self.DestinationsLabel.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)\n\n self.TransportationModeLabel = QtWidgets.QLabel(Frame)\n self.TransportationModeLabel.setGeometry(QtCore.QRect(20, 100, 181, 21))\n self.TransportationModeLabel.setFont(font)\n self.TransportationModeLabel.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)\n\n self.ThresholdLabel = QtWidgets.QLabel(Frame)\n self.ThresholdLabel.setGeometry(QtCore.QRect(20, 130, 181, 21))\n self.ThresholdLabel.setFont(font)\n self.ThresholdLabel.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)\n\n self.WebMapLabel = QtWidgets.QLabel(Frame)\n self.WebMapLabel.setGeometry(QtCore.QRect(20, 160, 181, 21))\n self.WebMapLabel.setFont(font)\n self.WebMapLabel.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)\n\n self.ShpOutputLabel = QtWidgets.QLabel(Frame)\n self.ShpOutputLabel.setGeometry(QtCore.QRect(20, 190, 181, 21))\n 
self.ShpOutputLabel.setFont(font)\n self.ShpOutputLabel.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)\n\n self.AnalysisMethodLabel = QtWidgets.QLabel(Frame)\n self.AnalysisMethodLabel.setGeometry(QtCore.QRect(640, 100, 111, 21))\n self.AnalysisMethodLabel.setFont(font)\n self.AnalysisMethodLabel.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)\n \n # TextBoxes\n self.OriginsBox = QtWidgets.QLineEdit(Frame)\n self.OriginsBox.setGeometry(QtCore.QRect(210, 40, 271, 20))\n \n self.DestinationsBox = QtWidgets.QLineEdit(Frame)\n self.DestinationsBox.setGeometry(QtCore.QRect(210, 70, 271, 20))\n\n self.TransportationModeBox = QtWidgets.QLineEdit(Frame)\n self.TransportationModeBox.setGeometry(QtCore.QRect(210, 100, 271, 20))\n \n self.ThresholdBox = QtWidgets.QLineEdit(Frame)\n self.ThresholdBox.setGeometry(QtCore.QRect(210, 130, 271, 20))\n self.ThresholdBox.setEnabled(False)\n \n self.WebMapBox = QtWidgets.QLineEdit(Frame)\n self.WebMapBox.setGeometry(QtCore.QRect(210, 160, 271, 20))\n \n self.ShpOutputBox = QtWidgets.QLineEdit(Frame)\n self.ShpOutputBox.setGeometry(QtCore.QRect(210, 190, 271, 20))\n \n # Buttons\n self.BackButton = QtWidgets.QPushButton(Frame)\n self.BackButton.setGeometry(QtCore.QRect(0, 0, 149, 24))\n self.BackButton.setFont(font)\n \n self.HelpButton = QtWidgets.QPushButton(Frame)\n self.HelpButton.setGeometry(QtCore.QRect(720, 0, 149, 24))\n self.HelpButton.setFont(font)\n self.HelpButton.clicked.connect(self.getHelp)\n\n self.OriginsBrowse = QtWidgets.QPushButton(Frame)\n self.OriginsBrowse.setGeometry(QtCore.QRect(490, 40, 75, 21))\n self.OriginsBrowse.setFont(font)\n self.OriginsBrowse.clicked.connect(self.openFile)\n \n self.DestinationsBrowse = QtWidgets.QPushButton(Frame)\n self.DestinationsBrowse.setGeometry(QtCore.QRect(490, 70, 75, 21))\n self.DestinationsBrowse.setFont(font)\n self.DestinationsBrowse.clicked.connect(self.openFile2)\n\n self.WebMapOutputBrowse = 
QtWidgets.QPushButton(Frame)\n self.WebMapOutputBrowse.setGeometry(QtCore.QRect(490, 160, 75, 21))\n self.WebMapOutputBrowse.setFont(font)\n self.WebMapOutputBrowse.clicked.connect(self.saveWebmap)\n \n self.ShpOutputBrowse = QtWidgets.QPushButton(Frame)\n self.ShpOutputBrowse.setGeometry(QtCore.QRect(490, 190, 75, 21))\n self.ShpOutputBrowse.setFont(font)\n self.ShpOutputBrowse.clicked.connect(self.saveShapefile)\n \n self.Execute = QtWidgets.QPushButton(Frame)\n self.Execute.setGeometry(QtCore.QRect(730, 180, 121, 41))\n self.Execute.setFont(font2)\n self.Execute.clicked.connect(self.returnedFunction)\n \n # SelectionBoxes\n self.AnalysisMethodSelection = QtWidgets.QComboBox(Frame)\n self.AnalysisMethodSelection.setGeometry(QtCore.QRect(760, 100, 91, 22))\n self.AnalysisMethodSelection.setFont(font)\n self.AnalysisMethodSelection.setLayoutDirection(QtCore.Qt.LeftToRight)\n self.AnalysisMethodSelection.setSizeAdjustPolicy(QtWidgets.QComboBox.AdjustToContentsOnFirstShow)\n self.AnalysisMethodSelection.addItem(\"Potential\")\n self.AnalysisMethodSelection.addItem(\"Daily\")\n self.AnalysisMethodSelection.currentIndexChanged.connect(self.enableSelection)\n\n self.retranslateUi(Frame)\n QtCore.QMetaObject.connectSlotsByName(Frame)\n\n def getHelp(self):\n QtWidgets.QMessageBox.about(None, \"About\", \"* Input origins and destinations data should be in point type shapefile format \\n\\\n* Choose the path where the road network of the area of interest and the result of the analysis is stored in shapefile format via 'Shapefile Output Folder' \\n\\\n* Choose the path where the webmap that is generated by the result of analysis is stored via 'Webmap Output Path'. 
\\n\\\n* Available transportation modes on OSM database are;\\n\\\n 'drive' - get drivable public streets (but not service roads)\\n\\\n 'drive_service' - get drivable public streets, including service roads\\n\\\n 'walk' - get all streets and paths that pedestrians can use (this network type ignores one-way directionality)\\n\\\n 'bike' - get all streets and paths that cyclists can use\\n\\\n 'all' - download all (non-private) OSM streets and paths\\n\\\n 'all_private' - download all OSM streets and paths, including private-access ones\")\n \n def enableSelection(self):\n if self.AnalysisMethodSelection.currentText() == \"Potential\":\n self.ThresholdBox.setEnabled(False)\n elif self.AnalysisMethodSelection.currentText() == \"Daily\":\n self.ThresholdBox.setEnabled(True)\n\n def returnedFunction(self):\n try:\n funcs = {\"Potential\": self.potentialAccessibility,\n \"Daily\": self.dailyAccessibility}\n function = funcs[self.AnalysisMethodSelection.currentText()]\n return function()\n except FileNotFoundError:\n QtWidgets.QMessageBox.critical(None, \"Error\", \"ERROR: Valid output path should be entered\")\n except:\n if len(self.ShpOutputBox.text()) == 0:\n QtWidgets.QMessageBox.critical(None, \"Error\", \"ERROR: Enter output path.\")\n else:\n QtWidgets.QMessageBox.critical(None, \"Error\", \"ERROR: Origins and destinations should be entered in valid format.\")\n\n def origdest(self):\n if len(self.TransportationModeBox.text()) > 0:\n route_geom, nodes, G_proj = bk.origindestination(self.OriginsBox.text(), self.DestinationsBox.text(), \n self.TransportationModeBox.text())\n else:\n route_geom, nodes, G_proj = bk.origindestination(self.OriginsBox.text(), self.DestinationsBox.text())\n return route_geom, nodes, G_proj\n \n def potentialAccessibility(self):\n route_geom, nodes, G_proj = self.origdest()\n if len(self.WebMapBox.text()) > 0:\n bk.potentialAccessibility(route_geom, nodes, G_proj, self.ShpOutputBox.text(),\n self.WebMapBox.text())\n else:\n 
bk.potentialAccessibility(route_geom, nodes, G_proj, self.ShpOutputBox.text())\n \n def dailyAccessibility(self):\n route_geom, nodes, G_proj = self.origdest()\n if len(self.ThresholdBox.text()) > 0 and len(self.WebMapBox.text()) > 0:\n bk.dailyAccessibility(route_geom, nodes, G_proj, self.ShpOutputBox.text(), self.ThresholdBox.text(),\n self.WebMapBox.text())\n elif len(self.ThresholdBox.text()) > 0 and len(self.WebMapBox.text()) > 0:\n bk.dailyAccessibility(route_geom, nodes, G_proj, self.ShpOutputBox.text(), self.ThresholdBox.text())\n elif len(self.ThresholdBox.text()) == 0 and len(self.WebMapBox.text()) == 0:\n bk.dailyAccessibility(route_geom, nodes, G_proj, self.ShpOutputBox.text(), 3000,\n self.WebMapBox.text())\n else:\n bk.dailyAccessibility(route_geom, nodes, G_proj, self.ShpOutputBox.text())\n \n def openFile(self):\n orig = QtWidgets.QFileDialog.getOpenFileName()\n orig_text = str(orig[0])\n self.OriginsBox.setText(orig_text)\n \n def openFile2(self):\n dest = QtWidgets.QFileDialog.getOpenFileName()\n dest_text = str(dest[0])\n self.DestinationsBox.setText(dest_text)\n \n def saveWebmap(self):\n path = QtWidgets.QFileDialog.getSaveFileName(None, \"Select Directory\", \"webmap.html\", \"HTML Files (*.html)\")\n self.WebMapBox.setText(str(path[0]))\n \n def saveShapefile(self):\n path2 = QtWidgets.QFileDialog.getExistingDirectory(None, \"Select Directory\")\n self.ShpOutputBox.setText(path2)\n \n def retranslateUi(self, Frame):\n _translate = QtCore.QCoreApplication.translate\n Frame.setWindowTitle(_translate(\"Frame\", \"Accessibility\"))\n self.Execute.setText(_translate(\"Frame\", \"Execute\"))\n self.DestinationsLabel.setText(_translate(\"Frame\", \"Destinations\"))\n self.WebMapOutputBrowse.setText(_translate(\"Frame\", \"Browse\"))\n self.AnalysisMethodLabel.setText(_translate(\"Frame\", \"Analysis Method\"))\n self.OriginsLabel.setText(_translate(\"Frame\", \"Origins\"))\n self.AnalysisMethodSelection.setItemText(0, _translate(\"Frame\", 
\"Potential\"))\n self.AnalysisMethodSelection.setItemText(1, _translate(\"Frame\", \"Daily\"))\n self.TransportationModeLabel.setText(_translate(\"Frame\", \"Transportation Mode (Optional)\"))\n self.WebMapLabel.setText(_translate(\"Frame\", \"Webmap Output Path (Optional)\"))\n self.ShpOutputBrowse.setText(_translate(\"Frame\", \"Browse\"))\n self.ThresholdLabel.setText(_translate(\"Frame\", \"Distance Threshold (m)\"))\n self.BackButton.setText(_translate(\"Frame\", \"Back\"))\n self.DestinationsBrowse.setText(_translate(\"Frame\", \"Browse\"))\n self.OriginsBrowse.setText(_translate(\"Frame\", \"Browse\"))\n self.ShpOutputLabel.setText(_translate(\"Frame\", \"Shapefile Output Folder\"))\n self.HelpButton.setText(_translate(\"Frame\", \"Help\"))\n \ndef main():\n app = QtWidgets.QApplication(sys.argv)\n app.setApplicationName(\"SATRAP\")\n mywindow = Controller()\n mywindow.open_main()\n sys.exit(app.exec_())\n \nif __name__ == \"__main__\":\n main()"
},
{
"alpha_fraction": 0.5973644852638245,
"alphanum_fraction": 0.6039765477180481,
"avg_line_length": 40.134098052978516,
"blob_id": "f7b68dd45eacadf189489044f21da68c2eb49be2",
"content_id": "956d9b72c7d141318fd584e21698f3ea8637d76a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 21476,
"license_type": "no_license",
"max_line_length": 144,
"num_lines": 522,
"path": "/OldVersions/v0.9--Tkinter_thesis/backend.py",
"repo_name": "ITU-Photogrammetry-Lab/satrap",
"src_encoding": "UTF-8",
"text": "import osmnx as ox\nimport networkx as nx\nimport geopandas as gpd\nimport pandas as pd\nfrom shapely.geometry import LineString\nfrom fiona.crs import from_epsg\nimport branca.colormap as cm\nimport folium\nimport pysal\nimport time\n\ndef networkFromPlaceName(regionName, networkType=\"all\", whichResult=1):\n \"\"\"\n Create a networkx graph from OSM database within the spatial boundaries of user entered region name.\n \n Parameters\n ----------\n regionName: string\n name of the area where the road network data is obtained\n networkType: string\n what type of street network to get\n whichResult: integer\n resulting number that returns polygon data from the\n query of OpenStreetMap database\n \n Returns\n -------\n G: networkx multidigraph\n road network of region\n \"\"\"\n \n G = ox.graph_from_place(regionName, network_type=str(networkType), which_result=int(whichResult))\n return G\n \ndef networkFromPolygon(shpLocation, *args):\n \"\"\"\n Create a networkx graph from OSM database within the spatial boundaries of user entered shapefile.\n \n Parameters\n ----------\n name: string\n absolute path of the shapefile to be used\n *args: string\n what type of street network to get\n \n Returns\n -------\n G: networkx multidigraph\n road network of region\n \"\"\"\n \n boundary = gpd.read_file(shpLocation)\n boundary[\"geometry\"] = boundary[\"geometry\"].to_crs(epsg=4326)\n boundary.crs = from_epsg(4326)\n geometry = boundary.geometry.values[0]\n try:\n argument = [i for i in args]\n G = ox.graph_from_polygon(geometry, clean_periphery=False, network_type=argument[0])\n except:\n G = ox.graph_from_polygon(geometry, clean_periphery=False)\n return G\n\ndef getPolyData(regionName, saveFolder, saveName, whichResult=1): \n \"\"\"\n Store the shapefile of a region from a single place name query.\n\n Parameters\n ----------\n regionName: string\n name of the area where the road network data is obtained\n saveFolder: string\n absolute path to save the shapefile\n 
saveName: string\n name of the shapefile\n whichResult: integer\n resulting number that returns polygon data from the query of OpenStreetMap database\n \n Returns\n -------\n ############################################\n \"\"\"\n city = ox.gdf_from_place(regionName, which_result=int(whichResult))\n ox.save_gdf_shapefile(city, filename=saveName, folder=saveFolder)\n print(\"Polygon Boundaries taken\")\n \ndef degreeCentrality(G, outPath, *args):\n \"\"\"\n Compute degree centrality of a node and save the output and road network\n data in shapefile format.\n\n Parameters\n ----------\n G: graph\n A networkx graph\n outpath: string\n absolute path to save the output of analysis\n *args: string\n path to save the output of analysis in html webmap format\n \n Returns\n -------\n ################################################################\n \"\"\"\n \n process_start_time = time.time()\n G_proj = nx.MultiGraph(ox.project_graph(G)) \n nodes, edges = ox.graph_to_gdfs(G_proj)\n nodes[\"x\"] = nodes[\"x\"].astype(float)\n \n degree_centrality = nx.degree_centrality(G_proj)\n degree = gpd.GeoDataFrame(pd.Series(degree_centrality), columns=[\"degree\"])\n nodes_dgr = nodes.merge(degree, left_index=True, right_index=True)\n \n argument = [i for i in args]\n if any(argument): \n linear = cm.LinearColormap([\"blue\", \"yellow\", \"red\"], vmin=0, vmax=5)\n nodes_dgr['geoid'] = nodes_dgr.index.astype(str)\n lat = list(nodes_dgr[\"lat\"])\n lon = list(nodes_dgr[\"lon\"])\n dgr = list(nodes_dgr[\"degree\"])\n name = list(nodes_dgr[\"osmid\"])\n \n #map = folium.Map(location=[nodes_dgr[\"lat\"].mean(), nodes_dgr[\"lon\"].mean()], zoom_start=13, control_scale=True, prefer_canvas=True)\n m = ox.plot_graph_folium(G, edge_width=1.5)\n fgdgr = folium.FeatureGroup(name=\"Degrees of Nodes\")\n \n classifier = pysal.viz.mapclassify.Natural_Breaks.make(k=5)\n classifications = nodes_dgr[[\"degree\"]].apply(classifier)\n classifications.columns = [\"class\"]\n nodes_dgr = 
nodes_dgr.join(classifications)\n \n for lt, ln, dg, nm, cl in zip(lat, lon, dgr, name, list(nodes_dgr[\"class\"])):\n fgdgr.add_child(folium.CircleMarker(location=[lt, ln], popup='Degree Centrality of the {}. node is {}'.format(str(nm), str(dg)),\n radius=4, fill_color=linear(cl), color=\"grey\", fill_opacity=1, weight=0.5))\n \n #fgavg.add_child(marker_cluster)\n m.add_child(fgdgr)\n #m.add_child(linear)\n m.save(argument[0])\n \n ox.save_graph_shapefile(G_proj, filename=\"network\", folder=outPath)\n nodes_dgr.to_file(outPath + \"/degree_centrality.shp\")\n print(\"--- {0} seconds ---\".format(time.time()-process_start_time))\n else:\n ox.save_graph_shapefile(G_proj, filename=\"network\", folder=outPath)\n nodes_dgr.to_file(outPath + \"/degree_centrality.shp\")\n print(\"--- {0} seconds ---\".format(time.time()-process_start_time))\n \ndef betweennessCentrality(G, outPath, *args):\n \"\"\"\n Compute betweenness centrality of a node and save the output and road network\n data in shapefile format.\n\n Parameters\n ----------\n G: graph\n A networkx graph\n outpath: string\n absolute path to save the output of analysis\n *args: string\n path to save the output of analysis in html webmap format\n \n Returns\n -------\n ################################################################\n \"\"\"\n \n process_start_time = time.time()\n G_proj = nx.MultiGraph(ox.project_graph(G)) \n nodes, edges = ox.graph_to_gdfs(G_proj)\n nodes[\"x\"] = nodes[\"x\"].astype(float)\n \n btw_centrality = nx.betweenness_centrality(G_proj)\n betweenness = gpd.GeoDataFrame(pd.Series(btw_centrality), columns=[\"betweenness\"])\n nodes_btw = nodes.merge(betweenness, left_index=True, right_index=True)\n \n argument = [i for i in args]\n if any(argument):\n linear = cm.LinearColormap([\"blue\", \"yellow\", \"red\"], vmin=0, vmax=5)\n nodes_btw['geoid'] = nodes_btw.index.astype(str)\n lat = list(nodes_btw[\"lat\"])\n lon = list(nodes_btw[\"lon\"])\n btw = list(nodes_btw[\"betweenness\"])\n name = 
list(nodes_btw[\"osmid\"])\n \n m = ox.plot_graph_folium(G, edge_width=1.5)\n fgbtw = folium.FeatureGroup(name=\"Betweenness Centrality of Nodes\")\n \n classifier = pysal.viz.mapclassify.Natural_Breaks.make(k=5)\n classifications = nodes_btw[[\"betweenness\"]].apply(classifier)\n classifications.columns = [\"class\"] \n nodes_btw = nodes_btw.join(classifications)\n \n for lt, ln, bt, nm, cl in zip(lat, lon, btw, name, list(nodes_btw[\"class\"])):\n fgbtw.add_child(folium.CircleMarker(location=[lt, ln], popup='Betweenness Degree of the {}. node is {}'.format(str(nm), str(bt)),\n radius=4, fill_color=linear(cl), color=\"grey\", fill_opacity=1, weight=0.5))\n \n m.add_child(fgbtw)\n m.save(argument[0])\n \n ox.save_graph_shapefile(G_proj, filename=\"network\", folder=outPath)\n nodes_btw.to_file(outPath + \"/btw_centrality.shp\")\n print(\"--- {0} seconds ---\".format(time.time()-process_start_time))\n else:\n ox.save_graph_shapefile(G_proj, filename=\"network\", folder=outPath)\n nodes_btw.to_file(outPath + \"/btw_centrality.shp\")\n print(\"--- {0} seconds ---\".format(time.time()-process_start_time))\n \ndef closenessCentrality(G, outPath, *args): \n \"\"\"\n Compute closeness centrality of a node and save the output and road network\n data in shapefile format.\n\n Parameters\n ----------\n G: graph\n A networkx graph\n outpath: string\n path to save the output of analysis\n *args: string\n path to save the output of analysis in html webmap format\n \n Returns\n -------\n ################################################################\n \"\"\"\n \n process_start_time = time.time()\n G_proj = nx.MultiGraph(ox.project_graph(G)) \n nodes, edges = ox.graph_to_gdfs(G_proj)\n nodes[\"x\"] = nodes[\"x\"].astype(float)\n \n cls_centrality = nx.closeness_centrality(G_proj)\n closeness = gpd.GeoDataFrame(pd.Series(cls_centrality), columns=[\"closeness\"])\n nodes_cls = nodes.merge(closeness, left_index=True, right_index=True)\n \n argument = [i for i in args]\n if 
any(argument): \n linear = cm.LinearColormap([\"blue\", \"yellow\", \"red\"], vmin=0, vmax=5)\n nodes_cls['geoid'] = nodes_cls.index.astype(str)\n lat = list(nodes_cls[\"lat\"])\n lon = list(nodes_cls[\"lon\"])\n clsn = list(nodes_cls[\"closeness\"])\n name = list(nodes_cls[\"osmid\"])\n \n m = ox.plot_graph_folium(G, edge_width=1.5)\n fgcls = folium.FeatureGroup(name=\"Closeness Centrality of Nodes\")\n \n classifier = pysal.viz.mapclassify.Natural_Breaks.make(k=5)\n classifications = nodes_cls[[\"closeness\"]].apply(classifier)\n classifications.columns = [\"class\"]\n nodes_cls = nodes_cls.join(classifications)\n \n for lt, ln, cl, nm, clss in zip(lat, lon, clsn , name, list(nodes_cls[\"class\"])):\n fgcls.add_child(folium.CircleMarker(location=[lt, ln], popup='Closeness Degree of the {}. node is {}'.format(str(nm), str(cl)),\n radius=4, fill_color=linear(clss), color=\"grey\", fill_opacity=1, weight=0.5))\n \n m.add_child(fgcls)\n m.save(argument[0])\n \n ox.save_graph_shapefile(G_proj, filename=\"network\", folder=outPath)\n nodes_cls.to_file(outPath + \"/cls_centrality.shp\")\n print(\"--- {0} seconds ---\".format(time.time()-process_start_time))\n else:\n ox.save_graph_shapefile(G_proj, filename=\"network\", folder=outPath)\n nodes_cls.to_file(outPath + \"/cls_centrality.shp\")\n print(\"--- {0} seconds ---\".format(time.time()-process_start_time))\n\ndef origindestination(originPath, destinationPath, networkType=\"all\"):\n \"\"\"\n Create a bounding box from user entered origin and destination points, get the corresponding road network\n and form the origin destination cost matrix.\n\n Parameters\n ----------\n originPath: string\n absolute path of the origin shapefile to be used\n destinationPath: string\n absolute path of the destination shapefile to be used\n networkType: string\n what type of street network to get\n \n Returns\n -------\n route_geom: GeoDataFrame\n geopandas dataframe of routes\n nodes: GeoDataFrame\n geopandas dataframe of nodes\n 
G_proj: networkx multidigraph\n road network of closed region that is reprojected in corresponding UTM zone\n \"\"\"\n \n global od_start_time\n od_start_time = time.time()\n origins = gpd.read_file(originPath)\n destinations = gpd.read_file(destinationPath)\n \n origins[\"geometry\"] = origins[\"geometry\"].to_crs(epsg=4326)\n destinations[\"geometry\"] = destinations[\"geometry\"].to_crs(epsg=4326)\n origins.crs = from_epsg(4326)\n destinations.crs = from_epsg(4326)\n \n points_concat = gpd.GeoDataFrame(pd.concat([origins, destinations], ignore_index=True, sort=True))\n minx, miny, maxx, maxy = points_concat.geometry.total_bounds\n \n G = ox.graph_from_bbox(maxy+0.005, miny-0.005, maxx+0.005, minx-0.005, clean_periphery=False, network_type=str(networkType)) \n \n G_proj = nx.MultiGraph(ox.project_graph(G))\n nodes, edges = ox.graph_to_gdfs(G_proj)\n nodes[\"x\"] = nodes[\"x\"].astype(float)\n\n origins = origins.to_crs(nodes.crs)\n destinations = destinations.to_crs(nodes.crs)\n origins[\"x\"] = origins.geometry.x\n origins[\"y\"] = origins.geometry.y\n destinations[\"x\"] = destinations.geometry.x\n destinations[\"y\"] = destinations.geometry.y\n \n # Origin ve Destination noktalarina en yakin nodelari almak\n orignodes = list(ox.get_nearest_nodes(G_proj, origins[\"x\"], origins[\"y\"], method=\"kdtree\"))\n destnodes = list(ox.get_nearest_nodes(G_proj, destinations[\"x\"], destinations[\"y\"], method=\"kdtree\"))\n\n # Yollari bulup Folium'a uygun hale getirmek\n routelist = []\n routelinelist = []\n routenodeslist = []\n for dest in destnodes:\n for org in orignodes:\n try:\n route = nx.shortest_path(G_proj, org, dest, weight=\"length\")\n route_nodes = nodes.loc[route]\n route_line = LineString(list(route_nodes.geometry.values))\n routelist.append(route) \n routenodeslist.append(route_nodes)\n routelinelist.append(route_line)\n except:\n print(\"No path from {} to {}\".format(org, dest))\n \n route_geom = gpd.GeoDataFrame(crs=edges.crs)\n 
route_geom[\"geometry\"] = routelinelist\n route_geom[\"length\"] = route_geom.length\n \n fromlist = []\n tolist = []\n for df in routenodeslist:\n fromid = df.iloc[0, 3]\n fromlist.append(fromid)\n toid = df.iloc[-1, 3]\n tolist.append(toid)\n \n route_geom[\"from_id\"] = fromlist\n route_geom[\"to_id\"] = tolist\n return route_geom, nodes, G_proj\n\ndef potentialAccessibility(gdf, nodes, G, outPath, *args):\n \"\"\"\n Write the result of potential accessibility analysis to shapefile and webmap*.\n *Optional\n\n Parameters\n ----------\n route_geom: GeoDataFrame\n geopandas dataframe of routes\n nodes: GeoDataFrame\n geopandas dataframe of nodes\n G_proj: networkx multidigraph\n road network\n *args: string\n path to save the output of analysis in html webmap format\n \n Returns\n -------\n ####################################################\n \"\"\"\n \n gdf[\"access\"] = (1 / (gdf[\"length\"]/1000))\n dest_grouped = ((gdf.groupby(\"to_id\")).sum()).merge(nodes, left_index=True, right_on=\"osmid\")\n origin_grouped = ((gdf.groupby(\"from_id\")).sum()).merge(nodes, left_index=True, right_on=\"osmid\")\n argument = [i for i in args]\n if any(argument):\n linear = cm.LinearColormap([\"blue\", \"yellow\", \"red\"], vmin=0, vmax=5)\n dest_grouped['geoid'] = dest_grouped.index.astype(str)\n origin_grouped['geoid'] = origin_grouped.index.astype(str)\n \n latd = list(dest_grouped[\"lat\"])\n lond = list(dest_grouped[\"lon\"])\n accd = list(dest_grouped[\"access\"])\n named = list(dest_grouped[\"osmid\"])\n \n lato = list(origin_grouped[\"lat\"])\n lono = list(origin_grouped[\"lon\"])\n acco = list(origin_grouped[\"access\"])\n nameo = list(origin_grouped[\"osmid\"])\n \n m = folium.Map(location=[dest_grouped[\"lat\"].mean(), dest_grouped[\"lon\"].mean()], zoom_start=13, control_scale=True, prefer_canvas=True)\n #m = ox.plot_graph_folium(G, edge_width=1.5)\n \n fgdest = folium.FeatureGroup(name=\"Destination Accessibility\")\n \n classifier = 
pysal.viz.mapclassify.Natural_Breaks.make(k=5)\n classifications = dest_grouped[[\"access\"]].apply(classifier)\n classifications.columns = [\"class\"]\n dest_grouped = dest_grouped.join(classifications)\n \n for lt, ln, av, nm, cl in zip(latd, lond, accd, named, list(dest_grouped[\"class\"])):\n fgdest.add_child(folium.CircleMarker(location=[lt, ln], popup='Accessibilty of {}. node is {}'.format(str(nm), str(av)),\n radius=4, fill_color=linear(cl), color=\"grey\", fill_opacity=1, weight=0.5))\n \n fgorig = folium.FeatureGroup(name=\"Origin Accessibility\")\n \n classifications2 = origin_grouped[[\"access\"]].apply(classifier)\n classifications2.columns = [\"class\"]\n origin_grouped = origin_grouped.join(classifications2)\n \n for lto, lno, avo, nmo, clo in zip(lato, lono, acco, nameo, list(origin_grouped[\"class\"])):\n fgorig.add_child(folium.CircleMarker(location=[lto, lno], popup='Accessibilty of {}. node is {}'.format(str(nmo), str(avo)),\n radius=4, fill_color=linear(clo), color=\"grey\", fill_opacity=1, weight=0.5))\n \n #fgavg.add_child(marker_cluster)\n m.add_child(fgdest)\n m.add_child(fgorig)\n folium.LayerControl().add_to(m)\n m.save(argument[0])\n origin_gdf = gpd.GeoDataFrame(origin_grouped, crs=nodes.crs)\n dest_gdf = gpd.GeoDataFrame(dest_grouped, crs=nodes.crs)\n ox.save_graph_shapefile(G, filename=\"network\", folder=outPath)\n origin_gdf.to_file(outPath + \"/origins.shp\")\n dest_gdf.to_file(outPath + \"/destinations.shp\")\n print(\"--- {0} seconds ---\".format(time.time()-od_start_time))\n else:\n origin_gdf = gpd.GeoDataFrame(origin_grouped, crs=nodes.crs)\n dest_gdf = gpd.GeoDataFrame(dest_grouped, crs=nodes.crs)\n ox.save_graph_shapefile(G, filename=\"network\", folder=outPath)\n origin_gdf.to_file(outPath + \"/origins.shp\")\n dest_gdf.to_file(outPath + \"/destinations.shp\")\n print(\"--- {0} seconds ---\".format(time.time()-od_start_time))\n \ndef dailyAccessibility(gdf, nodes, G, outPath, threshold=3000, *args):\n \"\"\"\n Write the 
result of daily accessibility analysis to shapefile and webmap*.\n *Optional\n\n Parameters\n ----------\n route_geom: GeoDataFrame\n geopandas dataframe of routes\n nodes: GeoDataFrame\n geopandas dataframe of nodes\n G_proj: networkx multidigraph\n road network\n threshold: integer or float\n threshold value for daily accessibility computations\n *args: string\n path to save the output of analysis in html webmap format\n \n Returns\n -------\n ####################################################\n \"\"\"\n \n gdf = gdf[gdf[\"length\"] < int(threshold)]\n gdf[\"access\"] = (1 / (gdf[\"length\"]/1000))\n dest_grouped = ((gdf.groupby(\"to_id\")).sum()).merge(nodes, left_index=True, right_on=\"osmid\")\n origin_grouped = ((gdf.groupby(\"from_id\")).sum()).merge(nodes, left_index=True, right_on=\"osmid\")\n argument = [i for i in args]\n if any(argument):\n linear = cm.LinearColormap([\"blue\", \"yellow\", \"red\"], vmin=0, vmax=5)\n dest_grouped['geoid'] = dest_grouped.index.astype(str)\n origin_grouped['geoid'] = origin_grouped.index.astype(str)\n \n latd = list(dest_grouped[\"lat\"])\n lond = list(dest_grouped[\"lon\"])\n accd = list(dest_grouped[\"access\"])\n named = list(dest_grouped[\"osmid\"])\n \n lato = list(origin_grouped[\"lat\"])\n lono = list(origin_grouped[\"lon\"])\n acco = list(origin_grouped[\"access\"])\n nameo = list(origin_grouped[\"osmid\"])\n \n m = folium.Map(location=[dest_grouped[\"lat\"].mean(), dest_grouped[\"lon\"].mean()], zoom_start=13, control_scale=True, prefer_canvas=True)\n #m = ox.plot_graph_folium(G, edge_width=1.5)\n \n fgdest = folium.FeatureGroup(name=\"Destination Accessibility\")\n \n classifier = pysal.viz.mapclassify.Natural_Breaks.make(k=5)\n classifications = dest_grouped[[\"access\"]].apply(classifier)\n classifications.columns = [\"class\"]\n dest_grouped = dest_grouped.join(classifications)\n \n for lt, ln, av, nm, cl in zip(latd, lond, accd, named, list(dest_grouped[\"class\"])):\n 
fgdest.add_child(folium.CircleMarker(location=[lt, ln], popup='Accessibilty of {}. node is {}'.format(str(nm), str(av)),\n radius=4, fill_color=linear(cl), color=\"grey\", fill_opacity=1, weight=0.5))\n \n fgorig = folium.FeatureGroup(name=\"Origin Accessibility\")\n \n classifications2 = origin_grouped[[\"access\"]].apply(classifier)\n classifications2.columns = [\"class\"]\n origin_grouped = origin_grouped.join(classifications2)\n \n for lto, lno, avo, nmo, clo in zip(lato, lono, acco, nameo, list(origin_grouped[\"class\"])):\n fgorig.add_child(folium.CircleMarker(location=[lto, lno], popup='Accessibilty of {}. node is {}'.format(str(nmo), str(avo)),\n radius=4, fill_color=linear(clo), color=\"grey\", fill_opacity=1, weight=0.5))\n \n #fgavg.add_child(marker_cluster)\n m.add_child(fgdest)\n m.add_child(fgorig)\n folium.LayerControl().add_to(m)\n m.save(argument[0])\n origin_gdf = gpd.GeoDataFrame(origin_grouped, crs=nodes.crs)\n dest_gdf = gpd.GeoDataFrame(dest_grouped, crs=nodes.crs)\n ox.save_graph_shapefile(G, filename=\"network\", folder=outPath)\n origin_gdf.to_file(outPath + \"/origins.shp\")\n dest_gdf.to_file(outPath + \"/destinations.shp\")\n print(\"--- {0} seconds ---\".format(time.time()-od_start_time))\n else:\n origin_gdf = gpd.GeoDataFrame(origin_grouped, crs=nodes.crs)\n dest_gdf = gpd.GeoDataFrame(dest_grouped, crs=nodes.crs)\n ox.save_graph_shapefile(G, filename=\"network\", folder=outPath)\n origin_gdf.to_file(outPath + \"/origins.shp\")\n dest_gdf.to_file(outPath + \"/destinations.shp\")\n print(\"--- {0} seconds ---\".format(time.time()-od_start_time))\n "
},
{
"alpha_fraction": 0.5924605131149292,
"alphanum_fraction": 0.6140686273574829,
"avg_line_length": 50.77362823486328,
"blob_id": "cb40e2a09c553d1cac11c224d0c8e910e2346ea5",
"content_id": "00a14e473eb140816d636653f24589580015e8e5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 23556,
"license_type": "no_license",
"max_line_length": 172,
"num_lines": 455,
"path": "/OldVersions/v0.9--Tkinter_thesis/frontend.py",
"repo_name": "ITU-Photogrammetry-Lab/satrap",
"src_encoding": "UTF-8",
"text": "import tkinter as tk\nfrom tkinter import ttk\nfrom tkinter.filedialog import askopenfilename, asksaveasfilename, askdirectory\nimport backend as bk\nfrom PIL import ImageTk, Image\n\nclass Application(tk.Tk):\n \n def __init__(self):\n tk.Tk.__init__(self)\n tk.Tk.wm_title(self, \"SATRAP\")\n tk.Tk.geometry(self, \"1000x200\")\n tk.Tk.minsize(self, width=200, height=200)\n tk.Tk.iconbitmap(self, default=\"interface.ico\")\n \n container = tk.Frame(self)\n container.pack(side=\"top\", fill=\"both\", expand=True)\n container.grid_rowconfigure(0, weight=1)\n container.grid_columnconfigure(0, weight=1)\n \n self.frames = {}\n \n for F in (StartPage, HelpMenu, Centrality, Accessibility, GetPoly):\n frame = F(container, self)\n self.frames[F] = frame\n frame.grid(row=0, column=0, sticky=\"nsew\")\n \n self.show_frame(StartPage)\n \n def show_frame(self, cont=None):\n if cont is None:\n # show last frame\n cont = self.lastcont\n frame = self.frames[cont]\n frame.tkraise()\n if cont != HelpMenu:\n self.lastcont = cont\n\nclass StartPage(tk.Frame):\n \n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n \n tk.Frame.update(self)\n tk.Frame.grid_columnconfigure(self, 0, weight=1)\n tk.Frame.grid_columnconfigure(self, 1, weight=1)\n tk.Frame.grid_columnconfigure(self, 2, weight=1)\n tk.Frame.grid_columnconfigure(self, 3, weight=1)\n tk.Frame.grid_columnconfigure(self, 4, weight=1)\n tk.Frame.grid_columnconfigure(self, 5, weight=1)\n# self.createCanvasImage()\n \n methodLabel = tk.Label(self, text=\"Choose Method\", font=(\"Verdana\", 11))\n methodLabel.grid(row=3, column=2)\n \n centButton = ttk.Button(self, text=\"Centrality Analysis\",\n command=lambda: controller.show_frame(Centrality))\n centButton.grid(row=4, column=2)\n \n accessButton = ttk.Button(self, text=\"Accessibility Analysis\",\n command=lambda: controller.show_frame(Accessibility))\n accessButton.grid(row=5, column=2)\n \n getStudyAreabutton = ttk.Button(self, text=\"Get Study 
Area\",\n command=lambda: controller.show_frame(GetPoly))\n getStudyAreabutton.grid(row=0, column=0)\n \n# def createCanvasImage(self):\n# canvas = tk.Canvas(self, width=1000, height=200)\n# canvas.grid(row=0, column=0, rowspan=10, columnspan=7)\n# = Image.open(r\"C:\\Users\\OmrAkn\\Desktop\\YuksekLisansTezUygulama\\SATRAP\\s5uaze.png\")\n# canvas.image = ImageTk.PhotoImage(img)\n# canvas.create_image(0, 0, image=canvas.image, anchor=tk.NW)\n\nclass GetPoly(tk.Frame):\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n \n tk.Frame.update(self)\n tk.Frame.grid_columnconfigure(self, 0, weight=1)\n tk.Frame.grid_columnconfigure(self, 1, weight=1)\n tk.Frame.grid_columnconfigure(self, 2, weight=1)\n tk.Frame.grid_columnconfigure(self, 3, weight=1)\n tk.Frame.grid_columnconfigure(self, 4, weight=1)\n tk.Frame.grid_columnconfigure(self, 5, weight=1)\n tk.Frame.grid_columnconfigure(self, 6, weight=1)\n tk.Frame.grid_columnconfigure(self, 7, weight=1)\n# self.createCanvasImage()\n \n backButton = ttk.Button(self, text=\"Back\",\n command=lambda: controller.show_frame(StartPage))\n backButton.grid(row=0, column=0, sticky=tk.W)\n \n helpButton = ttk.Button(self, text=\"Help\",\n command=lambda: controller.show_frame(HelpMenu))\n helpButton.grid(row=0, column=2, sticky=tk.E) \n \n tk.Label(self, text=\"Region Name / Result Number\").grid(row=1, sticky=tk.E)\n tk.Label(self, text=\"Shapefile Output Folder\").grid(row=2, sticky=tk.E)\n tk.Label(self, text=\"Shapefile Output Name\").grid(row=3, sticky=tk.E)\n self.placeName = tk.StringVar(self)\n self.e2 = tk.Entry(self, width=35, font=\"Times 9\", textvariable=self.placeName)\n self.e2.grid(row=1, column=1, sticky=tk.W) \n self.saveName = tk.StringVar(self)\n self.e3 = tk.Entry(self, width=40, font=\"Times 9\", textvariable=self.saveName)\n self.e3.grid(row=3, column=1, sticky=tk.W)\n self.whichResult = tk.StringVar(self)\n self.e4 = tk.Entry(self, width=5, font=\"Times 9\", 
textvariable=self.whichResult)\n self.e4.grid(row=1, column=1, sticky=tk.E)\n ttk.Button(self, text=\"Browse\", command=self.SaveFile).grid(row=2, column=2, sticky=tk.W)\n self.shpOutput = tk.Text(self, height=1, width=40, font=\"Times 9\")\n self.shpOutput.grid(row=2, column=1, sticky=tk.W)\n ttk.Button(self, text=\"Execute\", width=12, command=self.getPoly).grid(row=4, column=1)\n \n def SaveFile(self):\n global shpLocation\n shpLocation = askdirectory(initialdir=\"\\\\\",\n title = \"Choose output folder for shapefiles\")\n self.shpOutput.delete(\"1.0\", tk.END)\n self.shpOutput.insert(tk.END, shpLocation)\n \n def getPoly(self):\n if len(self.whichResult.get()) > 0:\n bk.getPolyData(self.placeName.get(), self.shpOutput.get(\"1.0\",'end-1c'), self.saveName.get(), whichResult=self.whichResult.get())\n elif not self.whichResult.get():\n bk.getPolyData(self.placeName.get(), self.shpOutput.get(\"1.0\",'end-1c'), self.saveName.get())\n \n# def createCanvasImage(self):\n# canvas = tk.Canvas(self, width=1000, height=200)\n# canvas.grid(row=0, column=0, rowspan=10, columnspan=7)\n# img = Image.open(r\"C:\\Users\\OmrAkn\\Desktop\\YuksekLisansTezUygulama\\SATRAP\\s5uaze.png\")\n# canvas.image = ImageTk.PhotoImage(img)\n# canvas.create_image(0, 0, image=canvas.image, anchor=tk.NW)\n \nclass HelpMenu(tk.Frame):\n \n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n# self.createCanvasImage()\n \n ttk.Button(self, text=\"Back\", command=lambda: controller.show_frame()).grid(row=0, column=0, sticky=tk.W)\n \n tk.Label(self, text=\"1) Input polygon boundary data should be in polygon shapefile format and origins or destinations data should be in point shapefile format.\\n\\\n2) Region name should be checked first on https://nominatim.openstreetmap.org. 
If there is no region name on the website search, region's network is unaccessible.\\n\\\n3) User should choose the path where the road network of the area of interest and the result of the analysis is stored in shapefile format via 'Shapefile Output Folder' \\n\\\nOptional Selections; \\n\\\n4) Available transportation modes on OSM database are;\\n\\\n 'drive' - get drivable public streets (but not service roads)\\n\\\n 'drive_service' - get drivable public streets, including service roads\\n\\\n 'walk' - get all streets and paths that pedestrians can use (this network type ignores one-way directionality)\\n\\\n 'bike' - get all streets and paths that cyclists can use\\n\\\n 'all' - download all (non-private) OSM streets and paths\\n\\\n 'all_private' - download all OSM streets and paths, including private-access ones\\n\\\n5) User could choose the path where the webmap that is generated by the result of analysis is stored.\", font=(\"Verdana\", 8), justify=tk.LEFT).grid(row=1, sticky=tk.E)\n \n# def createCanvasImage(self):\n# canvas = tk.Canvas(self, width=1000, height=200)\n# canvas.grid(row=0, column=0, rowspan=10, columnspan=7)\n# img = Image.open(r\"C:\\Users\\OmrAkn\\Desktop\\YuksekLisansTezUygulama\\SATRAP\\s5uaze.png\")\n# canvas.image = ImageTk.PhotoImage(img)\n# canvas.create_image(0, 0, image=canvas.image, anchor=tk.NW)\n \nclass Centrality(tk.Frame):\n \n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent) \n tk.Frame.update(self)\n tk.Frame.grid_columnconfigure(self, 0, weight=1)\n tk.Frame.grid_columnconfigure(self, 1, weight=1)\n tk.Frame.grid_columnconfigure(self, 2, weight=1)\n tk.Frame.grid_columnconfigure(self, 3, weight=1)\n tk.Frame.grid_columnconfigure(self, 4, weight=1)\n tk.Frame.grid_columnconfigure(self, 5, weight=1)\n# self.createCanvasImage()\n \n backButton = ttk.Button(self, text=\"Back\",\n command=lambda: controller.show_frame(StartPage))\n backButton.grid(row=0, column=0, sticky=tk.W)\n \n helpButton = 
ttk.Button(self, text=\"Help\",\n command=lambda: controller.show_frame(HelpMenu))\n helpButton.grid(row=0, column=5, sticky=tk.E) \n \n # Alghoritms\n optionlist = [\"Degree\", \"Betweenness\", \"Closeness\"]\n self.dropvar = tk.StringVar(self)\n self.dropvar.set(\"Degree\")\n tk.Label(self, text=\"Analysis method\").grid(row=3, column=5, columnspan=2)\n dropmenu = tk.OptionMenu(self, self.dropvar, *optionlist)\n dropmenu.grid(row=4, column=5, columnspan=2)\n \n # Data Acquisiton Method\n optionlist2 = [\"From boundary\", \"From region name\"]\n self.dropvar2 = tk.StringVar(self)\n self.dropvar2.set(\"From boundary\")\n tk.Label(self, text=\"Data input method\").grid(row=3, column=3, columnspan=2)\n dropmenu2 = tk.OptionMenu(self, self.dropvar2, *optionlist2)\n dropmenu2.grid(row=4, column=3, columnspan=2)\n self.dropvar2.trace(\"w\", self.chgdrp)\n\n # Buttons & Labes\n tk.Label(self, text=\"Polygon Boundary of area\").grid(row=2, sticky=tk.E)\n tk.Label(self, text=\"Region Name / Result Number\").grid(row=3, sticky=tk.E)\n tk.Label(self, text=\"Transportation Mode (Optional)\").grid(row=4, sticky=tk.E)\n tk.Label(self, text=\"Webmap Output Path (Optional)\").grid(row=5, sticky=tk.E)\n tk.Label(self, text=\"Shapefile Output Folder\").grid(row=6, sticky=tk.E)\n self.browseButton = ttk.Button(self, text=\"Browse\", command=self.OpenFile)\n self.browseButton.grid(row=2, column=2, sticky=tk.W)\n ttk.Button(self, text=\"Browse\", command=self.SaveFile).grid(row=5, column=2, sticky=tk.W)\n ttk.Button(self, text=\"Browse\", command=self.SaveFile2).grid(row=6, column=2, sticky=tk.W)\n ttk.Button(self, text=\"Execute\", width=12, command=self.returnedFunction).grid(row=8, column=5, columnspan=2)\n self.shpInput = tk.Text(self, height=1, width=40, font=\"Times 9\")\n self.shpInput.grid(row=2, column=1, sticky=tk.W)\n self.webmapOutput = tk.Text(self, height=1, width=40, font=\"Times 9\")\n self.webmapOutput.grid(row=5, column=1, sticky=tk.W)\n self.shpOutput = 
tk.Text(self, height=1, width=40, font=\"Times 9\")\n self.shpOutput.grid(row=6, column=1, sticky=tk.W)\n self.placeName = tk.StringVar(self)\n self.e2 = tk.Entry(self, width=35, font=\"Times 9\", textvariable=self.placeName)\n self.e2.config(state=\"disabled\")\n self.e2.grid(row=3, column=1, sticky=tk.W)\n self.networkType = tk.StringVar(self)\n self.e3 = tk.Entry(self, width=40, font=\"Times 9\", textvariable=self.networkType)\n self.e3.grid(row=4, column=1, sticky=tk.W)\n self.whichResult = tk.StringVar(self)\n self.e4 = tk.Entry(self, width=5, font=\"Times 9\", textvariable=self.whichResult)\n self.e4.config(state=\"disabled\")\n self.e4.grid(row=3, column=1, sticky=tk.E)\n\n # Choosing function based on user selection\n def returnedFunction(self):\n funcs = {\"Degree\": self.degreeCentrality,\n \"Betweenness\": self.betweennessCentrality,\n \"Closeness\": self.closenessCentrality}\n function = funcs[self.dropvar.get()]\n return function()\n\n # Getting Network\n def findG(self):\n if self.dropvar2.get() == \"From boundary\":\n if len(self.networkType.get()) > 0:\n G = bk.networkFromPolygon(self.shpInput.get(\"1.0\",'end-1c'), self.networkType.get())\n else:\n G = bk.networkFromPolygon(self.shpInput.get(\"1.0\",'end-1c'))\n elif self.dropvar2.get() == \"From region name\":\n if len(self.networkType.get()) > 0 and len(self.whichResult.get()) > 0:\n G = bk.networkFromPlaceName(self.placeName.get(), networkType=self.networkType.get(), whichResult=self.whichResult.get())\n elif len(self.networkType.get()) > 0 and not self.whichResult.get():\n G = bk.networkFromPlaceName(self.placeName.get(), networkType=self.networkType.get())\n elif len(self.networkType.get()) == 0 and self.whichResult.get():\n G = bk.networkFromPlaceName(self.placeName.get(), whichResult=self.whichResult.get())\n else:\n G = bk.networkFromPlaceName(self.placeName.get())\n return G\n\n def degreeCentrality(self):\n print(\"Calculating Degree Centrality..\")\n G = self.findG()\n if 
len(self.webmapOutput.get(\"1.0\",'end-1c')) > 0:\n bk.degreeCentrality(G, self.shpOutput.get(\"1.0\",'end-1c'), self.webmapOutput.get(\"1.0\",'end-1c'))\n else:\n bk.degreeCentrality(G, self.shpOutput.get(\"1.0\",'end-1c'))\n \n def betweennessCentrality(self):\n print(\"Calculating Betweenness..\")\n G = self.findG()\n if len(self.webmapOutput.get(\"1.0\",'end-1c')) > 0:\n bk.betweennessCentrality(G, self.shpOutput.get(\"1.0\",'end-1c'), self.webmapOutput.get(\"1.0\",'end-1c'))\n else:\n bk.betweennessCentrality(G, self.shpOutput.get(\"1.0\",'end-1c'))\n \n def closenessCentrality(self):\n print(\"Calculating Closeness..\")\n G = self.findG()\n if len(self.webmapOutput.get(\"1.0\",'end-1c')) > 0:\n bk.closenessCentrality(G, self.shpOutput.get(\"1.0\",'end-1c'), self.webmapOutput.get(\"1.0\",'end-1c'))\n else:\n bk.closenessCentrality(G, self.shpOutput.get(\"1.0\",'end-1c'))\n \n def chgdrp(self, *args):\n if self.dropvar2.get() == \"From boundary\":\n self.browseButton.config(state=\"normal\")\n self.e2.config(state=\"disabled\")\n self.e4.config(state=\"disabled\")\n self.shpInput.config(state=\"normal\")\n elif self.dropvar2.get() == \"From region name\":\n self.browseButton.config(state=\"disabled\")\n self.e2.config(state=\"normal\")\n self.e4.config(state=\"normal\")\n self.shpInput.config(state=\"disabled\")\n\n def OpenFile(self):\n shpLocation = askopenfilename(initialdir=\"\\\\\", filetypes =((\"Shapefile\", \"*.shp\"),(\"All Files\",\"*.*\")),\n title = \"Choose origin shapefile\")\n self.shpInput.delete(\"1.0\", tk.END)\n self.shpInput.insert(tk.END, shpLocation)\n \n def SaveFile(self):\n webmapLocation = asksaveasfilename(initialdir=\"\\\\\", filetypes=((\"Html Files\", \"*.html\"),(\"All Files\",\"*.*\")),\n title = \"Choose output path for webmap\")\n if webmapLocation[-5:] == \".html\":\n pass\n else:\n webmapLocation += \".html\"\n self.webmapOutput.delete(\"1.0\", tk.END)\n self.webmapOutput.insert(tk.END, webmapLocation)\n \n def 
SaveFile2(self):\n shpOutputLocation = askdirectory(initialdir=\"\\\\\",\n title = \"Choose output folder for shapefiles\")\n self.shpOutput.delete(\"1.0\", tk.END)\n self.shpOutput.insert(tk.END, shpOutputLocation)\n\n# def createCanvasImage(self):\n# canvas = tk.Canvas(self, width=1000, height=200)\n# canvas.grid(row=0, column=0, rowspan=10, columnspan=7)\n# img = Image.open(r\"C:\\Users\\OmrAkn\\Desktop\\YuksekLisansTezUygulama\\SATRAP\\s5uaze.png\")\n# canvas.image = ImageTk.PhotoImage(img)\n# canvas.create_image(0, 0, image=canvas.image, anchor=tk.NW)\n \nclass Accessibility(tk.Frame):\n \n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n tk.Frame.grid_columnconfigure(self, 0, weight=1)\n tk.Frame.grid_columnconfigure(self, 1, weight=1)\n tk.Frame.grid_columnconfigure(self, 2, weight=1)\n tk.Frame.grid_columnconfigure(self, 3, weight=1)\n tk.Frame.grid_columnconfigure(self, 4, weight=1)\n tk.Frame.grid_columnconfigure(self, 5, weight=1)\n tk.Frame.grid_columnconfigure(self, 6, weight=1)\n tk.Frame.grid_columnconfigure(self, 7, weight=1)\n# self.createCanvasImage()\n \n backButton = ttk.Button(self, text=\"Back\",\n command=lambda: controller.show_frame(StartPage))\n backButton.grid(row=0, column=0, sticky=tk.W)\n \n helpButton = ttk.Button(self, text=\"Help\",\n command=lambda: controller.show_frame(HelpMenu))\n helpButton.grid(row=0, column=3, sticky=tk.E)\n \n tk.Label(self, text=\"Origins\").grid(row=2, sticky=tk.E)\n tk.Label(self, text=\"Destinations\").grid(row=3, sticky=tk.E)\n tk.Label(self, text=\"Webmap Output Path (Optional)\").grid(row=5, sticky=tk.E)\n tk.Label(self, text=\"Transportation Mode (Optional)\").grid(row=4, sticky=tk.E)\n tk.Label(self, text=\"Shapefile Output Folder\").grid(row=6, sticky=tk.E)\n tk.Label(self, text=\"Distance Threshold (m)\").grid(row=7, sticky=tk.E)\n ttk.Button(self, text=\"Browse\", command=self.OpenFile1).grid(row=2, column=2, sticky=tk.W)\n ttk.Button(self, text=\"Browse\", 
command=self.OpenFile2).grid(row=3, column=2, sticky=tk.W)\n ttk.Button(self, text=\"Browse\", command=self.SaveFile).grid(row=5, column=2, sticky=tk.W)\n ttk.Button(self, text=\"Browse\", command=self.SaveFile2).grid(row=6, column=2, sticky=tk.W)\n ttk.Button(self, text=\"Execute\", width=12, command=self.returned_func).grid(row=6, column=3)\n self.originInput = tk.Text(self, height=1, width=40, font=\"Times 9\")\n self.originInput.grid(row=2, column=1, sticky=tk.W)\n self.destInput = tk.Text(self, height=1, width=40, font=\"Times 9\")\n self.destInput.grid(row=3, column=1, sticky=tk.W)\n self.webmapOutput = tk.Text(self, height=1, width=40, font=\"Times 9\")\n self.webmapOutput.grid(row=5, column=1, sticky=tk.W)\n self.shpOutput = tk.Text(self, height=1, width=40, font=\"Times 9\")\n self.shpOutput.grid(row=6, column=1, sticky=tk.W) \n self.networkType = tk.StringVar(self)\n self.e1 = tk.Entry(self, width=40, font=\"Times 9\", textvariable=self.networkType)\n self.e1.grid(row=4, column=1, sticky=tk.W)\n self.threshold = tk.StringVar(self)\n self.e2 = tk.Entry(self, width=40, font=\"Times 9\", textvariable=self.threshold)\n self.e2.config(state=\"disabled\")\n self.e2.grid(row=7, column=1, sticky=tk.W)\n \n optionlist = [\"Potential Accessibility\", \"Daily Accessibility\"]\n self.dropvar = tk.StringVar(self)\n self.dropvar.set(\"Potential Accessibility\")\n tk.Label(self, text=\"Analysis method\").grid(row=2, column=3)\n dropmenu = tk.OptionMenu(self, self.dropvar, *optionlist)\n dropmenu.grid(row=3, column=3)\n self.dropvar.trace(\"w\", self.chgdrp)\n \n def returned_func(self):\n funcs = {\"Potential Accessibility\": self.potentialAccessibility,\n \"Daily Accessibility\": self.dailyAccessibility}\n function = funcs[self.dropvar.get()]\n return function() \n \n def origdest(self):\n if len(self.networkType.get()) > 0:\n route_geom, nodes, G_proj = bk.origindestination(self.originInput.get(\"1.0\",'end-1c'), self.destInput.get(\"1.0\",'end-1c'),\n 
self.networkType.get())\n else:\n route_geom, nodes, G_proj = bk.origindestination(self.originInput.get(\"1.0\",'end-1c'), self.destInput.get(\"1.0\",'end-1c'))\n return route_geom, nodes, G_proj \n \n def potentialAccessibility(self):\n route_geom, nodes, G_proj = self.origdest()\n if len(self.webmapOutput.get(\"1.0\",'end-1c')) > 0:\n bk.potentialAccessibility(route_geom, nodes, G_proj, self.shpOutput.get(\"1.0\",'end-1c'),\n self.webmapOutput.get(\"1.0\",'end-1c'))\n else:\n bk.potentialAccessibility(route_geom, nodes, G_proj, self.shpOutput.get(\"1.0\",'end-1c'))\n \n def dailyAccessibility(self):\n route_geom, nodes, G_proj = self.origdest()\n if len(self.threshold.get()) > 0 and len(self.webmapOutput.get(\"1.0\",'end-1c')) > 0:\n bk.dailyAccessibility(route_geom, nodes, G_proj, self.shpOutput.get(\"1.0\",'end-1c'), self.threshold.get(),\n self.webmapOutput.get(\"1.0\",'end-1c'))\n elif len(self.threshold.get()) > 0 and len(self.webmapOutput.get(\"1.0\",'end-1c')) > 0:\n bk.dailyAccessibility(route_geom, nodes, G_proj, self.shpOutput.get(\"1.0\",'end-1c'), self.threshold.get())\n elif len(self.threshold.get()) == 0 and len(self.webmapOutput.get(\"1.0\",'end-1c')) == 0:\n bk.dailyAccessibility(route_geom, nodes, G_proj, self.shpOutput.get(\"1.0\",'end-1c'), 3000,\n self.webmapOutput.get(\"1.0\",'end-1c'))\n else:\n bk.dailyAccessibility(route_geom, nodes, G_proj, self.shpOutput.get(\"1.0\",'end-1c'))\n\n def chgdrp(self, *args):\n if self.dropvar.get() == \"Potential Accessibility\":\n self.e2.config(state=\"disabled\")\n elif self.dropvar.get() == \"Daily Accessibility\":\n self.e2.config(state=\"normal\")\n \n def OpenFile1(self):\n originLocation = askopenfilename(initialdir=\"\\\\\", filetypes =((\"Shapefile\", \"*.shp\"),(\"All Files\",\"*.*\")),\n title = \"Choose origin shapefile\")\n self.originInput.delete(\"1.0\", tk.END)\n self.originInput.insert(tk.END, originLocation)\n\n def OpenFile2(self):\n destinationLocation = 
askopenfilename(initialdir=\"\\\\\", filetypes =((\"Shapefile\", \"*.shp\"),(\"All Files\",\"*.*\")),\n title = \"Choose destination shapefile\")\n self.destInput.delete(\"1.0\", tk.END)\n self.destInput.insert(tk.END, destinationLocation)\n \n def SaveFile(self):\n webmapLocation = asksaveasfilename(initialdir=\"\\\\\", filetypes=((\"Html Files\", \"*.html\"),(\"All Files\",\"*.*\")),\n title = \"Choose output path for webmap\")\n if webmapLocation[-5:] == \".html\":\n pass\n else:\n webmapLocation += \".html\"\n self.webmapOutput.delete(\"1.0\", tk.END)\n self.webmapOutput.insert(tk.END, webmapLocation)\n \n def SaveFile2(self):\n shpOutputLocation = askdirectory(initialdir=\"\\\\\",\n title = \"Choose output folder for shapefiles\")\n self.shpOutput.delete(\"1.0\", tk.END)\n self.shpOutput.insert(tk.END, shpOutputLocation)\n\n# def createCanvasImage(self):\n# canvas = tk.Canvas(self, width=1000, height=200)\n# canvas.grid(row=0, column=0, rowspan=10, columnspan=7)\n# img = Image.open(r\"C:\\Users\\OmrAkn\\Desktop\\YuksekLisansTezUygulama\\SATRAP\\s5uaze.png\")\n# canvas.image = ImageTk.PhotoImage(img)\n# canvas.create_image(0, 0, image=canvas.image, anchor=tk.NW)\n \napp = Application()\napp.mainloop()"
}
] | 4 |
abijith-vijayendra/Image_Recognition
|
https://github.com/abijith-vijayendra/Image_Recognition
|
fa1e0fb67c2c9921227b067fdf03b51ff3eb8d5f
|
abd8dd6353ae057c460a81635d01244d7d47460c
|
a11cb22da1077fe1e2aebad8a914ee07ab580678
|
refs/heads/master
| 2022-03-06T21:17:57.686155 | 2019-06-12T10:58:48 | 2019-06-12T10:58:48 | 191,549,402 | 0 | 0 | null | 2019-06-12T10:26:54 | 2019-06-12T10:58:51 | 2022-02-10T00:11:19 |
Python
|
[
{
"alpha_fraction": 0.7136628031730652,
"alphanum_fraction": 0.7252907156944275,
"avg_line_length": 23.571428298950195,
"blob_id": "c1f7bb98dfbc16ac31a3f24279816e22e264f2dd",
"content_id": "04bc9deedf29086c75a6b6aa914adda56ec298d2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1376,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 56,
"path": "/predictions.py",
"repo_name": "abijith-vijayendra/Image_Recognition",
"src_encoding": "UTF-8",
"text": "from keras.models import model_from_json\nfrom pathlib import Path\nfrom keras.preprocessing import image\nimport numpy as np\nfrom keras.applications import vgg16\n\n#CIFAR labels from the training data\nclass_labels = [\n \"Plane\",\n \"Car\",\n \"Bird\",\n \"Cat\",\n \"Deer\",\n \"Dog\",\n \"Frog\",\n \"Horse\",\n \"Boat\",\n \"Truck\"\n]\n\n#load the json file that contains the model's structure\nf = Path(\"model_structure.json\")\nmodel_structure = f.read_text()\n\n#Recreate the keras model object from the json file\nmodel = model_from_json(model_structure)\n\n#Reload the models trained weights\nmodel.load_weights(\"model_weights.h5\")\n\n#Load an image file to test\nimg = image.load_img(\"dog.png\", target_size=(64,64))\n\n#Convert image to numpy array\nimage_to_test = image.img_to_array(img)\n\n#Add a fourth dimension to the image\nlist_of_images = np.expand_dims(image_to_test, axis=0)\n\n#Normalize the data\nimages = vgg16.preprocess_input(list_of_images)\n\nfeature_extraction_model = vgg16.VGG16(weights=\"imagenet\", include_top=False)\n\n\n#Make predictions\nresults = model.predict(list_of_images)\n#Just one image\nsingle_result = results[0]\n\nmost_likely_class_index = int(np.argmax(single_result))\nclass_likelihood = single_result[most_likely_class_index]\n\nclass_label = class_labels[most_likely_class_index]\n\nprint(\"This is a {} - likelihood : {:2f}\".format(class_label, class_likelihood, ))\n"
},
{
"alpha_fraction": 0.7152900099754333,
"alphanum_fraction": 0.753954291343689,
"avg_line_length": 30.61111068725586,
"blob_id": "251d82df1304fce78f9700938bd503d2361a667c",
"content_id": "c8f285580f8b12d14adce99e4597b96f7a542e62",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 569,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 18,
"path": "/loading_image_dataset.py",
"repo_name": "abijith-vijayendra/Image_Recognition",
"src_encoding": "UTF-8",
"text": "import keras\nfrom keras.datasets import cifar10\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D\nfrom pathlib import Path\n\n#Load Dataset\n(x_train, y_train), (x_test, y_test) = cifar10.load_data()\n\n#Normalize dataset to 0-1 range\nx_train = x_train.astype('float32')\nx_test = x_test.astype('float32')\nx_train /= 255\nx_test /= 255\n\n#Convert Class Vectors to binary class matrices\n#Our labels are single values from 0 to 9\n#Instead,, we want each label to be a array with one element set to 1 and the rest set to 0\n"
},
{
"alpha_fraction": 0.7978395223617554,
"alphanum_fraction": 0.8070987462997437,
"avg_line_length": 91.57142639160156,
"blob_id": "bad4ee3b9a4dc8117f9d104cebef82fd4ddbd13a",
"content_id": "da01cc24a1648fb864cb68859930cfd6cdb65590",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 648,
"license_type": "no_license",
"max_line_length": 261,
"num_lines": 7,
"path": "/README.md",
"repo_name": "abijith-vijayendra/Image_Recognition",
"src_encoding": "UTF-8",
"text": "# Image_Recognition\nBuilt an Image Recognition system using Python and Deep Learning Library Tensorflow-Keras.\nFeatures - \n1.) Built our own neural network model and played around with the model by modifying the structure by adding and subtracting different layers. Made use of Max Pooling, Convolution, Flatten and other network layers to build the model. Save the model and weights.\n2.) Made use of Pre-Trained models such as VGG16 and Imagenet to extract features and used our exixting NN model to predict the targets.\n3.) Used Binary Cross Entropy as our loss function and 'adam' optimizer to measure accuracy\n4.) Implemented Transfer Learning\n"
},
{
"alpha_fraction": 0.7394871711730957,
"alphanum_fraction": 0.7671794891357422,
"avg_line_length": 32.620689392089844,
"blob_id": "5b3750be3dedef1e7b2ff7cb6543f4503f05ce67",
"content_id": "4f3ab1ee6465af5ddd2b1684f960f2e334ca930e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 975,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 29,
"path": "/image_rec_pretrained.py",
"repo_name": "abijith-vijayendra/Image_Recognition",
"src_encoding": "UTF-8",
"text": "import numpy as np\nfrom keras.preprocessing import image\nfrom keras.applications import vgg16\n\n#Load the Keras VGG16 model pre-trained against ImageNet Database\nmodel = vgg16.VGG16()\n\n#Load the image file and resizing it to 224x224 pixels(Model requirement)\nimg = image.load_img(\"bay.jpg\", target_size=(224,224))\n\n#Convert Image to array\nx = image.img_to_array(img)\n\n#Add fourth dimension(since keras expects a lists of images)\nx = np.expand_dims(x, axis=0)\n\n#Normalize the input image's pixel values to the range used when training the neural network\nx = vgg16.preprocess_input(x)\n\n#Run the image through the model to make a prediction\npredictions = model.predict(x)\n\n#Look up the names of the predicted classes. Index zero is the results for the first\npredicted_classes = vgg16.decode_predictions(predictions)\n\nprint(\"Top predictions for this image:\")\n\nfor imagenet_id, name, likelihood in predicted_classes[0]:\n print(\"Prediction: {} - {:2f}\".format(name, likelihood))\n"
},
{
"alpha_fraction": 0.6629143357276917,
"alphanum_fraction": 0.707317054271698,
"avg_line_length": 25.649999618530273,
"blob_id": "62df6fa855d05e29feeb70386bc721fc6273dda9",
"content_id": "15f323790076dbc472037e0bb1f9c66fca1f7312",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1599,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 60,
"path": "/dense_layers.py",
"repo_name": "abijith-vijayendra/Image_Recognition",
"src_encoding": "UTF-8",
"text": "import keras\nfrom keras.datasets import cifar10\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D\nfrom pathlib import Path\n\n#Load dataset\n(x_train, y_train), (x_test, y_test) = cifar10.load_data()\n\n#Normalize dataset to 0-1 range\nx_train = x_train.astype('float32')\nx_test = x_test.astype('float32')\nx_train /= 255\nx_test /= 255\n\n#Convert class vectors to binary class matrices\ny_train = keras.utils.to_categorical(y_train, 10)\ny_test = keras.utils.to_categorical(y_test, 10)\n\n#Create a model and add layers\nmodel = Sequential()\n\nmodel.add(Conv2D(32, (3, 3), padding='same', activation='relu', input_shape=(32,32,3)))\nmodel.add(Conv2D(32, (3,3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2,2)))\nmodel.add(Dropout(0.25))\nmodel.add(Conv2D(64, (3,3), padding='same', activation='relu'))\nmodel.add(Conv2D(64, (3,3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2,2)))\nmodel.add(Dropout(0.25))\nmodel.add(Flatten())\n\nmodel.add(Dense(512, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(10, activation='softmax'))\n\n\n#Compile the model\nmodel.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\n\n#Train the model\nmodel.fit(\n x_train,\n y_train,\n batch_size=64,\n epochs=30,\n validation_data=(x_test, y_test),\n shuffle = True\n)\n\n#Save the neural networks structure\nmodel_structure = model.to_json()\nf = Path(\"model_structure.json\")\nf.write_text(model_structure)\n\n#Save the neural network's trained weights\nmodel.save_weights(\"model_weights.h5\")\n"
}
] | 5 |
NathanPaceydev/Physics-Plinko-Simulation
|
https://github.com/NathanPaceydev/Physics-Plinko-Simulation
|
f580f0844e1f220d185227d08535c336c3909123
|
f2c8f442fd18d5f571dbebdd812ac3a581a7ca2a
|
1943c4cf273c91b6e924fff31a9a066c63ba4635
|
refs/heads/main
| 2023-08-30T19:41:36.024527 | 2021-10-04T03:01:04 | 2021-10-04T03:01:04 | 395,502,285 | 1 | 1 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5942205786705017,
"alphanum_fraction": 0.6190731525421143,
"avg_line_length": 28.356224060058594,
"blob_id": "088ab6ec363c71c3d3a9303425b8059c0b98a152",
"content_id": "9c32278d46c01c8b172ded6e6094417b5a62ce00",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 20534,
"license_type": "no_license",
"max_line_length": 200,
"num_lines": 699,
"path": "/Plinko-Simulation-With-Physics.py",
"repo_name": "NathanPaceydev/Physics-Plinko-Simulation",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding: utf-8\n\n# ## Plinko Simulation Project\n# #### Nathan Pacey\n# \n# Plinko Record: https://www.youtube.com/watch?v=yKTBv7ZqFcU&ab_channel=EntertainmentTonight\n# \n# The objective of this code is to model a Plinko drop with a varying coefficient of drag. This can be\n# done to compare the Plinko in air and “underwater”. \n\n# ## Import Necessary Libraries\n\n# In[1]:\n\n\nimport numpy as np\nimport math as m\nfrom random import random\nfrom matplotlib import pyplot as plt\nget_ipython().run_line_magic('matplotlib', 'inline')\nimport random\n\n\n# ### Functions to randomize initial conditions\n\n# In[2]:\n\n\n# set the initial position of the ball to a random position between -0.2 and 0.2\ndef rand_initPos(x):\n x += random.uniform(-0.20,0.20)\n return x\n\n# set the starting angle to a random value between 0.9(theta) and 1.1(theta) ****CHECK****\ndef rand_angle(theta):\n theta += random.uniform(-theta*0.10,theta*0.10)\n return theta\n\n# set the x & y velocities to random values between 0(v) and 0.05(v)\ndef rand_absorption(x_vel, y_vel, theta):\n x_vel += random.uniform(0.00, 0.05)*abs(x_vel)*np.cos(theta)\n y_vel += random.uniform(0.00, 0.05)*abs(y_vel)* np.sin(theta)\n return x_vel, y_vel\n\n\n# #### Hypotenuse Function (for collision detection later)\n\n# In[3]:\n\n\n# uses pythagorean th. 
to calculate hypotenuse of x & y vectors\ndef pythagorean(x_0, x1, y_0, y1):\n h = ((x_0-x1)**2 + (y_0-y1)**2)**0.5\n return h\n\n\n# #### Functions finding velocity and acceleration based on drag/gravitational forces\n\n# In[4]:\n\n\n# calculates acceleration in the x-direction given a drag force\n# no gravity in x-direction\ndef x_acceleration(x_vel, drag):\n \n if (x_vel < 0): # drag opposes velocity\n drag = -np.copy(drag)\n\n acceleration = -drag * x_vel**2 # calculate x-acceleration\n return acceleration\n\n\n# calculate the acceleration in the y-direction given a drag force and gravity\ndef y_acceleration(y_vel, drag):\n gravity = 981 # acceleration due to gravity\n \n if (y_vel < 0): # drag opposes velocity\n drag = -np.copy(drag)\n \n acceleration = -gravity-drag * y_vel**2 # calculate y-acceleration\n return acceleration\n\n# updates incoming x & y velocities based on calculated x & y accelerations\ndef velocity(x_vel1, y_vel1, drag, del_t):\n # calculate new x & y velocities\n x_vel2 = x_vel1 + x_acceleration(x_vel1, drag)*del_t\n y_vel2 = y_vel1 + y_acceleration(y_vel1, drag)*del_t\n\n # returns refracted velocities\n return x_vel2, y_vel2\n\n\n# ### Setting Up the Plinko Container\n\n# In[5]:\n\n\n# initializes variables to be plotted\ndef plotVars():\n xpts = np.arange(0, 101, 10) # 10 x-points from 0 -> 100\n\n half_xpts = np.arange(5, 100, 10)\n \n y = np.arange(0, 80, 10*np.sin(np.pi/3)) # set up y-points with sine function\n\n y_flipped = np.flip(y) # flip y elements\n \n return xpts, half_xpts, y_flipped\n\n# plot the Plinko board\ndef plot_board(plot):\n xpts, half_xpts, y_flipped = plotVars() # initialize variables\n \n if plot == 1:\n # plot pegs\n plt.figure(figsize=(30,30))\n plt.xlim(0, 100)\n plt.ylim(-5, 100) # y-limit allows last peg to show\n \n plt.title (\"Plinko Game!\", fontsize = 50)\n plt.xlabel(\"X-Axis\", fontsize = 40)\n plt.ylabel(\"Y-Axis\", fontsize = 40)\n \n plt.xticks(np.arange(0, 101, 10)) # ticks match the bins\n 
plt.yticks(np.arange(0, 101, 10))\n plt.xticks(fontsize = 25)\n plt.yticks(fontsize = 25)\n \n for j in range (10):\n if (j%2 == 1):\n y_peg = y_flipped[j] * np.ones(np.size(half_xpts))\n plt.scatter(half_xpts, y_peg, marker = 'o', color = 'cornflowerblue', s = 1500, label = 'Pegs')\n \n if (j%2 == 0):\n y_peg = y_flipped[j] * np.ones(np.size(xpts))\n plt.scatter(xpts, y_peg, marker = 'o', color = 'cornflowerblue', s = 1500, label = 'Pegs')\n \n return half_xpts, xpts, y_flipped # return arrays\n\n\nplot_board(1) # test the configuration is accurate\n \n\n\n# ### Plotting the Ball Movement\n\n# In[6]:\n\n\n# plotting the trajectory of the ball\ndef plot_ball(x, y, collisions):\n \n # indices of impact points correspond to position at each impact\n xCollision = []\n yCollision = []\n \n for col in collisions: # separate x and y collisions\n xCollision = np.append(xCollision, x[col])\n yCollision = np.append(yCollision, y[col])\n \n # plot the path of the ball\n plt.plot(x, y, 'ro', ms = 10)\n # add points of collision to plot\n plt.plot(xCollision, yCollision, 'co', ms = 20, label = 'Collisions')\n\n plt.legend([\"Ball Motion\", \"Collisions\", \"Pegs\"], prop={'size': 30}) \n plt.show()\n\n\n# ## Now, to deal with collisions:\n# \n# \n\n# ### How the Ball Moves\n\n# In[7]:\n\n\n# radiii of objects [cm]\nr_ball = 2.5\nr_pegs = 0.5\n\n# first need a function to define the bounce of the ball off the pegs\n# returns the velocity of the ball\ndef peg_bounce(x_vel, y_vel, x_ball, x_peg, y_ball, y_peg):\n\n # determine angle of strike\n if (x_ball != x_peg):\n angleIn = np.arctan((y_ball - y_peg)/(x_ball - x_peg))\n else:\n # if ball strikes vertically set angle to pi/2 to avoid division by zero\n angleIn = np.pi/2\n \n # random reflected angle\n angleOut = rand_angle(angleIn)\n \n # x & y velocities found with deflection equations\n x_vel2 = -(np.cos(2*angleOut)*x_vel + y_vel * np.sin(2*angleOut))\n \n y_vel2 = -(np.sin(2*angleOut)*x_vel - y_vel * np.cos(2*angleOut))\n 
\n # adjust the velocity to account for a random absorption\n x_vel, y_vel = rand_absorption(x_vel2, y_vel2, angleOut)\n \n return x_vel, y_vel, 1 # 1 indicates a collision, 0 means no collision\n\n\n# ### Detecting Collisions\n\n# In[8]:\n\n\n## ** detecting collisions and setting the collision velocities **\n\ndef collision_dect (collision, x_vel, y_vel, x_ball, y_ball, yArray, half_xpts, xpts):\n \n # function dependent on the collisions conditional\n if collision == 0:\n closest_y = 0\n \n for j in range(10): # find the vertical y, closest value\n if abs(y_ball-yArray[j]) < abs(y_ball-yArray[closest_y]):\n closest_y = np.copy(j)\n \n # if the ball is in the vertical range of the pegs test the horizontal range\n if (r_ball + r_pegs)>= abs(y_ball - yArray[closest_y]):\n closest_x = 0\n \n if closest_y%2 == 1: # now find the closest x value for the right peg\n\n for i in range(9): # update the x-values\n if abs(x_ball-half_xpts[i+1]) < abs(x_ball-half_xpts[closest_x]):\n closest_x = np.copy(i+1)\n \n # return the hypotenuse of the new velocity vector\n radii = pythagorean(x_ball, half_xpts[closest_x], y_ball, yArray[closest_y])\n \n if radii <= (r_ball + r_pegs): # call the bounce method when the ball meets the collision condition\n return peg_bounce(x_vel, y_vel, x_ball, half_xpts[closest_x], y_ball, yArray[closest_y])\n\n if closest_y%2 == 0: # given the right row, determine the close x value\n for i in range(1, 11):\n # update the x array\n if abs(x_ball-xpts[i]) < abs(x_ball-xpts[closest_x]):\n closest_x = np.copy(i)\n \n # return the hypotenuse of the new velocity vector\n radii = pythagorean(x_ball, xpts[closest_x], y_ball, yArray[closest_y])\n \n if radii <= (r_ball + r_pegs): # call the bounce method when the ball meets the collision condition\n return peg_bounce(x_vel, y_vel, x_ball, xpts[closest_x], y_ball, yArray[closest_y])\n \n return x_vel, y_vel, 0 # 1 indicates a collision, 0 means no collision\n\n\n# ## Plotting Functions\n\n# In[9]:\n\n\n# 
x-position vs. y-position\ndef x_yPlot(x,y):\n plt.plot(x,y,\"r\")\n plt.title(\"X and Y Positions\")\n plt.xlabel(\"X\")\n plt.ylabel(\"Y\")\n plt.show()\n\n# position vs. time\ndef position_timePlot(initialX, x, y, time, plot):\n t = np.arange(0, x.size*0.001, 0.001)\n if plot == 1: # only plot if passed a 1 by user\n plt.plot(t, x, label = 'X Position')\n plt.plot(t, y, label = 'Y Position')\n plt.xlabel(\"Time\")\n plt.ylabel(\"Position\")\n plt.title('Position over Time')\n plt.legend(bbox_to_anchor=(1.0, 0.5))\n plt.show()\n # reset distance for each new position\n xdist = 0\n ydist = 0\n \n # need distance between current and last points in x and y directions\n for i in range (1,len(x)):\n xdist += abs(x[i]-x[i-1])\n for i in range (1,len(y)):\n ydist += abs(y[i]-y[i-1])\n \n if plot ==1:\n print(\"Total x distance\", xdist)\n print(\"Total y distance\", ydist)\n # returns total distances travelled in x and y directions\n return xdist, ydist\n\n# velocity vs. time\ndef velocity_time(time, x_vel,y_vel,x):\n time = np.arange(0, x.size*0.001, 0.001)\n plt.plot(time, y_vel, label = 'Y Velocity')\n plt.plot(time, x_vel, label = 'X Velocity')\n plt.title('vy vs t (blue) and vx vs t (orange)')\n plt.xlabel(\"Time\")\n plt.ylabel(\"Velocity Value\")\n plt.legend(bbox_to_anchor=(1.0, 0.5))\n plt.show()\n\n\n \n# *****Freq transformation to find the max ******** \n\ndef BouncingFreq_Plot(y_vel, plot):\n\n Vyk = np.fft.fft(y_vel)[1:-1]\n Vyk = abs(Vyk)\n Max_freq = np.argmax(Vyk[0:210])\n freq = np.arange(0, 210)\n \n if plot == 1:\n plt.plot(freq, (Vyk)[0:210])\n plt.title('Frequency of change of Direction')\n plt.xlabel(\"Frequency [Hz]\")\n plt.ylabel(\"Absolute value of bouncing Frequency\")\n plt.show()\n \n print(\"The maximum bouncing frequency is\",Vyk[Max_freq],\"and occurs at\",Max_freq,\"Hz\")\n\n area = np.sum(Vyk[0:150])\n \n if plot == 1:\n print(\"\\nThe integrated area of frequency from 0 Hz to 150 Hz is\", area)\n\n # returns the maximum bouncing 
frequency and the area under the plot\n return Vyk[Max_freq], area\n\n\n# ## The Final Plinko Simulation\n\n# In[10]:\n\n\ndef Plinko(initialX, time, drag, plot):\n # declare constants\n del_t = 0.001\n Num = int(time/del_t) #determine the number of points\n \n initialY = 90 # set the initial positions and velocities\n initialX_Vel = 0 \n initialY_Vel = 0\n\n xVel = np.ones(Num) * rand_initPos(initialX_Vel)\n yVel = np.ones(Num) * rand_initPos(initialY_Vel)\n \n # determine all the arrays for position and velocity\n x = np.ones(Num) * rand_initPos(initialX)\n y = np.ones(Num) * rand_initPos(initialY)\n \n # initialize the collisions array with a zero condition (miss condition)\n collisions = []\n Collision = 0\n\n # create the Plinko Board\n half_xpts, xpts, yArray = plot_board(plot) # call the plotting function\n \n for i in range (1, Num): # for every point in the x,y arrays determine if a collision occurs and output the properties ie velocities\n\n newX_Vel, newY_Vel, Collision = collision_dect (Collision, xVel[i-1], yVel[i-1], x[i-1], y[i-1], yArray, half_xpts, xpts)\n \n if Collision == 1: # add collisions to collision array\n collisions = np.append(collisions, int(i))\n\n # determine the positions and velocities given a collision\n y[i] = newY_Vel*del_t+ y[i-1]\n x[i] = newX_Vel*del_t+ x[i-1] #calculate positions\n \n xVel[i], yVel[i] = velocity(newX_Vel, newY_Vel, drag, del_t) # calculate new velocities\n\n # ball wraps around the boundaries when it meets the walls\n if x[i] <= 0:\n x[i] = (x[i] + 100) # set an x at 0 to 100\n \n if x[i] >= 100:\n x[i] = (x[i] - 100) # set an x at 100 to 0 \n \n if y[i] <= 0: # simulation ends when y <= 0, set the arrays to the appropriate size\n x = x[0:i] # by splicing up to the last y index length\n y = y[0:i]\n xVel = xVel[0:i]\n yVel = yVel[0:i]\n \n collisions = collisions.astype(int) # set the collisions var to an int\n break\n \n Bin = m.floor((x[-1] + 5)/10) # x-position of final bin\n if Bin == 10: # wrap the last 
bin to the 0th bin\n Bin = 0\n \n time = x.size*0.001 # update the time\n \n if plot==1:\n plot_ball(x, y, collisions) # plot the path of the ball\n x_yPlot(x,y) # plot the x position vs. y position\n velocity_time(time, xVel,yVel,x) # plot the velocity vs. time\n print(\"Plinko!!\\n\\nThe Ball Landed in Bin\",Bin, \"With a final x position:\",x[-1]) # test print\n \n print(\"The Simulation took\", time ,\"s to reach a y 0f 0\") # display the total time\n \n xdist, ydist = position_timePlot(initialX, x, y, time, plot) # plot the position vs. time\n \n # ****** calling the freq plotting function *****\n Maxfreq, area = BouncingFreq_Plot(yVel, plot) # plot the bouncing frequency over time\n\n return Bin, x, y, xVel, yVel, collisions, time, xdist, ydist, Maxfreq, area\n\n\n# ### Running the simulation many times and Histogram Data\n# Histogram of many runs for the time to reach y = 0\n# \n# ◦ Histogram of many runs for the total distance travelled\n# \n# ◦ Histogram of many runs for the bin the disc landed in.\n# \n# ◦ Histogram of many runs for the frequency of the maximum occurrence of bouncing\n# \n# ◦ Histogram of many runs for the integrated area of the frequency of bouncing (from 0-\n# 150 Hz)\n\n# In[11]:\n\n\n#making many times 10 because my computer cannot run the program more than 10 times in 5 minutes\nmany = 100\ninitialX = 50\ndrag = 0.01\n\ndef manyRuns(drag,many,initialX):\n time = 5\n BinsT = np.zeros(many)\n timeT = np.zeros(many)\n xdistT = np.zeros(many) \n ydistT = np.zeros(many)\n Max_freq = np.zeros(many)\n area = np.zeros(many)\n\n for i in range(many):\n BinsT[i], xT, yT, x_velT, y_velT, collisionsT,timeT[i], xdistT[i], ydistT[i], Max_freq[i], area[i] = Plinko(initialX, time,drag, 0)\n \n return BinsT, timeT, xdistT, ydistT, Max_freq, area\n \n\nBinsT, timeT, xdistT, ydistT, Max_freq, area = manyRuns(drag,many, initialX)\n\n\n# #### Histogram Functions\n\n# In[12]:\n\n\n# time for the ball to reach the bottom\ndef Hist_time(time):\n 
plt.hist(time, bins = 'auto', label = \"time\" )\n plt.title(\"Total Time to Reach y = 0\")\n plt.xlabel(\"Time (s)\")\n plt.ylabel(\"Bins\")\n plt.show()\n\n# total x-distance travelled\ndef Hist_disX(xdistT):\n plt.hist(xdistT, bins = 'auto', label = \"X-Distance\", color = (random.random(),random.random(),random.random()))\n plt.title(\"Total X-Distance Travelled\")\n plt.xlabel(\"x-Distance\")\n plt.ylabel(\"Bins\")\n plt.show()\n\ndef Hist_disY(ydistT):\n plt.hist(ydistT, bins = 'auto', label = \"Y-Distance\", color = (random.random(),random.random(),random.random()))\n plt.title(\"Total Y-Distance\")\n plt.xlabel(\"Y-Distance\")\n plt.ylabel(\"Bins\")\n plt.show()\n \ndef Hist_bins(BinsT):\n plt.hist(BinsT, bins = 'auto', label = \"Bins\", color = (random.random(),random.random(),random.random()))\n plt.title(\"Bins that were Landed In\")\n plt.xlabel(\"Bins Landed In\")\n plt.ylabel(\"Bins\")\n plt.show()\n\ndef Hist_freq(Max_freq):\n plt.hist(Max_freq, bins = 'auto', label = \"Max Freq.\", color = (random.random(),random.random(),random.random()))\n plt.title(\"Maximum Frequency\")\n plt.xlabel(\"Frequency [Hz]\")\n plt.ylabel(\"Bins\")\n plt.show()\n \ndef Hist_freqArea(area):\n plt.hist(area, bins = 'auto', label = \"area\", color = (random.random(),random.random(),random.random()))\n plt.title(\"Area Under Maximum Frequency Plot\")\n plt.xlabel(\"Area [Hz]\")\n plt.ylabel(\"Bins\")\n plt.show()\n \n\ndef initializeHist(BinsT, timeT, xdistT, ydistT, Max_freq, area): \n Hist_time(timeT)\n Hist_disX(xdistT)\n Hist_disY(ydistT)\n Hist_bins(BinsT)\n Hist_freq(Max_freq)\n Hist_freqArea(area)\n\n\n# ## Simulation With Drag = 0.01\n\n# In[13]:\n\n\n# setting the simulation conditions to pass to the function\ninitialX = 50\ndrag = 0.01\ntime = 5\n\nBins, x, y, x_vel, y_vel, collisions,time, xdist, ydist, Maxfreq, area = Plinko(initialX, time,drag,1)\nBinsT, timeT, xdistT, ydistT, Max_freq, area = manyRuns(drag,many, initialX)\n\n\n# In[14]:\n\n\n# output all 
plots\ninitializeHist(BinsT, timeT, xdistT, ydistT, Max_freq, area)\n\n\n# ## Simulation With Drag = 0.05\n\n# In[15]:\n\n\n# setting the simulation conditions to pass to the function\n\ninitialX = 50\ndrag = 0.05\ntime = 5\nprint(\"Simulation with drag = 0.05\")\n\nBins, x, y, x_vel, y_vel, collisions, time, xdist, ydist, Max_freq, area = Plinko(initialX, time, drag, 1)\n\n\n# In[16]:\n\n\nBinsT, timeT, xdistT, ydistT, Max_freq, area = manyRuns(drag,many, initialX)\n\n\n# In[17]:\n\n\ninitializeHist(BinsT, timeT, xdistT, ydistT, Max_freq, area)\n\n\n# ## Simulation With Drag = 0.1\n\n# In[18]:\n\n\n# setting the simulation conditions to pass to the function\ninitialX = 50\ndrag = 0.1\ntime = 5\nprint(\"Simulation with drag = 0.1\")\n\nBins, x, y, x_vel, y_vel, collisions, time, xdist, ydist, Max_freq, area = Plinko(initialX, time,drag,1)\n\n\n# In[19]:\n\n\nBinsT, timeT, xdistT, ydistT, Max_freq, area = manyRuns(drag,many, initialX)\n\n\n# In[20]:\n\n\ninitializeHist(BinsT, timeT, xdistT, ydistT, Max_freq, area)\n\n\n# ## Simulation With drag = 0.5\n\n# In[21]:\n\n\n# setting the simulation conditions to pass to the function\ninitialX = 50\ndrag = 0.1\ntime = 5\nprint(\"Simulation with drag = 0.5\")\n\nBins, x, y, x_vel, y_vel, collisions, time, xdist, ydist, Max_freq, area = Plinko(initialX, time,drag,1)\nBinsT, timeT, xdistT, ydistT, Max_freq, area = manyRuns(drag,many, initialX)\n\n\n# In[22]:\n\n\ninitializeHist(BinsT, timeT, xdistT, ydistT, Max_freq, area)\n\n\n# ## What changing the drag coefficient does\n# \n# When comparing the drag coefficients here are some of the significant differences:\n# - the lower the drag the more time it takes for the ball to reach y = 0, since the ball encounters more collisions\n# - the lower the drag the more x and y distance travelled. 
For 0.01 drag the average x and y distance was about 200 and 225, while for the 0.5 drag the x distance was 75 while the y distance was 120.\n# - The frequency is also much higher for a lower drag coefficient; from a maximum frequency of 65000 Hz (drag 0.01) to only 27500 Hz (drag 0.5)\n# - The lower frequency also decreases the area under the frequency curve\n# \n\n# # Testing the Simulation by changing the initial position\n\n# In[23]:\n\n\n# setting the simulation conditions to pass to the function\ninitialX = 75 # changing the starting x position to 75\ndrag = 0.05 #setting the drag back to 0.05\ntime = 5\nprint(\"Changing the initial position to 75 in x\")\nBins, x, y, x_vel, y_vel, collisions, time, xdist, ydist, Max_freq, area = Plinko(initialX, time,drag,1)\n\n\n# ## Animating the Plinko Simulation¶\n\n# In[24]:\n\n\nfrom matplotlib.animation import FuncAnimation\nfrom IPython.display import HTML\nfrom matplotlib import rcParams\n\n#rcParams['animation.ffmpeg_path']='/usr/bin/ffmpeg'\n\nrcParams['animation.embed_limit'] = 2**128\n\nxp, half_xp, y_f = plotVars()\n\nfg = plt.figure(figsize=(10,10))\nax = plt.axes(xlim=(0, 100),ylim=(-5, 100))\n\nplt.title (\"Plinko Game!\", fontsize = 20)\nplt.xlabel(\"x-Range\", fontsize = 10)\nplt.ylabel(\"y-Range\", fontsize = 10)\n\nplt.xticks(np.arange(0, 101, 10)) # ticks match the bins\nplt.yticks(np.arange(0, 101, 10))\nplt.xticks(fontsize = 10)\nplt.yticks(fontsize = 10)\n\nfor j in range (10):\n if (j%2 == 1):\n yP = y_f[j] * np.ones(len(half_xp))\n plt.scatter(half_xp, yP, marker = 'o', color = 'cornflowerblue', s = 300, label = 'Pegs')\n\n if (j%2 == 0):\n yP = y_f[j] * np.ones(len(xp))\n plt.scatter(xp, yP, marker = 'o', color = 'cornflowerblue', s = 300, label = 'Pegs')\n\n\nline, = plt.plot([], [], lw=5)\n\ndef init():\n line.set_data([], [])\n return (line,)\n\nxa, ya =[], []\nxx = np.array(x[0::25])\nyy = np.array(y[0::25])\n\n\nxy = np.zeros([len(xx), len(xx)])\nfor i in range(len(xx)):\n xy[0, i] = xx[i]\n 
for j in range(len(xx)):\n xy[i, j] = yy[i]\n\ndef ann(i):\n xp = np.linspace(0, 100)\n line.set_data(xp[:i], xy[i, :i])\n return line\ndef an(i):\n #xa.append(x[i])\n #ya.append([y[i])\n #line.set_data(xa, ya)\n line.set_data(xx[:i], yy[:i])\n return (line,)\n\nanim = FuncAnimation(fg, an, init_func=init, frames=len(xx), blit=True)\n\n#HTML(anim.to_html5_video())\nHTML(anim.to_jshtml())\n#anim\n\n"
},
{
"alpha_fraction": 0.6983606815338135,
"alphanum_fraction": 0.8295081853866577,
"avg_line_length": 42.42856979370117,
"blob_id": "5e70fb7f811d737de277a338b60251ab2d4ebbbd",
"content_id": "cd56cc070ceb10a336c6e94b30b61787dbb5ff44",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 305,
"license_type": "no_license",
"max_line_length": 169,
"num_lines": 7,
"path": "/README.md",
"repo_name": "NathanPaceydev/Physics-Plinko-Simulation",
"src_encoding": "UTF-8",
"text": "# Physics-Plinko-Simulation\nA practical physics approach to creating a Plinko simulation in Jupyter Notebook. Complete with varying drag coefficients, increased drop velocity and positional input. \n\n\n\n\nhttps://user-images.githubusercontent.com/64051575/135786915-05d65761-c55c-4141-92c3-fae86e15b9c5.mp4\n\n"
}
] | 2 |
XNYu/Statistic
|
https://github.com/XNYu/Statistic
|
5e6ddea06a8b17d71760ef997b52f530b41b56b4
|
0087adf4078ce78244fe02ddadbf5b3987180bde
|
7da084e2df6e74bf269c7ca6c4194da3e27a55fa
|
refs/heads/master
| 2021-01-19T21:00:56.705227 | 2016-06-17T07:38:27 | 2016-06-17T07:38:27 | 61,354,998 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.31490787863731384,
"alphanum_fraction": 0.4271356761455536,
"avg_line_length": 26.5238094329834,
"blob_id": "56fbc00c54c0b2d2a4fba2cdb5a466871f765f6f",
"content_id": "477acbba520b44d355bf57ea3673bc3b7d4ab226",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 597,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 21,
"path": "/01描述统计.py",
"repo_name": "XNYu/Statistic",
"src_encoding": "UTF-8",
"text": "class Solution():\r\n def describe(self, a):\r\n if len(a)==1:\r\n return[a[0],None,0.0,-3.0]\r\n n=len(a)\r\n mean=sum(a)/float(len(a))\r\n s2,s3,s4=0.0,0.0,0.0\r\n for i in a:\r\n s2+=(i-mean)**2\r\n s3+=(i-mean)**3\r\n s4+=(i-mean)**4\r\n var=s2/(n-1)\r\n b2=s2/n\r\n if s2==0:\r\n return[mean,var,0.0,-3.0]\r\n s3=s3/n/b2**1.5\r\n s4=s4/n/b2**2\r\n return[round(mean,6),round(var,6),round(s3,6),round(s4-3,6)] \r\nb = [1.0,8.6,1.1,4.8,2.6,0.7,3.1,1.8,3.5,4.8]\r\np=Solution()\r\nprint p.describe(b)"
},
{
"alpha_fraction": 0.30444443225860596,
"alphanum_fraction": 0.3422222137451172,
"avg_line_length": 21.789474487304688,
"blob_id": "ac69d4eca1da22f30dde2495ec32ba83c588296a",
"content_id": "3e3001b3d4b5694a0277bfa648ad67ef425c8033",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 450,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 19,
"path": "/examination/test2.py",
"repo_name": "XNYu/Statistic",
"src_encoding": "UTF-8",
"text": "import string\r\nclass Solution():\r\n def solve(self, A):\r\n sss={}\r\n for ss in A:\r\n print ss\r\n ss=str(ss)\r\n sm = list(ss)\r\n print sm\r\n \r\n for s in sm:\r\n if sss.has_key(s):\r\n sss[s] = sss[s]+1\r\n else:\r\n sss[s] = 1\r\n print sss\r\n \r\ns = Solution();\r\ns.solve(['12','34','567', '36','809','120'] )"
},
{
"alpha_fraction": 0.3658119738101959,
"alphanum_fraction": 0.40341880917549133,
"avg_line_length": 25.85714340209961,
"blob_id": "7b70efa0a78583354ddebb1dbdf1623a078a5dfb",
"content_id": "bdb204ebd669b80f18930277d40518e7973e63bb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 585,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 21,
"path": "/07蒙特卡洛.py",
"repo_name": "XNYu/Statistic",
"src_encoding": "UTF-8",
"text": "from math import exp\r\nfrom math import pi\r\nimport random\r\nclass Solution:\r\n def solve(self,a,b):\r\n if(a<b):\r\n n = 100000\r\n k = 0\r\n Y = exp(-a**2/2)/(2*pi)**0.5\r\n for i in range(n):\r\n x = random.uniform(a,b)\r\n X = exp(-x**2/2)/(2*pi)**0.5\r\n y = random.uniform(0,Y)\r\n if(X>y):\r\n k+=1\r\n return round(Y*(b-a)*k/n,6)\r\n else:\r\n return None\r\n#if __name__ ==\"__main__\":\r\n# solution = Solution()\r\n# print solution.solve(1,2)\r\n"
},
{
"alpha_fraction": 0.5705263018608093,
"alphanum_fraction": 0.6063157916069031,
"avg_line_length": 21.850000381469727,
"blob_id": "79893aa3ccc07067883d411cdb0bd1ed45d3b771",
"content_id": "3be93a44c59bccb20f4e2e6d2ff87cbf5c59b427",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 577,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 20,
"path": "/examination/56皮尔森相关系数.py",
"repo_name": "XNYu/Statistic",
"src_encoding": "UTF-8",
"text": "'''\r\n皮尔森相关系数用于度量两个变量之间相关程度,介于-1到1之间,其中-1表示完全负相关,0表示无关,1表示完全正相关。\r\n'''\r\n#-*- coding:utf-8 -*-\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom scipy import stats\r\n\r\ndef pearsonr():\r\n x = np.linspace(-5, 5, num=150)\r\n y = x + np.random.normal(size=x.size)\r\n y[12:14] += 10\r\n \r\n print(stats.pearsonr(x, y))\r\n plt.scatter(x,y)\r\n plt.savefig('pearsonr.png')\r\n plt.show()\r\n#the code should not be changed\r\nif __name__ == '__main__':\r\n pearsonr()"
},
{
"alpha_fraction": 0.5664739608764648,
"alphanum_fraction": 0.5968208312988281,
"avg_line_length": 23.703702926635742,
"blob_id": "0255a9c8be7bc218a40352fa70bafdb8e2c6b24d",
"content_id": "83c68904ee3e2c27562e3c7305564f6f686affc3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 692,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 27,
"path": "/examination/36.py",
"repo_name": "XNYu/Statistic",
"src_encoding": "UTF-8",
"text": "#-*- coding:utf-8 -*-\r\nimport sys\r\nimport numpy as np\r\nfrom scipy.stats import norm\r\nimport matplotlib.plot_api as plt\r\n\r\ndef sampling_distribution():\r\n fig, ax = plt.subplots(1, 1)\r\n #display the probability density function\r\n x = np.linspace(-4, 4, 100)\r\n ax.plot(x, norm.pdf(x))\r\n\r\n #simulate the sampling distribution\r\n y = []\r\n n=100\r\n for i in range(1000):\r\n r = norm.rvs(loc=5, scale=2, size=n)\r\n rsum=np.sum(r)\r\n z=(rsum-n*5)/np.sqrt(n*4)\r\n y.append(z)\r\n\r\n ax.hist(y, normed=True, alpha=0.2)\r\n plt.savefig('sampling_distribution.png')\r\n\r\n#the code should not be changed\r\nif __name__ == '__main__':\r\n sampling_distribution()"
},
{
"alpha_fraction": 0.5508772134780884,
"alphanum_fraction": 0.5929824709892273,
"avg_line_length": 20.076923370361328,
"blob_id": "b1af98220299af5566c70bd125fc4fd67934d885",
"content_id": "e7fad2836c8dbfb66052c4e9e91af30df940c2f9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 285,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 13,
"path": "/examination/50单样本T检验.py",
"repo_name": "XNYu/Statistic",
"src_encoding": "UTF-8",
"text": "#-*- coding:utf-8 -*-\r\nfrom scipy import stats\r\nfrom log_api import log\r\n\r\ndef ttest():\r\n x = stats.norm.rvs(loc=5, scale=10, size=50)\r\n\r\n log(stats.ttest_1samp(x,5.0))\r\n log(stats.ttest_1samp(x,1.0))\r\n\r\n#the code should not be changed\r\nif __name__ == '__main__':\r\n ttest()"
},
{
"alpha_fraction": 0.34958383440971375,
"alphanum_fraction": 0.428061842918396,
"avg_line_length": 17.159090042114258,
"blob_id": "a4f3f939d361d488d84a08bd75dae784fddcf5da",
"content_id": "62697d59f19774e316d4ff1dcc75fba2046a77e2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 961,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 44,
"path": "/src/case5.py",
"repo_name": "XNYu/Statistic",
"src_encoding": "UTF-8",
"text": "#-*- coding:utf-8 -*-\r\n# 2015-06-22\r\n# author cylong\r\n\r\n\"\"\"\r\n描述:\r\n\r\n利用python实现简化版双样本K-S检验函数\r\n输入:\r\n\r\na,b分别为非空一维数组;示例输入 : [1.0,2.0,3.0],[1.0,2.0,3.0]\r\n\r\n输出:\r\n\r\nks-value为检验结果KS值;示例输出 : 0.0\r\n\r\n注意:\r\n\r\n(1)不能调用math、scipy、numpy包\r\n\"\"\"\r\n\r\nclass Solution():\r\n def ks_2samp(self, data1, data2):\r\n data = data1 + data2\r\n data.sort()\r\n D = 0.0\r\n for tmp in data:\r\n num_1 = 0.0\r\n for t in data1:\r\n if t <= tmp:\r\n num_1 += 1.0\r\n num_2 = 0.0\r\n for t in data2:\r\n if t <= tmp:\r\n num_2 += 1.0\r\n a = abs(num_1 / len(data1) - num_2 / len(data2))\r\n D = max(a, D)\r\n return round(D, 6)\r\n \r\n\r\ns = Solution()\r\ndata1 = [1, 1.3, 2, 3.4]\r\ndata2 = [1, 2.1, 3.1]\r\nprint s.ks_2samp(data1, data2)"
},
{
"alpha_fraction": 0.5645390152931213,
"alphanum_fraction": 0.590070903301239,
"avg_line_length": 22.379310607910156,
"blob_id": "b5a0ba4d5c7e050f79532821792d69c8b1813a27",
"content_id": "9d1ba85e281d389da80cac279250827ac27e9929",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 705,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 29,
"path": "/examination/33.py",
"repo_name": "XNYu/Statistic",
"src_encoding": "UTF-8",
"text": "#-*- coding:utf-8 -*-\r\nimport sys\r\nimport numpy as np\r\nfrom scipy.stats import norm\r\nfrom scipy.stats import chi2\r\nfrom scipy.stats import t\r\nimport matplotlib.plot_api as plt\r\n\r\ndef t_distribution():\r\n fig, ax = plt.subplots(1, 1)\r\n #display the probability density function\r\n df = 10\r\n x=np.linspace(-4, 4, 100)\r\n ax.plot(x, t.pdf(x,df))\r\n \r\n #simulate the t-distribution\r\n y = []\r\n for i in range(1000):\r\n rx = norm.rvs()\r\n ry = chi2.rvs(df)\r\n rt = rx/np.sqrt(ry/df)\r\n y.append(rt)\r\n\r\n ax.hist(y, normed=True, alpha=0.2)\r\n plt.savefig('t_distribution.png')\r\n\r\n#the code should not be changed\r\nif __name__ == '__main__':\r\n t_distribution()"
},
{
"alpha_fraction": 0.3767705261707306,
"alphanum_fraction": 0.40934842824935913,
"avg_line_length": 18.22857093811035,
"blob_id": "739741e362ee35f65a24beb8479bd2f21f917b71",
"content_id": "c24d95efd38728b3f2b011dbc6f8315453bdb237",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 706,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 35,
"path": "/examination/test4.py",
"repo_name": "XNYu/Statistic",
"src_encoding": "UTF-8",
"text": "#-*- coding:utf-8 -*-\r\nfrom _ast import Num\r\n\r\nclass Solution():\r\n def solve(self, A):\r\n #call function prime\r\n mm=[]\r\n for i in A:\r\n if s.prime(i):\r\n pass\r\n else:\r\n mm.append(i)\r\n \r\n print mm\r\n\r\n #judge whether x is prime or not\r\n def prime(self, x):\r\n num = x**0.5\r\n num = int(num)\r\n for i in range(2,num+1):\r\n ss = x%i\r\n if ss == 0:\r\n return True\r\n return False\r\n \r\ns=Solution()\r\ns.solve([23, 45, 76, 67, 17] )\r\nn=0;\r\nfor i in range(2,5000):\r\n if s.prime(i):\r\n pass\r\n else:\r\n print i\r\n n = n+1\r\nprint \"n=\"+str(n)"
},
{
"alpha_fraction": 0.5394737124443054,
"alphanum_fraction": 0.5921052694320679,
"avg_line_length": 20.899999618530273,
"blob_id": "c87d46cb34cbb63bf55659a0ada6ae65ca504704",
"content_id": "44ae756a7787a4e414eeb9b79a37f0beba07afe0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 526,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 20,
"path": "/examination/54KS检测.py",
"repo_name": "XNYu/Statistic",
"src_encoding": "GB18030",
"text": "'''\r\nKS检验通常用于检验单一样本是否服从某特定分布,或者两样本是否来自相同分布\r\n'''\r\n#-*- coding:utf-8 -*-\r\nfrom scipy import stats\r\nfrom log_api import log\r\n\r\ndef kstest():\r\n n1=200\r\n n2=300\r\n a = stats.norm.rvs(size=n1, loc=0, scale=1)\r\n b = stats.norm.rvs(size=n2, loc=0.5, scale=1.5)\r\n c = stats.norm.rvs(size=n2, loc=0.01, scale=1)\r\n\r\n log(stats.ks_2samp(a, b))\r\n log(stats.ks_2samp(a, c))\r\n\r\n#the code should not be changed\r\nif __name__ == '__main__':\r\n kstest()"
},
{
"alpha_fraction": 0.5936982035636902,
"alphanum_fraction": 0.6334991455078125,
"avg_line_length": 28.25,
"blob_id": "e26763d5966a47821edde05cf8ad234d4b14b5e9",
"content_id": "94f569e5e7fbaf74da02718c38acae2b67fb2c43",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 603,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 20,
"path": "/examination/27.py",
"repo_name": "XNYu/Statistic",
"src_encoding": "UTF-8",
"text": "#-*- coding:utf-8 -*-\r\nimport numpy as np\r\nfrom scipy import integrate\r\nfrom scipy.stats import expon\r\nfrom log_api import log\r\n\r\ndef nc_of_expon():\r\n #1st non-center moment of expon distribution whose lambda is 0.5\r\n E1 = lambda x: x*0.5*np.exp(-x/2)\r\n #2nd non-center moment of expon distribution whose lambda is 0.5\r\n E2 = lambda x: x**2*0.5*np.exp(-x/2)\r\n log(integrate.quad(E1, 0, np.inf))\r\n log(integrate.quad(E2, 0, np.inf))\r\n\r\n log(expon(scale=2).moment(1))\r\n log(expon(scale=2).moment(2))\r\n\r\n#the code should not be changed\r\nif __name__ == '__main__':\r\n nc_of_expon()"
},
{
"alpha_fraction": 0.4012738764286041,
"alphanum_fraction": 0.42929935455322266,
"avg_line_length": 19.86111068725586,
"blob_id": "7bac82590f9df940fbd6876350f15f8e80f8ebdc",
"content_id": "67a2a2048ee272127fd028c9c727cbaf39f30a5e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 785,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 36,
"path": "/src/mtkl.py",
"repo_name": "XNYu/Statistic",
"src_encoding": "UTF-8",
"text": "import random\r\nimport math\r\n\r\nclass Solution:\r\n def getNum(self,x):\r\n a=math.pow(x, 2)\r\n a=-a/2\r\n b=math.exp(a)\r\n c=math.pi\r\n d=math.sqrt(c)\r\n e=math.sqrt(2)\r\n f=b/(d*e)\r\n return f\r\n def solve(self,a,b):\r\n total=0\r\n useful=0\r\n shuzu=[]\r\n for i in range(0,1000000) :\r\n c=random.uniform(a,b)\r\n c=self.getNum(c)\r\n shuzu.append(c)\r\n maax=max(shuzu)\r\n miin=min(shuzu)\r\n for i in shuzu:\r\n total+=1\r\n d=random.uniform(miin,maax)\r\n if(d<=i):\r\n useful+=1\r\n m=1.0*total\r\n d=useful/m\r\n e=(maax-miin)*(b-a)\r\n m=d*e\r\n return m\r\n \r\nsolu=Solution()\r\nsolu.solve(0.0,10.0)"
},
{
"alpha_fraction": 0.5336927175521851,
"alphanum_fraction": 0.5579515099525452,
"avg_line_length": 19.941177368164062,
"blob_id": "b2443da3d473dc5696f27c37cb5c6ff1b7deec8a",
"content_id": "25b7b9fd17c58697d60d2948f4d7333ddc28a30a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 371,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 17,
"path": "/examination/22.py",
"repo_name": "XNYu/Statistic",
"src_encoding": "UTF-8",
"text": "#-*- coding:utf-8 -*-\r\nfrom scipy.stats import binom\r\nfrom log_api import log\r\n\r\ndef nc_of_binom():\r\n rv = binom(10,0.2)\r\n log(rv.mean())\r\n log(rv.var())\r\n log(rv.moment(1))\r\n log(rv.moment(2))\r\n log(rv.moment(3))\r\n log(rv.moment(4))\r\n log(rv.stats(moments='mvsk'))\r\n\r\n#the code should not be changed\r\nif __name__ == '__main__':\r\n nc_of_binom()"
},
{
"alpha_fraction": 0.41091954708099365,
"alphanum_fraction": 0.46264368295669556,
"avg_line_length": 29.727272033691406,
"blob_id": "4d5f81754df477463426986b7271359feb2206c4",
"content_id": "c888e1752a0a97d6a8bebd3af6822269fee5b4a6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 696,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 22,
"path": "/03皮尔森相关系数.py",
"repo_name": "XNYu/Statistic",
"src_encoding": "UTF-8",
"text": "from scipy.stats import t\r\nclass Solution():\r\n def pearsonr(self, x, y):\r\n if x==None or y==None or len(x)==0 or len(y)==0:\r\n return[None,None]\r\n meanx,meany,sx,sy,r=0.0,0.0,0.0,0.0,0.0\r\n meanx=sum(x)/float(len(x))\r\n meany=sum(y)/float(len(y))\r\n for i in x:\r\n sx+=(i-meanx)**2\r\n sx/=float(len(x)-1)\r\n sx=sx**0.5\r\n for i in y:\r\n sy+=(i-meany)**2\r\n sy/=float(len(y)-1)\r\n sy=sy**0.5\r\n for i in range(len(x)):\r\n r+=(x[i]-meanx)*(y[i]-meany)/sx/sy\r\n r/=float(len(x)-1)\r\n return[round(r,6),round(0,6)]\r\ns = Solution()\r\nprint s.pearsonr([1.0,2.0,3.0],[2.0,2.0,3.0])"
},
{
"alpha_fraction": 0.5314465165138245,
"alphanum_fraction": 0.5691823959350586,
"avg_line_length": 20.85714340209961,
"blob_id": "07844b985cef4332c522fa3fcc73e23ab0acc839",
"content_id": "23e7c2133070ff2da0685203271f1dafe345e6fd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 318,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 14,
"path": "/examination/26.py",
"repo_name": "XNYu/Statistic",
"src_encoding": "UTF-8",
"text": "#-*- coding:utf-8 -*-\r\nfrom scipy.stats import norm\r\nfrom log_api import log\r\n\r\ndef nc_of_norm():\r\n f1 = lambda x: x**4\r\n f2 = lambda x: x**2-x+2\r\n\r\n log(norm.expect(f1, loc=1, scale=2))\r\n log(norm.expect(f2, loc=2, scale=5))\r\n\r\n#the code should not be changed\r\nif __name__ == '__main__':\r\n nc_of_norm()"
},
{
"alpha_fraction": 0.5364372730255127,
"alphanum_fraction": 0.568825900554657,
"avg_line_length": 21.619047164916992,
"blob_id": "824a228a62f862d18c63e97c633507c2968b533d",
"content_id": "a24aed38843af4690617b1f8ad0c4e05574011ab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 494,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 21,
"path": "/examination/31.py",
"repo_name": "XNYu/Statistic",
"src_encoding": "UTF-8",
"text": "#-*- coding:utf-8 -*-\r\nimport sys\r\nimport numpy as np\r\nfrom scipy.stats import binom\r\nimport matplotlib.plot_api as plt\r\n\r\ndef central_limit_theorem():\r\n y = []\r\n n=100\r\n for i in range(1000):\r\n r = binom.rvs(n, 0.3)\r\n rsum=np.sum(r)\r\n z=(rsum-n*0.3)/np.sqrt(n*0.3*0.7)\r\n y.append(z)\r\n \r\n plt.hist(y,color='grey')\r\n plt.savefig('central_limit_theorem.png')\r\n\r\n#the code should not be changed\r\nif __name__ == '__main__':\r\n central_limit_theorem()"
},
{
"alpha_fraction": 0.6120527386665344,
"alphanum_fraction": 0.647834300994873,
"avg_line_length": 27.61111068725586,
"blob_id": "6a1a628e73ec8692adf8189c176609282edf5888",
"content_id": "2b18fdc966d8500c9de79eda95cb80b9093a595c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 531,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 18,
"path": "/examination/10二项分布.py",
"repo_name": "XNYu/Statistic",
"src_encoding": "UTF-8",
"text": "#-*- coding:utf-8 -*-\r\nimport sys\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.stats import binom as B\r\n\r\ndef binom_pmf():\r\n rv=B(10, 0.5)#10 independent trials, probability of success is 0.5\r\n x=np.arange(0, 11, 1)#return evenly spaced values within 1 interval between [0,11)\r\n y=rv.pmf(x)#probability mass function\r\n\r\n plt.bar(x, y, width=0.6, color='grey')#make bar chart\r\n plt.savefig('fig.png')\r\n plt.show()\r\n\r\n#the code should not be changed\r\nif __name__ == '__main__':\r\n binom_pmf()"
},
{
"alpha_fraction": 0.3354271352291107,
"alphanum_fraction": 0.37814071774482727,
"avg_line_length": 26.5,
"blob_id": "2910821ed8988a229f6c69b549a64433ebb3d156",
"content_id": "e1e3bb0f4c6f315efd64632d808e5221ab10c44c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 796,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 28,
"path": "/06独立性检验.py",
"repo_name": "XNYu/Statistic",
"src_encoding": "UTF-8",
"text": "from scipy.stats import chi2\r\nclass Solution():\r\n def independence_test(self, A):\r\n if len(A)==1:\r\n return [0.0,None]\r\n if len(A[0])==1:\r\n return [0.0,None]\r\n r = len(A)\r\n c = len(A[0])\r\n x = []\r\n y = []\r\n n = 0.0\r\n C = 0.0\r\n for i in A:\r\n x.append(sum(i))\r\n for i in A[0]:\r\n y.append(0.0)\r\n for i in range(r):\r\n for j in range(c):\r\n y[j]+=A[i][j]\r\n n+=A[i][j]\r\n for i in range(r):\r\n for j in range(c):\r\n C+=((A[i][j]-x[i]*y[j]/n)**2)/(x[i]*y[j]/n)\r\n P = chi2.sf(C,(c-1)*(r-1))\r\n return[round(C,6),round(P,6)]\r\ns = Solution()\r\nprint s.independence_test( [[1.0,2.0,3.0],[2.0,2.0,3.0]])"
},
{
"alpha_fraction": 0.47302159667015076,
"alphanum_fraction": 0.5152877569198608,
"avg_line_length": 23.86046600341797,
"blob_id": "e2731e8ad8678b3848080ed0cd28e0c56b890232",
"content_id": "844aae5e69ad75a82112044046e8eba12a9cd52d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1284,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 43,
"path": "/src/case4.py",
"repo_name": "XNYu/Statistic",
"src_encoding": "GB18030",
"text": "\r\n\"\"\"\r\n描述:\r\n利用python实现简化版单总体T检验函数\r\n输入:\r\na : 非空一维数组;popmean:假设总体期望值;示例输入 : [1.0,2.0,3.0],2.0\r\n输出:\r\n[t-val,p-value]分别代表检验结果T值与其对应的P值;示例输出 : [0.000000,1.000000]\r\n注意:\r\n(1)scipy包只能使用scipy.x.ppf或scipy.x.sf函数\r\n(2)结果保留6位小数点\r\n\"\"\"\r\nimport math\r\nimport numpy\r\nfrom scipy import stats\r\n\r\nclass Solution():\r\n def ttest_1samp(self, a, popmean):\r\n mean = self.mean(a)\r\n var = self.var(a) * len(a) / (len(a) - 1)\r\n s = math.sqrt(var)\r\n t_val = (mean - popmean) / (s / math.sqrt(len(a)))\r\n p_val = stats.t.sf(numpy.abs(t_val), len(a) - 1) * 2\r\n # p_val = 2 * stats.norm.sf(numpy.abs(t_val))\r\n return [round(t_val, 6), round(p_val, 6)]\r\n \r\n def mean(self, arr):\r\n sumArr = 0.0\r\n for tmp in arr:\r\n sumArr += tmp\r\n mean = sumArr / len(arr)\r\n return mean\r\n\r\n def var(self, arr):\r\n mean = self.mean(arr)\r\n sumArr = 0.0\r\n for tmp in arr:\r\n sumArr += (tmp - mean) ** 2\r\n var = sumArr / len(arr)\r\n return var\r\n \r\ns = Solution()\r\na = [1.0, 2.0, 3.0, 5]\r\nprint s.ttest_1samp(a, 2.0)"
},
{
"alpha_fraction": 0.3589394986629486,
"alphanum_fraction": 0.37865397334098816,
"avg_line_length": 22.63793182373047,
"blob_id": "3f9064300244cee894f316d0fe321a952ea593a6",
"content_id": "6289835755e584df4c9a9be8e222b08dafce4307",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1471,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 58,
"path": "/src/单因素方差.py",
"repo_name": "XNYu/Statistic",
"src_encoding": "UTF-8",
"text": "#-*- coding:utf-8 -*-\r\n\r\nclass Solution():\r\n def f_oneway(self, *args):\r\n if args[0]==None:\r\n return [None,None]\r\n a=args\r\n n=0\r\n for a in args:\r\n n+=len(a)\r\n arrlen=len(args[0])\r\n arglen=len(args)\r\n As=self.getXi(args)\r\n print As\r\n QA=0.0\r\n for i in range(arglen):\r\n QA=QA+As[i]*As[i]\r\n QA=QA/arrlen\r\n QT=0.0\r\n for i in range(arglen):\r\n for j in range(arrlen):\r\n QT=QT+args[i][j]*args[i][j]\r\n c=0\r\n for i in As:\r\n c+=i\r\n c=c*c/n\r\n ST=QT-c\r\n SA=QA-c\r\n Se=ST-SA\r\n F=(SA/(arglen-1))/(Se/(n-arglen))\r\n print F\r\n def getXi(self,args):\r\n total=0\r\n arr=[]\r\n arglen=len(args)\r\n for i in range(arglen):\r\n for a in args[i]:\r\n total+=a\r\n arr.append(total)\r\n return arr\r\n# def getQT(self,*args):\r\n# total=0\r\n# for a in args:\r\n# for i in a:\r\n# \r\n# total+=i**2.0\r\n# return total\r\n# def getxi2(self,*args):\r\n# ans=0\r\n# for a in args:\r\n# total=0\r\n# for i in a:\r\n# total+=i\r\n# total=total/len(a)\r\n# ans+=total*total\r\n# return ans\r\ns=Solution()\r\ns.f_oneway([1.0,2.0,3.0],[2.0,2.0,3.0])\r\n \r\n \r\n \r\n "
},
{
"alpha_fraction": 0.39731544256210327,
"alphanum_fraction": 0.43355703353881836,
"avg_line_length": 26.653846740722656,
"blob_id": "f729143f4183cdab3a9de436d63cf36f9504e6e5",
"content_id": "ac9baba628ab29f300bde15b90df1ebbf4e7dbb3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 745,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 26,
"path": "/02方差检验.py",
"repo_name": "XNYu/Statistic",
"src_encoding": "UTF-8",
"text": "from scipy.stats import f\r\nclass Solution():\r\n def f_oneway(self, *args):\r\n if args==None or len(args)==0 or len(args[0])==0:\r\n return[None,None]\r\n r = len(args)\r\n c = len(args[0])\r\n meanr=[]\r\n mean,sa,se=0.0,0.0,0.0\r\n for i in args:\r\n meanr.append(sum(i)/float(len(i)))\r\n mean+=sum(i)\r\n mean/=(r*c)\r\n for i in range(r):\r\n sa+=c*(meanr[i]-mean)**2\r\n for j in range(c):\r\n se+=(args[i][j]-meanr[i])**2\r\n fa=r-1\r\n fe=r*c-r\r\n va=sa/fa\r\n ve=se/fe\r\n F=va/ve\r\n P=f.sf(F,fa,fe)\r\n return[round(F,6),round(P,6)]\r\ns = Solution();\r\nprint s.f_oneway([1.0,2.0,3.0],[2.0,2.0,3.0])\r\n"
},
{
"alpha_fraction": 0.5748987793922424,
"alphanum_fraction": 0.6153846383094788,
"avg_line_length": 25.518518447875977,
"blob_id": "eb972fe3d6f0c8c7679764b5ff2ab0957628bd2d",
"content_id": "d1b8e34e15217d15953aa66723995a438084e769",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 741,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 27,
"path": "/examination/37.py",
"repo_name": "XNYu/Statistic",
"src_encoding": "UTF-8",
"text": "#-*- coding:utf-8 -*-\r\nimport sys\r\nimport numpy as np\r\nfrom scipy.stats import chi2\r\nfrom scipy.stats import norm\r\nimport matplotlib.plot_api as plt\r\n\r\ndef sampling_distribution():\r\n fig, ax = plt.subplots(1, 1)\r\n #display the probability density function\r\n df = 10\r\n x=np.linspace(chi2.ppf(0.01, df), chi2.ppf(0.99, df), 100)\r\n ax.plot(x, chi2.pdf(x, df))\r\n\r\n #simulate the sampling distribution\r\n y = []\r\n for i in range(1000):\r\n r = norm.rvs(loc=5, scale=2, size=df+1)\r\n rchi2 =(df)*np.var(r)/4\r\n y.append(rchi2)\r\n\r\n ax.hist(y, normed=True, alpha=0.2) \r\n plt.savefig('sampling_distribution.png')\r\n\r\n#the code should not be changed\r\nif __name__ == '__main__':\r\n sampling_distribution()"
},
{
"alpha_fraction": 0.35257411003112793,
"alphanum_fraction": 0.4134165346622467,
"avg_line_length": 28.619047164916992,
"blob_id": "116efad096b90e9ec50cc5322f7532f012a6cd19",
"content_id": "634eef03cebbd9e1d7a29873177fe5e1f5fec5f5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 641,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 21,
"path": "/05K-S检验.py",
"repo_name": "XNYu/Statistic",
"src_encoding": "UTF-8",
"text": "class Solution():\r\n def ks_2samp(self, data1, data2):\r\n maxi = 0.0\r\n minr = min(min(data1),min(data2))\r\n maxr = max(max(data1),max(data2))\r\n d = (maxr-minr)/max(len(data1),len(data2))\r\n n = 1\r\n while minr+d*n<maxr+d:\r\n n1 = 0.0\r\n n2 = 0.0\r\n for i in data1:\r\n if i<minr+d*n:\r\n n1+=1\r\n for i in data2:\r\n if i<minr+d*n:\r\n n2+=1\r\n maxi = max(maxi,abs(n1/len(data1)-n2/len(data2))) \r\n n+=1\r\n print round(maxi,6)\r\ns=Solution()\r\ns.ks_2samp([0,1,2,3], [1,2,3,4])"
},
{
"alpha_fraction": 0.5459940433502197,
"alphanum_fraction": 0.5697329640388489,
"avg_line_length": 20.600000381469727,
"blob_id": "805a36a03d8684a0b7a7bf60aa9428a94724996e",
"content_id": "0542cd80286337f15ead037b6ce84858a61a9757",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 337,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 15,
"path": "/examination/test6.py",
"repo_name": "XNYu/Statistic",
"src_encoding": "UTF-8",
"text": "from scipy.stats import binom\r\n\r\ndef nc_of_binom():\r\n rv = binom(10,0.2)\r\n print(rv.mean())\r\n print(rv.var())\r\n print(rv.moment(1))\r\n print(rv.moment(2))\r\n print(rv.moment(3))\r\n print(rv.moment(4))\r\n print(rv.stats(moments='mvsk'))\r\n\r\n#the code should not be changed\r\nif __name__ == '__main__':\r\n nc_of_binom()"
},
{
"alpha_fraction": 0.4455205798149109,
"alphanum_fraction": 0.4878934621810913,
"avg_line_length": 28.66666603088379,
"blob_id": "1b5bcf39a57812a7cc91d1dd6a020e1a9aa93595",
"content_id": "1c3f61893cd83e37a4598fdaa98f7a988a676071",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 826,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 27,
"path": "/10年龄的影响.py",
"repo_name": "XNYu/Statistic",
"src_encoding": "UTF-8",
"text": "import urllib2\r\nclass Solution:\r\n def solve(self):\r\n url='http://112.124.1.3:8060/getData/101.json'\r\n data=urllib2.urlopen(url).read()\r\n babyArr=eval(data)['data']\r\n weight=[]\r\n age=[]\r\n for i in babyArr:\r\n weight.append(i[4])\r\n age.append(i[6])\r\n lxy,lxx=0.0,0.0\r\n meanx=sum(age)/float(len(age))\r\n meany=sum(weight)/float(len(weight))\r\n for i in range(len(age)):\r\n lxx+=(age[i]-meanx)**2\r\n lxy+=(age[i]-meanx)*(weight[i]-meany)\r\n b=lxy/lxx\r\n a=meany-b*meanx\r\n sr,st=0.0,0.0\r\n for i in range(len(age)):\r\n sr+=(a+b*age[i]-meany)**2\r\n st+=(weight[i]-meany)**2\r\n r2=sr/st\r\n return[round(a,6),round(b,6),round(r2,6)]\r\ns = Solution()\r\nprint s.solve()"
},
{
"alpha_fraction": 0.5456989407539368,
"alphanum_fraction": 0.5618279576301575,
"avg_line_length": 20,
"blob_id": "8196eec87c14aeeb1f175d9b9379f0bf10d9ccbe",
"content_id": "4a9e326dc515127c415868a0710d51f5a83c1b71",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 372,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 17,
"path": "/examination/25.py",
"repo_name": "XNYu/Statistic",
"src_encoding": "UTF-8",
"text": "#-*- coding:utf-8 -*-\r\nfrom scipy.stats import expon\r\nfrom log_api import log\r\n\r\ndef nc_of_expon():\r\n rv = expon(scale=2)\r\n log(rv.mean())\r\n log(rv.var())\r\n log(rv.moment(1))\r\n log(rv.moment(2))\r\n log(rv.moment(3))\r\n log(rv.moment(4))\r\n log(rv.stats(moments='mvsk'))\r\n\r\n#the code should not be changed\r\nif __name__ == '__main__':\r\n nc_of_expon()"
},
{
"alpha_fraction": 0.6111111044883728,
"alphanum_fraction": 0.6111111044883728,
"avg_line_length": 16.66666603088379,
"blob_id": "ce96c376aceb280d78b547b3a85c2db0d438724d",
"content_id": "3343eb5096891279ff48425f6f10ce8d434dd2ef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 54,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 3,
"path": "/src/py.py",
"repo_name": "XNYu/Statistic",
"src_encoding": "UTF-8",
"text": "import os\r\n\r\nf=open('F:/player/'+gamenum+'.html','ab')"
},
{
"alpha_fraction": 0.552971601486206,
"alphanum_fraction": 0.5710594058036804,
"avg_line_length": 20.882352828979492,
"blob_id": "f5508902e362b7c66812499fd03b0806b6bb8f69",
"content_id": "c99badfbb22924a8bbd2261945a2678230bbe220",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 387,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 17,
"path": "/examination/24.py",
"repo_name": "XNYu/Statistic",
"src_encoding": "UTF-8",
"text": "#-*- coding:utf-8 -*-\r\nfrom scipy.stats import uniform\r\nfrom log_api import log\r\n\r\ndef nc_of_uniform():\r\n rv = uniform(loc=2, scale=6)\r\n log(rv.mean())\r\n log(rv.var())\r\n log(rv.moment(1))\r\n log(rv.moment(2))\r\n log(rv.moment(3))\r\n log(rv.moment(4))\r\n log(rv.stats(moments='mvsk'))\r\n\r\n#the code should not be changed\r\nif __name__ == '__main__':\r\n nc_of_uniform()"
},
{
"alpha_fraction": 0.582047700881958,
"alphanum_fraction": 0.6072931289672852,
"avg_line_length": 23.535715103149414,
"blob_id": "e745d15fe36f8d01b7d2cccb1cbf5fb443e969bf",
"content_id": "5ee980cbffba1044c4a976d2c3a4db0fd437ee66",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 713,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 28,
"path": "/examination/35.py",
"repo_name": "XNYu/Statistic",
"src_encoding": "UTF-8",
"text": "#-*- coding:utf-8 -*-\r\nimport sys\r\nimport numpy as np\r\nfrom scipy.stats import norm\r\nfrom scipy.stats import expon\r\nimport matplotlib.plot_api as plt\r\n\r\ndef sampling_distribution():\r\n fig, ax = plt.subplots(1, 1)\r\n #display the probability density function\r\n x = np.linspace(-4, 4, 100)\r\n ax.plot(x, norm.pdf(x))\r\n\r\n #simulate the sampling distribution\r\n y = []\r\n n=100\r\n for i in range(1000):\r\n r = expon.rvs(scale=1, size=n)\r\n rsum=np.sum(r)\r\n z=(rsum-n)/np.sqrt(n)\r\n y.append(z)\r\n\r\n ax.hist(y, normed=True, alpha=0.2)\r\n plt.savefig('sampling_distribution.png')\r\n\r\n#the code should not be changed\r\nif __name__ == '__main__':\r\n sampling_distribution()"
},
{
"alpha_fraction": 0.3408239781856537,
"alphanum_fraction": 0.4241572916507721,
"avg_line_length": 32.45161437988281,
"blob_id": "0eec7c1baf28df63b3ade7e886c13f826b2cb9f0",
"content_id": "aae6e03d5b658cd9b97a72e9703cea2be995c42b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1068,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 31,
"path": "/02方差检验again.py",
"repo_name": "XNYu/Statistic",
"src_encoding": "UTF-8",
"text": "from scipy.stats import f\r\nclass Solution():\r\n def f_oneway(self, *args):\r\n if len(args)==0:\r\n return[None,None]\r\n elif len(args)==1:\r\n return[None,None]\r\n else:\r\n meanr = []\r\n mean = 0.0\r\n sa = 0.0\r\n se = 0.0\r\n for i in args:\r\n meanr.append((float)(sum(i))/len(i))\r\n mean+=sum(i)\r\n mean/=(len(args)*len(args[0]))\r\n for i in meanr:\r\n sa+=len(args[0])*(i-mean)**2\r\n for i in range(len(meanr)):\r\n for j in range(len(args[0])):\r\n se+=(args[i][j]-meanr[i])**2\r\n fa = len(args)-1\r\n fe = len(args)*len(args[0])-len(args)\r\n va = sa/fa\r\n ve = se/fe\r\n F = va/ve\r\n P = f.sf(F,fa,fe) \r\n return[round(F,6),round(P,6)]\r\ns = Solution()\r\nprint s.f_oneway([1.0,2.0,3.0],[2.0,2.0,3.0])\r\nprint s.f_oneway([25.6,22.2,28.0,29.8],[24.4,30.0,29.0,27.5],[25.0,27.7,23.0,32.2],[28.8,28.0,31.5,25.9],[20.6,21.2,22.0,21.2])\r\n"
},
{
"alpha_fraction": 0.6040403842926025,
"alphanum_fraction": 0.6343434453010559,
"avg_line_length": 27.235294342041016,
"blob_id": "76079b1a4f556953628c9ca6f569028baf49978b",
"content_id": "586f079ead8c1274c1455daac2aeaddfac2c1db0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 495,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 17,
"path": "/examination/11.py",
"repo_name": "XNYu/Statistic",
"src_encoding": "UTF-8",
"text": "#-*- coding:utf-8 -*-\r\nimport sys\r\nimport numpy as np\r\nimport matplotlib.plot_api as plt\r\nfrom scipy.stats import geom as G\r\n\r\ndef geom_pmf():\r\n rv=G(0.2)#probability of success is 0.2\r\n x = np.arange(1, 11, 1)#return evenly spaced values within 1 interval between [1,11)\r\n y = rv.pmf(x)#probability mass function\r\n \r\n plt.bar(x, y, width=0.6, color='grey')#make bar chart\r\n plt.savefig('fig.png')\r\n\r\n#the code should not be changed\r\nif __name__ == '__main__':\r\n geom_pmf()"
},
{
"alpha_fraction": 0.5248091816902161,
"alphanum_fraction": 0.5667939186096191,
"avg_line_length": 23.047618865966797,
"blob_id": "4b53b159f7012f1909654f76ae2c461b4f7fecf6",
"content_id": "b35da36b5a089932986d7457342f8d8aeee21d07",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 524,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 21,
"path": "/examination/28.py",
"repo_name": "XNYu/Statistic",
"src_encoding": "UTF-8",
"text": "#-*- coding:utf-8 -*-\r\nimport sys\r\nimport numpy as np\r\nimport matplotlib.plot_api as plt\r\nfrom scipy.stats import bernoulli\r\n\r\ndef law_of_large_numbers():\r\n x = np.arange(1, 1001, 1) \r\n r = bernoulli.rvs(0.3, size=1000)\r\n y = []\r\n rsum =0.0\r\n for i in range(1000):\r\n if r[i]==1:\r\n rsum=rsum+1\r\n y.append(rsum/(i+1))\r\n plt.plot(x, y, color='red')\r\n plt.savefig('law_of_large_numbers.png')\r\n\r\n#the code should not be changed\r\nif __name__ == '__main__':\r\n law_of_large_numbers()"
},
{
"alpha_fraction": 0.403191477060318,
"alphanum_fraction": 0.42553192377090454,
"avg_line_length": 20.380952835083008,
"blob_id": "76a332a96503e687a61a7b55a57397600acb1905",
"content_id": "798e553db3bbf28411b46239be45a437090d6d48",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 940,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 42,
"path": "/src/test1.py",
"repo_name": "XNYu/Statistic",
"src_encoding": "UTF-8",
"text": "class Solution():\r\n def getMean(self,a):\r\n result=0\r\n for i in range(len(a)):\r\n result+=a[i]\r\n return result/len(a)\r\n\r\n def getV(self,a,c):\r\n result=0\r\n mean=self.getMean(a)\r\n for i in range(len(a)):\r\n result+=(a[i]-mean)**c\r\n return result/len(a)\r\n \r\n def describe(self, a):\r\n if a==None :\r\n return None\r\n if len(a)==1:\r\n return [a[0],a[0],None,None]\r\n else:\r\n result=[]\r\n mean=self.getMean(a)\r\n v2=self.getV(a,2)\r\n v3=self.getV(a,3)\r\n v4=self.getV(a,4)\r\n\r\n var=v2\r\n\r\n skew=v3/(v2**1.5)\r\n\r\n kurt=(v4/v2**2)-3\r\n\r\n result.append(mean)\r\n result.append(var)\r\n result.append(skew)\r\n result.append(kurt)\r\n \r\n return result\r\n\r\ns=Solution()\r\na=[1]\r\nprint(s.describe(a))\r\n"
},
{
"alpha_fraction": 0.5317220687866211,
"alphanum_fraction": 0.5921450257301331,
"avg_line_length": 23.538461685180664,
"blob_id": "1d1f7fe321a78c11760b7a0626b444afce9510e8",
"content_id": "1a10ce9736da4febbbe9b4141fd27d7962ae6721",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 662,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 26,
"path": "/examination/29.py",
"repo_name": "XNYu/Statistic",
"src_encoding": "UTF-8",
"text": "#-*- coding:utf-8 -*-\r\nimport sys\r\nimport numpy as np\r\nimport matplotlib.plot_api as plt\r\nfrom scipy.stats import binom\r\nfrom scipy.stats import poisson\r\nfrom scipy.stats import norm\r\n\r\ndef law_of_large_numbers():\r\n x = np.arange(1, 1001, 1) \r\n r1 = binom.rvs(10, 0.6, size=1000)\r\n r2 = poisson.rvs(mu=6, size=1000)\r\n r3 = norm.rvs(loc=6, size=1000)\r\n\r\n y = []\r\n rsum=0.0\r\n for i in range(1000):\r\n rsum=rsum+(r1[i]+r2[i]+r3[i])\r\n y.append(rsum/((i+1)*3)-6)\r\n\r\n plt.plot(x, y, color='red')\r\n plt.savefig('law_of_large_numbers.png')\r\n\r\n#the code should not be changed\r\nif __name__ == '__main__':\r\n law_of_large_numbers()"
},
{
"alpha_fraction": 0.5799372792243958,
"alphanum_fraction": 0.6238244771957397,
"avg_line_length": 30,
"blob_id": "4b1aab6764783e91556873111cdc9d560fd3512a",
"content_id": "4a763a7f622d5c04dfbc8fbe852bddbde6634687",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 638,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 20,
"path": "/examination/14.py",
"repo_name": "XNYu/Statistic",
"src_encoding": "UTF-8",
"text": "#-*- coding:utf-8 -*-\r\nimport sys\r\nimport numpy as np\r\nimport matplotlib.plot_api as plt\r\nfrom scipy.stats import expon as E\r\n\r\ndef expon_pdf():\r\n x = np.linspace(0, 20, 100)#return evenly spaced samples, calculated over the interval [0,20]\r\n rv1=E(scale = 1.5)#the scale is 1.5\r\n rv2=E(scale = 1.0)#the scale is 1.0\r\n rv3=E(scale = 0.5)#the scale is 0.5\r\n\r\n plt.plot(x, rv1.pdf(x), color='green')#make chart\r\n plt.plot(x, rv2.pdf(x), color='blue')#make chart\r\n plt.plot(x, rv3.pdf(x), color='red')#make chart\r\n plt.savefig('fig.png')\r\n\r\n#the code should not be changed\r\nif __name__ == '__main__':\r\n expon_pdf()"
},
{
"alpha_fraction": 0.5647348761558533,
"alphanum_fraction": 0.6029593348503113,
"avg_line_length": 27.035715103149414,
"blob_id": "2a9d9d6fa73e78731400b2167afcb433b4065997",
"content_id": "21161165a0ea68b6f9496a054feb3ea26b232d99",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 811,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 28,
"path": "/examination/39.py",
"repo_name": "XNYu/Statistic",
"src_encoding": "UTF-8",
"text": "#-*- coding:utf-8 -*-\r\nimport sys\r\nimport numpy as np\r\nfrom scipy.stats import f\r\nfrom scipy.stats import norm\r\nimport matplotlib.plot_api as plt\r\n\r\ndef sampling_distribution():\r\n fig, ax = plt.subplots(1, 1)\r\n #display the probability density function\r\n dfn, dfm = 10, 5\r\n x=np.linspace(f.ppf(0.01, dfn, dfm), f.ppf(0.99, dfn, dfm), 100)\r\n ax.plot(x, f.pdf(x, dfn, dfm))\r\n \r\n #simulate the sampling distribution\r\n y = []\r\n for i in range(1000):\r\n r1 = norm.rvs(loc=5, scale=2, size=dfn+1)\r\n r2 = norm.rvs(loc=3, scale=2, size=dfm+1)\r\n rf =np.var(r1)/np.var(r2)\r\n y.append(rf)\r\n\r\n ax.hist(y, normed=True, alpha=0.2)\r\n plt.savefig('sampling_distribution.png')\r\n\r\n#the code should not be changed\r\nif __name__ == '__main__':\r\n sampling_distribution()"
},
{
"alpha_fraction": 0.3741588294506073,
"alphanum_fraction": 0.4212651550769806,
"avg_line_length": 23.55172348022461,
"blob_id": "d9b077a23d861d13be441c48fcc865eaf52857e9",
"content_id": "970f40d8a2701e4dcf04118a8fc84496f1fe31f7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 743,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 29,
"path": "/examination/test3.py",
"repo_name": "XNYu/Statistic",
"src_encoding": "UTF-8",
"text": "class Solution():\r\n def solve(self, A):\r\n #use isPalindrom function to check if the string is palindrome or not\r\n sss=[]\r\n for ss in A:\r\n if s.isPalindrome(ss):\r\n sss.append(ss)\r\n print sss \r\n pass\r\n\r\n def isPalindrome(self, x):\r\n mm = list(x)\r\n long = mm.__len__()\r\n if (long==1)|(long==0):\r\n return True\r\n else:\r\n if(mm[0]==mm[long-1]):\r\n del mm[long-1]\r\n del mm[0]\r\n return s.isPalindrome(mm)\r\n else:\r\n return False\r\n \r\n pass\r\n \r\n \r\n \r\ns =Solution()\r\nprint s.solve(['123', '232', '4556554','12123', '3443','1314131'] )\r\n\r\n"
},
{
"alpha_fraction": 0.5570651888847351,
"alphanum_fraction": 0.6222826242446899,
"avg_line_length": 22.66666603088379,
"blob_id": "df89141592b60f2ef99567da04d3d3f7ee0cf1a0",
"content_id": "663ee390495a200994d6802cba5b0441f297a5e2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 478,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 15,
"path": "/examination/52卡芳.py",
"repo_name": "XNYu/Statistic",
"src_encoding": "UTF-8",
"text": "# 卡方拟合优度检验通常用于根据样本的频数分布来推断总体的分布。下面为卡方拟合优度检验问题的python实现,请完成以下练习:\r\n#-*- coding:utf-8 -*-\r\nfrom scipy import stats\r\nfrom log_api import log\r\n\r\ndef chisquare():\r\n A=[16, 18, 16, 14, 12, 12]\r\n B=[16, 16, 16, 16, 16, 8]\r\n\r\n log(stats.chisquare(A))\r\n log(stats.chisquare(A, f_exp=B))\r\n\r\n#the code should not be changed\r\nif __name__ == '__main__':\r\n chisquare()"
},
{
"alpha_fraction": 0.39064475893974304,
"alphanum_fraction": 0.4083438813686371,
"avg_line_length": 23.387096405029297,
"blob_id": "c23794efa9342a558d8d7ac3c869ce614719823c",
"content_id": "177cfb5e0ddb50ab2a717e0d5901e1cf0692e4b9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1582,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 62,
"path": "/src/test2.py",
"repo_name": "XNYu/Statistic",
"src_encoding": "UTF-8",
"text": "from scipy.stats import f\r\n\r\nclass Solution():\r\n def f_oneway(self, *args):\r\n if args[0]==None:\r\n return [None,None]\r\n result=[]\r\n n=len(args)*len(args[0])\r\n m=len(args)\r\n fS=m-1\r\n fe=n-m\r\n SA=self.getSA(*args)\r\n Se=self.getSe(*args)\r\n VA=SA/fS\r\n Ve=Se/fe\r\n FA=VA/Ve\r\n F=f(fS,fe)\r\n p=F.sf(FA)\r\n result.append(float('%.6f'%FA))\r\n result.append(float('%.6f'%p))\r\n return [float('%.6f'%FA),float('%.6f'%p)]\r\n\r\n def getSe(self,*args):\r\n result=0\r\n r=len(args[0])\r\n m=len(args)\r\n xi_avg=[]\r\n for i in range(m):\r\n temp=args[i]\r\n avg_t=0\r\n for j in range(r):\r\n avg_t+=temp[j]\r\n avg_t=avg_t/r\r\n xi_avg.append(avg_t)\r\n for i in range(m):\r\n temp=args[i]\r\n for j in range(r):\r\n result+=(temp[j]-xi_avg[i])**2\r\n return result\r\n \r\n def getSA(self,*args):\r\n result=0\r\n r=len(args[0])\r\n m=len(args)\r\n x_avg=0\r\n xi_avg=[]\r\n for i in range(m):\r\n temp=args[i]\r\n avg_t=0\r\n for j in range(r):\r\n x_avg+=temp[j]\r\n avg_t+=temp[j]\r\n avg_t=avg_t/r\r\n xi_avg.append(avg_t)\r\n x_avg=x_avg/(m*r)\r\n for i in range(m):\r\n result+=(xi_avg[i]-x_avg)**2\r\n result=result*r\r\n return result\r\n\r\ns=Solution()\r\nprint(s.f_oneway([1.0,2.0,3.0],[2.0,2.0,3.0]))\r\n\r\n \r\n"
},
{
"alpha_fraction": 0.5580447912216187,
"alphanum_fraction": 0.5763747692108154,
"avg_line_length": 21.4761905670166,
"blob_id": "6323d56366b27a5a231cbaab7a270e66adb23de8",
"content_id": "01bff1050d0276d531844116144150fc0e074e56",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 491,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 21,
"path": "/examination/30.py",
"repo_name": "XNYu/Statistic",
"src_encoding": "UTF-8",
"text": "#-*- coding:utf-8 -*-\r\nimport sys\r\nimport numpy as np\r\nfrom scipy.stats import expon\r\nimport matplotlib.plot_api as plt\r\n\r\ndef central_limit_theorem():\r\n y = []\r\n n=100\r\n for i in range(1000):\r\n r = expon.rvs(scale=1, size=n)\r\n rsum=np.sum(r)\r\n z=(rsum-n)/np.sqrt(n)\r\n y.append(z)\r\n \r\n plt.hist(y,color='grey')\r\n plt.savefig('central_limit_theorem.png')\r\n\r\n#the code should not be changed\r\nif __name__ == '__main__':\r\n central_limit_theorem()"
},
{
"alpha_fraction": 0.42942050099372864,
"alphanum_fraction": 0.4962852895259857,
"avg_line_length": 26.125,
"blob_id": "bf605745316c13435c4939cf0222748f74417f2a",
"content_id": "7dcd4640e563d1f001b8c5c23344449640fba9c5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 673,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 24,
"path": "/08宝宝什么时候出生.py",
"repo_name": "XNYu/Statistic",
"src_encoding": "UTF-8",
"text": "import urllib2\r\nfrom scipy.stats import norm as N\r\n\r\nclass Solution:\r\n def solve(self):\r\n url = 'http://112.124.1.3:8050/getData/101'\r\n data = urllib2.urlopen(url).read()\r\n \r\n babyArr = eval(data)['data']\r\n baby=[]\r\n for i in babyArr:\r\n if i[2]>5 and i[2]<=10:\r\n baby.append(i[2]*4.33)\r\n if i[2]<49 and i[2]>25:\r\n baby.append(i[2])\r\n mean=sum(baby)/float(len(baby))\r\n print mean\r\n z=-N.ppf(0.025)\r\n lower=mean-z*4/(len(baby)**0.5)\r\n higher=mean+z*4/(len(baby)**0.5)\r\n print[round(lower,6),round(higher,6)]\r\n\r\nso=Solution()\r\nso.solve()"
},
{
"alpha_fraction": 0.5222222208976746,
"alphanum_fraction": 0.5833333134651184,
"avg_line_length": 18.22222137451172,
"blob_id": "043414c8898ab2099d621d47a05a57d1b0ae27b4",
"content_id": "b03a39ff0af1a11b577f357deab88c72a03b178e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 180,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 9,
"path": "/src/1.py",
"repo_name": "XNYu/Statistic",
"src_encoding": "UTF-8",
"text": "import random\r\nimport math\r\na=[]\r\nb=[]\r\nfor c in range(1,40):\r\n for i in range(1,200):\r\n a.append(random.randint(1,199))\r\n b.append(sum(a)/len(a))\r\nprint sum(b)/len(b)"
},
{
"alpha_fraction": 0.6126482486724854,
"alphanum_fraction": 0.6422924995422363,
"avg_line_length": 27.882352828979492,
"blob_id": "c98f66dcf85eb9b722162d3b1ce828fca55108ec",
"content_id": "81b1220805ce4e72a891200b92c83ef3705964de",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 506,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 17,
"path": "/examination/12.py",
"repo_name": "XNYu/Statistic",
"src_encoding": "UTF-8",
"text": "#-*- coding:utf-8 -*-\r\nimport sys\r\nimport numpy as np\r\nimport matplotlib.plot_api as plt\r\nfrom scipy.stats import poisson as Pie\r\n\r\ndef poisson_pmf():\r\n rv=Pie(4.5)#the average incident is 4.5\r\n x = np.arange(0, 11, 1)#return evenly spaced values within 1 interval between [1,11)\r\n y = rv.pmf(x)#probability mass function\r\n \r\n plt.bar(x, y, width=0.6, color='grey')#make bar chart\r\n plt.savefig('fig.png')\r\n\r\n#the code should not be changed\r\nif __name__ == '__main__':\r\n poisson_pmf()"
},
{
"alpha_fraction": 0.4334140419960022,
"alphanum_fraction": 0.48184019327163696,
"avg_line_length": 23.9375,
"blob_id": "dbeac156e4365617db65b21fa3ec13e2bb66fb78",
"content_id": "874b2247dd07d154ee7888b6467c592d7ab3ea77",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 413,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 16,
"path": "/04T检验.py",
"repo_name": "XNYu/Statistic",
"src_encoding": "UTF-8",
"text": "from scipy.stats import t\r\nclass Solution():\r\n def ttest_1samp(self, a, popmean):\r\n mean,s=0.0,0.0\r\n mean=sum(a)/float(len(a))\r\n for i in a:\r\n s+=(i-mean)**2\r\n s/=(len(a)-1)\r\n s=s**0.5\r\n T=(mean-popmean)/(s/(len(a))**0.5)\r\n P=t.sf(abs(T),len(a)-1)*2\r\n return[round(T,6),round(P,6)]\r\ns=Solution()\r\na=[1,2,3]\r\npop = 2\r\nprint s.ttest_1samp(a,pop)"
},
{
"alpha_fraction": 0.522765576839447,
"alphanum_fraction": 0.6014614701271057,
"avg_line_length": 27.58333396911621,
"blob_id": "78d9d8ec3a86aeddbf44c682114fd941a3f19686",
"content_id": "fdc2b3d093ae321f7840b014d771777a2548b0ba",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2541,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 60,
"path": "/src/case8.py",
"repo_name": "XNYu/Statistic",
"src_encoding": "UTF-8",
"text": "#-*- coding:utf-8 -*-\r\n'''\r\n描述:\r\n\r\n美国疾病控制与预防中心(CDC)从1973年开始推行全国家庭成长调查(NSFG),目的是收集(美国)“家庭的生活、婚姻状况、生育、避孕和男女健康信息。”\r\n现有从2002年1月到3月收集的调查数据(url为http://112.124.1.3:8060/getData/101.json),包含上万条调查数据,每条数据包括 caseid(标识符),\r\n prglength(婴儿第几周出生), outcome(怀孕结果,1表示活产), totalwgt_oz(婴儿出生重量,单位盎司), \r\nbirthord(第几胎,1表示第一胎), agepreg(怀孕时年龄), finalwgt(被调查者的统计权重,表明这名调查者所代表的人群在美国总人口中的比例。过采样人群的权重偏低)等信息\r\n另据某研究显示,婴儿出生周数符合方差为16的正态分布,试写函数solve估计婴儿平均出生周数的置信区间(置信水平为95%)。\r\n输入:\r\n\r\n调查样本数据,格式为{data\":[[1, 1, 39, 1, 141, 1, 33.16, 6448.271111704751], [1, 2, 39, 1, 126, 2, 39.25, 6448.271111704751], ...]}\r\n\r\n输出:\r\n\r\n[lower,upper]分别代表平均出生周数的估计下限与上限\r\n\r\n注意:\r\n\r\n(1)婴儿第几周出生数据由于被调查人选填错误等原因出现了一些不合理数据,比如错填了月份(5<prglength<=10),\r\n其他错填(prglength<=5, 10<prglength<=25, prglength>=49)\r\n,对于错填月份的情况,将月份*4.33作为其周数,对于其他错填情况则舍弃此条数据\r\n'''\r\n\r\nimport urllib2\r\nimport scipy.stats as ss\r\nimport numpy as np\r\nclass Solution:\r\n def solve(self):\r\n data = self.getwebdatass()\r\n dara=eval(data)['data']\r\n crr=[]\r\n for i in dara:\r\n prglength = i[2]\r\n if 5<prglength and prglength<=10:\r\n i=prglength*4.33\r\n crr.append(i)\r\n elif 25<prglength and prglength<49:\r\n crr.append(prglength)\r\n for i in crr:\r\n crr.remove(i)\r\n i=float(i)\r\n crr.append(i)\r\n n=len(crr)\r\n p=0.025\r\n ssans=-ss.norm.ppf(p) \r\n tks= 4.0*ssans/np.sqrt(n)\r\n meanx=self.getMean(crr)\r\n return[round(meanx-tks,6),round(meanx+tks,6)]\r\n def getwebdatass(self):\r\n url = 'http://112.124.1.3:8050/getData/101'\r\n data = urllib2.urlopen(url).read()\r\n return data\r\n def getMean(self,arr):\r\n return sum(arr)/len(arr)\r\n\r\n \r\n \r\nso=Solution()\r\nso.solve()\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.2834506928920746,
"alphanum_fraction": 0.36883804202079773,
"avg_line_length": 25.512195587158203,
"blob_id": "82e0355a40782760b9544792bb82a79be550b991",
"content_id": "26f5f7c058df4e0445aeb86db96396ca32b2c69e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1136,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 41,
"path": "/09第一胎VS第二胎.py",
"repo_name": "XNYu/Statistic",
"src_encoding": "UTF-8",
"text": "import urllib2\r\nfrom scipy.stats import chi2\r\nclass Solution:\r\n def solve(self):\r\n url='http://112.124.1.3:8060/getData/101.json'\r\n data=urllib2.urlopen(url).read()\r\n babyArr=eval(data)['data']\r\n baby=[]\r\n num=[]\r\n for i in babyArr:\r\n if i[2]<=10 and i[2]>5:\r\n baby.append(i[2]*4.33)\r\n num.append(i[5])\r\n if i[2]<49 and i[2]>25:\r\n baby.append(i[2])\r\n num.append(i[5])\r\n a1,a2,a3,n1,n2,n3=0.0,0.0,0.0,0.0,0.0,0.0\r\n for i in range(len(baby)):\r\n if baby[i]<=37:\r\n a1+=1\r\n if num[i]==1:\r\n n1+=1\r\n elif baby[i]>=41:\r\n a3+=1\r\n if num[i]==1:\r\n n3+=1\r\n else:\r\n a2+=1\r\n if num[i]==1:\r\n n2+=1\r\n a=a1+a2+a3\r\n p1=a1/a\r\n p2=a2/a\r\n p3=a3/a\r\n n=n1+n2+n3\r\n c=n1**2/(n*p1)+n2**2/(n*p2)+n3**2/(n*p3)-n\r\n p=chi2.sf(c,2)\r\n print[c,p]\r\n \r\ns=Solution()\r\ns.solve()\r\n "
},
{
"alpha_fraction": 0.5213754773139954,
"alphanum_fraction": 0.5594795346260071,
"avg_line_length": 23.069766998291016,
"blob_id": "3f5fc8ba7ef218ce82f3c82086d964935b37e2e4",
"content_id": "b6d2b3c33669ef3184137c23c4bcb616b2716c1b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1076,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 43,
"path": "/examination/57回归分析.py",
"repo_name": "XNYu/Statistic",
"src_encoding": "UTF-8",
"text": "#-*- coding:utf-8 -*-\r\nimport matplotlib.plot_api as plt\r\nimport numpy as np\r\nfrom scipy import stats\r\nfrom log_api import log\r\n\r\ndef linregress1():\r\n x = np.linspace(-5, 5, num=150)\r\n y = x + np.random.normal(size=x.size)\r\n y[12:14] += 10 \r\n \r\n slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)\r\n log(slope)\r\n log(intercept)\r\n log(r_value)\r\n log(p_value)\r\n log(std_err)\r\n \r\n plt.plot(x, y, 'b.')\r\n plt.plot(x, slope * x + intercept, 'r-')\r\n plt.savefig('linregress1.png')\r\n \r\ndef linregress2():\r\n x = np.linspace(-5, 5, num=150)\r\n y = x**2 + np.random.normal(size=x.size)\r\n y[12:14] += 10 \r\n y[137:141] -= 6\r\n x1=x**2\r\n slope, intercept, r_value, p_value, std_err = stats.linregress(x1,y)\r\n log(slope)\r\n log(intercept)\r\n log(r_value)\r\n log(p_value)\r\n log(std_err)\r\n \r\n plt.plot(x, y, 'b.')\r\n plt.plot(x, slope * x1 + intercept, 'r-')\r\n plt.savefig('linregress2.png')\r\n\r\n#the code should not be changed\r\nif __name__ == '__main__':\r\n linregress1()\r\n linregress2()"
},
{
"alpha_fraction": 0.3238566219806671,
"alphanum_fraction": 0.34610629081726074,
"avg_line_length": 24.700000762939453,
"blob_id": "8694c8527848bbf1ac9f43bf9a3cbf0bee902b0e",
"content_id": "e0c1a157eab34ed9b1937fd1af6f7b44a67bda23",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 809,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 30,
"path": "/src/case2.py",
"repo_name": "XNYu/Statistic",
"src_encoding": "UTF-8",
"text": "from scipy import stats\r\n\r\nclass Solution():\r\n def f_oneway(self, *args):\r\n if not len(args):\r\n return [None, None]\r\n sum = 0.0\r\n n = 0.0\r\n QT = 0.0\r\n QA = 0.0\r\n for arr in args:\r\n if not len(arr):\r\n return [None, None]\r\n sumAi = 0.0\r\n for i in arr:\r\n sum += i\r\n n += 1\r\n QT += i ** 2\r\n sumAi += i\r\n QA += sumAi ** 2 / len(arr)\r\n C = sum ** 2 / n\r\n ST = QT - C\r\n SA = QA - C\r\n Se = ST - SA\r\n fT = n - 1\r\n fA = len(args) - 1\r\n fe = n - len(args)\r\n f_val = SA / fA / (Se / fe)\r\n p_val = stats.f.sf(f_val, fA, fe)\r\n return [round(f_val, 6), round(p_val, 6)]\r\n "
},
{
"alpha_fraction": 0.5017300844192505,
"alphanum_fraction": 0.5622837543487549,
"avg_line_length": 23.217391967773438,
"blob_id": "6db4b0e18a5f9f68d6345eb91a3bc2b341171bc0",
"content_id": "6ec4c2b686737654993608947f857f18092b5256",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 578,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 23,
"path": "/examination/03.py",
"repo_name": "XNYu/Statistic",
"src_encoding": "UTF-8",
"text": "#-*- coding:utf-8 -*-\r\nimport numpy as np\r\nimport matplotlib.plot_api as plt\r\n\r\n#you can write your code here\r\ndef draw():\r\n #get input data\r\n menMeans = (20, 35, 30, 35, 27)\r\n menStd = (2, 3, 4, 1, 2)\r\n womenMeans = (25, 32, 34, 20, 25)\r\n womenStd = (3, 5, 2, 3, 3)\r\n\r\n ind = np.arange(5)\r\n width = 0.35\r\n # the histogram of the data\r\n plt.bar(ind, menMeans, width, color='r')\r\n plt.bar(ind+width, womenMeans, width, color='y')\r\n #show image\r\n plt.savefig('fig.png')\r\n\r\n#the code should not be changed\r\nif __name__ == '__main__':\r\n draw()"
},
{
"alpha_fraction": 0.36858972907066345,
"alphanum_fraction": 0.39529913663864136,
"avg_line_length": 22.63157844543457,
"blob_id": "abb985a2e570df6592f1598c5c6df7abbf26ab1e",
"content_id": "62023a3f217c9b9ac0423ced0733439254e08470",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 936,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 38,
"path": "/src/case1.py",
"repo_name": "XNYu/Statistic",
"src_encoding": "UTF-8",
"text": "#-*- coding:utf-8 -*-\r\nclass Solution():\r\n def describe(self, a):\r\n length=len(a)\r\n xave=self.ave(a)\r\n fc=self.getCifang(a, xave, 2)\r\n if(length==0):\r\n xave=0\r\n fcxz=0\r\n pd=0\r\n fd=-3\r\n if(length==1):\r\n fcxz=fc\r\n pd=0\r\n fd=-3\r\n else:\r\n fcxz=fc*length/(length-1)\r\n pd=self.getCifang(a, xave, 3)/(fc**(3./2))\r\n fd=self.getCifang(a, xave, 4)/(fc**2./1)-3\r\n arr=[]\r\n arr.append(xave)\r\n arr.append(fcxz)\r\n arr.append(pd)\r\n arr.append(fd)\r\n return arr\r\n def ave(self,a):\r\n b=0\r\n for i in a:\r\n b+=i\r\n b=1.0*b\r\n return b/a.__len__()\r\n def getCifang(self,a,xave,cishu):\r\n b=0\r\n lenth=0.0\r\n for i in a:\r\n b+=((i-xave)**cishu)\r\n lenth+=1\r\n return b/lenth\r\n"
},
{
"alpha_fraction": 0.40032678842544556,
"alphanum_fraction": 0.43382352590560913,
"avg_line_length": 27.85365867614746,
"blob_id": "adbada3c1d72deab5683651e186d89dc2c61c648",
"content_id": "31939a1585e7607cf8ca4594918a6e0c7d2e5955",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1224,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 41,
"path": "/src/case3.py",
"repo_name": "XNYu/Statistic",
"src_encoding": "UTF-8",
"text": "\r\nimport math\r\nfrom scipy import stats\r\n\r\nclass Solution():\r\n def pearsonr(self, x, y):\r\n if x is None or y is None or len(x) == 0:\r\n return [None, None]\r\n sx = math.sqrt(self.var(x) * len(x) / (len(x) - 1))\r\n sy = math.sqrt(self.var(y) * len(y) / (len(y) - 1))\r\n meanx = self.mean(x)\r\n meany = self.mean(y)\r\n rxy = 0.0\r\n for i in range(len(x)):\r\n rxy += ((x[i] - meanx) / sx) * ((y[i] - meany) / sy)\r\n rxy /= (len(x) - 1)\r\n if rxy == 1 or (len(x) - 2) == 0:\r\n return [1.0, 0.0]\r\n t = rxy * math.sqrt((len(x) - 2) / (1 - rxy ** 2))\r\n p_val = stats.t.sf(abs(t), len(x) - 2) * 2\r\n return [round(rxy, 6), round(p_val, 6)]\r\n \r\n def mean(self, arr):\r\n sumArr = 0.0\r\n for t in arr:\r\n sumArr += t\r\n mean = sumArr / len(arr)\r\n return mean\r\n\r\n def var(self, arr):\r\n mean = self.mean(arr)\r\n sumArr = 0.0\r\n for t in arr:\r\n sumArr += (t - mean) ** 2\r\n var = sumArr / len(arr)\r\n return var\r\n \r\ns = Solution()\r\nx = [1.0, 2.0, 3.0, 4.0]\r\ny = [2.0, 2.0, 3.0, 5.0]\r\nprint stats.pearsonr(x, y)\r\nprint s.pearsonr(x, y)"
},
{
"alpha_fraction": 0.5517241358757019,
"alphanum_fraction": 0.5676392316818237,
"avg_line_length": 20.294116973876953,
"blob_id": "8e9656be40d3c4546c13bdfffe9cbf1bd301000a",
"content_id": "49e97d3f90efdabe1b37b57291435a289f21c992",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 377,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 17,
"path": "/examination/23.py",
"repo_name": "XNYu/Statistic",
"src_encoding": "UTF-8",
"text": "#-*- coding:utf-8 -*-\r\nfrom scipy.stats import poisson\r\nfrom log_api import log\r\n\r\ndef nc_of_poisson():\r\n rv = poisson(mu=5)\r\n log(rv.mean())\r\n log(rv.var())\r\n log(rv.moment(1))\r\n log(rv.moment(2))\r\n log(rv.moment(3))\r\n log(rv.moment(4))\r\n log(rv.stats(moments='mvsk'))\r\n\r\n#the code should not be changed\r\nif __name__ == '__main__':\r\n nc_of_poisson()"
}
] | 52 |
n30chido/odoo-mexico
|
https://github.com/n30chido/odoo-mexico
|
e40125b144c157a39c93a9da0c24eada3e80eec3
|
5cd81329218eb16b7dcd60df7ab112a7e19941f9
|
4ad66a52b939f9d3a3b2aa24aebbc15d04e33af5
|
refs/heads/master
| 2021-01-20T10:53:57.702794 | 2017-02-27T19:21:00 | 2017-02-27T19:21:00 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6648477911949158,
"alphanum_fraction": 0.6703044176101685,
"avg_line_length": 47.90140914916992,
"blob_id": "e0e420c8ec1ffeff55c9097c9b0b89fefb73c5cf",
"content_id": "4ead0ed351053ed0414cd4daee83ad6362289feb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3486,
"license_type": "no_license",
"max_line_length": 164,
"num_lines": 71,
"path": "/l10n_mx_import_info/stock.py",
"repo_name": "n30chido/odoo-mexico",
"src_encoding": "UTF-8",
"text": "# -*- encoding: utf-8 -*-\n# Author=Nhomar Hernandez [email protected]\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.\n\nimport time\nfrom openerp.osv import fields, osv\nfrom openerp.tools.translate import _\nfrom openerp import pooler, tools\nfrom openerp import release\n\n\nclass stock_quant_package(osv.Model):\n _inherit = \"stock.quant.package\"\n _columns = {\n 'import_id': fields.many2one('import.info', 'Pedimento Aduanal', required=False,\n help=\"Imformación de Importación (Pedimento aduanal), necesaria para Facturación Electrónica.\"),\n }\n\n \nclass stock_move(osv.osv):\n _inherit = \"stock.move\" \n \n def _get_invoice_line_vals(self, cr, uid, move, partner, inv_type, context=None):\n \n res = super(stock_move, self)._get_invoice_line_vals(cr, uid, move, partner, inv_type, context=context)\n tracking_ids = []\n invoice_line_custom_obj = self.pool.get('account.invoice.line.customs')\n for link in move.linked_move_operation_ids:\n if link.operation_id and link.operation_id.package_id and link.operation_id.package_id.import_id:\n tracking_ids.append(link.operation_id.package_id.import_id.id)\n if tracking_ids:\n res.update({'import_ids': [(6,0,tracking_ids)]})\n return res\n\n\n#class sale_order_line(osv.Model):\n# _inherit = \"sale.order.line\"\n \n# def _prepare_order_line_invoice_line(self, 
cr, uid, line, account_id=False, context=None):\n \"\"\"Prepare the dict of values to create the new invoice line for a\n sales order line. This method may be overridden to implement custom\n invoice generation (making sure to call super() to establish\n a clean extension chain).\n\n :param browse_record line: sale.order.line record to invoice\n :param int account_id: optional ID of a G/L account to force\n (this is used for returning products including service)\n :return: dict of values to create() the invoice line\n \"\"\"\n# stock_move_obj = self.pool.get('stock.move')\n# invoice_line_custom_obj = self.pool.get('account.invoice.line.customs')\n# res = super(sale_order_line, self)._prepare_order_line_invoice_line(cr, uid, line, account_id, context)\n# for procur in line.procurement_ids:\n# if line.product_id.track_all or (line.product_id.track_incoming and line.product_id.track_outgoing):\n# for move in procur.move_ids:\n# for link in move.linked_move_operation_ids:\n# if link.operation_id and link.operation_id.package_id and link.operation_id.package_id.import_id:\n# x = invoice_line_custom_obj.create(cr, uid, {'account_invoice_line_id':line.id, 'import_id':link.operation_id.package_id.import_id.id})\n# return res \n \n\n"
},
{
"alpha_fraction": 0.6605678200721741,
"alphanum_fraction": 0.6725552082061768,
"avg_line_length": 37.63414764404297,
"blob_id": "18a464f3468e8cc57b87d9e4df41992f8741b4a5",
"content_id": "b3a040746c36c0379f2c4d7715bcbf6ce4d7190a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1586,
"license_type": "no_license",
"max_line_length": 140,
"num_lines": 41,
"path": "/l10n_mx_import_info/invoice.py",
"repo_name": "n30chido/odoo-mexico",
"src_encoding": "UTF-8",
"text": "# -*- encoding: utf-8 -*-\n# Author=Moises Lopez [email protected]\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.\n\nimport time\nfrom openerp.osv import fields, osv\nfrom openerp.tools.translate import _\nfrom openerp import pooler, tools\nfrom openerp import netsvc\n\n\nclass account_invoice_line(osv.Model):\n _inherit = \"account.invoice.line\"\n\n _columns = {\n 'import_ids': fields.many2many('import.info', 'account_invoice_line_import_info_rel', 'invoice_line_id', 'import_id',\n string='Pedimentos'),\n #tracking_id\n #'move_id': fields.many2one('stock.move', 'Stock Move'),\n }\n\n\nclass import_info(osv.Model):\n _inherit = \"import.info\" \n \n _columns = {\n 'invoice_line_ids': fields.many2many('account.invoice.line', 'account_invoice_line_import_info_rel', 'import_id', 'invoice_line_id',\n string='Líneas de Factura'),\n }\n\n"
},
{
"alpha_fraction": 0.6252919435501099,
"alphanum_fraction": 0.6366366147994995,
"avg_line_length": 48.95000076293945,
"blob_id": "54e2bdbfde59b98c31f59e231e58cf5468512ac8",
"content_id": "cb1c554248948ecef4e9b600cf6ab183b946e634",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3006,
"license_type": "no_license",
"max_line_length": 162,
"num_lines": 60,
"path": "/l10n_mx_import_info/import_info.py",
"repo_name": "n30chido/odoo-mexico",
"src_encoding": "UTF-8",
"text": "# -*- encoding: utf-8 -*-\n# Author=Nhomar Hernandez [email protected]\n# Audited by=\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.\n\nfrom openerp.tools.translate import _\nfrom openerp.osv import fields, osv\n\n\nclass import_info(osv.Model):\n _name = \"import.info\"\n _description = \"Information about customs\"\n _order = 'name asc'\n\n \"\"\"\n def _get_audit(self, cr, uid, ids, field_name, arg, context=None):\n if context is None:\n context = {}\n result = {}\n prod_obj = self.pool.get('product.product')\n for i in ids:\n chain = ''\n for p in self.browse(cr, uid, [i], context)[0].product_info_ids:\n if not self.browse(cr, uid, [i], context)[0].supplier_id.id in [\n s.name.id for s in p.product_id.seller_ids]:\n chain2 = '\\nVerify the product: %s the Supplier on this document is not related to this product.\\n' % p.product_id.name\n chain = chain+chain2\n result[i] = chain\n return result\n\n \"\"\"\n \n _columns = {\n 'name' : fields.char('Número Pedimento', 15,help=\"Número de Pedimento o Trámite\", required=True),\n 'customs' : fields.char('Aduana', 64, help=\"Aduana usada para la importación de los productos\", required=True),\n 'date' : fields.date('Fecha', help=\"Fecha del Pedimento\", required=True),\n 'package_ids' : fields.one2many('stock.quant.package', 'import_id', 'Empaquetado'),\n 
'rate' : fields.float('Tipo de Cambio', required=True, digits=(16, 4),help='Tipo de Cambio utilizado en el Pedimento Aduanal'),\n 'company_id' : fields.many2one('res.company', 'Compañía', required=True, select=1),\n 'supplier_id' : fields.many2one('res.partner', 'Agencia Aduanal', select=1, help=\"Agencia aduanal con la que se realizó el trámite de importación ...\"),\n 'invoice_ids' : fields.many2many('account.invoice', 'account_invoice_rel', 'import_id', 'invoice_id', 'Facturas relacionadas'),\n #'product_info_ids': fields.one2many('product.import.info', 'import_id', 'Productos', required=False),\n 'notes' : fields.text('Observaciones'),\n }\n\n _defaults = {\n 'company_id': lambda s, cr, uid, c: s.pool.get('res.company')._company_default_get(cr, uid, 'import.info', context=c)\n }\n"
},
{
"alpha_fraction": 0.5950919985771179,
"alphanum_fraction": 0.5987730026245117,
"avg_line_length": 29.185184478759766,
"blob_id": "eed6521f4c97292949a3af9b0588ee2c3df06029",
"content_id": "5e4de87a1f318c14b0054015e85b50e4440a0d8e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 815,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 27,
"path": "/l10n_mx_import_info/__openerp__.py",
"repo_name": "n30chido/odoo-mexico",
"src_encoding": "UTF-8",
"text": "# -*- encoding: utf-8 -*-\n\n{\n \"name\" : \"Customs Information on lots\",\n \"version\" : \"0.1\",\n \"author\" : \"Argil Consulting\",\n \"category\" : \"Localization/Mexico\",\n \"website\": \"http://www.argil.mx\",\n \"description\": \"\"\"\nMake relation between information of import with goverment.\nWith this module you will be able to make a relation between invoice and Information of importing transaction.\nIt will work as production lot make better control with quantities.\n \"\"\",\n \"depends\" : [\"stock_account\",\"account\"],\n \"demo\" : [],\n \"data\" : [\n 'security/ir.model.access.csv',\n 'import_info_view.xml',\n #'product_view.xml',\n 'stock_view.xml',\n #'label_report.xml',\n 'security/groups.xml',\n 'invoice_view.xml'\n ],\n \"active\": False,\n \"installable\": True\n}\n"
},
{
"alpha_fraction": 0.5652515888214111,
"alphanum_fraction": 0.571540892124176,
"avg_line_length": 37.54545593261719,
"blob_id": "d943a088f0609bac46744cb66ccfc5650539aed5",
"content_id": "155df5203836664f652833a520070c5861c6e5c2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2546,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 66,
"path": "/l10n_mx_payment_method/__openerp__.py",
"repo_name": "n30chido/odoo-mexico",
"src_encoding": "UTF-8",
"text": "# -*- encoding: utf-8 -*-\n###########################################################################\n# Module Writen to OpenERP, Open Source Management Solution\n#\n# Copyright (c) 2012 Vauxoo - http://www.vauxoo.com\n# All Rights Reserved.\n# [email protected]\n############################################################################\n# Coded by: moylop260 ([email protected])\n# Coded by: isaac ([email protected])\n############################################################################\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n#\n##############################################################################\n\n{\n \"name\" : \"Agrega método de pago al partner y factura\",\n \"version\" : \"1.0\",\n \"author\" : \"Vauxoo\",\n \"category\" : \"Localization/Mexico\",\n \"description\" : \"\"\"Add \"Payment Method\" to partner and invoice, \n it's used by l10n_mx_facturae module and \"acc_payment\" to invoice\n \n Correr el siguiente script ANTES de actualizar módulo.\n \nupdate account_invoice ai\nset comment = comment || '\\n' || (select pm.name from pay_method pm where pm.id=ai.pay_method_id)\nwhere ai.state in ('open','paid') and ai.type in ('out_invoice','out_refund');\n\nupdate sale_order so\nset note = note || '\\n' || (select pm.name from pay_method pm where pm.id=so.pay_method_id)\nwhere so.state not in ('cancel');\n\nupdate purchase_order po\nset notes = notes || '\\n' || (select pm.name from pay_method pm where pm.id=po.pay_method_id)\nwhere po.state not in ('cancel');\n\n \"\"\",\n \"website\" : \"www.vauxoo.com\",\n \"license\" : \"AGPL-3\",\n \"depends\" : [\"account\", \"l10n_mx_facturae_groups\",\n ],\n \"demo\" : [],\n \"data\" : [\n #\"security/payment_method.xml\",\n \"security/ir.model.access.csv\",\n \"pay_method_view.xml\",\n \"partner_view.xml\",\n \"invoice_view.xml\",\n \"data/payment_method_data.xml\",\n ],\n \"installable\" : True,\n \"active\" : False,\n}\n"
}
] | 5 |
Jerry-goodboy/NankaiMTA
|
https://github.com/Jerry-goodboy/NankaiMTA
|
d70a63d7e4bc30ee245e48b37d25b411e13ee09d
|
1641b52faf7052755874e92df64243f52b3078e5
|
9b1d668007c3e8810d20c63131f93c57bcce47a6
|
refs/heads/master
| 2020-05-09T14:17:14.044900 | 2019-04-18T03:15:56 | 2019-04-18T03:15:56 | 181,188,504 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5024875402450562,
"alphanum_fraction": 0.5089935064315796,
"avg_line_length": 28.659090042114258,
"blob_id": "23d59f591d546a47b45591f5253d8f432db7eaa1",
"content_id": "bcb7e8fab61c5c82c81c00dc78c15ef93db101c8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2717,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 88,
"path": "/generateMarkdown.py",
"repo_name": "Jerry-goodboy/NankaiMTA",
"src_encoding": "UTF-8",
"text": "#!/user/bin/env python\n# -*- coding:utf-8 -*-\n\n__author__ = 'liucaidong'\n\nimport os\nimport os.path\n\nexcludeFiles = set(['.DS_Store'])\n\nseq = []\n\ndef recursive_showdir(path, depth, dirName, seq):\n global href\n dirs = os.listdir(path)\n dirs.sort()\n for item in dirs:\n if '.git' not in item:\n newitem = path +'/'+ item\n if os.path.isdir(newitem):\n if depth < 3:\n seq.append(\"#\" * depth + \"# \" + item)\n elif depth == 3:\n seq.append(\"#\" * depth + \"# 《\" + item + \"》\")\n else:\n seq.append(\" \" * (depth - 4) + \"- \" + item)\n href = item if(dirName == \"\") else dirName + \"/\" + item\n recursive_showdir(newitem, depth +1, href, seq)\n elif os.path.isfile(newitem):\n if item not in excludeFiles and depth > 1:\n seq.append(\" \" * (depth - 4) + '- <a href=\"' + href + '/' + item + '\">' + item +'</a>')\n\n\nif __name__ == '__main__':\n recursive_showdir(os.getcwd(), 1, \"\", seq)\n fo = open(\"README.md\", \"w\")\n toc = '# Table of Contents\\n\\nInspired by [sindresorhus/awesome](https://github.com/sindresorhus/awesome)\\n\\n'\n fo.writelines(toc)\n fo.writelines([line + '\\n\\n' for line in seq])\n fo.close()\n print(\"Everything is OK!\")\n\n\n\n\n\n# import os\n# import platform\n\n# currPath = os.getcwd() # 获取当前路径\n# # print(currPath)\n\n# rawMarkdownTitles = []\n\n# excludeDirs = set(['.git'])\n# excludeFiles = set(['.DS_Store'])\n\n# fo = open(\"test.md\", \"w\")\n# seq = []\n# fo.writelines([line + '\\n' for line in seq])\n# fo.close()\n\n# # ['电子书和参考资料', '2019年上学期', '旅游投资与财务管理', '各主题资料', '成本控制主题']\n\n# for root, dirs, files in os.walk(currPath, topdown=True):\n# for d in list(dirs):\n# if d in excludeDirs:\n# dirs.remove(d)\n# files.clear()\n# # print(\"根目录:\", root)\n# print(\"文件夹:\", dirs)\n# # print(\"文件名:\", files)\n# for name in files:\n# filePath = os.path.join(root, name)\n# finalPath = filePath.replace(currPath, '', 1)\n# finalPath = finalPath.replace(\"\\\\\", \"/\")\n# if os.name == \"posix\":\n# finalPath = 
finalPath[1:]\n# elif os.name == \"nt\":\n# finalPath = finalPath[1:]\n# dirNames = finalPath.split(\"/\")\n# if dirNames[len(dirNames) - 1] not in excludeFiles:\n# dirNames = dirNames[:len(dirNames) - 1]\n# if dirNames not in rawMarkdownTitles:\n# rawMarkdownTitles.append(dirNames)\n# else:\n# pass\n# # print(finalPath)\n\n\n\n"
},
{
"alpha_fraction": 0.7205311059951782,
"alphanum_fraction": 0.7539939284324646,
"avg_line_length": 48.010581970214844,
"blob_id": "3ca8d5d3589f01651add0ca5f5eeb89e2ab46f15",
"content_id": "332bccb089026cf8aeb3d381491272baa9d3b2e7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 13684,
"license_type": "no_license",
"max_line_length": 212,
"num_lines": 189,
"path": "/README.md",
"repo_name": "Jerry-goodboy/NankaiMTA",
"src_encoding": "GB18030",
"text": "# Table of Contents\n\nInspired by [sindresorhus/awesome](https://github.com/sindresorhus/awesome)\n\n<!-- MarkdownTOC -->\n\n- [名师风采](#名师风采)\n- [思维导图](#思维导图)\n- [拓展阅读](#拓展阅读)\n + [2019年上学期](#2019年上学期)\n * [《专业英语》](#《专业英语》)\n * [《旅游投资与财务管理》](#《旅游投资与财务管理》)\n * [《研究方法》](#《研究方法》)\n- [教学课件](#教学课件)\n- [网络文章](#网络文章)\n\n<!-- /MarkdownTOC -->\n\n\n<a id=\"名师风采\"></a>\n## 名师风采\n\n<a id=\"思维导图\"></a>\n## 思维导图\n\n<a id=\"拓展阅读\"></a>\n## 拓展阅读\n\n<a id=\"2019年上学期\"></a>\n### 2019年上学期\n\n<a id=\"《专业英语》\"></a>\n#### 《专业英语》\n\n- 教材\n\n - <a href=\"拓展阅读/2019年上学期/专业英语/教材/旅游从业英语——行业综览与实践.pdf\">旅游从业英语——行业综览与实践.pdf</a>\n\n<a id=\"《旅游投资与财务管理》\"></a>\n#### 《旅游投资与财务管理》\n\n- 各主题资料\n\n - 成本控制主题\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/各主题资料/成本控制主题/A survey of factors influencing the cost system design in hotels.pdf\">A survey of factors influencing the cost system design in hotels.pdf</a>\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/各主题资料/成本控制主题/Changes in emotions and their interactions with personality.pdf\">Changes in emotions and their interactions with personality.pdf</a>\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/各主题资料/成本控制主题/Responsibility cost control system in China.pdf\">Responsibility cost control system in China.pdf</a>\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/各主题资料/成本控制主题/Supply chain cost management and value-based pricing.pdf\">Supply chain cost management and value-based pricing.pdf</a>\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/各主题资料/成本控制主题/The role of management control systems.pdf\">The role of management control systems.pdf</a>\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/各主题资料/成本控制主题/Transaction cost framework in operations and supply chain.pdf\">Transaction cost framework in operations and supply chain.pdf</a>\n\n - 投资主题\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/各主题资料/投资主题/Improving Hospitality.pdf\">Improving Hospitality.pdf</a>\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/各主题资料/投资主题/Nationality and differences in auditor risk assessment.pdf\">Nationality and differences in auditor 
risk assessment.pdf</a>\n\n - 收入管理主题\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/各主题资料/收入管理主题/Analysing the online pricing practices of hotels.pdf\">Analysing the online pricing practices of hotels.pdf</a>\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/各主题资料/收入管理主题/Antecedents and consequences of strategic price management.pdf\">Antecedents and consequences of strategic price management.pdf</a>\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/各主题资料/收入管理主题/Dynamic room pricing model for hotel revenue.pdf\">Dynamic room pricing model for hotel revenue.pdf</a>\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/各主题资料/收入管理主题/Hotel revenue management and the Internet.pdf\">Hotel revenue management and the Internet.pdf</a>\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/各主题资料/收入管理主题/Influences of consumer characteristics on fairness perceptions of revenue.pdf\">Influences of consumer characteristics on fairness perceptions of revenue.pdf</a>\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/各主题资料/收入管理主题/Modeling hotel room price with geographically weighted regression.pdf\">Modeling hotel room price with geographically weighted regression.pdf</a>\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/各主题资料/收入管理主题/Pricing determinants in the hotel industry Quantile regression analysis.pdf\">Pricing determinants in the hotel industry Quantile regression analysis.pdf</a>\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/各主题资料/收入管理主题/Relationship or revenue.pdf\">Relationship or revenue.pdf</a>\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/各主题资料/收入管理主题/Technology revenue management system for customer groups in hotels.pdf\">Technology revenue management system for customer groups in hotels.pdf</a>\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/各主题资料/收入管理主题/The basics of yield management.pdf\">The basics of yield management.pdf</a>\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/各主题资料/收入管理主题/Towards value-based pricing—An integrative framework.pdf\">Towards value-based pricing—An integrative framework.pdf</a>\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/各主题资料/收入管理主题/Weekend 
vs. midweek stays.pdf\">Weekend vs. midweek stays.pdf</a>\n\n - 绩效评价主题\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/各主题资料/绩效评价主题/A model and a performance measurement system for.pdf\">A model and a performance measurement system for.pdf</a>\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/各主题资料/绩效评价主题/An investigation of the effect of Balanced Scorecard.pdf\">An investigation of the effect of Balanced Scorecard.pdf</a>\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/各主题资料/绩效评价主题/CONCEPTUALIZING YIELD.pdf\">CONCEPTUALIZING YIELD.pdf</a>\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/各主题资料/绩效评价主题/Choice and change of measures in performance.pdf\">Choice and change of measures in performance.pdf</a>\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/各主题资料/绩效评价主题/Designing a performance measurement system.pdf\">Designing a performance measurement system.pdf</a>\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/各主题资料/绩效评价主题/Measuring the immeasurable.pdf\">Measuring the immeasurable.pdf</a>\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/各主题资料/绩效评价主题/Multiple Perspectives of Performance Measures.pdf\">Multiple Perspectives of Performance Measures.pdf</a>\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/各主题资料/绩效评价主题/Performance management a framework.pdf\">Performance management a framework.pdf</a>\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/各主题资料/绩效评价主题/Performance measurement and performance management.pdf\">Performance measurement and performance management.pdf</a>\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/各主题资料/绩效评价主题/Performance measurement impacts on management and.pdf\">Performance measurement impacts on management and.pdf</a>\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/各主题资料/绩效评价主题/The impact of IT capabilities on firm performance.pdf\">The impact of IT capabilities on firm performance.pdf</a>\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/各主题资料/绩效评价主题/The interplay of different levers of control.pdf\">The interplay of different levers of control.pdf</a>\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/各主题资料/绩效评价主题/The theory and practice of performance 
measurementPietro.pdf\">The theory and practice of performance measurementPietro.pdf</a>\n\n - 绪论主题\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/各主题资料/绪论主题/A conceptual development of Simons’ Levers of Control framework.pdf\">A conceptual development of Simons’ Levers of Control framework.pdf</a>\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/各主题资料/绪论主题/Hospitality finance and.pdf\">Hospitality finance and.pdf</a>\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/各主题资料/绪论主题/How information systems influence user decisions.pdf\">How information systems influence user decisions.pdf</a>\n\n - 融资主题\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/各主题资料/融资主题/Capital structure, free cash flow, diversification and firm performance.pdf\">Capital structure, free cash flow, diversification and firm performance.pdf</a>\n\n - 预算主题\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/各主题资料/预算主题/Beyond budgeting or budgeting reconsidered.pdf\">Beyond budgeting or budgeting reconsidered.pdf</a>\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/各主题资料/预算主题/Budgeting practices in the Turkish hospitality industry.pdf\">Budgeting practices in the Turkish hospitality industry.pdf</a>\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/各主题资料/预算主题/Coping with ambiguity through the budget.pdf\">Coping with ambiguity through the budget.pdf</a>\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/各主题资料/预算主题/Improving hotel budgetary practiceA positive theory model.pdf\">Improving hotel budgetary practiceA positive theory model.pdf</a>\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/各主题资料/预算主题/The differential effect of environmental.pdf\">The differential effect of environmental.pdf</a>\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/各主题资料/预算主题/‘‘Continuous” budgeting Reconciling budget flexibility.pdf\">‘‘Continuous” budgeting Reconciling budget flexibility.pdf</a>\n\n- 研究方法\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/研究方法/Asking Good Survey Questions.pdf\">Asking Good Survey Questions.pdf</a>\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/研究方法/如何写文献综述.ppt\">如何写文献综述.ppt</a>\n\n - <a 
href=\"拓展阅读/2019年上学期/旅游投资与财务管理/研究方法/扎根理论.docx\">扎根理论.docx</a>\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/研究方法/文献综述.docx\">文献综述.docx</a>\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/研究方法/案例研究.docx\">案例研究.docx</a>\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/研究方法/概念模型含义.docx\">概念模型含义.docx</a>\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/研究方法/论文摘要.docx\">论文摘要.docx</a>\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/研究方法/调查研究法.docx\">调查研究法.docx</a>\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/研究方法/质性方法.docx\">质性方法.docx</a>\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/研究方法/质的研究方法与社会科学研究--陈向明.pdf\">质的研究方法与社会科学研究--陈向明.pdf</a>\n\n - <a href=\"拓展阅读/2019年上学期/旅游投资与财务管理/研究方法/陈向明质化研究PPT.ppt\">陈向明质化研究PPT.ppt</a>\n\n<a id=\"《研究方法》\"></a>\n#### 《研究方法》\n\n- 扎根理论\n\n - <a href=\"拓展阅读/2019年上学期/研究方法/扎根理论/阅读顺序1 蒙牛公司快速成长模式及其影响因素研究—扎根理论研究方法的运用 《管理科学》2006.06.pdf\">阅读顺序1 蒙牛公司快速成长模式及其影响因素研究—扎根理论研究方法的运用 《管理科学》2006.06.pdf</a>\n\n - <a href=\"拓展阅读/2019年上学期/研究方法/扎根理论/阅读顺序2 基于扎根理论方法的孵化型裂变创业探索性研究—以海尔集团孵化雷神公司为例 《管理学报》2016.07.pdf\">阅读顺序2 基于扎根理论方法的孵化型裂变创业探索性研究—以海尔集团孵化雷神公司为例 《管理学报》2016.07.pdf</a>\n\n - <a href=\"拓展阅读/2019年上学期/研究方法/扎根理论/阅读顺序3 商业模式传承型裂变创业内在机理研究《南开管理评论》201705.pdf\">阅读顺序3 商业模式传承型裂变创业内在机理研究《南开管理评论》201705.pdf</a>\n\n - <a href=\"拓展阅读/2019年上学期/研究方法/扎根理论/阅读顺序5 扎根理论方法在科学研究中的运用分析 《东方论坛》2007.4.pdf\">阅读顺序5 扎根理论方法在科学研究中的运用分析 《东方论坛》2007.4.pdf</a>\n\n- 调查问卷\n\n - <a href=\"拓展阅读/2019年上学期/研究方法/调查问卷/调查问卷的设计与评估.pdf\">调查问卷的设计与评估.pdf</a>\n\n<a id=\"教学课件\"></a>\n## 教学课件\n\n<a id=\"网络文章\"></a>\n## 网络文章\n\n"
}
] | 2 |
nikolakatic/SciComp-DeViSE
|
https://github.com/nikolakatic/SciComp-DeViSE
|
114a5adc0ae600bfb76dfff273f1ed99ade30425
|
b163666ffac0aee6ab052dab1dc926704fee37fd
|
ebaa41fa34f3c6e3aac7699b8f518a48621f5489
|
refs/heads/master
| 2022-12-25T23:49:33.448908 | 2020-09-24T06:23:27 | 2020-09-24T06:23:27 | 258,715,903 | 3 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6966887712478638,
"alphanum_fraction": 0.7695364356040955,
"avg_line_length": 124.83333587646484,
"blob_id": "78098034d40e6c876faa586bd7bbd86c7b15a92b",
"content_id": "9b27a2851dd4aa631ea652f8dafdaf92e10f041c",
"detected_licenses": [
"CC-BY-3.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1512,
"license_type": "permissive",
"max_line_length": 358,
"num_lines": 12,
"path": "/presentation_versions/README.md",
"repo_name": "nikolakatic/SciComp-DeViSE",
"src_encoding": "UTF-8",
"text": "# Python notebooks used for presentation\n\nThis directory contains different Jupyter notebooks used for training the model with different parameters or datasets:\n\n- 1000 images dataset, 30 epochs, 32 batch size, 3 additional layers -- [Jupyter notebook](./semantic-image-search-30epoch-32batch-3layer.ipynb) and corresponding [PDF](./semantic-image-search-30epoch-32batch-3layer.pdf)\n- 1000 images dataset, 50 epochs, 32 batch size, 2 additional layers -- [Jupyter notebook](./semantic-image-search-final.ipynb) and corresponding [PDF](./semantic-image-search-final.pdf)\n- 1000 images dataset, 30 epochs, 16 batch size, 1 additional layer -- [Jupyter notebook](./semantic-image-search-30epoch-16batch-1layer.ipynb) and corresponding [PDF](./semantic-image-search-30epoch-16batch-1layer.pdf)\n- ≈10000 images dataset, 50 epochs, 32 batch size, 2 additional layers -- [Jupyter notebook](./semantic-image-search-iaprtc12.ipynb) and corresponding [PDF](./semantic-image-search-iaprtc12.pdf)\n\nThe first three models were trained using the same image dataset which is available under (CC BY 3.0) licence [here](https://vision.cs.uiuc.edu/pascal-sentences/).\n\nIn the last notebook the IAPR TC-12 Benchmark is used, it is available without copyright restriction [here](http://www-i6.informatik.rwth-aachen.de/imageclef/resources/iaprtc12.tgz). We loaded around half of the set. These are the directories which were kept for training: `00 05 06 07 10 11 17 21 23 24 27 28 29 30 31 32 33 34 35 37 39`.\n"
},
{
"alpha_fraction": 0.6952662467956543,
"alphanum_fraction": 0.6982248425483704,
"avg_line_length": 25,
"blob_id": "4aa1e76ceb20809b8b9df7a5f62be74d063a2a1e",
"content_id": "d79256de49d634ab5f1622c503ccf51eb21f1287",
"detected_licenses": [
"CC-BY-3.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 676,
"license_type": "permissive",
"max_line_length": 57,
"num_lines": 26,
"path": "/get-dataset.py",
"repo_name": "nikolakatic/SciComp-DeViSE",
"src_encoding": "UTF-8",
"text": "from os import makedirs, path\nfrom urllib import request\nfrom bs4 import BeautifulSoup\nimport wget\n\ntarget_dir = './dataset/'\n\nmakedirs(target_dir, exist_ok=True)\n\nbase_url = 'https://vision.cs.uiuc.edu/pascal-sentences/'\npage = request.urlopen(base_url)\nsoup = BeautifulSoup(page, features=\"html5lib\")\n\nall_images = soup.body.findAll('img')\n\nfor image in all_images:\n suffix = image.get('src')\n link = base_url + suffix\n dir_name, img_name = path.split(suffix)\n\n dir_path = path.join(target_dir, dir_name)\n makedirs(dir_path, exist_ok=True)\n\n filepath = path.join(dir_path, img_name)\n print('Downloading %s ...' % link)\n wget.download(link, filepath)\n"
},
{
"alpha_fraction": 0.7604684233665466,
"alphanum_fraction": 0.7760823369026184,
"avg_line_length": 84.39393615722656,
"blob_id": "81ecc7b76700159ca022c0e37ae809e5cc440bca",
"content_id": "76877e175128a72952ee9ff646aa5d8f56eeb8df",
"detected_licenses": [
"CC-BY-3.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2818,
"license_type": "permissive",
"max_line_length": 463,
"num_lines": 33,
"path": "/README.md",
"repo_name": "nikolakatic/SciComp-DeViSE",
"src_encoding": "UTF-8",
"text": "### About\n\nCode found here is a reimplementation of the model described in [DeViSE: A Deep Visual-Semantic Embedding Model](https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/41473.pdf).\n\nThe goal is to achieve semantic search over image dataset in a scalable fashion. We used [VGG16](https://keras.io/api/applications/vgg/) CNN pretrained on [ImageNet](http://www.image-net.org/about-overview) dataset, combining it with [GloVE](https://nlp.stanford.edu/projects/glove/) pretrained model consisting of [400k words and their 300D corresponding vectors](http://nlp.stanford.edu/data/glove.6B.zip).\n\nSuch a hybrid model performs reasonably well on images outside of image corpus, as well as on some categories that were not present during model creation.\n\nSearching over dataset can be done in different ways:\n\n* text -> images\n* image -> labels\n* image -> images\n\nFast queries are facilitated by [Annoy](https://github.com/spotify/annoy) library which implements an [approximate nearest neighbor](https://en.wikipedia.org/wiki/Nearest_neighbor_search#Approximate_nearest_neighbor) search algorithm.\n\n### Demo\n\nTo see results without needing to download datasets and run everything, you can download `semantic-image-search_with-all-output-shown.html` and its corresponding directory `semantic-image-search_with-all-output-shown_files`. That way you can view complete execution of the attached notebook `semantic-image-search.ipynb`.\n\n### Evaluation\n\nWe used metrics described in original paper - **flat hit@k** and **hierarchical precision@k**. The results can be found in [final_versions](./final_versions/) directory. As it can be observed, actual results were sometimes better than what simple metrics could tell. 
A good example is an excerpt from [this](./final_versions/semantic-image-search-FINAL-30epoch.ipynb) Jupyter notebook: `label 'cow' not found in ['cows', 'calves', 'ailment', 'goose', 'cheese']`.\n\n### Resources and links\n\nYou can download training dataset by using `get-dataset.py`. Dataset is under (CC BY 3.0) licence and it's available [here](https://vision.cs.uiuc.edu/pascal-sentences/). This dataset is also used by [Emmanuel Ameisen](https://mlpowered.com/about/) in his [QCon 2018 talk](https://www.infoq.com/presentations/semantic-search-engine/) where he discusses DeViSE paper. For evaluation purposes we split multiword categories with underscores (e.g. pottedplant -> potted_plant).\n\nOther resources covering the same topic:\n\n* <https://medium.com/@fpingham/devise-a-deep-visual-semantic-embedding-be2fd605de05>\n* <https://www.pyimagesearch.com/2017/03/20/imagenet-vggnet-resnet-inception-xception-keras/>\n* [Jeremy Howard](https://medium.com/@jeremyphoward)'s [lecture](https://www.youtube.com/watch?v=tY0n9OT5_nA&feature=youtu.be&t=1h55m23s) where he explains DeViSE approach\n"
}
] | 3 |
SoumakChakraborty/spam_detection
|
https://github.com/SoumakChakraborty/spam_detection
|
7a11728e50d7638438c808c81c65ed432e43bcb8
|
406d8f3d071862da44e924e67c5739cf848deb56
|
cb4e116783c3ced1e920eed1ea1a74e28695af78
|
refs/heads/master
| 2023-01-19T04:50:00.901325 | 2020-11-19T11:53:55 | 2020-11-19T11:53:55 | 311,617,300 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7179487347602844,
"alphanum_fraction": 0.7376068234443665,
"avg_line_length": 16.16176414489746,
"blob_id": "3adc3cb414f446bcd59fc19f86967c40905eb1f6",
"content_id": "175a8000fccf223c35994a2bcc14a271162f44da",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1170,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 68,
"path": "/Sp.py",
"repo_name": "SoumakChakraborty/spam_detection",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pandas as pd\nfrom sklearn import svm,ensemble\nfrom sklearn import metrics,preprocessing\nfrom imblearn.over_sampling import RandomOverSampler\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction.text import TfidfVectorizer,CountVectorizer\nfrom sklearn.model_selection import cross_val_score\nimport pickle\nfrom sklearn import feature_selection\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.linear_model import LogisticRegression\n\ndf=pd.read_csv('spam.csv',encoding='iso-8859-1',usecols=['v1','v2'])\n\n\n\ndf['v2']=df['v2'].str.lower()\n\n\n\ndf['v2']=df['v2'].str.replace('^a-zA-Z',\"\")\n\n\n\ndf['v1'].replace('ham',0,inplace=True)\n\n\ndf['v1'].replace('spam',1,inplace=True)\n\n\n\n\nY=df['v1']\n\n\n\n\nX=df['v2']\n\n\n\n\nRM=RandomOverSampler(random_state=42)\n\n\n\n\nTF=TfidfVectorizer(ngram_range=(1,2),strip_accents=None,stop_words='english')\n\n\n\nX_f=TF.fit_transform(X)\n\nX_samp,Y_samp=RM.fit_sample(X_f,Y)\n\n\n\n\n#X_train,X_test,Y_train,Y_test=train_test_split(X_samp,Y_samp,train_size=.70)\n\n\nmodel=MultinomialNB()\n\nmodel.fit(X_samp,Y_samp)\n\npickle.dump(model,open('spam.pkl','wb'))\npickle.dump(TF,open('vectorizer.pkl','wb'))\n\n\n\n"
},
{
"alpha_fraction": 0.5760095119476318,
"alphanum_fraction": 0.5878859758377075,
"avg_line_length": 29.071428298950195,
"blob_id": "4d9baccdb4c50b1d67533892566da88ee1fc46be",
"content_id": "e4dc3a6309b94a00616def86eed73f5f626bfd94",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 842,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 28,
"path": "/app.py",
"repo_name": "SoumakChakraborty/spam_detection",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pickle\nimport os\nfrom flask import Flask,render_template,redirect,request,url_for\n\napp=Flask(__name__)\n\[email protected]('/',methods=[\"GET\",\"POST\"])\ndef detect():\n if request.method==\"GET\":\n return render_template('index.html')\n else:\n msg=request.form.get(\"message\")\n model=pickle.load(open('spam.pkl','rb'))\n msg=msg.lower()\n msg=msg.replace('^a-zA-Z','')\n X_r=[msg]\n TF=pickle.load(open('vectorizer.pkl','rb'))\n X=TF.transform(X_r).toarray()\n Y=model.predict(X)\n if Y[0]==0:\n return render_template('index.html',message='Not Spam')\n else:\n return render_template('index.html',message='Spam')\n\nif __name__=='__main__':\n port = int(os.environ.get('PORT', 5000))\n app.run(host='0.0.0.0', port=port, debug=True)\n"
}
] | 2 |
SergeyParamonov/HabraAnalyticsTools
|
https://github.com/SergeyParamonov/HabraAnalyticsTools
|
fca330e67d9e90871fac28a0362a187e9a1534ae
|
c049ab41a57132b1243b74be835b1fb1d00a99be
|
3212838468d18769050db93aa76aecddb7dd9976
|
refs/heads/master
| 2020-04-06T07:07:59.071455 | 2014-04-29T18:56:01 | 2014-04-29T18:56:01 | 19,126,437 | 4 | 1 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6219230890274048,
"alphanum_fraction": 0.6399999856948853,
"avg_line_length": 32.33333206176758,
"blob_id": "10fed4f4ac5a80eaa0b97544dc52cb9235d6c714",
"content_id": "de0d29bf1403bf5a80b03e398d9a222836facb3c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2600,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 78,
"path": "/src/hubs_wrapper.py",
"repo_name": "SergeyParamonov/HabraAnalyticsTools",
"src_encoding": "UTF-8",
"text": "from reader import Reader\nimport glob\nimport operator\nimport pylab as pl\n\n\ndef jaccard_index(set1,set2):\n set1 = set(set1)\n set2 = set(set2)\n intrsct = float(len(set1.intersection(set2)))\n union = float(len(set1.union(set2)))\n jaccard_index = intrsct/union\n return round(jaccard_index,3)\n\n#inclusion of set1 into ses2\n#not symmetric!!!\ndef inclusion(set1,set2):\n set1 = set(set1)\n set2 = set(set2)\n intrsct = set1.intersection(set2)\n inclusion = len(intrsct)/float(len(set1))\n return int(100*inclusion)\n\n\n\ndef compute(hubname,isCompany,fun_name):\n hub_readers = Reader.check_and_download(hubname, isCompany) \n hubs_data_dir = 'data/hubs/'\n tocut = len(hubs_data_dir)\n hubs = glob.glob(hubs_data_dir+'*')\n similarity_dict = dict()\n for hub_file in hubs:\n readers = Reader.read_list_of_users(hub_file)\n hub = hub_file[tocut:]\n #skip itself\n if hub == hubname:\n continue\n if fun_name == \"similarity\":\n similarity_dict[hub] = jaccard_index(hub_readers,readers)\n if fun_name == \"inclusion\":\n similarity_dict[hub] = inclusion(hub_readers,readers)\n return similarity_dict\n\n\ndef display_preferences(hubname,isCompany,fun_name,flag,flagopts):\n ylabel = fun_name\n values = compute(hubname,isCompany,fun_name)\n sorted_values = sorted(values.iteritems(), key=operator.itemgetter(1), reverse=True)\n hubs = map(lambda x: x[0],sorted_values) \n y_values = map(lambda x: x[1],sorted_values) \n if flag is None:\n MAX_HUBS = 50\n #exclude itself\n hubs = hubs[:MAX_HUBS]\n y_values = y_values[:MAX_HUBS]\n fig = pl.figure()\n ax = pl.subplot(111)\n hub_range = range(0,MAX_HUBS)\n ax.bar(hub_range, y_values)\n # re-write and also show % of intersection, like\n # 50% of space also read this...\n pl.title(hubname + \" : \" + fun_name, fontsize=22)\n pl.xticks(hub_range, hubs,rotation=80)\n pl.ylabel(ylabel, fontsize=20)\n pl.show()\n elif flag == \"max\":\n max_hubs = int(flagopts)\n hubs = hubs[:max_hubs+1]\n y_values = y_values[:max_hubs+1]\n for 
hub, value in zip(hubs, y_values):\n print(\"hub:\"+hub + \" function:\" + fun_name + \" value:\" + str(value))\n elif flag == \"min\":\n min_hubs = int(flagopts)\n inverse_sorted_values = sorted(values.iteritems(), key=operator.itemgetter(1))\n hubs = map(lambda x: x[0],inverse_sorted_values)[:min_hubs]\n y_values = map(lambda x: x[1],inverse_sorted_values)[:min_hubs]\n for hub, value in zip(hubs, y_values):\n print(\"hub:\"+hub + \" function:\" + fun_name + \" value:\" + str(value))\n"
},
{
"alpha_fraction": 0.6567685604095459,
"alphanum_fraction": 0.6570596694946289,
"avg_line_length": 30.513761520385742,
"blob_id": "e040b15c57f772452f4fb14562fceee1a4e21640",
"content_id": "f7f31351fa8b878bc5ceec39160cb500cb07e66d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3435,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 109,
"path": "/src/reader.py",
"repo_name": "SergeyParamonov/HabraAnalyticsTools",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python \n\n\"\"\"High level wrapper for functionality of venn.py\n Makes sure data is downloaded and calls draw.py to visualize\n\"\"\"\n\nfrom __future__ import print_function\nfrom draw import draw\nfrom analyzeHubs import HubAnalyzer\nimport os.path\nclass Reader:\n @staticmethod\n def read_list_of_users(filename):\n user_file = open(filename, \"r\")\n users = user_file.readlines()\n users = [user.strip() for user in users]\n return users\n\n\n @staticmethod\n def check_and_download(hubname,company_flag):\n if hubname is None:\n return None\n hubs = HubAnalyzer.get_hub_names()\n companies = HubAnalyzer.get_company_names()\n if not company_flag:\n if hubname in hubs and hubname in companies:\n print(\"Name is ambiguous, there is a company and a hub with this name; assuming hub by default.\")\n prefix = \"hubs/\"\n elif hubname in hubs:\n prefix = \"hubs/\"\n elif hubname in companies:\n prefix = \"companies/\"\n else:\n print(\"There is no name record for *\" + hubname + \"*, assuming it is a hub, not a company.\")\n else:\n prefix = \"companies/\"\n datafile = \"data/\"+prefix+hubname\n if os.path.isfile(datafile):\n return Reader.read_list_of_users(datafile)\n else:\n print(\"Data for *\" +hubname+ \"* is not in the local dataset, downloading it now... 
\")\n HubAnalyzer.report_downloading_progress = True\n if company_flag:\n HubAnalyzer.getCompanyUsers(hubname)\n else:\n HubAnalyzer.getUsers(hubname)\n return Reader.read_list_of_users(datafile)\n\n @staticmethod\n def removehubdata(hubname):\n datafile = \"data/hubs/\"+hubname\n if os.path.isfile(datafile):\n os.remove(datafile)\n else:\n raise Exception(\"The data file does not exist\")\n\n @staticmethod\n def removehub(hubname_to_delete):\n is_deleted = False\n hubname_to_delete = hubname_to_delete.strip()\n hubs = open(\"data/meta/hubs_name_link.csv\",\"r\")\n lines = hubs.readlines()\n hubs.close()\n hubs = open(\"data/meta/hubs_name_link.csv\",\"w\")\n for line in lines:\n hubname = line.split(',')[1].strip()\n if hubname != hubname_to_delete:\n print(line, file=hubs, end=\"\")\n else:\n is_deleted = True\n hubs.close()\n if not is_deleted:\n raise Exception(\"Link is not found among available hubs\")\n\n @staticmethod\n def addhub(hubname, description):\n hubs = open(\"data/meta/hubs_name_link.csv\",\"a\")\n print(description+\",\" + hubname , file=hubs)\n hubs.close()\n\n @staticmethod\n def updatehub(hubname):\n print(\"Updating: \" + hubname)\n HubAnalyzer.report_downloading_progress = True\n HubAnalyzer.enforce_download_in_presence_of_data = True\n HubAnalyzer.getUsers(hubname)\n HubAnalyzer.enforce_download_in_presence_of_data = False\n\n @staticmethod\n def download_company_data(name):\n HubAnalyzer.report_downloading_progress = True\n HubAnalyzer.enforce_download_in_presence_of_data = True\n try:\n HubAnalyzer.getCompanyUsers(name)\n except Exception as e:\n print(str(e))\n return\n\n @staticmethod\n def print_hubs():\n print(\"companies\")\n company_dict = HubAnalyzer.generate_company_dictionary()\n for name, descr in company_dict.items():\n print(name + \" <--> \" + descr + \"(company)\")\n print(\"hubs\")\n hub_dict = HubAnalyzer.generate_hub_dictionary()\n for name, descr in hub_dict.items():\n print(name + \" <--> \" + descr)\n"
},
{
"alpha_fraction": 0.621391773223877,
"alphanum_fraction": 0.6255838871002197,
"avg_line_length": 32.26294708251953,
"blob_id": "fdaa69aaeadea67804a4c6c32f4cad940d21004f",
"content_id": "4f9204281fcd43349ee5ce2bef005f2de25494d7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8349,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 251,
"path": "/src/analyzeHubs.py",
"repo_name": "SergeyParamonov/HabraAnalyticsTools",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python \n\n\"\"\" Low level functions to parse pages with users\n Transform names into links and back \n\"\"\"\nfrom __future__ import print_function\nfrom parseHubs import HubsParser \nimport urllib3\nfrom bs4 import BeautifulSoup\nimport re\nimport time\nimport os,os.path\nfrom progress.bar import Bar\nimport sys\n\nclass HubAnalyzer:\n logfile = \"data/meta/parsing_log.txt\"\n hubnames = None\n report_downloading_progress = False\n enforce_download_in_presence_of_data = False\n\n @staticmethod\n def getLastPageNumber(url):\n url = url.strip('/')\n suffix = \"/subscribers/rating/\"\n userlist_url = url + suffix\n http = urllib3.PoolManager()\n response = http.request('GET', userlist_url)\n html = response.data\n soup = BeautifulSoup(html)\n nav_pages = soup.find(id=\"nav-pages\")\n if not nav_pages:\n return 1\n no_index = nav_pages.find(\"noindex\")\n if no_index:\n last_page = no_index.a['href'] \n num_row = re.findall(r\"/subscribers/rating/page\\d+\",last_page)[0]\n num = re.findall(r\"\\d+\", num_row)[0]\n else:\n raw_nums = re.findall(\"/subscribers/rating/page\\d\",html)\n nums = [int(num[-1]) for num in raw_nums]\n num = max(nums)\n return num\n \n @staticmethod\n def getCompanyLastPage(name):\n url = \"http://habrahabr.ru/company/\"+name+\"/fans/all/rating/\"\n http = urllib3.PoolManager()\n try:\n response = http.request('GET', url)\n except Exception as e:\n print(str(e))\n return\n html = response.data.decode('utf-8')\n soup = BeautifulSoup(html)\n nav_pages = soup.find(id=\"nav-pages\")\n if not nav_pages:\n return 1\n no_index = nav_pages.find(\"noindex\")\n if no_index:\n last_page = no_index.a['href'] \n num_row = re.findall(r\"/fans/all/rating/page\\d+\",last_page)[0]\n num = int(re.findall(r\"\\d+\", num_row)[0])\n else:\n raw_nums = re.findall(\"/fans/all/rating/page\\d\",html)\n nums = [int(num[-1]) for num in raw_nums]\n if nums:\n num = max(nums)\n else:\n raise Exception(\"Link is broken\")\n return num\n \n 
@staticmethod\n def getCompanyUsers(name):\n url = \"http://habrahabr.ru/company/\"+name+\"/fans/all/rating/\"\n log = open(HubAnalyzer.logfile, \"a\")\n print(\"URL: \" + url + \" ----------------- \", file=log)\n print(time.strftime(\"%H:%M:%S\"), file=log)\n log.flush()\n datapath = \"data/companies/\"+name\n if os.path.isfile(datapath) and not HubAnalyzer.enforce_download_in_presence_of_data: \n print(\"data is already here, abort this url\",file=log)\n return None\n try:\n last_page = HubAnalyzer.getCompanyLastPage(name)\n except Exception as err:\n print(\"URL is broken, abort the url\", file=log)\n print(str(e), file=log)\n log.flush()\n print(\"Cannot analyze the page, please, check the url below: \\n\" + url)\n return\n datafile = open(datapath,\"w\")\n http = urllib3.PoolManager()\n if HubAnalyzer.report_downloading_progress:\n bar = Bar('Downloading: '+name, max=last_page, suffix='%(percent)d%%')\n for i in range(1,last_page+1):\n user_page = url +\"page\" +str(i)\n print(user_page, file=log)\n log.flush()\n try:\n response = http.request('GET', user_page)\n except Exception as e:\n print(str(e),file=log)\n print(str(e))\n log.flush()\n datafile.close()\n os.remove(datapath)\n return \n html = response.data\n soup = BeautifulSoup(html)\n usersRow = soup.find_all(class_=\"user \")\n for userRow in usersRow:\n username = userRow.find(class_=\"username\").text\n print(username, file=datafile)\n datafile.flush()\n if HubAnalyzer.report_downloading_progress:\n bar.next() \n #finalize and close everything\n if HubAnalyzer.report_downloading_progress:\n bar.finish()\n datafile.close()\n log.close()\n\n @staticmethod\n def getUsers(hubname):\n log = open(HubAnalyzer.logfile, \"a\")\n print(\"hub: \" + hubname + \" ----------------- \", file=log)\n print(time.strftime(\"%H:%M:%S\"), file=log)\n #clean the file to write users to\n url = HubAnalyzer.hubname2link(hubname)\n output_filename = \"data/hubs/\"+hubname \n #if data is here, do nothing\n if 
os.path.isfile(output_filename) and not HubAnalyzer.enforce_download_in_presence_of_data: \n print(\"data is already here, abort this url\",file=log)\n return None\n output_file = open(output_filename, \"w\")\n try:\n last_page_num = int(HubAnalyzer.getLastPageNumber(url))\n except Exception as err:\n print(\"URL is broken, abort the url\", file=log)\n log.flush()\n os.remove(output_filename)\n raise Exception(\"Cannot analyze the page, please, check the url below: \\n\" + url)\n #get connection to habrahabr-hub\n suffix = \"/subscribers/rating/page\"\n userlist_url = url + suffix\n http = urllib3.PoolManager()\n if HubAnalyzer.report_downloading_progress:\n HubAnalyzer.get_hub_description(hubname)\n bar = Bar('Downloading: '+ hubname, max=last_page_num, suffix='%(percent)d%%')\n for i in range(1,last_page_num+1):\n user_page = userlist_url+str(i)\n print(user_page, file=log)\n log.flush()\n try:\n response = http.request('GET', user_page)\n except urllib3.exceptions.HTTPError as err:\n if err.code == 404:\n print(user_page + \" !! 
404 !!\", file=log)\n log.flush()\n output_file.close()\n os.remove(output_filename)\n raise(\"Hub is not found, please, check the url\")\n else:\n print(user_page + \" PARSING ERROR \", file=log)\n log.flush()\n output_file.close()\n os.remove(output_filename)\n raise Exception(\"Error: cannot parse the page!\")\n html = response.data\n soup = BeautifulSoup(html)\n usersRow = soup.find_all(class_=\"user \")\n for userRow in usersRow:\n username = userRow.find(class_=\"username\").text\n print(username, file=output_file)\n output_file.flush()\n if HubAnalyzer.report_downloading_progress:\n bar.next() \n #finalize and close everything\n if HubAnalyzer.report_downloading_progress:\n bar.finish()\n output_file.close()\n log.close()\n\n @staticmethod\n def generate_hub_dictionary():\n hub_dict = dict()\n csv_hubs = open(\"data/meta/hubs_name_link.csv\",\"r\")\n for line in csv_hubs.readlines():\n name = line.split(\",\")[0].strip()\n hubname = line.split(\",\")[1].strip()\n hub_dict[hubname] = name\n return hub_dict\n\n @staticmethod\n def generate_company_dictionary():\n company_dict = dict()\n csv_companies = open(\"data/meta/companies_name_link.csv\",\"r\")\n for line in csv_companies.readlines():\n name = line.split(\",\")[0].strip()\n company = line.split(\",\")[1].strip()\n company_dict[company] = name\n return company_dict\n\n @staticmethod\n def hubname2link(hubname):\n prefix = \"http://habrahabr.ru/hub/\"\n url = prefix + hubname.strip()\n return url\n\n @staticmethod\n def get_hub_description(hubname):\n if HubAnalyzer.hubnames is None:\n HubAnalyzer.hubnames = HubAnalyzer.generate_hub_dictionary() \n return HubAnalyzer.hubnames[hubname]\n\n @staticmethod\n def get_hub_links():\n if HubAnalyzer.hubnames is None:\n HubAnalyzer.hubnames = HubAnalyzer.generate_hub_dictionary() \n return [HubAnalyzer.hubname2link(hubnames) for hubnames in HubAnalyzer.hubnames.keys()]\n\n @staticmethod\n def get_hub_names():\n if HubAnalyzer.hubnames is None:\n 
HubAnalyzer.hubnames = HubAnalyzer.generate_hub_dictionary() \n return HubAnalyzer.hubnames.keys()\n\n @staticmethod\n def get_company_names():\n lines = open(\"data/meta/companies_name_link.csv\",\"r\").readlines()\n names = [line.split(',')[1].strip() for line in lines]\n return names\n \n @staticmethod\n def convert_label(name, company_flag):\n if company_flag:\n return name + \"(company)\"\n return name\n\n\nif __name__ == \"__main__\":\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n HubAnalyzer.report_downloading_progress = True\n for hub in HubAnalyzer.get_hub_names():\n print(hub)\n HubAnalyzer.getUsers(hub)\n #for name in HubAnalyzer.getCompanyNames():\n # print(name)\n # HubAnalyzer.getCompanyUsers(name)\n"
},
{
"alpha_fraction": 0.62543123960495,
"alphanum_fraction": 0.6343026161193848,
"avg_line_length": 31.206348419189453,
"blob_id": "cfe901e4c35ddeff4a5db2773abf59c8b3fee932",
"content_id": "45884b2326e4b1aa8f2872a11be8118581eebd3c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2029,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 63,
"path": "/src/parseHubs.py",
"repo_name": "SergeyParamonov/HabraAnalyticsTools",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python \n\n\"\"\" Downloads and parses list of hubs and companies\n After executation and making the list, presence is not required\n This file is NOT called by venn.py or any other script\n For dev/debugging only\n\"\"\"\n\nfrom __future__ import print_function\nimport urllib3\nfrom bs4 import BeautifulSoup\n\nclass HubsParser:\n\n @staticmethod\n def parse(url, output_file, divclass, record_html):\n http = urllib3.PoolManager()\n try:\n response = http.request('GET', url)\n except urllib3.exceptions.HTTPError as err:\n if err.code == 404:\n return None\n else:\n raise Exception(\"Error: cannot parse the page!\")\n html = response.data.decode(\"utf-8\")\n soup = BeautifulSoup(html)\n hubsRow = soup.find_all(class_=divclass)\n hubs = []\n for hubRow in hubsRow:\n title = hubRow.find(class_=record_html).a.text\n link = hubRow.find(class_=record_html).a['href']\n print(title.encode('utf-8'), file=output_file, end=\"\")\n print(\",\", file=output_file, end=\"\")\n print(link, file=output_file)\n\n @staticmethod\n def generateHubDictionary(filename):\n output_file = open(filename, \"w\")\n for i in range(1,10):\n url = \"http://habrahabr.ru/hubs/page{}/\".format(i)\n HubsParser.parse(url,output_file,\"hub \", \"title\")\n \n @staticmethod\n def generateCompanyDictionary(filename):\n last_page = 71\n output_file = open(filename, \"w\")\n for i in range(1,last_page+1):\n url = \"http://habrahabr.ru/companies/page{}/\".format(i)\n HubsParser.parse(url,output_file, \"company \", \"name\")\n output_file.close()\n \n @staticmethod\n def format_company_links(filename):\n lines = open(filename, \"r\").readlines() \n formatted = open(filename, \"w\")\n tocut = len(\"/company/\")\n for line in lines:\n name = line.split(',')[0]\n link = line.split(',')[1]\n link = link[tocut:].strip().strip('/')\n print(name, file=formatted, end=\"\")\n print(\",\", file=formatted, end=\"\")\n print(link, file=formatted)\n"
},
{
"alpha_fraction": 0.7445887327194214,
"alphanum_fraction": 0.7515151500701904,
"avg_line_length": 43.39743423461914,
"blob_id": "75e98cf7739a309772b51e069dc154c4ae7a8309",
"content_id": "f2dcf0ef34df9cc91fc71870f0d57f5d96194c25",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 4681,
"license_type": "no_license",
"max_line_length": 173,
"num_lines": 78,
"path": "/README.md",
"repo_name": "SergeyParamonov/HabraAnalyticsTools",
"src_encoding": "UTF-8",
"text": "Статьи с описанием скриптов: \nhttp://habrahabr.ru/post/221087/ -- hubs.py \nhttp://habrahabr.ru/post/220465/ -- venn.py\n\nПримеры:\n\nhelp по тулам: \npython venn.py -h \npython hubs.py -h\n\nсоздание диаграммы\npython venn.py -d space programming fido \nили \npython venn.py --draw space programming fido \nесли данные не присутствуют, то программа автоматически проверит наличие данных на хабре и начнет скачивание, примерно 15-20 минут на хаб \n\nвывод доступных имен хабов и их полные названия, все операции производятся по коротким латинским именам из списка (они же используются в url на хабре) \npython venn.py --hubs\n\nдиаграмма вместе с базовой статистикой \npython venn.py --stats -d space programming fido \n\nвывод только базовой статистики без диаграмм: ключ --onlystats или -o\npython venn.py -o space programming fido \n\nудаление хаба из списка (не удаляет данные!!!) \npython venn.py --removehublink space \n\nдобавление хаба в список (не скачивает данные!!!) \npython venn.py --addhublink space \n\nобновление данных хаба, скачивает данные ~15-20 минут\npython venn.py --updatehub space \n\nдиаграммы и\\или стаистика без заголовка про пиццу и котят\npython venn.py -s -d space programming\n\nдобавление компании и её скачивание данных (так же обновляет данные, если уже что-то скачено)\npytnon venn.py --downloadcompany yandex\n\nСтруктура программы для построения диаграмм Венна\nsrc/ папка хранит исходники \nsrc/reader.py -- высокоуровневые функции для интерефейса, определяет есть ли необходимость качать данные, откуда и вызывает соотвествующие функции \nsrc/analyzeHubs.py -- основные инструменты для анализа хабов, парсинга, \nsrc/draw.py -- содержит фукнцию для рисования диаграмм и подсчета базовой статистики по пересечениям \nsrc/parseHubs.py -- собирает данные по именам хабов, компаний и составляет списки для словарей в meta/ ; после составления этих словарей, данный файл не является необходимым\n\ndata/ папка хранит данные \n\nо пользователях хабов 
data/hubs, \nчитателях компаний data/companies \n\nслужебные данные data/meta: различные вспомогательные данные, список хабов и их полных имен, логи \ndata/meta/parsing_log -- лог скачивания данных \ndata/meta/hubs_name_link.csv -- список хабов и их описаний \ndata/meta/companies_name_link.csv -- список компаний и их описаний \n\nusage: venn.py [-h] [--hubs] [--draw hubname [hubname ...]] [--stats] \n[--onlystats hubname [hubname ...]] [--removehubdata hubname] \n[--removehublink hubname] [--addhublink hubname] \n[--updatehub hubname] [--silentheader] \n[--downloadcompany company_name] \n\noptional arguments: \n-h, --help show this help message and exit \n--hubs Print the list of available hubs from habrahabr \n--onlystats hubname [hubname ...], -o hubname [hubname ...] Print statistics (at least 2 hubs must be given) and exit \n--removehubdata hubname Remove the data for the selected hub \n--removehublink hubname Remove the link for the selected hub \n--addhublink hubname Add a link for a hub \n--updatehub hubname Update user list of a hub \n--silentheader, -s Do not show the header about pizza and kittens \n--downloadcompany company_name, -c company_name Download the data given company name e.g. yandex \n\ndrawing commands: \n--draw hubname [hubname ...], -d hubname [hubname ...] \nMake Venn diagram for the 1st and 2nd hubs (must be given) and optinally the 3rd \n--stats Must be used with --draw, print statistics about hubs intersection \n"
},
{
"alpha_fraction": 0.6791558861732483,
"alphanum_fraction": 0.6834625601768494,
"avg_line_length": 30.37837791442871,
"blob_id": "a7a3d34c65d32a1ec3c5f58b17a8a73e3ccdae98",
"content_id": "0b264dc2a9f0bc9518cee74e56bad3051d56ae7d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2322,
"license_type": "no_license",
"max_line_length": 178,
"num_lines": 74,
"path": "/hubs.py",
"repo_name": "SergeyParamonov/HabraAnalyticsTools",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python \n\n\"\"\" Console interface for hub operations\n Parses arguments and calls right functions from src/\n\"\"\"\n\n#regular import \nimport sys\nsys.path.append(\"./src\")\nimport argparse\n#my functions and classes\nfrom header import print_header_hubs\nfrom reader import Reader\nfrom hubs_wrapper import *\n\n\n\ndef main():\n SUCCESS = 0\n #arguments declaration\n parser = argparse.ArgumentParser()\n parser.add_argument('--omitheader','-m', help='Do not show the header about pizza and kittens', action='store_true', default=False)\n parser.add_argument('--hublist', help='Shows all available hubs', action='store_true', default=False)\n parser.add_argument('--similar','-s', help='Displays similar hubs as a histogram', nargs=1, metavar=(\"hub_name\"))\n parser.add_argument('--alsoread','-a', help='Displays what else people read from this hub as a histogram', nargs=1, metavar=(\"hub_name\"))\n parser.add_argument('--max', help='Print several hubs that maximize the score function e.g. --similar or --alsoread', nargs=1, metavar=(\"number_of_hubs\"), type=int) \n parser.add_argument('--min', help='Print several hubs that minimize the score function e.g. 
--similar or --alsoread', nargs=1, metavar=(\"number_of_hubs\"), type=int)\n parser.add_argument('--company', help='If a name is ambiguous, like yandex: it is a hub and a company, then enforce company interpretation', action=\"store_true\", default=False)\n\n args = vars(parser.parse_args())\n\n #check flags and delegate functions to src/reader.py and src/hubs_wrapper.py\n if len(sys.argv)==1:\n print_header_hubs()\n parser.print_help()\n return SUCCESS\n\n if args['omitheader']:\n pass\n else:\n print_header_hubs()\n\n if args['hublist']:\n Reader.print_hubs()\n return SUCCESS\n\n isCompany = False\n if args['company']:\n isCompany = True\n\n flag = None\n flagopts = None\n if args['max']:\n flag = \"max\"\n flagopts = args['max'][0]\n\n if args['min']:\n flag = \"min\"\n flagopts = args['min'][0]\n\n if args['similar']:\n hub_name = args['similar'][0]\n display_preferences(hub_name, isCompany, \"similarity\", flag, flagopts)\n return SUCCESS\n\n if args['alsoread']:\n hub_name = args['alsoread'][0]\n display_preferences(hub_name, isCompany, \"inclusion\", flag, flagopts)\n return SUCCESS\n\n\n\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.5768217444419861,
"alphanum_fraction": 0.640035092830658,
"avg_line_length": 41.185184478759766,
"blob_id": "ccadcaa9d6b148b6bb1d5957a6cac8153eaa74fe",
"content_id": "ac004e594fe0a4d36dfa41c3b058248e3d6b8196",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2278,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 54,
"path": "/src/draw.py",
"repo_name": "SergeyParamonov/HabraAnalyticsTools",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python \n\n\"\"\" Visualization function and basic statistics \"\"\"\nfrom matplotlib_venn import venn2, venn3\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import gca\nfrom analyzeHubs import HubAnalyzer\nimport numpy as np\n\ndef percent_of(set1,set2):\n dif = set1 & set2\n return str(int(100*len(dif)/len(set1)))\n\ndef print_stats(set1, set2, set3, label1, label2, label3):\n set1 = set(set1)\n set2 = set(set2)\n print(percent_of(set1,set2)+\"% of \" +label1 +\" intersects with \" +label2)\n print(percent_of(set2,set1)+\"% of \" +label2 +\" intersects with \" +label1)\n if label3 is None:\n print(\"Overall number of unique users: \" + str((len(set1.union(set2)))))\n return\n set3 = set(set3)\n print(percent_of(set1,set3)+\"% of \" +label1 +\" intersects with \" +label3)\n print(percent_of(set3,set1)+\"% of \" +label3 +\" intersects with \" +label1)\n print(percent_of(set3,set2)+\"% of \" +label3 +\" intersects with \" +label2)\n print(percent_of(set2,set3)+\"% of \" +label2 +\" intersects with \" +label3)\n print(\"Overall number of unique users: \" + str((len(set1.union(set2.union(set3))))))\n\ndef draw(set1, set2, set3, label1, label2, label3):\n set1 = set(set1)\n set2 = set(set2)\n if label3:\n set3 = set(set3)\n v = venn3([set1,set2, set3], (label1, label2, label3))\n plt.title('Venn diagram for hubs: ' + label1 + \",\" + label2 +\",\" + label3, fontsize=20)\n else:\n v = venn2([set1, set2], (label1, label2))\n plt.title('Venn diagram for hubs:' + label1 + \",\" + label2, fontsize=20)\n# if v.get_label_by_id('110'):\n# plt.annotate(percent_of(set1,set2)+\"% of \" +label1 , xy=v.get_label_by_id('110').get_position() - np.array([0.15, 0.10]))\n# plt.annotate(percent_of(set2,set1)+\"% of \" +label2 , xy=v.get_label_by_id('110').get_position() - np.array([0.15, 0.15]))\n if v.get_patch_by_id('100'):\n v.get_patch_by_id('100').set_color(\"blue\")\n if v.get_patch_by_id('010'):\n v.get_patch_by_id('010').set_color(\"red\")\n if 
v.get_patch_by_id('110'):\n v.get_patch_by_id('110').set_color(\"purple\")\n if label3 and v.get_patch_by_id('001'):\n v.get_patch_by_id('001').set_color(\"green\") \n if v.get_patch_by_id('111'):\n v.get_patch_by_id('111').set_color(\"black\") \n gca().set_axis_bgcolor('white')\n gca().set_axis_on()\n plt.show()\n"
},
{
"alpha_fraction": 0.36769232153892517,
"alphanum_fraction": 0.38615384697914124,
"avg_line_length": 33.21052551269531,
"blob_id": "8f4771f45a43197191490f49eb52faf7087d4853",
"content_id": "6021ef6e492e7bb99186733797aa624b4936a44d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 650,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 19,
"path": "/src/header.py",
"repo_name": "SergeyParamonov/HabraAnalyticsTools",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python \n\ndef print_header_venn():\n print(\n'''----------------------------------------------------------------------\n Venn diagrams for hubs from Habrahabr.ru.\n Version 0.1, 2014 by habra-user varagian\n Send your pizza, beer and kittens to [email protected]\n----------------------------------------------------------------------'''\n )\n\ndef print_header_hubs():\n print(\n'''----------------------------------------------------------------------\n Hub metrics for Habrahabr.ru\n Version 0.1, 2014 by habra-user varagian\n Send your pizza, beer and kittens to [email protected]\n----------------------------------------------------------------------\n''')\n"
},
{
"alpha_fraction": 0.6123371124267578,
"alphanum_fraction": 0.6309296488761902,
"avg_line_length": 38.404109954833984,
"blob_id": "171c6f5a5884d0ba6021a2f859c36377d72b17be",
"content_id": "9ce7e1cba81562865d20bb098f55fbca77c0235d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5755,
"license_type": "no_license",
"max_line_length": 212,
"num_lines": 146,
"path": "/venn.py",
"repo_name": "SergeyParamonov/HabraAnalyticsTools",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python \n\n\"\"\" Console interface for Venn diagrams \n Parses arguments and calls the functions from src/reader.py\n\"\"\"\nimport sys\nsys.path.append(\"./src\")\nfrom reader import Reader\nfrom analyzeHubs import HubAnalyzer\nimport argparse\nfrom argparse import RawTextHelpFormatter\nfrom draw import draw, print_stats\nfrom header import print_header\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--hubs', help='Print the list of available hubs from habrahabr',action='store_true', default=False)\n drawgroup = parser.add_argument_group(title='drawing commands') \n drawgroup.add_argument('--draw','-d', help='Make Venn diagram for the 1st and 2nd hubs (must be given) and optinally the 3rd', nargs=\"+\", metavar=('hubname'))\n drawgroup.add_argument('--stats', '-t', help='Must be used with --draw, print statistics about hubs intersection ', action='store_true', default=False)\n parser.add_argument('--onlystats',\"-o\", help='Print statistics (at least 2 hubs must be given) and exit', nargs=\"+\", metavar=('hubname'))\n parser.add_argument('--removehubdata', help='Remove the data for the selected hub', nargs=1, metavar=('hubname'))\n parser.add_argument('--removehub', help='Remove the hub\\'s name', nargs=1, metavar=('hubname'))\n parser.add_argument('--addhub', help='Add a link for a hub', nargs=2, metavar=('hubname', 'description'))\n parser.add_argument('--updatehub', help='Update user list of a hub', nargs=1, metavar=('hubname'))\n parser.add_argument('--silentheader',\"-s\", help='Do not show the header about pizza and kittens', action='store_true', default=False)\n parser.add_argument('--downloadcompany','-c', help='Download the data given company name e.g. 
yandex', nargs=1, metavar=(\"company_name\")) \n parser.add_argument('--company', help='If a name is ambiguous, like yandex: it is a hub and a company, then enforce company interpretation', nargs='+', metavar=(\"argument_index\"), type=int, choices=range(1,4)) \n args = vars(parser.parse_args())\n\n if len(sys.argv)==1:\n print_header()\n parser.print_help()\n sys.exit(1)\n \n if args['silentheader']:\n pass\n else:\n print_header()\n\n if args['downloadcompany']:\n Reader.download_company_data(args['downloadcompany'][0])\n print(\"Data has been downloaded. Now it can be used as a regular hub name\")\n exit()\n\n if args['updatehub']:\n try:\n hubname = args['updatehub'][0]\n Reader.updatehub(hubname)\n print(hubname+ \" has been updated. Done.\")\n exit(0)\n except Exception as e:\n print(e)\n\n if args['addhub']: \n try:\n hubname = args['addhub'][0]\n description = args['addhub'][1]\n Reader.addhub(hubname, description)\n print(\"Hub name and link have been added, done. \")\n exit(0)\n except Exception as e:\n print(e)\n print(str(e))\n\n if args['removehub']:\n try:\n Reader.removehub(args['removehub'][0])\n print(\"Link deletion: done.\")\n exit(0)\n except Exception as e:\n print(str(e))\n\n if args['removehubdata']:\n try:\n Reader.removehubdata(args['removehubdata'][0])\n print(\"Data deletion: done.\")\n exit(0)\n except Exception as e:\n print(str(e))\n\n if args['hubs']:\n Reader.print_hubs()\n\n if args['onlystats']:\n if len(args['onlystats']) >= 2:\n hub1 = args['onlystats'][0].strip()\n hub2 = args['onlystats'][1].strip()\n if len(args['onlystats']) > 2:\n hub3 = args['onlystats'][2]\n else:\n hub3 = None\n try:\n company_flag1 = False\n company_flag2 = False\n company_flag3 = False\n if args['company']:\n for i in args['company']: \n if i == 1:\n company_flag1 = True \n if i == 2:\n company_flag2 = True \n if i == 3:\n company_flag3 = True \n set1 = Reader.check_and_download(hub1,company_flag1)\n set2 = 
Reader.check_and_download(hub2,company_flag2)\n set3 = Reader.check_and_download(hub3,company_flag3)\n print_stats(set1,set2,set3,HubAnalyzer.convert_label(hub1, company_flag1),HubAnalyzer.convert_label(hub2, company_flag2), HubAnalyzer.convert_label(hub3, company_flag3))\n except Exception as e:\n print(str(e))\n else:\n print(\"To get statistics at least two hubs must be specified\")\n\n if args['draw']: \n if len(args['draw']) >= 2:\n hub1 = args['draw'][0].strip()\n hub2 = args['draw'][1].strip()\n if len(args['draw']) > 2:\n hub3 = args['draw'][2]\n else:\n hub3 = None\n try:\n company_flag1 = False\n company_flag2 = False\n company_flag3 = False\n if args['company']:\n for i in args['company']: \n if i == 1:\n company_flag1 = True \n if i == 2:\n company_flag2 = True \n if i == 3:\n company_flag3 = True \n set1 = Reader.check_and_download(hub1,company_flag1)\n set2 = Reader.check_and_download(hub2,company_flag2)\n set3 = Reader.check_and_download(hub3,company_flag3)\n if args['stats']:\n print_stats(set1,set2,set3,HubAnalyzer.convert_label(hub1, company_flag1),HubAnalyzer.convert_label(hub2, company_flag2), HubAnalyzer.convert_label(hub3, company_flag3))\n draw(set1,set2,set3,HubAnalyzer.convert_label(hub1, company_flag1),HubAnalyzer.convert_label(hub2, company_flag2), HubAnalyzer.convert_label(hub3, company_flag3))\n except Exception as e:\n print(str(e))\n else:\n print(\"To draw a diagram at least two hubs must be specified\")\n if not args['draw'] and args['stats']:\n print('--stats is used only with draw, use --onlystats hub1 hub2 [hub3] instead')\n #it's ok to have only two hubs\n\n\n"
}
] | 9 |
namoaton/Pet_clinic
|
https://github.com/namoaton/Pet_clinic
|
b443f6604e3bc7ad826c1f053aa321ae2ce4f0e4
|
ee173d4e197e6f2e438db03a491fcfb3ac57fb25
|
b0c307d4be4edac2a5e815f584b226600280cb38
|
refs/heads/master
| 2021-01-22T16:38:34.529672 | 2014-04-24T18:23:29 | 2014-04-24T18:23:29 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5151619911193848,
"alphanum_fraction": 0.5617433190345764,
"avg_line_length": 45.629032135009766,
"blob_id": "230e8d0d56eb20bd7c4e4a2826e6e8f5fb306c70",
"content_id": "18dba8ec034eb7732982ddc2b7fd2e527a6205fd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9088,
"license_type": "no_license",
"max_line_length": 473,
"num_lines": 186,
"path": "/table_fill.py",
"repo_name": "namoaton/Pet_clinic",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport MySQLdb\nimport sys\nfrom Tkinter import *\nfrom tkMessageBox import *\nimport Pmw\n\n\nclass Petclinic (Frame):\n counter = 0\n def __init__(self):\n Frame.__init__(self)\n Pmw.initialise()\n self.pack (expand = YES, fill = BOTH)\n self.master.title(u'Ветеринарная клиника КОТ-ПЕС')\n \n #button for command\n self.buttons = Pmw.ButtonBox( self, padx = 0)\n self.buttons.grid(columnspan = 2)\n self.buttons.add(u\"Прием пациента\", command = self.visit, font = \"Ubuntu 15\")\n self.buttons.add(u\"Очистить\", command = self.clearContents , font = \"Ubuntu 15\")\n self.buttons.add(u\"Поиск карточки\", command = self.findCard, font = \"Ubuntu 15\")\n self.buttons.add(u\"Заполнение карточки\", command = self.editCard, font = \"Ubuntu 15\")\n self.buttons.add(u\"Породы собак\", command = self.show_breeds,font = \"Ubuntu 15\")\n self.buttons.add(u\"quit\", command = quit)\n #list of visit entry\n #fields = [u'Id',u'Кличка',u'Хозяин',u'Жалобы',u'Манипуляции',u'Назначения']\n fields = [u'Id', u'\\u041a\\u043b\\u0438\\u0447\\u043a\\u0430', u'\\u0425\\u043e\\u0437\\u044f\\u0438\\u043d', u'\\u0416\\u0430\\u043b\\u043e\\u0431\\u044b', u'\\u041c\\u0430\\u043d\\u0438\\u043f\\u0443\\u043b\\u044f\\u0446\\u0438\\u0438', u'\\u041d\\u0430\\u0437\\u043d\\u0430\\u0447\\u0435\\u043d\\u0438\\u044f']\n\n self.entries = {}\n self.entrie_owner={}\n self.IDEntry = StringVar()\n self.IDEntry.set(\"\")\n\n #create entries\n for i in range(len(fields)): \n label = Label(self, text = fields[i], font = \"Ubuntu 10\")\n label.grid (row =i+1, column = 0)\n entry = Entry(self, name = fields[i].lower(), font = \"Ubuntu 20\")\n entry.grid(row = i+1, column = 1, sticky = W+E+N+S, padx =5)\n #service field \n if fields[i] == u'Id':\n entry.config(state =DISABLED, textvariable = self.IDEntry, bg = 'gray')\n entry.insert(2,\"0nbn\")\n \n #add entry field to dict\n key = fields [i].replace(\" \",\"_\")\n #key = key.upper()\n self.entries [key] = 
entry\n\n \n#[u'Id', u'\\u041a\\u043b\\u0438\\u0447\\u043a\\u0430', u'\\u0425\\u043e\\u0437\\u044f\\u0438\\u043d', u'\\u0416\\u0430\\u043b\\u043e\\u0431\\u044b', u'\\u041c\\u0430\\u043d\\u0438\\u043f\\u0443\\u043b\\u044f\\u0446\\u0438\\u0438', u'\\u041d\\u0430\\u0437\\u043d\\u0430\\u0447\\u0435\\u043d\\u0438\\u044f']\n \n#add visit data to Base\n def visit(self):\n if self.entries [u'\\u041a\\u043b\\u0438\\u0447\\u043a\\u0430'].get()!=\"\" and self.entries [u'Жалобы'].get()!=\"\" and self.entries [u'Манипуляции'].get()!=\"\" and self.entries [u'Назначения'].get()!=\"\":\n query = \"INSERT INTO visit (pet_id, owner_id, diagnose, manipulation, administration) VALUES(\" +str(self.entries[u'Id'].get(),self.entries[u'Хозяин'].get(),self.entries[u'Жалобы'].get(),self.entries[u'Манипуляции'].get(),self.entries[u'Назначения'].get())\n query = query[:-2]+\")\"\n try :\n print query\n con = MySQLdb.connect(host = 'xxx.xxx.xxx.xxx', user = 'user', passwd = 'pass', db= \"petscl\");\n cursor =con.cursor()\n cursor.execute(query_list)\n except MySQLdb.OperationalError, mesage:\n errorMessage = \"Error %d:\\n%s\" %(message[0],mesage[1])\n showerror (\"Error\", errorMessage)\n else:\n cursor.close()\n con.close()\n self.clearContents()\n else:\n showwarning (u\"Заполните поля\", u\"Заполните все поля\")\n \n \n def clearContents(self):\n for entry in self.entries.values():\n entry.delete(0,END)\n self.IDEntry.set(u'')\n for entry in self.entrie_owner.values():\n entry.delete(0,END)\n self.IDEntry.set(u'')\n\n def findCard(self):\n search_entry={}\n t = Toplevel(self, bd=10)\n t.wm_title(u'Поиск')\n \n\t\t#create entries for searchimg\n search_list = [u'Кличка',u'Фамилия хозяина']\n for i in range(len(search_list)): \n label = Label(t, text = search_list[i], font = \"Ubuntu 10\")\n label.grid (row =i+1, column = 0)\n entry = Entry(t, name = search_list[i].lower(), font = \"Ubuntu 20\")\n entry.grid(row = i+1, column = 1, sticky = W+E+N+S, padx =5)\n key = search_list[i]\n 
search_entry [key] = entry\n print search_entry\n t.buttons = Pmw.ButtonBox( t, padx = 0)\n t.buttons.grid(columnspan = 2)\n t.buttons.add(u\"Очистить\", command = self.clearContents() , font = \"Ubuntu 15\")\n t.buttons.add(u\"Поиск карточки\", command = self.findCard, font = \"Ubuntu 15\")\n t.buttons.add(u\"Закрыть\", command = t.destroy, font = \"Ubuntu 15\")\n \n \n \n def show_breeds(self):\n ss = Toplevel(self)\n ss.wm_title(u'Породы собак')\n con = MySQLdb.connect(host = '192.168.1.100', user = 'admin', passwd = 'root', db =\"petscl\", use_unicode=True);\n con.set_character_set('utf8')\n cur = con.cursor()\n cur.execute('SET NAMES utf8;')\n cur.execute('SET CHARACTER SET utf8;')\n cur.execute('SET character_set_connection=utf8;')\n query_db =\"SELECT * FROM breed ORDER BY breed ASC;\"\n cur = con.cursor()\n cur.execute(query_db)\n allRec = cur.fetchall()\n cur.close()\n con.close()\n #print allRec\n q=0\n label = Label(ss, text = u'Вид', font = \"Ubuntu 20\", justify=\"left\")\n scrollbar = Scrollbar(ss)\n scrollbar.pack( side = RIGHT, fill=Y ) \n list_db =Listbox(ss, yscrollcommand = scrollbar.set,font = \"Ubuntu 15\", width =\"40\")\n for i in allRec:\n\t\t\tq=q+1\n\t\t\t#a =\"ID : \"+str(i[])+\" Species :\" + i[1]+\"\\n\"\n\t\t\ta =\" Species :\" + str(i[1])+\"\\n\"\n\t\t\t#label = Label(ss, text = a, font = \"Ubuntu 20\", justify=LEFT)\n\t\t\t#label.grid (row =q+1, column = 1)\n\t\t\t\n\t\t\tlist_db.insert(q, unicode(i[2]))\n list_db.pack(expand=1,fill=BOTH)\n \n \n def editCard(self):\n search_entry={}\n tt = Toplevel(self, bd=10)\n tt.wm_title(u'Владелец')\n \n\t\t#create entries for searchimg\n search_list = [u'Имя',u'Фамилия',u'Адрес',u'Email',u'Телефон',u'Доп_телефон']\n for i in range(len(search_list)): \n label = Label(tt, text = search_list[i], font = \"Ubuntu 10\")\n label.grid (row =i+1, column = 0)\n entry = Entry(tt, name = search_list[i].lower(), font = \"Ubuntu 20\")\n entry.grid(row = i+1, column = 1, sticky = W+E+N+S, padx =5)\n key = 
search_list[i]\n search_entry [key] = entry\n print search_entry\n key = search_list[i].replace(\" \",\"_\")\n #key = key.upper()\n self.entrie_owner [key] = entry\n tt.buttons = Pmw.ButtonBox( tt, padx = 0)\n tt.buttons.grid(columnspan = 2)\n tt.buttons.add(u\"Очистить\", command = self.clearContents , font = \"Ubuntu 15\")\n tt.buttons.add(u\"Записать\", command = self.insert_owner, font = \"Ubuntu 15\")\n tt.buttons.add(u\"Закрыть\", command = tt.destroy, font = \"Ubuntu 15\")\n \n def insert_owner(self):\n if self.entrie_owner [u'Имя'].get()!=\"\" and self.entrie_owner [u'Фамилия'].get()!=\"\" and self.entrie_owner [u'Адрес'].get()!=\"\" and self.entrie_owner [u'Телефон'].get()!=\"\":\n query_owner = \"INSERT INTO owner (name, surname, adress, email, phone, home_phone) VALUES('\" +str(self.entrie_owner[u'Имя'].get()).decode('utf_8')+\"','\"+str(self.entrie_owner[u'Фамилия'].get()).decode('utf_8')+\"','\"+str(self.entrie_owner[u'Адрес'].get()).decode('utf_8')+\"','\"+str(self.entrie_owner[u'Email'].get()).decode('utf_8')+\"','\"+str(self.entrie_owner[u'Телефон'].get()).decode('utf_8')+\"','\"+str(self.entrie_owner[u'Доп_телефон'].get()).decode('utf_8')\n query_owner = query_owner[:-2]+\"')\"\n try :\n print query_owner\n con = MySQLdb.connect(host = 'xxx.xxx.xxx.xxx', user = 'user', passwd = 'pass', db= \"petscl\");\n cursor =con.cursor()\n cursor.execute(query_owner)\n except MySQLdb.OperationalError, mesage:\n errorMessage = \"Error %d:\\n%s\" %(message[0],mesage[1])\n showerror (\"Error\", errorMessage)\n else:\n cursor.close()\n con.close()\n self.clearContents()\n else:\n showwarning (u\"Заполните поля\", u\"Заполните все поля\")\n \n \ndef main():\n Petclinic().mainloop()\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.5980091691017151,
"alphanum_fraction": 0.6102603077888489,
"avg_line_length": 29.255813598632812,
"blob_id": "2c3af14935607e59f93cd18fe833f7b0bd12bb25",
"content_id": "4d4450376691463e7aa126b4450744e627db1b97",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1315,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 43,
"path": "/dog_breed.py",
"repo_name": "namoaton/Pet_clinic",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport MySQLdb\nimport sys\nimport urllib2\nfrom BeautifulSoup import BeautifulSoup\n\nlink_list=[\"http://poisk-druga.ru/breeds/\"]\nfor i in range(27):\n if i>=2:\n link_list.append(\"http://poisk-druga.ru/breeds/page/\"+str(i)+\"/\")\n\nbreed_list={u\"Без породы\":'1'}\nfor url in link_list:\n page = urllib2.urlopen(url)\n soup = BeautifulSoup(page)\n hit = soup.findAll('div', attrs = { 'class' : 'short-breeds' })\n for z in hit:\n x= {z.text:\"1\"}\n breed_list.update(x)\nfor key in breed_list:\n print \" (%s, %s);\"%(key,breed_list[key])\n\ndef add_breed(breed_list):\n con = MySQLdb.connect(host = 'xxx', user = 'user', passwd = 'pass', db =\"petscl\", use_unicode=True);\n #con.names=\"utf8\"\n con.set_character_set('utf8')\n cur = con.cursor()\n cur.execute('SET NAMES utf8;')\n cur.execute('SET CHARACTER SET utf8;')\n cur.execute('SET character_set_connection=utf8;')\n for key in breed_list:\n query_db =u\"INSERT INTO breed (species_id, breed) values ('%s','%s')\"%(breed_list[key],key)\n print query_db\n cur.execute(query_db)\n cur.close()\n con.commit()\n con.close()\ntry:\n add_breed(breed_list)\nexcept MySQLdb.Error, e: \n print \"Error %d: %s\" % (e.args[0],e.args[1])\n sys.exit(1)\n \n"
},
{
"alpha_fraction": 0.7063426971435547,
"alphanum_fraction": 0.7277594804763794,
"avg_line_length": 62.894737243652344,
"blob_id": "6e3824e37697b2a112895958e1903b2c1b91f308",
"content_id": "ddb243e546e476391b3f02a4467698976b5335f2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2428,
"license_type": "no_license",
"max_line_length": 547,
"num_lines": 38,
"path": "/init_db.py",
"repo_name": "namoaton/Pet_clinic",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport MySQLdb\nimport sys\ncreate_db = \"CREATE DATABASE IF NOT EXISTS petscl\"\nselect_db = \"use petscl\"\n\ncreate_pet = \"CREATE TABLE pet ( pet_id INT(4) unsigned not null auto_increment,species_id INT(4) unsigned not null,breed_id INT(4) unsigned not null,owner_id INT(4) unsigned not null,nick varchar(30) not null,birth_date DATE, image varchar(50),PRIMARY KEY (pet_id),FOREIGN KEY (species_id) REFERENCES species(species_id) ON UPDATE CASCADE ON DELETE RESTRICT,FOREIGN KEY (breed_id) REFERENCES breed(breed_id) ON UPDATE CASCADE ON DELETE RESTRICT,FOREIGN KEY (owner_id) REFERENCES owner(owner_id) ON UPDATE CASCADE ON DELETE RESTRICT) TYPE=InnoDb\"\n\ncreate_species = \"CREATE TABLE species(species_id INT(4) unsigned not null auto_increment,species varchar(100) not null, PRIMARY KEY (species_id)) TYPE=InnoDb\"\n\ncreate_breed = \"CREATE TABLE breed(breed_id INT(4) unsigned not null auto_increment,species_id INT(4) unsigned not null, breed varchar(100) not null, PRIMARY KEY (breed_id), FOREIGN KEY (species_id) REFERENCES species(species_id) ON UPDATE CASCADE ON DELETE RESTRICT) TYPE=InnoDb\"\n\ncreate_owner = \"CREATE TABLE owner(owner_id INT(4) unsigned not null auto_increment,name varchar(100) not null, surname varchar(100) not null, adress varchar(100) not null, email varchar(100) not null, phone varchar(20), home_phone varchar(20) , PRIMARY KEY (owner_id)) TYPE=InnoDb\"\n\ncreate_visit = \"CREATE TABLE visit ( visit_id INT(4) unsigned not null auto_increment, date TIMESTAMP(10),pet_id INT(4) unsigned not null, owner_id INT(4) unsigned not null, diagnose varchar(255) not null, manipulation varchar(255) not null, administration varchar(255) not null, PRIMARY KEY (visit_id), FOREIGN KEY (pet_id) REFERENCES pet(pet_id) ON UPDATE CASCADE ON DELETE RESTRICT, FOREIGN KEY (owner_id) REFERENCES owner(owner_id) ON UPDATE CASCADE ON DELETE RESTRICT) TYPE=InnoDb\"\nquery_list = 
[create_db,select_db,create_species,create_breed, create_owner,create_pet,create_visit]\n\ndef pp(cur):\n ver = cur.fetchone()\n print ver\n\ndef qury_db(lst):\n cur = con.cursor()\n for i in lst:\n cur.execute(i)\n pp(cur)\ntry:\n con = MySQLdb.connect(host = 'xxx.xxx.xxx.xxx', user = 'user', passwd = 'pass');\n qury_db(query_list)\n \nexcept MySQLdb.Error, e: \n print \"Error %d: %s\" % (e.args[0],e.args[1])\n sys.exit(1)\n \nfinally:\n if con:\n con.close()\n"
},
{
"alpha_fraction": 0.654321014881134,
"alphanum_fraction": 0.6790123581886292,
"avg_line_length": 15.199999809265137,
"blob_id": "1fa54e0e9c719d4984c5950559a73393967dcb6f",
"content_id": "fab731ffe8d8d604410671c23632bdfac5d20b00",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 162,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 10,
"path": "/README.md",
"repo_name": "namoaton/Pet_clinic",
"src_encoding": "UTF-8",
"text": "PET CLINIC\n==========\n\n:Description: Python program for use in pet clinic\n:Language: Python\n:Date: 2014\n\ninit_db.py\n----------\ncreates Mysql database for program\n"
}
] | 4 |
christofranz/covid-19-dashboard
|
https://github.com/christofranz/covid-19-dashboard
|
0e4a27ea09ef06bc72bafb00bca2eb17e33be43e
|
64f8897a8621e7308d6ee591b85dcff79dee70a6
|
edeffcd09b8ad83ef88cfe568cc6a4d6061ea5ce
|
refs/heads/master
| 2022-07-07T18:03:20.549096 | 2020-05-19T14:15:49 | 2020-05-19T14:15:49 | 254,037,359 | 0 | 0 | null | 2020-04-08T09:03:57 | 2020-05-19T14:15:52 | 2022-06-22T01:39:55 |
Python
|
[
{
"alpha_fraction": 0.5532492995262146,
"alphanum_fraction": 0.5626389980316162,
"avg_line_length": 35.4594612121582,
"blob_id": "39d411b4e3a27174a6cd337ae0174b42545ebf30",
"content_id": "0857bc6e83833670020424685fce003e36860ced",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4047,
"license_type": "no_license",
"max_line_length": 144,
"num_lines": 111,
"path": "/wrangling_scripts/wrangle_data.py",
"repo_name": "christofranz/covid-19-dashboard",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nimport plotly.graph_objs as go\n\n# Use this file to read in your data and prepare the plotly visualizations. The path to the data files are in\n# `data/file_name.csv`\n\ndef load_dataset():\n df = pd.read_csv(\"data/covid_19.csv\")\n cumulative_df = df.groupby([\"countryterritoryCode\"]).sum()[\"cases\"].reset_index()\n country_df = df[[\"countriesAndTerritories\", \"countryterritoryCode\"]].drop_duplicates().reset_index()\n df_merged = pd.merge(cumulative_df, country_df, on=\"countryterritoryCode\")\n return df_merged\n\ndef return_figures():\n \"\"\"Creates four plotly visualizations\n\n Args:\n None\n\n Returns:\n list (dict): list containing the four plotly visualizations\n\n \"\"\"\n df = pd.read_csv(\"data/covid_19.csv\")\n cumulative_df = df.groupby([\"countryterritoryCode\"]).sum()[[\"cases\", \"deaths\"]].reset_index()\n country_df = df[[\"countriesAndTerritories\", \"countryterritoryCode\"]].drop_duplicates().reset_index()\n df_merged = pd.merge(cumulative_df, country_df, on=\"countryterritoryCode\")\n \n graph_one = []\n df[\"time\"] = pd.to_datetime(df[[\"day\", \"month\", \"year\"]])\n country_of_interest = [\"United_States_of_America\", \"China\", \"Taiwan\", \"Italy\", \"Spain\", \"France\", \"Germany\", \"Iran\", \"Japan\", \"South_Corea\"]\n for country in country_of_interest:\n df_country = df[df[\"countriesAndTerritories\"] == country]\n df_country_sorted = df_country.sort_values(by='time')\n graph_one.append(\n go.Scatter(\n x = df_country_sorted[\"time\"],\n y = df_country_sorted[\"cases\"],\n mode = 'lines+markers',\n name = country\n )\n )\n\n layout_one = dict(title = 'Confirmed Cases per Day and Country',\n xaxis = dict(title = 'time'),\n yaxis = dict(title = 'Cases per day'),\n )\n\n# second chart plots ararble land for 2015 as a bar chart \n graph_two = []\n df_merged[\"ratio\"] = df_merged[\"deaths\"] / df_merged[\"cases\"]\n df_ratio = df_merged.sort_values(by=\"ratio\", ascending=False)\n df_ratio = 
df_ratio[df_ratio[\"cases\"] > 1000]\n df_ratio[\"ratio\"] = df_ratio[\"ratio\"] * 100\n countries_high = df_ratio[\"countriesAndTerritories\"][:10].tolist()\n\n graph_two.append(\n go.Bar(\n x = countries_high,\n y = df_ratio.iloc[:10].ratio.tolist(),\n name = \"death ratio\"\n )\n )\n layout_two = dict(title = 'Death Ratio Per Country',\n barmode = \"group\",\n xaxis = dict(title = 'Country',),\n yaxis = dict(title = 'Death Ratio [%]')\n )\n \n # world map\n \n graph_five = []\n graph_five.append(go.Choropleth(\n locations = df_merged['countryterritoryCode'],\n z = df_merged['cases'],\n text = df_merged['countriesAndTerritories'],\n colorscale = 'Blues',\n autocolorscale=False,\n reversescale=False))\n\n layout_five = dict(title = 'Confirmed COVID-19 cases 20/04/08',\n title_x=0.5,\n geo=dict(\n showframe=False,\n showcoastlines=False,\n projection_type='equirectangular'\n ),\n marker_line_color='darkgray',\n marker_line_width=0.5,\n colorbar_tickprefix = '',\n colorbar_title = 'Confirmed Cases',\n annotations = [dict(\n x=0.55,\n y=0.1,\n xref='paper',\n yref='paper',\n text='Source: <a href=\"https://data.europa.eu/euodp/de/data/dataset/covid-19-coronavirus-data\">data.europe</a>',\n showarrow = False)]\n )\n \n \n \n \n # append all charts to the figures list\n figures = []\n figures.append(dict(data=graph_one, layout=layout_one))\n figures.append(dict(data=graph_two, layout=layout_two))\n figures.append(dict(data=graph_five, layout=layout_five)) \n \n \n return figures\n"
},
{
"alpha_fraction": 0.7658536434173584,
"alphanum_fraction": 0.787804901599884,
"avg_line_length": 57.57143020629883,
"blob_id": "61b6b11d62c1d624f00946047f21ccbc26cbb8f8",
"content_id": "0b459445b333df475e3b19b03fb73701ae64ded4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 410,
"license_type": "no_license",
"max_line_length": 280,
"num_lines": 7,
"path": "/README.md",
"repo_name": "christofranz/covid-19-dashboard",
"src_encoding": "UTF-8",
"text": "# Covid-19 Data Dashboard\n\nWork in progress.\n \nIn this project data about corona cases gathered until the 8th of April 2020 is processed and analyzed. In particular, the confirmed corona cases per day and country, the countries with the highest death ratio and a world map with total cases is visualized in a web app on heroku.\n \nPlease visit https://covid-19-data-dashboard.herokuapp.com/ for more details.\n"
}
] | 2 |
jtapiovaara/adventureOura
|
https://github.com/jtapiovaara/adventureOura
|
d16743120e8008860b7fc094838286dfd663b69e
|
e0665a66381328d5fb496d95a35310a5471c9767
|
11a9a60c8dc07bbde8f2af55f04ed4cdddfaf3a9
|
refs/heads/master
| 2023-08-11T07:50:18.521006 | 2021-09-25T07:31:53 | 2021-09-25T07:31:53 | 344,736,737 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5983379483222961,
"alphanum_fraction": 0.6186518669128418,
"avg_line_length": 27.473684310913086,
"blob_id": "1b76f184cf0aec39751586ca485ed107440ceb56",
"content_id": "c720867b636327b7cee117c778eafe14f11e490d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1083,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 38,
"path": "/myOura/models.py",
"repo_name": "jtapiovaara/adventureOura",
"src_encoding": "UTF-8",
"text": "from django.db import models\n\n\nclass Sportdays(models.Model):\n DAYS_CHOICES = [\n ('0', 'Ma'),\n ('1', 'Ti'),\n ('2', 'Ke'),\n ('3', 'To'),\n ('4', 'Pe'),\n ('5', 'La'),\n ('6', 'Su'),\n ]\n days = models.CharField(max_length=1, blank=True, choices=DAYS_CHOICES)\n\n def __str__(self):\n return self.days\n\n\nclass Ourauser(models.Model):\n firstname = models.CharField(max_length=32, blank=True)\n lastname = models.CharField(max_length=32, blank=True)\n username = models.CharField(max_length=24, blank=True)\n ourakey = models.CharField(max_length=64, blank=True)\n sportdays = models.ManyToManyField('Sportdays', blank=True)\n tintensity = models.IntegerField(blank=True, null=True, default=2)\n\n def __str__(self):\n return self.firstname\n\n\nclass Hqmessages(models.Model):\n shortdesc = models.CharField(max_length=8, blank=True)\n meaning = models.CharField(max_length=32, blank=True)\n longdesc = models.CharField(max_length=64, blank=True)\n\n def __str__(self):\n return self.shortdesc\n\n"
},
{
"alpha_fraction": 0.637509822845459,
"alphanum_fraction": 0.6398739218711853,
"avg_line_length": 32.394737243652344,
"blob_id": "ae29db0a764e2ddf7a85368c4be4bec829510c3f",
"content_id": "1d0e918139cf7f612c904b93f3e06613f052ff2f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1274,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 38,
"path": "/myOura/admin.py",
"repo_name": "jtapiovaara/adventureOura",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\nfrom django.db import models\nfrom django.utils.translation import gettext_lazy as _\nfrom django.forms import CheckboxSelectMultiple\n\nfrom .models import Ourauser, Sportdays, Hqmessages\n\n\nclass OurauserAdmin(admin.ModelAdmin):\n formfield_overrides = {\n models.ManyToManyField: {'widget': CheckboxSelectMultiple},\n }\n Sportdays.get_days_display.short_description = 'days'\n fieldsets = [\n ('Nimi', {'fields': ['firstname', 'lastname']}),\n ('Urheilupäivät, merkitse milloin teet rasittavaa urheilua 3 min tai kauemmin. '\n '0 = Maanantai ja siitä eteenpäin.', {'fields': ['sportdays']}),\n ('Kun urheilet, jossain vaiheessa suoritus on ns. Kova, esim. 4 minuutin jälkeen.', {'fields': ['tintensity']}),\n (_('Oura Tiedot'), {\n 'classes': ('collapse',),\n 'fields': (\n 'username',\n 'ourakey',\n ),\n }),\n # ('Oura Tiedot', {'fields': ['username', 'ourakey']})\n ]\n\n list_display = ['firstname']\n\n\nclass HqmessagesAdmin(admin.ModelAdmin):\n list_display = ('shortdesc', 'meaning')\n\n\nadmin.site.register(Ourauser, OurauserAdmin)\nadmin.site.register(Sportdays)\nadmin.site.register(Hqmessages, HqmessagesAdmin)\n"
},
{
"alpha_fraction": 0.5127782225608826,
"alphanum_fraction": 0.5383347272872925,
"avg_line_length": 36.90625,
"blob_id": "208798ef990993a20d78b60df8371a93dc312c40",
"content_id": "294216d20336b53d810ab3ce725f415f6239b332",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1213,
"license_type": "no_license",
"max_line_length": 170,
"num_lines": 32,
"path": "/myOura/migrations/0001_initial.py",
"repo_name": "jtapiovaara/adventureOura",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.1.7 on 2021-02-27 08:40\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Sportdays',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('days', models.CharField(blank=True, choices=[('0', 'Ma'), ('1', 'Ti'), ('2', 'Ke'), ('3', 'To'), ('4', 'Pe'), ('5', 'La'), ('6', 'Su')], max_length=1)),\n ],\n ),\n migrations.CreateModel(\n name='Ourauser',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('firstname', models.CharField(blank=True, max_length=32)),\n ('lastname', models.CharField(blank=True, max_length=32)),\n ('username', models.CharField(blank=True, max_length=24)),\n ('ourakey', models.CharField(blank=True, max_length=64)),\n ('sportdays', models.ManyToManyField(blank=True, to='myOura.Sportdays')),\n ],\n ),\n ]\n"
},
{
"alpha_fraction": 0.6171257495880127,
"alphanum_fraction": 0.6281437277793884,
"avg_line_length": 33.64730453491211,
"blob_id": "4162a588b0e0064d4bb17600cce03b5111106873",
"content_id": "b70206f936b77c5568a14ae60ed570e6d255509e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8436,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 241,
"path": "/myOura/views.py",
"repo_name": "jtapiovaara/adventureOura",
"src_encoding": "UTF-8",
"text": "import os\nimport requests\nimport datetime\n\nfrom django.shortcuts import render\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.admin import User\nfrom django.conf import settings\nfrom django.contrib.auth.decorators import login_required\n\nfrom .models import Ourauser, Sportdays, Hqmessages\n\n\ndef ourastart(request):\n return render(request, 'myOura/ourastart.html')\n\n\ndef logout(request):\n logout(request)\n\n\n@login_required\ndef ouraapi(request):\n \"\"\"\n 2021-01-09 Päätös hakea aina kaikista rajapinnoista 'kaikki' data. Vaihtoehtona olisi hakea vain eilisestä eteenpäin.\n Oli käytössä Sleep- ja Readiness- rajapinnoissa. Voi muuttaa takaisin jos tarvis, toiminnot #-merkitty\n \"\"\"\n global sleepstory\n # omaouraapi = OURA_API\n kayttaja = request.user.username\n assert isinstance(Ourauser.objects.get(username=kayttaja).ourakey, object)\n omaouraapi = Ourauser.objects.get(username=kayttaja).ourakey\n you = Ourauser.objects.get(username=kayttaja).firstname\n messages = Hqmessages.objects.all()\n\n today = datetime.date.today()\n # eilinen = str(today - datetime.timedelta(days=1))\n\n # url_sleep = 'https://api.ouraring.com/v1/sleep?start=' + eilinen + '&access_token=' + omaouraapi\n # url_ready = 'https://api.ouraring.com/v1/readiness?start=' + eilinen + '&access_token=' + omaouraapi\n url_user = 'https://api.ouraring.com/v1/userinfo?access_token=' + omaouraapi\n url_sleep = 'https://api.ouraring.com/v1/sleep?&access_token=' + omaouraapi\n url_active = 'https://api.ouraring.com/v1/activity?access_token=' + omaouraapi\n url_ready = 'https://api.ouraring.com/v1/readiness?&access_token=' + omaouraapi\n url_bedtime = 'https://api.ouraring.com/v1/bedtime?access_token=' + omaouraapi\n u = requests.get(url_user).json()\n s = requests.get(url_sleep).json()\n a = requests.get(url_active).json()\n r = requests.get(url_ready).json()\n b = requests.get(url_bedtime).json()\n\n# Käyttäjätietoja\n\n # BMI (u)\n height 
= float(u['height']) / 100\n weight = float(u['weight'])\n bmi = round(weight / (height * height), 2)\n\n# Unenlaatu\n\n # Syvän unen määrä viime yönä (s)\n sleeptotal = s['sleep'][0]['total']/60\n deepsleepamount = s['sleep'][-1]['deep']/60\n # deepscore = s['sleep'][0]['score_deep']\n deepsleeppercentage = round(deepsleepamount/sleeptotal*100, 1)\n if deepsleeppercentage < 12:\n sleepstory = ', mikä on liian vähän'\n if 12 <= deepsleeppercentage < 17:\n sleepstory = ', mikä on melkein riittävästi'\n if deepsleeppercentage >= 17:\n sleepstory = ', hyvät syvät!'\n\n# Aktiivisuutta\n\n # Aktiivisuus, viimeiset 2h (a)\n a_kappyra = a['activity'][-1]['class_5min']\n a_2h = a_kappyra[-24:]\n\n # Kävellyt kilometrit eilen\n activedata = a['activity'][-2]['daily_movement']\n\n # Kävellyt kilometrit tänään\n activedata_2 = a['activity'][-1]['daily_movement']\n\n # Kävellyt kilometrit tänään miinus eilen. Onko käyrä ylös vai alas?\n plusmiinus = activedata_2 - activedata\n if plusmiinus > 0:\n okei = 'parempi'\n else:\n okei = 'huonompi'\n\n # Otetut askeleet, total (5 vrk)\n stepsit = a['activity'][-7:]\n\n # Askelet tänään\n steps = a['activity'][-1]['steps']\n steps_vk = 0\n for i in range(0, 7):\n steps_pv = a['activity'][i]['steps']\n steps_vk = steps_vk + steps_pv\n\n # Paikallaanolohälytykset tänään\n liikkeellemars = a['activity'][-1]['inactivity_alerts']\n\n # Raskas urheilu eilen\n voima = a['activity'][-1]['high']\n\n# Liikuntaraportti\n\n viikkoday = []\n weekstrength = []\n for i in range(0, 7):\n viikkovoima = a['activity'][i]['high']\n viikkovoimapvm = a['activity'][i]['day_start']\n y = int(viikkovoimapvm[0:4])\n m = int(viikkovoimapvm[6:7])\n d = int(viikkovoimapvm[8:10])\n viikkoday.append(datetime.datetime(y, m, d).weekday())\n weekstrength.append(viikkovoima)\n\n pvm = viikkoday\n pvmh = weekstrength\n pvm_pvmh = dict(zip(pvm, pvmh))\n\n # Alla on Django Adminissa määritetyt omat harjoittelupäivät/viikko.\n # Suunniteltujen urheilupäivien loogiset 
nimet. Tässä alla myös vaihtoehtoinen (nopeampi?) tapa.\n\n # omatsportdays = Ourauser.objects.get(username=kayttaja)\n # print(omatsportdays.sportdays.all())\n\n # Jos edellinen päivä oli urheilupäivä, niin raporttipäivän vähempi liikunta vähän kiltimmällä viestillä.\n\n urkkadaynimet = Sportdays.objects.filter(ourauser__firstname__iexact=you).order_by('days')\n tamapaiva = datetime.date.weekday(today)\n sdays = urkkadaynimet.filter().values_list('days', flat=True)\n eilensport = False\n eilensportmsg = ''\n for a in sdays:\n if tamapaiva == int(a)+1:\n eilensportmsg = messages[4].longdesc\n eilensport = True\n\n kova_tintensity = Ourauser.objects.get(username=kayttaja).tintensity\n sporttiminuutit = 0\n\n for i in pvm_pvmh:\n sporttiminuutit += pvm_pvmh[i]\n if pvm_pvmh[i] > kova_tintensity:\n with open('lauantairaportti.txt', 'a') as f:\n f.write('Päivä ' + str(i) + ' Urheilit kovalla pulssilla ' + str(pvm_pvmh[i]) + ' minuuttia.')\n if str(i) in sdays:\n with open('lauantairaportti.txt', 'a') as f:\n f.write(' ' + messages[0].longdesc)\n else:\n with open('lauantairaportti.txt', 'a') as f:\n f.write(' ' + messages[1].longdesc)\n else:\n if str(i) in sdays:\n with open('lauantairaportti.txt', 'a') as f:\n f.write('Päivä ' + str(i) + ' ' + messages[2].longdesc)\n else:\n with open('lauantairaportti.txt', 'a') as f:\n f.write('Päivä ' + str(i) + ' ' + messages[3].longdesc)\n with open('lauantairaportti.txt', 'a') as f:\n f.write('\\n')\n with open('lauantairaportti.txt', 'a') as f:\n f.write('Viikonsaldo: ' + str(sporttiminuutit) + ' minuuttia hikijumppaa.')\n\n fin = open(\"lauantairaportti.txt\", \"rt\")\n # read file contents to string\n data = fin.read()\n # replace all occurrences of the required string\n data = data.replace('Päivä 0', 'Ma')\n data = data.replace('Päivä 1', 'Ti')\n data = data.replace('Päivä 2', 'Ke')\n data = data.replace('Päivä 3', 'To')\n data = data.replace('Päivä 4', 'Pe')\n data = data.replace('Päivä 5', 'La')\n data = 
data.replace('Päivä 6', 'Su')\n # close the input file\n fin.close()\n # open the input file in write mode\n fin = open(\"lauantairaportti.txt\", \"wt\")\n # overrite the input file with the resulting data\n fin.write(data)\n # close the file\n fin.close()\n\n with open('lauantairaportti.txt', 'r') as f:\n raportti = f.readlines()\n\n os.remove('lauantairaportti.txt')\n\n# Valmiustila (r)\n\n readydata = r['readiness'][-1]['score']\n readydatahistory = r['readiness'][-7:]\n\n # score_previous_day vaihdettu '-2' koska en tiedä, mitä se tekee\n # r_yesterday = r['readiness'][-1]['score_previous_day']\n r_yesterday = r['readiness'][-2]['score']\n valmiusero = readydata - r_yesterday\n\n# Ihanteellinen nukkumaanmenoaika (b)\n\n nukkumaanko = b['ideal_bedtimes'][0]['status']\n unille = b['ideal_bedtimes'][0]['bedtime_window']['end']\n pillowtime = ''\n\n if unille is not None:\n seconds_input = unille\n conversion = datetime.timedelta(seconds=seconds_input)\n ta = str(conversion)\n pillowtime = ta[-8:-3]\n\n context = {\n 'you': you,\n 'urkkadaynimet': urkkadaynimet,\n 'bmi': bmi,\n 'deepsleepamount': deepsleepamount,\n 'deepsleeppercentage': deepsleeppercentage,\n 'sleepstory': sleepstory,\n 'a_2h': a_2h,\n 'readydatahistory': readydatahistory,\n 'plusmiinus': plusmiinus,\n 'okei': okei,\n 'stepsit': stepsit,\n 'steps': steps,\n 'liikkeellemars': liikkeellemars,\n 'voima': voima,\n 'readydata': readydata,\n 'nukkumaanko': nukkumaanko,\n 'unille': unille,\n 'pillowtime': pillowtime,\n 'valmiusero': valmiusero,\n 'raportti': raportti,\n 'steps_vk': steps_vk,\n 'eilensport': eilensport,\n 'eilensportmsg': eilensportmsg,\n }\n return render(request, 'myOura/ouraring.html', {'context': context})\n"
},
{
"alpha_fraction": 0.6878452897071838,
"alphanum_fraction": 0.6878452897071838,
"avg_line_length": 32,
"blob_id": "7f86042ff8cb2084712176a70ebcab45d48ba887",
"content_id": "1221133f9f2d92d149cf3b73f978c456ed07dfd6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 362,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 11,
"path": "/myOura/urls.py",
"repo_name": "jtapiovaara/adventureOura",
"src_encoding": "UTF-8",
"text": "from django.urls import path\nfrom django.contrib.auth import views as auth_views\n\nfrom . import views\n\nurlpatterns = [\n path('login/', auth_views.LoginView.as_view(), name='login'),\n path('logout/', auth_views.LogoutView.as_view(), name='logout'),\n path('oura/', views.ourastart, name='ourastart'),\n path('myoura/', views.ouraapi, name='ouracall'),\n]"
},
{
"alpha_fraction": 0.7471264600753784,
"alphanum_fraction": 0.7471264600753784,
"avg_line_length": 16.399999618530273,
"blob_id": "cb5de0183130c5a30ab0e124e041a591b567420b",
"content_id": "4f8e828a79f0f81d2e39925905e7b48e8747e88d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 87,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 5,
"path": "/myOura/apps.py",
"repo_name": "jtapiovaara/adventureOura",
"src_encoding": "UTF-8",
"text": "from django.apps import AppConfig\n\n\nclass MyouraConfig(AppConfig):\n name = 'myOura'\n"
}
] | 6 |
fkirwin/learningjournal
|
https://github.com/fkirwin/learningjournal
|
53fd455022fcdee7ba7744a0e8345249ed4f4837
|
ce404f81bcbd613a7e4dbf4a2864ef77c405ba76
|
cd040bb93169fe617fada5999b858023b4b53301
|
refs/heads/master
| 2020-03-26T19:14:33.348616 | 2018-08-24T01:13:53 | 2018-08-24T01:13:53 | 145,255,018 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7962962985038757,
"alphanum_fraction": 0.7962962985038757,
"avg_line_length": 70.33333587646484,
"blob_id": "aab19bfe79e6e9f0ea548da4e88c4646662f314c",
"content_id": "52d3607b5424b66d873b37995963e473d22db243",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 216,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 3,
"path": "/views.py",
"repo_name": "fkirwin/learningjournal",
"src_encoding": "UTF-8",
"text": "from flask import Flask, g, render_template, flash, redirect, url_for, abort\nfrom flask_bcrypt import check_password_hash\nfrom flask_login import LoginManager, login_user, logout_user, login_required, current_user\n\n\n"
},
{
"alpha_fraction": 0.6768332123756409,
"alphanum_fraction": 0.6794289350509644,
"avg_line_length": 45.69696807861328,
"blob_id": "b79f51fba5e02e98cb7280877378bfe21b93c46c",
"content_id": "7d91677765f5101329a9d926e308e462938aff25",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1541,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 33,
"path": "/forms.py",
"repo_name": "fkirwin/learningjournal",
"src_encoding": "UTF-8",
"text": "from flask_wtf import FlaskForm\nfrom wtforms import StringField, TextAreaField, DateField, IntegerField, PasswordField, RadioField\nfrom wtforms.validators import DataRequired, Length, ValidationError, EqualTo\n\nimport models\n\n\ndef name_exists(form, field):\n if models.User.select().where(models.User.username == field.data).exists():\n raise ValidationError('User with that name already exists.')\n\n\nclass EntryForm(FlaskForm):\n title = StringField(\"Title\", validators=[DataRequired(), Length(min=2)])\n date = DateField(\"Date\", validators=[DataRequired()], format='%Y-%m-%d')\n time_spent = IntegerField(\"Time Spent\", validators=[DataRequired()])\n learnings = TextAreaField(\"What I Learned\", validators=[DataRequired()])\n rememberings = TextAreaField(\"Resources To Remember\", validators=[DataRequired()])\n\n\nclass RegisterForm(FlaskForm):\n username = StringField('Username', validators=[DataRequired(), name_exists])\n password = PasswordField('Password',\n validators=[DataRequired(),\n Length(min=2),\n EqualTo('password2', message='Passwords must match')])\n password2 = PasswordField('Confirm Password', validators=[DataRequired()])\n is_admin = RadioField(\"Is administrator?\", default=False, choices=[(\"True\", True), (\"False\", False)])\n\n\nclass LoginForm(FlaskForm):\n username = StringField('Username', validators=[DataRequired()])\n password = PasswordField('Password', validators=[DataRequired()])\n"
},
{
"alpha_fraction": 0.8106508851051331,
"alphanum_fraction": 0.8106508851051331,
"avg_line_length": 41.5,
"blob_id": "6e004402c690603987311b18a554b0ca9481a2f7",
"content_id": "da4aaf26e6a702dc243abadb939b6472ba47fe4e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 169,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 4,
"path": "/README.md",
"repo_name": "fkirwin/learningjournal",
"src_encoding": "UTF-8",
"text": "# learningjournal\n\nNot much to say. Simple CRUD app which allows users to login and view entries or edit them.\nRequirements have been included for convenience purposes."
},
{
"alpha_fraction": 0.6302294135093689,
"alphanum_fraction": 0.6317717432975769,
"avg_line_length": 30.628047943115234,
"blob_id": "c2f98872cb961f1f763e4a8df6c0a38e07af13b7",
"content_id": "4d8a2df40beb3c78597d3456c4dd4ce85e376d38",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5187,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 164,
"path": "/app.py",
"repo_name": "fkirwin/learningjournal",
"src_encoding": "UTF-8",
"text": "from flask import Flask, g, render_template, flash, redirect, url_for\nfrom flask_bcrypt import check_password_hash, Bcrypt\nfrom flask_login import LoginManager, login_user, logout_user, login_required, current_user\n\nimport models\nimport forms\n\n# Default variables\nPORT = 8080\nHOST = '0.0.0.0'\n\n# Application reference.\napp = Flask(__name__)\napp.secret_key = 'asdf9fs82rnu478200fofj01sksal013rbdabcvbgem2'\n\n# Login manager reference.\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\nlogin_manager.login_view = 'login'\n\n# Bcrypt reference.\nbcrypt = Bcrypt(app)\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n \"\"\"Loads user when called.\"\"\"\n try:\n return models.User.get(models.User.id == user_id)\n except models.DoesNotExist:\n return None\n\n\[email protected]_request\ndef before_request():\n \"\"\"Connect to the database before each request for connection management.\"\"\"\n g.db = models.DATABASE\n g.db.connect()\n g.user = current_user\n\n\[email protected]_request\ndef after_request(response):\n \"\"\"Close the database connection after each request for connection management.\"\"\"\n g.db.close()\n return response\n\n\[email protected]('/login', methods=('GET', 'POST'))\ndef login():\n \"\"\"Core login page for user to enter creds.\"\"\"\n form = forms.LoginForm()\n if form.validate_on_submit():\n try:\n user = models.User.get(models.User.username == form.username.data)\n except models.DoesNotExist:\n flash(\"Your email or password doesn't match!\", \"error\")\n else:\n if check_password_hash(user.password, form.password.data):\n login_user(user)\n flash(\"You've been logged in!\", \"success\")\n return redirect(url_for('entries'))\n else:\n flash(\"Your email or password doesn't match!\", \"error\")\n return render_template('login.html', form=form)\n\n\[email protected]('/logout')\n@login_required\ndef logout():\n \"\"\"Core logout page to log the user out.\"\"\"\n logout_user()\n flash(\"You've been logged 
out!\", \"success\")\n return redirect(url_for('login'))\n\n\[email protected]('/register', methods=('GET', 'POST'))\ndef register():\n \"\"\"Create a new user.\"\"\"\n form = forms.RegisterForm()\n if form.validate_on_submit():\n flash(\"Registration successful!\", \"success\")\n models.User.write_user(\n username=form.username.data,\n password=form.password.data,\n is_admin=form.is_admin.data\n )\n return redirect(url_for('entries'))\n return render_template('register.html', form=form)\n\n\[email protected](\"/entries\")\[email protected](\"/\")\n@login_required\ndef entries():\n \"\"\"Landing page for logged in users. Shows all entries regardless of user.\"\"\"\n entriez = models.Entry.get_all_entries()\n return render_template(\"index.html\", entriez=entriez)\n\n\[email protected](\"/entries/<entry_id>\")\n@login_required\ndef specific_entry(entry_id):\n \"\"\"Shows details on specific entry.\"\"\"\n entry = models.Entry.get_specific_entry(entry_id)\n return render_template(\"detail.html\", entry=entry)\n\n\[email protected](\"/entries/edit/<entry_id>\", methods=(\"GET\", \"POST\"))\n@login_required\ndef edit(entry_id):\n \"\"\"Allows user to edit an entry if they own it.\"\"\"\n try:\n entry = models.Entry.get_specific_entry_for_user(entry_id, g.user.id)\n except:\n flash(\"You cannot alter entries you did not write!\")\n return redirect(url_for('specific_entry', entry_id=entry_id))\n form = forms.EntryForm()\n if form.validate_on_submit():\n entry.user = g.user.id\n entry.title = form.title.data\n entry.date = form.date.data\n entry.time_spent = form.time_spent.data\n entry.learnings = form.learnings.data\n entry.rememberings = form.rememberings.data\n entry.save()\n flash(\"Entry updated! 
Thanks!\", \"success\")\n return redirect(url_for('entries'))\n return render_template(\"edit.html\", entry=entry, form=form)\n\n\[email protected](\"/entries/delete/<entry_id>\", methods=[\"POST\"])\n@login_required\ndef delete(entry_id):\n \"\"\"Allows user to delete entry if they own it.\"\"\"\n try:\n entry = models.Entry.get_specific_entry_for_user(entry_id, g.user.id)\n except:\n flash(\"You cannot alter entries you did not write!\")\n return redirect(url_for('specific_entry', entry_id=entry_id))\n with models.DATABASE.transaction():\n entry.delete_instance()\n return redirect(url_for('entries'))\n\n\[email protected](\"/new\", methods=(\"GET\", \"POST\"))\n@login_required\ndef new():\n \"\"\"Allows user to create a new journal entry.\"\"\"\n form = forms.EntryForm()\n if form.validate_on_submit():\n models.Entry.write_entry(user=g.user.id,\n title=form.title.data,\n date=form.date.data,\n time_spent=form.time_spent.data,\n learnings=form.learnings.data,\n rememberings=form.rememberings.data)\n flash(\"New entry created!\", \"success\")\n return redirect(url_for('entries'))\n return render_template(\"new.html\", form=form)\n\nif __name__ == '__main__':\n models.initialize()\n app.run(host=HOST, port=PORT, debug=True)\n"
},
{
"alpha_fraction": 0.5939040780067444,
"alphanum_fraction": 0.5952487587928772,
"avg_line_length": 28.355262756347656,
"blob_id": "1caf4249c2e9a26e6ff5cacac1d543c2d5cca075",
"content_id": "5110467458ff1ac77c9137ce7cfb60ea82799dc7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2231,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 76,
"path": "/models.py",
"repo_name": "fkirwin/learningjournal",
"src_encoding": "UTF-8",
"text": "import datetime\n\nfrom flask_bcrypt import generate_password_hash\nfrom flask_login import UserMixin\nfrom peewee import *\n\n\nDATABASE = SqliteDatabase('journal.db')\n\n\nclass BaseModel(Model):\n class Meta:\n database = DATABASE\n\n\nclass User(UserMixin, BaseModel):\n username = CharField(unique=True)\n password = CharField(max_length=100)\n joined_on = DateTimeField(default=datetime.datetime.now)\n is_admin = BooleanField(default=False)\n\n @classmethod\n def write_user(cls, username, password, is_admin, joined_on=None):\n if not joined_on:\n joined_on = datetime.date.today()\n try:\n with DATABASE.transaction():\n cls.create(username=username,\n password=generate_password_hash(password),\n is_admin=is_admin,\n joined_on=joined_on)\n except IntegrityError:\n raise ValueError(\"User already exists\")\n\n\nclass Entry(BaseModel):\n user = ForeignKeyField(User, backref='entries')\n title = TextField()\n date = DateField()\n time_spent = IntegerField()\n learnings = TextField()\n rememberings = TextField()\n\n @classmethod\n def write_entry(cls, user, title, time_spent, learnings, rememberings, date=None):\n if not date:\n date = datetime.date.today()\n try:\n with DATABASE.transaction():\n cls.create(user=user,\n title=title,\n time_spent=time_spent,\n learnings=learnings,\n rememberings=rememberings,\n date=date)\n except IntegrityError:\n raise ValueError(\"Entry already exists\")\n\n @classmethod\n def get_all_entries(cls):\n return cls.select()\n\n @classmethod\n def get_specific_entry(cls, entry_id):\n return cls.select().where(Entry.id == entry_id).get()\n\n @classmethod\n def get_specific_entry_for_user(cls, entry_id, user):\n return Entry.select().where((Entry.user == user) & (Entry.id == entry_id)).get()\n\n\n\ndef initialize():\n DATABASE.connect()\n DATABASE.create_tables([User, Entry], safe=True)\n DATABASE.close()\n"
}
] | 5 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.