Dataset schema:

    column              type             range / classes
    repo_name           string           length 5-114
    repo_url            string           length 24-133
    snapshot_id         string           length 40 (fixed)
    revision_id         string           length 40 (fixed)
    directory_id        string           length 40 (fixed)
    branch_name         string           209 classes
    visit_date          timestamp[ns]
    revision_date       timestamp[ns]
    committer_date      timestamp[ns]
    github_id           int64            9.83k-683M; nullable (⌀)
    star_events_count   int64            0-22.6k
    fork_events_count   int64            0-4.15k
    gha_license_id      string           17 classes
    gha_created_at      timestamp[ns]
    gha_updated_at      timestamp[ns]
    gha_pushed_at       timestamp[ns]
    gha_language        string           115 classes
    files               list             1-13.2k entries
    num_files           int64            1-13.2k
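For orientation, below is a minimal sketch of handling one row with this schema, assuming rows arrive as plain Python dicts (for example via the Hugging Face `datasets` library). The inline `record` is an abbreviated stand-in for the example record that follows, not a complete row.

```python
# Minimal sketch: summarize the scalar columns of one record.
# `record` abbreviates a full row; the gha_* columns and github_id are
# nullable (rendered as "null" / ⌀ in the dump), so read them defensively.
record = {
    "repo_name": "ghfkdgml/pyTestCode",
    "branch_name": "refs/heads/master",
    "star_events_count": 0,
    "fork_events_count": 0,
    "gha_language": None,  # nullable column
    "files": [{"path": "/ksh/coding/sum_time_check.py", "language": "Python"}],
}

language = record["gha_language"] or "unknown"
print(f"{record['repo_name']} ({record['branch_name']}): "
      f"{len(record['files'])} file(s), "
      f"{record['star_events_count']} stars, "
      f"{record['fork_events_count']} forks, language={language}")
```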
Example record:

    repo_name:          ghfkdgml/pyTestCode
    repo_url:           https://github.com/ghfkdgml/pyTestCode
    snapshot_id:        a04052df613f76c03abf44fc19e7a2335d9c52d8
    revision_id:        ce3a38bd747519248354b580bc0ea259971b6425
    directory_id:       c3cf0f6a590614dc0bf914538bda01d775238ee6
    branch_name:        refs/heads/master
    visit_date:         2020-04-05T02:03:50.393627
    revision_date:      2018-12-19T07:24:26
    committer_date:     2018-12-19T07:24:26
    github_id:          156,461,928
    star_events_count:  0
    fork_events_count:  0
    gha_license_id:     null
    gha_created_at:     null
    gha_updated_at:     null
    gha_pushed_at:      null
    gha_language:       null
    files:              (JSON array below)
[ { "alpha_fraction": 0.5205479264259338, "alphanum_fraction": 0.5698630213737488, "avg_line_length": 14.208333015441895, "blob_id": "e8a48666a08a872fb8556f68e13733140f071d3c", "content_id": "7850e528e1323436d4f22acd23f985fc82428760", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 365, "license_type": "no_license", "max_line_length": 34, "num_lines": 24, "path": "/ksh/coding/sum_time_check.py", "repo_name": "ghfkdgml/pyTestCode", "src_encoding": "UTF-8", "text": "import time\n\ndef time_check(func):\n def check(num):\n bef=time.time()\n func(num)\n aft=time.time()\n print(aft-bef,\"was taken\")\n return check\n\n\n\n@time_check\ndef sumnum(num):\n ret=0\n for x in range(num):\n ret+=x+1\n print(\"num1\",ret)\n@time_check\ndef sum2(num):\n print(\"num2\",num*(num+1)/2)\n\nsumnum(10000)\nsum2(10000)\n" }, { "alpha_fraction": 0.6814285516738892, "alphanum_fraction": 0.6957142949104309, "avg_line_length": 28.16666603088379, "blob_id": "98a274a48bf1b94109327e7094bc0791d9006e56", "content_id": "d75f6653cb14d0fd4784794aa8c2ea940a43bd9c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 700, "license_type": "no_license", "max_line_length": 54, "num_lines": 24, "path": "/ksh/twisted/client/echoclient.py", "repo_name": "ghfkdgml/pyTestCode", "src_encoding": "UTF-8", "text": "from twisted.internet import reactor,protocol\n\nclass EchoClient(protocol.Protocol):\n def connectionMade(self):\n self.transport.write(\"hello world\")\n def dataReceived(self,data):\n print \"Server said:\",data\n print self.transport.getPeer()\n self.transport.loseConnection()\n\nclass EchoFactory(protocol.ClientFactory):\n def buildProtocol(self,addr):\n return EchoClient()\n\n def clientConnectionFailed(self,connector,reason):\n print \"Connection failed!\"\n reactor.stop()\n\n def clientConnectionLost(self,connector,reason):\n print \"Connection Lost!\"\n reactor.stop()\n\nreactor.connectTCP(\"127.0.0.1\",8000,EchoFactory())\nreactor.run()\n" }, { "alpha_fraction": 0.34682080149650574, "alphanum_fraction": 0.36878612637519836, "avg_line_length": 17.340425491333008, "blob_id": "b210a1d9438e51fc7181744ea114b8bb51f0c3e6", "content_id": "6437d8166ba96f547971f1e5f0ace4ebd579dc82", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 865, "license_type": "no_license", "max_line_length": 37, "num_lines": 47, "path": "/ksh/coding/honeyComb.py", "repo_name": "ghfkdgml/pyTestCode", "src_encoding": "UTF-8", "text": "\n\n\ndef findPos(num):\n length=len(str(num))\n pass\n\ndef lenCount(num):\n ret=[]\n cnt=1\n for x in range(num):\n a=int((x*x+x)/2)\n if len(str(a))==cnt:\n ret.append(a)\n cnt+=1\n print(ret)\n\n\ndef test(num):\n ret=1\n n=1\n while 1:\n if num>ret:\n ret+=6*int((n*n+n)/2)\n n+=1\n else:\n print(n)\n break\n\ndef fraction(num):\n ret=0\n n=1\n while 1:\n if num>ret:\n ret+=int((n*n+n)/2)\n n+=1\n else:\n if n%2:\n down=n-(ret-num)\n print(n,ret,num,down)\n up=n+1-down\n print(up,'/',down)\n break\n else:\n up=n-(ret-num)\n down=n+1-up\n print(up,'/',down)\n break\n\nfraction(12)\n" }, { "alpha_fraction": 0.5175619721412659, "alphanum_fraction": 0.5371900796890259, "avg_line_length": 19.595745086669922, "blob_id": "3acc0470535a45d1feaae04eeacc545cf2f87c49", "content_id": "3fc7ae63aa4bede5c266ee2b61fc9fc9be923b39", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1010, "license_type": 
"no_license", "max_line_length": 66, "num_lines": 47, "path": "/ksh/basic/basic_5.py", "repo_name": "ghfkdgml/pyTestCode", "src_encoding": "UTF-8", "text": "#!/opt/hts/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n ๋ฌธ์ œ5\n ์•„๋ž˜ fruit_store ์˜ ๊ฐ’์„ fruit_list์˜ ๋‚ด์šฉ์„ ์ฝ์–ด fruit_store์— ๋ฐ˜์˜ํ•˜์‹œ์š”.\n\n ๊ฒฐ๊ณผ\n {'strwberry': 11, 'grape': 22, 'apple': 7, 'banana': 11}\n\n\"\"\"\n\nfruit_store = {'apple':5, 'banana':3, 'grape':10}\nfruit_list = [\n 'apple count 2',\n 'banana count 8',\n 'grape count 12',\n 'strwberry count 11'\n ]\n\n\nclass FruitStore(object):\n def __init__(self,fruit_store):\n self.fruit_store=fruit_store\n\n def buy_fruit(self,list):\n print 'start!'\n print list\n for i in range(len(list)):\n fruit,_,num=list[i].split()\n if fruit in self.fruit_store:\n self.fruit_store[fruit]+=int(num)\n else:\n self.fruit_store[fruit]=int(num)\n print fruit_store\n\n def run(self,list):\n self.buy_fruit(list)\n\ndef main():\n FS = FruitStore(fruit_store)\n FS.run(fruit_list)\n\nif __name__ == \"__main__\":\n main()\n\n#EOF\n" }, { "alpha_fraction": 0.4972677528858185, "alphanum_fraction": 0.4972677528858185, "avg_line_length": 14.25, "blob_id": "ecd437125eacdf61116fcec39b65b7281f975ac7", "content_id": "11aa48ef2b5b81c77c60223165af952328083007", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 183, "license_type": "no_license", "max_line_length": 29, "num_lines": 12, "path": "/ksh/basic/decorator.py", "repo_name": "ghfkdgml/pyTestCode", "src_encoding": "UTF-8", "text": "def decor(func):\n def deco():\n print(\"decorator!!!\")\n print(func.__name__)\n return deco\n\n@decor\ndef test():\n print(\"test\")\n\nif __name__=='__main__':\n test()\n" }, { "alpha_fraction": 0.6226415038108826, "alphanum_fraction": 0.6352201104164124, "avg_line_length": 21.714284896850586, "blob_id": "df283efcc5cb07e5da458c06e10e5d49b8119b87", "content_id": "89251d303043a07845a12a46e81bd414453174b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 477, "license_type": "no_license", "max_line_length": 84, "num_lines": 21, "path": "/ksh/OddsAndEnds/namedtuple.py", "repo_name": "ghfkdgml/pyTestCode", "src_encoding": "UTF-8", "text": "import collections\n\nCards=collections.namedtuple('Card',['rank','suit'])\nclass FrenchDeck:\n ranks=[str(n) for n in range(2,11)]+list('JQKA')\n suits='spades diamonds clubs hearts'.split()\n\n def __init__(self):\n self._cards=[Cards(rank,suit) for suit in self.suits for rank in self.ranks]\n\n def __len__(self):\n return len(self._cards)\n\n def __getitem__(self,pos):\n return self._cards[pos]\n\n\na=FrenchDeck()\nprint len(a)\nprint a[3]\nprint a[:10]\n" }, { "alpha_fraction": 0.4935064911842346, "alphanum_fraction": 0.5584415793418884, "avg_line_length": 14.399999618530273, "blob_id": "85e762f09af88ae3b8723096b5999599db72b775", "content_id": "e5bee0f90469214caaa3d24bde905c9b310f2447", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 77, "license_type": "no_license", "max_line_length": 29, "num_lines": 5, "path": "/ksh/coding/printN2.py", "repo_name": "ghfkdgml/pyTestCode", "src_encoding": "UTF-8", "text": "def printN2(num):\n for x in range(num,0,-1):\n print(x)\n\nprintN2(5)\n" }, { "alpha_fraction": 0.5312899351119995, "alphanum_fraction": 0.5363984704017639, "avg_line_length": 20.108108520507812, "blob_id": "a565f7ad72d5e9d092e835dcbafdd49a8c44d38f", "content_id": 
"100d32a7855bd457e77fee81904f02c5ad0aec59", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 783, "license_type": "no_license", "max_line_length": 70, "num_lines": 37, "path": "/ksh/OddsAndEnds/N_gram.py", "repo_name": "ghfkdgml/pyTestCode", "src_encoding": "UTF-8", "text": "#-*-coding:utf-8-*-\n\ndef ngram():\n n=int(input(\"input num!\"))\n text='Python is a programming language that lets you work quickly'\n words=text.split()\n\n if len(words)<n:\n print \"wrong text and num!\"\n else:\n for i in range(len(words)-n+1):\n print ' '.join(words[i:i+n])\n\ndef wordChecker(word):\n checkList=['.',',',\"'\"]\n for x in checkList:\n if x in word:\n word=word.replace(x,'')\n return word\n\ndef palindrom(word):\n wordChecker(word)\n check=True\n for i in range(len(word)/2):\n if word[i]!=word[-1-i]:\n check=False\n return check\n\n\n\n\n\nif __name__=='__main__':\n test=['apache','decal','did','neep','noon','refer','river']\n for x in test:\n if palindrom(x):\n print x\n\n\n" }, { "alpha_fraction": 0.44624999165534973, "alphanum_fraction": 0.4662500023841858, "avg_line_length": 18.487804412841797, "blob_id": "9919f56d0fba76136338bb1ee995ea64f8689c5d", "content_id": "d4817cf721371dcf2c89c47a374dcf1290fa5cc7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 800, "license_type": "no_license", "max_line_length": 40, "num_lines": 41, "path": "/ksh/coding/prime.py", "repo_name": "ghfkdgml/pyTestCode", "src_encoding": "UTF-8", "text": "from math import sqrt\n\ndef findprime(num):\n if num==1:\n return 0\n if num==2:\n return 1\n a=int(sqrt(num))\n for x in range(2,a+1):\n if num%x==0:\n return 0\n return 1\n\ndef totalprime(m,n):\n total=0\n min_num=0\n for x in range(m,n+1):\n if findprime(x):\n total+=x\n if not min_num or min_num>x:\n min_num=x\n if not total:\n print(-1)\n else:\n print(total)\n print(min_num)\n\ndef totalprime_x(m,n):\n total=[]\n min_num=0\n for x in range(m,n+1):\n if findprime(x):\n total.append(x)\n if not min_num or min_num>x:\n min_num=x\n if not total:\n print(-1)\n else:\n #print(total)\n for x in total:\n print(x)\n\n" }, { "alpha_fraction": 0.7115384340286255, "alphanum_fraction": 0.7115384340286255, "avg_line_length": 12, "blob_id": "bc4dbed540d195f2ed283e4b812b5be890f813a8", "content_id": "ed8baf1c9539bd64d13cacc747d8a22c25f085a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 52, "license_type": "no_license", "max_line_length": 22, "num_lines": 4, "path": "/ksh/coding/temp.py", "repo_name": "ghfkdgml/pyTestCode", "src_encoding": "UTF-8", "text": "import sys\n\na=sys.stdin.readline()\nprint(a.strip())\n" }, { "alpha_fraction": 0.481719046831131, "alphanum_fraction": 0.5182809233665466, "avg_line_length": 22.621212005615234, "blob_id": "9d568f864ae7adc8a92bb5795d87d582cb47c140", "content_id": "bbb184019b8059077bec94fd2324c2b870a38335", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1753, "license_type": "no_license", "max_line_length": 130, "num_lines": 66, "path": "/ksh/basic/basic_4.py", "repo_name": "ghfkdgml/pyTestCode", "src_encoding": "UTF-8", "text": "#!/opt/hts/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n ๋ฌธ์ œ4\n ๋‹ค์Œ list๋ฐ์ดํ„ฐ๋ฅผ ์กฐ์ž‘ํ•˜์‹œ์š”\n\n 4-1 list๋ฐ์ดํ„ฐ๋ฅผ dictํ˜•ํƒœ๋กœ ๋ณ€๊ฒฝํ•˜์‹œ์š”\n 4-2 list๋ฐ์ดํ„ฐ๋ฅผ listํ˜•์‹์œผ๋กœ ๋ฐ”๊พธ๋ฉด์„œ ๋ฐ์ดํ„ฐ๋ฅผ ์กฐ์ž‘ํ•˜์‹œ์š”\n 4-3 list๋ฐ์ดํ„ฐ๋ฅผ list์•ˆ์— 
dictํ˜•ํƒœ๋กœ ๋ณ€๊ฒฝํ•˜์‹œ์š”\n 4-4 4-1์˜ ๊ฒฐ๊ณผ์— ํŠน์ดํ•œ์ ์„ ์ฐพ์œผ์‹œ์š”\n\n ๊ฒฐ๊ณผ\n 4-1 ๊ฒฐ๊ณผ (dict ์ถœ๋ ฅ)\n {'blue': '10', 'gray': '12', 'apple': '10', 'fineapple': '3', 'green': '15', 'banana': '5', 'red': '11'}\n 4-2 ๊ฒฐ๊ณผ (list์ถœ๋ ฅ ๋ฐ ์ค‘๋ณต์ œ๊ฑฐ๋œ list์ถœ๋ ฅ)\n ['apple', 'banana', 'fineapple', 'red', 'blue', 'green', 'gray', 'apple']\n ['blue', 'gray', 'apple', 'fineapple', 'green', 'banana', 'red']\n 4-3 ๊ฒฐ๊ณผ (list์•ˆ์— dict์ถœ๋ ฅ)\n [{'apple': '10'}, {'banana': '5'}, {'fineapple': '3'}, {'red': '11'}, {'blue': '10'}, {'green': '15'}, {'gray': '12'}]\n\n\n\"\"\"\nLIST_DATA = [ 'apple,10', 'banana,5', 'fineapple,3', 'red,11', 'blue,10', 'green,15', 'gray,12', 'apple,12' ]\n\ndef view():\n temp = {}\n new_list = []\n temp_list = []\n text=LIST_DATA\n xdict=listTodict(text)\n print xdict\n listChange(text)\n\n #print temp\n #print new_list\n #print new_list\n #print temp_list\n\ndef listTodict(list):\n result={}\n for i in range(len(list)):\n fruit,num=list[i].split(',')\n result[fruit]=num\n return result\n\ndef listChange(list):\n result=[]\n #for i in range(len(list)):\n # fruit=list[i].split(',')[0]\n # result.append(fruit)\n a=listTodict(list)\n result=a.keys()\n print result\n print set(result)\n\n\n\ndef main():\n view()\n print 'default test'\n\nif __name__ == \"__main__\":\n main()\n\n#EOF\n" }, { "alpha_fraction": 0.43766579031944275, "alphanum_fraction": 0.45092839002609253, "avg_line_length": 14, "blob_id": "65bc5877772cd29046573de38e06c99f3a63dfae", "content_id": "116783349ff87742a9623016ec0dbe8a2cc50afb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 377, "license_type": "no_license", "max_line_length": 29, "num_lines": 25, "path": "/ksh/OddsAndEnds/classTest.py", "repo_name": "ghfkdgml/pyTestCode", "src_encoding": "UTF-8", "text": "#-*-coding:utf-8-*-\n\nclass Point():\n def __init__(self,_x,_y):\n self.x=_x\n self.y=_y\n\n def setx(self,x):\n self.x=x\n def sety(self,y):\n self.y=y\n\n def get(self):\n return self.x,self.y\n\n def move(self,dx,dy):\n self.x+=dx\n self.y+=dy\n\n\n\nif __name__==\"__main__\":\n a=Point(4,5)\n a.move(2,-4)\n print a.get()\n\n\n" }, { "alpha_fraction": 0.5512971878051758, "alphanum_fraction": 0.5654481053352356, "avg_line_length": 26.73770523071289, "blob_id": "f66dfdfe6e9cc4c31f93a21ecb94b4c05b454e78", "content_id": "383a433118861b0de68bc5b5ef97bc9d94ed53a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1696, "license_type": "no_license", "max_line_length": 106, "num_lines": 61, "path": "/ksh/dbTest/insertDB.py", "repo_name": "ghfkdgml/pyTestCode", "src_encoding": "UTF-8", "text": "import psycopg2 as pg\n\n#conn=pg.connect(\"host=127.0.0.1 dbname=logsee user=postgres password=netcruz!1 port=5432\")\n#cur=conn.cursor()\n#\n#try:\n# cur.execute('create table if not exists \"TEST\" (num integer,name char(5));')\n# cur.execute('select * from \"TEST\";')\n# cur.execute('insert into \"TEST\"(num,name) values (%d,%s);'%(3,\"e\"))\n#\n#except Exception,e:\n# print e\n#\n#conn.commit()\n#\n#conn.close()\n\nclass DBMaker():\n def __init__(self,ip,dbname,user,passwd,port):\n self.conn=pg.connect('host=%r dbname=%r user=%r password=%r port=%r'%(ip,dbname,user,passwd,port))\n self.cur=self.conn.cursor()\n def dbInsert(self,table,value):\n if not self.conn or not self.cur:\n print 'connection not exist!'\n return\n try:\n self.cur.execute(\"\"\"insert into %s values%r;\"\"\"%(table,value))\n print 'insert done!'\n 
self.conn.commit()\n except Exception, e:\n print 'insert failed ',e\n\n def dbSelect(self,table,column='*'):\n if not self.conn or not self.cur:\n print 'connection not exist!'\n return\n try:\n self.cur.execute(\"\"\"select %s from %s\"\"\"%(column,table))\n print self.cur.fetchall()\n\n except Exception, e:\n print 'no result! ',e\n\n def __del__(self):\n if self.conn:\n self.conn.close()\n\n\n\nif __name__=='__main__':\n ip='127.0.0.1'\n dbname='logsee'\n user='postgres'\n password='netcruz!1'\n port='5432'\n test=DBMaker(ip,dbname,user,password,port)\n try:\n test.dbInsert('\"TEST\"',(4,'suho'))\n test.dbSelect('\"TEST\"')\n except Exception,e:\n print 'Fail ',e\n\n\n\n\n" }, { "alpha_fraction": 0.49827468395233154, "alphanum_fraction": 0.5438233017921448, "avg_line_length": 20.62686538696289, "blob_id": "eda7c8b50d36e8881624c38639f9eb87c5ceb66a", "content_id": "65041c2639321b2717d15449ae516932c8b40d74", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2199, "license_type": "no_license", "max_line_length": 92, "num_lines": 67, "path": "/ksh/basic/basic_6.py", "repo_name": "ghfkdgml/pyTestCode", "src_encoding": "UTF-8", "text": "#!/opt/hts/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n ๋ฌธ์ œ6\n ์€ํ•˜์ˆ˜๋ฒ…์Šค๋Š” ์ด๋ฒˆ ์—ฌ๋ฆ„์— ๊ณ ๊ฐ๋“ค์„ ๋Œ€์ƒ์œผ๋กœ ์‚ฌ์€ํ’ˆ ํ”„๋กœ๋ชจ์…˜์„ ์ง„ํ–‰ํ•˜๊ณ  ์žˆ์Šต๋‹ˆ๋‹ค. ํ”„๋กœ๋ชจ์…˜์€ ์Œ๋ฃŒ๋ฅผ ๊ตฌ๋งคํ•  ๋•Œ๋งˆ๋‹ค ๊ณ ๊ฐ์—๊ฒŒ ์Šคํ‹ฐ์ปค๋ฅผ ์ ๋ฆฝํ•ด์ค๋‹ˆ๋‹ค.\n ์Šคํ‹ฐ์ปค๋Š” ์Œ๋ฃŒ์˜ ์ข…๋ฅ˜์— ๋”ฐ๋ผ ์—ฌ๋ฆ„ ์Œ๋ฃŒ ์Šคํ‹ฐ์ปค์™€ ์ผ๋ฐ˜ ์Œ๋ฃŒ ์Šคํ‹ฐ์ปค๋กœ ๋‚˜๋‰˜๋ฉฐ, 5๊ฐœ ์ด์ƒ์˜ ์—ฌ๋ฆ„ ์Œ๋ฃŒ ์Šคํ‹ฐ์ปค๋ฅผ ํฌํ•จํ•œ, ์ด 12๊ฐœ์˜ ์Šคํ‹ฐ์ปค๋ฅผ ๋ชจ์œผ๋ฉด\n ์ด๋ฅผ ํ•˜๋‚˜์˜ ํ…€๋ธ”๋Ÿฌ๋กœ ๊ตํ™˜ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ์—ฐ์žฌ๋Š” ํ˜„์žฌ S ๊ฐœ์˜ ์—ฌ๋ฆ„ ์Œ๋ฃŒ ์Šคํ‹ฐ์ปค์™€ N ๊ฐœ์˜ ์ผ๋ฐ˜์Œ๋ฃŒ ์Šคํ‹ฐ์ปค๋ฅผ ๊ฐ€์ง€๊ณ ์žˆ๋Š”๋ฐ, ์ด๋ฅผ ํ†ตํ•ด ๋ฐ›์„ ์ˆ˜ ์žˆ๋Š”\n ์ตœ๋Œ€ ํ…€๋ธ”๋Ÿฌ์˜ ์ˆ˜๋ฅผ ์•Œ๊ณ  ์‹ถ์–ด ํ•ฉ๋‹ˆ๋‹ค. 
์›Œ๋‚™ ๋งŽ์€ ์Šคํ‹ฐ์ปค๋ฅผ ๊ฐ€์ง€๊ณ  ์žˆ์œผ๋ฏ€๋กœ, ๊ณ„์‚ฐ์ด ์–ด๋ ต๋‹ค๊ณ  ๋А๋‚€ ์—ฐ์žฌ๋Š” ๋‹น์‹ ์—๊ฒŒ ์ด๋ฅผ ๊ณ„์‚ฐํ•˜๋Š” ํ”„๋กœ๊ทธ๋žจ์„ ์ž‘์„ฑํ•ด๋‹ฌ๋ผ๋Š”\n ์š”์ฒญ์„ ๋ฐ›์•˜์Šต๋‹ˆ๋‹ค.\n\n ์Šคํ‹ฐ์ปค์˜ ๊ฐœ์ˆ˜๊ฐ€ ์ฃผ์–ด์กŒ์„ ๋•Œ ์ด๋ฅผ ํ†ตํ•ด ์–ป์„ ์ˆ˜ ์žˆ๋Š” ์ตœ๋Œ€ ํ…€๋ธ”๋Ÿฌ์˜ ์ˆ˜๋ฅผ ์ถœ๋ ฅํ•˜๋Š” ํ”„๋กœ๊ทธ๋žจ์„ ์ž‘์„ฑํ•˜์„ธ์š”\n\n ์ž…๋ ฅ\n ์ž…๋ ฅ์€ ์—ฌ๋Ÿฌ ๊ฐœ์˜ ํ…Œ์ŠคํŠธ ์ผ€์ด์Šค๋กœ ์ฃผ์–ด์ง€๋ฉฐ, ์ฒซ ์ค„์— ํ…Œ์ŠคํŠธ ์ผ€์ด์Šค์˜ ๊ฐœ์ˆ˜ T (1 โ‰ค T โ‰ค 10,000)๊ฐ€ ์ž…๋ ฅ๋ฉ๋‹ˆ๋‹ค.\n ํ•˜๋‚˜์˜ ํ…Œ์ŠคํŠธ ์ผ€์ด์Šค๋Š” 0์ด์ƒ 260 โˆ’ 1์ดํ•˜์˜ ์ •์ˆ˜ S ์™€ N ์ด ์ž…๋ ฅ๋˜๋ฉฐ, ๋‘ ์ˆซ์ž ์‚ฌ์ด์—๋Š” ๊ณต๋ฐฑ์ด ์ฃผ์–ด์ง‘๋‹ˆ๋‹ค.\n ex) ์ž…๋ ฅ\n 4\n 12 0\n 10 14\n 4 20\n 5 2147483648\n\n ์ถœ๋ ฅ\n 1\n 2\n 0\n 1\n\n\"\"\"\ndef couponCount():\n total_num=input(\"total test:\")\n result=[]\n for i in range(total_num):\n summer_num=input(\"summer:\")\n gen_num=input(\"general:\")\n result.append(countMax(summer_num,gen_num))\n\n for j in range(len(result)):\n print(\"result:%d\"% result[j])\n\n\ndef countMax(a,b):\n ret1=(a+b)/12\n if ret1==0 or a<5 or b<7:\n return 0\n ret3=1\n for i in range(5,12):\n ret2=min((a/i),(b/(12-i)))\n ret3=max(ret3,ret2)\n\n if ret1>ret3:\n ret1=ret3\n return ret1\n\n\n\n\ndef main():\n print 'default test'\n couponCount()\n\nif __name__ == \"__main__\":\n main()\n\n\n#EOF\n" }, { "alpha_fraction": 0.4400569796562195, "alphanum_fraction": 0.4589863717556, "avg_line_length": 39.27049255371094, "blob_id": "256afb7e097f6352242a834dad644d339e952113", "content_id": "70618cfefd05e6e87a3b3300dbc5186f72d870cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5011, "license_type": "no_license", "max_line_length": 88, "num_lines": 122, "path": "/ksh/dbTest/countDB.py", "repo_name": "ghfkdgml/pyTestCode", "src_encoding": "UTF-8", "text": "#-*-coding:utf-8-*-\nimport sqlite3\nimport os\n#๋‹ค๋ฅธ ๋‚ ์งœ ์ง€์ •ํ•ด์„œ ํ™•์ธ ํ•„์š”\ndef countDBRow(path):\n con=sqlite3.connect(path)\n result=con.execute('select count(*) from(select * from \"LOGS\");').fetchone()[0]\n con.close()\n return result\n\ndef dirExistCheck():\n d=raw_input('input directory want to check!')\n if not os.path.exists(d):\n print 'not exist!'\n dirExistCheck()\n else:\n path=d\n sum=0\n froms,tos=raw_input('input year,day(ex-2018-08-01 2018-08-08):').split()\n toY,toM,toD=map(int,tos.split('-'))\n fromY,fromM,fromD=map(int,froms.split('-'))\n if toY-fromY==0: #๊ฐ™์€ ํ•ด์ผ๋•Œ\n gap=toM-fromM\n if gap>1: #๋‹ค๋ฅธ ๋‹ฌ์ผ๋•Œ(2๋‹ฌ์ด์ƒ ์ฐจ์ด)\n for x in dayCheck(fromM,fromD):\n path=d+'/'+str(fromY)+'-'+str(fromM).zfill(2)+'-'+str(x).zfill(2)\n for z in os.popen('cd %s;ls *.logsee'%path).read().splitlines():\n sum=sum+countDBRow(path+'/'+z)\n for x in rdayCheck(toM,toD):\n path=d+'/'+str(toY)+'-'+str(toM).zfill(2)+'-'+str(x).zfill(2)\n for z in os.popen('cd %s;ls *.logsee'%path).read().splitlines():\n sum=sum+countDBRow(path+'/'+z)\n\n for y in range(fromM+1,toM):\n for x in dayCheck(y):\n path=d+'/'+str(fromY)+'-'+str(y).zfill(2)+'-'+str(x).zfill(2)\n for z in os.popen('cd %s;ls *.logsee'%path).read().splitlines():\n sum=sum+countDBRow(path+'/'+z)\n\n elif gap==1:#1๋‹ฌ ์ฐจ์ด\n for x in dayCheck(fromM,fromD):\n path=d+'/'+str(fromY)+'-'+str(fromM).zfill(2)+'-'+str(x).zfill(2)\n for z in os.popen('cd %s;ls *.logsee'%path).read().splitlines():\n sum=sum+countDBRow(path+'/'+z)\n for x in rdayCheck(toM,toD):\n path=d+'/'+str(toY)+'-'+str(toM).zfill(2)+'-'+str(x).zfill(2)\n for z in os.popen('cd %s;ls *.logsee'%path).read().splitlines():\n 
sum=sum+countDBRow(path+'/'+z)\n\n else:#๊ฐ™์€ ๋‹ฌ\n for x in range(fromD,toD+1):\n path=d+'/'+str(fromY)+'-'+str(fromM).zfill(2)+'-'+str(x).zfill(2)\n for z in os.popen('cd %s;ls *.logsee'%path).read().splitlines():\n sum=sum+countDBRow(path+'/'+z)\n elif toY-fromY<0:\n print 'wrong year!'\n else:\n if int(fromM)!=12:#12์›” ์•„๋‹๋•Œ fromMonth\n for x in dayCheck(fromM,fromD):\n path=d+'/'+str(fromY)+'-'+str(fromM).zfill(2)+'-'+str(x).zfill(2)\n for z in os.popen('cd %s;ls *.logsee'%path).read().splitlines():\n print z\n sum=sum+countDBRow(path+'/'+z)\n for x in range(fromM+1,13):#fromMonth ์ดํ›„ Month ๊ณ„์‚ฐ\n for y in dayCheck(x):\n path=d+'/'+str(fromY)+'-'+str(x).zfill(2)+'-'+str(y).zfill(2)\n for z in os.popen('cd %s;ls *.logsee'%path).read().splitlines():\n print z\n sum=sum+countDBRow(path+'/'+z)\n else:\n for x in dayCheck(fromM,fromD):\n path=d+'/'+str(fromY)+'-'+str(fromM).zfill(2)+'-'+str(x).zfill(2)\n for z in os.popen('cd %s;ls *.logsee'%path).read().splitlines():\n sum=sum+countDBRow(path+'/'+z)\n\n\n if int(toM)!=1:\n for x in range(1,toM):#toMonth ์ด์ „ Month ๊ณ„์‚ฐ\n for y in dayCheck(x):\n path=d+'/'+str(fromY)+'-'+str(x).zfill(2)+'-'+str(y).zfill(2)\n for z in os.popen('cd %s;ls *.logsee'%path).read().splitlines():\n sum=sum+countDBRow(path+'/'+z)\n for x in rdayCheck(toM,toD):#toMonth ๊ณ„์‚ฐ\n path=d+'/'+str(toY)+'-'+str(toM).zfill(2)+'-'+str(x).zfill(2)\n for z in os.popen('cd %s;ls *.logsee'%path).read().splitlines():\n sum=sum+countDBRow(path+'/'+z)\n else:\n for x in rdayCheck(toM,toD):#toMonth ๊ณ„์‚ฐ\n path=d+'/'+str(toY)+'-'+str(toM).zfill(2)+'-'+str(x).zfill(2)\n for z in os.popen('cd %s;ls *.logsee'%path).read().splitlines():\n sum=sum+countDBRow(path+'/'+z)\n\n print sum\n\n\n\n\n\n\ndef rdayCheck(m,d):\n Day_res=[]\n Day_res.append(range(1,d+1))\n return Day_res\n\n\n\n\n\ndef dayCheck(m,d=1):\n Month_31=[1,3,5,7,8,10,12]\n Month_30=[4,6,9,11]\n Day_res=[]\n if m in Month_31:\n Day_res=range(d,32)\n elif m in Month_30:\n Day_res=range(d,31)\n else:\n Day_res=range(d,28)\n return Day_res\n\nif __name__=='__main__':\n dirExistCheck()\n" }, { "alpha_fraction": 0.26032641530036926, "alphanum_fraction": 0.27805763483047485, "avg_line_length": 27.8430233001709, "blob_id": "2c57dbe39e6a6ce08f44ba772e6d9a5fb773abc8", "content_id": "e9863cbfb8b88c05d86f2a01a0ad4dbd2c82fa31", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4963, "license_type": "no_license", "max_line_length": 59, "num_lines": 172, "path": "/ksh/coding/stack.py", "repo_name": "ghfkdgml/pyTestCode", "src_encoding": "UTF-8", "text": "\n\ndef validPS(msg):\n ret=list(msg)\n cnt=0\n for x in ret:\n if cnt<0:\n break\n if x =='(':\n cnt+=1\n elif x==')':\n cnt-=1\n if cnt==0:\n print('YES')\n else:\n print('NO')\n\n#def multiPS(msg):\n# ret=list(msg)\n# stack=[]\n# tmp=1\n# tmptotal=0\n# total=0\n# subcheck=False\n# cnt=0\n# for x in ret:\n# if x in ['(','[']:\n# if subcheck:\n# tmptotal+=tmp\n# tmp=1\n# subcheck=False\n# cnt+=1\n# stack.append(x)\n# elif x ==')':\n# if len(stack)>1:\n# if stack[-1]=='(':\n# if cnt:\n# stack.pop()\n# subcheck=True\n# tmp*=2\n# cnt-=1\n# else:\n# \n# else:\n# total=0\n# break\n# elif len(stack)==1:\n# if stack[0]=='(':\n# stack.pop()\n# subcheck=False\n# tmptotal+=tmp\n# total+=tmptotal*2\n# tmptotal=0\n# tmp=1\n# else:\n# total=0\n# break\n# else:\n# total=0\n# break\n# \n# \n# elif x==']':\n# if len(stack)>1:\n# if stack[-1]=='[':\n#\t\t subcheck=True\n# stack.pop()\n# tmp*=3\n# else:\n# total=0\n# break\n# elif 
len(stack)==1:\n# if stack[0]=='[':\n# stack.pop()\n# subcheck=False\n# tmptotal+=tmp\n# total+=tmptotal*3\n# tmptotal=0\n# tmp=1\n# else:\n# total=0\n# break\n# else:\n# total=0\n# break\n# print(total)\n\ndef test(msg):\n ret=list(msg)\n stack=[]\n total=0\n add_check=False\n for x in ret:\n if x in ['(','[']:\n if len(stack)>=1 and type(stack[-1])==int:\n add_check=True\n stack.append(x)\n elif x==')':\n if stack:\n if stack[-1]=='(':\n stack[-1]=2\n elif type(stack[-1])==int:\n if add_check and type(stack[-2])==int:\n while type(stack[-2])==int:\n stack[-2]=stack[-1]+stack[-2]\n stack.pop()\n add_check=False\n if len(stack)==1 or stack[-2]!='(':\n stack=[]\n total=0\n break\n else:\n stack[-2]=stack[-1]*2\n stack.pop()\n \n elif len(stack)>1 and stack[-2]=='(':\n stack[-2]=stack[-1]*2\n stack.pop()\n else:\n total=0\n stack=[]\n break\n else:\n total=0\n stack=[]\n break\n else:\n total=0\n stack=[]\n break\n elif x==']':\n if stack:\n if stack[-1]=='[':\n stack[-1]=3\n elif type(stack[-1])==int:\n if add_check and type(stack[-2])==int:\n while type(stack[-2])==int:\n stack[-2]=stack[-1]+stack[-2]\n stack.pop()\n add_check=False\n if len(stack)==1 or stack[-1]!='[':\n total=0\n stack=[]\n break\n else:\n stack[-2]=stack[-1]*3\n stack.pop()\n elif len(stack)>1 and stack[-2]=='[':\n stack[-2]=stack[-1]*3\n stack.pop()\n else:\n total=0\n stack=[]\n break\n else:\n total=0\n stack=[]\n break\n else:\n total=0\n stack=[]\n break\n if len(stack)>1:\n for x in stack:\n if x in [')','(','[',']']:\n total=0\n break\n else:\n total+=x\n elif len(stack)==1 and type(stack[0])==int:\n total=stack[0]\n print(total) \n\ntest('([)])')\n" }, { "alpha_fraction": 0.5435630679130554, "alphanum_fraction": 0.5708712339401245, "avg_line_length": 17.309524536132812, "blob_id": "d795f0f07b0064e0c612d0e8e1d119273f4264e3", "content_id": "42863921ec1e90311ea7756ffa98df844c482b91", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 769, "license_type": "no_license", "max_line_length": 61, "num_lines": 42, "path": "/ksh/socketTest/client.py", "repo_name": "ghfkdgml/pyTestCode", "src_encoding": "UTF-8", "text": "import socket,sys\nfrom threading import Thread\n\n\nHOST='127.0.0.1'\nADDR=(HOST,33331)\n\ndef rcvMsg(sock):\n while 1:\n try:\n msg=sock.recv(1024)\n if not msg:\n break\n print msg\n except:\n pass\n\n\nclientSocket=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\ntry:\n clientSocket.connect(ADDR)\nexcept:\n print ' connect failed!'\n sys.exit()\nt=Thread(target=rcvMsg,args=(clientSocket,))\nt.daemon=True\nt.start()\n\n\nwhile True:\n msg=raw_input()\n clientSocket.send(msg.encode('utf-8'))\n print 'msg send!'\n\n #if msg=='exit':\n # m=clientSocket.recv(1024)\n # if m=='bye':\n # print 'Done!'\n # #sys.exit()\n # clientSocket.close()\n\nclientSocket.close()\n" }, { "alpha_fraction": 0.4793689250946045, "alphanum_fraction": 0.5036407709121704, "avg_line_length": 20.6842098236084, "blob_id": "94403d8eb8303d9fccc813506c344032e01c6b6c", "content_id": "ea05c51f1c2ec72b2c88781cbdd0e7bb7f9fa72b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 824, "license_type": "no_license", "max_line_length": 62, "num_lines": 38, "path": "/ksh/socketTest/server/server.py", "repo_name": "ghfkdgml/pyTestCode", "src_encoding": "UTF-8", "text": "from socket import *\nimport 
os,sys\n\n\nHOST=''\nPORT=44443\nADDR=(HOST,PORT)\n\ns=socket(AF_INET,SOCK_STREAM)\ns.setsockopt(SOL_SOCKET,SO_REUSEADDR,1)\n\ns.bind(ADDR)\ns.listen(5)\nc=None\n\nwhile True:\n if c is None:\n print 'wait....;'\n c,addr=s.accept()\n\n else:\n print 'client connected!'\n msg=c.recv(1024).decode('utf-8')\n if msg=='exit':\n print 'bye'\n c.send('bye!')\n c.close()\n print'socket end!'\n sys.exit()\n elif msg=='':\n print 'client disconnect!'\n c.close()\n sys.exit()\n else:\n result=os.popen(msg).read()\n print 'command done! by %r,%r'%(addr[0],addr[1])\n #msg=os.popen(c.recv(1024).decode('utf-8')).read()\n c.send(result.encode('utf-8'))\n" }, { "alpha_fraction": 0.4422857165336609, "alphanum_fraction": 0.4560000002384186, "avg_line_length": 14.909090995788574, "blob_id": "cdb7523968894af47b84acc34c09db58ab46b425", "content_id": "c08e6edea4c7c7f396d94306dd02fa29d0d748f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 989, "license_type": "no_license", "max_line_length": 73, "num_lines": 55, "path": "/ksh/basic/basic_1.py", "repo_name": "ghfkdgml/pyTestCode", "src_encoding": "UTF-8", "text": "#!/opt/hts/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n ๋ฌธ์ œ1\n num_1, num_2 ์˜ ๋ณ€์ˆ˜๋ฅผ ๊ฐ€์ง€๊ณ \n add_func(๋”ํ•˜๊ธฐ), sub_func(๋นผ๊ธฐ), div_func(๋‚˜๋ˆ„๊ธฐ), multi_func(๊ณฑํ•˜๊ธฐ)๋ฅผ ํ•œ ๊ฐ’์„\n view() ํ•จ์ˆ˜์—์„œ ์ถœ๋ ฅํ•˜์‹œ์š”\n\n\n ๊ฒฐ๊ณผ\n ***************\n ๋”ํ•˜๊ธฐ : XXX\n ๋นผ๊ธฐ : XXX\n ๋‚˜๋ˆ„๊ธฐ : XXX\n ๊ณฑํ•˜๊ธฐ : XXX\n ***************\n\n\"\"\"\n\n\ndef add_func(a,b):\n return a+b\n\ndef sub_func(a,b):\n if ((type(a)==int)and(type(b)==int)):\n if a>=b:\n return a-b\n else:\n return b-a\n return a-b\n\ndef div_func(a,b):\n return a/b\n\ndef multi_func(a,b):\n return a*b\n\ndef view(a,b):\n print(\"๋”ํ•˜๊ธฐ:%d\"%add_func(a,b))\n print(\"๋นผ๊ธฐ%d\"%sub_func(a,b))\n print(\"๋‚˜๋ˆ„๊ธฐ:%d\"%div_func(a,b))\n print(\"๊ณฑํ•˜๊ธฐ:%d\"%multi_func(a,b))\n\n\ndef main():\n num_1 = 40\n num_2 = 20\n print 'default test'\n view(num_1,num_2)\n\nif __name__ == \"__main__\":\n main()\n\n#EOF\n" }, { "alpha_fraction": 0.5082508325576782, "alphanum_fraction": 0.5346534848213196, "avg_line_length": 17.75, "blob_id": "63df487631b56170196c5f0274976488ec872d14", "content_id": "1be9026bbc2025be63e8a4e6e4bf2e1a8a7b1972", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 303, "license_type": "no_license", "max_line_length": 45, "num_lines": 16, "path": "/ksh/coding/strLen.py", "repo_name": "ghfkdgml/pyTestCode", "src_encoding": "UTF-8", "text": "\n\n\ndef strLen(s):\n str_len=len(s)\n m=str_len/10\n n=str_len%10\n ret=[]\n num=0\n for x in range(m):\n ret.append(s[num:num+9])\n num+=10\n if not n:\n ret.append(s[num:]\n #print(ret)\n for x in ret:\n print(x)\n\nstrLen(\"aaaaaaaaaabbbbbbbbbbccccccccccddddd\")\n" }, { "alpha_fraction": 0.5526315569877625, "alphanum_fraction": 0.5789473652839661, "avg_line_length": 14.199999809265137, "blob_id": "e9b6222818e8c389b5ff67f09d207f30bcb7a276", "content_id": "4d9f4eb254841ec48fbd2a0b15f617f2fcd8ff12", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 76, "license_type": "no_license", "max_line_length": 24, "num_lines": 5, "path": "/ksh/coding/printN.py", "repo_name": "ghfkdgml/pyTestCode", "src_encoding": "UTF-8", "text": "def printNum(num):\n for x in range(num):\n print(x+1)\n\nprintNum(4)\n" }, { "alpha_fraction": 0.5233415365219116, "alphanum_fraction": 0.5528255701065063, "avg_line_length": 
20.73214340209961, "blob_id": "27aa275be3eedd43683707d506f4a25a46eea92a", "content_id": "eaca3c943a0b68af44a7d5642de6bf25fbbebaa0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1243, "license_type": "no_license", "max_line_length": 51, "num_lines": 56, "path": "/ksh/OddsAndEnds/BMI.py", "repo_name": "ghfkdgml/pyTestCode", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n#-*-coding:utf-8-*-\n\ndef get_max_min(data_list):\n return max(data_list),min(data_list)\n\ndef get_txt_list(path):\n import os\n print os.system('ls')\n\ndef BMI():\n name=raw_input(\"Name:\")\n height=float(input(\"Height:\"))\n weight=float(input(\"weight:\"))\n bmi=height/(weight*weight)\n print name,\"๋‹˜\"\n if bmi<18.5:\n print \"๋งˆ๋ฆ„\"\n elif bmi>=18.5 and bmi<25.0:\n print \"ํ‘œ์ค€\"\n elif bmi>=25 and bmi <30.0:\n print \"๋น„๋งŒ\"\n elif bmi>30.0:\n print \"๊ณ ๋„ ๋น„๋งŒ\"\n\ndef add_start(start,end):\n total_sum=0\n for i in range(start,end+1):\n total_sum+=i\n print total_sum\n\ndef listCut(list):\n for i in range(len(list)):\n list[i]=list[i][0:3]\n print list\n\ndef dictget(dic,dict_word,except_str):\n if type(dic)!=dict:\n print \"not dict!!\"\n return\n try:\n dic[dict_word]\n except KeyError:\n dic[dict_word]=except_str\n print dic[dict_word]\n\nif __name__=='__main__':\n #a=[1,5,6,7,4,9]\n #print get_max_min(a)\n #et_txt_list('/opt/bigeye')\n BMI()\n #add_start(2,20)\n #listCut(['Seoul', 'Daegu', 'Kwangju', 'Jeju'])\n #a={'test':1,'test2':2,'test4':4}\n #print type(a)\n #dictget(a,'tes','nono')\n\n\n\n\n" }, { "alpha_fraction": 0.5955473184585571, "alphanum_fraction": 0.5992578864097595, "avg_line_length": 18.962963104248047, "blob_id": "283cb091790bba06129350090952b6f952f09912", "content_id": "d486283e384418263da29177877cc86cdaabf2e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 539, "license_type": "no_license", "max_line_length": 48, "num_lines": 27, "path": "/ksh/multiprocess/process.py", "repo_name": "ghfkdgml/pyTestCode", "src_encoding": "UTF-8", "text": "import multiprocessing as mp\nimport time\n\ndef dryer(input):\n while 1:\n dish=input.get()\n if dish=='exit':\n print(\"done\")\n break\n print(\"result:\",dish)\n return False\n\ndish_queue=mp.JoinableQueue()\nproc=mp.Process(target=dryer,args=(dish_queue,))\nproc.daemon=True\nproc.start()\n\ndishes=['salad','bread','exit']\nfor x in dishes:\n dish_queue.put(x)\n\nif dish_queue.get()=='exit':\n print(\"done2\")\n proc.terminate()\n if not proc.is_alive():\n proc.join() \ndish_queue.join()\n" }, { "alpha_fraction": 0.3986254334449768, "alphanum_fraction": 0.44558992981910706, "avg_line_length": 15.980392456054688, "blob_id": "761dd74ad99ef2c0e2a3bd3aa8342decdc66db51", "content_id": "1d2097a9514f5e78489591c8059c19f04f43e5dd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 873, "license_type": "no_license", "max_line_length": 78, "num_lines": 51, "path": "/ksh/OddsAndEnds/kakao.py", "repo_name": "ghfkdgml/pyTestCode", "src_encoding": "UTF-8", "text": "\n\n\ndef kakao():\n ret=[]\n n=int(input('input num'))\n arr1=map(int,raw_input(\"arr1:\").split())\n arr2=map(int,raw_input(\"arr2:\").split())\n\n for i in range(n):\n ret.append(bin(arr1[i]|arr2[i])[2:].replace('1','#').replace('0',' '))\n print ret\n\ndef strCheck(a):\n num=0\n if a[0]=='1' and a[1]=='0':\n num=10\n if a[2]=='D':\n num^=2\n elif a[2]=='T':\n num^=3\n\n else:\n num=a[0]\n if a[1]=='D':\n num^=2\n 
elif a[1]=='T':\n num^=3\n return num\n\n\ndef kakao2(test):\n num1=[]\n for i in range(2,len(test)):\n if test[i].isdigit():\n num1.append(i)\n\n a=test[:num1[0]]\n b=test[num1[0]:num1[1]]\n c=test[num1[1]:]\n print a,b,c\n print strCheck(a)\n\n\n #for i in range(len(test)):\n\n\n\n\n\n\n\nif __name__=='__main__':\n kakao2('1S2D*3T')\n\n\n\n\n" }, { "alpha_fraction": 0.4851851761341095, "alphanum_fraction": 0.4888888895511627, "avg_line_length": 11.809523582458496, "blob_id": "b3567e207b01d56b20193ba0a6f075d64c7146aa", "content_id": "a7149e0c2fea10fcec5feec3e2ad5e7be9ab5b03", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 270, "license_type": "no_license", "max_line_length": 29, "num_lines": 21, "path": "/ksh/basic/class_default.py", "repo_name": "ghfkdgml/pyTestCode", "src_encoding": "UTF-8", "text": "#!/opt/hts/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n XXX\n\n\"\"\"\n\nclass ClassDefault(object):\n def __init__(self):\n pass\n\n def run(self):\n print 'default test!'\n\ndef main():\n CD = CalssDefault()\n CD.run()\n\nif __name__ == \"__main__\":\n main()\n\n" }, { "alpha_fraction": 0.411255419254303, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 15.428571701049805, "blob_id": "bbd3c39f164a0411806ddf4f0575f3c5915875b1", "content_id": "27b48e9970fb8f6ca75d21511387421ce8177e1f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 247, "license_type": "no_license", "max_line_length": 49, "num_lines": 14, "path": "/ksh/OddsAndEnds/CtoF.py", "repo_name": "ghfkdgml/pyTestCode", "src_encoding": "UTF-8", "text": "#-*-coding:utf-8-*-\n\ndef CtoF(cel):\n print \"์„ญ์”จ:{}->ํ™”์”จ:{}\".format(cel,(cel*1.8)+32)\ndef FtoC(f):\n print \"ํ™”์”จ:{}->์„ญ์”จ:{}\".format(f,(f-32)/1.8)\n\ndef main():\n a=40\n CtoF(40)\n FtoC(40)\n\nif __name__=='__main__':\n main()\n\n" }, { "alpha_fraction": 0.4275782108306885, "alphanum_fraction": 0.44032445549964905, "avg_line_length": 16.9375, "blob_id": "2e7bf187bea0e39785b4b467bc407eda4f632db6", "content_id": "c82a2df7913f66e0dfe618424f3e61fbd5cc5602", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 863, "license_type": "no_license", "max_line_length": 34, "num_lines": 48, "path": "/ksh/coding/deck.py", "repo_name": "ghfkdgml/pyTestCode", "src_encoding": "UTF-8", "text": "\n\nclass Deck():\n def __init__(self):\n self.ret=[]\n\n def push_back(self,num):\n self.ret.append(num)\n print(num)\n \n def push_front(self,num):\n self.ret.insert(0,num)\n print(num)\n \n def pop_front(self,num):\n if self.ret:\n print(self.ret.pop(0))\n else:\n print(-1)\n\n def pop_back(self,num):\n if self.ret:\n print(self.ret.pop())\n else:\n print(1)\n\n def size(self):\n print(len(self.ret))\n\n def empty(self):\n if ret:\n print(0)\n else:\n print(1)\n\n def front(self):\n if self.ret:\n print(self.ret[0])\n else:\n print(-1)\n\n def back(self):\n if self.ret:\n print(self.ret[-1])\n else:\n print(-1)\n\na=Deck()\nb=\"push_back\"\na.b(3)\n" }, { "alpha_fraction": 0.7864077687263489, "alphanum_fraction": 0.8058252334594727, "avg_line_length": 19.600000381469727, "blob_id": "5cd475ce3e9de73e1e3386df7ae14ff2efe137ad", "content_id": "077fe6ed77475b77b84e3faccd5c9c05f0e341eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 206, "license_type": "no_license", "max_line_length": 56, "num_lines": 10, "path": "/ksh/cgi_test/webserver.py", "repo_name": 
"ghfkdgml/pyTestCode", "src_encoding": "UTF-8", "text": "import os,sys\nfrom http.server import HTTPServer,CGIHTTPRequestHandler\n\nwebdir='.'\nport =7777\n\nos.chdir(webdir)\nsrvraddr=(\"\",port)\nsrvrobj=HTTPServer(srvraddr,CGIHTTPRequestHandler)\nsrvrobj.serve_forever()\n" }, { "alpha_fraction": 0.5520737171173096, "alphanum_fraction": 0.5649769306182861, "avg_line_length": 26.794872283935547, "blob_id": "c25080085774fcb28d312aa3277200df532e038e", "content_id": "4549f5b730947b7420f3196cccce4072843f27d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1085, "license_type": "no_license", "max_line_length": 82, "num_lines": 39, "path": "/ksh/OddsAndEnds/stock.py", "repo_name": "ghfkdgml/pyTestCode", "src_encoding": "UTF-8", "text": "\ndef stockCount(a):\n while 1:\n name=raw_input('what\\'s the name of stock?:')\n value=int(input(\"how much?:\"))\n num=int(input(\"how much stock do you have?:\"))\n total_v=value*num\n a[name]=(name,value,num,total_v)\n chk=raw_input(\"Do you have stock left?(y/n)\")\n if chk=='n':\n break\n return a\n\ndef lossCheck(value):\n percent=int(input(\"how many percent do you loss or get?:\"))\n loss_get=raw_input(\"loss or get:?(loss/get)\")\n if loss_get=='loss':\n total_v=value*(100-percent)/100\n else:\n total_v=value*(100+percent)/100\n return total_v\n\n\ndef main():\n total_stock={}\n total=0\n\n stockCount(total_stock)\n for x in total_stock.keys():\n name,value,num,total_v= total_stock[x]\n print(\"name:{},value:{},num:{},total_v:{}\".format(name,value,num,total_v))\n checker=raw_input(\"Do your stock's value change?(y/n)\")\n if checker=='y':\n total_v=lossCheck(total_v)\n total+=total_v\n print\"total:{}\".format(total)\n\n\nif __name__==\"__main__\":\n main()\n" }, { "alpha_fraction": 0.2733672261238098, "alphanum_fraction": 0.293703556060791, "avg_line_length": 30.962499618530273, "blob_id": "fc36215d7b1cd1a1a52d9aa6a9254b9e54203b7f", "content_id": "2c77b616e7d97f958bd8655f403bf478f7d130c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2557, "license_type": "no_license", "max_line_length": 59, "num_lines": 80, "path": "/ksh/coding/ddd.py", "repo_name": "ghfkdgml/pyTestCode", "src_encoding": "UTF-8", "text": "msg=str(input())\ndef test(msg):\n ret=list(msg)\n stack=[]\n total=0\n add_check=False\n for x in ret:\n print(stack)\n if x in ['(','[']:\n if len(stack)>=1 and type(stack[-1])==int:\n add_check=True\n stack.append(x)\n elif x==')':\n if stack:\n if stack[-1]=='(':\n stack[-1]=2\n elif type(stack[-1])==int:\n if add_check and type(stack[-2])==int:\n while type(stack[-2])==int:\n stack[-2]=stack[-1]+stack[-2]\n stack.pop()\n add_check=False\n if len(stack)==1 or stack[-2]!='(':\n total=0\n break\n else:\n stack[-2]=stack[-1]*2\n stack.pop()\n\n elif stack[-2]=='(':\n stack[-2]=stack[-1]*2\n stack.pop()\n else:\n total=0\n break\n else:\n total=0\n break\n else:\n total=0\n break\n elif x==']':\n if stack:\n if stack[-1]=='[':\n stack[-1]=3\n elif type(stack[-1])==int:\n if add_check and type(stack[-2])==int:\n while type(stack[-2])==int:\n stack[-2]=stack[-1]+stack[-2]\n stack.pop()\n add_check=False\n if len(stack)==1 or stack[-1]!='[':\n total=0\n break\n else:\n stack[-2]=stack[-1]*2\n stack.pop() \n elif stack[-2]=='[':\n stack[-2]=stack[-1]*3\n stack.pop()\n else:\n total=0\n break\n else:\n total=0\n break\n else:\n total=0\n break\n if len(stack)>1:\n for x in stack:\n if x in [')','(','[',']']:\n total=0\n break\n 
else:\n total+=x\n elif len(stack)==1 and type(stack[0])==int:\n total=stack[0]\n print(total)\ntest(msg)\n" }, { "alpha_fraction": 0.38810741901397705, "alphanum_fraction": 0.5824808478355408, "avg_line_length": 31.58333396911621, "blob_id": "1e9b971c26409fbff25f194a53fa7bbc94a0be8e", "content_id": "e9478855c263955c02800bd78095cd635b68de44", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1680, "license_type": "no_license", "max_line_length": 578, "num_lines": 48, "path": "/ksh/lotto/percent.py", "repo_name": "ghfkdgml/pyTestCode", "src_encoding": "UTF-8", "text": "#-*-coding:utf-8-*-\nfrom random import *\nimport datetime\n\nclass LottoNum():\n totalNum=[]\n #def __init__(self,args):\n # self.totalNum.append(args)\n\n def addNum(self,args):\n self.totalNum=self.totalNum+args\n\n def choiceNum(self):\n ret=[]\n for i in range(6):\n num=choice(self.totalNum)\n ret.append(num)\n for j in range(self.totalNum.count(num)):\n self.totalNum.remove(num)\n Day=checkSaturday()\n print Day.year,'๋…„ ',Day.month,'์›” ',Day.day,'์ผ ','๋กœ๋˜ ๋ฒˆํ˜ธ->',sorted(ret)\n\n\n\ndef checkSaturday():#์‹คํ–‰ํ•˜๋Š” ์‹œ๊ฐ„ ๊ธฐ์ค€ ๊ทธ ์ฃผ ํ† ์š”์ผ ๋‚ ์งœ ์ƒ์„ฑ\n today=datetime.datetime.now()\n num=today.weekday()\n if num!=5:\n if num==6:\n retDay=today+datetime.timedelta(days=6)\n else:\n retDay=today+datetime.timedelta(days=5-num)\n\n else:\n retDay=today\n\n return retDay.date()\n\n\n\n\nTOTALNUM={1:[1]*144,2:[2]*132,3:[3]*128,4:[4]*136,5:[5]*127,6:[6]*128,7:[7]*131,8:[8]*131,9:[9]*100,10:[10]*137,11:[11]*133,12:[12]*135,13:[13]*136,14:[14]*131,15:[15]*128,16:[16]*125,17:[17]*139,18:[18]*126,19:[19]*127,20:[20]*139,21:[21]*130,22:[22]*101,23:[23]*111,24:[24]*127,25:[25]*124,26:[26]*128,27:[27]*147,28:[28]*111,29:[29]*108,30:[30]*120,31:[31]*131,32:[32]*112,33:[33]*135,34:[34]*144,35:[35]*122,36:[36]*124,37:[37]*133,38:[38]*123,39:[39]*127,40:[40]*136,41:[41]*114,42:[42]*120,43:[43]*147,44:[44]*124,45:[45]*128}#์ง€๊ธˆ๊นŒ์ง€์˜ ๋กœ๋˜ ๋ฒˆํ˜ธ๋ณ„ ์ถ”์ฒจํšŸ์ˆ˜๋ฅผ ํ•˜๋“œ์ฝ”๋“œํ•จ -->ํฌ๋กค๋งํ•˜์—ฌ ๋ฐ›์•„์˜ค๋Š” ํ˜•ํƒœ๋กœ ์ˆ˜์ •\n\ntest=LottoNum()\nfor i in range(len(TOTALNUM)):\n test.addNum(TOTALNUM[i+1])\n\ntest.choiceNum()\n" }, { "alpha_fraction": 0.6710963249206543, "alphanum_fraction": 0.6777408719062805, "avg_line_length": 29, "blob_id": "eb463b448ea6a410343409c429a8e2f1bf7e2f67", "content_id": "0d5f07f6cdb00387efbe3f850054355e5e54937f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 301, "license_type": "no_license", "max_line_length": 58, "num_lines": 10, "path": "/ksh/redis/pub.py", "repo_name": "ghfkdgml/pyTestCode", "src_encoding": "UTF-8", "text": "import redis,random\n\nconn=redis.Redis()\ncats=['siamese','persian','maine coon','norwegian forest']\nhats=['stovepipe','bowler','tam-o-shanter','fedora']\nfor msg in range(10):\n cat=random.choice(cats)\n hat=random.choice(hats)\n print('Publish: %s wears %s'%(cat,hat))\n conn.publish(cat,hat)\n\n" }, { "alpha_fraction": 0.4048059284687042, "alphanum_fraction": 0.4214417636394501, "avg_line_length": 16.96666717529297, "blob_id": "d9ef9b5b18a1c55dae2ee61e48babe63e083ecf2", "content_id": "83eb242e0bf4b9aab029958b336ecee86afa4aa3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 541, "license_type": "no_license", "max_line_length": 33, "num_lines": 30, "path": "/ksh/coding/countWord.py", "repo_name": "ghfkdgml/pyTestCode", "src_encoding": "UTF-8", "text": "\n\ndef wordCount(s):\n if not 
len(s):\n return \"Not exist string\"\n ret=0\n for x in s.split(' '):\n if x:\n ret+=1\n print(ret)\n return ret\n\ndef OXcount(s):\n con=0\n ret=0\n for x in list(s):\n if x in ['o','O']:\n if con:\n ret+=con+1\n con+=1\n else:\n ret+=1\n con+=1\n else:\n if con:\n con=0\n print(ret)\n\n\nOXcount('OOOOOOOOOO')\nOXcount('OXOXOXOXOXOXOX')\nOXcount('OOOOXOOOOXOOOOX')\n" }, { "alpha_fraction": 0.43026891350746155, "alphanum_fraction": 0.4446529150009155, "avg_line_length": 20.904109954833984, "blob_id": "c22ca3b3819595cc7cbc8bd38d813e5658dcb58f", "content_id": "4c961b3bb80715f9eef8a7fadc910171319a06a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1599, "license_type": "no_license", "max_line_length": 49, "num_lines": 73, "path": "/ksh/coding/sorting.py", "repo_name": "ghfkdgml/pyTestCode", "src_encoding": "UTF-8", "text": "#N=int(input())\n#nums=[]\n#for a in range(N):\n# nums.append(int(input()))\n \n\ndef sorting(ss):\n ret=sorted(ss)\n for x in ret:\n print(x)\n\ndef compNum(front,back=None):\n ret=[]\n if not back:\n if type(front)!=list:\n ret.append(front)\n return ret\n return front\n if type(front)==int:\n if front<back:\n return [front,back]\n elif front>back:\n return [back,front]\n f=0\n b=0\n while 1:\n if front[f]>back[b]:\n ret.append(back[b])\n if b<len(back)-1:\n b+=1\n else:\n for x in front[f:]:\n ret.append(x)\n break\n elif front[f]<back[b]:\n ret.append(front[f])\n if f<len(front)-1:\n f+=1\n else:\n for x in back[b:]:\n ret.append(x)\n break\n return ret\n \ndef sortingss(ss):\n #ret=list(map(int,str(ss)))\n ret=ss\n result=[]\n #if not num:\n # return\n while 1:\n num=len(ret)\n for x in range(0,num,2):\n if ret[x]==ret[-1]:\n result.append(compNum(ret[x]))\n else:\n part_ret=compNum(ret[x],ret[x+1])\n result.append(part_ret)\n if num==1:\n break\n ret=result\n result=[]\n #print(ret[0])\n if type(ret[0])==int:\n print(ret[0])\n else:\n for x in ret[0]:\n print(x)\nimport random\nrannum=[x for x in range(100000)]\nrandom.shuffle(rannum)\n\nsortingss(rannum)\n" }, { "alpha_fraction": 0.5243902206420898, "alphanum_fraction": 0.5487805008888245, "avg_line_length": 16.44444465637207, "blob_id": "a0361b4638795d69b07e0e5cc00df648b548da28", "content_id": "f460cedca4bfcc2f18bcf350b6d8060e08f48c1b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 164, "license_type": "no_license", "max_line_length": 29, "num_lines": 9, "path": "/ksh/coding/selfnum.py", "repo_name": "ghfkdgml/pyTestCode", "src_encoding": "UTF-8", "text": "\n\ndef selfNum(num):\n pos=len(str(num))\n ret=num\n for x in range(pos):\n ret+=int(str(num)[x])\n print(\"result:\",ret)\n\nselfNum(75)\nselfNum(39)\n \n" }, { "alpha_fraction": 0.5558463931083679, "alphanum_fraction": 0.5641361474990845, "avg_line_length": 22.13131332397461, "blob_id": "abfbddd2309f296851fac0e04479cdd5f73e3970", "content_id": "bc80074394157d04ab3f2c4effe99d97edc058ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2292, "license_type": "no_license", "max_line_length": 58, "num_lines": 99, "path": "/ksh/socketTest/SSTest/client.py", "repo_name": "ghfkdgml/pyTestCode", "src_encoding": "UTF-8", "text": "from SocketServer import *\nfrom socket import *\nHOST=''\nPORT=33331\nADDR=(HOST,PORT)\n\nclass TcpReqHandler(StreamRequestHandler):\n def handle(self):\n print 'connect...'\n conn=self.request\n while 1:\n msg=conn.recv(1024)\n if not msg:\n conn.close()\n print 
self.client_address,' disconnected!'\n break\n print self.client_address, msg\n if msg=='exit':\n conn.send('bye')\n\nclass UserList():\n def __init__(self,ulist):\n self.userList=ulist\n\n\n def userCheck(self,id):\n self.id=id\n if self.id in self.userList:\n print 'user name exist!'\n else:\n self.userList.append(self.id)\n\n def userDel(self):\n self.userList.remove(self.id)\n def printUser(self):\n print self.userList\nclass SendMsg():\n userList={}\n\n def userAdd(self,ip,conn):\n self.userList[ip]=conn\n\n def sendMSG(self,ip,msg):\n if len(self.userList)<=1:\n self.userList[ip].send('more user needed!')\n return\n for i in self.userList.keys():\n if i!=ip:\n self.userList[i].send(msg)\n\n def userDel(self,ip):\n self.userList.pop(ip)\nclass TCPChatHandler(StreamRequestHandler):\n users=SendMsg()\n #def handle(self):\n # print 'connect!....'\n # self.id=raw_input('type your id:')\n # x.userCheck(self.id)\n # conn=self.request\n # while 1:\n def handle(self):\n print 'conenct!...'\n conn=self.request\n ip=self.client_address[0]\n self.users.userAdd(ip,conn)\n while 1:\n msg=conn.recv(1024)\n if not msg:\n self.users.userDel(self.client_address[0])\n conn.close()\n print self.client_address,' disconnected!'\n break\n self.users.sendMSG(ip,msg)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nif __name__=='__main__':\n #ThreadingTCPServer.allow_reuse_address=True\n #server=ThreadingTCPServer(ADDR,TcpReqHandler)\n #print 'listening on port ',PORT\n #server.serve_forever()\n ThreadingTCPServer.allow_reuse_address=True\n server=ThreadingTCPServer(ADDR,TCPChatHandler)\n print 'listening on port ',PORT\n server.serve_forever()\n\n\n" }, { "alpha_fraction": 0.38512396812438965, "alphanum_fraction": 0.4272727370262146, "avg_line_length": 16.285715103149414, "blob_id": "a0e9811b18ad5c6cc77e572d26ec4300373d790d", "content_id": "1bd2b00604b9fdb9ca1746c9c92cfabcf474ced4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1334, "license_type": "no_license", "max_line_length": 43, "num_lines": 70, "path": "/ksh/basic/basic_3.py", "repo_name": "ghfkdgml/pyTestCode", "src_encoding": "UTF-8", "text": "#!/opt/hts/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n ๋ฌธ์ œ3\n ๋‹ค์Œ list๋ฐ์ดํ„ฐ๋ฅผ ์กฐ๊ฑด์— ๋งž์ถ”์–ด ์ถœ๋ ฅํ•˜์‹œ์š”\n\n ex) 'A,15,seoul' >> '์ด๋ฆ„,๋‚˜์ด,์ง€์—ญ'\n\n 3-1 : ๋‚˜์ด๊ฐ€ 14 ์ด์ƒ์ด๊ณ  ์ง€์—ญ์ด seoul์ธ ๊ฒƒ๋“ค๋งŒ ์ถœ๋ ฅ\n 3-2 : ๋‚˜์ด๊ฐ€ ํฐ์ˆœ์„œ๋ฐ๋กœ ๋ฆฌ์ŠคํŠธ๋ฅผ ๋‹ค์‹œ ์ถœ๋ ฅํ•˜์‹œ์š”\n\n ๊ฒฐ๊ณผ\n A,15,seoul\n E,19,seoul\n ********************\n E,19,seoul\n F,18,busan\n G,17,anyang\n D,16,busan\n A,15,seoul\n B,14,busan\n C,11,sungnam\n ********************\n\n\n\"\"\"\nLIST_DATA = [\n 'A,15,seoul'\n ,'B,14,busan'\n ,'C,11,sungnam'\n ,'D,16,busan'\n ,'E,19,seoul'\n ,'F,18,busan'\n ,'G,17,anyang'\n ]\ndef parse(txt):\n ret=txt.split(',')\n if ret[1]>14 and ret[2]=='seoul':\n return txt,1\n else:\n return txt,0\n\n\ndef main():\n print 'default test'\n data=LIST_DATA\n list_in=[]\n for text in LIST_DATA:\n msg,ret=parse(text)\n if ret==1:\n list_in.append(msg)\n data.remove(msg)\n\n print '*******************'\n for i in range(0,len(list_in)):\n print list_in[i]\n print '*******************'\n for i in range(0,len(data)):\n print data[i]\n\n\n\n\n\n\nif __name__ == \"__main__\":\n main()\n\n#EOF\n" }, { "alpha_fraction": 0.6683937907218933, "alphanum_fraction": 0.7150259017944336, "avg_line_length": 11.733333587646484, "blob_id": "2e7089bfe2295382b30ce4de0185a8df3716c0c2", "content_id": "452ff6f53b7c3236d2565eeb154431c27c28375e", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 193, "license_type": "no_license", "max_line_length": 35, "num_lines": 15, "path": "/ksh/socketTest/udpTest/client.py", "repo_name": "ghfkdgml/pyTestCode", "src_encoding": "UTF-8", "text": "import os\nfrom socket import *\n\nHOST=''\nPORT=40000\n\nADDR=(HOST,PORT)\n\ns=socket(AF_INET,SOCK_DGRAM)\ns.bind(ADDR)\n\ns.sendto('test msg to server',ADDR)\nmsg,addr=s.recvfrom(1024)\n\nprint msg,addr\n\n\n" }, { "alpha_fraction": 0.4174410402774811, "alphanum_fraction": 0.4288777709007263, "avg_line_length": 21.564516067504883, "blob_id": "fc4b768a71b89b59518ebb4f0eb2a282ae97b374", "content_id": "e35bf7fba85264badfdbcff9617d486c48f131d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1399, "license_type": "no_license", "max_line_length": 49, "num_lines": 62, "path": "/ksh/coding/num.py", "repo_name": "ghfkdgml/pyTestCode", "src_encoding": "UTF-8", "text": "N=int(input())\nimport sys\nret=[]\nfor x in range(N):\n a=sys.stdin.readline().rstrip()\n ret.append(int(a))\n\ndef compNum(front,back=None):\n ret=[]\n if not back:\n if type(front)!=list:\n ret.append(front)\n return ret\n return front\n if type(front)==int:\n if front<back:\n return [front,back]\n elif front>back:\n return [back,front]\n f=0\n b=0\n while 1:\n if front[f]>back[b]:\n ret.append(back[b])\n if b<len(back)-1:\n b+=1\n else:\n for x in front[f:]:\n ret.append(x)\n break\n elif front[f]<back[b]:\n ret.append(front[f])\n if f<len(front)-1:\n f+=1\n else:\n for x in back[b:]:\n ret.append(x)\n break\n return ret\n \ndef sortingss(ss):\n ret=ss\n result=[]\n while 1:\n num=len(ret)\n if num==1:\n break\n for x in range(0,num,2):\n if ret[x]==ret[-1]:\n result.append(compNum(ret[x]))\n else:\n part_ret=compNum(ret[x],ret[x+1])\n result.append(part_ret)\n ret=result\n result=[]\n if type(ret[0])==int:\n print(ret[0])\n else:\n for x in ret[0]:\n print(x)\n\nsortingss(ret)\n" }, { "alpha_fraction": 0.427600622177124, "alphanum_fraction": 0.5305802226066589, "avg_line_length": 24.171052932739258, "blob_id": "fd232575d065356cd261b15648a06730c174cbc3", "content_id": "0672ce8d15494cf0036ccfd1cc5d4b82a19ae878", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1995, "license_type": "no_license", "max_line_length": 82, "num_lines": 76, "path": "/ksh/basic/basic_2.py", "repo_name": "ghfkdgml/pyTestCode", "src_encoding": "UTF-8", "text": "#!/opt/hts/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n ๋ฌธ์ œ2\n ํŠน์ •๋ฌธ์ž์—ด์„ parsing ํ•ด๋ณด์‹œ์š”\n\n Tasks ์—์„œ๋Š” zombie์˜ ๊ฐ’๋งŒ\n Cpu ์—์„œ๋Š” idle ๊ฐ’๋งŒ\n Mem ์—์„œ๋Š” total ๊ฐ’๋งŒ\n Swap ์—์„œ๋Š” used ๊ฐ’๋งŒ ์ถœ๋ ฅ์„ ํ•˜์‹œ์š”\n\n ๊ฒฐ๊ณผ\n ********************\n 2015-07-24 17:12:22\n ********************\n Task : 0 zombie\n Cpu : 95.8%id\n Mem : 1020348k total\n Swap : 1003708k used\n\n\"\"\"\nTEXT = \"\"\"\nTasks: 182 total, 1 running, 181 sleeping, 0 stopped, 0 zombie\nCpu(s): 3.1%us, 0.7%sy, 0.0%ni, 95.8%id, 0.3%wa, 0.0%hi, 0.0%si, 0.0%st\nMam: 1020348k total, 902540k used, 117808k free, 35768k buffers\nSwap: 2064376k total, 1003708k used, 1060668k free, 157344k cached\n\"\"\"\nimport time\n\n\ndef view(msg):\n print msg\n\n\ndef parsing(msg,delimit=','):\n ret=msg.split(delimit)\n if ret[0].startswith('Task'):\n result=ret[4].strip()\n return 'Task',result\n elif ret[0].strip().startswith('Cpu'):\n result=ret[3].strip()\n return 'Cpu',result\n elif 
ret[0].strip().startswith('Mam'):\n result=ret[0].split(':')[1].strip()\n return 'Mam',result\n elif ret[0].strip().startswith('Swap'):\n result=ret[1].strip()\n return 'Swap',result\n else:\n return 'no','no'\n\n\ndef read(TEXT):\n #result={}\n for line in TEXT.split('\\n'):\n x,y=parsing(line)\n print(\"%s:%s\"%(x,y))\n #result.append(parsing(line))\n\n\ndef main():\n now = time.strftime(\"%Y:%m:%d\")\n print '###########'\n print now\n print '###########'\n text=\"\"\"Tasks: 182 total, 1 running, 181 sleeping, 0 stopped, 0 zombie\n Cpu(s): 3.1%us, 0.7%sy, 0.0%ni, 95.8%id, 0.3%wa, 0.0%hi, 0.0%si, 0.0%st\n Mam: 1020348k total, 902540k used, 117808k free, 35768k buffers\n Swap: 2064376k total, 1003708k used, 1060668k free, 157344k cached\"\"\"\n read(text)\n\nif __name__ == \"__main__\":\n main()\n\n#EOF\n" }, { "alpha_fraction": 0.557851254940033, "alphanum_fraction": 0.5683033466339111, "avg_line_length": 26.420000076293945, "blob_id": "b3d9a68c7a8cb0abf65cb2a8976e08c643fa45b4", "content_id": "e07bfc0b9f0f24cd3ca7d0b72e22a66ca08842b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4422, "license_type": "no_license", "max_line_length": 170, "num_lines": 150, "path": "/ksh/txtTest/insertTxt_Linux.py", "repo_name": "ghfkdgml/pyTestCode", "src_encoding": "UTF-8", "text": "#!-*-coding:utf-8-*-\nimport os,re\n\"\"\"๋ฆฌ๋ˆ…์Šค์—์„œ vi๋กœ ํŒŒ์ผ์„ ์—ด์–ด ์›ํ•˜๋Š” ์œ„์น˜์— ์›ํ•˜๋Š” ๊ฐ’์„ ์ž…๋ ฅํ•˜๋„๋ก ์„ค๊ณ„๋œ ์ฝ”๋“œ\"\"\"\n\ndef insertToFile(path,stc,inputStc,rs=1):\n if not os.path.exists(path):\n print('Wrong path!')\n return\n f=open(path,'a+')\n while 1:\n a=f.readline()\n if not a:\n break\n #if a.strip()==stc.strip():\n if a.strip().find(stc)==0:\n #rs=int(input('End or next line?(0/1)'))\n if rs:\n print '%s match!'%path\n num=f.tell()\n break\n else:\n print '%s match!'%path\n num=f.tell()-len(a)+len(stc)\n break\n else:\n num=0\n if rs:\n result=addText(f,num,inputStc)\n else:\n result=addTextEnd(f,num,inputStc)\n #result=addTextEnd(f,num,inputStc)\n reinputStc(f,result)\n\ndef addText(f,num,stc):#num ์œ„์น˜(stc์œ„์น˜)๋กœ ์ด๋™ํ•ด์„œ๋ฐ‘์— ๋‚ด์šฉ์„ result์— ๋„ฃ์€ ํ›„ ์ง€์šฐ๊ณ  stc ์‚ฝ์ž…\n result=[]\n f.seek(num)\n for line in f.readlines():\n result.append(line)\n f.seek(num)\n f.truncate()\n f.write(stc+'\\n')\n return result\n\ndef addTextEnd(f,num,stc):#addText์™€ ๊ฐ™์ง€๋งŒ ๋‹ค์Œ์ค„์— ์‚ฝ์ž…ํ•˜์ง€ ์•Š๊ณ  ๋งˆ์ง€๋ง‰ ์ค„ ๋’ค์— ์‚ฝ์ž…\n result=[]\n f.seek(num)\n f.readline()\n for line in f.readlines():\n result.append(line)\n f.seek(num)\n f.truncate()\n f.write(stc+'\\n')\n return result\n\ndef reinputStc(f,result):#์ง€์šด ๋‚ด์šฉ ๋‹ค์‹œ ์‚ฝ์ž…\n for line in result:\n f.write(line)\n\n\n\n\n\n\n#path=\"/etc/sudoers\"#sudo ์ถ”๊ฐ€\n#stc=\"root\\tALL=(ALL) \\tALL\\n\"\n#inputStc=\"hts ALL=NOPASSWD: ALL\"\n#insertToFile(path,stc,inputStc)\n#path='/etc/pam.d/system-auth'#๋น„๋ฐ€๋ฒˆํ˜ธ ํšŸ์ˆ˜ ์ถ”๊ฐ€\n#stc='auth required pam_deny.so\\n'\n#inputStc='auth required /lib/security/pam_tally.so deny=5 unlock_time=5 no_magic_root'\n#insertToFile(path,stc,inputStc)\n#try:\n# os.system('chmod 640 /etc/hosts')\n# print '/etc/hosts ๊ถŒํ•œ ๋ณ€๊ฒฝ ์™„๋ฃŒ!'\n#except:\n# print 'hosts ๊ถŒํ•œ ๋ณ€๊ฒฝ ์‹คํŒจ'\n#\n#os.system('chmod โ€“s /sbin/unix_chkpwd')\n#os.system('chmod โ€“s /usr/bin/ata')\n#os.system('chmod โ€“s /usr/bin/newgrp')\n#print 'suid,sgid,stickybit ์„ค์ • ์™„๋ฃŒ!')\n#\n#os.system('find /dev -type f -exec ls {} \\;|grep /dev/.udev|xargs rm')\n#print '/dev ์ œ๊ฑฐ ์™„๋ฃŒ!'\n#\n#pid=os.popen('ps -ef|grep 
automount').read().splitlines()[0].split()[1]\n#os.system('kill -9 %d'%int(pid))\n#print 'automount ์ œ๊ฑฐ ์™„๋ฃŒ'\n#\n#pid=os.popen('ps -ef|grep autofs').read().splitlines()[0].split()[1]\n#os.system('kill -9 %d'%int(pid))\n#\n#temp=os.popen('ls /etc/rc.d/rc*.d/*|grep autofs*').read().splitlines()\n#\n#for i in range(len(temp)):\n# b=temp[i].split('/')\n# c='/'.join(b[:-1])+'/_'+b[-1]\n# try:\n# os.system('mv %s %s'%(temp[i],c))\n# except:\n# print 'mv failed!'\n#\n#print 'autofs ์ œ๊ฑฐ ์™„๋ฃŒ'\n#\n#path='/etc/login.defs'\n#stc='PASS_MAX_DAYS'\n#inputStc=' 90'\n#rs=0\n#insertToFile(path,stc,inputStc,rs)\n#LIST=[('/etc/sudoers','root\\tALL=(ALL) \\tALL\\n','hts ALL=NOPASSWD: ALL',1),\n# ('/etc/pam.d/system-auth','auth required pam_deny.so\\n','auth required /lib/security/pam_tally.so deny=5 unlock_time=5 no_magic_root',1),\n# ('/etc/login.defs','PASS_MAX_DAYS',' 90',0),\n# ('/etc/login.defs','PASS_MIN_LEN',' 8',0)\n# ]\n#\n#for x,y,z,f in LIST:\n# insertToFile(x,y,z,f)\n# print '%s success!'%z\n#\n#try:\n# os.system('chmod 640 /etc/hosts')\n# print '/etc/hosts ๊ถŒํ•œ ๋ณ€๊ฒฝ ์™„๋ฃŒ!'\n#except:\n# print 'hosts ๊ถŒํ•œ ๋ณ€๊ฒฝ ์‹คํŒจ'\n#os.system('chmod -s /sbin/unix_chkpwd')\n#os.system('chmod -s /usr/bin/at')\n#os.system('chmod -s /usr/bin/newgrp')\n#print 'suid,sgid,stickybit ์„ค์ • ์™„๋ฃŒ!'\n#\n#os.system('find /dev -type f -exec ls {} \\;|grep /dev/.udev|xargs rm')\n#print '/dev ์ œ๊ฑฐ ์™„๋ฃŒ!'\n#\n#pid=os.popen('ps -ef|grep automount').read().splitlines()[0].split()[1]\n#os.system('kill -9 %d'%int(pid))\n#print 'automount ์ œ๊ฑฐ ์™„๋ฃŒ'\n#\n#pid=os.popen('ps -ef|grep autofs').read().splitlines()[0].split()[1]\n#os.system('kill -9 %d'%int(pid))\n#\n#temp=os.popen('ls /etc/rc.d/rc*.d/*|grep autofs*').read().splitlines()\n#\n#for i in range(len(temp)):\n# b=temp[i].split('/')\n# c='/'.join(b[:-1])+'/_'+b[-1]\n# try:\n# os.system('mv %s %s'%(temp[i],c))\n# except:\n# print 'mv failed!'\n#\n#print 'autofs ์ œ๊ฑฐ ์™„๋ฃŒ'\n\n" }, { "alpha_fraction": 0.48562300205230713, "alphanum_fraction": 0.5814696550369263, "avg_line_length": 23.076923370361328, "blob_id": "b8842151b9de908a2a713aa06148a2ad34c79a31", "content_id": "edd9dadac3d3bd553bcb76df0a686f88f06b0f95", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 325, "license_type": "no_license", "max_line_length": 81, "num_lines": 13, "path": "/ksh/coding/calender.py", "repo_name": "ghfkdgml/pyTestCode", "src_encoding": "UTF-8", "text": "#-*-coding:utf-8-*-\n\ndef dayofweek(mon,day):\n days=[31,29,31,30,31,30,31,31,30,31,30,31]\n week=[\"Sunday\",\"Monday\",\"Tuesday\",\"Wednesday\",\"Thursday\",\"Friday\",\"Saturday\"]\n ret=0\n for x in range(mon-1):\n ret+=days[x]\n ret+=day\n print(\"%d์›” %d์ผ์€ %s์ž…๋‹ˆ๋‹ค.\"%(mon,day,week[ret%7]))\n\n\ndayofweek(4,5)\n" }, { "alpha_fraction": 0.388415664434433, "alphanum_fraction": 0.3935264050960541, "avg_line_length": 21.576923370361328, "blob_id": "6bbf3f58cf4ce14c5da70cf3d417b94a7116d698", "content_id": "c3f5f7323b4e9d7884a278c7a3e7470a5b2db3be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 587, "license_type": "no_license", "max_line_length": 40, "num_lines": 26, "path": "/ksh/OddsAndEnds/test_get_file_word.py", "repo_name": "ghfkdgml/pyTestCode", "src_encoding": "UTF-8", "text": "#-*-coding:utf-8-*-\n\ndef file_get_word():\n with open('test.txt','r') as f:\n\n a=f.readlines()\n for x in a:\n if len(x.strip())>=10:\n print x\nexceptList=[',','.',\"'\",\"\\\"\",'/','`']\ndef 
findWordInFile():\n with open('test.txt','r') as f:\n for x in f.readlines():\n for y in x.split():\n if 'c' in y:\n for k in exceptList:\n if k in y:\n y=y.strip(k)\n print y\n\n\n\n\nif __name__=='__main__':\n #file_get_word()\n findWordInFile()\n" }, { "alpha_fraction": 0.4901960790157318, "alphanum_fraction": 0.529411792755127, "avg_line_length": 19.399999618530273, "blob_id": "b53a36f351c300c00a0fcd8445bb53d3e058f682", "content_id": "ce23bff8e37400c1ab6054d7c7511feb12814451", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 102, "license_type": "no_license", "max_line_length": 43, "num_lines": 5, "path": "/ksh/coding/complex.py", "repo_name": "ghfkdgml/pyTestCode", "src_encoding": "UTF-8", "text": "def complexN(num):\n for x in range(1,10):\n print(\"%d X %d = %d\"%(num,x,num*x))\n\ncomplexN(4)\n" }, { "alpha_fraction": 0.558922529220581, "alphanum_fraction": 0.5715488195419312, "avg_line_length": 25.377777099609375, "blob_id": "87cadd1aa6a93aecc3d98aef5d97a53d9ecbda1b", "content_id": "d59a79ebb2e1eb735ed64d5028bc1b81b9544597", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1272, "license_type": "no_license", "max_line_length": 75, "num_lines": 45, "path": "/ksh/basic/iptables.py", "repo_name": "ghfkdgml/pyTestCode", "src_encoding": "UTF-8", "text": "#-*- coding:utf-8 -*-\n\"\"\"iptables ์„ค์ •์„ ํ•˜๊ธฐ ์œ„ํ•ด ๋งŒ๋“  ์ฝ”๋“œ\"\"\"\nimport os\n\n#num=input(\"\"\"๋ฉ”๋‰ด๋ฅผ ์„ ํƒํ•˜์„ธ์š”.\n#1.์ •์ฑ…์ถ”๊ฐ€\n#2.์ •์ฑ…ํ™•์ธ\n#3.์ •์ฑ…์‚ญ์ œ\n#์ž…๋ ฅ:\"\"\")\n\n\ndef addRule():\n cmd='iptables -A INPUT -p tcp --dport %d -j ACCEPT'\n result=[]\n for x in os.popen('netstat -antp|grep LISTEN').read().splitlines():\n port=x.split()[3].split(':')[1]\n if port:\n result.append(int(port.strip()))\n for y in result:\n a=os.popen(cmd %y)\n if not a.read():\n print '์ด๋ฏธ ์žˆ๋Š” ์ •์ฑ…์ž…๋‹ˆ๋‹ค'\n os.system('iptables -A INPUT -p udp --dport 514 -j ACCEPT')\n\ndef checkRule():\n cmd='iptables -L'\n os.system(cmd)\n\ndef gogo():\n f=open('/etc/sudoers','a').write(\"\"\"hts /bin/netstat\n hts /sbin/iptables\"\"\")\n f.close()\n\n os.system('chmod -s /sbin/unix_chkpwd')\n os.system('chmod -s /usr/bin/at')\n os.system('chmod -s /usr/bin/newgrp')\n os.system('find /dev -type f -exec ls {} \\;|grep /dev/.udev|xargs rm')a\n #a=os.popen('ps -ef |grep automount').readline()\n pid=os.popen('ps -ef |grep automount').readline().split()[1]\n os.system('kill -9 %d'%int(pid))\n pid2=os.popen('ps -ef |grep statd').readline().split()[1]\n os.system('kill -9 %d'%int(pid2))\n\n\ngogo()\n\n" }, { "alpha_fraction": 0.7322834730148315, "alphanum_fraction": 0.787401556968689, "avg_line_length": 20.16666603088379, "blob_id": "dc39f13e5ca32ba920b3ff4814f23786ecabab9d", "content_id": "d322b3a8d33c0a56de6f34c57e4d700e65e3384b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 127, "license_type": "no_license", "max_line_length": 57, "num_lines": 6, "path": "/ksh/xmlrpc/xmlrpc_client.py", "repo_name": "ghfkdgml/pyTestCode", "src_encoding": "UTF-8", "text": "import xmlrpc.client\n\nproxy=xmlrpc.client.ServerProxy(\"http://localhost:6999/\")\nnum=111\nresult=proxy.double(num)\nprint(result)\n" } ]
46
lukasschwab/trigol
https://github.com/lukasschwab/trigol
5b550e229af52ba00ac52f70924707f1aba5be28
7a6a11a8596943e0b9c51db9bcb10e09388e23f8
4db7153b774d3893aed827af641062cfe1efd407
refs/heads/master
2020-12-05T03:58:50.628609
2020-01-07T17:07:41
2020-01-07T17:07:41
232,003,256
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6535947918891907, "alphanum_fraction": 0.663943350315094, "avg_line_length": 31.785715103149414, "blob_id": "7d80a339940e277be9165de45b2f83230ca5a9ee", "content_id": "ed72233fbe9443bafdd057c5ab72ec7e2b259df1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1836, "license_type": "no_license", "max_line_length": 78, "num_lines": 56, "path": "/evaluators.py", "repo_name": "lukasschwab/trigol", "src_encoding": "UTF-8", "text": "# GENERIC\n\n# copy_evaluator does not change any cell states.\ndef copy_evaluator(board, i):\n return board.get_cell_state(i)\n\n# invert_evaluator inverts every cell state.\ndef invert_evaluator(board, i):\n return not copy_evaluator(board, i)\n\n# infection_evaluator makes grants life to every cell neighboring at least one\n# live cell.\ndef infection_evaluator(board, i):\n neighbors = board.grid._get_neighbors(i)\n n_states = sum([board.get_cell_state(n) for n in neighbors])\n return n_states > 0 or board.get_cell_state(i)\n\ndef get_evaluator(El, Eh, Fl, Fh):\n def evaluator(board, i):\n neighbors = board.grid._get_neighbors(i)\n n_states = sum([board.get_cell_state(n) for n in neighbors])\n if n_states < El or n_states > Eh:\n # Outside the environment rule range.\n return False\n if n_states >= Fl and n_states <=Fh:\n # Within the fertility rule range.\n return True\n return board.get_cell_state(i)\n return evaluator\n\n# TRIANGLE GRIDS\n\n# trigrid_conway_evaluator is a shoddy adapted version of the standard\n# conway_evaluator for the three-adjacent triangle grid.\ndef trigrid_conway_evaluator(board, i):\n neighbors = board.grid._get_neighbors(i)\n n_states = sum([board.get_cell_state(n) for n in neighbors])\n # Unchanged if 1\n if n_states == 1:\n return board.get_cell_state(i)\n # Spawn if 2; else die\n return n_states == 2\n\ntri4644 = get_evaluator(4, 6, 4, 4)\n\n# QUAD GRIDS\n\n# Standard Game of Life. 
Should be interchangable with get_evaluator(2,3,3,3).\ndef conway_evaluator(board, i):\n neighbors = board.grid._get_neighbors(i)\n n_states = sum([board.get_cell_state(n) for n in neighbors])\n if n_states == 2:\n return board.get_cell_state(i)\n elif n_states == 3:\n return True\n return False\n" }, { "alpha_fraction": 0.5665132403373718, "alphanum_fraction": 0.5814157128334045, "avg_line_length": 37.669490814208984, "blob_id": "ae2f9e8450cd1bf4e31241c9fad55d63373dffd5", "content_id": "b25c8dd84874696dce889e598d0b932c88598e9e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4575, "license_type": "no_license", "max_line_length": 80, "num_lines": 118, "path": "/grid.py", "repo_name": "lukasschwab/trigol", "src_encoding": "UTF-8", "text": "# Utility.\ndef even(n):\n return not int(n) % 2\n\n# Grids are indexing and adjacency utilities for GameBoards.\nclass Grid:\n def __init__(self, num_rows, num_cols):\n self._num_rows = num_rows\n self._num_cols = num_cols\n\n # _get_neighbors returns an iterable containing the indices of the neighbors\n # of the cell at INDEX.\n # Must be implemented by inheriting classes.\n def _get_neighbors(self, index):\n raise Exception(\"NotImplementedException\")\n\n # _print pretty-prints the grid to stdout.\n # Must be implemented by inheriting classes.\n def _print(self, get_cell_state):\n raise Exception(\"NotImplementedException\")\n\n # _total_items returns the number of cells in this grid.\n def _total_items(self):\n return self._num_rows * self._num_cols\n\n # _is_valid_index returns true iff INDEX is a valid index in this grid.\n def _is_valid_index(self, index):\n return index >= 0 and index < self._total_items()\n\n # _get_index accepts out-of-bound row, col pairs and converte them to valid\n # indices by calculating their values mod the row count and column count,\n # respectively.\n def _get_index(self, row, col):\n row = row % self._num_rows\n col = col % self._num_cols\n return ((row * self._num_cols) + col) % self._total_items()\n\n # _get_row_col converts INDEX to its (row, column) position in the grid.\n def _get_row_col(self, index):\n assert self._is_valid_index(index)\n return int(index / self._num_cols), index % self._num_cols\n\n# QuadGrid is a standard rectangular grid, as used in the standard Conway\n# version of Game of Life. Each cell is adjacent to eight neighbors.\nclass QuadGrid(Grid):\n def __init__(self, num_rows, num_cols):\n assert num_rows > 0 and num_cols > 0\n super().__init__(num_rows, num_cols)\n\n def _get_neighbors(self, index):\n row, col = self._get_row_col(index)\n return [self._get_index(pair[0], pair[1]) for pair in [\n (row-1, col-1), (row-1, col), (row-1, col+1),\n (row, col-1), (row, col+1),\n (row+1, col-1), (row+1, col), (row+1, col+1)\n ]]\n\n def _print(self, get_cell_state):\n glyphs = { True: \"โ—ผ\", False: \"โ—ป\" }\n for row in range(self._num_rows):\n line = \"\"\n for col in range(self._num_cols):\n line += glyphs[get_cell_state(self._get_index(row, col))]\n print(line)\n\n# TriGrid is a default triangle-tesselated toroidal grid. 
Each cell has three\n# neighbors, corresponding to its three shared edges.\nclass TriGrid(Grid):\n def __init__(self, num_rows, num_cols):\n assert num_rows > 0 and num_cols > 0\n assert even(num_rows)\n super().__init__(num_rows, num_cols)\n\n # Offsets must be defined for an odd row.\n def _inner_get_neighbors(self, index, offsets):\n row, col = self._get_row_col(index)\n flipper = -1 if even(index / self._num_cols) else 1\n def inner_get(d_row, d_col):\n modified_row = row + (flipper * d_row)\n modified_col = col + (flipper * d_col)\n return self._get_index(modified_row, modified_col)\n return [inner_get(pair[0], pair[1]) for pair in offsets]\n\n def _get_neighbors(self, index):\n return self._inner_get_neighbors(index, [\n (-1, 0),\n (-1, 1),\n (1, 0)\n ])\n\n def _print(self, get_cell_state):\n down = { True: \"โ–ผ\", False: \"โ–ฝ\" }\n up = { True: \"โ–ฒ\", False: \"โ–ณ\" }\n for row in range(0, self._num_rows - 1, 2):\n line = \" \" * row\n for col in range(self._num_cols):\n top_index = self._get_index(row, col)\n bottom_index = self._get_index(row + 1, col)\n line += down[get_cell_state(top_index)]\n line += up[get_cell_state(bottom_index)]\n print(line)\n\n# TriGrid12 is a variant triangle-tesselated toroidal grid. Each cell has twelve\n# neighbors, corresponding to the 12 triangles that share one of its vertices.\n#\n# This seems to be the standard triangular grid in literature, because it has a\n# larger valid ruleset; see e.g. Bays 1994.\nclass TriGrid12(TriGrid):\n def __init__(self, num_rows, num_cols):\n super().__init__(num_rows, num_cols)\n\n def _get_neighbors(self, index):\n return self._inner_get_neighbors(index, [\n (1, 1),(2, 0),(1, 0),\n (2, -1),(1, -1),(0, -1),\n (-1, 0),(-2, 0),(-3, 1),\n (-2, 1),(-1, 1),(0, 1)\n ])\n" }, { "alpha_fraction": 0.6407079696655273, "alphanum_fraction": 0.6407079696655273, "avg_line_length": 32.235294342041016, "blob_id": "7d6685598513e35ca3cd3036c6a4aa34ecfce512", "content_id": "b149aef7fb4397f554500adb65d85bb2f7190877", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1130, "license_type": "no_license", "max_line_length": 95, "num_lines": 34, "path": "/trigol.py", "repo_name": "lukasschwab/trigol", "src_encoding": "UTF-8", "text": "from grid import QuadGrid\nfrom evaluators import infection_evaluator\n\n# TODO: SVG animation output.\n\n# GameBoard represents a Game of Life state.\nclass GameBoard:\n def __init__(self, num_rows, num_cols, evaluator=infection_evaluator, grid_class=QuadGrid):\n self.grid = grid_class(num_rows, num_cols)\n self.__evaluator = evaluator\n # Initialize all cells to dead.\n self.cells = [False] * num_rows * num_cols\n\n def get_cell_state(self, index):\n assert self.grid._is_valid_index(index)\n return self.cells[index]\n\n def set_cell_state(self, index, state=True):\n assert self.grid._is_valid_index(index)\n self.cells[index] = state\n\n def set_multiple_cell_states(self, indices, state=True):\n for index in indices:\n self.set_cell_state(index, state)\n\n def step(self):\n next = [False] * len(self.cells)\n for i in range(len(self.cells)):\n next[i] = self.__evaluator(self, i)\n self.cells = next\n\n # print defers to the underlying grid's print method.\n def print(self):\n self.grid._print(self.get_cell_state)\n" }, { "alpha_fraction": 0.6766729354858398, "alphanum_fraction": 0.7460913062095642, "avg_line_length": 31.632652282714844, "blob_id": "e1d732c34baa88ae5eba8c74a8767d6910f44001", "content_id": "c83bead26bf4ac0c8c6aa852cb954dc29da0d94a", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1599, "license_type": "no_license", "max_line_length": 277, "num_lines": 49, "path": "/README.md", "repo_name": "lukasschwab/trigol", "src_encoding": "UTF-8", "text": "<img width=\"1268\" alt=\"Screen Shot 2020-01-05 at 6 44 17 PM\" src=\"https://user-images.githubusercontent.com/4955943/71791916-a00c2200-302e-11ea-9ed8-0a3cb4997443.png\">\n\n# trigol\n\nPlaying around with triangle-tesselated toroidal Game of Life scenarios because I'm reading DeLanda's *Philosophy and Simulation.*\n\nUsed [Cellular Automata in the Triangular Tessellation (Bays)](https://wpmedia.wolfram.com/uploads/sites/13/2018/02/08-2-4.pdf) as a reference for standard rulesets.\n\n## Usage\n\nThis is built more for REPL-fiddling than modular usage.\n\n### Standard Game of Life\n\n```python\nimport trigol, grid, evaluators, time\ntest = trigol.GameBoard(\n 30, 5,\n grid_class=grid.QuadGrid,\n evaluator=evaluators.conway_evaluator\n)\n\n# Glider.\ntest.set_multiple_cell_states([11, 15, 20, 21, 22])\nwhile True:\n test.step(); test.print(); time.sleep(0.5)\n```\n\n### Triangle-tesselated Game of Life\n\n```python\nimport trigol, grid, evaluators, time\ntest = trigol.GameBoard(\n 20, 20,\n evaluator=evaluators.tri4644,\n grid_class=grid.TriGrid12\n)\n\n# Glider... with triangles!\ntest.set_multiple_cell_states([45, 46, 65, 66, 65, 86, 104, 105])\nwhile True:\n test.step(); test.print(); time.sleep(0.5)\n```\n\n## Notes\n\n`Grid` can expose `get_polygon_coordinates`; drawing to SVG can look up the coordinates, then write the polygon to SVG.\n\nI'd like to be able to calculate the set of possible predecessor states, sans nosie (given any valid predecessor state in an infinite grid, one can create infinitely many other predecessor grids by adding single live cells way out in the distance that will die upon iteration).\n" } ]
4
lufial-dev/serra-china-web
https://github.com/lufial-dev/serra-china-web
57a728584f093ae1bc5fa84e9e0ddfb120b78f13
44e6ea20b0fad3ee321f5202d08ea591ec698df4
3ec4ca18e913f3b80d79f92afee8826f3dc20989
refs/heads/master
2023-08-06T15:10:30.558340
2020-05-20T00:50:26
2020-05-20T00:50:26
265,405,521
0
0
null
2020-05-20T00:42:14
2020-05-20T00:50:53
2021-09-22T19:03:44
Python
[ { "alpha_fraction": 0.7517447471618652, "alphanum_fraction": 0.7517447471618652, "avg_line_length": 26.83333396911621, "blob_id": "9a452217ec2bad5bf6712df1e023e196de500ed1", "content_id": "2c41fd8230ee9531aa7d33354ead736bc72926e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1003, "license_type": "no_license", "max_line_length": 73, "num_lines": 36, "path": "/dashboard/admin.py", "repo_name": "lufial-dev/serra-china-web", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom dashboard import models\nfrom django.contrib.auth.models import User\n\n\nclass ProdutoAdmin(admin.ModelAdmin):\n model = models.Produto\n list_display = ['nome', 'valor', 'status','id']\n\n\nclass IngredienteAdmin(admin.ModelAdmin):\n model = models.Ingrediente\n list_display = ['nome', 'status']\n\n\nclass ProdutoPedidoAdmin(admin.ModelAdmin):\n model = models.ProdutoPedido\n list_display = ['produto', 'quantidade', 'id']\n\n\nclass PedidoAdmin(admin.ModelAdmin):\n model = models.Pedido\n list_display = ['cliente', 'status', 'dataHoraPedido','id','cliente']\n\nclass UserAdmin(admin.ModelAdmin):\n\tmodel = models.Usuario\n\tlist_display = ['nome','id']\n\n\n\nadmin.site.register(models.Produto, ProdutoAdmin)\nadmin.site.register(models.Ingrediente, IngredienteAdmin)\nadmin.site.register(models.ProdutoPedido, ProdutoPedidoAdmin)\nadmin.site.register(models.Endereco)\nadmin.site.register(models.Usuario, UserAdmin)\nadmin.site.register(models.Pedido, PedidoAdmin)\n\n" }, { "alpha_fraction": 0.7214258313179016, "alphanum_fraction": 0.7220069766044617, "avg_line_length": 28.497142791748047, "blob_id": "e1d46f1ce16ad37b465efca4a37cb46e3e5fa4d0", "content_id": "5ca0b4c2da1bec73a09cb4de2de36e707cff2e2b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5162, "license_type": "no_license", "max_line_length": 111, "num_lines": 175, "path": "/dashboard/views.py", "repo_name": "lufial-dev/serra-china-web", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom dashboard import models\nfrom django.http import HttpResponse\nfrom django.core import serializers\nfrom django.conf import settings\nfrom django.shortcuts import redirect\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import authenticate\n\n\ndef listarProdutosCatalogo(request, init, fim):\n produtos = models.Produto.objects.all().order_by('pk')[init:fim]\n return HttpResponse(serializers.serialize(\"json\", produtos))\n\n\ndef buscarImagem(request, path):\n path = settings.MEDIA_ROOT+path\n return render(request, 'imagem.html', {'path': path})\n\n\ndef listarProdutosFilter(request, nome, ignore):\n produtos = models.Produto.objects.filter(\n nome__contains=nome, pk__gte=ignore+1)\n return HttpResponse(serializers.serialize(\"json\", produtos))\n\n\ndef listarProdutosPorNome(request, nome):\n produtos = models.Produto.objects.filter(nome__contains=nome)\n return HttpResponse(serializers.serialize(\"json\", produtos))\n\n\ndef listarPedidoPorUser(request, id):\n pedidos = models.Pedido.objects.filter(cliente=id)\n return HttpResponse(serializers.serialize(\"json\", pedidos))\n\n\ndef listarProdutosPorId(request, id):\n produtos = models.Produto.objects.filter(id=id)\n return HttpResponse(serializers.serialize(\"json\", produtos))\n\ndef listarPorIdProduto(request, id):\n produtos = models.Produto.objects.filter(id=id)\n return HttpResponse(serializers.serialize(\"json\", produtos))\n\n\ndef 
listarProdutoPedidoPorId(request, id):\n    produto = models.ProdutoPedido.objects.filter(id=id)\n    return HttpResponse(serializers.serialize(\"json\", produto))\n\n\ndef contarProdutos(request):\n    quant = models.Produto.objects.all().count()\n    jsn = '[{\"quantidade\" : \"'+str(quant)+'\"}]'\n    return HttpResponse(jsn)\n\n\ndef contarProdutosFilter(request, nome, ignore):\n    quant = models.Produto.objects.filter(\n        nome__contains=nome, pk__gte=ignore+1).count()\n    jsn = '[{\"quantidade\" : \"'+str(quant)+'\"}]'\n    return HttpResponse(jsn)\n\n\ndef listarIngredientePorId(request, id):\n    ingreadientes = models.Ingrediente.objects.filter(id=id)\n    return HttpResponse(serializers.serialize(\"json\", ingreadientes))\n\n\ndef listarUsuarioPorEmail(request, email):\n    usuarios = User.objects.filter(username=email)\n    return HttpResponse(serializers.serialize(\"json\", usuarios))\n\n\ndef listarUsuarioPorId(request, id):\n    usuarios = models.Usuario.objects.filter(pk=id)\n    return HttpResponse(serializers.serialize(\"json\", usuarios))\n\n\ndef listarEnderecoPorId(request, id):\n    enderecos = models.Endereco.objects.filter(pk=id)\n    return HttpResponse(serializers.serialize(\"json\", enderecos))\n\n# add\n\n\ndef adicionarUsuario(request, nome, email, senha, contato):\n    user = User()\n    user.username = email\n    user.set_password(senha)  # hash the password; assigning it raw would make authenticate() always fail\n\n    user.save()\n\n    usuario = models.Usuario()\n    usuario.user = user\n    usuario.contato = contato\n    usuario.nome = nome\n    usuario.status = \"Ativo\"\n\n    usuario.save()\n\n    usuarios = models.Usuario.objects.filter(user=usuario.user)\n\n    return HttpResponse(serializers.serialize(\"json\", usuarios))\n\n\ndef adicionarEndereco(request, usuario, bairro, rua, numero, referencia):\n    usuario = models.Usuario.objects.get(id=usuario)\n\n    endereco = models.Endereco()\n    endereco.bairro = bairro\n    endereco.rua = rua\n    endereco.numero = numero\n    endereco.referencia = referencia\n\n    endereco.save()\n\n    usuario.enderecos.add(endereco)\n\n    usuario.save()\n\n    return HttpResponse('[{\"status\":\"sucesso\"}]')\n\n\ndef addPedido(request, formaPagamento, status, cliente, endereco, dataHoraEntrega, dataHoraPedido, valorTotal):\n    pedido = models.Pedido()\n    pedido.cliente = models.Usuario.objects.get(id=cliente)\n    pedido.Endereco = models.Endereco.objects.get(id=endereco)\n    pedido.dataHoraEntrega = dataHoraEntrega\n    pedido.dataHoraPedido = dataHoraPedido\n    pedido.status = status\n    pedido.ValorTotal = float(valorTotal)\n    pedido.formaPagamento = formaPagamento\n    pedido.save()\n    return HttpResponse('[{\"status\":\"sucesso\"}]')\n\n\ndef addProdutoPedido(request, quantidade, produtoId):\n    produtoPedido = models.ProdutoPedido()\n    produto = models.Produto.objects.get(id=produtoId)\n    pedido = models.Pedido.objects.all().order_by(\"-id\")[0]\n\n    produtoPedido.produto = produto\n    produtoPedido.quantidade = quantidade\n    produtoPedido.save()\n\n    pedido.produtosPedidos.add(produtoPedido)\n    pedido.save()\n\n    return HttpResponse('[{\"status\":\"sucesso\"}]')\n\n\ndef editarEndereco(request, id, bairro, rua, numero, referencia):\n    endereco = models.Endereco.objects.get(id=id)\n\n    endereco.bairro = bairro\n    endereco.rua = rua\n    endereco.numero = numero\n    endereco.referencia = referencia\n\n    endereco.save()\n\n    return HttpResponse('[{\"status\":\"sucesso\"}]')\n\n\n# authenticate\n\ndef autenticar(request, email, senha):\n    try:\n        user = authenticate(request, username=email, password=senha)\n        usuarios = models.Usuario.objects.filter(user=user.id)\n        return HttpResponse(serializers.serialize(\"json\", usuarios))\n    except:\n        s = 
\"[{\"\n s += '\"error\":\"error\"}]'\n return HttpResponse(s)\n" } ]
2
LalithaJetty/Python
https://github.com/LalithaJetty/Python
ad15f114770ce4cb505fd99bd33e5e1ef859f44a
f369441f8dbe8b8c0c129d7f5e00555684c83c2c
b6ac6dd227060882125576a432aef108793ad5b4
refs/heads/master
2018-09-30T04:28:47.518355
2018-07-27T18:18:54
2018-07-27T18:18:54
136,514,714
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5955055952072144, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 25.176469802856445, "blob_id": "fe0565fcb9f400f447c6ba6a39515b9c31b8e674", "content_id": "2ded891f2e1f53d5cff41d4ee5d58e43814e0f1b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 445, "license_type": "no_license", "max_line_length": 54, "num_lines": 17, "path": "/ICP1/Source/Excercise3.py", "repo_name": "LalithaJetty/Python", "src_encoding": "UTF-8", "text": "import random\n\nnumber = random.randint(0, 9)\nprint(\"random number picked is\", number)\n\nguess = int(input(\"Guess the number: \"))\nn=True\nwhile(n):\n if (number == guess):\n print(\"your guess is correct!\")\n n=False\n elif (number < guess):\n print(\"your number is greater\")\n guess = int(input(\"Guess the number again: \"))\n else:\n print(\"your number is less\")\n guess = int(input(\"Guess the number: \"))\n" }, { "alpha_fraction": 0.6741682887077332, "alphanum_fraction": 0.6819961071014404, "avg_line_length": 39.05882263183594, "blob_id": "9a09c3770258327b3cae4a95708740072fa57cdc", "content_id": "969f2a0d4f36c36d53c1af59838e21e5d12aaf19", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2048, "license_type": "no_license", "max_line_length": 427, "num_lines": 51, "path": "/Lab2/Sourcecode/Ex_3.py", "repo_name": "LalithaJetty/Python", "src_encoding": "UTF-8", "text": "from nltk.util import ngrams\nimport nltk\nfrom nltk.tokenize import sent_tokenize, word_tokenize\nfrom nltk.stem import WordNetLemmatizer\n\nfw = open('sample.txt','w')\nfw.write('Machine learning is a subset of artificial intelligence in the field of computer science \\n')\nfw.write('Machine learning algorithms find natural patterns in data that generate insight and help you make better decisions and predictions.\\n')\nfw.write('Machine learning is a data analytics technique that teaches computers to do what comes naturally to humans and animals: learn from experience.' 
'\\nMachine learning algorithms use computational methods to โ€œlearnโ€ information directly from data without relying on a predetermined equation as a model.'' \\nThe algorithms adaptively improve their performance as the number of samples available for learning increases.\\n')\nfw.close()\n\nfr = open(\"sample.txt\",\"r\")\ntext = fr.read()\nprint(text)\nfr.close()\nprint(sent_tokenize(text))\nwords = word_tokenize(text)\n############################\nprint(words)\nlemmatizers = ['NOUN LEMMATIZER', 'VERB LEMMATIZER']\nlemmatizer_wordnet = WordNetLemmatizer()\n\nformatted_row = '{:>24}' * (len(lemmatizers) + 1)\nprint('\\n', formatted_row.format('WORD', *lemmatizers), '\\n')\nfor word in words:\n lemmatized_words = [lemmatizer_wordnet.lemmatize(word, pos ='n'),\n lemmatizer_wordnet.lemmatize(word, pos = 'v')]\n print(formatted_row.format(word, *lemmatized_words))\n\n\nprint(\"******************** bigrams part************************\")\n\nbigrams= list(ngrams(words,2))\nprint(bigrams)\n\n\nfrequent_bigram = nltk.FreqDist(bigrams)\na = frequent_bigram.most_common(5)\nprint(\"top 5 bigrams in the given list of bigrams are\\n\",a)\n\nconcaten = \"\"\nfor i in a:\n p = i[0][0]\n q = i[0][1]\n with open('sample.txt',encoding = \"utf-8\")as f:\n for line in f.readlines():\n words = line.strip().split()\n for word1,word2 in zip(words,words[1:]):\n if word1==p and word2==q:\n concaten = concaten + line\nprint(concaten)\n\n" }, { "alpha_fraction": 0.49508196115493774, "alphanum_fraction": 0.5180327892303467, "avg_line_length": 24.41666603088379, "blob_id": "7d053534cd0279899bcce94b04bee257c50d51a1", "content_id": "80506fdcf04bd3b5c34c04b536403b2b2666244d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 305, "license_type": "no_license", "max_line_length": 63, "num_lines": 12, "path": "/Lab1/Source Code/Question2a.py", "repo_name": "LalithaJetty/Python", "src_encoding": "UTF-8", "text": "\ndef myfunc():\n m = input(\"Enter the sentence: \")\n p = m.split()\n n = len(m.split())\n print(\"number of words\", n)\n remainder = n % 2\n if remainder == 1:\n print(\"middle word is \\n\",p[int((n-1)/2)])\n else:\n print(\"middle words are\\n\",p[int((n-2)/2)],p[int(n/2)])\n\nmyfunc()" }, { "alpha_fraction": 0.6274510025978088, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 20.85714340209961, "blob_id": "ff8b1b50cbaa4b622420d4b51fc987a8a17a9f1a", "content_id": "6d31e4f1bc67010c5929b5673bbad4385fec9161", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 153, "license_type": "no_license", "max_line_length": 40, "num_lines": 7, "path": "/ICP1/Source/Excercise2b.py", "repo_name": "LalithaJetty/Python", "src_encoding": "UTF-8", "text": "num1 = input(\"Enter a number Dividend:\")\nnum2= input(\"Enter a number Divisor:\")\n\nQ = int(num1)/int (num2)\nR = int(num1)%int(num2)\nprint(int(Q))\nprint(R)\n" }, { "alpha_fraction": 0.5427286624908447, "alphanum_fraction": 0.5652173757553101, "avg_line_length": 50.38461685180664, "blob_id": "4327094d96c476ca53cad786980fdc20547bdd77", "content_id": "76d4d00824011cfe5b24ff655503099fc5affada", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 667, "license_type": "no_license", "max_line_length": 81, "num_lines": 13, "path": "/Lab1/Source Code/Question3.py", "repo_name": "LalithaJetty/Python", "src_encoding": "UTF-8", "text": "def func(a,n):# function define\n found = False # here by default 
the found flag is set to False, meaning no triplet has been found yet\n    for p in range(0, n-2): # p runs from 0 to n-3\n        for q in range(p +1, n - 1):# q runs from p+1 to n-2\n            for r in range(q +1, n):# r runs from q+1 to n-1\n                if (a[p] +a[q] + a[r] ==0): # if the sum is zero\n                    print(a[p],a[q],a[r])# print that triplet\n                    found = True# mark that at least one triplet was found\n    if(found == False):# if no triplet was found, report it\n        print(\"no triplets\")\na = [0,2,4,-1,-5,-3,3]\nn = len(a)\nfunc(a,n)" },
{ "alpha_fraction": 0.7282485961914062, "alphanum_fraction": 0.7338982820510864, "avg_line_length": 31.759260177612305, "blob_id": "55c3905e246b8df3939fad53a741359dcf5f92fb", "content_id": "a032d49407c1b6eb67aaf926e5c15052bee6f7c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1770, "license_type": "no_license", "max_line_length": 128, "num_lines": 54, "path": "/ICP 6/Excercise1.py", "repo_name": "LalithaJetty/Python", "src_encoding": "UTF-8", "text": "\nfrom sklearn import datasets, metrics\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.metrics import confusion_matrix\nimport numpy as np\n\n# Loading the dataset\nirisdataset = datasets.load_iris()\n\n# getting the data and response of the dataset\nx = irisdataset.data\ny = irisdataset.target\n\n# Splitting the dataset into the Training set and Test set\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)\n\n# Fitting the Naive Bayes classifier to the training set\nmodel = GaussianNB()\nmodel.fit(x_train, y_train)\n\n# Predicting the Test set results\ny_pred = model.predict(x_test)\n# printing the predicted values (y_pred) for the test data (x_test)\n#print(x_test, y_pred)\n\n\n# let's see the actual and predicted values side by side: actual on the left, predicted on the right\ny_compare = np.vstack((y_test,y_pred)).T\n\n#printing the top 10 values\nprint(y_compare[:10,:])\n\n#Printing the accuracy of the model using metrics module from sklearn\nprint('\\n\\n Accuracy of the model calculated on test data is ', metrics.accuracy_score(y_test, y_pred))\n\n\n# Calculating confusion matrix\ncm = confusion_matrix(y_test, y_pred)\nprint('\\n\\n Confusion Matrix\\n', cm)\n\n#finding accuracy from the confusion matrix.\na = cm.shape\ncorrPred = 0\nfalsePred = 0\n\nfor row in range(a[0]):\n    for c in range(a[1]):\n        if row == c:\n            corrPred +=cm[row,c]\n        else:\n            falsePred += cm[row,c]\nprint('\\nCorrect predictions from confusion matrix: ', corrPred)\nprint('\\nFalse predictions from confusion matrix', falsePred)\nprint ('\\n\\nAccuracy of the Naive Bayes Classification using the confusion matrix: ', corrPred/(cm.sum()))\n" },
{ "alpha_fraction": 0.7397137880325317, "alphanum_fraction": 0.7516340017318726, "avg_line_length": 32.90909194946289, "blob_id": "8935943e3508c7f2fa08a4de087abab39d00d8a9", "content_id": "5ad9f4bbb2a3cdbfd38ff4308aad1a58947d0fc4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1118, "license_type": "no_license", "max_line_length": 95, "num_lines": 33, "path": "/Lab2/Sourcecode/Ex_2.py", "repo_name": "LalithaJetty/Python", "src_encoding": "UTF-8", "text": "from sklearn import datasets\n# import data\niris = datasets.load_iris()\nX = iris.data[:, :]\ny = iris.target\n\n# Splitting the data into the Training data set and Test data set\nfrom sklearn.cross_validation import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 123)\n\n# Fitting SVM to the Training set\nfrom sklearn.svm import SVC\nclassifier_linear = SVC(kernel = 'linear', random_state=123)\nclassifier_linear.fit(X_train, y_train)\n\n# Predicting the Test set results\ny_predicted_linear = classifier_linear.predict(X_test)\n\n#Accuracy of linear model\nfrom sklearn.metrics import accuracy_score\nprint(\"\\nAccuracy of the model when we use the linear kernel\")\nprint(accuracy_score(y_test, y_predicted_linear))\n\n# Fitting SVM to the Training set\nclassifier_RBF = SVC(kernel = 'rbf', random_state = 123, gamma = 1.5)\nclassifier_RBF.fit(X_train, y_train)\n\n# Predicting the Test set results\ny_predicted_rbf = classifier_RBF.predict(X_test)\n\n#Accuracy of RBF\nprint(\"\\nAccuracy of the model when we are using the RBF kernel\")\nprint(accuracy_score(y_test, y_predicted_rbf))" },
{ "alpha_fraction": 0.6428571343421936, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 25.176469802856445, "blob_id": "72d5b1ed5b3f57548229274e30184a9e51a77bc0", "content_id": "8a2698ff22ed0616a81433f0c5754a5a5738504b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 426, "license_type": "no_license", "max_line_length": 84, "num_lines": 16, "path": "/ICP4/Excercise1a.py", "repo_name": "LalithaJetty/Python", "src_encoding": "UTF-8", "text": "class Employee:\n    count = 0\n\n    def __init__(self, first, last):\n        self.firstName = first\n        self.lastName = last\n\n        Employee.count += 1\n\n\nemployee1 = Employee(\"Lalitha\", \"Jetty\")\nemployee2 = Employee(\"Raj\", \"Jetty\")\nemployee3 = Employee(\"Raja\",\"J\")\n#employee3 = employee1 would only create an alias to an existing employee, so the count would not change\n\nprint(\"Number of employees after instantiation is\", employee1.count)\n\n" },
{ "alpha_fraction": 0.4974226951599121, "alphanum_fraction": 0.5025773048400879, "avg_line_length": 23.1875, "blob_id": "7d9017e43cea6c82faccc37c851b3943f4a84a9e", "content_id": "2d7917ef0f0be6c82faccc37c851b3943f4a84a9e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 388, "license_type": "no_license", "max_line_length": 52, "num_lines": 16, "path": "/ICP2/Source/ICP2_Excercise4.py", "repo_name": "LalithaJetty/Python", "src_encoding": "UTF-8", "text": "def board_draw(height,width):\n    i=1\n    for j in range(height):\n        print(\" __ \" * width)\n        if i<= height :\n            for k in range(height):\n                print(\"| \" * (width+1))\n    print(\" __ \" * width)\n\n\nif __name__ == \"__main__\":\n\n    height = int(input(\"Height of the gameboard? \"))\n    width = int(input(\"Width of the gameboard\"))\n\n    board_draw(height,width)\n\n" },
{ "alpha_fraction": 0.7092264890670776, "alphanum_fraction": 0.7110903859138489, "avg_line_length": 25.571428298950195, "blob_id": "f1d4ec3140f02e4325d7b0cbd9408c887471dfaa", "content_id": "1f57d647cea86e82106c2443be9506209989e3f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1073, "license_type": "no_license", "max_line_length": 74, "num_lines": 42, "path": "/ICP 7/Trigram.py", "repo_name": "LalithaJetty/Python", "src_encoding": "UTF-8", "text": "import urllib.request\nfrom bs4 import BeautifulSoup\nimport nltk\nfrom nltk.tokenize import word_tokenize\nnltk.download('punkt')\nfrom nltk.util import trigrams\n\nurl = \"https://en.wikipedia.org/wiki/Python_(programming_language)\"\nhtml = urllib.request.urlopen(url).read()\nsoup = BeautifulSoup(html, \"html.parser\")\n\n\n# kill all script and style elements\nfor script in soup([\"script\", \"style\", '[document]', 'head', 'title']):\n    script.extract()  # rip it out\n\n# get text\ntext = soup.get_text()\n\n# break into lines and remove leading and trailing space on each\nlines = (line.strip() for line in text.splitlines())\n# break multi-headlines into a line each\nchunks = (phrase.strip() for line in lines for phrase in line.split(\" \"))\n# drop blank lines\ntext = '\\n'.join(chunk for chunk in chunks if chunk)\nprint(text.encode('utf-8'))\n\n\n#Saving the input to input.txt file\n\nf = open(\"input.txt\", \"w+\")\nfor line in text:\n    f.write(line)\nf.close()\n\n\n#trigram Output\ninput=open('input.txt','r')\nline =input.read()\ntoken= word_tokenize(line)\nfor x in trigrams(token):\n    print(x)" },
{ "alpha_fraction": 0.48556429147720337, "alphanum_fraction": 0.5013123154640198, "avg_line_length": 21.235294342041016, "blob_id": "f366ee473764ae2b00a4729afa93a645e05b5d3e", "content_id": "c1d7f6ec030b341ce7de091ab085963f97451931", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 381, "license_type": "no_license", "max_line_length": 41, "num_lines": 17, "path": "/ICP2/Source/ICP2_Excercise2.py", "repo_name": "LalithaJetty/Python", "src_encoding": "UTF-8", "text": "\nfname = input(\"Enter file name: \")\n\nfile = open(\"output.txt\", \"w+\")\n\nnum_char = 0\n\nwith open(fname, 'r') as f:\n    for line in f:\n        for i in line:\n            num_char = num_char + 1\n        strNumChar = str(num_char -1)\n        file.write(line)\n        file.write(strNumChar)\n        file.write(\"\\n\")\n        print(line , num_char-1)\n        num_char = 0\n        i=0\n\n\n" },
{ "alpha_fraction": 0.6318874359130859, "alphanum_fraction": 0.6412661075592041, "avg_line_length": 31.846153259277344, "blob_id": "8956b2d7053bee46ec44c8b5361bf6217c46717a", "content_id": "83da2b7f67090b088bda50f23ebccab26dd78b66", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 853, "license_type": "no_license", "max_line_length": 105, "num_lines": 26, "path": "/Lab1/Source Code/Question1.py", "repo_name": "LalithaJetty/Python", "src_encoding": "UTF-8", "text": "#import regular expression\nimport re\n#variable pwd is loaded with the password entered by the user\npwd= input(\"Enter your password:\\n\")\n#loop while f is True\nf = True\nwhile f:\n    if (len(pwd)<6 or len(pwd)>16): # it checks whether the given password is shorter than 6 chars or longer than 16\n        break\n    elif not re.search(\"[a-z]\",pwd): # it searches the password for lower case letters\n        break\n    elif not re.search(\"[0-9]\",pwd): #it searches the password for numbers\n        break\n    elif not re.search(\"[A-Z]\",pwd): # it searches the password for upper case letters\n        break\n    elif not re.search(\"[$@!*]\",pwd): # it searches the password for special characters\n        break\n    elif re.search(\"\\s\",pwd): # it searches the password for spaces\n        break\n    else:\n        print(\"Valid Password\")\n        f=False\n        break\n\nif f:\n    print(\"invalid password\")" },
{ "alpha_fraction": 0.6704609990119934, "alphanum_fraction": 0.6858280897140503, "avg_line_length": 44.07692337036133, "blob_id": "3c19847a76dc608f089c965ada07fafa4881e5c4", "content_id": "f884dee82bc47ed68a16cd1d6d3ac2775d1de721", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1757, "license_type": "no_license", "max_line_length": 228, "num_lines": 39, "path": "/Lab2/Sourcecode/Ex_1.py", "repo_name": "LalithaJetty/Python", "src_encoding": "UTF-8", "text": "#Importing csv, numpy, lda\nimport csv\nimport numpy as np\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\n\nx_axis = []\ny_axis = []\n# Writing the data into x_axis and y_axis\nwith open('lab2data.csv') as f:\n    csvfile = csv.reader(f, delimiter=',')\n    next(csvfile)\n    for line in csvfile:\n        temp = float(line[0])\n        relative_humidity = float(line[1])\n        wind = float(line[2])\n        precipitation = float(line[3])\n        area = 1 if float(line[4]) > 0 else 0\n        x_axis.append([temp, relative_humidity, wind, precipitation])\n        y_axis.append(area)\n# Converting the data in x_axis and y_axis into numpy array\nnp_x = np.array(x_axis)\nnp_y = np.array(y_axis)\n# Performing LDA\nmodel = LinearDiscriminantAnalysis()\nmodel.fit(np_x, np_y)\n# prediction that rainfall may occur\ntemp = 39; relative_humidity = 86; wind = 7 ; precipitation = 0.3\nprint(\"\\n\\nWith the following weather conditions: \\ntemperature [%f] in Celsius, relative humidity [%f] percent, \\n wind speed [%f] in km/h, and precipitation [%f] in mm/m2\" % (temp, relative_humidity, wind, precipitation))\nif model.predict([[temp, relative_humidity, wind, precipitation]])[0]:\n    print(\"\\nRain fall may occur.\")\nelse:\n    print(\"\\nRain fall may not occur.\")\n# prediction that rainfall may not occur\ntemp = 4; relative_humidity = 29; wind = 0.7; precipitation = 5.4\nprint(\"\\n\\nWith the following weather conditions: \\ntemperature [%f] in Celsius, relative humidity [%f] percent, wind speed [%f] in km/h,\\n and precipitation [%f] in mm/m2\" % (temp, relative_humidity, wind, precipitation))\nif model.predict([[temp, relative_humidity, wind, precipitation]])[0]:\n    print(\"\\nRain fall may occur.\")\nelse:\n    print(\"\\nRain fall may not occur.\")" },
{ "alpha_fraction": 0.7195122241973877, "alphanum_fraction": 0.7217295169830322, "avg_line_length": 40.04545593261719, "blob_id": "55be75f3fb951092acc328d2ed1bcc23ba8ac013", "content_id": "c8306e9d952cd680f6ee71c3ed4fc7128b4ebdcd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 902, "license_type": "no_license", "max_line_length": 94, "num_lines": 22, "path": "/ICP3/Ex2.py", "repo_name": "LalithaJetty/Python", "src_encoding": "UTF-8", "text": "from bs4 import BeautifulSoup\nimport urllib\nfrom urllib import urlopen\nimport os\n# define the variable and put the link\nurl=\"https://en.wikipedia.org/wiki/List_of_state_and_union_territory_capitals_in_India\"\nsource_code = urllib.urlopen(url).read()\nplain_text = source_code\n#Parse the source code using the Beautiful Soup library and save the parsed code in a variable\nsoup = 
BeautifulSoup(plain_text, \"html.parser\")\n# print the title and the tags\nprint(soup.title.string)\nfor link in soup.find_all('a'):\n print(link.get('href'))\n#detected the table and extracted the td and th elements by iterarting over tr attribute\ntable = soup.find('table', {'class': \"wikitable sortable plainrowheaders\"})\nfor row in table.find_all('tr')[1:]:\n columns = row.find_all('td')\n header = row.find('th')\n for column in columns:\n print(\"<td's>: %s\"%(column.text))\n print(\"<th's>: %s\"%(header.text))" }, { "alpha_fraction": 0.7153846025466919, "alphanum_fraction": 0.7153846025466919, "avg_line_length": 25.100000381469727, "blob_id": "c36d469e8b516b864bfc38d811e43dd9f1227fd7", "content_id": "c605c4daaffa903ae1997033748c8300853ff03d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 260, "license_type": "no_license", "max_line_length": 54, "num_lines": 10, "path": "/ICP 7/Named Entity Recognition.py", "repo_name": "LalithaJetty/Python", "src_encoding": "UTF-8", "text": "import nltk\nnltk.download('maxent_ne_chunker')\nnltk.download('words')\nfrom nltk import pos_tag, ne_chunk, wordpunct_tokenize\n\nf = open('input.txt','r')\ntext = f.readline()\nwhile text!='':\n print(ne_chunk(pos_tag(wordpunct_tokenize(text))))\n text = f.readline()" }, { "alpha_fraction": 0.5950000286102295, "alphanum_fraction": 0.5950000286102295, "avg_line_length": 17.18181800842285, "blob_id": "56e4b2b3c9e476960196792daf399b0363c689f9", "content_id": "79f0c1685b02e06961d1b5639318737604ff71c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 200, "license_type": "no_license", "max_line_length": 55, "num_lines": 11, "path": "/ICP1/Source/Excercise2.py", "repo_name": "LalithaJetty/Python", "src_encoding": "UTF-8", "text": "s = input(\"Enter the name of user: \")\nstr = \"\"\n\nprint(\"The original string is : \", end=\"\")\nprint(s)\n\nfor i in s:\n str = i + str\n\nprint(\"The reversed string(using loops) is : \", end=\"\")\nprint(str)\n" }, { "alpha_fraction": 0.7472766637802124, "alphanum_fraction": 0.7516340017318726, "avg_line_length": 37.25, "blob_id": "02cfa1e43c3de68cc2f4d7311e216314839d8a2f", "content_id": "0df5c8dc80cbe62d9db85b52c725dbdedd2f0ef9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 459, "license_type": "no_license", "max_line_length": 74, "num_lines": 12, "path": "/Lab1/Source Code/Question2b.py", "repo_name": "LalithaJetty/Python", "src_encoding": "UTF-8", "text": "#defining the variable\ndef Largest_word(Sentence):\n#the words in the sentence are splitted and stored in the variable \"large\"\n large = Sentence.split()\n#sort the words present in the \"large\" variable according to the length\n large.sort(key=len, reverse=True)\n#return the word which has index 0\n return large[0]\n#take the input from the user\nSentence = input(\"Enter a sentence\\n\")\n#print the largest word\nprint(\"Largest Word:\",Largest_word(Sentence))\n" }, { "alpha_fraction": 0.6428571343421936, "alphanum_fraction": 0.6428571343421936, "avg_line_length": 12, "blob_id": "ec6c9119149d8a2ef7b77547682fe92f3cae538b", "content_id": "04c8a90ac33bfffb246b229379c97956a3327898", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 14, "license_type": "no_license", "max_line_length": 12, "num_lines": 1, "path": "/README.md", "repo_name": "LalithaJetty/Python", "src_encoding": "UTF-8", 
"text": "# Python ICP \n" }, { "alpha_fraction": 0.48677247762680054, "alphanum_fraction": 0.5788359642028809, "avg_line_length": 23.789474487304688, "blob_id": "6b46fc858c92773fac8e13356b3e051bbf44f0f8", "content_id": "b7902ba0fd33865a317f4e669595d9c0a4745ecf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 945, "license_type": "no_license", "max_line_length": 59, "num_lines": 38, "path": "/DL_ICP1/DL_ICP1.py", "repo_name": "LalithaJetty/Python", "src_encoding": "UTF-8", "text": "import tensorflow as tf\n\nprint(tf.__version__)\n\n#Passing the input values in to 3 matrices\n\n#a = tf.constant([1, 2, 3, 4, 5, 6, 7, 8, 9], shape=[3, 3])\n#b = tf.constant([1, 2, 3, 4, 5, 6, 7, 8, 9], shape=[3, 3])\n#c = tf.constant([1, 2, 3, 4, 5, 6, 7, 8, 9], shape=[3, 3])\n\na = tf.constant([1, 1, 1, 1, 1, 1, 1, 1, 1], shape=[3, 3])\nb = tf.constant([1, 1, 1, 1, 1, 1, 1, 1, 1], shape=[3, 3])\nc = tf.constant([1, 1, 1, 1, 1, 1, 1, 1, 1], shape=[3, 3])\n\n#creating session to calculate a square\na_2 = tf.pow(a,2)\n#creating session to calculate a square and add it to b\na_2_b = tf.add(a_2, b)\n#creating session to calculate total product\na_2_b_c = tf.matmul(a_2_b, c)\n\n# calling the sessions\nwith tf.Session() as sess:\n a_2 = sess.run(a_2)\n a_2_b = sess.run(a_2_b)\n a_2_b_c = sess.run(a_2_b_c)\n\n#Printing a suare value\nprint(\"a2\")\nprint(a_2)\n\n#Printing a2+b\nprint(\"a2+b\")\nprint(a_2_b)\n\n#printing (a2+b)*c\nprint(\"(a2+b)*c\")\nprint(a_2_b_c)\n\n\n\n" }, { "alpha_fraction": 0.5642256736755371, "alphanum_fraction": 0.5882353186607361, "avg_line_length": 32.31999969482422, "blob_id": "b103de4224957fd2ba022595cdacdc5881390fce", "content_id": "40a55ceec6852f9b6db66325119814e5c3048751", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 833, "license_type": "no_license", "max_line_length": 131, "num_lines": 25, "path": "/ICP4/Excercise1d_1e.py", "repo_name": "LalithaJetty/Python", "src_encoding": "UTF-8", "text": "class Employee():\n 'Common base class for all employees'\n empCount = 0\n\n def __init__(self, eid, name, salary, did):\n self.eid = eid\n self.name = name\n self.salary = salary\n self.did = did\n\n def displayEmployee(self):\n print(\"eid : \", self.eid, \", Name : \", self.name, \", Salary: \", self.salary, \", did: \", self.did)\n\n\nclass FullTimeEmp(Employee):\n def __init__(self, eid, name, salary, did, exp):\n Employee.__init__(self, eid, name, salary, did)\n self.exp = exp\n def displayEmployee(self):\n print(\"eid : \", self.eid, \", Name : \", self.name, \", Salary: \", self.salary, \", did: \", self.did, \",Experience:\", self.exp)\n\nemp1 = Employee(1, \"Lalitha\", 2000, 10)\nemp2 = FullTimeEmp(2, \"Raj\", 4000, 20, 6)\nprint(emp1.displayEmployee())\nprint(emp2.displayEmployee())\n" }, { "alpha_fraction": 0.7037037014961243, "alphanum_fraction": 0.7055555582046509, "avg_line_length": 26.024999618530273, "blob_id": "16dccd3eeaac3a26e376d0e975a01cc2af350ea8", "content_id": "be18a4027dc9eeb94945fa92b9043d9077abc524", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1080, "license_type": "no_license", "max_line_length": 74, "num_lines": 40, "path": "/ICP 7/Tokenization.py", "repo_name": "LalithaJetty/Python", "src_encoding": "UTF-8", "text": "import urllib.request\nfrom bs4 import BeautifulSoup\nfrom nltk.stem import PorterStemmer\n\nurl = \"https://en.wikipedia.org/wiki/Python_(programming_language)\"\nhtml = 
urllib.request.urlopen(url).read()\nsoup = BeautifulSoup(html, \"html.parser\")\n\n\n# kill all script and style elements\nfor script in soup([\"script\", \"style\", '[document]', 'head', 'title']):\n script.extract() # rip it out\n\n# get text\ntext = soup.get_text()\n\n# break into lines and remove leading and trailing space on each\nlines = (line.strip() for line in text.splitlines())\n# break multi-headlines into a line each\nchunks = (phrase.strip() for line in lines for phrase in line.split(\" \"))\n# drop blank lines\ntext = '\\n'.join(chunk for chunk in chunks if chunk)\nprint(text.encode('utf-8'))\n\n\n#Saving the input to input.txt file\n\nf = open(\"input.txt\", \"w+\")\nfor line in text:\n f.write(line)\nf.close()\n\n\n#tokenization steps on the input.txt file\n\nfile = open('input.txt', 'rt')\nmyTokenInput = file.read()\ntokenOutput = myTokenInput.split()\nprint(\"\\n\\n\",\"tokenized output is: \", \"\\n\", tokenOutput)\nfile.close()" }, { "alpha_fraction": 0.5853658318519592, "alphanum_fraction": 0.5853658318519592, "avg_line_length": 24.75, "blob_id": "a8498873bb2b596e96e9932e714891e8e4a1c387", "content_id": "1e41f96dc079e32d62edd34bb17b87b69857c779", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 205, "license_type": "no_license", "max_line_length": 56, "num_lines": 8, "path": "/ICP2/Source/ICP2_Excercise1.py", "repo_name": "LalithaJetty/Python", "src_encoding": "UTF-8", "text": "l = [\"apple\", \"orange\", \"banana\", \"watermelon\",\"grapes\"]\nnew_list=[]\nfor i in range (len(l)):\n temp = (l[i], len(l[i]))\n new_list.append(temp)\n new_list.sort()\nprint (new_list)\nprint (new_list[i])" }, { "alpha_fraction": 0.7083333134651184, "alphanum_fraction": 0.7102272510528564, "avg_line_length": 28.33333396911621, "blob_id": "fbfaa54c63c88322b9a8ace9b6265e091643a0db", "content_id": "9f784e98fb52a93c3db991667dbd55fba0f93234", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 528, "license_type": "no_license", "max_line_length": 61, "num_lines": 18, "path": "/Lab1/Source Code/Question2c.py", "repo_name": "LalithaJetty/Python", "src_encoding": "UTF-8", "text": "def reverseWordSentence(Sentence):\n\n # Seperate the given sentence into list of words.\n words = Sentence.split(\" \")\n\n # Reversing each word and creating the new list\n newWords = [word[::-1] for word in words]\n\n # Joining the new list of words\n # to for a new Sentence\n newSentence = \" \".join(newWords)\n\n return newSentence\n\n#take the input from the customer\nSentence = input(\"please enter the sentence\\n\")\n# print the result reverse sentence\nprint(\"Reversed sentence is:\", reverseWordSentence(Sentence))\n" }, { "alpha_fraction": 0.5858297944068909, "alphanum_fraction": 0.6077032685279846, "avg_line_length": 34.66101837158203, "blob_id": "0153a90e4e70ac81a2b650e19f05c4b595a30bb0", "content_id": "37257c60212571eaaa8f41d3002a64ad5ef68297", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2103, "license_type": "no_license", "max_line_length": 72, "num_lines": 59, "path": "/Lab1/Source Code/Question5.py", "repo_name": "LalithaJetty/Python", "src_encoding": "UTF-8", "text": "class Public:\n def __init__(self,name,email):\n self.name = name\n self.email = email\n def display(self):\n print(\"Name of the person: \", self.name)\n print(\"Email: \", self.email)\nclass Student(Public):\n StudentCount = 0\n def 
__init__(self,name,email,student_id):\n Public.__init__(self,name,email)\n self.student_id = student_id\n Student.StudentCount +=1\n def displayCount(self):\n print(\"Total Students:\", Student.StudentCount)\n def display(self):\n print(\"Student Details:\")\n Public.display(self)\n print(\"Student Id: \",self.student_id)\nclass Librarian(Public):\n StudentCount = 0\n def __init__(self,name,email,employee_id):\n super().__init__(name,email)\n self.employee_id = employee_id\n def display(self):\n print(\"Employee Details:\")\n Public.display(self)\n print(\"Employee Id: \",self.employee_id)\nclass Book():\n def __init__(self, bname, author, book_id):\n self.book_name = bname\n self.author = author\n self.book_id = book_id\n def display(self):\n print(\"Book Details\")\n print(\"Book_Name: \", self.book_name)\n print(\"Author: \", self.author)\n print(\"Book_ID: \", self.book_id)\nclass Borrow_Book(Student,Book):\n def __init__(self, name, email, student_id, bname, author, book_id):\n Student.__init__(self,name,email,student_id)\n Book.__init__(self, bname, author, book_id)\n def display(self):\n print(\"Borrowed Book Details:\")\n Student.display(self)\n Book.display(self)\nlist1= []\nlist1.append(Student('stu_1', '[email protected]', 1))\nlist1.append(Student('stu_2', '[email protected]', 2))\nlist1.append(Librarian('Librarian_1', '[email protected]', 101))\nlist1.append(Librarian('Librarian_2', '[email protected]', 102))\nlist1.append(Book('ISL', 'James', 123456))\nlist1.append(Book('Regression Analysis', 'kutner', 111111))\nlist1.append(Borrow_Book('stu_1', '[email protected]', 1, 'ISL', 'James', 123456))\nfor obj, item in enumerate(list1):\n item.display()\n print(\"\\n\")\n if obj == len(list1)-1:\n item.displayCount()" }, { "alpha_fraction": 0.7058823704719543, "alphanum_fraction": 0.7058823704719543, "avg_line_length": 41.55555725097656, "blob_id": "8302feee9126f93960187a401f23bb7084ba23d1", "content_id": "65b8b111c4ba1e85c34dba00f6ce9b4bf04a7d28", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 765, "license_type": "no_license", "max_line_length": 76, "num_lines": 18, "path": "/Lab1/Source Code/Question4.py", "repo_name": "LalithaJetty/Python", "src_encoding": "UTF-8", "text": "def students():\n #the student list from the python class\n python=['Lalitha','Raj','Devi','Hari','Harish']\n #student list from the web application class\n webapplication=['Raj','Hari','Rahul']\n #the common students in the bith classes are stored into the varible a\n a=list(set(python).intersection(set(webapplication)))\n\n print('common students in both python and web application are')\n #print a\n print(a)\n #the union operator here joining thr students present in both classes\n b=list(set(python).union(set(webapplication)))\n #it gives the result of students who are not common in both classes\n c=list(set(b).difference(set(a)))\n print('the list of students who are not common in both the classes are')\n print(c)\nstudents()" } ]
25
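The roster comparison in Question4.py above builds "students not common to both classes" as union minus intersection. A minimal sketch of the same result using Python's set operators — the rosters are copied from the original example, the variable names are illustrative:

```python
# Intersection and symmetric difference reproduce the two lists that
# Question4.py derives through intersection(), union() and difference().
python_class = {"Lalitha", "Raj", "Devi", "Hari", "Harish"}
web_class = {"Raj", "Hari", "Rahul"}

common = python_class & web_class      # students in both classes
not_common = python_class ^ web_class  # union minus intersection

print(sorted(common))      # ['Hari', 'Raj']
print(sorted(not_common))  # ['Devi', 'Harish', 'Lalitha', 'Rahul']
```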
azuredark/SUMO_Emissions
https://github.com/azuredark/SUMO_Emissions
0a4e150f8deafdbf12ecb70a7952325470055dcf
84f2656b973eaa8e82b12c5a379e711ccb8b8999
aa7436c56ba758b376a69a03bd5979b77d7cc4f6
refs/heads/master
2023-03-17T21:27:23.847825
2019-02-10T14:30:25
2019-02-10T14:30:25
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5419425964355469, "alphanum_fraction": 0.5461368560791016, "avg_line_length": 35.459678649902344, "blob_id": "1c86027009efc70dd35dfa0cf4c612e81b6d4fe3", "content_id": "48fc6065c1ef164e604898cefb3ece1dbc1d3694", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4530, "license_type": "no_license", "max_line_length": 114, "num_lines": 124, "path": "/sumo_project/data.py", "repo_name": "azuredark/SUMO_Emissions", "src_encoding": "UTF-8", "text": "\"\"\"\nCreated on 17 oct. 2018\n\n@author: Axel Huynh-Phuc, Thibaud Gasser\n\"\"\"\n\n\"\"\"\nThis module is used for loading simulation data \n\"\"\"\n\nimport json\nimport os\nimport traci\nfrom typing import List\n\nimport jsonpickle\nfrom parse import search\nfrom shapely.geometry import LineString\n\nfrom model import Area, Lane, TrafficLight, Phase, Logic\n\n\nclass Data: \n \n def __init__(self, dump_name, map_bounds, areas_number,simulation_dir):\n \"\"\"\n Data constructor\n :param dump_name : The dump name chosen by the user\n :param map_bounds: The bounds of the simulated map \n :param areas_number: The number of areas in line and row chosen by the user \n :param simulation_dir: The directory which contains all files needed for SUMO\n \"\"\"\n self.dump_name = dump_name\n self.map_bounds = map_bounds\n self.areas_number = areas_number\n self.dir = simulation_dir\n \n def init_grid(self):\n \"\"\"\n Initialize the grid of the loaded map from the cfg file with areas_number x areas_number areas\n \"\"\"\n self.grid = list()\n areas_number = self.areas_number\n \n width = self.map_bounds[1][0] / areas_number\n height = self.map_bounds[1][1] / areas_number\n for i in range(areas_number):\n for j in range(areas_number):\n # bounds coordinates for the area : (xmin, ymin, xmax, ymax)\n ar_bounds = ((i * width, j * height), (i * width, (j + 1) * height),\n ((i + 1) * width, (j + 1) * height), ((i + 1) * width, j * height))\n name = 'Area ({},{})'.format(i, j)\n area = Area(ar_bounds, name)\n self.grid.append(area)\n return self.grid\n \n def get_all_lanes(self) -> List[Lane]:\n \"\"\"\n Recover and creates a list of Lane objects\n :return: The lanes list\n \"\"\"\n lanes = []\n for lane_id in traci.lane.getIDList():\n polygon_lane = LineString(traci.lane.getShape(lane_id))\n initial_max_speed = traci.lane.getMaxSpeed(lane_id)\n lanes.append(Lane(lane_id, polygon_lane, initial_max_speed))\n return lanes\n \n def parse_phase(self, phase_repr):\n \"\"\"\n Because the SUMO object Phase does not contain accessors,\n we parse the string representation to retrieve data members.\n :param phase_repr: The Phase string representation\n :return: An new Phase instance\n \"\"\"\n duration = search('duration: {:f}', phase_repr)\n min_duration = search('minDuration: {:f}', phase_repr)\n max_duration = search('maxDuration: {:f}', phase_repr)\n phase_def = search('phaseDef: {}\\n', phase_repr)\n \n if phase_def is None:\n phase_def = ''\n else:\n phase_def = phase_def[0]\n \n \n \n \n return Phase(duration[0], min_duration[0], max_duration[0], phase_def)\n \n def add_data_to_areas(self):\n \"\"\"\n Adds all recovered data to different areas\n :param areas: The list of areas\n :return:\n \"\"\"\n lanes = self.get_all_lanes()\n for area in self.grid:\n for lane in lanes: # add lanes \n if area.rectangle.intersects(lane.polygon):\n area.add_lane(lane)\n for tl_id in traci.trafficlight.getIDList(): # add traffic lights \n if lane.lane_id in traci.trafficlight.getControlledLanes(tl_id):\n 
logics = []\n for l in traci.trafficlight.getCompleteRedYellowGreenDefinition(tl_id): # add logics \n phases = []\n for phase in traci.trafficlight.Logic.getPhases(l): # add phases to logics\n phases.append(self.parse_phase(phase.__repr__()))\n logics.append(Logic(l, phases))\n area.add_tl(TrafficLight(tl_id, logics))\n \n def save(self):\n \"\"\"\n Save simulation data into a json file \n :param dump_name: The name of your data dump\n :return:\n \"\"\"\n dump_dir = f'{self.dir}/dump'\n if not os.path.exists(dump_dir):\n os.mkdir(dump_dir)\n \n s = json.dumps(json.loads(jsonpickle.encode(self)), indent=4) # for pretty JSON \n with open(f'{dump_dir}/{self.dump_name}.json', 'w') as f:\n f.write(s)\n \n" }, { "alpha_fraction": 0.6365578174591064, "alphanum_fraction": 0.6399145722389221, "avg_line_length": 44.83216857910156, "blob_id": "1f443abbeadc4fec43decc8cebeb8572f88b38e2", "content_id": "8a75b2822875a55e838f634597a745055566f4c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6554, "license_type": "no_license", "max_line_length": 124, "num_lines": 143, "path": "/sumo_project/tests/configurator_tests.py", "repo_name": "azuredark/SUMO_Emissions", "src_encoding": "UTF-8", "text": "import io\nimport os\nimport shutil\nimport tempfile\nimport unittest\n\nimport configurator\n\n# Absolute path of the directory the script is in\nSCRIPTDIR = os.path.dirname(__file__)\n\n\nclass TemplateTests(unittest.TestCase):\n def setUp(self):\n self.sim_name = 'test_simulation'\n self.sim_path = '/test_simulation'\n self.log_path = '/test_simulation/log'\n\n def test_load_netconvert_template(self):\n tree = configurator.load_netconvert_template('test.osm', 'test_simulation')\n self.assertEqual(tree.find('input/osm-files').get('value'), 'test.osm')\n self.assertEqual(tree.find('output/output-file').get('value'), f'{self.sim_name}.net.xml')\n self.assertEqual(tree.find('report/log').get('value'), f'{self.sim_name}.netconvert.log')\n\n def test_load_sumoconfig_template_default(self):\n tree = configurator.load_sumoconfig_template(self.sim_name)\n self.assertEqual(tree.find('input/net-file').get('value'), f'{self.sim_name}.net.xml')\n self.assertEqual(tree.find('input/route-files').get('value'), f'{self.sim_name}.rou.xml')\n self.assertEqual(tree.find('report/log').get('value'), f'{self.sim_name}.log')\n\n def test_load_sumoconfig_template_with_polygons(self):\n tree = configurator.load_sumoconfig_template(self.sim_name, generate_polygons=True)\n self.assertEqual(tree.find('input/net-file').get('value'), f'{self.sim_name}.net.xml')\n self.assertEqual(tree.find('input/route-files').get('value'), f'{self.sim_name}.rou.xml')\n self.assertEqual(tree.find('report/log').get('value'), f'{self.sim_name}.log')\n self.assertEqual(tree.find('input/additional-files').get('value'), f'{self.sim_name}.poly.xml')\n\n def test_load_sumoconfig_template_with_routefiles(self):\n routefiles = (f'{self.sim_name}.bus.rou.xml', f'{self.sim_name}.passenger.rou.xml')\n tree = configurator.load_sumoconfig_template(self.sim_name, routefiles)\n self.assertEqual(tree.find('input/net-file').get('value'), f'{self.sim_name}.net.xml')\n self.assertEqual(tree.find('input/route-files').get('value'), ','.join(routefiles))\n self.assertEqual(tree.find('report/log').get('value'), f'{self.sim_name}.log')\n\n def test_load_sumoconfig_template_with_seed(self):\n routefiles = (f'{self.sim_name}.bus.rou.xml', f'{self.sim_name}.passenger.rou.xml')\n tree = 
configurator.load_sumoconfig_template(self.sim_name, routefiles, seed=42)\n self.assertEqual(tree.find('random_number/seed').get('value'), 42)\n\n def test_load_polyconvert_template(self):\n tree = configurator.load_polyconvert_template(\n osm_file=f'{self.sim_name}.osm',\n type_file='typemap/test.typ.xml',\n scenario_name=f'{self.sim_name}'\n )\n self.assertEqual(tree.find('input/osm-files').get('value'), f'{self.sim_name}.osm')\n self.assertEqual(tree.find('input/net-file').get('value'), f'{self.sim_name}.net.xml')\n self.assertEqual(tree.find('input/type-file').get('value'), 'typemap/test.typ.xml')\n self.assertEqual(tree.find('output/output-file').get('value'), f'{self.sim_name}.poly.xml')\n self.assertEqual(tree.find('report/log').get('value'), f'{self.sim_name}.polyconvert.log')\n\n\nclass GenerationTests(unittest.TestCase):\n\n def setUp(self):\n self.base_path = tempfile.mkdtemp()\n self.sim_name = 'test_simulation'\n self.sim_path = os.path.join(self.base_path, self.sim_name)\n self.log_path = os.path.join(self.sim_name, 'log')\n\n def tearDown(self):\n shutil.rmtree(self.base_path)\n\n def test_generate_scenario(self):\n osm_file = os.path.join(SCRIPTDIR, 'sample.osm')\n configurator.generate_scenario(osm_file, self.sim_path, self.sim_name, generate_polygons=False)\n self.assert_is_file(os.path.join(self.sim_path, f'{self.sim_name}.net.xml'))\n\n def test_generate_scenario_with_polygons(self):\n osm_file = os.path.join(SCRIPTDIR, 'sample.osm')\n configurator.generate_scenario(osm_file, self.sim_path, self.sim_name, generate_polygons=True)\n self.assert_is_dir(self.sim_path)\n generated_files = [\n f'{self.sim_name}.poly.xml',\n f'{self.sim_name}.net.xml'\n ]\n for f in generated_files:\n self.assert_is_file(os.path.join(self.sim_path, f))\n\n def test_generate_mobility(self):\n # The scenario must be generated before the mobility\n osm_file = os.path.join(SCRIPTDIR, 'sample.osm')\n trips_file = os.path.join(self.sim_path, f'{self.sim_name}.trips.xml')\n configurator.generate_scenario(osm_file, self.sim_path, self.sim_name)\n classes = {'passenger': 10, 'truck': 1}\n routefiles = configurator.generate_mobility(self.sim_path, self.sim_name, vclasses=classes, end_time=200)\n\n self.assert_is_file(trips_file)\n for f in routefiles:\n self.assert_is_file(os.path.join(self.sim_path, f))\n\n def assert_exists(self, path):\n self.assertTrue(os.path.exists(path), msg=f'{path} does not exist')\n\n def assert_is_file(self, path):\n self.assert_exists(path)\n self.assertTrue(os.path.isfile(path), msg=f'{path} is not a file')\n\n def assert_is_dir(self, path):\n self.assert_exists(path)\n self.assertTrue(os.path.isdir(path), msg=f'{path} is not a directory')\n\n\nclass InputTests(unittest.TestCase):\n def test_commandline(self):\n options = ['--name', 'test-config', '--path', '/some/path', '--vclass', 'passenger=10', 'truck=1', '--', 'test.osm']\n actual_conf = configurator.parse_command_line(options)\n self.assertEqual(actual_conf.name, 'test-config')\n self.assertEqual(actual_conf.osmfile, 'test.osm')\n self.assertEqual(actual_conf.path, '/some/path')\n self.assertEqual(actual_conf.vclasses, {'passenger': '10', 'truck': '1'})\n\n def test_from_config_file(self):\n options = \"\"\"\n {\n \"name\": \"test-config\",\n \"path\": \"/some/path\",\n \"vclasses\": {\n \"passenger\": 10,\n \"truck\": 1\n },\n \"osmfile\": \"test.osm\"\n }\n \"\"\"\n actual_conf = configurator.parse_json(io.StringIO(options))\n self.assertEqual(actual_conf.name, 'test-config')\n self.assertEqual(actual_conf.osmfile, 
'test.osm')\n self.assertEqual(actual_conf.path, '/some/path')\n self.assertEqual(actual_conf.vclasses, {'passenger': 10, 'truck': 1})\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.5401034355163574, "alphanum_fraction": 0.5420780181884766, "avg_line_length": 35.712764739990234, "blob_id": "d8e26ef0a0405e4148328982637fa0ed1df37238", "content_id": "6d5175a15a556930cbb5f26491e6adcc1da419be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10635, "license_type": "no_license", "max_line_length": 134, "num_lines": 282, "path": "/sumo_project/runner.py", "repo_name": "azuredark/SUMO_Emissions", "src_encoding": "UTF-8", "text": "'''\r\nCreated on 19 janv. 2019\r\n\r\n@author: Axel Huynh-Phuc\r\n'''\r\n\r\n\"\"\"\r\nThis module defines the entry point of the application\r\n\"\"\"\r\n\r\nimport argparse\r\nimport csv\r\nimport datetime\r\nimport itertools\r\nimport logging\r\nimport multiprocessing\r\nimport os\r\nimport sys\r\nimport time\r\nimport traci\r\n\r\nimport jsonpickle\r\n\r\nfrom config import Config\r\nfrom data import Data\r\nimport emissions\r\nfrom model import Emission\r\n\r\n\r\n\"\"\"\r\nInit the Traci API\r\n\"\"\"\r\nif 'SUMO_HOME' in os.environ:\r\n tools = os.path.join(os.environ['SUMO_HOME'], 'tools')\r\n sys.path.append(tools)\r\nelse:\r\n sys.exit(\"please declare environment variable 'SUMO_HOME'\")\r\n \r\nclass RunProcess(multiprocessing.Process):\r\n \"\"\"\r\n Run process inheriting from multiprocessing.Process\r\n \"\"\"\r\n \r\n def __init__(self, data: Data, config: Config, save_logs: bool, csv_export: bool):\r\n \"\"\"\r\n RunProcess constructor\r\n :param data: The data instance\r\n :param config: The config instance\r\n :param save_logs: If save_logs == True, it will save the logs into the logs directory \r\n :param csv_export: If csv_export == True, it will export all emissions data into a csv file \r\n \"\"\"\r\n multiprocessing.Process.__init__(self)\r\n self.data = data \r\n self.config = config\r\n self.save_logs = save_logs\r\n self.csv_export = csv_export\r\n \r\n def init_logger(self):\r\n \"\"\"\r\n Init logger properties \r\n \"\"\"\r\n now = datetime.datetime.now()\r\n current_date = now.strftime(\"%Y_%m_%d_%H_%M_%S\")\r\n \r\n logdir = f'{self.data.dir}/logs/'\r\n logging.info(logdir)\r\n if not os.path.exists(logdir):\r\n os.mkdir(logdir)\r\n\r\n conf_name = self.config.config_filename.replace('.json', '')\r\n log_filename = f'{logdir}/{current_date}.log'\r\n\r\n self.logger = logging.getLogger(f'sumo_logger')\r\n self.logger.setLevel(logging.INFO)\r\n formatter = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\r\n\r\n if self.save_logs:\r\n file_handler = logging.FileHandler(log_filename)\r\n file_handler.setFormatter(formatter)\r\n self.logger.addHandler(file_handler)\r\n\r\n handler = logging.StreamHandler()\r\n handler.setFormatter(formatter)\r\n self.logger.addHandler(handler)\r\n \r\n def export_data_to_csv(self):\r\n \"\"\"\r\n Export all Emission objects as a CSV file into the csv directory\r\n \"\"\"\r\n csv_dir = f'{self.data.dir}/csv'\r\n if not os.path.exists(csv_dir):\r\n os.mkdir(csv_dir)\r\n \r\n now = datetime.datetime.now().strftime(\"%Y_%m_%d_%H_%M_%S\")\r\n conf_name = self.config.config_filename.replace('.json', '')\r\n\r\n csvfile = os.path.join(csv_dir, f'{self.data.dump_name}_{conf_name}_{now}.csv')\r\n with open(csvfile, 'w') as f:\r\n writer = csv.writer(f)\r\n # Write CSV headers\r\n 
writer.writerow(itertools.chain(('Step',), (a.name for a in self.data.grid)))\r\n # Write all areas emission value for each step\r\n for step in range(self.config.n_steps):\r\n em_for_step = (f'{a.emissions_by_step[step].value():.3f}' for a in self.data.grid)\r\n writer.writerow(itertools.chain((step,), em_for_step))\r\n \r\n def run(self):\r\n \"\"\"\r\n Launch a simulation, will be called when a RunProcess instance is started\r\n \"\"\"\r\n try:\r\n self.init_logger()\r\n self.logger.info(f'Running simulation dump \"{self.data.dump_name}\" with the config \"{self.config.config_filename}\" ...') \r\n \r\n if self.config.without_actions_mode:\r\n self.logger.info('Reference simulation')\r\n \r\n traci.start(self.config.sumo_cmd)\r\n \r\n for area in self.data.grid: # Set acquisition window size \r\n area.set_window_size(self.config.window_size)\r\n traci.polygon.add(area.name, area.rectangle.exterior.coords, (255, 0, 0)) # Add polygon for UI\r\n \r\n self.logger.info(f'Loaded simulation file : {self.config._SUMOCFG}')\r\n self.logger.info('Loading data for the simulation')\r\n \r\n start = time.perf_counter()\r\n self.logger.info('Simulation started...')\r\n step = 0\r\n while step < self.config.n_steps:\r\n traci.simulationStep()\r\n \r\n vehicles = emissions.get_all_vehicles()\r\n emissions.get_emissions(self, vehicles, step)\r\n step += 1\r\n \r\n print(f'step = {step}/{self.config.n_steps}', end='\\r')\r\n \r\n finally:\r\n traci.close(False)\r\n \r\n total_emissions = Emission()\r\n for area in self.data.grid:\r\n total_emissions += area.sum_all_emissions()\r\n \r\n self.logger.info(f'Total emissions = {total_emissions.value()} mg')\r\n for pollutant in ['co2','co','nox','hc','pmx']:\r\n value = total_emissions.__getattribute__(pollutant)\r\n self.logger.info(f'{pollutant.upper()} = {value} mg')\r\n \r\n simulation_time = round(time.perf_counter() - start, 2)\r\n self.logger.info(f'End of the simulation ({simulation_time}s)')\r\n \r\n # 1 step is equal to one second simulated\r\n self.logger.info(f'Real-time factor : {self.config.n_steps / simulation_time}')\r\n \r\n if self.csv_export:\r\n self.export_data_to_csv()\r\n self.logger.info(f'Exported data into the csv folder')\r\n \r\ndef create_dump(dump_name, simulation_dir, areas_number):\r\n \"\"\"\r\n Create a new dump with config file and dump_name chosen \r\n :param dump_name: The name of the data dump\r\n :param simulation_dir: The simulation directory \r\n :param areas_number: The number of areas in grid \r\n :return:\r\n \"\"\"\r\n \r\n #sumo_binary = os.path.join(os.environ['SUMO_HOME'], 'bin', 'sumo')\r\n #sumo_cmd = [sumo_binary, \"-c\", f'files/simulations/{simulation_dir}/osm.sumocfg']\r\n \r\n for f in os.listdir(simulation_dir):\r\n if f.endswith('.sumocfg'):\r\n _SUMOCFG = os.path.join(simulation_dir, f)\r\n \r\n sumo_binary = os.path.join(os.environ['SUMO_HOME'], 'bin', 'sumo')\r\n sumo_cmd = [sumo_binary, \"-c\", _SUMOCFG]\r\n \r\n \r\n traci.start(sumo_cmd)\r\n if not os.path.isfile(f'{simulation_dir}/dump/{dump_name}.json'):\r\n start = time.perf_counter()\r\n data = Data(dump_name, traci.simulation.getNetBoundary(), areas_number, simulation_dir)\r\n data.init_grid()\r\n data.add_data_to_areas() \r\n data.save()\r\n \r\n loading_time = round(time.perf_counter() - start, 2)\r\n print(f'Data loaded ({loading_time}s)')\r\n print(f'Dump {dump_name} created')\r\n else:\r\n print(f'Dump with name {dump_name} already exists')\r\n \r\n traci.close(False) \r\n \r\ndef add_options(parser):\r\n \"\"\"\r\n Add command line 
options\r\n :param parser: The command line parser\r\n :return:\r\n \"\"\"\r\n \r\n parser.add_argument(\"-new_dump\", \"--new_dump\", type=str,\r\n help='Load and create a new data dump with the configuration file chosen')\r\n parser.add_argument(\"-areas\", \"--areas\", type=int,\r\n help='Will create a grid with \"areas x areas\" areas')\r\n parser.add_argument(\"-simulation_dir\", \"--simulation_dir\", type=str,\r\n help='Choose the simulation directory')\r\n \r\n parser.add_argument(\"-run\", \"--run\", type=str,\r\n help='Run a simulation process with the dump chosen')\r\n parser.add_argument(\"-c\", \"--c\", metavar =('config1','config2'), nargs='+', type=str,\r\n help='Choose your(s) configuration file(s) from your working directory')\r\n parser.add_argument(\"-c_dir\", \"--c_dir\", type=str,\r\n help='Choose a directory which contains your(s) configuration file(s)')\r\n parser.add_argument(\"-save\", \"--save\", action=\"store_true\",\r\n help='Save the logs into the logs folder')\r\n parser.add_argument(\"-csv\", \"--csv\", action=\"store_true\",\r\n help=\"Export all data emissions into a CSV file\")\r\n \r\ndef check_user_entry(args):\r\n \"\"\"\r\n Check the user entry consistency\r\n \"\"\"\r\n if (args.new_dump is not None):\r\n if(args.areas is None or args.simulation_dir is None):\r\n print('The -new_dump argument requires the -areas and -simulation_dir options')\r\n return False\r\n \r\n if (args.run is not None):\r\n if(args.c is None and args.c_dir is None):\r\n print('The -run argument requires the -c or -c_dir')\r\n return False\r\n \r\n return True \r\n \r\ndef main(args):\r\n \"\"\"\r\n The entry point of the application\r\n :param args: Command line options\r\n :return:\r\n \"\"\"\r\n parser = argparse.ArgumentParser()\r\n add_options(parser)\r\n args = parser.parse_args(args)\r\n \r\n if(check_user_entry(args)):\r\n \r\n if args.new_dump is not None:\r\n if (args.simulation_dir is not None) and (args.areas is not None): \r\n create_dump(args.new_dump, args.simulation_dir, args.areas)\r\n \r\n if args.run is not None:\r\n dump_path = f'{args.run}'\r\n if os.path.isfile(dump_path):\r\n with open(dump_path, 'r') as f:\r\n data = jsonpickle.decode(f.read())\r\n \r\n process = []\r\n files = [] \r\n \r\n if args.c is not None: \r\n for config in args.c:\r\n files.append(f'{config}') \r\n \r\n if args.c_dir is not None: \r\n path = f'{args.c_dir}'\r\n bundle_files = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))] \r\n for config in bundle_files:\r\n files.append(os.path.join(path, config))\r\n\r\n for conf in files: # Initialize all process\r\n config = Config(conf,data) \r\n p = RunProcess(data, config, args.save, args.csv)\r\n process.append(p) \r\n p.start()\r\n \r\n for p in process : p.join() \r\n \r\nif __name__ == '__main__':\r\n main(sys.argv[1:])\r\n" }, { "alpha_fraction": 0.6471537351608276, "alphanum_fraction": 0.6501120328903198, "avg_line_length": 38.278167724609375, "blob_id": "c4b74c875ff236c27824c5a0b2125df823603de2", "content_id": "34a9d1632090e872f82e0eb6b1cc784806411bc3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11159, "license_type": "no_license", "max_line_length": 116, "num_lines": 284, "path": "/sumo_project/configurator.py", "repo_name": "azuredark/SUMO_Emissions", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport os\nimport sys\n\nif 'SUMO_HOME' in os.environ:\n TOOLSDIR = os.path.join(os.environ['SUMO_HOME'], 'tools')\n 
sys.path.append(TOOLSDIR)\nelse:\n sys.exit(\"Please declare environment variable 'SUMO_HOME'\")\n\n\nimport argparse\nimport datetime\nimport json\nimport logging\nimport shutil\nimport subprocess\nimport tempfile\nimport time\nfrom sys import argv\nfrom types import SimpleNamespace\nfrom xml.etree import ElementTree\n\nimport randomTrips\nimport sumolib\n\n# Absolute path of the directory the script is in\nSCRIPTDIR = os.path.dirname(__file__)\nTEMPLATEDIR = os.path.join(SCRIPTDIR, 'templates')\nSUMOBIN = os.path.join(os.environ['SUMO_HOME'], 'bin')\n\n# Init logger\nlogfile = os.path.join(SCRIPTDIR, f'files/logs/configurator_{datetime.datetime.utcnow().isoformat()}.log')\nlogging.basicConfig(\n filename=logfile,\n level=logging.DEBUG,\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n)\n\n\"\"\"\nDefinition of vehicle classes. \nSee http://sumo.dlr.de/wiki/Definition_of_Vehicles,_Vehicle_Types,_and_Routes#Abstract_Vehicle_Class\n\"\"\"\nvehicle_classes = {\n 'passenger': {\n '--vehicle-class': 'passenger',\n '--vclass': 'passenger',\n '--prefix': 'veh',\n '--min-distance': 300,\n '--trip-attributes': 'departLane=\"best\"',\n },\n 'bus': {\n '--vehicle-class': 'bus',\n '--vclass': 'bus',\n '--prefix': 'bus',\n },\n 'truck': {\n '--vehicle-class': 'truck',\n '--vclass': 'truck',\n '--prefix': 'truck',\n '--min-distance': 600,\n '--trip-attributes': 'departLane=\"best\"',\n }\n}\n\n\nclass RandomTripsGenerator:\n def __init__(self, netpath, routepath, output, vclass, density, *flags, **opts):\n self.vclass = vclass\n self.density = density\n self.options = {\n # Default options\n '--net-file': netpath,\n '--output-trip-file': output,\n '--route-file': routepath,\n **opts\n }\n self.flags = [*flags]\n edges = sumolib.net.readNet(netpath).getEdges()\n self._init_trips(edges, vclass, density)\n self.options.update(vehicle_classes[self.vclass])\n\n def generate(self):\n logging.info(f'Generating trips for vehicle class {self.vclass} with density of {self.density} veh/km/h')\n randomTrips.main(randomTrips.get_options(dict_to_list(self.options) + self.flags))\n\n def _init_trips(self, edges, vclass, density):\n \"\"\"\n :param edges: foo.rou.xml\n :param density: vehicle/km/h\n \"\"\"\n # calculate the total length of the available lanes\n length = 0.\n for edge in edges:\n if edge.allows(vclass):\n length += edge.getLaneNumber() * edge.getLength()\n\n logging.debug(f'density = {density}')\n period = 3600 / (length / 1000) / density\n logging.debug(f'Period computed for network : {period}, vclass={self.vclass}')\n self.options.update({'-p': period})\n\n\nclass StoreDictKeyPair(argparse.Action):\n def __call__(self, parser, namespace, values, option_string=None):\n pairs = {}\n for kv in values:\n k, v = kv.split(\"=\")\n pairs[k] = v\n setattr(namespace, self.dest, pairs)\n\n\ndef load_netconvert_template(osm_input, out_name):\n netconfig = ElementTree.parse(os.path.join(TEMPLATEDIR, 'simul.netcfg'))\n root = netconfig.getroot()\n root.find('input/osm-files').set('value', osm_input)\n root.find('output/output-file').set('value', f'{out_name}.net.xml')\n root.find('report/log').set('value', f'{out_name}.netconvert.log')\n return netconfig\n\n\ndef load_polyconvert_template(osm_file, type_file, scenario_name):\n polyconfig = ElementTree.parse(os.path.join(TEMPLATEDIR, 'simul.polycfg'))\n root = polyconfig.getroot()\n root.find('input/osm-files').set('value', osm_file)\n root.find('input/net-file').set('value', f'{scenario_name}.net.xml')\n root.find('input/type-file').set('value', 
type_file)\n root.find('output/output-file').set('value', f'{scenario_name}.poly.xml')\n root.find('report/log').set('value', f'{scenario_name}.polyconvert.log')\n return polyconfig\n\n\ndef load_sumoconfig_template(simulation_name, routefiles=(), generate_polygons=False, seed=None):\n routefiles = routefiles or (f'{simulation_name}.rou.xml',)\n sumoconfig = ElementTree.parse(os.path.join(TEMPLATEDIR, 'simul.sumocfg'))\n root = sumoconfig.getroot()\n root.find('input/net-file').set('value', f'{simulation_name}.net.xml')\n root.find('input/route-files').set('value', ','.join(routefiles))\n additional = root.find('input/additional-files')\n if generate_polygons:\n additional.set('value', f'{simulation_name}.poly.xml')\n else:\n root.find('input').remove(additional)\n root.find('report/log').set('value', f'{simulation_name}.log')\n # Set the seed for the random number generator. By default, use the current time\n root.find('random_number/seed').set('value', seed or str(int(time.time())))\n return sumoconfig\n\n\ndef generate_scenario(osm_file, out_path, scenario_name, generate_polygons=False):\n net_template = load_netconvert_template(osm_file, scenario_name)\n\n with tempfile.TemporaryDirectory() as tmpdirname:\n # Generate NETCONVERT configuration\n netconfig = os.path.join(tmpdirname, f'{scenario_name}.netcfg')\n net_template.write(netconfig)\n # Copy typemaps to tempdir\n shutil.copytree(os.path.join(TEMPLATEDIR, 'typemap'), os.path.join(tmpdirname, 'typemap'))\n # Call NETCONVERT\n logging.info(\"Generating networkโ€ฆ\")\n netconvertcmd = [os.path.join(SUMOBIN, 'netconvert'), '-c', netconfig]\n logging.debug(f'Calling {\" \".join(netconvertcmd)}')\n subprocess.run(netconvertcmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n # Optionaly generate polygons\n if generate_polygons:\n generate_polygons_(osm_file, scenario_name, tmpdirname)\n # Move files to destination\n ignore_patterns = shutil.ignore_patterns('*.polycfg', '*.netcfg', 'typemap')\n shutil.copytree(tmpdirname, out_path, ignore=ignore_patterns)\n\n\ndef generate_polygons_(osm_file, scenario_name, dest):\n polyconfig = os.path.join(dest, f'{scenario_name}.polycfg')\n poly_template = load_polyconvert_template(osm_file, 'typemap/osmPolyconvert.typ.xml', scenario_name)\n poly_template.write(polyconfig)\n # Call POLYCONVERT\n logging.info('Generating polygonsโ€ฆ')\n polyconvert_cmd = [os.path.join(SUMOBIN, 'polyconvert'), '-c', polyconfig]\n logging.debug(f'Calling {\" \".join(polyconvert_cmd)}')\n subprocess.run(polyconvert_cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n\n\ndef generate_mobility(out_path, name, vclasses, end_time):\n netfile = f'{name}.net.xml'\n netpath = os.path.join(out_path, netfile)\n output = os.path.join(out_path, f'{name}.trips.xml')\n routefiles = []\n for vclass, density in vclasses.items():\n # simname.bus.rou.xml, simname.passenger.rou.xml, ...\n routefile = f'{name}.{vclass}.rou.xml'\n routepath = os.path.join(out_path, routefile)\n routefiles.append(routefile)\n logging.debug(routefile)\n generator = RandomTripsGenerator(netpath, routepath, output, vclass, float(density))\n generator.flags.append('-l')\n generator.flags.append('--validate')\n generator.options.update(**{'--end': end_time})\n generator.generate()\n return routefiles\n\n\ndef generate_sumo_configuration(routefiles, path, scenario_name, generate_polygons):\n sumo_template = load_sumoconfig_template(scenario_name, routefiles, generate_polygons)\n sumo_template.write(os.path.join(path, 
f'{scenario_name}.sumocfg'))\n\n\ndef generate_all(args):\n simulation_name = args.name\n simulation_dir = os.path.join(args.path, simulation_name)\n try:\n generate_polygons = args.generate_polygons\n except AttributeError:\n generate_polygons = False\n osm_file = args.osmfile\n logs_dir = os.path.join(simulation_dir, 'log')\n\n generate_scenario(osm_file, simulation_dir, simulation_name, generate_polygons)\n routefiles = generate_mobility(simulation_dir, simulation_name, args.vclasses, args.end)\n generate_sumo_configuration(routefiles, simulation_dir, simulation_name, generate_polygons)\n # Move all logs to logdir\n move_logs(simulation_dir, logs_dir)\n\n\ndef move_logs(simulation_dir, logs_dir):\n for f in os.listdir(simulation_dir):\n if os.path.splitext(f)[1] == '.log':\n shutil.move(os.path.join(simulation_dir, f), logs_dir)\n\n\ndef dict_to_list(d):\n return [item for k in d for item in (k, d[k])]\n\n\ndef parse_command_line(args=None):\n parser = argparse.ArgumentParser()\n parser.add_argument('osmfile', help='Path to the .osm file to convert to a SUMO simulation')\n parser.add_argument('--path', help='Where to generate the files')\n parser.add_argument('--name', required=True, help='Name of the SUMO scenario to generate')\n parser.add_argument('--generate-polygons', default=False, action='store_true',\n help='Whether to generate polygons and POIs (defaults to false).')\n parser.add_argument('--vclass', dest='vclasses', action=StoreDictKeyPair,\n nargs=\"+\", metavar=\"VCLASS=DENSITY\",\n help='Generate this vclass with given density, in pair form vclass=density. The density is '\n 'given in vehicles per hour per kilometer. For now, the following vehicle classes are '\n 'available: passenger, truck, bus.')\n parser.add_argument('--seed', help='Initializes the random number generator.')\n parser.add_argument('-e', '--end', type=int, default=200, help='end time (default 200)')\n return parser.parse_args(args=args)\n\n\ndef handle_args(options):\n # If no vehicle classes are specified, use 'passenger' as a default with a density of 10 cars/km/h.\n options.vclasses = options.vclasses or {'passenger': 10}\n # Delete simul_dir if it already exists\n simul_dir = os.path.join(options.path, options.name)\n if os.path.isdir(simul_dir):\n input(f'{simul_dir} already exists ! 
Press Enter to delete...')\n shutil.rmtree(simul_dir)\n logging.debug(f'Options : {options}')\n generate_all(options)\n\n\ndef parse_json(json_file):\n logging.info(f'Loading config from {json_file}')\n config = SimpleNamespace(**json.load(json_file))\n logging.debug(f'Config {config}')\n return config\n\n\nif __name__ == '__main__':\n # Try to load the config file\n if len(argv) > 2 and argv[1] == '-c' or argv[1] == '--config' or argv[1] == '-config':\n try:\n with open(argv[2]) as jsonfile:\n config = parse_json(jsonfile)\n handle_args(config)\n except FileNotFoundError:\n msg = f'The config file {argv[2]} does not exist!'\n logging.fatal(msg)\n raise FileNotFoundError(msg)\n else:\n # Run with command line arguments\n config = parse_command_line()\n handle_args(config)\n" }, { "alpha_fraction": 0.5637362599372864, "alphanum_fraction": 0.5681318640708923, "avg_line_length": 29.685392379760742, "blob_id": "57188b1e9a78c2f6636fae02242badf4407b41b1", "content_id": "02eefe8bdf5ca256e2bf805f3d14256a5def8403", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2730, "license_type": "no_license", "max_line_length": 88, "num_lines": 89, "path": "/sumo_project/config.py", "repo_name": "azuredark/SUMO_Emissions", "src_encoding": "UTF-8", "text": "\"\"\"\nCreated on 17 oct. 2018\n\n@author: Axel Huynh-Phuc, Thibaud Gasser\n\"\"\"\n\n\"\"\"\nThis module defines the global configuration for the simulation\n\"\"\"\n\nimport json\nimport os\n\nfrom data import Data\nfrom model import Emission\n\n\nclass Config:\n \"\"\"\n The Config class defines all simulation properties that can be changed\n \"\"\"\n\n def __init__(self,config_file, data : Data):\n \"\"\"\n Default constructor\n \"\"\"\n self.import_config_file(config_file)\n self.init_traci(data.dir)\n self.check_config()\n \n def import_config_file(self, config_file):\n \"\"\"\n Import your configuration file in JSON format\n :param config_file: The path to your configuration file\n :return:\n \"\"\"\n with open(f'{config_file}', 'r') as f:\n data = json.load(f)\n\n for option in data:\n self.__setattr__(option, data[option])\n self.config_filename = os.path.basename(f.name)\n self.check_config()\n\n def check_config(self):\n \"\"\"\n Check the relevance of user configuration choices\n :return:\n \"\"\"\n # Weight routing mode cannot be combined with other actions\n if self.weight_routing_mode:\n self.limit_speed_mode = False\n self.adjust_traffic_light_mode = False\n self.lock_area_mode = False\n\n # If without_actions_mode is chosen\n if self.without_actions_mode:\n self.limit_speed_mode = False\n self.adjust_traffic_light_mode = False\n self.weight_routing_mode = False\n self.lock_area_mode = False\n\n def __repr__(self) -> str:\n \"\"\"\n :return: All properties chosen by the user\n \"\"\"\n return (\n f'step number = {self.n_steps}\\n'\n f'window size = {self.window_size}\\n'\n f'weight routing mode = {self.weight_routing_mode}\\n'\n f'lock area mode = {self.lock_area_mode}\\n'\n f'limit speed mode = {self.limit_speed_mode}, RF = {self.speed_rf * 100}%\\n'\n f'adjust traffic light mode = {self.adjust_traffic_light_mode},'\n f'RF = {self.trafficLights_duration_rf * 100}%\\n'\n )\n \n def init_traci(self, simulation_dir):\n \"\"\"\n Init the Traci API\n :param simulation_dir: The path to the simulation directory\n :return:\n \"\"\"\n simdir = os.path.join(os.path.dirname(__file__), f'{simulation_dir}')\n \n for f in os.listdir(simdir):\n if f.endswith('.sumocfg'):\n self._SUMOCFG = 
os.path.join(simdir, f)\n sumo_binary = os.path.join(os.environ['SUMO_HOME'], 'bin', self._SUMOCMD)\n self.sumo_cmd = [sumo_binary, \"-c\", self._SUMOCFG]" }, { "alpha_fraction": 0.6506959199905396, "alphanum_fraction": 0.6555139422416687, "avg_line_length": 36.349998474121094, "blob_id": "1d30511add95d158de77cf0c6027f91ed58818e5", "content_id": "f31e7794bfbbff8cd6ebb01830e75a152bc72999", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3736, "license_type": "no_license", "max_line_length": 117, "num_lines": 100, "path": "/sumo_project/emissions.py", "repo_name": "azuredark/SUMO_Emissions", "src_encoding": "UTF-8", "text": "\"\"\"\nCreated on 17 oct. 2018\n\n@author: Axel Huynh-Phuc, Thibaud Gasser\n\"\"\"\n\n\"\"\"\nThis module defines how pollutant emissions are recovered and how we act on the areas \n\"\"\"\n\nimport traci\nfrom typing import List\n\nimport actions\nfrom model import Vehicle, Emission\nfrom runner import RunProcess\n\n\ndef compute_vehicle_emissions(veh_id):\n \"\"\"\n Recover the emissions of different pollutants from a vehicle and create an Emission instance\n :param veh_id: The vehicle ID\n :return: A new Emission instance\n \"\"\"\n co2 = traci.vehicle.getCO2Emission(veh_id)\n co = traci.vehicle.getCOEmission(veh_id)\n nox = traci.vehicle.getNOxEmission(veh_id)\n hc = traci.vehicle.getHCEmission(veh_id)\n pmx = traci.vehicle.getPMxEmission(veh_id)\n\n return Emission(co2, co, nox, hc, pmx)\n\n\ndef get_all_vehicles() -> List[Vehicle]:\n \"\"\"\n Recover all useful information about vehicles and creates a vehicles list\n :return: A list of vehicles instances\n \"\"\"\n vehicles = list()\n for veh_id in traci.vehicle.getIDList():\n veh_pos = traci.vehicle.getPosition(veh_id)\n vehicle = Vehicle(veh_id, veh_pos)\n vehicle.emissions = compute_vehicle_emissions(veh_id)\n vehicles.append(vehicle)\n return vehicles\n\ndef get_emissions(p : RunProcess, vehicles: List[Vehicle], current_step):\n \"\"\"\n For each area retrieves the acquired emissions in the window,\n and acts according to the configuration chosen by the user\n :param p: The current process\n :param vehicles: The list of vehicles\n :param current_step: The simulation current step\n :return:\n \"\"\"\n for area in p.data.grid:\n total_emissions = Emission()\n for vehicle in vehicles:\n if vehicle.pos in area:\n total_emissions += vehicle.emissions\n\n # Adding of the total of emissions pollutant at the current step into memory\n area.emissions_by_step.append(total_emissions)\n \n # If the sum of pollutant emissions (in mg) exceeds the threshold\n if area.sum_emissions_into_window(current_step) >= p.config.emissions_threshold:\n\n if p.config.limit_speed_mode and not area.limited_speed:\n p.logger.info(f'Action - Decreased max speed into {area.name} by {p.config.speed_rf * 100}%')\n actions.limit_speed_into_area(area, p.config.speed_rf)\n if p.config.adjust_traffic_light_mode and not area.tls_adjusted:\n p.logger.info(\n f'Action - Decreased traffic lights duration by {p.config.trafficLights_duration_rf * 100}%')\n actions.adjust_traffic_light_phase_duration(area, p.config.trafficLights_duration_rf)\n\n if p.config.lock_area_mode and not area.locked:\n if actions.count_vehicles_in_area(area):\n p.logger.info(f'Action - {area.name} blocked')\n actions.lock_area(area)\n\n if p.config.weight_routing_mode and not area.weight_adjusted:\n actions.adjust_edges_weights(area)\n\n traci.polygon.setFilled(area.name, True)\n\n else:\n if 
area.infrastructure_changed():\n p.logger.info(f'Action - Reversed actions into area {area.name}')\n actions.reverse_actions(area)\n traci.polygon.setFilled(area.name, False)\n\n\ndef get_reduction_percentage(ref, total):\n \"\"\"\n Return the reduction percentage of total emissions between reference and an other simulation\n :param ref: The sum of all pollutant emissions (in mg) for the simulation of reference\n :param total: The sum of all pollutant emissions (in mg) for the current simulation launched\n :return:\n \"\"\"\n return (ref - total) / ref * 100\n\n" }, { "alpha_fraction": 0.5813267827033997, "alphanum_fraction": 0.5843980312347412, "avg_line_length": 27.661972045898438, "blob_id": "3fdf4dfe87f01d08bf29400b0f22457725e19777", "content_id": "f39af69ed20bcdd8346cc5487a6390b75ef40ef9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8140, "license_type": "no_license", "max_line_length": 136, "num_lines": 284, "path": "/sumo_project/model.py", "repo_name": "azuredark/SUMO_Emissions", "src_encoding": "UTF-8", "text": "\"\"\"\nCreated on 17 oct. 2018\n\n@author: Axel Huynh-Phuc, Thibaud Gasser\n\"\"\"\n\n\"\"\"\nThis module defines the business model of our application\n\"\"\"\n\nimport collections\nfrom traci._trafficlight import Logic as SUMO_Logic\nfrom typing import Tuple, Set\n\nfrom shapely.geometry import Point, LineString\nfrom shapely.geometry import Polygon\nfrom shapely.geometry.base import BaseGeometry\n\n\nclass Lane:\n \"\"\"\n The Lane class includes the polygon defining the lane\n and keep in memory the initial maximum speed of the lane\n \"\"\"\n\n def __init__(self, lane_id: str, polygon: LineString, initial_max_speed: float):\n \"\"\"\n Lane constructor\n\n :param lane_id: The ID of the lane\n :param polygon: The polygon defining the shape of the lane\n :param initial_max_speed: The initial maximum speed\n \"\"\"\n self.polygon = polygon\n self.lane_id = lane_id\n self.initial_max_speed = initial_max_speed\n\n def __hash__(self):\n \"\"\"Overrides the default implementation\"\"\"\n return hash(self.lane_id)\n\n\nclass Phase:\n \"\"\"\n The Phase class defines a phase of a traffic light\n \"\"\"\n\n def __init__(self, duration: float, minDuration: float, maxDuration: float, phaseDef: str):\n \"\"\"\n Phase constructor\n\n :param duration: The duration of the phase (in seconds)\n :param minDuration: The minimum duration of the phase\n :param maxDuration: The maximum duration of the phase\n :param phaseDef: The definition of the phase, following the definition rules of SUMO\n (See : http://sumo.dlr.de/wiki/Simulation/Traffic_Lights#.3Cphase.3E_Attributes)\n \"\"\"\n\n self.duration = duration\n self.minDuration = minDuration\n self.maxDuration = maxDuration\n self.phaseDef = phaseDef\n\n def __repr__(self) -> str:\n \"\"\"\n :return: The Phase string representation\n \"\"\"\n repr = f'Phase(duration:{self.duration},minDuration:{self.minDuration},maxDuration:{self.maxDuration},phaseDef:{self.phaseDef})'\n return str(repr)\n\n\nclass Logic:\n \"\"\"\n The Logic class defines the strategy of a traffic light.\n This class includes the Logic instance of SUMO with all phases corresponding to it.\n A Logic object contains multiple phases.\n \"\"\"\n\n def __init__(self, logic: SUMO_Logic, phases: Set[Phase]):\n \"\"\"\n Logic constructor\n :param logic: The SUMO Logic object\n :param phases: The list of phases belonging to this logic\n \"\"\"\n self._logic = logic\n self._phases: Set[Phase] = phases\n\n\nclass 
TrafficLight:\n \"\"\"\n This TrafficLight class defines a traffic light\n \"\"\"\n\n def __init__(self, tl_id: str, logics: Set[Logic]):\n \"\"\"\n TrafficLight constructor\n :param tl_id: The traffic light ID\n :param logics: The list of logics belonging to the traffic light\n \"\"\"\n self.tl_id = tl_id\n self._logics: Set[Logic] = logics\n\n def __hash__(self):\n \"\"\"Overrides the default implementation\"\"\"\n return hash(self.tl_id)\n\n\nclass Emission:\n \"\"\"\n This class defines the different pollutant emissions\n \"\"\"\n\n def __init__(self, co2=0, co=0, nox=0, hc=0, pmx=0):\n \"\"\"\n Emission constructor\n :param co2: Quantity of CO2(in mg)\n :param co: Quantity of C0(in mg)\n :param nox: Quantity of Nox(in mg)\n :param hc: Quantity of HC(in mg)\n :param pmx: Quantity of PMx(in mg)\n \"\"\"\n self.co2 = co2\n self.co = co\n self.nox = nox\n self.hc = hc\n self.pmx = pmx\n\n def __add__(self, other):\n \"\"\"\n Add two emission objects\n :param other: The other Emission object to add\n :return: A new object whose emission values are the sum of both Emission object\n \"\"\"\n return Emission(self.co2 + other.co2, self.co + other.co, self.nox + other.nox, self.hc + other.hc,\n self.pmx + other.pmx)\n\n def value(self):\n \"\"\"\n :return: The sum of all emissions\n \"\"\"\n return self.co2 + self.co + self.nox + self.hc + self.pmx\n\n def __repr__(self) -> str:\n \"\"\"\n :return: The Emission string representation\n \"\"\"\n repr = f'Emission(co2={self.co2},co={self.co},nox={self.nox},hc={self.hc},pmx={self.pmx})'\n return str(repr)\n\n\nclass Area:\n \"\"\"\n The Area class defines a grid area of the simulation map\n \"\"\"\n\n def __init__(self, coords, name):\n \"\"\"\n Area constructor\n :param coords: The coordinates of the zone,\n defined by the bounds coordinates of this area : (xmin, ymin, xmax, ymax)\n :param name: The Area name\n \"\"\"\n self.limited_speed = False\n self.locked = False\n self.tls_adjusted = False\n self.weight_adjusted = False\n self.rectangle = Polygon(coords)\n self.name = name\n self.emissions_by_step = []\n self._lanes: Set[Lane] = set()\n self._tls: Set[TrafficLight] = set()\n\n def set_window_size(self, window_size):\n self.window = collections.deque(maxlen=window_size)\n \n def __eq__(self, other):\n \"\"\"\n Overrides the equal definition\n :param other: The other Area object\n :return: True if the two rectangles are equals\n \"\"\"\n return self.rectangle.__eq__(other)\n\n def __contains__(self, item):\n \"\"\"\n :param item: A position on the map\n :return: True if the area contains the item\n \"\"\"\n return self.rectangle.contains(item)\n\n @property\n def bounds(self):\n \"\"\"\n Return the bounds rectangle of this area\n :return:\n \"\"\"\n return self.rectangle.bounds\n\n def intersects(self, other: BaseGeometry) -> bool:\n \"\"\"\n :param other: A BaseGeometry object\n :return: True if this area intersects with other\n \"\"\"\n return self.rectangle.intersects(other)\n\n def add_lane(self, lane: Lane):\n \"\"\"\n Add a new lane object into lanes list\n :param lane: A Lane object\n :return:\n \"\"\"\n self._lanes.add(lane)\n\n def add_tl(self, tl: TrafficLight):\n \"\"\"\n Add a new trafficLight object into lanes list\n :param tl: A TrafficLight object\n :return:\n \"\"\"\n self._tls.add(tl)\n\n def remove_lane(self, lane: Lane):\n \"\"\"\n Remove a lane from lanes list\n :param lane: The Lane object to remove\n :return:\n \"\"\"\n self._lanes.remove(lane)\n\n def sum_all_emissions(self):\n \"\"\"\n Sum all Emissions object from 
initial step to final step\n :return: The sum Emission object\n \"\"\"\n sum = Emission()\n for emission in self.emissions_by_step:\n sum += emission\n return sum\n\n def sum_emissions_into_window(self, current_step):\n \"\"\"\n Sum all Emissions object into the acquisition window\n :param current_step: The current step of the simulation\n :return:\n \"\"\"\n self.window.appendleft(self.emissions_by_step[current_step].value())\n\n sum = 0\n for i in range(self.window.__len__()):\n sum += self.window[i]\n return sum\n\n @classmethod\n def from_bounds(cls, xmin, ymin, xmax, ymax):\n return cls((\n (xmin, ymin),\n (xmin, ymax),\n (xmax, ymax),\n (xmax, ymin)))\n \n def infrastructure_changed(self):\n return (self.limited_speed or self.locked or self.tls_adjusted or self.weight_adjusted)\n\n\nclass Vehicle:\n \"\"\"\n The Vehicle class defines a vehicle object\n \"\"\"\n\n def __init__(self, veh_id: int, pos: Tuple[float, float]):\n \"\"\"\n Vehicle constructor\n :param veh_id: The vehicle ID\n :param pos: The position of the vehicle one the map\n \"\"\"\n self.emissions: Emission = Emission()\n self.veh_id = veh_id\n self.pos = Point(pos)\n\n def __repr__(self) -> str:\n \"\"\"\n :return: The Vehicle string representation\n \"\"\"\n return str(self.__dict__)\n" }, { "alpha_fraction": 0.6718452572822571, "alphanum_fraction": 0.6797717213630676, "avg_line_length": 46.727272033691406, "blob_id": "c0989d41fb79600aa3d196aaa124d0a3d14b3fab", "content_id": "9b25b84e313cb434634f8912291b4829753b64ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3154, "license_type": "no_license", "max_line_length": 177, "num_lines": 66, "path": "/README.md", "repo_name": "azuredark/SUMO_Emissions", "src_encoding": "UTF-8", "text": "# SUMO Emissions\n\nThis \"Proof of concept\" aims to simulate the impact that connected vehicles and smart urban infrastructure would have on pollutant emissions.\nUsing the SUMO simulator, we developed several parameters and measures using Traci to act on the road infrastructure and vehicles.\n\nWe imagined that for a map of a given city, the city would be divided into areas, \nwhich when the pollution rate exceeds a certain threshold in these then we act on the infrastructure and the vehicles present in this zone.\n\n![](https://github.com/Ahp06/SUMO_Emissions/blob/master/sumo_project/files/imgs/simulation_example.PNG)\n\n# Prerequisites:\n* Python >3.7 : https://www.python.org/downloads/\n* External Python librairies : shapely, parse, jsonpickle : ``` > pip install [LIBRARY_NAME] ```\n* SUMO 1.0.0 : http://sumo.dlr.de/wiki/Downloads\n\n# How to run \n\nThis application can be launched from an IDE, or from a shell (linux, Windows, MacOS). \nYou will need a config.json configuration file (see [default_config.json](https://github.com/Ahp06/SUMO_Emissions/wiki/Configuration-file) for a template) and a simulation file.\nYou can use your own scenario file (osm.sumocfg file), see : [SUMO Tutorials](http://sumo.dlr.de/wiki/Tutorials). 
\n\n**With a Shell:**\n\n```\nusage: runner.py [-h] [-new_dump NEW_DUMP] [-areas AREAS]\n [-simulation_dir SIMULATION_DIR] [-run RUN]\n [-c config1 [config2 ...]] [-c_dir C_DIR] [-save] [-csv]\n\noptional arguments:\n -h, --help show this help message and exit\n -new_dump NEW_DUMP, --new_dump NEW_DUMP\n Load and create a new data dump with the configuration\n file chosen\n -areas AREAS, --areas AREAS\n Will create a grid with \"areas x areas\" areas\n -simulation_dir SIMULATION_DIR, --simulation_dir SIMULATION_DIR\n Choose the simulation directory\n -run RUN, --run RUN Run a simulation process with the dump chosen\n -c config1 [config2 ...], --c config1 [config2 ...]\n Choose your(s) configuration file(s) from your working\n directory\n -c_dir C_DIR, --c_dir C_DIR\n Choose a directory which contains your(s)\n configuration file(s)\n -save, --save Save the logs into the logs folder\n -csv, --csv Export all data emissions into a CSV file\n```\n\nCreate a data dump from simulation directory : \n\n```py ./runner.py -new_dump dump -areas 10 -simulation_dir [PATH_TO_SIMUL_DIR]```\n\nThis command will create new dump called \"dump\" from the simulation directory chosen with a 10x10 grid. \n\nRun simulations in parallel with multiple configuration files : \n\n```py ./runner.py -run dump -c [PATH_TO_CONFIG1] [PATH_TO_CONFIG2] -save -csv```\n\nThis command will run a simulation dump \"dump\" with the configuration file(s) \"config1\" and \"config2\" \nwith CSV data export and logs backup.\n\nFrom a folder which contains multiple configuration files : \n\n```py ./runner.py -run dump -c_dir [PATH_TO_CONFIG_DIR] -save -csv```\n\nLog and csv files will be written in a sub folder of the simulation folder. \n\n\n" }, { "alpha_fraction": 0.6560150384902954, "alphanum_fraction": 0.6590695381164551, "avg_line_length": 29.840579986572266, "blob_id": "975dff7b0e9050264cf34af67e8af1a763fa6652", "content_id": "0c713527f56c1f45f7c40778813bd51262188fff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4256, "license_type": "no_license", "max_line_length": 115, "num_lines": 138, "path": "/sumo_project/actions.py", "repo_name": "azuredark/SUMO_Emissions", "src_encoding": "UTF-8", "text": "\"\"\"\nCreated on 17 oct. 
2018\n\n@author: Axel Huynh-Phuc, Thibaud Gasser\n\"\"\"\n\n\"\"\"\nThis module defines all possible actions on the simulation\n\"\"\"\n\nimport traci\n\nfrom model import Area\n\n\ndef compute_edge_weight(edge_id):\n \"\"\"\n Sum the different pollutant emissions on the edge with the identifier edge_id\n :param edge_id: The edge ID\n :return: The sum (in mg) of all pollutant emissions\n \"\"\"\n co2 = traci.edge.getCO2Emission(edge_id)\n co = traci.edge.getCOEmission(edge_id)\n nox = traci.edge.getNOxEmission(edge_id)\n hc = traci.edge.getHCEmission(edge_id)\n pmx = traci.edge.getPMxEmission(edge_id)\n\n return co2 + co + nox + hc + pmx\n\n\ndef adjust_edges_weights(area):\n \"\"\"\n Changes the edge weight of all edges into the area\n :param area: The Area object\n :return:\n \"\"\"\n area.weight_adjusted = True\n for lane in area._lanes:\n edge_id = traci.lane.getEdgeID(lane.lane_id)\n weight = compute_edge_weight(edge_id) # by default edges weight = length/mean speed\n traci.edge.setEffort(edge_id, weight)\n\n for veh_id in traci.vehicle.getIDList():\n traci.vehicle.rerouteEffort(veh_id)\n\n\ndef limit_speed_into_area(area: Area, speed_rf):\n \"\"\"\n Limit the speed into the area by speed_rf factor\n :param area: The Area object\n :param speed_rf: The speed reduction factor (must be positive)\n :return:\n \"\"\"\n area.limited_speed = True\n for lane in area._lanes:\n traci.lane.setMaxSpeed(lane.lane_id, speed_rf * lane.initial_max_speed)\n\n\ndef modifyLogic(logic, rf):\n \"\"\"\n Change the logic of a traffic light by decreasing the overall duration of the traffic light\n :param logic: The Logic object\n :param rf: The reduction factor (must be positive)\n :return: A new Logic object with all phases modified\n \"\"\"\n new_phases = []\n for phase in logic._phases:\n new_phase = traci.trafficlight.Phase(phase.duration * rf, phase.minDuration * rf, phase.maxDuration * rf,\n phase.phaseDef)\n new_phases.append(new_phase)\n\n return traci.trafficlight.Logic(\"new-program\", 0, 0, 0, new_phases)\n\n\ndef adjust_traffic_light_phase_duration(area, reduction_factor):\n \"\"\"\n Set all logics modification on traffic lights into the area\n :param area: The Area object\n :param reduction_factor: The reduction factor (must be positive)\n :return:\n \"\"\"\n area.tls_adjusted = True\n for tl in area._tls:\n for logic in tl._logics:\n traci.trafficlights.setCompleteRedYellowGreenDefinition(tl.tl_id, modifyLogic(logic, reduction_factor))\n\n\ndef count_vehicles_in_area(area):\n \"\"\"\n Count the vehicles number into the area\n :param area: The Area object\n :return: The number of vehicles into the area\n \"\"\"\n vehicles_in_area = 0\n for lane in area._lanes:\n vehicles_in_area += traci.lane.getLastStepVehicleNumber(lane.lane_id)\n return vehicles_in_area\n\n\ndef lock_area(area):\n \"\"\"\n Prohibits access to the area to a particular vehicle class\n NOT FIXED : Some vehicles continue to go into the area\n if they can not turn around and then will stay blocked there\n as long as \"lock_area\" will not be reversed\n :param area: The Area object\n :return:\n \"\"\"\n area.locked = True\n for lane in area._lanes:\n # The passenger class is an example, you have to adapt this code\n traci.lane.setDisallowed(lane.lane_id, 'passenger')\n\n\ndef reverse_actions(area):\n \"\"\"\n Reverse all actions made in an area\n :param area: The Area object\n :return:\n \"\"\"\n # Reset max speed to original\n if area.limited_speed:\n area.limited_speed = False\n for lane in area._lanes:\n 
traci.lane.setMaxSpeed(lane.lane_id, lane.initial_max_speed)\n\n # Reset traffic lights initial duration\n if area.tls_adjusted:\n area.tls_adjusted = False\n for tl in area._tls:\n for initial_logic in tl._logics:\n traci.trafficlights.setCompleteRedYellowGreenDefinition(tl.tl_id, initial_logic._logic)\n\n # Unlock the area\n if area.locked:\n area.locked = False\n for lane in area._lanes:\n traci.lane.setAllowed(lane.lane_id, []) # empty means all classes are allowed\n" } ]
9
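In model.py above, `Area.sum_emissions_into_window` drives the emissions-threshold check with a fixed-size `collections.deque`. A standalone sketch of that sliding-window pattern, with illustrative per-step values:

```python
# A deque with maxlen keeps only the newest `window_size` per-step totals,
# so the threshold test always covers a sliding window, not the whole run.
import collections

window = collections.deque(maxlen=3)              # window_size = 3 (illustrative)
emissions_by_step = [10.0, 12.5, 7.0, 30.0, 5.0]  # per-step totals in mg

for step, value in enumerate(emissions_by_step):
    window.appendleft(value)  # the oldest value falls off the other end
    print(step, sum(window))  # at step 3 this is 49.5, not the cumulative 59.5
```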
KitronikLtd/micropython-microbit-kitronik-fischertechnik
https://github.com/KitronikLtd/micropython-microbit-kitronik-fischertechnik
68c51453fe36df43c2a019d5200b637877b9399e
371b2f47d7bace183b615867b24db7ccfd3f193d
f7f7f75f23ae5ce8fe356c1f863b5f6a8fe684e1
refs/heads/master
2022-11-02T12:37:57.709252
2022-10-13T09:15:19
2022-10-13T09:15:19
186440263
0
1
MIT
2019-05-13T14:51:31
2019-05-21T13:20:16
2022-10-13T09:15:20
Python
[ { "alpha_fraction": 0.5875105261802673, "alphanum_fraction": 0.6169140338897705, "avg_line_length": 36.98936080932617, "blob_id": "41d061e414a6b561df3e8e3d1dd4893628b79065", "content_id": "5910f57eed146ce40c9f80216a98b84870b4249e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3571, "license_type": "permissive", "max_line_length": 122, "num_lines": 94, "path": "/Interface for Fischertechnik.py", "repo_name": "KitronikLtd/micropython-microbit-kitronik-fischertechnik", "src_encoding": "UTF-8", "text": "# microbit-module: [email protected]\n# Copyright (c) Kitronik Ltd 2022. \n#\n# The MIT License (MIT)\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\nfrom microbit import *\nimport math\n\nclass KitronikInterfaceForFischertechnik:\n \n def motorOn(self, motor, direction, speed):\n \n if speed > 100:\n speed = 100\n elif speed < 0:\n speed = 0\n speed = speed * 10\n if motor == \"Motor1\":\n if direction == \"forward\":\n pin8.write_analog(speed)\n pin12.write_digital(0)\n elif direction == \"reverse\":\n pin12.write_analog(speed)\n pin8.write_digital(0)\n elif motor == \"Motor2\":\n if direction == \"forward\":\n pin16.write_analog(speed)\n pin2.write_digital(0)\n elif direction == \"reverse\":\n pin2.write_analog(speed)\n pin16.write_digital(0)\n\n def motorOff(self, motor):\n if motor == \"Motor1\":\n pin12.write_digital(0)\n pin8.write_digital(0)\n elif motor == \"Motor2\":\n pin2.write_digital(0)\n pin16.write_digital(0)\n \n def led(self, pinSelection, illumination):\n if pinSelection == \"P0\":\n if illumination == \"on\":\n pin0.write_digital(1)\n elif illumination == \"off\":\n pin0.write_digital(0)\n elif pinSelection == \"P1\":\n if illumination == \"on\":\n pin1.write_digital(1)\n elif illumination == \"off\":\n pin1.write_digital(0)\n \n def phototransistor(self, pinSelection):\n if pinSelection == \"P0\":\n reading = pin0.read_analog()\n elif pinSelection == \"P1\":\n reading = pin1.read_analog()\n return reading\n \n def ntc(self, pinSelection):\n if pinSelection == \"P0\":\n reading = pin0.read_analog()\n elif pinSelection == \"P1\":\n reading = pin1.read_analog()\n convertReading = reading * (3.3/1024) # convert reading to voltage reading x (supply divide ADC resoluction)\n ntcResistor = 3.3/((3.3-convertReading)/4700) # calculate resistance\n temperatureC = (3880/math.log(ntcResistor/0.13)) - 273.15\n return temperatureC\n \nkiff = KitronikInterfaceForFischertechnik\n\nwhile True:\n 
kiff.motorOn(kiff, \"Motor1\", \"forward\", 100)\n sleep(2000)\n kiff.motorOff(kiff, \"Motor1\")\n sleep(2000)\n display.show(kiff.ntc(kiff,\"P0\"))\n" }, { "alpha_fraction": 0.7054263353347778, "alphanum_fraction": 0.7209302186965942, "avg_line_length": 19.945945739746094, "blob_id": "b56e55f6aef8fbcb398675a7114a26cf136d5a3d", "content_id": "6e1a654848bc5d73cda92bb9da068c06eaa5fe6b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 774, "license_type": "permissive", "max_line_length": 101, "num_lines": 37, "path": "/README.md", "repo_name": "KitronikLtd/micropython-microbit-kitronik-fischertechnik", "src_encoding": "UTF-8", "text": "# micropython-microbit-kitronik-fischertechnik\nExample code for use with the Kitronik Interface for Fischertechnik board ( www.kitronik.co.uk/5656 )\n\n## Operation\n\nThis package contains a function to drive motors:\n```blocks\n kiff.motorOn(kiff, \"Motor1\", \"forward\", 100)\n```\n\nThis package contains a function to stop drive:\n```blocks\n kiff.motorOff(kiff, \"Motor1\")\n```\n\nThis package contains a function to read NTC resistor:\n```blocks\n display.show(kiff.ntc(kiff,\"P0\"))\n```\n\nThis package contains a function to read phototransistor voltage:\n```blocks\n display.show(kiff.phototransistor(kiff,\"P0\"))\n```\n\nThis package contains a function to turn on LED:\n```blocks\n display.show(kiff.led(kiff,\"P0\", \"on\"))\n```\n\n## License\n\nMIT\n\n## Supported Targets\n\nBBC micro:bit" } ]
2
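The Kitronik module above binds the class without parentheses (`kiff = KitronikInterfaceForFischertechnik`) and then hands `kiff` in explicitly as `self` on every call; that only works because the methods never touch instance state. A minimal sketch of the two calling styles, using an invented `Motor` stand-in rather than the real `microbit` pin API:

```python
# Stand-in class: the speed clamp mirrors motorOn above, but Motor and its
# method are invented for illustration -- this is not the Kitronik API.
class Motor:
    def on(self, speed):
        if speed > 100:
            speed = 100
        elif speed < 0:
            speed = 0
        return speed * 10  # scaled the way motorOn scales for write_analog

# Style used in the file above: kiff is the class object itself, so it is
# passed in as `self` by hand. This works only while `on` ignores `self`.
kiff = Motor
print(kiff.on(kiff, 150))  # -> 1000

# Conventional style: instantiate once and let Python bind `self`.
kiff = Motor()
print(kiff.on(150))        # -> 1000
```

On-device code would still need the `microbit` module; the point here is only the instantiation pattern.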
KaranMomi/DockerAssignment
https://github.com/KaranMomi/DockerAssignment
1b6086fab3c1137221d47f1c79acad7fbae1d813
93d907f913f80952ed466d25dd8ab82bc70efc86
3c716db40eb69a4c2d5c79e3ba0ea0c469f1ba3a
refs/heads/main
2023-07-22T13:34:30.731030
2021-09-06T12:25:09
2021-09-06T12:25:09
403,539,165
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.75, "alphanum_fraction": 0.75, "avg_line_length": 31, "blob_id": "f3d273a3923b87d0c04be13a7111e48089b44c1e", "content_id": "f2e401923d7cb7c0be775ee40814eda02b69a99c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 32, "license_type": "no_license", "max_line_length": 31, "num_lines": 1, "path": "/hello.py", "repo_name": "KaranMomi/DockerAssignment", "src_encoding": "UTF-8", "text": "print(\"hello, script executed\")\n" }, { "alpha_fraction": 0.6590909361839294, "alphanum_fraction": 0.6818181872367859, "avg_line_length": 21, "blob_id": "ae9ae283e80511639c06e1a0540a92b19903a5a0", "content_id": "8278695baefa8158e7b382ad872b7f901cf84ce8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 88, "license_type": "no_license", "max_line_length": 27, "num_lines": 4, "path": "/Dockerfile", "repo_name": "KaranMomi/DockerAssignment", "src_encoding": "UTF-8", "text": "FROM python:3.7-alpine\nCOPY *.py /tmp/hello.py\nWORKDIR /tmp\nCMD [ \"python\", \"hello.py\"]\n" } ]
2
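The Dockerfile above copies with a glob (`COPY *.py /tmp/hello.py`), which succeeds only because a single `.py` file matches; with two or more matches Docker requires the destination to be a directory ending in `/`. A small smoke test for the image, assuming Docker is installed and the daemon is running; the `dockerassignment` tag is a made-up name:

```python
# Build the image from the Dockerfile above (run from the repo root) and
# check that the container prints what hello.py prints.
import subprocess

subprocess.run(["docker", "build", "-t", "dockerassignment", "."], check=True)
result = subprocess.run(["docker", "run", "--rm", "dockerassignment"],
                        capture_output=True, text=True, check=True)
assert result.stdout.strip() == "hello, script executed"
print("image OK:", result.stdout.strip())
```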
pombredanne/public-data
https://github.com/pombredanne/public-data
596dc650ede3852d5bb85b6a5a5a0a2380c56664
4f6e0591d8174120f3b0eda99cc50ac45f84b75f
ef6bf8981814f41caddae12ed1a4807c8423dde0
refs/heads/master
2017-12-04T00:00:39.170751
2013-04-16T16:31:29
2013-04-16T16:31:29
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7322834730148315, "alphanum_fraction": 0.7322834730148315, "avg_line_length": 30.25, "blob_id": "5596fd15347c2c967af3c457d2ea2ea2b5a6965e", "content_id": "05f3fadb28b178cb627bbb20be4306e3c871b1fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 127, "license_type": "no_license", "max_line_length": 99, "num_lines": 4, "path": "/README.md", "repo_name": "pombredanne/public-data", "src_encoding": "UTF-8", "text": "public-data\n===========\n\nRaw JSON data from publicly-disclosed incidents which have been VERISized by the Verizon RISK Team.\n\n\n" }, { "alpha_fraction": 0.6084498763084412, "alphanum_fraction": 0.6117227077484131, "avg_line_length": 29.834861755371094, "blob_id": "52b677dd89645f02bf5f5a147168efa50a3fee52", "content_id": "e0ac236338218557ffbb23b6a6779f9222c71649", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3361, "license_type": "no_license", "max_line_length": 101, "num_lines": 109, "path": "/scripts/fact_table.py", "repo_name": "pombredanne/public-data", "src_encoding": "UTF-8", "text": "# Hints on how to do this are located here\n# http://stackoverflow.com/questions/15251061/flatten-a-json-object-with-other-embedded-json-objects/\n\nimport os\n\n# Create an array with all the field names that we'll need to collect\nFIELD_NAMES = [\"incident_id\",\n \"source_id\",\n \"security_compromise\",\n \"confidence\",\n \"victim.victim_id\",\n \"victim.industry\",\n \"victim.employee_count\",\n \"victim.country\",\n \"victim.state\",\n \"victim.revenue.amount\",\n \"victim.revenue.iso_currency_code\",\n \"victim.locations_affected\",\n \"actor.type\",\n \"actor.motive\",\n \"actor.role\",\n \"actor.variety\",\n \"actor.country\",\n \"actor.industry\"\n \"action.type\",\n \"action.variety\",\n \"action.vector\",\n \"action.name\",\n \"action.cve\",\n \"action.target\",\n \"action.location\",\n \"asset.type\",\n \"asset.personal\",\n \"asset.management\",\n \"asset.hosting\",\n \"asset.country\",\n \"asset.cloud\",\n \"attribute.type\",\n \"attribute.data_disclosure\",\n \"attribute.data_total\",\n \"attribute.data.variety\",\n \"attribute.data.amount\",\n \"attribute.state\",\n \"attribute.variety\",\n \"attribute.duration.unit\",\n \"attribute.duration.value\",\n \"timeline.incident.year\",\n \"timeline.incident.month\",\n \"timeline.incident.day\",\n \"timeline.incident.time\",\n \"timeline.investigation.year\",\n \"timeline.investigation.month\",\n \"timeline.investigation.day\",\n \"timeline.compromise.unit\",\n \"timeline.compromise.value\",\n \"timeline.exfiltration.unit\",\n \"timeline.exfiltration.value\",\n \"timeline.discovery.unit\",\n \"timeline.discovery.value\",\n \"timeline.discovery.containment.unit\",\n \"timeline.discovery.containment.value\",\n \"discovery_method\",\n \"control_failure\",\n \"corrective_action\",\n \"cost_corrective_action\",\n \"impact.overall_rating\",\n \"impact.overall_min_amount\",\n \"impact.overall_amount\",\n \"impact.overall_max_amount\",\n \"impact.loss.variety\",\n \"impact.loss.rating\",\n \"impact.loss.min_amount\",\n \"impact.loss.amount\",\n \"impact.loss.max_amount\",\n \"impact.iso_currency_code\"]\n\n\n\n \ndef ProcessJSON(dataIn):\n print(\"do something to the data\")\n\n\n# Check if the data directory is available and is a directory\nif not os.path.isdir('data'):\n print(\"data directory not found. 
Are you running this in the right place?\")\n exit(1)\n\n# Count the number of files in the directory. This whole process could take a while and\n# we might want to give some feedback about how far along we are.\nfilecount = len([name for name in os.listdir('data') if os.path.isfile('data/'+name)])\niteration = 0\n\n# Open the output file and write the header to it\noutfile = open('fact_table.csv','w')\noutfile.write(\"this is the header\")\n\n\n# Read through each file and start generating the fact table\nfor filename in os.listdir('data'):\n infile = open((os.path.join('data',filename)),'r')\n ProcessJSON(infile.read())\n infile.close()\n iteration += 1\n print('Processed ' + filename + '. File ' + str(iteration) + ' of ' + str(filecount) + '.')\n\n\n# close the output file\noutfile.close()\n" }, { "alpha_fraction": 0.5764670968055725, "alphanum_fraction": 0.5794309377670288, "avg_line_length": 31.757282257080078, "blob_id": "991e95ef566b78e1c99ff2dfc376841b139a0c04", "content_id": "f397c961287acc49f413776781a2c3a12e2f0969", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3374, "license_type": "no_license", "max_line_length": 111, "num_lines": 103, "path": "/scripts/json2csv-1.py", "repo_name": "pombredanne/public-data", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport simplejson as json\nimport sys\nimport os\nimport csv\nimport glob\n\ndef handledict(output, label, datadict, arraylist):\n \"general function to determine how to handle value\"\n debug = True\n if debug: print \"running with label: \" + label\n mylist = datadict.items()\n for k,v in mylist:\n alabel = k\n if label:\n alabel = \".\".join([label, k])\n if (label == \"agent\" or label == \"action\"):\n output[alabel] = 1\n handleAny(output, alabel, v, arraylist)\n\ndef handleAny(output, label, v, arraylist):\n \"handling any single instance\"\n debug = True\n if debug: print \"trying to parse \" + label\n if (type(v) is dict):\n handledict(output, label, v, arraylist)\n elif (type(v) is str):\n if output.get(label) is not None:\n if (type(output[label]) is str):\n if debug: print \"\\t** YES! 
** Found string already\"\n output[label] = [output[label], v]\n arraylist[label] = 1\n if debug: print \"\\tconverted to list: \" + label + \" to \" + v\n elif (type(output[label]) is list):\n output[label].append(v)\n if debug: print \"\\tappended to list: \" + label + \" to \" + v\n arraylist[label] = 1\n else:\n if debug: print \"\\t---------- > weird, not sure what to do with \" + label + \": \" + str(type(v))\n if debug: print \"\\tand output label is \" + str(type(output[label]))\n if debug: print \"\\tand tempoget is \" + str(type(tempoget))\n else:\n if debug: print \"\\tsimply assigning: \" + label + \" to \" + v\n output[label] = v\n elif (type(v) is list):\n for onev in v:\n handleAny(output, label, onev, arraylist)\n else:\n if debug: print \"*******unknown type: \", type(v)\n\n\ndef recursive(alldata, localnames):\n \"Stare at this long enough and it's quite simple\"\n if not len(localnames):\n writer.writerow(alldata)\n return\n localdata = dict(alldata)\n ifield = localnames[0]\n for n in alldata[ifield]:\n localdata[ifield] = n\n if (len(localnames) > 1):\n sendon = localnames[1:len(localnames)]\n recursive(localdata, sendon)\n else:\n writer.writerow(localdata)\n\nkeyfields = []\nF = open(\"keyfields-pub.txt\")\nrawinput = F.readlines()\nfor line in rawinput:\n foo = line.strip(\"\\n\")\n keyfields.append(foo)\n\n# print out the line here, we are iterated as much as we can be\noutfile = open(\"pubfact-table.csv\", \"w\")\nwriter = csv.DictWriter(outfile, fieldnames=keyfields)\nheaders=dict( (n,n) for n in keyfields)\nwriter.writerow(headers)\n\n\n# for filename in glob.glob(\"src2/vz_Westp-ddb-news*.json\"):\nfor filename in glob.glob(\"pub/*.json\"):\n print filename\n json_data=open(filename).read()\n try:\n data = json.loads(json_data)\n except:\n print sys.argv[1], \" Unexpected error:\", sys.exc_info()[1]\n debug = True\n output = {}\n arraylist = {}\n handledict(output, \"\", data, arraylist)\n mylist = arraylist.items()\n keylist = []\n combos = 1\n for k,v in mylist:\n keylist.append(k)\n combos = combos * len(output[k])\n\n # print \"Arrays found in\",keylist\n print \"\\t\\t\" + str(len(keylist)) + \" combinations:\",combos\n recursive(output, keylist)\n" }, { "alpha_fraction": 0.6143250465393066, "alphanum_fraction": 0.6143250465393066, "avg_line_length": 24.928571701049805, "blob_id": "4c5ae78a18b88438dcf0e2852890931a2708b96d", "content_id": "47a561add94b17e6ffb889d8c2d27ee2f60846de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 363, "license_type": "no_license", "max_line_length": 43, "num_lines": 14, "path": "/scripts/data-array.rb", "repo_name": "pombredanne/public-data", "src_encoding": "UTF-8", "text": "require 'json'\noutfile = File.open('data-array.txt','w')\noutfile.print \"var incidents = [\"\nDir.foreach('../data/') do |item|\n next if item == '.' or item == '..'\n infile = File.open('../data/'+item,'r')\n the_data = JSON.parse(infile.read())\n outfile.print the_data.to_json\n outfile.print \",\"\n infile.close()\nend\n\noutfile.print \"]\"\noutfile.close\n" } ]
4
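Both scripts above flatten nested VERIS JSON into dot-separated column names: `fact_table.py` declares the columns up front (the `FIELD_NAMES` list is comma-sensitive — adjacent strings silently concatenate), while `json2csv-1.py` walks the object and expands every list combination into separate CSV rows. A compact sketch of that flattening idea, with an invented sample incident rather than real VERIS data:

```python
def flatten(node, prefix=""):
    """Collapse nested dicts into dot-separated keys; repeated scalar
    values are collected into lists. Dicts inside lists simply merge here,
    whereas json2csv-1.py expands each combination into its own CSV row."""
    flat = {}
    for key, value in node.items():
        label = f"{prefix}.{key}" if prefix else key
        if isinstance(value, dict):
            flat.update(flatten(value, label))
        elif isinstance(value, list):
            for item in value:
                if isinstance(item, dict):
                    flat.update(flatten(item, label))
                else:
                    flat.setdefault(label, []).append(item)
        else:
            flat[label] = value
    return flat

incident = {"victim": {"country": "US", "industry": "finance"},
            "action": {"variety": ["phishing", "malware"]}}
print(flatten(incident))
# {'victim.country': 'US', 'victim.industry': 'finance',
#  'action.variety': ['phishing', 'malware']}
```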
reedessick/video-game-camp
https://github.com/reedessick/video-game-camp
c83504d63637bc8c2c8f8b4067ec277233b74d4d
09a324279c5ea9de87080f122fe27e1ef83d5373
f2a12bc1435111dd4e2afda02834bb3cd53ed8d8
refs/heads/master
2022-11-06T11:00:32.526460
2020-06-19T16:28:12
2020-06-19T16:28:12
272,338,754
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5257452726364136, "alphanum_fraction": 0.5257452726364136, "avg_line_length": 32.54545593261719, "blob_id": "89d9400ecb2a45ef96d4fcef810e59a31fd549c2", "content_id": "b309eb3a584f2784db5371fbc05cc0280c65c035", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 369, "license_type": "permissive", "max_line_length": 198, "num_lines": 11, "path": "/vgc/BoxBreaker/__main__.py", "repo_name": "reedessick/video-game-camp", "src_encoding": "UTF-8", "text": "\"\"\"A basic game that involves breaking boxes that randomly appear on the screen. This will teach the basics of BoundingBoxes and how to use pygame to interact with the computer to drive the game.\"\"\"\n\n__author__ = \"Reed Essick ([email protected])\"\n\n#-------------------------------------------------\n\nfrom . import game\n\n#-------------------------------------------------\n\ngame.main()\n" }, { "alpha_fraction": 0.7484471797943115, "alphanum_fraction": 0.7515528202056885, "avg_line_length": 21.928571701049805, "blob_id": "4e135855d8d4e99abcb8bce84b1223a38c9d12e8", "content_id": "30532c239695221366f7eeb0a245e75fdeaf4763", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 322, "license_type": "permissive", "max_line_length": 86, "num_lines": 14, "path": "/day4/README.md", "repo_name": "reedessick/video-game-camp", "src_encoding": "UTF-8", "text": "# video-game-camp Day 5\n\n[Camp Overview](../README.md)\n\n*Design*\n\nMake a user-guide that explains how to play your game!\nUse the descriptions you wrote down in your notebook to make a colorful user-guide.\nBe creative!\nMake sure your user-guide looks exciting so that everyone will want to play your game!\n\n*Code*\n\n*Test*\n\n" }, { "alpha_fraction": 0.5812892317771912, "alphanum_fraction": 0.5932686924934387, "avg_line_length": 29.75438690185547, "blob_id": "3594bb71812dc31e94d270bd11b7c5e31411a44e", "content_id": "5c1723941c25c795ba1be807b8ef5443893f9ada", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1753, "license_type": "permissive", "max_line_length": 195, "num_lines": 57, "path": "/vgc/BoxBreaker/characters.py", "repo_name": "reedessick/video-game-camp", "src_encoding": "UTF-8", "text": "\"\"\"a module that defines the characters to be used within BoxBreaker\n\"\"\"\n__author__ = \"Reed Essick ([email protected])\"\n\n#-------------------------------------------------\n\nimport pygame\n\n### non-standard libraries\nfrom vgc import utils\nBoundingBox = utils.BoundingBox\nintersects = utils.intersect\n\n#-------------------------------------------------\n\nDEFAULT_WIDTH = 32\nDEFAULT_HEIGHT = 32\n\nDEFAULT_COLOR = (0, 0, 255)\n\n#-------------------------------------------------\n\nclass Character(BoundingBox):\n \"\"\"a simple extension of BoundingBox that also tracks the character's name and other attributes.\nNOTE, characters are represented with a BoundingBox but they are drawn as circles. 
This means that they may not appear to exactly overlap in the screen even though the code acts as if they do.\"\"\"\n\n def __init__(self, name, x, y, radius=DEFAULT_WIDTH, color=DEFAULT_COLOR):\n self._name = name\n self._radius = radius\n self.invert_color = False\n BoundingBox.__init__(self, x, y, width=2*radius, height=2*radius, color=color)\n\n @property\n def name(self):\n return self._name\n\n @property\n def color(self):\n r, g, b = self._color\n if self.invert_color:\n r = 255 - r\n g = 255 - g\n b = 255 - b\n return (r, g, b)\n\n @property\n def radius(self):\n return self._radius\n\n def draw(self, screen, level):\n pygame.draw.circle(screen, self.color, (self.x_center - level.left, self.y_center - level.bottom), self.radius, 2)\n\nclass Opponent(Character):\n \"\"\"an opponent rather than a character\"\"\"\n\n def draw(self, screen, level):\n pygame.draw.rect(screen, self.color, (self.left - level.left, self.bottom - level.bottom, self.width, self.height))\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 7.25, "blob_id": "363905c1548cbcf88b25f6e9101d63e66f8580dd", "content_id": "4c3606f2c4f653b77c3069f86049e7cd41e51637", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 33, "license_type": "permissive", "max_line_length": 13, "num_lines": 4, "path": "/bin/vgc", "repo_name": "reedessick/video-game-camp", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nclear\npython -m vgc\n" }, { "alpha_fraction": 0.554126501083374, "alphanum_fraction": 0.5723472833633423, "avg_line_length": 34.88461685180664, "blob_id": "7067b2e9c4330da4adfb42dd007c10abb9d6fd61", "content_id": "490b73cfaf809d639d1e12c477bb7999e5930cd2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 933, "license_type": "permissive", "max_line_length": 134, "num_lines": 26, "path": "/vgc/BoxBreaker/levels.py", "repo_name": "reedessick/video-game-camp", "src_encoding": "UTF-8", "text": "\"\"\"a module that defines the levels we will use in BoxBreaker\n\"\"\"\n__author__ = \"Reed Essick ([email protected])\"\n\n#-------------------------------------------------\n\nDEFAULT_GAMEWIDTH = 800\nDEFAULT_GAMEHEIGHT = 800\n\nDEFAULT_COLOR = (255, 255, 255) ### white\n\n#-------------------------------------------------\n\nfrom . 
import characters\n\n#-------------------------------------------------\n\nclass Level(characters.BoundingBox):\n \"a simple Level object that knows how big the level is and what color it should be\"\n\n def __init__(self, width=DEFAULT_GAMEWIDTH, height=DEFAULT_GAMEHEIGHT, color=DEFAULT_COLOR):\n characters.BoundingBox.__init__(self, 0., 0., width=width, height=height, color=color)\n\n def in_bounds(self, other):\n \"\"\"determine whether other is within the board's scope or not\"\"\"\n return (self.left <= other.left) and (self.right >= other.right) and (self.top >= other.top) and (self.bottom <= other.bottom)\n" }, { "alpha_fraction": 0.5851008892059326, "alphanum_fraction": 0.5882048606872559, "avg_line_length": 31.21666717529297, "blob_id": "f5b0c0334a7c08a30029ae177be681688724e975", "content_id": "5f412a68a21c70a6b59104fb498a550e4c5fe13e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1933, "license_type": "permissive", "max_line_length": 227, "num_lines": 60, "path": "/vgc/__main__.py", "repo_name": "reedessick/video-game-camp", "src_encoding": "UTF-8", "text": "\"\"\"a basic menu from which users can navigate to different games they have designed.\n\"\"\"\n\n__author__ = 'Reed Essick ([email protected])'\n\n#-------------------------------------------------\n\nimport sys\nimport inspect\n\n### non-standard libraries\nimport vgc\n\n#-------------------------------------------------\n\ndef print_available_games(games):\n for game in games.keys():\n print(' -- '+game)\n\ndef select_game(games):\n \"\"\"interact with the command line to select a game\"\"\"\n\n Ngames = len(games)\n if Ngames == 0: ### no games available\n print('I\\'m sorry, but there are no games currently available. Please design a game soon so we can get playing!')\n sys.exit(0)\n\n elif Ngames==1:\n print('There is only a single game available!')\n return games.items()[0]\n\n else:\n print('Please tell me which of the following games you would like to play!')\n print_available_games(games)\n selected = raw_input('')\n\n while selected not in games: ### make sure the specified game is available\n print('I\\'m sorry, but I did not understand. Please specify one of the following, or specify \"exit\" to quit')\n print_available_games(games)\n selected = raw_input('')\n\n if selected == 'exit': ### quit\n sys.exit(0)\n\n return selected, games[selected]\n\n#------------------------\n\ndef main():\n \"\"\"the basic function that will be run when this module is called as an executable. This should discover the available games and prompt the user to select which game they would like to play. 
It should then launch that game.\nNote, users should also be able to launch individual games directly by calling the associated modules that live within vgc.\"\"\"\n\n name, game = select_game(vgc.KNOWN_GAMES)\n print('---- Launching: %s -----'%name)\n game.game.main()\n sys.exit(0)\n\n#-------------------------------------------------\n\nmain()\n" }, { "alpha_fraction": 0.7525906562805176, "alphanum_fraction": 0.7564767003059387, "avg_line_length": 37.54999923706055, "blob_id": "c0509f176fd1f0ed159e1cfcfaea5a13e8f86b5b", "content_id": "a4a4c2c21f49bb16db5460e5a313c8158680a0fb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 772, "license_type": "permissive", "max_line_length": 153, "num_lines": 20, "path": "/day2/README.md", "repo_name": "reedessick/video-game-camp", "src_encoding": "UTF-8", "text": "# video-game-camp Day 2\n\n[Camp Overview](../README.md)\n\n*Design*\n\nFind images online (or draw your own!) for the different characters and levels you described on Day 1.\nMake sure to write down where you found your images in your notebook!\n\n*Code*\n\n * Create your characters!\n * Define objects to represent your characters and their special powers. These should extend the BoundingBox class.\n * See if you can make the characters look like you designed them by including the pictures you drew!\n\n*Test*\n\n * Building on the basic game from Day 1, add multiple characters to your game and see if you can make them do things when their bounding-boxes overlap.\n * Can you change which character you control?\n * Can you make multiple characters move at the same time?\n\n" }, { "alpha_fraction": 0.5641977787017822, "alphanum_fraction": 0.5726011395454407, "avg_line_length": 30.782608032226562, "blob_id": "f0bcb0460dc0059a4375d66a77a4e35cfe53a0f1", "content_id": "5aa02063c56db0c8121f610674c61ebdccaae043", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5117, "license_type": "permissive", "max_line_length": 145, "num_lines": 161, "path": "/vgc/BoxBreaker/game.py", "repo_name": "reedessick/video-game-camp", "src_encoding": "UTF-8", "text": "\"\"\"utility functions for BoxBreaker\"\"\"\n\n__author__ = \"Reed Essick ([email protected])\"\n\n#-------------------------------------------------\n\nimport pygame\nfrom pygame.locals import (K_UP, K_DOWN, K_LEFT, K_RIGHT, K_SPACE, KEYDOWN, KEYUP, K_ESCAPE, QUIT)\n\n### non-standard libraries\nfrom . import characters\nfrom . 
import levels\n\nfrom vgc import utils\nrandom_character_placement = utils.random_character_placement\nrandom_color = utils.random_color\n\n#-------------------------------------------------\n\ndef main(fps=30, max_opponents=1000, **kwargs):\n \"\"\"Launch the game and enter the main loop!\"\"\"\n\n print('Welcome to BoxBreaker!')\n pygame.init()\n clock = pygame.time.Clock()\n clock.tick(fps)\n\n total_score = 0\n number_of_opponents = 1\n opponent_respawn_time = 5000\n level = 1\n speed = 1\n while number_of_opponents < max_opponents:\n print('Entering Level %d'%level)\n\n score, running = game(player_speed=speed, number_of_opponents=number_of_opponents, opponent_respawn_time=opponent_respawn_time, **kwargs)\n total_score += score\n\n if running:\n level += 1\n# speed = max(5, speed+1)\n number_of_opponents *= 2\n if (level % 3) == 0:\n opponent_respawn_time = max(3*fps, opponent_respawn_time/2)\n\n else:\n break\n\n print('---> total score: %d <---'%total_score)\n print('Exiting BoxBreaker!')\n pygame.quit()\n\n#------------------------\n\ndef loop(level, player, others, player_speed=1, respawn_time=1000):\n \"\"\"execute the main loop that drives the game\"\"\"\n screen = pygame.display.set_mode([level.width, level.height]) ### set the screen to match the level\n screen.fill(level.color) ### fill in the background color for the level\n\n pressed = dict()\n running = True\n removed = []\n\n respawn = 1\n while running and len(others):\n for event in pygame.event.get():\n if (event.type == QUIT) or (event.type == KEYDOWN and event.key == K_ESCAPE): ### clicked the close button or pressed escape\n running = False\n\n elif event.type == KEYDOWN:\n pressed[event.key] = True\n\n elif event.type == KEYUP:\n pressed[event.key] = False\n\n for key in [key for key, is_pressed in pressed.items() if is_pressed]:\n if key == K_DOWN:\n player.move_up(player_speed, max_y=level.top)\n\n elif key == K_UP:\n player.move_down(player_speed, min_y=level.bottom)\n\n elif key == K_LEFT:\n player.move_left(player_speed, min_x=level.left)\n\n elif key == K_RIGHT:\n player.move_right(player_speed, max_x=level.right)\n\n ### add back opponents if the timed out\n if (respawn % respawn_time) == 0:\n if removed:\n others.append(removed.pop(0))\n respawn += 1\n\n ### logic to interact with boxes\n for i in range(len(others)):\n other = others.pop(0)\n if characters.intersects(player, other):\n other.invert_color = True\n if pressed.get(K_SPACE, False):\n removed.append(other)\n else:\n others.append(other)\n else:\n other.invert_color = False\n others.append(other)\n\n ### make the background the level's color\n screen.fill(level.color)\n \n ### now draw the others to the screen\n for other in others:\n other.draw(screen, level)\n\n ### draw the player on the screen. 
This makes sure that the player is always on top of the others\n player.draw(screen, level)\n\n ### update the screen\n pygame.display.flip()\n\n return len(removed) - respawn//respawn_time, running ### if we are still running (have not quit), we need to convey that\n\n#-------------------------------------------------\n\ndef game(\n player_speed=1,\n number_of_opponents=1,\n opponent_respawn_time=1000,\n gamewidth=levels.DEFAULT_GAMEWIDTH,\n gameheight=levels.DEFAULT_GAMEHEIGHT,\n characterradius=characters.DEFAULT_WIDTH,\n ):\n \"\"\"the first level of the game!\"\"\"\n\n ### create the level\n level = levels.Level(width=gamewidth, height=gameheight, color=levels.DEFAULT_COLOR)\n\n ### create the main character\n player = characters.Character(\n 'Player',\n 0, ### start off in the center of the game\n 0,\n radius=characterradius,\n color=characters.DEFAULT_COLOR,\n )\n\n ### create other characters\n others = []\n\n for i in range(number_of_opponents):\n opponent = characters.Opponent(\n 'Opponent %d'%(i+1),\n random_character_placement(gamewidth, 2*characterradius),\n random_character_placement(gameheight, 2*characterradius),\n radius=characterradius,\n color=random_color(),\n )\n others.append(opponent)\n\n ### enter the game loop\n return loop(level, player, others, respawn_time=opponent_respawn_time, player_speed=player_speed)\n" }, { "alpha_fraction": 0.4804469347000122, "alphanum_fraction": 0.4804469347000122, "avg_line_length": 24.571428298950195, "blob_id": "ff4c188b4df9b77d3453fc202a99c752ce063916", "content_id": "a8064df70d9e349bd3d057b46503bb067c19ac9a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 179, "license_type": "permissive", "max_line_length": 73, "num_lines": 7, "path": "/vgc/BoxBreaker/__init__.py", "repo_name": "reedessick/video-game-camp", "src_encoding": "UTF-8", "text": "\"\"\"a basic game involving bounding boxes and keyboard input via pygame\"\"\"\n\n__author__ = \"Reed Essick ([email protected])\"\n\n#-------------------------------------------------\n\nfrom . 
import game\n" }, { "alpha_fraction": 0.5386533737182617, "alphanum_fraction": 0.5600284934043884, "avg_line_length": 25.48113250732422, "blob_id": "eec1d4449cb5859afbdd2ba5daada17b58705d4f", "content_id": "c5f855977341e1a52b9282c6e60a3f45fb9a188f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2807, "license_type": "permissive", "max_line_length": 118, "num_lines": 106, "path": "/vgc/utils.py", "repo_name": "reedessick/video-game-camp", "src_encoding": "UTF-8", "text": "\"\"\"basic utilities that are helpful in many games\"\"\"\n__author__ = \"Reed Essick ([email protected])\"\n\n#-------------------------------------------------\n\nimport random\n\n#-------------------------------------------------\n\nDEFAULT_WIDTH = 32\nDEFAULT_HEIGHT = 32\n\nDEFAULT_COLOR = (0, 0, 255)\nRANDOM_COLOR = None\n\nDEFAULT_MAX = 2048\nDEFAULT_MIN = 0\n\n#-------------------------------------------------\n\ndef random_character_placement(gamesize, charactersize):\n return (random.random() - 0.5)*(gamesize - charactersize)\n\ndef random_color():\n return (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))\n\n#-------------------------------------------------\n\nclass BoundingBox(object):\n \"a simple BoundingBox object that knows where it is and what color it should be colored\"\n\n def __init__(self, x, y, width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT, color=RANDOM_COLOR):\n self._x = int(x)\n self._y = int(y)\n self._width = width\n self._height = height\n self._color = random_color() if color == RANDOM_COLOR else color\n\n @property\n def area(self):\n return self.width*self.height\n\n @property\n def color(self):\n return self._color\n\n @property\n def x_center(self):\n return self._x\n\n @property\n def y_center(self):\n return self._y\n\n @property\n def width(self):\n return self._width\n\n @property\n def height(self):\n return self._height\n\n @property\n def left(self):\n return int(self._x - self._width/2)\n\n @property\n def right(self):\n return int(self._x + self._width/2)\n\n @property\n def top(self):\n return int(self._y + self._height/2)\n\n @property\n def bottom(self):\n return int(self._y - self._height/2)\n\n def intersects(self, other):\n return intersect(self, other)\n\n def move_left(self, dx, min_x=DEFAULT_MIN):\n self._x = int(max(self._x - dx, min_x))\n\n def move_right(self, dx, max_x=DEFAULT_MAX):\n self._x = int(min(self._x + dx, max_x))\n\n def move_down(self, dy, min_y=DEFAULT_MIN):\n self._y = int(max(self._y - dy, min_y))\n\n def move_up(self, dy, max_y=DEFAULT_MAX):\n self._y = int(min(self._y + dy, max_y))\n\n#-------------------------------------------------\n\ndef is_between(low, x, high):\n \"\"\"Determine whether x is between X1 and X2\"\"\"\n return (low <= x) and (x <= high)\n\ndef lines_overlap(x1, x2, y1, y2):\n return is_between(x1, y1, x2) or is_between(x1, y2, x2) or is_between(y1, x1, y2) or is_between(y1, x2, y2)\n\ndef intersect(B1, B2):\n \"\"\"Determine whether two BoundingBox objects intersect! 
Use the properties of the \n return either True or False\"\"\"\n return lines_overlap(B1.left, B1.right, B2.left, B2.right) and lines_overlap(B1.bottom, B1.top, B2.bottom, B2.top)\n" }, { "alpha_fraction": 0.6784452199935913, "alphanum_fraction": 0.6784452199935913, "avg_line_length": 34.375, "blob_id": "b73b0b33ac9b6a077fcca5868603a4c0792e3eb0", "content_id": "e9473dbb07ad38680befc9fc403ac1fb1b350e05", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 566, "license_type": "permissive", "max_line_length": 229, "num_lines": 16, "path": "/vgc/__init__.py", "repo_name": "reedessick/video-game-camp", "src_encoding": "UTF-8", "text": "\"\"\"Very basic module to house different games. Each game is specified as a submodule, and this top-level interface allows users to select games from a list of automatically discovered games based on the submodules present herein.\n\nWe expect users to call this module via `python -m vgc` to launch their arcades.\n\"\"\"\n\n__author__ = 'Reed Essick ([email protected])'\n\n#-------------------------------------------------\n\nfrom . import BoxBreaker\nfrom . import DinosaursVsAirplanes\n\nKNOWN_GAMES = {\n 'Box Breaker' : BoxBreaker,\n 'Dinosaurs vs Airplanes' : DinosaursVsAirplanes,\n}\n" }, { "alpha_fraction": 0.7201017737388611, "alphanum_fraction": 0.7239185571670532, "avg_line_length": 31.70833396911621, "blob_id": "c299037a3fbd3a73ca860fe6ea7a34621aa35eb1", "content_id": "d03014e613919bb0cb8d31fb931cd66b2e19f34a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 786, "license_type": "permissive", "max_line_length": 98, "num_lines": 24, "path": "/day1/README.md", "repo_name": "reedessick/video-game-camp", "src_encoding": "UTF-8", "text": "# video-game-camp Day 1\n\n[Camp Overview](../README.md)\n\n*Design*\n\nWrite down the following in your notebook\n * Your game's title\n * What players will do within the game\n * What is the goal? How do you get points?\n * Is there a story to go along with your game?\n * What makes the game challenging?\n * At least 3 characters (including what they look like and any special powers they have)\n * At least 3 levels (what you have to do in each)\n\n*Code*\n * Learn how to start the game\n * Learn how to edit the game's source code\n * Write function to determine whether bounding boxes overlap\n\n*Test*\n\n * Play the game with and without the bounding-box overlap function defined.\n * Can you think how you would change the game to make actions occur when bounding boxes overlap?\n\n" }, { "alpha_fraction": 0.5688759088516235, "alphanum_fraction": 0.5807299017906189, "avg_line_length": 33.948978424072266, "blob_id": "f747b5a2a80b3a2bd1fff6688a0f35d806bafb75", "content_id": "8537fe568600f3fd05e5416bb9a7541c2b9538a5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17125, "license_type": "permissive", "max_line_length": 183, "num_lines": 490, "path": "/vgc/DinosaursVsAirplanes/game.py", "repo_name": "reedessick/video-game-camp", "src_encoding": "UTF-8", "text": "\"\"\"main library for the game. 
We define everything in here so that campers don't have to remember which file is which\"\"\"\n__author__ = \"Reed Essick ([email protected])\"\n\n#-------------------------------------------------\n\nimport os\nimport random\n\nimport pygame\nfrom pygame.locals import (K_UP, K_DOWN, K_LEFT, K_RIGHT, K_SPACE, KEYDOWN, KEYUP, K_ESCAPE, QUIT)\n\n### non-standard libraries\nfrom vgc import utils\n\n#-------------------------------------------------\n\nDEFAULT_VIEWWIDTH = 800\nDEFAULT_VIEWHEIGHT = 800\n\nDEFAULT_GAMEWIDTH = 2*DEFAULT_VIEWWIDTH\nDEFAULT_GAMEHEIGHT = 2*DEFAULT_VIEWHEIGHT\n\n#-------------------------------------------------\n\n### DEFINE CHARACTERS, which we alwasys draw to the screen\nclass Player(utils.BoundingBox):\n name = 'Player'\n\n def __init__(self, speed=1, width=utils.DEFAULT_WIDTH, height=utils.DEFAULT_HEIGHT, color=utils.RANDOM_COLOR):\n utils.BoundingBox.__init__(self, 0, 0, width=width, height=height, color=color)\n self._speed = speed\n self._x_velocity = 0\n self._y_velocity = 0\n\n @property\n def speed(self):\n return self._speed\n\n def _randomize_velocity(self):\n self._x_velocity = random.randint(-1, +1)*self.speed\n self._y_velocity = random.randint(-1, +1)*self.speed\n\n def walk(self, min_x=-DEFAULT_GAMEWIDTH, max_x=+DEFAULT_GAMEWIDTH, min_y=-DEFAULT_GAMEHEIGHT, max_y=+DEFAULT_GAMEHEIGHT):\n r = random.random()\n if r < 0.01: ### change x_velocity\n self._randomize_velocity()\n else:\n pass\n self.move(min_x=min_x, max_x=max_x, min_y=min_y, max_y=max_y)\n\n def move(self, min_x=-DEFAULT_GAMEWIDTH, max_x=+DEFAULT_GAMEWIDTH, min_y=-DEFAULT_GAMEHEIGHT, max_y=+DEFAULT_GAMEHEIGHT):\n if self._x_velocity == 0:\n pass\n elif self._x_velocity > 0:\n self.move_right(self._x_velocity, max_x=max_x)\n else:\n self.move_left(-self._x_velocity, min_x=min_x)\n\n if self._y_velocity == 0:\n pass\n elif self._y_velocity > 0:\n self.move_up(self._y_velocity, max_y=max_y)\n else:\n self.move_down(-self._y_velocity, min_y=min_y)\n\n def draw(self, screen, view):\n raise NotImplementedError\n\n#------------------------\n\nclass Dinosaur(Player):\n name = 'Dinosaur'\n\n def __init__(self, *args, **kwargs):\n kwargs['color'] = (0, 0, 0)\n Player.__init__(self, *args, **kwargs)\n\n def draw(self, screen, view):\n \n pygame.draw.ellipse(screen, self.color, (self.left - view.left, self.bottom - view.bottom, self.width/3, self.height/4))\n pygame.draw.line(screen, self.color, (self.left + self.width/8 - view.left, self.bottom - view.bottom), (self.left + self.width/3 - view.left, self.y_center-view.bottom), 4)\n pygame.draw.ellipse(screen, self.color, (self.x_center - self.width/3 - view.left, self.y_center - self.height/8 - view.bottom, self.width*2/3, self.height/4))\n pygame.draw.rect(screen, self.color, (self.x_center - self.width/4 - view.left, self.y_center - view.bottom, self.width/8, self.height/2))\n pygame.draw.rect(screen, self.color, (self.x_center + self.width/8 - view.left, self.y_center - view.bottom, self.width/8, self.height/2))\n pygame.draw.line(screen, self.color, (self.right - self.width/3 - view.left, self.y_center - view.bottom), (self.right - view.left, self.top - self.height/8 - view.bottom), 4)\n\nclass Airplane(Player):\n name = 'Airplane'\n\n def __init__(self, *args, **kwargs):\n kwargs['color'] = (0, 0, 0)\n Player.__init__(self, *args, **kwargs)\n\n def draw(self, screen, view): \n points = [\n (self.left - view.left, self.top - self.height/4 - view.bottom),\n (self.right - view.left, self.top - self.height/4 - view.bottom),\n (self.x_center - 
view.left, self.bottom + self.height/4 - view.bottom),\n ]\n pygame.draw.polygon(screen, self.color, points)\n pygame.draw.ellipse(screen, self.color, (self.x_center - self.width/8 - view.left, self.bottom - view.bottom, self.width/4, self.height))\n\nclass Enemy(Player):\n name = 'Enemy'\n\n def draw(self, screen, view):\n pygame.draw.ellipse(screen, self.color, (self.left - view.left, self.bottom +self.height/4 - view.bottom, self.width, self.height/2), 2)\n pygame.draw.ellipse(screen, self.color, (self.left + self.width/4 - view.left, self.bottom - view.bottom, self.width/2, self.height), 2)\n\n#------------------------\n\nKNOWN_CHARACTERS = {\n Dinosaur.name : Dinosaur,\n Airplane.name : Airplane,\n# Enemy.name : Enemy,\n}\n\n#-------------------------------------------------\n\n### DEFINE OBSTACLES, which we only draw to the screen if they are within the scrren's field of view\n\nclass Obstacle(utils.BoundingBox):\n\n def __init__(self, width=utils.DEFAULT_WIDTH, height=utils.DEFAULT_HEIGHT, color=utils.RANDOM_COLOR):\n utils.BoundingBox.__init__(\n self,\n 0,\n 0,\n width=width,\n height=height,\n color=color,\n )\n self._target = utils.BoundingBox(0, 0, width=width/2, height=height/2, color=color)\n self.invert = False\n\n @property\n def target(self):\n return self._target\n\n def place(self, game):\n self._x = self._target._x = utils.random_character_placement(game.width, self.width)\n self._y = self._target._y = utils.random_character_placement(game.height, self.height)\n\n def draw(self, screen, view):\n if self.intersects(view):\n r, g, b = self.color\n if self.invert:\n r = 255 - r\n g = 255 - g\n b = 255 - b\n pygame.draw.rect(screen, (r, g, b), (self.left - view.left, self.bottom - view.bottom, self.width, self.height), 2)\n pygame.draw.rect(screen, (r, g, b), (self.target.left - view.left, self.target.bottom - view.bottom, self.target.width, self.target.height))\n\n def bounce(self, other):\n if abs(other.x_center - self.x_center) > abs(other.y_center - self.y_center): ### move to the side\n if other.x_center > self.x_center: ### move to the right\n other._x = self.right + other.width/2\n else:\n other._x = self.left - other.width/2\n else: ### move up and down\n if other.y_center > self.y_center:\n other._y = self.top + other.height/2\n else:\n other._y = self.bottom - other.height/2\n\nclass Portal(Obstacle):\n\n def __init__(self, width=utils.DEFAULT_WIDTH, height=utils.DEFAULT_HEIGHT):\n Obstacle.__init__(self, width=width, height=height, color=(150, 0, 150))\n\n @staticmethod\n def transport(box, level):\n box._x = utils.random_character_placement(level.game.width, box.width)\n box._y = utils.random_character_placement(level.game.height, box.height)\n\n def draw(self, screen, view):\n if self.intersects(view):\n pygame.draw.ellipse(screen, self.color, (self.left - self.width/2- view.left, self.bottom - self.height/2 - view.bottom, 2*self.width, 2*self.height), 2)\n pygame.draw.ellipse(screen, self.color, (self.left - self.width/4 - view.left, self.bottom - self.height/4 - view.bottom, self.width*3/2, self.height*3/2), 2)\n pygame.draw.ellipse(screen, self.color, (self.left - view.left, self.bottom - view.bottom, self.width, self.height), 2)\n pygame.draw.ellipse(screen, self.color, (self.target.left - view.left, self.target.bottom - view.bottom, self.target.width, self.target.height))\n\nclass FinishLine(Obstacle):\n\n def __init__(self, width=utils.DEFAULT_WIDTH, height=utils.DEFAULT_HEIGHT):\n Obstacle.__init__(self, width=width, height=height, color=(0, 0, 0))\n\n 
def draw(self, screen, view):\n if self.intersects(view):\n pygame.draw.rect(screen, self.color, (self.left - self.width/2- view.left, self.bottom - self.height/2 - view.bottom, 2*self.width, 2*self.height), 2)\n pygame.draw.rect(screen, self.color, (self.left - self.width/4 - view.left, self.bottom - self.height/4 - view.bottom, self.width*3/2, self.height*3/2), 2)\n pygame.draw.rect(screen, self.color, (self.left - view.left, self.bottom - view.bottom, self.width, self.height), 2)\n pygame.draw.rect(screen, self.color, (self.target.left - view.left, self.target.bottom - view.bottom, self.target.width, self.target.height))\n\n#------------------------\n\nclass Level(object):\n\n background_characters = []\n\n def __init__(\n self,\n obstacles_per_view=10.,\n portals_per_view=1.,\n num_enemies=0,\n viewwidth=DEFAULT_VIEWWIDTH,\n viewheight=DEFAULT_VIEWHEIGHT,\n gamewidth=DEFAULT_GAMEWIDTH,\n gameheight=DEFAULT_GAMEHEIGHT,\n color=(255, 255, 255), ### white\n ):\n ### set up view and total game board\n self._view = utils.BoundingBox(0, 0, width=viewwidth, height=viewheight, color=color)\n self._game = utils.BoundingBox(0, 0, width=gamewidth, height=gameheight, color=None)\n\n ### add randomly placed obstacles\n num_obstacles = int(obstacles_per_view * self.game.area / self.view.area) + 1\n self._obstacles = [Obstacle() for i in range(num_obstacles)]\n for obstacle in self._obstacles:\n obstacle.place(self.game)\n\n ### add randomly placed portals\n num_portals = int(portals_per_view * self.game.area / self.view.area) + 1\n self._portals = [Portal() for i in range(num_portals)]\n for portal in self._portals:\n portal.place(self.game)\n\n ### add enemies\n self._enemies = [Enemy() for i in range(int(num_enemies))]\n\n ### add radomly placed finish line\n self._finishline = FinishLine()\n self._finishline.place(self.game)\n\n @property\n def view(self):\n return self._view\n\n @property\n def game(self):\n return self._game\n\n @property\n def obstacles(self):\n return self._obstacles\n\n @property\n def portals(self):\n return self._portals\n\n @property\n def enemies(self):\n return self._enemies\n\n @property\n def finishline(self):\n return self._finishline\n\n def animate(self):\n for enemy in self.enemies:\n enemy.walk(min_x=self.game.left, max_x=self.game.right, min_y=self.game.bottom, max_y=self.game.top)\n\n for background_character in self.background_characters:\n pass ### move these around the game\n\n def draw(self, screen):\n for background_character in self.background_characters:\n background_character.draw(screen, self.view)\n\n for obstacle in self.obstacles:\n obstacle.draw(screen, self.view)\n\n for portal in self.portals:\n portal.draw(screen, self.view)\n\n for enemy in self.enemies:\n enemy.draw(screen, self.view)\n\n self.finishline.draw(screen, self.view)\n\nclass Level0(Level):\n name = \"Training\"\n win_points = 0\n lose_points = 0\n\nclass Level1(Level):\n name = \"Sacramento, CA\"\n win_points = 10\n lose_points = 5\n\n def __init__(self):\n Level.__init__(\n self,\n obstacles_per_view=10.,\n portals_per_view=1.,\n num_enemies=1.,\n color=(0, 0, 200), ### blue\n )\n\nclass Level2(Level):\n name = \"Under Water\"\n win_points = 10\n lose_points = 15\n\n def __init__(self):\n Level.__init__(\n self,\n obstacles_per_view=20.,\n portals_per_view=2.,\n num_enemies=5.,\n color=(0, 0, 150), ### blue\n )\n \nclass Level3(Level):\n name = \"Chicago, IL\"\n win_points = 15\n lose_points = 10\n\n def __init__(self):\n Level.__init__(\n self,\n obstacles_per_view=30.,\n 
portals_per_view=3.,\n num_enemies=10.,\n color=(100, 100, 100), ### grey\n )\n\n#------------------------\n\nKNOWN_LEVELS = {\n Level0.name : Level0,\n Level1.name : Level1,\n Level2.name : Level2,\n Level3.name : Level3,\n}\n\n#-------------------------------------------------\n\ndef main(fps=30):\n\n print('Welcome to Dinosaurs vs Airplanes!')\n\n # select the character\n if len(KNOWN_CHARACTERS) == 1:\n player = KNOWN_CHARACTERS.keys()[0]\n\n else:\n print('Choose your character:')\n print('\\n'.join(' -- '+key for key in KNOWN_CHARACTERS.keys()))\n player = raw_input('')\n while player not in KNOWN_CHARACTERS:\n print('I did not understand. Please pick one of:')\n print('\\n'.join(' -- '+key for key in KNOWN_CHARACTERS.keys()))\n player = raw_input('')\n\n player = KNOWN_CHARACTERS[player]()\n\n # select level\n if len(KNOWN_LEVELS) == 1:\n level = KNOWN_LEVELS.keys()[0]\n\n else:\n print('Choose your level:')\n print('\\n'.join(' -- '+key for key in KNOWN_LEVELS.keys()))\n level = raw_input('')\n while level not in KNOWN_LEVELS:\n print('I did not understand. Please pick one of:')\n print('\\n'.join(' -- '+key for key in KNOWN_LEVELS.keys()))\n level = raw_input('')\n\n # there is only a single level at the moment, so we just instantiate that\n level = KNOWN_LEVELS[level]()\n\n # init game\n pygame.init()\n clock = pygame.time.Clock()\n clock.tick(fps)\n\n # enter main game loop\n screen = pygame.display.set_mode([level.view.width, level.view.height])\n loop(screen, level, player)\n\n # exit game\n print('Exiting Dinosaurs vs Airplanes!')\n pygame.quit()\n\n#------------------------\n\ndef loop(screen, level, player):\n\n ### figure out how many points you have\n if os.path.exists('.points'):\n with open('.points', 'r') as points_file:\n points = int(points_file.read())\n else:\n points = 200\n\n print('You are starting with %d points'%points)\n\n ### enter the main game loop\n running = True\n pressed = dict()\n while running:\n for event in pygame.event.get():\n if (event.type == QUIT) or (event.type == KEYDOWN and event.key == K_ESCAPE): ### clicked the close button or pressed escape\n running = False\n\n elif event.type == KEYDOWN:\n pressed[event.key] = True\n\n elif event.type == KEYUP:\n pressed[event.key] = False\n\n down = pressed.get(K_DOWN, False)\n up = pressed.get(K_UP, False)\n left = pressed.get(K_LEFT, False)\n right = pressed.get(K_RIGHT, False)\n\n if down and (not up):\n player._y_velocity = +player.speed\n elif up and (not down):\n player._y_velocity = -player.speed\n else:\n player._y_velocity = 0\n\n if left and (not right):\n player._x_velocity = -player.speed\n elif right and (not left):\n player._x_velocity = +player.speed\n else:\n player._x_velocity = 0\n\n # make background characters move\n level.animate()\n\n # move the player\n player.move(min_x=level.game.left, max_x=level.game.right, min_y=level.game.bottom, max_y=level.game.top)\n\n # condition for portals\n for enemy in level.enemies:\n for portal in level.portals:\n if portal.target.intersects(enemy):\n portal.transport(enemy, level)\n break\n\n for portal in level.portals:\n if portal.target.intersects(player): ### randomly scatter the player\n portal.transport(player, level)\n break\n\n # obstacles change color when you run over them\n for enemy in level.enemies:\n for obstacle in level.obstacles:\n if obstacle.target.intersects(enemy):\n obstacle.bounce(enemy) \n break\n \n for obstacle in level.obstacles:\n obstacle.invert = obstacle.intersects(player)\n if obstacle.target.intersects(player):\n 
obstacle.bounce(player)\n break\n\n # condition to exit the game\n if level.finishline.target.intersects(player):\n print('You crossed the finish line! You gained %d points!'%level.win_points)\n running = False\n points += level.win_points\n\n for enemy in level.enemies:\n if level.finishline.target.intersects(enemy):\n print('An enemy crossed the finish line! You lost %d points!'%level.lose_points)\n running = False\n points -= level.lose_points\n\n ### fill in the background color for the screen\n screen.fill(level.view.color)\n\n level.view._x = player.x_center\n level.view._y = player.y_center\n\n level.draw(screen)\n player.draw(screen, level.view)\n\n ### update the screen\n pygame.display.flip()\n\n print('You finished with %d points'%points)\n with open('.points', 'w') as points_file:\n points_file.write('%d'%points)\n" }, { "alpha_fraction": 0.7007874250411987, "alphanum_fraction": 0.7047244310379028, "avg_line_length": 24.299999237060547, "blob_id": "97048a47f4cf28268f98ed79ac3b2d09b21f4ebc", "content_id": "7cf7ad3c4fd648da6a127210d5d2a335ce345223", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 254, "license_type": "permissive", "max_line_length": 64, "num_lines": 10, "path": "/day5/README.md", "repo_name": "reedessick/video-game-camp", "src_encoding": "UTF-8", "text": "# video-game-camp Day 5\n\n[Camp Overview](../README.md)\n\n*TOURNAMENT*\n\nGet your friends and family to try your game!\n * Show them your user-guide\n * Demonstrate how to play the game and what you have learned\n * See who can get the highest score\n\n" }, { "alpha_fraction": 0.739130437374115, "alphanum_fraction": 0.7424749135971069, "avg_line_length": 23.83333396911621, "blob_id": "675fb850c77592806a76589eda9a82385b2b1ac2", "content_id": "3a91d1e9204facfb1aadbe47fffa9f4df343da69", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 299, "license_type": "permissive", "max_line_length": 145, "num_lines": 12, "path": "/day3/README.md", "repo_name": "reedessick/video-game-camp", "src_encoding": "UTF-8", "text": "# video-game-camp Day 3\n\n[Camp Overview](../README.md)\n\n*Design*\n\nFind images online (or draw your own!) for the levels. 
These could be backgrounds or specific items that you have to interact with in each level.\nMake sure to write down where you found your images in your notebook!\n\n*Code*\n\n*Test*\n\n" }, { "alpha_fraction": 0.7530296444892883, "alphanum_fraction": 0.757835328578949, "avg_line_length": 43.719627380371094, "blob_id": "87d7be70ef4c74fc5690657985d09bf88fbec222", "content_id": "4cae5b5f30399f15bdc5fa111fbfd776a43bd8d5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4786, "license_type": "permissive", "max_line_length": 250, "num_lines": 107, "path": "/README.md", "repo_name": "reedessick/video-game-camp", "src_encoding": "UTF-8", "text": "# video-game-camp\n\nWelcome to Video-Game Summer Camp (2020)!\n\nDuring this camp, you will learn the basics of video game design, including how to come up with creative ideas for new games, how to design computer programs to represent those ideas, and how to make the computer run your game.\nThis should be accessible to anyone, even those with no prior programing experience.\n\nThis repository is meant to contain the basic infrastructure that can be used to make many different games.\nYou can either copy this repository many times to create more than one game, or you can create multiple games in this repository.\nWe will show you how to organize your code to make this easy and so that you can continue to develop your games as much or as little as you like at any point in the future (even after camp is over!).\n\n## Playing Games\n\nTo begin playing games, make sure you have installed the code and then run\n```\npython -m vgc\n```\nThis will provide you with an interactive menu of all games currently known to the video-game-camp source code (`vgc`).\nType in the name of the game you'd like to play, and `vgc` will launch that game for you automatically.\n\nIf you know the name of the game you want to play already (for example, `BoxBreaker`), you can launch that game directly with\n```\npython -m vgc.BoxBreaker\n```\n\nThroughout this week-long camp, campers will learn how to develop a new game within the `vgc`/`pygame` architecture.\nWhen camp finishes, they can define more games and build the arcade of their dreams!\n\n## Schedule\n\nDuring the 5 day camp, you will design a video game and program a computer to let you play it.\nThis starts from the very basics of how to make your characters interact within the game on Day 1, to more advanced ideas like how to keep score, how to make your game unpredictable and exciting, and how to add new features later by the end of Day 4.\nDay 5 will be a tournament with your friends and family in which you can show off your new game!\n\n### [Day 1](day1/)\n\n*Design*: Brainstorm the basic aspects of your game!\n\n*Code*: Learn how to play a `vgc` game and the basics of bounding boxes!\n\n*Test*: Begin building your own game! 
Brainstorm more features while exploring bounding box behavior!\n\n### [Day 2](day2/)\n\n*Design*: Create images for your characters!\n\n*Code*: Create your characters!\n\n*Test*: Make your characters interact within your game!\n\n### [Day 3](day3/)\n\n*Design*: Create custom levels!\n\n*Code*: Allow your characters to score points and advance to higher levels!\n\n*Test*: See if you can get your characters to progress through all 3 levels!\n\n### [Day 4](day4/)\n\n*Design*: Make a user-guide that explains how to play your game!\n\n*Code*: ...\n\n*Test*: ...\n\n### [Day 5](day5/)\n\n*TOURNAMENT*: Get your friends and family to try your game!\n\n## Installation\n\nThis library relies upon pygame. On linux systems, this is simple to install with\n```\nsudo apt-get install python-pygame\n```\nThis will discover and install the version of pygame consistent with your default Python interpreter. Mine happens to be Python2.7; yours is likely to be Python3.7 or higher.\n\nOnce that is installed, simply install this library from source via\n```\npython setup.py install --prefix /path/to/install\n```\nMake sure you remember to update your environment to point to your new installation within your `$PYTHONPATH`.\nYou can then launch your arcade via\n```\npython -m vgc\n```\n\n## Architecture\n\nThe video-game-camp (`vgc`) library is structured as a collection of Python modules.\nThe basic `vgc` module registers which games are available and provides a simple menu for users to navigate to the games they want to play.\nIn order for new games to become available, users will have to import them within `~/vgc/__init__.py` (see the example for how to import `BoxBreaker`).\n\nUsers should define their new games as submodules within `~/vgc~`; remember to include `__init__.py` and `__main__.py` modules.\nImportantly, users should define a `game.py` within their game, and there must be a `main()` function defined within `game.py`.\nThis `game.main()` should be imported an called within `__main__.py` so that users can directly launch this game from the command line without navigating the `vgc` menu.\nHowever, we also need the `main()` function to be defined within `game` so that the menu within `vgc` can launch games in a standard way.\nThis additionally requires `game` to be imported within `__init__.py`.\n\nBeyond that, developers can add whatever material they like within their game's subdirectory, including defining other modules to support gameplay.\n`BoxBreaker` provides a very basic example of how this can be done.\n\n## Contact\n\nYou can contact Reed with questions at `[email protected]`.\nPlease feel free to build upon this simple camp with your friends and family!\n\n" }, { "alpha_fraction": 0.6875, "alphanum_fraction": 0.6875, "avg_line_length": 9.666666984558105, "blob_id": "d0ab667d9286cfa2728384fe23b2e8d99d7c837b", "content_id": "edbecb45eddc848fe9a6250e6a9425c696aee623", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 32, "license_type": "permissive", "max_line_length": 18, "num_lines": 3, "path": "/vgc/DinosaursVsAirplanes/__main__.py", "repo_name": "reedessick/video-game-camp", "src_encoding": "UTF-8", "text": "from . import game\n\ngame.main()\n" } ]
17
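The collision test at the heart of `vgc/utils.py` reduces to interval overlap checked once per axis. A pygame-free restatement of that logic, with `Box` as a stand-in namedtuple for the `BoundingBox` properties it reads:

```python
from collections import namedtuple

Box = namedtuple("Box", "left right bottom top")

def lines_overlap(x1, x2, y1, y2):
    # Two 1-D intervals [x1, x2] and [y1, y2] overlap when either contains
    # an endpoint of the other -- the same four checks utils.py makes.
    return (x1 <= y1 <= x2) or (x1 <= y2 <= x2) or (y1 <= x1 <= y2) or (y1 <= x2 <= y2)

def intersect(a, b):
    # Boxes intersect only if they overlap on both axes independently.
    return (lines_overlap(a.left, a.right, b.left, b.right)
            and lines_overlap(a.bottom, a.top, b.bottom, b.top))

player = Box(left=0, right=32, bottom=0, top=32)
crate = Box(left=24, right=56, bottom=24, top=56)
print(intersect(player, crate))               # True: corners overlap
print(intersect(player, Box(40, 72, 0, 32)))  # False: separated on the x axis
```

Checking each axis independently is what lets the game treat every sprite as an axis-aligned box, whatever shape is actually drawn — which is exactly the caveat `characters.py` notes about circular characters.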
lateralblast/goat
https://github.com/lateralblast/goat
798fdceb6dc370d360193439ccac5fe9559c3e17
d1ec470d30486776806abc8b520715cafdd1be9a
d81f4fadab691d06fa8107630b036eea4cb1e0da
refs/heads/master
2023-04-07T23:49:59.744969
2023-03-16T05:42:54
2023-03-16T05:42:54
189,180,977
0
2
null
2019-05-29T08:13:35
2019-05-31T02:05:50
2019-05-31T22:35:52
Python
[ { "alpha_fraction": 0.6094755530357361, "alphanum_fraction": 0.6170703172683716, "avg_line_length": 32.270870208740234, "blob_id": "9ec846a54f8e96f5e9f044eecdb717dc21eb62ff", "content_id": "1cf62445d4f771a2623ced97f1da7d7e5caefb5d", "detected_licenses": [ "CC-BY-4.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 58593, "license_type": "permissive", "max_line_length": 204, "num_lines": 1761, "path": "/goat.py", "repo_name": "lateralblast/goat", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\n# Name: goat (General OOB Automation Tool)\n# Version: 0.5.1\n# Release: 1\n# License: CC-BA (Creative Commons By Attribution)\n# http://creativecommons.org/licenses/by/4.0/legalcode\n# Group: System\n# Source: N/A\n# URL: N/A\n# Distribution: UNIX\n# Vendor: Lateral Blast\n# Packager: Richard Spindler <[email protected]>\n# Description: Script to drive OOB management interfaces\n\n# Import modules\n\nimport urllib.request\nimport subprocess\nimport platform\nimport argparse\nimport binascii\nimport hashlib\nimport getpass\nimport socket\nimport time\nimport sys\nimport os\nimport re\n\nfrom os.path import expanduser\n\n# Set some defaults\n\nverbose_mode = False\nmesh_port = \"3000\"\npassword_db = \"goatpass\"\nhome_dir = expanduser(\"~\")\ndefault_user = \"admin\"\n\n# Check we have pip installed\n\ntry:\n from pip._internal import main\nexcept ImportError:\n os.system(\"easy_install pip\")\n os.system(\"pip install --upgrade pip\")\n\n# Install and import a Python module\n\ndef install_and_import(package):\n import importlib\n try:\n importlib.import_module(package)\n except ImportError:\n command = \"python3 -m pip install --user %s\" % (package)\n os.system(command)\n finally:\n globals()[package] = importlib.import_module(package)\n\n# Load selenium\n\ntry:\n from selenium import webdriver\nexcept ImportError:\n install_and_import(\"selenium\")\n from selenium import webdriver\n\n# Load bs4\n\ntry:\n from bs4 import BeautifulSoup\nexcept ImportError:\n install_and_import(\"bs4\")\n from bs4 import BeautifulSoup\n\n# Load lxml\n\ntry:\n import lxml\nexcept ImportError:\n install_and_import(\"lxml\")\n import lxml\n\nfrom lxml import etree\n\n# Load wget\n\ntry:\n import wget\nexcept ImportError:\n install_and_import(\"wget\")\n import wget\n\n# Load paramiko\n\ntry:\n import paramiko\nexcept ImportError:\n install_and_import(\"paramiko\")\n import paramiko\n\n# Load pexpect\n\ntry:\n import pexpect\nexcept ImportError:\n install_and_import(\"pexpect\")\n import pexpect\n\nscript_exe = sys.argv[0]\nscript_dir = os.path.dirname(script_exe)\nuname_arch = subprocess.check_output(\"uname -m\", shell=True).decode().strip()\nmeshcmd_bin = \"%s/meshcmd.%s\" % (script_dir, uname_arch)\n\n# Print help\n\ndef print_help(script_exe):\n print(\"\\n\")\n command = \"%s -h\" % (script_exe)\n os.system(command)\n print(\"\\n\")\n\n# Read a file into an array\n\ndef file_to_array(file_name):\n file_data = open(file_name)\n file_array = file_data.readlines()\n return file_array\n\n# If we have no command line arguments print help\n\nif sys.argv[-1] == sys.argv[0]:\n print_help(script_exe)\n exit()\n\n# Get command line arguments\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--ip\", required=False) # Specify IP of OOB/Remote Management interface\nparser.add_argument(\"--username\", required=False) # Set Username\nparser.add_argument(\"--type\", required=False) # Set Type of OOB device\nparser.add_argument(\"--get\", required=False) # Get 
Parameter\nparser.add_argument(\"--password\", required=False) # Set Password\nparser.add_argument(\"--search\", required=False) # Search output for value\nparser.add_argument(\"--avail\", required=False) # Get available version from vendor (e.g. BIOS)\nparser.add_argument(\"--check\", required=False) # Check current version against available version from vendor (e.g. BIOS)\nparser.add_argument(\"--model\", required=False) # Specify model (can be used with --avail)\nparser.add_argument(\"--port\", required=False) # Specify port to run service on\nparser.add_argument(\"--power\", required=False) # Set power state (on, off, reset)\nparser.add_argument(\"--hostname\", required=False) # Set hostname\nparser.add_argument(\"--gateway\", required=False) # Set gateway\nparser.add_argument(\"--netmask\", required=False) # Set netmask\nparser.add_argument(\"--outlet\", required=False) # Set outlet\nparser.add_argument(\"--domainname\", required=False) # Set domainname\nparser.add_argument(\"--primarydns\", required=False) # Set primary DNS\nparser.add_argument(\"--secondarydns\", required=False) # Set secondary DNS\nparser.add_argument(\"--primarysyslog\", required=False) # Set primary Syslog\nparser.add_argument(\"--secondarysyslog\", required=False) # Set secondary Syslog\nparser.add_argument(\"--syslogport\", required=False) # Set Syslog port\nparser.add_argument(\"--primaryntp\", required=False) # Set primary NTP\nparser.add_argument(\"--secondaryntp\", required=False) # Set secondary NTP\nparser.add_argument(\"--meshcmd\", required=False) # Run Meshcmd\nparser.add_argument(\"--group\", required=False) # Set group\nparser.add_argument(\"--parameter\", required=False) # Set parameter\nparser.add_argument(\"--value\", required=False) # Set value\nparser.add_argument(\"--boot\", required=False) # Set boot device\nparser.add_argument(\"--file\", required=False) # File to read in (e.g. 
iDRAC values)\nparser.add_argument(\"--set\", action='store_true') # Set value\nparser.add_argument(\"--kill\", action='store_true') # Stop existing session\nparser.add_argument(\"--version\", action='store_true') # Display version\nparser.add_argument(\"--insecure\", action='store_true') # Use HTTP/Telnet\nparser.add_argument(\"--verbose\", action='store_true') # Enable verbose output\nparser.add_argument(\"--debug\", action='store_true') # Enable debug output\nparser.add_argument(\"--dryrun\", action='store_true') # Dry run\nparser.add_argument(\"--mask\", action='store_true') # Mask serial and hostname output\nparser.add_argument(\"--meshcommander\", action='store_true') # Use Meshcommander\nparser.add_argument(\"--meshcentral\", action='store_true') # Use Meshcentral\nparser.add_argument(\"--options\", action='store_true') # Display options information\nparser.add_argument(\"--allhosts\", action='store_true') # Automate via .goatpass\nparser.add_argument(\"--sol\", action='store_true') # Start a SOL connection to host\nparser.add_argument(\"--download\", action='store_true') # Download BIOS\n\noption = vars(parser.parse_args())\n\n# Print version\n\ndef print_version(script_exe):\n file_array = file_to_array(script_exe)\n version = list(filter(lambda x: re.search(r\"^# Version\", x), file_array))[0].split(\":\")[1]\n version = re.sub(r\"\\s+\", \"\", version)\n print(version)\n\n# Print options\n\ndef print_options(script_exe):\n file_array = file_to_array(script_exe)\n opts_array = list(filter(lambda x:re.search(r\"add_argument\", x), file_array))\n print(\"\\nOptions:\\n\")\n for line in opts_array:\n line = line.rstrip()\n if re.search(r\"#\", line):\n option = line.split('\"')[1]\n info = line.split(\"# \")[1]\n if len(option) < 8:\n string = \"%s\\t\\t\\t%s\" % (option, info)\n else:\n if len(option) < 16:\n string = \"%s\\t\\t%s\" % (option, info)\n else:\n string = \"%s\\t%s\" % (option, info)\n print(string)\n print(\"\\n\")\n\n# Check IP\n\ndef check_valid_ip(ip):\n if not re.search(r\"[a-z]\", ip):\n try:\n socket.inet_pton(socket.AF_INET, ip)\n except AttributeError:\n try:\n socket.inet_aton(ip)\n except socket.error:\n return False\n return ip.count('.') == 3\n except socket.error: # not a valid address\n return False\n else:\n return True\n\n# Check host is up\n\ndef check_ping(ip):\n try:\n output = subprocess.check_output(\"ping -{} 1 {}\".format('n' if platform.system().lower()==\"windows\" else 'c', ip), shell=True)\n except Exception:\n string = \"Warning:\\tHost %s not responding\" % (ip)\n handle_output(string)\n return False\n return True\n\n# Hash a password for storing\n\ndef hash_password(password):\n salt = hashlib.sha256(os.urandom(60)).hexdigest().encode('ascii')\n pwdhash = hashlib.pbkdf2_hmac('sha512', password.encode('utf-8'), salt, 100000)\n pwdhash = binascii.hexlify(pwdhash)\n return (salt + pwdhash).decode('ascii')\n\n# Verify a stored password against one provided by user\n\ndef verify_password(stored_password, provided_password):\n salt = stored_password[:64]\n stored_password = stored_password[64:]\n pwdhash = hashlib.pbkdf2_hmac('sha512',\n provided_password.encode('utf-8'),\n salt.encode('ascii'), 100000)\n pwdhash = binascii.hexlify(pwdhash).decode('ascii')\n return pwdhash == stored_password\n\n# Download file\n\ndef download_file(link, file):\n if not os.path.exists(file):\n string = \"Downloading %s to %s\" % (link, file)\n handle_output(string)\n wget.download(link, file)\n return\n\n# Get AMT value from web\n
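\n# Note: this scrapes the Intel Download Center search results with Selenium, so it depends on that page's current HTML layout.\ndef get_web_amt_value(avail, model, driver, 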
download):\n if avail == \"bios\":\n found = False\n base_url = \"https://downloadcenter.intel.com\"\n full_url = \"%s/search?keyword=%s\" % (base_url, model)\n driver.get(full_url)\n html_doc = driver.page_source\n html_doc = BeautifulSoup(html_doc, features='lxml')\n html_data = html_doc.find_all('td')\n for html_line in html_data:\n html_text = str(html_line)\n if debug_mode == True:\n handle_output(html_text)\n if re.search(\"BIOS Update\", html_text):\n link_stub = BeautifulSoup(html_text, features='lxml').a.get(\"href\")\n bios_url = \"%s/%s\" % (base_url, link_stub)\n found = True\n if re.search(\"Latest\", html_text) and found == True:\n version = BeautifulSoup(html_text, features='lxml').get_text()\n version = re.sub(\"Latest\", \"\", version)\n string = \"Available version: %s\" % (version)\n handle_output(string)\n string = \"BIOS Download link: %s\" % (bios_url)\n handle_output(string)\n if download == True:\n from selenium.webdriver.common.by import By\n driver.get(bios_url) \n html = driver.page_source\n html = BeautifulSoup(html, features='lxml')\n html = html.findAll(\"a\", text=re.compile(r\"\\.bio\"))[0]\n html = str(html)\n link = html.split('\"')[3]\n file = os.path.basename(link)\n download_file(link, file)\n driver.quit()\n return version\n driver.quit()\n return\n\n# Handle output\n\ndef handle_output(output):\n if mask_mode == True:\n if re.search(r\"serial|address|host|id\", output.lower()):\n if re.search(\":\", output):\n param = output.split(\":\")[0]\n output = \"%s: XXXXXXXX\" % (param)\n print(output)\n return\n\n# Set SEP (ServerEdge PDU) value\n\ndef set_sep_power(power, ip, outlet, username, password, driver, http_proto):\n if http_proto == \"http\":\n port_no = \"80\"\n else:\n port_no = \"443\"\n base_url = \"%s://%s:%s@%s:%s\" % (http_proto, username, password, ip, port_no)\n full_url = \"%s/outlet.htm\" % (base_url)\n if verbose_mode == True:\n string = \"Information:\\tConnecting to: %s\" % (full_url)\n handle_output(string)\n alert = driver.get(full_url)\n html_doc = driver.page_source\n if re.search(r\"on\", power.lower()):\n button_id = \"T18\"\n button_name = \"B5\"\n if re.search(r\"off\", power.lower()):\n button_id = \"T19\"\n button_name = \"B6\"\n if re.search(r\"offon|cycle|onoff|reset\", power.lower()):\n button_id = \"T21\"\n button_name = \"T21\"\n if re.search(r\"all\", outlet.lower()):\n check_box_id = \"C0\"\n check_box_name = \"C0\"\n if re.search(r\"a$|1\", outlet.lower()):\n check_box_id = \"C11\"\n check_box_name = \"C11\"\n if re.search(r\"b$|2\", outlet.lower()):\n check_box_id = \"C12\"\n check_box_name = \"C12\"\n if re.search(r\"c$|3\", outlet.lower()):\n check_box_id = \"C13\"\n check_box_name = \"C13\"\n if re.search(r\"d$|4\", outlet.lower()):\n check_box_id = \"C14\"\n check_box_name = \"C14\"\n if re.search(r\"e$|5\", outlet.lower()):\n check_box_id = \"C15\"\n check_box_name = \"C15\"\n if re.search(r\"f$|6\", outlet.lower()):\n check_box_id = \"C16\"\n check_box_name = \"C16\"\n if re.search(r\"g$|7\", outlet.lower()):\n check_box_id = \"C17\"\n check_box_name = \"C17\"\n if re.search(r\"h$|8\", outlet.lower()):\n check_box_id = \"C18\"\n check_box_name = \"C18\"\n from selenium.webdriver.common.by import By\n check_box = driver.find_element(By.NAME, check_box_name)\n check_box.click()\n power_button = driver.find_element(By.NAME, button_name)\n power_button.click()\n alert = driver.switch_to.alert\n accept = alert.accept()\n return\n\n# Get SEP (ServerEdge PDU) value\n\ndef get_sep_value(get_value, ip, username, 
password, driver, http_proto, search):\n if http_proto == \"http\":\n port_no = \"80\"\n else:\n port_no = \"443\"\n base_url = \"%s://%s:%s@%s:%s\" % (http_proto, username, password, ip, port_no)\n if re.search(\"outlet|status\", get_value):\n full_url = \"%s/status.xml\" % (base_url)\n if re.search(\"outlet|status\", get_value):\n if verbose_mode == True:\n string = \"Information:\\tConnecting to: %s\" % (full_url)\n handle_output(string)\n alert = driver.get(full_url)\n html_doc = driver.page_source\n html_doc = BeautifulSoup(html_doc, features='lxml')\n html_string = str(html_doc)\n html_lines = html_string.split(\"\\n\")\n counter = 1\n outlet = \"A\"\n for html_line in html_lines:\n if re.search(\"pot0\", html_line):\n values = html_line.split(\",\")\n while counter < 9:\n amps = values[1+counter]\n status = values[9+counter]\n if int(status) == 1:\n status = \"ON \"\n else:\n status = \"OFF\"\n string = \"Outlet %s: %s (%s)\" % (outlet, status, amps)\n if re.search(r\"[a-z]\", search.lower()):\n if search.lower() in string.lower():\n print(string)\n else:\n print(string)\n outlet = ord(outlet)\n counter = counter+1\n outlet = outlet+1\n outlet = chr(outlet)\n else:\n if not re.search(r\"[a-z]\", search.lower()):\n search = get_value\n if re.search(\"info|output|overload|warning\", search):\n full_url = \"%s/index.htm\" % (base_url)\n if re.search(\"system|firmware|model|mac|systemname|contact|location\", search):\n full_url = \"%s/system.htm\" % (base_url)\n if re.search(\"ssl|snmp|mail|threshold|net$|id|pdu\", search):\n full_url = \"%s/config%s.htm\" % (base_url, search)\n if re.search(\"hostname|ipaddress|gateway|primary|secondary\", search):\n full_url = \"%s/confignet.htm\" % (base_url)\n if re.search(\"receiver\", search):\n full_url = \"%s/configsnmp.htm\" % (base_url)\n if verbose_mode == True:\n string = \"Information:\\tConnecting to: %s\" % (full_url)\n handle_output(string)\n alert = driver.get(full_url)\n html_doc = driver.page_source\n html_doc = BeautifulSoup(html_doc, features='lxml')\n if search == \"systemname\":\n search = \"system name\"\n if search == \"ipaddress\":\n search = \"ip address\"\n if search == \"hostname\":\n search = \"host name\"\n if re.search(\"primary\", search):\n search = \"primary dns\"\n if re.search(\"secondary\", search):\n search = \"secondary dns\"\n if re.search(\"receiver\", search):\n search = \"receiver ip\"\n counter = 0\n html_string = str(html_doc)\n html_lines = html_string.split(\"\\n\")\n for html_line in html_lines:\n if re.search(r\"{}\".format(search), html_line.lower()) and not re.search(\"confirm\", html_line.lower()):\n next_line = html_lines[counter+1]\n if not re.search(\"value=\", next_line):\n test_line = html_lines[counter+2]\n if re.search(\"value=\", test_line):\n next_line = html_lines[counter+2]\n else:\n if re.search(r\"^\\<\", next_line):\n next_line = html_lines[counter+2]\n if re.search(\"value=\", next_line):\n value = next_line.split(\"value=\")[1]\n value = value.split('\"')[1]\n else:\n html = BeautifulSoup(next_line, features='lxml')\n value = html.text\n print(value)\n return\n counter = counter+1\n\n# Get AMT value\n\ndef get_amt_value(get_value, ip, username, password, driver, http_proto, search):\n sub_value = \"\"\n if not re.search(r\"[A-Z]|[a-z]|[0-9]\", search):\n search = \"\"\n if get_value == \"bios\":\n get_value = \"system\"\n sub_value = \"bios\"\n if not re.search(r\"[A-Z]|[a-z]|[0-9]\", search):\n search = \"Version\"\n if get_value == \"model\":\n get_value = \"system\"\n sub_value = 
\"model\"\n if get_value == \"serial\":\n get_value = \"system\"\n sub_value = \"serial\"\n if http_proto == \"http\":\n port_no = \"16992\"\n else:\n port_no = \"16993\"\n base_url = \"%s://%s:%s@%s:%s\" % (http_proto, username, password, ip, port_no)\n full_url = \"%s/index.htm\" % (base_url)\n if re.search(\"model|version|serial|release|system\", get_value):\n full_url = \"%s/hw-sys.htm\" % (base_url)\n if re.search(\"disk\", get_value):\n full_url = \"%s/hw-disk.htm\" % (base_url)\n if re.search(\"network\", get_value):\n full_url = \"%s/ip.htm\" % (base_url)\n if re.search(\"memory\", get_value):\n full_url = \"%s/hw-mem.htm\" % (base_url)\n if re.search(r\"events|fqdn\", get_value):\n full_url = \"%s/%s.htm\" % (base_url, get_value)\n if re.search(\"remote|power\", get_value):\n full_url = \"%s/remote.htm\" % (base_url)\n get_value = re.sub(\"power\", \"state\", get_value)\n if re.search(\"processor|cpu|socket|family|manufacturer|speed\", get_value):\n full_url = \"%s/hw-proc.htm\" % (base_url)\n get_value = re.sub(\"cpu\", \"version\", get_value)\n if verbose_mode == True:\n string = \"Information:\\tConnecting to: %s\" % (full_url)\n handle_output(string)\n alert = driver.get(full_url)\n html_doc = driver.page_source\n html_doc = BeautifulSoup(html_doc, features='lxml')\n html_data = html_doc.find_all('td', 'maincell')\n if re.search(r\"state\", get_value):\n html_data = str(html_data).split(\"<td>\")\n else:\n html_data = str(html_data).split(\"<tr>\")\n new_data = []\n for html_line in html_data:\n temp_data = html_line.split(\"\\n\")\n for temp_line in temp_data:\n if not re.search(\"hidden\", temp_line):\n new_data.append(temp_line)\n html_data = new_data\n results = []\n if re.search(\"processor|system|memory|disk|event|fqdn|network\", get_value):\n temp_data = []\n counter = 0\n for html_line in html_data:\n html_text = str(html_line)\n if debug_mode == True:\n handle_output(html_text)\n if not re.search(r\"hidden|onclick|colspan\", html_text):\n html_text = re.sub(r\"^\\<\\/td\\>\", \"\", html_text)\n html_text = re.sub(r\"\\<br\\/\\>\", \",\", html_text)\n plain_text = BeautifulSoup(html_text, features='lxml').get_text()\n plain_text = re.sub(r\"\\s+\", \" \", plain_text)\n plain_text = re.sub(r\"^ | $\", \"\", plain_text)\n if re.search(\"event\", get_value):\n if re.search(\"border=\", html_text):\n if counter == 5:\n temp_data.append(plain_text)\n else:\n temp_text = (\",\").join(temp_data)\n if re.search(r\"[A-Z]|[a-z]|[0-9]\", plain_text):\n results.append(temp_text)\n temp_data = []\n temp_data.append(plain_text)\n else:\n if re.search(r\"[A-Z]|[a-z]|[0-9]\", plain_text):\n temp_data.append(plain_text)\n else:\n if re.search(r\"\\<\\/h1\\>|\\<\\/h2\\>\", html_text):\n results.append(plain_text)\n else:\n if re.search(r\"\\<\\/p\\>\", html_text):\n if re.search(\"checkbox\", html_text):\n param = plain_text\n if re.search(\"checked\", html_text):\n value = \"Yes\"\n else:\n value = \"No\"\n else:\n param = plain_text\n html = html_data[counter+1]\n html = str(html)\n html = re.sub(r\"^\\<\\/td\\>\", \"\", html)\n text = BeautifulSoup(html, features='lxml').get_text()\n if re.search(\"value=\", html) and not re.search(r\"[A-Z]|[a-z]|[0-9]\", text):\n value = html.split('\"')[-2]\n else:\n value = text\n if not re.search(r\"[A-Z]|[a-z]|[0-9]\", value):\n html = html_data[counter+2]\n html = str(html)\n html = re.sub(r\"^\\<\\/td\\>\", \"\", html)\n text = BeautifulSoup(html, features='lxml').get_text()\n if re.search(\"value=\", html) and not 
re.search(r\"[A-Z]|[a-z]|[0-9]\", text):\n value = html.split('\"')[-2]\n else:\n value = text\n plain_text = \"%s: %s\" % (param, value)\n plain_text = re.sub(\"::\", \":\", plain_text)\n plain_text = re.sub(r\"\\s+$\", \"\", plain_text)\n plain_text = re.sub(r\":$\", \"\", plain_text)\n if re.search(r\"[A-Z]|[a-z]|[0-9]\", plain_text):\n results.append(plain_text)\n counter = counter+1\n if re.search(\"processor|system|memory|disk|event|fqdn|network\", get_value):\n found = False\n for result in results:\n if debug_mode == True:\n handle_output(result)\n if re.search(r\"[a-z]\", sub_value):\n if re.search(sub_value, result.lower()):\n found = True\n if re.search(r\"[A-Z]|[a-z]|[0-9]\", search):\n if re.search(search, result) and found == True:\n handle_output(result)\n if re.search(r\":\", result):\n result = result.split(\": \")[1]\n return(result)\n else:\n if re.search(sub_value, result.lower()):\n handle_output(result)\n if re.search(r\":\", result):\n result = result.split(\": \")[1]\n return(result)\n else:\n if re.search(r\"[A-Z]|[a-z]|[0-9]\", search):\n if re.search(search, result):\n handle_output(result)\n else:\n handle_output(result)\n driver.quit()\n return\n\n# Set AMT value\n\ndef set_amt_value(ip, username, password, driver, http_proto, hostname, domainname, primarydns, secondarydns, power):\n if http_proto == \"http\":\n port_no = \"16992\"\n else:\n port_no = \"16993\"\n base_url = \"%s://%s:%s@%s:%s\" % (http_proto, username, password, ip, port_no)\n if re.search(r\"[a-z]\", hostname) or re.search(r\"[a-z]\", domainname):\n full_url = \"%s/fqdn.htm\" % (base_url)\n if re.search(r\"[a-z]\", hostname):\n search = \"HostName\"\n driver.get(full_url)\n from selenium.webdriver.common.by import By\n field = driver.find_element_by_name(search)\n field.clear()\n field.send_keys(hostname)\n string = \"Information:\\tSetting Hostname to %s\" % (hostname)\n handle_output(string)\n driver.find_element_by_xpath('//input[@value=\" Submit \"]').click()\n if re.search(r\"[a-z]\", domainname):\n search = \"DomainName\"\n driver.get(full_url)\n from selenium.webdriver.common.by import By\n field = driver.find_element_by_name(search)\n field.clear()\n field.send_keys(domainname)\n string = \"Information:\\tSetting Domainname to %s\" % (domainname)\n handle_output(string)\n driver.find_element_by_xpath('//input[@value=\" Submit \"]').click()\n if re.search(r\"[a-z,0-9]\", primarydns) or re.search(r\"[a-z,0-9]\", secondarydns):\n full_url = \"%s/ip.htm\" % (base_url)\n if re.search(r\"[a-z,0-9]\", primarydns):\n search = \"DNSServer\"\n driver.get(full_url)\n from selenium.webdriver.common.by import By\n field = driver.find_element_by_name(search)\n field.clear()\n field.send_keys(primarydns)\n string = \"Information:\\tSetting Primary DNS to %s\" % (primarydns)\n handle_output(string)\n driver.find_element_by_xpath('//input[@value=\" Submit \"]').click()\n if re.search(r\"[a-z,0-9]\", secondarydns):\n search = \"AlternativeDns\"\n driver.get(full_url)\n from selenium.webdriver.common.by import By\n field = driver.find_element_by_name(search)\n field.clear()\n field.send_keys(secondarydns)\n string = \"Information:\\tSetting Secondary DNS to %s\" % (secondarydns)\n handle_output(string)\n driver.find_element_by_xpath('//input[@value=\" Submit \"]').click()\n
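 # The AMT remote-control form uses radio values 1 (power off), 3 (power cycle) and 4 (reset).\n if re.search(r\"[a-z]\", power):\n full_url = \"%s/remote.htm\" % (base_url)\n if re.search(r\"off\", power):\n driver.find_element_by_xpath('//input[@value=\"1\"]').click()\n if re.search(r\"cycle\", power):\n 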
driver.find_element_by_xpath('//input[@value=\"3\"]').click()\n if re.search(r\"reset\", power):\n driver.find_element_by_xpath('//input[@value=\"4\"]').click()\n driver.get(full_url)\n from selenium.webdriver.common.by import By\n driver.find_element_by_xpath('//input[@value=\"Send Command\"]').click()\n time.sleep(2)\n object = driver.switch_to.alert\n time.sleep(2)\n object.accept()\n string = \"Information:\\tSending power %s to %s (Intel AMT has a 30s pause before operation is done)\" % (power, ip)\n handle_output(string)\n driver.quit()\n return\n\n# Compare versions\n\ndef compare_versions(bios, avail, oob_type):\n if oob_type == \"amt\":\n if re.search(r\"\\.\", bios):\n current = bios.split(\".\")[2]\n if avail > current:\n handle_output(\"Information:\\tNewer version of BIOS available\")\n if avail == current:\n handle_output(\"Information:\\tLatest version of BIOS installed\")\n return\n\n# Get console output\n\ndef get_console_output(command):\n if verbose_mode:\n string = \"Executing:\\t%s\" % (command)\n handle_output(string)\n process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, )\n output = process.communicate()[0].decode()\n if verbose_mode:\n string = \"Output:\\t\\t%s\" % (output)\n handle_output(string)\n return output\n\n# Check local config\n\ndef check_local_config():\n pkg_list = [ \"geckodriver\", \"amtterm\", \"npm\", \"ipmitool\" ]\n output = get_console_output(\"uname -a\")\n if re.search(\"Darwin\", output):\n if os.path.exists(\"/usr/local/bin/brew\"):\n pkg_dir = \"/usr/local/bin\"\n else:\n if os.path.exists(\"/opt/homebrew/bin/brew\"):\n pkg_dir = \"/opt/homebrew/bin\"\n brew_bin = \"%s/brew\" % (pkg_dir)\n for pkg_name in pkg_list:\n pkg_bin = \"%s/%s\" % (pkg_dir, pkg_name)\n if not os.path.exists(pkg_bin):\n command = \"%s install %s\" % (brew_bin, pkg_name)\n output = get_console_output(command)\n return\n\n# Check mesh config\n\ndef check_mesh_config(mesh_bin):\n l_mesh_dir = \"./%s\" % (mesh_bin)\n l_mesh_bin = \"./%s/%s\" % (mesh_bin, mesh_bin)\n g_mesh_dir = \"/usr/local/lib/node_modules/%s\" % (mesh_bin)\n g_mesh_bin = \"/usr/local/lib/node_modules/%s/%s\" % (mesh_bin, mesh_bin)\n l_node_dir = \"./%s/node_modules/%s\" % (mesh_bin, mesh_bin)\n g_node_dir = \"/usr/local/lib/node_modules/%s\" % (mesh_bin)\n if not os.path.exists(l_mesh_bin) and not os.path.exists(g_mesh_bin):\n if not os.path.exists(l_mesh_dir):\n os.mkdir(l_mesh_dir)\n command = \"cd %s ; npm install %s\" % (l_mesh_dir, mesh_bin)\n output = get_console_output(command)\n if verbose_mode == True:\n handle_output(output)\n return\n\n# Start MeshCommander\n\ndef start_mesh(mesh_bin, mesh_port):\n l_node_dir = \"./%s/node_modules/%s\" % (mesh_bin, mesh_bin)\n g_node_dir = \"/usr/local/lib/node_modules/%s\" % (mesh_bin)\n if os.path.exists(l_node_dir):\n command = \"cd %s ; node %s --port %s\" % (l_node_dir, mesh_bin, mesh_port)\n os.system(command)\n else:\n if os.path.exists(g_node_dir):\n command = \"cd %s ; node %s --port %s\" % (g_node_dir, mesh_bin, mesh_port)\n os.system(command)\n else:\n string = \"%s not installed\" % (mesh_bin)\n handle_output(string)\n return\n\ndef get_ips():\n ips = []\n pass_file = \"%s/.%s\" % (home_dir, password_db)\n if os.path.exists(pass_file):\n file = open(pass_file, \"r\")\n data = file.readlines()\n for line in data:\n line = line.rstrip()\n file_ip = line.split(\":\")[0]\n ips.append(file_ip)\n return ips\n\n# Get username\n\ndef get_username(ip):\n username = default_user\n pass_file = \"%s/.%s\" % (home_dir, password_db)\n if 
os.path.exists(pass_file) and re.search(r\"[a-z]|[0-9]\", ip):\n file = open(pass_file, \"r\")\n data = file.readlines()\n for line in data:\n line = line.rstrip()\n file_user = line.split(\":\")[1]\n file_ip = line.split(\":\")[0]\n if file_ip == ip:\n return file_user\n return username\n else:\n return username\n\n# Get password\n\ndef get_password(ip, username):\n password = \"\"\n pass_file = \"%s/.%s\" % (home_dir, password_db)\n prompt = \"Password for %s:\" % (ip)\n if os.path.exists(pass_file):\n file = open(pass_file, \"r\")\n data = file.readlines()\n for line in data:\n line = line.rstrip()\n (file_ip, file_user, file_pass) = line.split(\":\")\n if file_ip == ip and file_user == username:\n if re.search(r\"[A-Z]|[a-z]|[0-9]\", file_pass):\n return file_pass\n else:\n password = getpass.getpass(prompt=prompt, stream=None)\n return password\n else:\n password = getpass.getpass(prompt=prompt, stream=None)\n return password\n\n# Sol to host\n\ndef sol_to_host(ip, username, password, oob_type):\n if oob_type == \"amt\":\n command = \"export AMT_PASSWORD=\\\"%s\\\" ; amtterm %s\" % (password, ip)\n else:\n command = \"ipmitool -I lanplus -U %s -P %s -H %s sol activate\" % (username, password, ip)\n if verbose_mode == True:\n string = \"Executing:\\t%s\" % (command)\n handle_output(string)\n os.system(command)\n return\n\n# Initiate web client\n\ndef start_web_driver():\n if debug_mode == False:\n from selenium.webdriver.firefox.options import Options\n options = Options()\n options.headless = True\n driver = webdriver.Firefox(options=options)\n else:\n driver = webdriver.Firefox()\n return driver\n\n# Run meshcmd\n\ndef mesh_command(ip, command, meshcmd, meshcmd_bin):\n if not os.path.exists(meshcmd_bin):\n uname = subprocess.check_output(\"uname\", shell=True).decode().strip()\n if uname == \"Darwin\":\n return\n else:\n if uname == \"Linux\":\n os_name = \"linux\"\n if re.search(r\"i386|x86\", uname_arch):\n if re.search(r\"64\", uname_arch):\n os_arch = \"x86_64\"\n else:\n os_arch = \"i386\"\n else:\n if re.search(r\"64\", uname_arch):\n os_arch = \"arm64\"\n else:\n os_arch = \"arm\"\n fetch_bin = \"meshcmd_%s_%s\" % (os_name, os_arch) \n else:\n os_name = \"win\"\n fetch_bin = \"meshcmd_%s_%s.exe\" % (os_name, os_arch) \n meshcmd_url = \"https://github.com/lateralblast/goat/blob/master/meshcmd/%s?raw=true\" % (fetch_bin)\n download_file(meshcmd_url, meshcmd_bin)\n command = \"chmod +x %s\" % (meshcmd_bin)\n os.system(command)\n if meshcmd == \"help\":\n command = \"%s\" % (meshcmd_bin)\n else:\n if re.search(r\"[0-9]\", ip):\n status = check_ping(ip)\n if not status == False:\n username = get_username(ip)\n password = get_password(ip, username)\n command = \"sudo %s %s --host %s --user %s --pass %s\" % (meshcmd_bin, meshcmd, ip, username, password)\n else:\n command = \"sudo %s %s\" % (meshcmd_bin, meshcmd)\n handle_output(command)\n os.system(command)\n return\n\n# Initiate SSH Session\n\ndef start_ssh_session(ip, username, password):\n ssh_command = \"ssh -o StrictHostKeyChecking=no\"\n ssh_command = \"%s %s@%s\" % (ssh_command, username, ip)\n ssh_session = pexpect.spawn(ssh_command)\n ssh_session.expect(\"assword: \")\n ssh_session.sendline(password)\n return ssh_session\n\n# Set a list of iDRAC values from a file\n
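# Each line of the file is expected to be comma-separated: \"group,parameter,value\" or \"parameter,value\".\n\ndef set_specific_idrac_values(ip, username, password, file_array, dryrun):\n if dryrun == False:\n ssh_session = start_ssh_session(ip, username, password)\n for line in file_array:\n items = line.split(\",\")\n if len(items) > 2:\n 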
group = items[0]\n value = items[2]\n parameter = items[1]\n else:\n group = \"\"\n value = items[1]\n parameter = items[0]\n command = \"racadm config -g %s -o %s %s\" % (group, parameter, value)\n if dryrun == True:\n print(command) \n else:\n ssh_session.expect(\"/admin1-> \")\n ssh_session.sendline(command)\n ssh_session.expect(\"/admin1-> \")\n output = ssh_session.before\n output = output.decode()\n if verbose_mode == True:\n text = \"Executing:\\t%s\" % (command)\n handle_output(text)\n text = \"Output:\\t\\t%s\" % (output)\n handle_output(text)\n if dryrun == False:\n ssh_session.close()\n return\n\n# Set specific known iDRAC value\n\ndef set_specific_idrac_value(ip, username, password, group, parameter, value, dryrun):\n if re.search(r\"lan|network\",group):\n group = \"cfgLanNetworking\"\n if re.search(r\"server\",group):\n group = \"cfgServerInfo\"\n if re.search(r\"serial\",group):\n group = \"cfgSerial\"\n if re.search(r\"[A-Z,a-z]\", group):\n command = \"racadm set %s %s\" % (parameter, value)\n else:\n command = \"racadm config -g %s -o %s %s\" % (group, parameter, value)\n if dryrun == True:\n print(command)\n else:\n ssh_session = start_ssh_session(ip, username, password)\n ssh_session.expect(\"/admin1-> \")\n ssh_session.sendline(command)\n ssh_session.expect(\"/admin1-> \")\n output = ssh_session.before\n output = output.decode()\n if verbose_mode == True:\n text = \"Executing:\\t%s\" % (command)\n handle_output(text)\n text = \"Output:\\t\\t%s\" % (output)\n handle_output(text)\n ssh_session.close()\n return\n\n# Set general iDRAC values\n\ndef set_idrac_value(ip, username, password, hostname, domainname, netmask, gateway, primarydns, secondarydns, primaryntp, secondaryntp, primarysyslog, secondarysyslog, syslogport, power, dryrun):\n commands = []\n if re.search(r\"[a-z,0-9]\", domainname):\n command = \"racadm config -g cfgLanNetworking -o cfgDNSDomainNameFromDHCP 0\"\n commands.append(command)\n command = \"racadm config -g cfgLanNetworking -o cfgDNSDomainName %s\" % (domainname)\n commands.append(command)\n if re.search(r\"[0-9]\", netmask):\n command = \"racadm config -g cfgLanNetworking -o cfgNicNetmask %s\" % (netmask)\n commands.append(command)\n if re.search(r\"[0-9]\", gateway):\n command = \"racadm config -g cfgLanNetworking -o cfgNicGateway %s\" % (gateway)\n commands.append(command)\n if re.search(r\"[0-9]\", primarydns):\n command = \"racadm config -g cfgLanNetworking -o cfgDNSServersFromDHCP 0\"\n commands.append(command)\n command = \"racadm config -g cfgLanNetworking -o cfgDNSServer1 %s\" % (primarydns)\n commands.append(command)\n if re.search(r\"[0-9]\", secondarydns):\n command = \"racadm config -g cfgLanNetworking -o cfgDNSServersFromDHCP 0\"\n commands.append(command)\n command = \"racadm config -g cfgLanNetworking -o cfgDNSServer2 %s\" % (secondarydns)\n commands.append(command)\n if re.search(r\"[a-z,0-9]\", primaryntp):\n command = \"racadm config -g cfgLanNetworking -o cfgRhostsNtpEnable 1\"\n commands.append(command)\n command = \"racadm config -g cfgLanNetworking -o cfgRhostsNtpServer1 %s\" % (primaryntp)\n commands.append(command)\n if re.search(r\"[a-z,0-9]\", secondaryntp):\n command = \"racadm config -g cfgLanNetworking -o cfgRhostsNtpEnable 1\"\n commands.append(command)\n command = \"racadm config -g cfgLanNetworking -o cfgRhostsNtpServer2 %s\" % (secondaryntp)\n commands.append(command)\n if re.search(r\"[a-z,0-9]\", primarysyslog):\n command = \"racadm config -g cfgLanNetworking -o cfgRhostsSyslogEnable 1\"\n 
commands.append(command)\n command = \"racadm config -g cfgLanNetworking -o cfgRhostsSyslogServer1 %s\" % (primarysyslog)\n commands.append(command)\n if re.search(r\"[a-z,0-9]\", secondarysyslog):\n command = \"racadm config -g cfgLanNetworking -o cfgRhostsSyslogEnable 1\"\n commands.append(command)\n command = \"racadm config -g cfgLanNetworking -o cfgRhostsSyslogServer2 %s\" % (secondarysyslog)\n commands.append(command)\n if re.search(r\"[0-9]\", syslogport):\n command = \"racadm config -g cfgLanNetworking -o cfgRhostsSyslogEnable 1\"\n commands.append(command)\n command = \"racadm config -g cfgLanNetworking -o cfgRhostsSyslogPort %s\" % (syslogport)\n commands.append(command)\n if re.search(r\"[a-z]\", power):\n power = re.sub(r\"on\", \"up\", power)\n power = re.sub(r\"off\", \"down\", power)\n if not re.search(r\"^power\", power):\n power = \"power%s\" % (power)\n command = \"racadm serveraction %s\" % (power)\n commands.append(command)\n if dryrun == True:\n for command in commands:\n print(command)\n else:\n ssh_session = start_ssh_session(ip, username, password)\n ssh_session.expect(\"/admin1-> \")\n for command in commands:\n ssh_session.sendline(command)\n ssh_session.expect(\"/admin1-> \")\n output = ssh_session.before\n output = output.decode()\n if verbose_mode == True:\n text = \"Executing:\\t%s\" % (command)\n handle_output(text)\n text = \"Output:\\t\\t%s\" % (output)\n handle_output(text)\n ssh_session.close()\n return\n\n# Get iDRAC value\n\ndef get_idrac_value(get_value, ip, username, password):\n ssh_session = start_ssh_session(ip, username, password)\n ssh_session.expect(\"/admin1-> \")\n if re.search(r\"bios|idrac|usc\", get_value.lower()):\n command = \"racadm getversion\"\n else:\n command = \"racadm getsysinfo\"\n ssh_session.sendline(command)\n ssh_session.expect(\"/admin1-> \")\n output = ssh_session.before\n output = output.decode()\n ssh_session.sendline(\"exit\")\n ssh_session.close()\n lines = output.split(\"\\r\\n\")\n for line in lines:\n line = line.strip()\n regex = r'\\b(?=\\w){0}\\b(?!\\w)'.format(get_value)\n if re.search(get_value, line, re.IGNORECASE):\n line = re.sub(r\" \\s+\", \" \", line)\n handle_output(line)\n return\n\n# Get IPMI value\n\ndef get_ipmi_value(get_value, ip, username, password):\n command = \"ipmitool -I lanplus -U %s -P %s -H %s %s\" % (username, password, ip, get_value)\n handle_output(command)\n os.system(command)\n return\n\n# Set IPMI value\n\ndef set_ipmi_value(set_value, ip, username, password):\n command = \"ipmitool -I lanplus -U %s -P %s -H %s %s\" % (username, password, ip, set_value)\n handle_output(command)\n os.system(command)\n return\n\n# Use javaws to connect to the iDRAC KVM\n\ndef java_idrac_kvm(ip, port, username, password, home_dir):\n web_url = \"https://%s\" % (ip)\n command = \"which javaws\"\n output = os.popen(command).read()\n if not re.search(r\"^/\", output):\n output = \"Warning:\\tNo Java installation found\"\n handle_output(output)\n exit()\n xml_file = \"/tmp/%s.jnlp\" % (ip)\n command = \"uname -a\"\n os_name = os.popen(command).read()\n if re.search(r\"^Darwin\", os_name):\n command = \"java --version\"\n version = os.popen(command).read()\n if re.search(r\"Oracle\", version):\n exceptions = \"%s/Library/Application Support/Oracle/Java/Deployment/security/exception.sites\" % (home_dir)\n if os.path.exists(exceptions):\n with open(exceptions) as file:\n if not web_url in file.read():\n with open(exceptions, 'a') as file:\n file.write(web_url)\n file.write(\"\\n\")\n else:\n with open(exceptions, 'a') as 
file:\n file.write(web_url)\n file.write(\"\\n\")\n data = []\n data.append('<?xml version=\"1.0\" encoding=\"UTF-8\"?>')\n string = '<jnlp codebase=\"%s\" spec=\"1.0+\">' % (web_url)\n data.append(string)\n data.append('<information>')\n data.append(' <title>Virtual Console Client</title>')\n data.append(' <vendor>Dell Inc.</vendor>')\n string = ' <icon href=\"%s/images/logo.gif\" kind=\"splash\"/>' % (web_url)\n data.append(string)\n data.append(' <shortcut online=\"true\"/>')\n data.append('</information>')\n data.append('<application-desc main-class=\"com.avocent.idrac.kvm.Main\">')\n string = ' <argument>ip=%s</argument>' % (ip)\n data.append(string)\n data.append(' <argument>vm=1</argument>')\n string = ' <argument>title=%s</argument>' % (ip)\n data.append(string)\n string = ' <argument>user=%s</argument>' % (username)\n data.append(string)\n string = ' <argument>password=%s</argument>' % (password)\n data.append(string)\n string = ' <argument>kmport=%s</argument>' % (port)\n data.append(string)\n string = ' <argument>vport=%s</argument>' % (port)\n data.append(string)\n data.append(' <argument>apcp=1</argument>')\n data.append(' <argument>reconnect=2</argument>')\n data.append(' <argument>chat=1</argument>')\n data.append(' <argument>F1=1</argument>')\n data.append(' <argument>custom=0</argument>')\n data.append(' <argument>scaling=15</argument>')\n data.append(' <argument>minwinheight=100</argument>')\n data.append(' <argument>minwinwidth=100</argument>')\n data.append(' <argument>videoborder=0</argument>')\n data.append(' <argument>version=2</argument>')\n data.append('</application-desc>')\n data.append('<security>')\n data.append(' <all-permissions/>')\n data.append('</security>')\n data.append('<resources>')\n data.append(' <j2se version=\"1.6+\"/>')\n string = ' <jar href=\"%s/software/avctKVM.jar\" download=\"eager\" main=\"true\" />' % (web_url)\n data.append(string)\n data.append('</resources>')\n data.append('<resources os=\"Windows\" arch=\"x86\">')\n string = ' <nativelib href=\"%s/software/avctKVMIOWin32.jar\" download=\"eager\"/>' % (web_url)\n data.append(string)\n string = ' <nativelib href=\"%s/software/avctVMAPI_DLLWin32.jar\" download=\"eager\"/>' % (web_url)\n data.append(string)\n data.append('</resources>')\n data.append('<resources os=\"Windows\" arch=\"amd64\">')\n string = ' <nativelib href=\"%s/software/avctKVMIOWin64.jar\" download=\"eager\"/>' % (web_url)\n data.append(string)\n string = ' <nativelib href=\"%s/software/avctVMAPI_DLLWin64.jar\" download=\"eager\"/>' % (web_url)\n data.append(string)\n data.append('</resources>')\n data.append('<resources os=\"Windows\" arch=\"x86_64\">')\n string = ' <nativelib href=\"%s/software/avctKVMIOWin64.jar\" download=\"eager\"/>' % (web_url)\n data.append(string)\n string = ' <nativelib href=\"%s/software/avctVMAPI_DLLWin64.jar\" download=\"eager\"/>' % (web_url)\n data.append(string)\n data.append('</resources>')\n data.append('<resources os=\"Linux\" arch=\"x86\">')\n string = ' <nativelib href=\"%s/software/avctKVMIOLinux32.jar\" download=\"eager\"/>' % (web_url)\n data.append(string)\n string = ' <nativelib href=\"%s/software/avctVMAPI_DLLLinux32.jar\" download=\"eager\"/>' % (web_url)\n data.append(string)\n data.append('</resources>')\n data.append('<resources os=\"Linux\" arch=\"i386\">')\n string = ' <nativelib href=\"%s/software/avctKVMIOLinux32.jar\" download=\"eager\"/>' % (web_url)\n data.append(string)\n string = ' <nativelib href=\"%s/software/avctVMAPI_DLLLinux32.jar\" download=\"eager\"/>' % 
(web_url)\n data.append(string)\n data.append('</resources>')\n data.append('<resources os=\"Linux\" arch=\"i586\">')\n string = ' <nativelib href=\"%s/software/avctKVMIOLinux32.jar\" download=\"eager\"/>' % (web_url)\n data.append(string)\n string = ' <nativelib href=\"%s/software/avctVMAPI_DLLLinux32.jar\" download=\"eager\"/>' % (web_url)\n data.append(string)\n data.append('</resources>')\n data.append('<resources os=\"Linux\" arch=\"i686\">')\n string = ' <nativelib href=\"%s/software/avctKVMIOLinux32.jar\" download=\"eager\"/>' % (web_url)\n data.append(string)\n string = ' <nativelib href=\"%s/software/avctVMAPI_DLLLinux32.jar\" download=\"eager\"/>' % (web_url)\n data.append(string)\n data.append('</resources>')\n data.append('<resources os=\"Linux\" arch=\"amd64\">')\n string = ' <nativelib href=\"%s/software/avctKVMIOLinux64.jar\" download=\"eager\"/>' % (web_url)\n data.append(string)\n string = ' <nativelib href=\"%s/software/avctVMAPI_DLLLinux64.jar\" download=\"eager\"/>' % (web_url)\n data.append(string)\n data.append('</resources>')\n data.append('<resources os=\"Linux\" arch=\"x86_64\">')\n string = ' <nativelib href=\"%s/software/avctKVMIOLinux64.jar\" download=\"eager\"/>' % (web_url)\n data.append(string)\n string = ' <nativelib href=\"%s/software/avctVMAPI_DLLLinux64.jar\" download=\"eager\"/>' % (web_url)\n data.append(string)\n data.append('</resources>')\n data.append('<resources os=\"Mac OS X\" arch=\"x86_64\">')\n string = ' <nativelib href=\"%s/software/avctKVMIOMac64.jar\" download=\"eager\"/>' % (web_url)\n data.append(string)\n string = ' <nativelib href=\"%s/software/avctVMAPI_DLLMac64.jar\" download=\"eager\"/>' % (web_url)\n data.append(string)\n data.append('</resources>')\n data.append('</jnlp>')\n with open(xml_file, 'w') as file:\n for item in data:\n file.write(\"%s\\n\" % item)\n if os.path.exists(xml_file):\n command = \"chmod 700 %s\" % (xml_file)\n os.system(command)\n command = \"javaws %s\" % (xml_file)\n os.system(command)\n\n# Set APC power\n\ndef set_apc_power(power, ip, outlet, username, password):\n command = \"ssh -V 2>&1 |cut -f1 -d, |cut -f2 -d_\"\n output = os.popen(command).read()\n version = output.rstrip()\n major = version.split(\".\")[0]\n major = int(major)\n minor = version.split(\".\")[1]\n minor = minor.split(\"p\")[0]\n minor = int(minor)\n ssh_opt = \"-oKexAlgorithms=+diffie-hellman-group1-sha1 -oStrictHostKeyChecking=no\"\n if major > 7:\n command = \"which docker\"\n output = os.popen(command).read()\n if not re.search(r\"^/\", output):\n output = \"Warning:\\tNo docker installation found\"\n handle_output(output)\n exit()\n string = \"Docker old SSH version tool\"\n command = \"docker images |grep ostrich\"\n output = os.popen(command).read()\n if not re.search(r\"ostrich\", output):\n output = \"Information:\\tInstalling %s\" % (string) \n handle_output(output)\n with open(\"/tmp/Dockerfile\", 'w') as file:\n file.write(\"FROM ubuntu:16.04\\n\")\n file.write(\"RUN apt-get update && apt-get install -y openssh-client\\n\")\n with open(\"/tmp/docker-compose.yml\", 'w') as file:\n file.write('version: \"3\"\\n')\n file.write(\"services:\\n\")\n file.write(\" ostrich:\\n\")\n file.write(\" build:\\n\")\n file.write(\" context: .\\n\")\n file.write(\" dockerfile: Dockerfile\\n\")\n file.write(\" image: ostrich\\n\")\n file.write(\" container_name: ostrich\\n\")\n file.write(\" entrypoint: /bin/bash\\n\")\n file.write(\" working_dir: /root\\n\")\n
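 # Run the ssh client inside the ostrich container so an older OpenSSH (which still offers diffie-hellman-group1-sha1) is used.\n command = \"docker run -it ostrich /bin/bash -c \\\"ssh %s %s@%s\\\"\" % (ssh_opt, 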
username, ip)\n else:\n command = \"ssh %s %s@%s\" % (ssh_opt, username, ip)\n #child.expect(\"\")\n #child.sendline(\"\")\n outlet = str(outlet) \n outlet = \"%s\\r\" % (outlet)\n child = pexpect.spawnu(command)\n if verbose_mode == True:\n child.logfile = sys.stdout\n child.expect(\"password: \")\n child.sendline(password)\n child.expect(\"- Control Console -\")\n child.sendline(\"1\\r\")\n child.expect(\"- Device Manager -\")\n child.sendline(\"2\\r\")\n child.expect(\"- Outlet Management -\")\n child.sendline(\"1\\r\")\n child.expect(\"- Outlet Control/Configuration -\")\n child.sendline(outlet)\n child.expect(\"1- Control Outlet\")\n child.sendline(\"1\\r\")\n child.expect(\"- Control Outlet -\")\n if power == \"on\":\n child.sendline(\"1\\r\")\n else:\n child.sendline(\"2\\r\")\n child.expect(\"YES\")\n child.sendline(\"YES\\r\")\n child.expect(\"ENTER\")\n child.sendline(\"\\r\")\n child.expect(\"- Control Outlet -\")\n child.sendline(\"\\033\")\n child.expect(\" 1- Control Outlet\")\n child.sendline(\"\\033\")\n child.expect(\"- Outlet Control/Configuration -\")\n child.sendline(\"\\033\")\n child.expect(\"- Outlet Management -\")\n child.sendline(\"\\033\")\n child.expect(\"- Device Manager -\")\n child.sendline(\"\\033\")\n child.expect(\"- Control Console -\")\n child.sendline(\"4\\r\")\n child.close()\n return\n\n# Use docker container to drive iDRAC KVM\n\ndef web_idrac_kvm(ip, port, username, password):\n string = \"Docker iDRAC KVM redirection tool\"\n command = \"which docker\"\n output = os.popen(command).read()\n if not re.search(r\"^/\", output):\n output = \"Warning:\\tNo docker installation found\"\n handle_output(output)\n exit()\n command = \"docker images |grep idrac6\"\n output = os.popen(command).read()\n if not re.search(r\"idrac6\", output):\n output = \"Information:\\tInstalling %s\" % (string) \n handle_output(output)\n command = \"docker pull domistyle/idrac6\"\n if verbose_mode == True:\n output = \"Executing:\\t%s\" % (command)\n handle_output(output)\n output = os.popen(command).read()\n if verbose_mode == True:\n handle_output(output)\n command = \"docker ps |grep idrac |awk '{print $1}'\"\n process = os.popen(command).read()\n process = process.rstrip()\n if re.search(r\"[0-9]\", process):\n output = \"Warning:\\tInstance of %s already running\" % (string)\n handle_output(output)\n if kill_mode == True:\n output = \"Information:\\tStopping existing %s instance\" % (string)\n handle_output(output)\n command = \"docker kill %s\" % (process)\n output = os.popen(command).read()\n if verbose_mode == True:\n handle_output(output)\n else:\n exit()\n command = \"docker run -d -p %s:%s -p 5900:5900 -e IDRAC_HOST=%s -e IDRAC_USER=%s -e IDRAC_PASSWORD=%s domistyle/idrac6\" % (port, port, ip, username, password)\n if verbose_mode == True:\n output = \"Executing:\\t%s\" % (command)\n handle_output(output)\n output = os.popen(command).read()\n if verbose_mode == True:\n handle_output(output)\n output = \"Information:\\tStarting %s at http://127.0.0.1:%s\" % (string, port)\n handle_output(output)\n return\n\n# Handle dryrun\n\nif option[\"dryrun\"]:\n dryrun = True\nelse:\n dryrun = False\n\n# Handle type\n\nif option[\"type\"]:\n oob_type = option[\"type\"]\n oob_type = oob_type.lower()\n if oob_type == \"amt\":\n default_user = \"admin\"\n if oob_type == \"idrac\":\n default_user = \"root\"\n if oob_type == \"ipmi\":\n default_user = \"root\"\n\n# Handle version switch\n\nif option[\"version\"]:\n script_exe = sys.argv[0]\n print_version(script_exe)\n exit()\n\n# 
Handle IP switch\n\nif option[\"ip\"]:\n string = \"\"\n ip = option[\"ip\"]\n test = check_valid_ip(ip)\n if test == False:\n string = \"Warning:\\tInvalid IP: %s\" % (ip)\n handle_output(string)\n exit()\n\n# Handle options switch\n\nif option[\"options\"]:\n script_exe = sys.argv[0]\n print_options(script_exe)\n exit()\n\n# Handle insecure switch\n\nif option[\"insecure\"]:\n http_proto = \"http\"\nelse:\n http_proto = \"https\"\n\n# Handle mask switch\n\nif option[\"mask\"]:\n mask_mode = True\nelse:\n mask_mode = False\n\n# Handle username switch\n\nif option[\"username\"]:\n username = option[\"username\"]\nelse:\n if option[\"avail\"]:\n if option[\"ip\"]:\n username = get_username(ip)\n else:\n if option[\"type\"] and not option[\"allhosts\"]:\n if option['type'] == \"apc\":\n username = get_username(ip)\n if option[\"meshcmd\"]:\n if option[\"ip\"]:\n username = get_username(ip)\n else:\n if not option[\"ip\"]:\n output = \"Warning:\\tNo IP specified\"\n handle_output(output)\n exit()\n else:\n username = get_username(ip)\n\n# Handle password switch\n\nif option[\"password\"]:\n password = option[\"password\"]\nelse:\n if option[\"avail\"]:\n if option[\"ip\"]:\n password = get_password(ip, username)\n else:\n if option[\"type\"] and not option[\"allhosts\"]:\n password = get_password(ip, username)\n\n# Handle search switch\n\nif option[\"search\"]:\n search = option[\"search\"]\nelse:\n search = \"\"\n\n# Handle model switch\n\nif option[\"model\"]:\n model = option[\"model\"]\n\n# Handle verbose switch\n\nif option[\"verbose\"]:\n verbose_mode = True \nelse:\n verbose_mode = False\n\n# Handle kill switch\n\nif option[\"kill\"]:\n kill_mode = True\nelse:\n kill_mode = False\n\n# Handle debug switch\n\nif option[\"debug\"]:\n debug_mode = True \nelse:\n debug_mode = False\n\n# Handle get switch\n\nif option[\"get\"]:\n get_value = option[\"get\"]\n\n# Handle power switch\n\nif option[\"power\"]:\n power = option[\"power\"]\nelse:\n power = \"\"\n\n# Handle domainname switch\n\nif option[\"domainname\"]:\n domainname = option[\"domainname\"]\nelse:\n domainname = \"\" \n\n# Handle hostname switch\n\nif option[\"hostname\"]:\n hostname = option[\"hostname\"]\nelse:\n hostname = \"\" \n\n# Handle gateway switch\n\nif option[\"gateway\"]:\n gateway = option[\"gateway\"]\nelse:\n gateway = \"\" \n\n# Handle netmask switch\n\nif option[\"netmask\"]:\n netmask = option[\"netmask\"]\nelse:\n netmask = \"\" \n\n# Handle primarydns switch\n\nif option[\"primarydns\"]:\n primarydns = option[\"primarydns\"]\nelse:\n primarydns = \"\" \n\n# Handle primaryntp switch\n\nif option[\"primaryntp\"]:\n primaryntp = option[\"primaryntp\"]\nelse:\n primaryntp = \"\" \n\n# Handle primarysyslog switch\n\nif option[\"primarysyslog\"]:\n primarysyslog = option[\"primarysyslog\"]\nelse:\n primarysyslog = \"\" \n\n# Handle secondaryntp switch\n\nif option[\"secondaryntp\"]:\n secondaryntp = option[\"secondaryntp\"]\nelse:\n secondaryntp = \"\" \n\n# Handle secondarydns switch\n\nif option[\"secondarydns\"]:\n secondarydns = option[\"secondarydns\"]\nelse:\n secondarydns = \"\" \n\n# Handle secondarysyslog switch\n\nif option[\"secondarysyslog\"]:\n secondarysyslog = option[\"secondarysyslog\"]\nelse:\n secondarysyslog = \"\" \n\n# Handle syslogport switch\n\nif option[\"syslogport\"]:\n syslogport = option[\"syslogport\"]\nelse:\n syslogport = \"\"\n\n# Handle avail switch\n\nif option[\"avail\"]:\n avail = option[\"avail\"]\n\n# Handle check switch\n\nif option[\"check\"]:\n check = option[\"check\"]\n\n# 
Handle port switch\n\nif option[\"port\"]:\n port = option[\"port\"]\nelse:\n if option[\"type\"]:\n if option[\"type\"].lower() == \"webidrac\":\n port = \"5800\"\n if option[\"type\"].lower() == \"javaidrac\":\n port = \"5900\"\n\n# Handle outlet switch\n\nif option['outlet']:\n outlet = option['outlet']\nelse:\n outlet = \"\"\n\n# Handle boot switch\n\nif option['boot']:\n boot = option['boot']\n\n# Handle MeshCmd option\n\nif option[\"meshcmd\"]:\n meshcmd = option[\"meshcmd\"]\n\n# Handle meshcommander switch\n\nif option[\"meshcommander\"]:\n mesh_bin = \"meshcommander\"\n\n# Handle meshcentral switch\n\nif option[\"meshcentral\"]:\n mesh_bin = \"meshcentral\"\n\n# Handle download value\n\nif option[\"download\"]:\n download = True\nelse:\n download = False\n\n# Run meshcommander\n\nif option[\"meshcommander\"] or option[\"meshcentral\"]:\n if option[\"port\"]:\n mesh_port = option[\"port\"]\n check_mesh_config(mesh_bin)\n start_mesh(mesh_bin, mesh_port)\n exit()\n\n# If option meshcmd is used the type of OOB is AMT\n\nif option[\"meshcmd\"]:\n option[\"type\"] = \"amt\"\n\n# Handle vendor switch\n\nif option[\"type\"]:\n ips = []\n check_local_config()\n oob_type = option[\"type\"]\n oob_type = oob_type.lower()\n if option[\"allhosts\"]:\n ips = get_ips()\n else:\n if option[\"avail\"] and not option[\"ip\"]:\n if not option[\"model\"]:\n handle_output(\"Warning:\\tNo model specified\")\n exit()\n else:\n driver = start_web_driver()\n get_web_amt_value(avail, model, driver, download)\n else:\n if option[\"ip\"]:\n ips.append(ip)\n else:\n if option[\"meshcmd\"]:\n ips.append(\"\")\n password = \"\"\n username = \"\"\n else:\n output = \"Warning:\\tNo IP specified\"\n handle_output(output)\n exit()\n for ip in ips:\n if re.search(r\"amt|idrac|ipmi\", oob_type) and option[\"sol\"]:\n status = check_ping(ip)\n if not status == False:\n sol_to_host(ip, username, password, oob_type)\n exit() \n if option[\"allhosts\"]:\n username = get_username(ip)\n password = get_password(ip, username)\n if oob_type == \"webidrac\":\n web_idrac_kvm(ip, port, username, password)\n if oob_type == \"javaidrac\":\n java_idrac_kvm(ip, port, username, password, home_dir)\n if oob_type == \"apc\":\n if option['set']:\n set_apc_power(power, ip, outlet, username, password)\n if oob_type == \"ipmi\":\n status = check_ping(ip)\n if not status == False:\n if option['get']:\n get_ipmi_value(get_value, ip, username, password)\n if option['boot']:\n set_value = \"chassis bootparam set bootflag %s\" % (boot)\n set_ipmi_value(set_value, ip, username, password)\n if option['power']:\n set_value = \"chassis power %s\" % (power)\n set_ipmi_value(set_value, ip, username, password)\n if oob_type == \"idrac\":\n status = check_ping(ip)\n if not status == False:\n if option[\"get\"]:\n bios = get_idrac_value(get_value, ip, username, password)\n if option[\"set\"]:\n if not option['file']:\n if option[\"parameter\"] and re.search(r\"[A-Z,a-z]\", option[\"parameter\"]):\n set_specific_idrac_value(ip, username, password, option[\"group\"] or \"\", option[\"parameter\"], option[\"value\"] or \"\", dryrun)\n else:\n set_idrac_value(ip, username, password, hostname, domainname, netmask, gateway, primarydns, secondarydns, primaryntp, secondaryntp, primarysyslog, secondarysyslog, syslogport, power, dryrun)\n else:\n if re.search(r\"[A-Z,a-z]\",option[\"file\"]):\n file_name = option[\"file\"]\n file_array = file_to_array(file_name)\n set_specific_idrac_values(ip, username, password, file_array, dryrun)\n else:\n set_idrac_value(ip, username, password, hostname, domainname, netmask, gateway, primarydns, 
secondarydns, primaryntp, secondaryntp, primarysyslog, secondarysyslog, syslogport, power, dryrun)\n if oob_type == \"sep\":\n driver = start_web_driver()\n if option['get']:\n status = check_ping(ip)\n if not status == False:\n get_sep_value(get_value, ip, username, password, driver, http_proto, search)\n if option['set']:\n status = check_ping(ip)\n if not status == False:\n set_sep_power(power, ip, outlet, username, password, driver, http_proto)\n if oob_type == \"amt\":\n if option[\"meshcmd\"]:\n mesh_command(ip, password, meshcmd, meshcmd_bin)\n else:\n driver = start_web_driver()\n if option[\"check\"]:\n status = check_ping(ip)\n if not status == False:\n model = get_amt_value(\"model\", ip, username, password, driver, http_proto, search)\n current = get_amt_value(check, ip, username, password, driver, http_proto, search)\n avail = get_web_amt_value(check, model, driver, download)\n compare_versions(current, avail, oob_type)\n if option[\"avail\"]:\n if not option[\"model\"]:\n status = check_ping(ip)\n if not status == False:\n username = get_username(ip)\n password = get_password(ip, username)\n model = get_amt_value(\"model\", ip, username, password, driver, http_proto, search)\n get_web_amt_value(avail, model, driver, download)\n else:\n get_web_amt_value(avail, model, driver, download)\n if option[\"get\"]:\n status = check_ping(ip)\n if not status == False:\n get_amt_value(get_value, ip, username, password, driver, http_proto, search)\n if option[\"set\"]:\n status = check_ping(ip)\n if not status == False:\n set_amt_value(ip, username, password, driver, http_proto, hostname, domainname, primarydns, secondarydns, power)\nelse:\n handle_output(\"Warning:\\tNo OOB type specified\")\n exit()\n \n" }, { "alpha_fraction": 0.6492343544960022, "alphanum_fraction": 0.698113203048706, "avg_line_length": 25.02846908569336, "blob_id": "1fd2db4b0537e5fea379df76eec44876fcb73d53", "content_id": "8bc416b24a7321fb1c199a1d7bb72989d631fbc1", "detected_licenses": [ "CC-BY-4.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 14630, "license_type": "permissive", "max_line_length": 147, "num_lines": 562, "path": "/README.md", "repo_name": "lateralblast/goat", "src_encoding": "UTF-8", "text": "![alt tag](https://raw.githubusercontent.com/lateralblast/goat/master/goat.png)\n\nGOAT\n====\n\nGeneral OOB Automation Tool\n\nVersion: 0.5.1\n\nIntroduction\n------------\n\nThis tool is designed to consolidate several tools into one generic tool.\n\nAt the moment it supports get/set for Intel's AMT, and some iDRAC functions.\n\nSome features:\n\n- Get system information (e.g. Serial, Model, Logs, etc)\n- Check BIOS version\n- Download BIOS\n- Set hostname and domainname for OOB device\n- Remotely reset device\n- Start MeshCommander in order to do other management tasks (e.g. configure certificates)\n- Start amtterm for connecting to the AMT SOL (requires non-TLS/Digest access to be enabled)\n\n
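For example, to query the BIOS version of an AMT host (the address below is purely illustrative):\n\n```\n./goat.py --type amt --ip 192.168.1.100 --get bios\n```\n\nSupported Hardware:\n\n- Intel AMT\n- Dell iDRAC\n- APC switched PDU\n- ServerEdge switched PDU\n\nNotes\n-----\n\nThere are several tools, e.g. 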
amttool to manage AMT, however I found these did not\nhave all the functionality I needed, and some of the functionality did not work.\nI found it easier to use Selenium to drive the management web interface.\n\nIf you have not configured a certificate and thus Digest/TLS connectivity for AMT,\nyou can connect via HTTP using the --insecure switch.\n\nAt the moment hostname, username and password can be stored in a file ~/.goatpass\nto help with automation. Obviously this is not totally secure even if the file is\nonly readable by you, so I am working on a secure store method.\n\nIf you use the --allhosts switch it will step through the hosts in ~/.goatpass.\nThe format of ~/.goatpass is hostname:username:password. If no password is present\nit will prompt for one.\n\nThe script will try to install various components on Mac OS, e.g. Python modules,\nand MeshCommander.\n\nMeshCommander is available from here:\n\nhttps://www.meshcommander.com/meshcommander\n\nTodo:\n\n- Add a local password store so password can be stored securely\n- Add in support for other platforms from other scripts\n\nRequirements\n------------\n\nThe following tools are required:\n\n- Python and the following libraries\n  - Selenium\n  - BeautifulSoup\n  - lxml\n  - wget\n  - paramiko\n- geckodriver\n\nYou will need both python and python-pip packages.\nAs older versions of Python are deprecated, I've had issues installing the required modules with versions of Python less than 3.\n\nI'd recommend using pyenv, but for example to install the required Python components on Ubuntu:\n\n```\nsudo apt-get install python3-setuptools python3-pip python3-dev build-essential\n```\n\nThe code will try to auto install the Python modules and other tools if they are not available, but to install them manually:\n\n```\npip install selenium\npip install bs4\npip install lxml\npip install wget\npip install paramiko\n```\n\nAn example of installing the other required tools on Mac OS:\n\n```\nbrew install geckodriver\nbrew install amtterm\nbrew install npm\nbrew install ipmitool\nmkdir meshcommander\ncd meshcommander\nnpm install meshcommander\n```\n\nAn example of installing the other required tools on Ubuntu:\n\n```\nsudo apt-get install amtterm\nsudo apt-get install npm\nsudo apt-get install ipmitool\ncd /tmp\nwget https://github.com/mozilla/geckodriver/releases/download/v0.26.0/geckodriver-v0.26.0-linux64.tar.gz\nsudo sh -c 'tar -x geckodriver -zf geckodriver-v0.26.0-linux64.tar.gz -O > /usr/bin/geckodriver'\nsudo chmod +x /usr/bin/geckodriver\nrm geckodriver-v0.26.0-linux64.tar.gz\n```\n\n\nLicense\n-------\n\nThis software is licensed as CC-BA (Creative Commons By Attribution)\n\nhttp://creativecommons.org/licenses/by/4.0/legalcode\n\n\nSerial-Over-LAN\n---------------\n\nHere is a brief guide for enabling serial on devices running Linux.\n\nTo be able to use SOL (Serial Over LAN) management, you need to enable agetty via init,\nand enable the serial console in grub on the device that you want to remote manage.\nOnce this is done the machine will need to be rebooted for the serial console to be enabled.\n\nTo enable agetty via init you need to determine the serial TTY by running the following command:\n\n```\ndmesg | grep ttyS | grep irq | grep 0000 | tr -s \" \" | cut -d\" \" -f4\n```\n\nOnce the serial TTY has been determined you can then enable agetty via init:\n\n```\necho \"S1:2345:respawn:/sbin/agetty ttySX 115200 vt100-nav\" >> /etc/inittab\ninit q\n```\n\nTo enable the serial console via grub you'll need the serial TTY number and\nthe 
IO port which can be determined with the following command:\n\n```\ndmesg | grep ttySX | grep irq | tr -s \" \" | cut -d\" \" -f7\n```\n\nOnce you have the serial TTY number and the IO port you can configure grub, for example:\n\n```\necho 'GRUB_CMDLINE_LINUX=\"console=ttySX,115200\"' >> /etc/default/grub\necho 'GRUB_TERMINAL=\"serial console\"' >> /etc/default/grub\necho 'GRUB_SERIAL_COMMAND=\"serial --speed=115200 --port=0xXXXX\"' >> /etc/default/grub\nupdate-grub\n```\n\nExamples\n--------\n\nGetting help:\n\n```\nusage: goat.py [-h] [--ip IP] [--username USERNAME] [--type TYPE] [--get GET] [--password PASSWORD] [--search SEARCH] [--avail AVAIL]\n               [--check CHECK] [--model MODEL] [--port PORT] [--power POWER] [--hostname HOSTNAME] [--gateway GATEWAY] [--netmask NETMASK]\n               [--outlet OUTLET] [--domainname DOMAINNAME] [--primarydns PRIMARYDNS] [--secondarydns SECONDARYDNS] [--primarysyslog PRIMARYSYSLOG]\n               [--secondarysyslog SECONDARYSYSLOG] [--syslogport SYSLOGPORT] [--primaryntp PRIMARYNTP] [--secondaryntp SECONDARYNTP]\n               [--meshcmd MESHCMD] [--group GROUP] [--parameter PARAMETER] [--value VALUE] [--boot BOOT] [--file FILE] [--set] [--kill] [--version]\n               [--insecure] [--verbose] [--debug] [--mask] [--meshcommander] [--meshcentral] [--options] [--allhosts] [--sol] [--download]\n\noptional arguments:\n  -h, --help            show this help message and exit\n  --ip IP\n  --username USERNAME\n  --type TYPE\n  --get GET\n  --password PASSWORD\n  --search SEARCH\n  --avail AVAIL\n  --check CHECK\n  --model MODEL\n  --port PORT\n  --power POWER\n  --hostname HOSTNAME\n  --gateway GATEWAY\n  --netmask NETMASK\n  --outlet OUTLET\n  --domainname DOMAINNAME\n  --primarydns PRIMARYDNS\n  --secondarydns SECONDARYDNS\n  --primarysyslog PRIMARYSYSLOG\n  --secondarysyslog SECONDARYSYSLOG\n  --syslogport SYSLOGPORT\n  --primaryntp PRIMARYNTP\n  --secondaryntp SECONDARYNTP\n  --meshcmd MESHCMD\n  --group GROUP\n  --parameter PARAMETER\n  --value VALUE\n  --boot BOOT\n  --file FILE\n  --set\n  --kill\n  --version\n  --insecure\n  --verbose\n  --debug\n  --mask\n  --meshcommander\n  --meshcentral\n  --options\n  --allhosts\n  --sol\n  --download\n```\n\nGetting information about options:\n\n```\n./goat.py --options\n\nOptions:\n\n--ip                Specify IP of OOB/Remote Management interface\n--username          Set Username\n--type              Set Type of OOB device\n--get               Get Parameter\n--password          Set Password\n--search            Search output for value\n--avail             Get available version from vendor (e.g. BIOS)\n--check             Check current version against available version from vendor (e.g. BIOS)\n--model             Specify model (can be used with --avail)\n--port              Specify port to run service on\n--power             Set power state (on, off, reset)\n--hostname          Set hostname\n--gateway           Set gateway\n--netmask           Set netmask\n--outlet            Set outlet\n--domainname        Set domainname\n--primarydns        Set primary DNS\n--secondarydns      Set secondary DNS\n--primarysyslog     Set primary Syslog\n--secondarysyslog   Set secondary Syslog\n--syslogport        Set Syslog port\n--primaryntp        Set primary NTP\n--secondaryntp      Set secondary NTP\n--meshcmd           Run Meshcmd\n--group             Set group\n--parameter         Set parameter\n--value             Set value\n--boot              Set boot device\n--file              File to read in (e.g. 
iDRAC values)\n--set               Set value\n--kill              Stop existing session\n--version           Display version\n--insecure          Use HTTP/Telnet\n--verbose           Enable verbose output\n--debug             Enable debug output\n--mask              Mask serial and hostname output\n--meshcommander     Use Meshcommander\n--meshcentral       Use Meshcentral\n--options           Display options information\n--allhosts          Automate via .goatpass\n--sol               Start a SOL connection to host\n--download          Download BIOS\n\n```\n\nIntel AMT Examples\n------------------\n\nConnecting to Intel AMT device over SOL:\n\n```\n./goat.py --ip 192.168.1.171 --sol --type amt\nPassword for 192.168.1.171:\namtterm: NONE -> CONNECT (connection to host)\nipv4 (null) [192.168.1.171] 16994 open\namtterm: CONNECT -> INIT (redirection initialization)\namtterm: INIT -> AUTH (session authentication)\namtterm: AUTH -> INIT_SOL (serial-over-lan initialization)\namtterm: INIT_SOL -> RUN_SOL (serial-over-lan active)\nserial-over-lan redirection ok\nconnected now, use ^] to escape\n\nUbuntu 18.04.2 LTS inn01 ttyS4\n\ninn01 login:\n```\n\nSet Intel AMT device hostname:\n\n```\n./goat.py --ip 192.168.1.171 --set --hostname ecs01 --type amt\n```\n\nGet Intel AMT device BIOS version:\n\n```\n./goat.py --ip 192.168.1.171 --get bios --type amt\nVersion: DNKBLi5v.86A.0063.2019.0503.1714\n```\n\nGet available BIOS version for a specific Intel AMT device model:\n\n```\n./goat.py --avail bios --model NUC7i5DNKE --type amt\nAvailable version: 0063\nBIOS Download link: https://downloadcenter.intel.com//download/28789/BIOS-Update-DNKBLi5v-86A-\n```\n\nGet available Intel AMT device BIOS versions for a managed device:\n\n```\n./goat.py --ip 192.168.1.171 --avail bios --type amt\nComputer model: NUC7i5DNKE\nAvailable version: 0063\nBIOS Download link: https://downloadcenter.intel.com//download/28789/BIOS-Update-DNKBLi5v-86A-\n```\n\nCheck current Intel AMT device BIOS version against available vendor version:\n\n```\n./goat.py --ip 192.168.1.171 --check bios --type amt\nComputer model: NUC7i5DNKE\nVersion: DNKBLi5v.86A.0063.2019.0503.1714\nAvailable version: 0063\nBIOS Download link: https://downloadcenter.intel.com//download/28789/BIOS-Update-DNKBLi5v-86A-\nLatest version of BIOS installed\n```\n\nDownload Intel AMT device BIOS for a specific model:\n\n```\n./goat.py --avail bios --model NUC7i5DNKE --type amt --download\nAvailable version: 0063\nBIOS Download link: https://downloadcenter.intel.com//download/28789/BIOS-Update-DNKBLi5v-86A-\nDownloading https://downloadmirror.intel.com/28789/eng/DNi50063.bio to DNi50063.bio\n```\n\nReset Intel AMT device:\n\n```\n./goat.py --ip 192.168.1.171 --set --power reset --type amt\nSending power reset to 192.168.1.171 (Intel AMT has a 30s pause before operation is done)\n```\n\nStart MeshCommander:\n\n```\n./goat.py --meshcommander\nMeshCommander running on http://127.0.0.1:3000.\n```\n\nGet Intel AMT device Memory configuration:\n\n```\n./goat.py --ip 192.168.1.171 --get memory --type amt\nMemory Information\nModule 1\nNot installed\nModule 2\nNot installed\nModule 3\nManufacturer: 859B\nSerial number: XXXXXX\nSize: 16384 MB\nSpeed: 2400 MHz\nForm factor: SODIMM\nType: DDR4\nType detail: Synchronous, Unbuffered (Unregistered)\nAsset tag: 9876543210\nPart number: CT16G4SFD824A.M16FE\nModule 4\nNot installed\n```\n\nGet Intel AMT device System information:\n\n```\n./goat.py --ip 192.168.1.171 --get system --type amt\nSystem Information\nPlatform\nComputer model: NUC7i5DNKE\nManufacturer: Intel Corporation\nVersion: J57826-401\nSerial number: XXXXXXXXXXXXXX \nSystem ID: 
XXXXXXXXXXX\nBaseboard\nManufacturer: Intel Corporation\nProduct name: NUC7i5DNB\nVersion: J57626-401\nSerial number: XXXXXXXXXX\nAsset tag\nReplaceable?: Yes\nBIOS\nVendor: Intel Corp.\nVersion: DNKBLi5v.86A.0063.2019.0503.1714\nRelease date: 05/03/2019\n```\n\nGet Intel AMT device System Event information:\n\n```\n$ ./goat.py --ip 192.168.1.171 --get events --type amt\nEvent Log,Event,Time,Source,Description\n1,5/28/2019,9:59 pm,BIOS,Starting operating system boot process.\n2,5/28/2019,9:59 pm,Add-in card,Starting ROM initialization.\n3,5/28/2019,9:59 pm,BIOS,USB resource configuration.\n4,5/28/2019,9:59 pm,Add-in card,Starting ROM initialization.\n5,5/28/2019,9:59 pm,BIOS,Performing PCI configuration.\n6,5/28/2019,9:59 pm,BIOS,Performing PCI configuration.\n7,5/28/2019,9:59 pm,BIOS,Performing PCI configuration.\n```\n\nServerEdge Switched PDU Examples\n--------------------------------\n\nGet Status of ServerEdge switched PDU outlets:\n\n```\n./goat.py --ip 192.168.0.200 --type sep --get outlet\nOutlet A: ON (0.0)\nOutlet B: ON (0.0)\nOutlet C: OFF (0.0)\nOutlet D: OFF (0.0)\nOutlet E: OFF (0.0)\nOutlet F: OFF (0.0)\nOutlet G: OFF (0.0)\nOutlet H: OFF (0.0)\n```\n\nSet power on for outlet 1/A on ServerEdge switched PDU:\n\n```\n ./goat.py --ip 192.168.0.200 --type sep --set --outlet A --power on\n```\n\niDRAC Web KVM Examples\n----------------------\n\nThis uses the docker iDRAC container:\n\nhttps://github.com/DomiStyle/docker-idrac6\n\n\nStart iDRAC KVM webserver:\n\n```\n./goat.py --type webidrac --ip 192.168.10.191\n```\n\niDRAC Java KVM Examples\n-----------------------\n\nThis method creates a JNLP file and runs it with javaws\n\nStart javaws iDRAC KVM session:\n\n```\n./goat.py --type javaidrac --ip 192.168.10.191\n```\n\niDRAC SSH control examples\n--------------------------\n\nGet iDRAC device BIOS version:\n\n```\n./goat.py --type idrac --get bios --ip 192.168.10.211\nBios Version = 6.6.0\n```\n\nGet iDRAC version:\n\n```\n./goat.py --type idrac --get idrac --ip 192.168.10.211\niDRAC Version = 2.92\n```\n\nGet iDRAC device DNS information:\n\n```\n./goat.py --type idrac --get dns --ip 192.168.10.211\nRegister DNS RAC Name = 1\nDNS RAC Name = hostname\nCurrent DNS Domain = blah.com\nCurrent DNS Server 1 = 8.8.8.8\nCurrent DNS Server 2 = 8.8.4.4\nDNS Servers from DHCP = 0\nDNS Servers from DHCPv6 = 0\nCurrent DNS Server 1 = ::\nCurrent DNS Server 2 = ::\n```\n\nPower on server:\n\n```\n./goat.py --set --power on --type idrac --ip 192.168.10.213 --user root --password XXXXXXXX \n```\n\nSet iDRAC value:\n\n```\n./goat.py --type idrac --ip 192.168.11.233 --username root --password XXXXXXXX --set --primarysyslog 192.168.11.254\n```\n\nSet multiple iDRAC values from a file:\n\n```\ncat ./test\ncfgLanNetworking,cfgDNSServer1,192.168.11.254\ncfgLanNetworking,cfgDNSServer2,8.8.8.8\n\n./goat.py --type idrac --ip 192.168.11.233 --username root --password XXXXXXXX --set --file test\n```\n\nIPMI Examples\n-------------\n\nPower on device via IPMI:\n\n```\n./goat.py --ip 192.168.1.171 --set --power on --type ipmi\n```\n\nSet boot device via IPMI:\n\n```\n./goat.py --ip 192.168.1.171 --set --boot pxe --type ipmi\n```\n\nGet sensor information via IPMI:\n\n```\n./goat.py --ip 192.168.1.171 --get sensor --type ipmi\n```\n\nAPC switched PDU Examples\n-------------------------\n\nPower on outlet 1:\n\n```\n./goat.py --type apc --set --power on --outlet 1 --user apc --ip 192.168.10.201\n```\n\nPower off outlet 1:\n\n```\n./goat.py --type apc --set --power off --outlet 1 --user apc --ip 
192.168.10.201\n```\n" } ]
2
code2pro/infibot
https://github.com/code2pro/infibot
bf531df81b41e27555e6fb5cdb12fdb69c360f34
3c0957c3b12d0834f7924dd98a6410bdd3dc0c01
543c024526383cd7ebf7bf993632c232681cafad
refs/heads/master
2021-01-15T18:21:45.330708
2017-09-13T05:55:14
2017-09-13T05:55:14
99,780,373
1
2
null
2017-08-09T07:50:55
2017-09-13T05:13:15
2017-09-13T05:55:14
Python
[ { "alpha_fraction": 0.5758904218673706, "alphanum_fraction": 0.5813698768615723, "avg_line_length": 41.94117736816406, "blob_id": "98a3290548afca9ae00bc9b339e6ce4d3207931c", "content_id": "fbf2c3814ec69da4acb3e26366084f4adf57cffe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3650, "license_type": "no_license", "max_line_length": 158, "num_lines": 85, "path": "/bot/mailchimp.py", "repo_name": "code2pro/infibot", "src_encoding": "UTF-8", "text": "import requests, hashlib, json\nfrom bot.logger import get_logger\n\nMAILCHIMP_API_PREFIX = \"https://%s.api.mailchimp.com/3.0\"\nMAILCHIMP_GROUP_SUB_TMPL = \"/lists/%s/members\"\nMAILCHIMP_USER_GROUPS_TMPL = \"/lists/%s/members/%s\"\n\nLOG_CATEGORY = 'INFIBOT.UTIL'\nlogger = get_logger(LOG_CATEGORY)\n\n\ndef get_email_hash(email):\n email = email.strip().lower()\n return hashlib.md5(email.encode('utf-8')).hexdigest()\n\n\nclass MailChimp(object):\n def __init__(self, apikey, group_id):\n apikey = apikey.strip()\n self.apikey = apikey\n self.group_id = group_id\n self.data_center = apikey.split('-')[-1]\n self.MAILCHIMP_API_PREFIX = MAILCHIMP_API_PREFIX % self.data_center\n self.MAILCHIMP_GROUP_SUB_TMPL = self.MAILCHIMP_API_PREFIX + MAILCHIMP_GROUP_SUB_TMPL\n self.MAILCHIMP_USER_GROUPS_TMPL = self.MAILCHIMP_API_PREFIX + MAILCHIMP_USER_GROUPS_TMPL\n logger.info(\"MAILCHIMP_API_PREFIX = '%s'\" % self.MAILCHIMP_API_PREFIX)\n\n def get_auth(self):\n return ('user1234', self.apikey)\n\n def is_user_subscribed_gid(self, email, group_id):\n '''Check if the user is already subscribed to mail list with specified ID'''\n email_md5 = get_email_hash(email)\n url = self.MAILCHIMP_USER_GROUPS_TMPL % (group_id, email_md5)\n r = requests.get(url, auth=self.get_auth())\n resp = r.json()\n if r.status_code == 404:\n error = resp\n logger.info(\"is_user_subscribed_gid: Got error when querying URL '%s' [status_code=%d,error_code=%d,error_title='%s',error_description='%s']\" % (\n r.url, r.status_code, error['status'], error['title'], error['detail']\n ))\n return False\n elif r.status_code != 200:\n error = resp\n logger.error(\"is_user_subscribed_gid: Got error when querying URL '%s' [status_code=%d,error_code=%d,error_title='%s',error_description='%s']\" % (\n r.url, r.status_code, error['status'], error['title'], error['detail']\n ))\n return None\n logger.info(\"is_user_subscribed_gid: resp = %s\" % resp)\n return True\n\n def is_user_subscribed(self, email):\n '''Check if the user is already subscribed to mail list'''\n return self.is_user_subscribed_gid(email, self.group_id)\n\n def subscribe_user_gid(self, user, group_id, reg_source='TELEGRAM'):\n '''Subscribe the user to a specified group ID'''\n url = self.MAILCHIMP_GROUP_SUB_TMPL % group_id\n data = {\n 'status' : 'pending',\n 'email_address' : user.email,\n 'merge_fields' : {\n 'EMAIL' : user.email,\n 'FNAME' : user.first_name,\n 'LNAME' : user.last_name,\n 'REG_SOURCE' : reg_source,\n 'ORIG_ID' : user.orig_id,\n }\n }\n payload = json.dumps(data)\n r = requests.post(url, auth=self.get_auth(), data=payload)\n logger.info(\"Request = %s\" % r)\n resp = r.json()\n if r.status_code != 200:\n error = resp\n logger.error(\"is_user_subscribed_gid: Got error when querying URL '%s' [status_code=%d,error_code=%d,error_title='%s',error_description='%s']\" % (\n r.url, r.status_code, error['status'], error['title'], error['detail']\n ))\n return None\n logger.info(\"subscribe_user_gid: JSON Subscribe user = %s\" % resp)\n return True\n\n def 
subscribe_user(self, user):\n '''Subscribe the user to the default group'''\n return self.subscribe_user_gid(user, self.group_id)\n" }, { "alpha_fraction": 0.6686747074127197, "alphanum_fraction": 0.740963876247406, "avg_line_length": 32.400001525878906, "blob_id": "ac010a0f808e94a7ddb159e9eae1856f99ebd5b8", "content_id": "2c5b4b63d311fe58c221ddb32ce89580f99adfee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 166, "license_type": "no_license", "max_line_length": 91, "num_lines": 5, "path": "/misc/ssh_alive_ping.sh", "repo_name": "code2pro/infibot", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nssh user@remote \"nohup poll_every_nsec.pl 2>&1 >/dev/null &\"\n\nssh -o ServerAliveInterval=5 -o ServerAliveCountMax=1 -N -R 1234:localhost:1234 user@remote" }, { "alpha_fraction": 0.6963788270950317, "alphanum_fraction": 0.7437325716018677, "avg_line_length": 30.705883026123047, "blob_id": "df9d3c022066d19f2099844074df0113500e0f5d", "content_id": "3b8f9813e945584a769e77b3d7e77de218743168", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1077, "license_type": "no_license", "max_line_length": 114, "num_lines": 34, "path": "/misc/nginx.md", "repo_name": "code2pro/infibot", "src_encoding": "UTF-8", "text": "# How to Set Up with Nginx\n\n```\nUSER1=user1\nUNIX_SOCKET_PATH=/home/${USER1}/data/var/${USER1}.sock\n\nsudo python -c \"import socket as s; sock = s.socket(s.AF_UNIX); sock.bind('${UNIX_SOCKET_PATH}')\"\n\nsudo setfacl -m u:www-data:rw ${UNIX_SOCKET_PATH}\n\nsudo setfacl -m u:${USER1}:rw ${UNIX_SOCKET_PATH}\n```\n\nUse HTTPS for git clone instead of SSH. SSH clone will require credentials, while HTTPS will not.\n\nSet up firewall and allow only Telegram:\n\n```\nsudo ufw status\n\nsudo ufw allow proto tcp from 149.154.167.0/24 to any port 443\n```\n\n```\nvirtualenv -p python3 venv\n```\n\n* http://www.blog.trackets.com/2014/05/17/ssh-tunnel-local-and-remote-port-forwarding-explained-with-examples.html\n* https://unix.stackexchange.com/questions/34004/how-does-tcp-keepalive-work-in-ssh\n* https://github.com/SECTHEMALL/log2iptables\n* https://help.ubuntu.com/lts/serverguide/firewall.html\n* https://db-ip.com/all/149.154.167\n* http://docs.gunicorn.org/en/stable/settings.html#errorlog\n* https://stackoverflow.com/questions/42190984/dyld-library-not-loaded-error-preventing-virtualenv-from-loading" }, { "alpha_fraction": 0.5877668261528015, "alphanum_fraction": 0.5899968147277832, "avg_line_length": 39.24359130859375, "blob_id": "7efa4cbbb7e58f7945ee0808e4de4eb57ec50e27", "content_id": "6617bf9c4a24c3d5311e6b1f4c30ebc86a2ff11a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3139, "license_type": "no_license", "max_line_length": 135, "num_lines": 78, "path": "/bot/mailerlite.py", "repo_name": "code2pro/infibot", "src_encoding": "UTF-8", "text": "import requests, json\nfrom bot.logger import get_logger\n\nMAILER_LITE_API_PREFIX = \"https://api.mailerlite.com/api/v2\"\nMAILER_LITE_GROUP_SUB_TMPL = MAILER_LITE_API_PREFIX + \"/groups/%s/subscribers\"\nMAILER_LITE_USER_GROUPS_TMPL = MAILER_LITE_API_PREFIX + \"/subscribers/%s/groups\"\n\nLOG_CATEGORY = 'INFIBOT.UTIL'\nlogger = get_logger(LOG_CATEGORY)\n\nclass MailerLite(object):\n def __init__(self, apikey, group_id):\n self.apikey = apikey\n self.group_id = group_id\n\n def get_headers(self):\n return {\n 'content-type': 'application/json',\n 'x-mailerlite-apikey': 
self.apikey,\n }\n\n def get_user_groups(self, email):\n '''Get all groups the user has subscribed to'''\n headers = self.get_headers()\n url = MAILER_LITE_USER_GROUPS_TMPL % email.strip().lower()\n r = requests.get(url, headers=headers)\n resp = r.json()\n if r.status_code != 200:\n error = resp['error']\n logger.error(\"get_user_groups: Got error when querying URL '%s' [status_code=%d,error_code=%d,error_description='%s']\" % (\n r.url, r.status_code, error['code'], error['message']\n ))\n return None\n logger.info(\"get_user_groups: JSON Groups = %s\" % resp)\n return resp\n\n def is_user_subscribed_gid(self, email, group_id):\n '''Check if the user is already subscribed to mail list with specified ID'''\n groups = self.get_user_groups(email)\n if not groups:\n return False\n def cond(group):\n logger.info(\"is_user_subscribed_gid: cond => group = %s\" % group)\n return (group['id'] == group_id and not group['unsubscribed'])\n target_groups = [group['id'] for group in groups if cond(group)]\n logger.info(\"is_user_subscribed_gid: target_groups = %s\" % target_groups)\n if target_groups != [group_id]:\n return False\n return True\n\n def is_user_subscribed(self, email):\n '''Check if the user is already subscribed to mail list'''\n return self.is_user_subscribed_gid(email, self.group_id)\n\n def subscribe_user_gid(self, user, group_id):\n '''Subscribe the user to a specified group ID'''\n headers = self.get_headers()\n url = MAILER_LITE_GROUP_SUB_TMPL % group_id\n data = {\n 'name' : user.first_name,\n 'last_name' : user.last_name,\n 'email' : user.email,\n }\n payload = json.dumps(data)\n r = requests.post(url, headers=headers, data=payload)\n resp = r.json()\n if r.status_code != 200:\n error = resp['error']\n logger.error(\"subscribe_user_gid: Got error when querying URL '%s' [status_code=%d,error_code=%d,error_description=%s]\" % (\n r.url, r.status_code, error['code'], error['message']\n ))\n return None\n logger.info(\"get_user_groups: JSON Subscribe user = %s\" % resp)\n return True\n\n def subscribe_user(self, user):\n '''Subscribe the user to the default group'''\n return self.subscribe_user_gid(user, self.group_id)\n" }, { "alpha_fraction": 0.7475209832191467, "alphanum_fraction": 0.7978641986846924, "avg_line_length": 68.0526351928711, "blob_id": "8b6a86aea2b792299a3fefe92adcc918c17fe933", "content_id": "bcb12412f4629bb712dade8e676869e3dd261b76", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1311, "license_type": "no_license", "max_line_length": 122, "num_lines": 19, "path": "/misc/refs.md", "repo_name": "code2pro/infibot", "src_encoding": "UTF-8", "text": "* https://core.telegram.org/bots/inline\n* https://core.telegram.org/bots/api#inlinequeryresult\n* https://core.telegram.org/bots#inline-keyboards-and-on-the-fly-updating\n* https://core.telegram.org/bots/api/#replykeyboardmarkup\n* http://www.agiliq.com/blog/2015/03/getting-started-with-redis-py/\n* https://stackoverflow.com/a/5801901/128028\n* https://stackoverflow.com/questions/2217001/override-pythons-in-operator\n* https://docs.python.org/2/reference/datamodel.html\n* https://pypi.python.org/pypi/redis/\n* http://www.programcreek.com/python/example/184/logging.StreamHandler\n* https://github.com/syrusakbary/validate_email\n* https://stackoverflow.com/questions/61517/python-dictionary-from-an-objects-fields\n* https://stackoverflow.com/questions/576169/understanding-python-super-with-init-methods\n* 
https://stackoverflow.com/questions/6025755/how-to-create-special-files-of-type-socket\n* https://www.digitalocean.com/community/tutorials/how-to-serve-flask-applications-with-gunicorn-and-nginx-on-ubuntu-16-04\n* http://www.blog.trackets.com/2014/05/17/ssh-tunnel-local-and-remote-port-forwarding-explained-with-examples.html\n* https://unix.stackexchange.com/questions/34004/how-does-tcp-keepalive-work-in-ssh\n* https://github.com/SECTHEMALL/log2iptables\n* https://help.ubuntu.com/lts/serverguide/firewall.html" }, { "alpha_fraction": 0.6108108162879944, "alphanum_fraction": 0.6486486196517944, "avg_line_length": 25.428571701049805, "blob_id": "a9f39b0c6769d024a522a16d1021a2204e03a726", "content_id": "08debbb369c8e2361c218bb90e08fb2a057436e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 370, "license_type": "no_license", "max_line_length": 59, "num_lines": 14, "path": "/wrap-gunicorn.sh", "repo_name": "code2pro/infibot", "src_encoding": "UTF-8", "text": "#!/bin/bash -x\n\nif [ -z ${LOG_SUFFIX} ]; then\n LOG_SUFFIX=$(date +%Y%m%d_%H%M%S)\nfi\nINFIBOT_ACCESS_LOG=${INFIBOT_ACCESS_LOG}.${LOG_SUFFIX}\nINFIBOT_ERROR_LOG=${INFIBOT_ERROR_LOG}.${LOG_SUFFIX}\n\nenv\n\ngunicorn -b 127.0.0.1:5000 --backlog 100 --workers 2 \\\n --log-level info --access-logfile $INFIBOT_ACCESS_LOG \\\n --error-logfile $INFIBOT_ERROR_LOG \\\n wsgi:app\n" }, { "alpha_fraction": 0.6161616444587708, "alphanum_fraction": 0.7272727489471436, "avg_line_length": 18.799999237060547, "blob_id": "4e3237df1c0f677377cc2033ad011d64637600f6", "content_id": "bcc74e81e181ee5640b575fc582590df03a32b8e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 99, "license_type": "no_license", "max_line_length": 62, "num_lines": 5, "path": "/misc/assets.md", "repo_name": "code2pro/infibot", "src_encoding": "UTF-8", "text": "* https://www.eventbrite.co.uk/o/london-vietstartup-8609317297\n\n```\nvirtualenv -p python3 venv\n```\n" }, { "alpha_fraction": 0.6837121248245239, "alphanum_fraction": 0.6875, "avg_line_length": 36.71428680419922, "blob_id": "7cc6c3d25f0a3defa76dc4c908fc6678bc7f5d54", "content_id": "22689bae43eab179522db38e72eb341e5459548c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 528, "license_type": "no_license", "max_line_length": 121, "num_lines": 14, "path": "/bot/logger.py", "repo_name": "code2pro/infibot", "src_encoding": "UTF-8", "text": "import logging\n\n\ndef get_logger(log_category):\n '''Return logger with specified log category'''\n FORMAT = '%(asctime)s.%(msecs)03d:%(process)d:%(thread)d %(name)s %(levelname)s %(filename)s:%(lineno)d: %(message)s'\n DATE_FORMAT = '%Y-%m-%d_%H:%M:%S'\n formatter = logging.Formatter(fmt=FORMAT, datefmt=DATE_FORMAT)\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n logger = logging.getLogger(log_category)\n logger.addHandler(handler)\n logger.setLevel(logging.INFO)\n return logger\n" }, { "alpha_fraction": 0.5188679099082947, "alphanum_fraction": 0.7169811129570007, "avg_line_length": 16.66666603088379, "blob_id": "254821e01d57568c6fe8743381b99d3c342c2064", "content_id": "918c9c2bbe618eb38d8a939320c68c04225a419d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 106, "license_type": "no_license", "max_line_length": 23, "num_lines": 6, "path": "/requirements.txt", "repo_name": "code2pro/infibot", "src_encoding": "UTF-8", "text": 
"Flask==0.12.2\ngunicorn==19.7.1\npyTelegramBotAPI==3.1.0\nredis==2.10.5\nvalidate-email==1.3\nrequests==2.18.1\n" }, { "alpha_fraction": 0.5675074458122253, "alphanum_fraction": 0.574184000492096, "avg_line_length": 28.30434799194336, "blob_id": "8993fd7d28d11f0f459f266d44e3aa7174bcb84c", "content_id": "da9b948380ebffcd4f2bade410b9b5fcc06b515f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1348, "license_type": "no_license", "max_line_length": 87, "num_lines": 46, "path": "/bot/storage.py", "repo_name": "code2pro/infibot", "src_encoding": "UTF-8", "text": "import redis\n\nclass EphemeralStore(object):\n def __init__(self, ns, prefix, expire=180, db=0):\n self._redis = redis.Redis(host='localhost', port=6379, db=db)\n self._pipe = self._redis.pipeline()\n self._ns = ns\n self._prefix = prefix\n self._expire = expire\n\n def __contains__(self, key):\n return self.hgetall(key) is not None\n\n def __getitem__(self, key):\n return self.hgetall(key)\n\n def __setitem__(self, key, val):\n return self.hmset(key, val)\n\n def __delitem__(self, key):\n ikey = self.internalize_key(key)\n self._redis.delete(ikey)\n\n def internalize_key(self, key):\n return '{}.{}{}'.format(self._ns, self._prefix, key)\n\n def contains(self, key):\n ikey = self.internalize_key(key)\n ttl = self._redis.ttl(ikey)\n if ttl is None or ttl < 1:\n return None\n return True\n\n def hmset(self, key, dict_val):\n ikey = self.internalize_key(key)\n results = self._pipe.hmset(ikey, dict_val).expire(ikey, self._expire).execute()\n output = True\n for res in results:\n output = output and res\n return output\n\n def hgetall(self, key):\n ikey = self.internalize_key(key)\n if not self.contains(key):\n return None\n return self._redis.hgetall(ikey)\n" }, { "alpha_fraction": 0.739393949508667, "alphanum_fraction": 0.7488215565681458, "avg_line_length": 29.9375, "blob_id": "ca470f3c0c6a677616367248bacdd3af204143df", "content_id": "dd7c894a5bd96b1ab815cf6f5ba9361b21e9d1e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1485, "license_type": "no_license", "max_line_length": 267, "num_lines": 48, "path": "/misc/mailerlite.md", "repo_name": "code2pro/infibot", "src_encoding": "UTF-8", "text": "https://app.mailerlite.com/configuration/subscribe\nhttp://help.mailerlite.com/article/show/29273-double-opt-in-for-api\nhttps://app.mailerlite.com/subscribers/custom_fields/${MAILER_LITE_GROUP_ID}\n\nhttp://developers.mailerlite.com/reference#add-single-subscriber\n\nhttps://stackoverflow.com/questions/42190984/dyld-library-not-loaded-error-preventing-virtualenv-from-loading\n\nhttp://developers.mailerlite.com/docs/request\n\nhttps://app.mailerlite.com/integrations/api/\n\nhttp://developers.mailerlite.com/reference#subscribers-in-a-group-by-type\n\n```\nWhen you pip installed virtualenvwrapper, pip will have installed virtualenv for you as it is a dependency. Unfortunately, that virtualenv is not compatible with Anaconda Python. Fortunately, the Anaconda Distribution has a virtualenv that is compatible. 
To fix this:\n\npip uninstall virtualenv\nconda install virtualenv\n```\n\n\n```\nimport requests, json\n\nMAILER_LITE_API_PREFIX = \"https://api.mailerlite.com/api/v2\"\nMAILER_LITE_GROUP_SUB = \"/groups/%s/subscribers\" % MAILER_LITE_GROUP_ID\nMAILER_LITE_USER_GROUPS = \"/subscribers/%s/groups\" % email\n\nurl = \"%s%s\" % (MAILER_LITE_API_PREFIX, MAILER_LITE_GROUP_SUB)\n\ndata = {\n 'name' : 'John',\n 'email' : '[email protected]',\n 'fields' : {'company': 'MailerLite'}\n}\n\npayload = json.dumps(data)\n\nheaders = {\n 'content-type': \"application/json\",\n 'x-mailerlite-apikey': MAILER_LITE_API_KEY\n}\n\nresponse = requests.request(\"POST\", url, data=payload, headers=headers)\n\nprint(response.text)\n```\n" }, { "alpha_fraction": 0.6606335043907166, "alphanum_fraction": 0.6651583909988403, "avg_line_length": 30.428571701049805, "blob_id": "def1420c1eb471e42118729b8395016d082d92f5", "content_id": "aad730a2ef53db42278d1acfa556cb2e8229175e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 221, "license_type": "no_license", "max_line_length": 58, "num_lines": 7, "path": "/run.sh", "repo_name": "code2pro/infibot", "src_encoding": "UTF-8", "text": "#!/bin/bash -x\n\nLOG_SUFFIX=$(date +%Y%m%d_%H%M%S)\nWRAP_ACCESS_LOG=${INFIBOT_ACCESS_LOG}.app.${LOG_SUFFIX}\nWRAP_ERROR_LOG=${INFIBOT_ERROR_LOG}.app.${LOG_SUFFIX}\n\n./wrap-gunicorn.sh 2>${WRAP_ERROR_LOG} >${WRAP_ACCESS_LOG}\n\n" }, { "alpha_fraction": 0.725806474685669, "alphanum_fraction": 0.7338709831237793, "avg_line_length": 23.799999237060547, "blob_id": "4ef6178eeeda9119cbd4a30ad3d6f68abe617ea8", "content_id": "598094e27c90d935927e3f9d492a3cd0cfbbffa6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 124, "license_type": "no_license", "max_line_length": 56, "num_lines": 5, "path": "/README.md", "repo_name": "code2pro/infibot", "src_encoding": "UTF-8", "text": "# InfiBot - FOSS Bot Framework for everyone\n\n**Work in Progress**\n\nFor TODO tasks, check: https://trello.com/b/BiwT9psX/dev\n" }, { "alpha_fraction": 0.7956989407539368, "alphanum_fraction": 0.8118279576301575, "avg_line_length": 38.85714340209961, "blob_id": "07f71b16d30355904fe6f1a3d3c48b7d76c6b27b", "content_id": "93875625d5fb19e94c152e64a1eaf0b1a5b17aaa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 558, "license_type": "no_license", "max_line_length": 81, "num_lines": 14, "path": "/misc/mailchimp.md", "repo_name": "code2pro/infibot", "src_encoding": "UTF-8", "text": "https://mailchimp.com/about/ips/\nhttps://us11.admin.mailchimp.com/account/api/\nhttps://us11.api.mailchimp.com/playground/\n\nhttp://developer.mailchimp.com/documentation/mailchimp/reference/lists/\nhttp://developer.mailchimp.com/documentation/mailchimp/reference/lists/members/\n\nhttp://kb.mailchimp.com/integrations/api-integrations/set-up-webhooks\n\nhttps://us11.admin.mailchimp.com/lists/tools/webhooks-create?id=MAILCHIMP_GROUPID\n\nhttps://us11.admin.mailchimp.com/lists/settings/merge-tags?id=MAILCHIMP_GROUPID\n\nhttps://docs.python.org/2/library/hashlib.html\n" }, { "alpha_fraction": 0.6199864149093628, "alphanum_fraction": 0.6261047124862671, "avg_line_length": 30.52142906188965, "blob_id": "a2831abbc7614b16fd42877840633b71ef9481a7", "content_id": "943b5f79d44ccf88bf255d6c514ecf8ceb4b04a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4413, "license_type": 
"no_license", "max_line_length": 118, "num_lines": 140, "path": "/bot/util.py", "repo_name": "code2pro/infibot", "src_encoding": "UTF-8", "text": "import telebot, requests\nfrom validate_email import validate_email\nfrom functools import wraps\nfrom datetime import datetime\n\nfrom bot.storage import EphemeralStore\nfrom bot.config import botcfg\nfrom bot.mailchimp import MailChimp\nfrom bot.mailerlite import MailerLite\nfrom bot.logger import get_logger\n\nLOG_CATEGORY = 'INFIBOT.UTIL'\n\n\ndef get_mail_backend():\n if botcfg['MAILING_BACKEND'] == 'MAILER_LITE':\n return MailerLite(apikey=botcfg['MAILER_LITE_API_KEY'],\n group_id=botcfg['MAILER_LITE_GROUPID'])\n elif botcfg['MAILING_BACKEND'] == 'MAILCHIMP':\n return MailChimp(apikey=botcfg['MAILCHIMP_API_KEY'],\n group_id=botcfg['MAILCHIMP_GROUPID'])\n\n\nlogger = get_logger(LOG_CATEGORY)\nmail_backend = get_mail_backend()\n\nclass User(object):\n def __init__(self, first_name, last_name, orig_id):\n self.first_name = first_name\n self.last_name = last_name\n self.orig_id = orig_id\n\n\nclass UserSession(EphemeralStore):\n def __init__(self, ns, prefix, expire=180, db=0):\n super(UserSession, self).__init__(ns, prefix, expire, db)\n\n def __setitem__(self, key, user):\n super().hmset(key, user.__dict__)\n\n def __getitem__(self, key):\n val = super().hgetall(key)\n if not val:\n return None\n # TODO: Fix the Bytes vs String issue\n user = User(\n val[b'first_name'].decode('utf-8'),\n val[b'last_name'].decode('utf-8'),\n val[b'orig_id'].decode('utf-8'))\n return user\n\n\nclass Event(object):\n def __init__(self, name, time, url):\n self.name = name\n self.time = time\n self.url = url\n\n def __repr__(self):\n return \"Event(name=%s, time=%s, url='%s')\" % (self.name, self.time, self.url)\n\n\ndef return_on_stop(f):\n '''Stop the current chatbot flow/session on /stop or /clear'''\n @wraps(f)\n def wrapper(message, *args, **kwds):\n if message.text.strip().lower() in ['/stop', '/clean']:\n return False\n return f(message, *args, **kwds)\n return wrapper\n\n\ndef get_bot():\n '''Return the Telegram bot with token is drawn from config'''\n return telebot.TeleBot(botcfg['TELEGRAM_TOKEN'])\n\n\ndef set_webhook():\n '''Set webhook URL if the current URL is outdated'''\n bot = get_bot()\n wh_info = bot.get_webhook_info()\n logger.info('set_webhook: Web Hook Info = %s' % wh_info)\n if wh_info.url != botcfg['TELEBOT_WEBHOOK_URL']:\n logger.info('set_webhook: Previous webhook URL was %s' % wh_info.url)\n bot.remove_webhook()\n bot.set_webhook(url=botcfg['TELEBOT_WEBHOOK_URL'])\n\n\ndef check_email(email):\n '''Validate email format'''\n return validate_email(email)\n\n\ndef get_session_storage():\n '''Return an ephemeral storage for sessions'''\n return UserSession(ns='sessions', prefix='u', expire=180)\n\n\ndef evbrite_to_local_event(events):\n ret_events = []\n for event in events:\n ev_name = event['name']['text']\n # Format: 2017-08-11T09:00:00\n ev_time = datetime.strptime(event['start']['local'], '%Y-%m-%dT%H:%M:%S')\n ev_url = event['vanity_url'] if 'vanity_url' in event else event['url']\n ret_event = Event(name=ev_name, time=ev_time, url=ev_url)\n ret_events.append(ret_event)\n return ret_events\n\n\ndef get_events():\n '''Returns Live, Started, Ended, and Completed events'''\n EVBRITE_PREFIX = botcfg['EVBRITE_PREFIX']\n EVBRITE_ANON_TOKEN = botcfg['EVBRITE_ANON_TOKEN']\n params = {\n 'token': EVBRITE_ANON_TOKEN,\n 'status': 'live,started,ended',\n 'order_by': 'start_desc',\n }\n r = requests.get(EVBRITE_PREFIX, params=params)\n resp = 
r.json()\n logger.info(\"get_events: [status_code=%d,url=%s]\" % (r.status_code, r.url))\n if r.status_code != 200:\n logger.error(\"get_events: Got error when querying URL '%s' [status_code=%d,error=%s,error_description=%s]\" % (\n r.url, resp['status_code'], resp['error'], resp['error_description']\n ))\n return None\n events = resp['events']\n logger.info(\"get_events: JSON Events = %s\" % events)\n return evbrite_to_local_event(events)\n\n\ndef is_user_subscribed(email):\n '''Check if the user is already subscribed to mail list'''\n return mail_backend.is_user_subscribed(email)\n\n\ndef subscribe_user(user):\n '''Subscribe the user to the default group'''\n return mail_backend.subscribe_user(user)\n" }, { "alpha_fraction": 0.47505632042884827, "alphanum_fraction": 0.5458641648292542, "avg_line_length": 33.921348571777344, "blob_id": "8d11c149c3a5ebcf82171a43c34a4699495a6b64", "content_id": "213f99040cd8b957f03ce210d5d6f3f92eb5602c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3107, "license_type": "no_license", "max_line_length": 144, "num_lines": 89, "path": "/misc/eventbrite.md", "repo_name": "code2pro/infibot", "src_encoding": "UTF-8", "text": "http://www.eventbrite.com/myaccount/apps/\n\nhttps://www.eventbrite.com/developer/v3/quickstart/\n\nhttps://www.eventbrite.com/developer/v3/endpoints/organizers/\n\nhttps://www.eventbrite.com/o/vietstartuplondon-14646693153\n\nhttps://vietstartuplondon.eventbrite.com\n\nhttps://www.eventbrite.co.uk/o/london-vietstartup-8609317297\n\nhttps://www.eventbriteapi.com/v3/organizers/14646693153/events/?token=${EVBRITE_ANON_TOKEN}\n\nhttps://www.eventbriteapi.com/v3/organizers/14646693153/events/?status=live%2Cended%2Cstarted&order_by=start_desc&token=${EVBRITE_ANON_TOKEN}\n\nhttps://www.eventbrite.com/developer/v3/response_formats/event/\n\nhttps://docs.python.org/2/library/datetime.html\n\nhttps://stackoverflow.com/questions/4770297/python-convert-utc-datetime-string-to-local-datetime\n\nhttp://docs.python-requests.org/en/master/user/quickstart/#make-a-request\n\n```\n{\n \"pagination\": {\n \"object_count\": 1,\n \"page_number\": 1,\n \"page_size\": 50,\n \"page_count\": 1,\n \"has_more_items\": false\n },\n \"events\": [\n {\n \"name\": {\n \"text\": \"InfiBot Launch\",\n \"html\": \"InfiBot Launch\"\n },\n \"description\": {\n \"text\": \"With advent of technology and the rise of bots, you can now join communities and get assistance from the bots.\",\n \"html\": \"<P>With advent of technology and the rise of bots, you can now join communities and get assistance from the bots.<\\/P>\"\n },\n \"id\": \"36456262663\",\n \"url\": \"https://www.eventbrite.com/e/vslbot-launch-tickets-36456262663\",\n \"vanity_url\": \"https://vslbot-launch.eventbrite.com\",\n \"start\": {\n \"timezone\": \"Europe/London\",\n \"local\": \"2017-08-11T09:00:00\",\n \"utc\": \"2017-08-11T08:00:00Z\"\n },\n \"end\": {\n \"timezone\": \"Europe/London\",\n \"local\": \"2017-08-11T12:00:00\",\n \"utc\": \"2017-08-11T11:00:00Z\"\n },\n \"created\": \"2017-07-22T05:43:37Z\",\n \"changed\": \"2017-07-22T05:56:09Z\",\n \"capacity\": 100,\n \"capacity_is_custom\": false,\n \"status\": \"live\",\n \"currency\": \"USD\",\n \"listed\": true,\n \"shareable\": true,\n \"online_event\": false,\n \"tx_time_limit\": 480,\n \"hide_start_date\": false,\n \"hide_end_date\": false,\n \"locale\": \"en_US\",\n \"is_locked\": false,\n \"privacy_setting\": \"unlocked\",\n \"is_series\": false,\n \"is_series_parent\": false,\n 
\"is_reserved_seating\": false,\n \"source\": \"create_2.0\",\n \"is_free\": true,\n \"version\": \"3.0.0\",\n \"logo_id\": null,\n \"organizer_id\": \"14646693153\",\n \"venue_id\": \"20440548\",\n \"category_id\": \"102\",\n \"subcategory_id\": \"2004\",\n \"format_id\": \"11\",\n \"resource_uri\": \"https://www.eventbriteapi.com/v3/events/36456262663/\",\n \"logo\": null\n }\n ]\n}\n```" }, { "alpha_fraction": 0.6212854981422424, "alphanum_fraction": 0.6258672475814819, "avg_line_length": 34.53023147583008, "blob_id": "e6cfb33fc72043d17c55514597b8cc5c843f0b81", "content_id": "fb3de653f6e9530c084ea4906a3618289e29f40c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7639, "license_type": "no_license", "max_line_length": 123, "num_lines": 215, "path": "/infibot.py", "repo_name": "code2pro/infibot", "src_encoding": "UTF-8", "text": "import flask, telebot, requests\nfrom telebot.types import InlineKeyboardButton\nfrom telebot.types import ReplyKeyboardRemove, KeyboardButton\n\nfrom bot.config import botcfg\nfrom bot import util\nfrom bot.logger import get_logger\n\nLOG_CATEGORY = 'INFIBOT.MAIN'\nTELEBOT_WH_PATH = '/%s' % botcfg['TELEBOT_WH_PATH']\nFB_WH_PATH = '/%s' % botcfg['FB_WH_PATH']\nINTRO_MSG = \"\"\"Hi %s, I'm a bot from VietStartupLondon.\nHere are the commands:\n/start: Show this message\n/about: Discover our community and fun projects\n/events: Query upcoming and past events\n/member: Subscribe to our upcoming events and news\n/stop: Break current conversation\n\"\"\"\nABOUTUS_MSG = \"\"\"\nHello %s, we are VietStartupLondon, a vibrant community of young Viet professionals in London.\nWe are passionate about entrepreneurship, techonology and working towards connecting Vietnamese start-ups across the globe.\n\nExplore our activities, events and mussings:\n1. Facebook Group: https://www.facebook.com/groups/284739328332602/\n2. Medium: https://medium.com/vietstartup-london\n3. Open Projects: https://trello.com/b/ODvtUhBf/vietstartup\n4. For Developers: https://github.com/VietStartupLondon\n5. 
Website: http://www.vietstartup.co.uk/\n\"\"\"\n\nlogger = get_logger(LOG_CATEGORY)\nsessions = util.get_session_storage()\nbot = util.get_bot()\napp = flask.Flask(__name__)\n\n\[email protected](\"/\")\ndef index():\n return ''\n\n\n################################################################\n## Specific to FB Messenger bot\n################################################################\n\n\[email protected](FB_WH_PATH, methods=['GET'])\ndef handle_fb_verification():\n '''Set up webhook to receive FB Messenger messages'''\n from flask import request\n return request.args['hub.challenge']\n\n\ndef reply_fb_message(user_id, msg):\n '''Reply to FB Messenger messages'''\n data = {\n \"recipient\" : {\"id\": user_id},\n \"message\" : {\"text\": msg},\n }\n resp = requests.post(\n \"https://graph.facebook.com/v2.10/me/messages?access_token=%s\" % botcfg['FB_PAGE_ACCESS_TOKEN'],\n json=data)\n logger.info(\"reply_fb_message: resp = %s\" % resp)\n\n\[email protected](FB_WH_PATH, methods=['POST'])\ndef handle_fb_incoming_messages():\n '''Handle incoming messages from FB Messenger'''\n from flask import request\n data = request.json\n sender = data['entry'][0]['messaging'][0]['sender']['id']\n message = data['entry'][0]['messaging'][0]['message']['text']\n reply_fb_message(sender, message[::-1])\n # Need to return 200 OK\n return ''\n\n################################################################\n## Specific to Telegram bot\n################################################################\n\[email protected](TELEBOT_WH_PATH, methods=['POST'])\ndef telegram_webhook():\n '''Set up webhook to receive Telegram messages'''\n if flask.request.headers.get('content-type') == 'application/json':\n json_string = flask.request.get_data().decode('utf-8')\n update = telebot.types.Update.de_json(json_string)\n bot.process_new_updates([update])\n return ''\n else:\n flask.abort(403)\n\n\ndef guess_fname(message):\n '''Guess first name from available data'''\n if message.chat.first_name:\n return message.chat.first_name\n else:\n return \"there\"\n\n\[email protected]_handler(commands=['stop', 'clean'])\ndef handle_stop(message):\n '''Stop any pending queries and clear any inline keyboards'''\n chat_id = message.chat.id\n if chat_id in sessions:\n del sessions[chat_id]\n keyboard = ReplyKeyboardRemove()\n bot.send_message(message.chat.id, 'Talk to you soon!', reply_markup=keyboard)\n\n\[email protected]_handler(commands=['intro', 'start', 'help'])\ndef handle_aboutus(message):\n '''Introduce the organisation to the world'''\n fname = guess_fname(message)\n bot.send_message(message.chat.id, INTRO_MSG % fname)\n\n\[email protected]_handler(commands=['event', 'events'])\ndef handle_events(message):\n '''List events by the organisation'''\n bot.send_chat_action(message.chat.id, 'typing')\n events = util.get_events()\n if not events:\n bot.send_message(message.chat.id, \"Hey! We don't have events yet. 
Feel free to check again.\")\n else:\n output = \"\"\n for event in events:\n output += \"* %s\\n%s\\n%s\\n----\\n\" % (\n event.name, event.time.strftime(\"%I:%M%p %a, %b %d %Y\"),\n event.url)\n bot.send_message(message.chat.id, \"Our events:\\n%s\" % output)\n\n\[email protected]_handler(commands=['member'])\ndef handle_registration(message):\n chat_id = message.chat.id\n if chat_id in sessions:\n return False\n user_id = None\n if message.from_user:\n user_id = message.from_user.id\n sessions[chat_id] = util.User(\n message.chat.first_name, message.chat.last_name, user_id)\n fname = 'there'\n if message.chat.first_name:\n fname = message.chat.first_name\n msg = bot.reply_to(message, \"\"\"Howdy %s, what's your full name?\n\"\"\" % fname)\n bot.register_next_step_handler(msg, process_name_step)\n\n\[email protected]_on_stop\ndef process_name_step(message):\n chat_id = message.chat.id\n try:\n user = sessions[chat_id]\n user.first_name, user.last_name = message.text.strip().split(' ')\n # Commit the change\n sessions[chat_id] = user\n msg = bot.reply_to(message, 'Your email address, please:')\n bot.register_next_step_handler(msg, process_email_step)\n except Exception as e:\n logger.error('process_name_step: [chat:%d] Could not get first and last names - %s' % (chat_id, e))\n msg = bot.reply_to(message, 'Please enter first & last names:')\n bot.register_next_step_handler(msg, process_name_step)\n\n\[email protected]_on_stop\ndef process_email_step(message):\n chat_id = message.chat.id\n bot.send_chat_action(chat_id, 'typing')\n try:\n if chat_id not in sessions:\n return False\n email = message.text.strip().lower()\n if not util.check_email(email):\n raise Exception('Invalid email %s' % email)\n user = sessions[chat_id]\n # Check if the email is already registered\n if util.is_user_subscribed(email):\n bot.send_message(chat_id,\n \"Thanks %s! Here's news: You are already registered with the email %s\" % (\n user.first_name, email))\n # Session is done, clean up\n del sessions[chat_id]\n return True\n # If the email is not registered, proceed\n user.email = email\n # Commit the change\n sessions[chat_id] = user\n if not util.subscribe_user(user):\n out_msg = \"Hey %s, there's a problem and we could not register your email %s.\" % (\n user.first_name, user.email)\n out_msg += \" Please try again later. We are really sorry for the inconvenience.\"\n bot.send_message(chat_id, out_msg)\n else:\n bot.send_message(chat_id, 'Nice to meet you, %s. We have recorded your email %s.' 
% (\n                user.first_name, user.email))\n        # Session is done, clean up\n        del sessions[chat_id]\n    except Exception as e:\n        logger.error('process_email_step: [chat:%d] Could not obtain email - %s' % (chat_id, e))\n        msg = bot.reply_to(message, \"Ok, please enter your email:\")\n        bot.register_next_step_handler(msg, process_email_step)\n\n\[email protected]_handler(func=lambda message: True)\ndef handle_undefined(message):\n    '''Undefined commands or messages go here'''\n    chat_id = message.chat.id\n    fname = guess_fname(message)\n    logger.info('sessions = %s' % sessions)\n    if chat_id in sessions:\n        return False\n    bot.reply_to(message, INTRO_MSG % fname)\n" }, { "alpha_fraction": 0.7758241891860962, "alphanum_fraction": 0.800000011920929, "avg_line_length": 36.91666793823242, "blob_id": "98cfa0d5931cfb74d0a2ade4b68052d297cc4e95", "content_id": "b39d0d9ace66697b5353a374ba765ce0c0aebf98", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 455, "license_type": "no_license", "max_line_length": 79, "num_lines": 12, "path": "/misc/fb-bot.md", "repo_name": "code2pro/infibot", "src_encoding": "UTF-8", "text": "https://developers.facebook.com/docs/apps/versions\n\nhttps://developers.facebook.com/docs/messenger-platform/guides/quick-start\nhttps://developers.facebook.com/docs/messenger-platform/guides/setup\n\nhttp://flask.pocoo.org/docs/0.12/reqcontext/\n\nhttp://masnun.com/2016/05/22/building-a-facebook-messenger-bot-with-python.html\nhttps://github.com/masnun/fb-bot\n\nhttps://blog.hartleybrody.com/fb-messenger-bot/\nhttps://github.com/hartleybrody/fb-messenger-bot/\n" }, { "alpha_fraction": 0.7014613747596741, "alphanum_fraction": 0.7787056565284729, "avg_line_length": 52.11111068725586, "blob_id": "2596523d58672040f850f3ca0c85807e8b94a30d", "content_id": "ef780e88e0097262d35ab5451a1b08bad43cc122", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 479, "license_type": "no_license", "max_line_length": 158, "num_lines": 9, "path": "/misc/fb_access.md", "repo_name": "code2pro/infibot", "src_encoding": "UTF-8", "text": "\n```\nhttps://developers.facebook.com/tools/explorer/XXXX/?method=GET&path=284739328332602%2Fevents&version=v2.10\n\nhttps://developers.facebook.com/docs/facebook-login/access-tokens/#apptokens\n\ncurl -L \"https://graph.facebook.com/oauth/access_token?client_id=$FB_INFIBOT_ID&client_secret=$FB_INFIBOT_SECRET&grant_type=client_credentials\" > access_token\n\ncurl -i -X GET \"https://graph.facebook.com/v2.10/284739328332602/events?access_token=$FB_INFIBOT_ID|$FB_TEMP_ACCESS_TOKEN\"\n```\n" }, { "alpha_fraction": 0.6579634547233582, "alphanum_fraction": 0.6588337421417236, "avg_line_length": 30.91666603088379, "blob_id": "ab2f6f66dfd5585b739f2ab52c0460301b31b70d", "content_id": "5b838d178184ba7140d48c55bdb4be8fd46e177c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1149, "license_type": "no_license", "max_line_length": 88, "num_lines": 36, "path": "/bot/config.py", "repo_name": "code2pro/infibot", "src_encoding": "UTF-8", "text": "import os\n\nbotcfg = {}\n\nEXPECTED_ENVS = [\n    'MAILING_BACKEND',\n    'EVBRITE_ANON_TOKEN', 'EVBRITE_GROUP_ID',\n    'FB_WH_PATH', 'FB_PAGE_ACCESS_TOKEN', 'FB_WH_SECRET',\n    'TELEGRAM_TOKEN', 'TELEBOT_WEBHOOK_URL', 'TELEBOT_WH_PATH',\n]\nSUPP_MAILING_BACKENDS = {\n    'MAILCHIMP' : ['MAILCHIMP_API_KEY', 'MAILCHIMP_GROUPID'],\n    'MAILER_LITE' : ['MAILER_LITE_API_KEY', 'MAILER_LITE_GROUPID'],\n}\n\nfor var in EXPECTED_ENVS:\n    if var in 
os.environ:\n botcfg[var] = os.environ[var]\n else:\n raise Exception('Missing env var %s' % var)\n\nif botcfg['MAILING_BACKEND'] not in SUPP_MAILING_BACKENDS:\n raise Exception('Currently supporting only %s' % list(SUPP_MAILING_BACKENDS.keys()))\n\nfor var in SUPP_MAILING_BACKENDS[botcfg['MAILING_BACKEND']]:\n if var in os.environ:\n botcfg[var] = os.environ[var]\n else:\n raise Exception('Missing env var %s' % var)\n\nfor var in ['MAILER_LITE_GROUPID']:\n if var in botcfg:\n botcfg[var] = int(botcfg[var])\n\nEVBRITE_GROUP_EVENTS_TMPL = 'https://www.eventbriteapi.com/v3/organizers/%s/events/'\nbotcfg['EVBRITE_PREFIX'] = EVBRITE_GROUP_EVENTS_TMPL % botcfg['EVBRITE_GROUP_ID']\n" }, { "alpha_fraction": 0.6074073910713196, "alphanum_fraction": 0.614814817905426, "avg_line_length": 14, "blob_id": "bb508fd6a27ffcb34f06627b5403ac93bbcf407e", "content_id": "9c6622c600285ed3e227f88d6b865b5b52169884", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 135, "license_type": "no_license", "max_line_length": 26, "num_lines": 9, "path": "/wsgi.py", "repo_name": "code2pro/infibot", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nfrom infibot import app\nfrom bot import util\n\nif __name__ == \"__main__\":\n util.set_webhook()\n\n app.run()\n" } ]
21
rikumiura/pyCodes
https://github.com/rikumiura/pyCodes
0dec2988578c2470d9418a434b89bd97163403f4
83df7ff266f359d62d677e97d5c8ef0dfa4495bf
c90a8ba154eedda48dbdae252eb78fc98f826eb6
refs/heads/master
2022-02-26T12:40:31.042205
2019-09-18T18:43:38
2019-09-18T18:43:38
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.48785045742988586, "alphanum_fraction": 0.5373831987380981, "avg_line_length": 22.930233001708984, "blob_id": "1557c044356b8c511ea59975f9b050041f14d5f3", "content_id": "968bb7194d1a7986062c6d60038e21ac39b8bae7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1212, "license_type": "no_license", "max_line_length": 72, "num_lines": 43, "path": "/statistics/myEstimation_ratio.py", "repo_name": "rikumiura/pyCodes", "src_encoding": "UTF-8", "text": "import numpy as np\r\nfrom scipy.stats import t\r\nimport matplotlib.pyplot as plt\r\n\r\n## nใŒใ‚ใ‚‹็จ‹ๅบฆๅคงใใ„ๆ™‚ใ€Bin(n,p)ใฏN(np,np(1-p))ใซ่ฟ‘ไผผใงใใ‚‹\r\n## ใ‚ˆใฃใฆใ€z = ( x - np ) / sqrt( np(1-p) )ใฏN(0,1)ใซๅพ“ใ†\r\n## z = ( x/n - p ) / sqrt( p(1-p)/n )\r\n## = ( sp - p ) / sqrt( p(1-p)/n )ใจๆ›ธใ‘ใ‚‹\r\n## 95%ไฟก้ ผๅŒบ้–“ใฏๆฌกใฎใ‚ˆใ†ใซใชใ‚‹\r\n## sp - 1.96 * sqrt( p(1-p)/n ) <= p <= sp + 1.96 * sqrt( p(1-p)/n )\r\n## nใŒๅคงใใ„ๆ™‚ใฏp~=spใชใฎใงใ€ไปฅไธ‹ใฎๅผใ‚’ไฝฟใ†\r\n## sp - 1.96 * sqrt( sp(1-sp)/n ) <= p <= sp + 1.96 * sqrt( sp(1-sp)/n )\r\n\r\nsNum = 1000\r\nn = 100\r\np = 0.2\r\nx = np.random.binomial(n, p, sNum)\r\npm = n * p\r\npv = n * p * ( 1 - p )\r\nsm = np.mean(x)\r\ns = np.std(x,ddof=1)\r\nsp = sm / n\r\n\r\n# 95%ๅŒบ้–“ใฎไธŠ้™ใ€ไธ‹้™\r\nnorm_low, norm_upp = -1.96, 1.96\r\n\r\n# pmใฎ95%ไฟก้ ผๅŒบ้–“\r\np_lower = sp + norm_low * np.sqrt( sp*(1-sp) / n)\r\np_upper = sp + norm_upp * np.sqrt( sp*(1-sp) / n)\r\n\r\nprint(\"t_low, t_upp: \", norm_low, norm_upp)\r\nprint(\"sp: \", sp)\r\nprint(\"p: \", p)\r\nprint(\"p_low, p_upp: \", p_lower, p_upper)\r\n\r\nfig = plt.figure()\r\nax = fig.add_subplot(1,1,1)\r\nax.hist(x, bins=100)\r\nax.set_title(\"histgram\")\r\nax.set_xlabel(\"x\")\r\nax.set_ylabel(\"frequency\")\r\nfig.show()\r\nplt.show()" }, { "alpha_fraction": 0.5308176279067993, "alphanum_fraction": 0.6062893271446228, "avg_line_length": 21.441177368164062, "blob_id": "4e78c25c59e0209601e0d585827559e5ac21d3a9", "content_id": "675bf5fb1fdf20e1a54ee59f09fe2feb019fa5a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 869, "license_type": "no_license", "max_line_length": 50, "num_lines": 34, "path": "/statistics/myTest_chi2.py", "repo_name": "rikumiura/pyCodes", "src_encoding": "UTF-8", "text": "import numpy as np\r\nfrom scipy.stats import chi2\r\nimport matplotlib.pyplot as plt\r\n\r\n## ้ฉๅˆๆ€งใฎๆคœๅฎš\r\n\r\n# TestType = \"OneSided\"\r\nTestType = \"TwoSided\"\r\n\r\nx = np.array([55, 22, 16, 7])\r\nx_true = np.array([40, 30, 20, 10])\r\n\r\n# tๅˆ†ๅธƒใงใฎ95%ๅŒบ้–“ใฎไธŠ้™ใ€ไธ‹้™\r\ndf = len(x)-1\r\nchi2_upp = chi2.ppf(q=[0.95], df=df)\r\n\r\n# ็ตฑ่จˆ้‡\r\nx_diff = (x - x_true)*(x - x_true)/x_true\r\nchi2_val = np.sum(x_diff)\r\n\r\nprint(\"chi2_upp: \", chi2_upp)\r\nprint(\"chi2_val: \", chi2_val)\r\n\r\nresult = True if (chi2_upp >= chi2_val) else False\r\nprint(\"result: \", result)\r\n\r\n# pๅ€คใ€€็ตฑ่จˆ้‡ใ‚ˆใ‚Šๆฅต็ซฏใชๅ€คใ‚’ๅ–ใ‚‹็ขบ็އ\r\np_val = chi2.cdf(chi2_val, df=df)\r\np_val = 1 - p_val if p_val > 0.5 else p_val\r\nprint(\"p_val: \", p_val)\r\nres_005 = True if p_val > 0.05 else False\r\nres_001 = True if p_val > 0.01 else False\r\nprint(\"p_val > 0.05: \", res_005)\r\nprint(\"p_val > 0.01: \", res_001)" }, { "alpha_fraction": 0.4715423882007599, "alphanum_fraction": 0.5466704368591309, "avg_line_length": 36.39361572265625, "blob_id": "099dcecfe9675a365b7f8008146a4b856b9c38a1", "content_id": "73779654c0b9db5548c5acbee3b9ee06f5350304", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3514, "license_type": "no_license", "max_line_length": 132, "num_lines": 94, "path": "/kerasPrj/myHandDrtection/myCameraSSD_class1_hand.py", "repo_name": "rikumiura/pyCodes", "src_encoding": "UTF-8", "text": "import cv2\nimport numpy as np\nfrom keras.models import load_model\nfrom keras.optimizers import Adam\nfrom matplotlib import pyplot as plt\n\nimport sys\nsys.path.append('../ssd_keras')\nfrom keras_loss_function.keras_ssd_loss import SSDLoss\nfrom models_ssd.keras_ssd300 import ssd_300\n\ncolors = plt.cm.hsv(np.linspace(0, 1, 21)).tolist()\nclasses = ['background',\n 'hand']\n\n# Set a few configuration parameters.\nimg_height = 300\nimg_width = 300\nn_classes = 1\nmodel_mode = 'inference'\n\nmodel = ssd_300(image_size=(img_height, img_width, 3),\n n_classes=n_classes,\n mode=model_mode,\n l2_regularization=0.0005,\n scales=[0.1, 0.2, 0.37, 0.54, 0.71, 0.88, 1.05], # The scales for MS COCO [0.07, 0.15, 0.33, 0.51, 0.69, 0.87, 1.05]\n aspect_ratios_per_layer=[[1.0, 2.0, 0.5],\n [1.0, 2.0, 0.5, 3.0, 1.0/3.0],\n [1.0, 2.0, 0.5, 3.0, 1.0/3.0],\n [1.0, 2.0, 0.5, 3.0, 1.0/3.0],\n [1.0, 2.0, 0.5],\n [1.0, 2.0, 0.5]],\n two_boxes_for_ar1=True,\n steps=[8, 16, 32, 64, 100, 300],\n offsets=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5],\n clip_boxes=False,\n variances=[0.1, 0.1, 0.2, 0.2],\n normalize_coords=True,\n subtract_mean=[123, 117, 104],\n swap_channels=[2, 1, 0],\n confidence_thresh=0.01,\n iou_threshold=0.45,\n top_k=200,\n nms_max_output_size=400)\n\n# load weight\nweights_path = 'mypartsDetectionSSD_class1_epoch-01_loss-4.9658_val_loss-6.6276.h5'\nmodel.load_weights(weights_path, by_name=True)\n\n# 3: Compile the model so that Keras won't complain the next time you load it.\nadam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)\nssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)\nmodel.compile(optimizer=adam, loss=ssd_loss.compute_loss)\nprint(\"model summary: \", model.summary())\n\n# prepare capture\ncam = cv2.VideoCapture(0)\nwhile True:\n _, img = cam.read()\n img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n camImgH = img_rgb.shape[0]\n camImgW = img_rgb.shape[1]\n img_rgb_resized = cv2.resize(img_rgb, (img_width, img_height))\n testImg = img_rgb_resized.reshape(-1, img_height, img_width, 3).astype('float32')\n y_pred = model.predict(testImg)\n\n i = 0\n confidence_threshold = 0.5\n y_pred_thresh = [y_pred[k][y_pred[k, :, 1] > confidence_threshold] for k in range(y_pred.shape[0])]\n print(\"Predicted boxes:\\n\")\n print(' class conf xmin ymin xmax ymax')\n print(y_pred_thresh[0])\n\n for box in y_pred_thresh[i]:\n class_id = box[0]\n confidence = box[1]\n xmin = box[2] * camImgW / img_width\n ymin = box[3] * camImgH / img_height\n xmax = box[4] * camImgW / img_width\n ymax = box[5] * camImgH / img_height\n xmin = int(xmin)\n xmax = int(xmax)\n ymin = int(ymin)\n ymax = int(ymax)\n color = colors[int(box[0])]\n tlabel = '{}: {:.2f}'.format(classes[int(box[0])], box[1])\n cv2.putText(img, tlabel, (xmin, ymin), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 0))\n cv2.rectangle(img, (xmin, ymin), (xmax, ymax), (255, 0, 0), 2)\n\n cv2.imshow('PUSH ENTER KEY', img)\n if cv2.waitKey(1) == 13: break\n\ncam.release()\ncv2.destroyAllWindows()" }, { "alpha_fraction": 0.3579879701137543, "alphanum_fraction": 0.406570166349411, "avg_line_length": 39.22343444824219, "blob_id": "1fcd9cced578223a7f42da1dccb45506e57cd8b6", "content_id": "fc99c35c24b521f3e1daec3d67f586ca8bbc1f76", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15335, "license_type": "no_license", "max_line_length": 76, "num_lines": 367, "path": "/mlPrediction/toTxt_weather_old.py", "repo_name": "rikumiura/pyCodes", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport csv\r\nimport glob\r\nimport os\r\n\r\nArrIdx = [1,4,10,14,18,22,26,29,31,34,36,42,45,48,51,54,57,61,70,74,]\r\n# 0.. ๆ—ฅไป˜ใ€€ๅ†—้•ทใชใฎใงใƒ‘ใ‚น\r\n# 64, 67.. ๅคฉๆฐ—ใ€€ใฏใƒฉใƒ™ใƒซใŒๆ•ฐๅ€คๅŒ–ใ—ใซใใ„ใฎใงใƒ‘ใ‚น\r\n# 7.. ๆœ€ไฝŽๆฐ—ๆธฉใ€€ๅœฐๅŸŸ้™ๅฎš\r\n\r\nregion2_list = [\"choshi\", ]\r\nArrIdx_region2 = [1,4,7,11,15,19,23,26,28,31,33,36,42,45,48,51,54,57,61,70,]\r\n\r\ndataPath = \"data_weather_test/\"\r\nfor f in glob.glob(os.path.join(dataPath, \"*.csv\")):\r\n name, ext = os.path.splitext(f)\r\n txtName = name + \".txt\"\r\n file = open(f, mode='r')\r\n data_reader = csv.reader(file,delimiter=\",\")\r\n data_raw = [row for row in data_reader]\r\n name_raw = data_raw[3]\r\n data_raw = data_raw[6:]\r\n\r\n print(\"dataName :\", f)\r\n\r\n data = []\r\n # name = [ name_raw[j] for j in range(1,len(name_raw),3)]\r\n data_name = [ name_raw[idx] for idx in ArrIdx ]\r\n for i in range(len(data_raw)):\r\n # data_new = [ data_raw[i][j] for j in range(1,len(data_raw[i]),3)]\r\n data_new = [data_raw[i][idx] for idx in ArrIdx]\r\n data.append(data_new)\r\n\r\n # #ใ‚คใƒณใƒ‡ใƒƒใ‚ฏใ‚นใ”ใจไฟฎๆญฃ\r\n # # 0..ๆœˆๆ—ฅ\r\n # # 0.. 4/1 - 1..3/31\r\n # fidx = 0\r\n # dateVal = 0\r\n # popIdx = -1\r\n # for i in range(len(data)):\r\n # year, month, day = data[i][fidx].split(\"/\")\r\n # year = int(year)\r\n # month = int(month)\r\n # day = int(day)\r\n # if (month == 2) and (day == 29):\r\n # popIdx = i\r\n # continue\r\n # data[i][0] = dateVal\r\n # dateVal += 1\r\n # # 2/29ใ‚’้™คๅŽป\r\n # if popIdx >= 0:\r\n # data.pop(popIdx)\r\n\r\n # 9, 11.. 
้ขจๅ‘ใ\r\n # python list ใฏ arr[:,i]ใจใ„ใ†่กจ็พใŒไฝฟใˆใชใ„\r\n fidxArr = [ArrIdx.index(31),ArrIdx.index(36)+15]\r\n for fidx in fidxArr:\r\n data_name[fidx] = \"N\"\r\n data_name.insert(fidx + 1, \"NNE\")\r\n data_name.insert(fidx + 2, \"NE\")\r\n data_name.insert(fidx + 3, \"NEE\")\r\n data_name.insert(fidx + 4, \"E\")\r\n data_name.insert(fidx + 5, \"EES\")\r\n data_name.insert(fidx + 6, \"ES\")\r\n data_name.insert(fidx + 7, \"ESS\")\r\n data_name.insert(fidx + 8, \"S\")\r\n data_name.insert(fidx + 9, \"SSW\")\r\n data_name.insert(fidx + 10, \"SW\")\r\n data_name.insert(fidx + 11, \"SWW\")\r\n data_name.insert(fidx + 12, \"W\")\r\n data_name.insert(fidx + 13, \"WWN\")\r\n data_name.insert(fidx + 14, \"WN\")\r\n data_name.insert(fidx + 15, \"WNN\")\r\n for i in range(len(data)):\r\n val = data[i][fidx]\r\n if \"ๅŒ—ๅŒ—ๆฑ\" in data[i][fidx]:\r\n data[i][fidx] = 0\r\n data[i].insert(fidx + 1, 1)\r\n data[i].insert(fidx + 2, 0)\r\n data[i].insert(fidx + 3, 0)\r\n data[i].insert(fidx + 4, 0)\r\n data[i].insert(fidx + 5, 0)\r\n data[i].insert(fidx + 6, 0)\r\n data[i].insert(fidx + 7, 0)\r\n data[i].insert(fidx + 8, 0)\r\n data[i].insert(fidx + 9, 0)\r\n data[i].insert(fidx + 10, 0)\r\n data[i].insert(fidx + 11, 0)\r\n data[i].insert(fidx + 12, 0)\r\n data[i].insert(fidx + 13, 0)\r\n data[i].insert(fidx + 14, 0)\r\n data[i].insert(fidx + 15, 0)\r\n elif \"ๆฑๅŒ—ๆฑ\" in data[i][fidx]:\r\n data[i][fidx] = 0\r\n data[i].insert(fidx + 1, 0)\r\n data[i].insert(fidx + 2, 0)\r\n data[i].insert(fidx + 3, 1)\r\n data[i].insert(fidx + 4, 0)\r\n data[i].insert(fidx + 5, 0)\r\n data[i].insert(fidx + 6, 0)\r\n data[i].insert(fidx + 7, 0)\r\n data[i].insert(fidx + 8, 0)\r\n data[i].insert(fidx + 9, 0)\r\n data[i].insert(fidx + 10, 0)\r\n data[i].insert(fidx + 11, 0)\r\n data[i].insert(fidx + 12, 0)\r\n data[i].insert(fidx + 13, 0)\r\n data[i].insert(fidx + 14, 0)\r\n data[i].insert(fidx + 15, 0)\r\n elif \"ๆฑๅ—ๆฑ\" in data[i][fidx]:\r\n data[i][fidx] = 0\r\n data[i].insert(fidx + 1, 0)\r\n data[i].insert(fidx + 2, 0)\r\n data[i].insert(fidx + 3, 0)\r\n data[i].insert(fidx + 4, 0)\r\n data[i].insert(fidx + 5, 1)\r\n data[i].insert(fidx + 6, 0)\r\n data[i].insert(fidx + 7, 0)\r\n data[i].insert(fidx + 8, 0)\r\n data[i].insert(fidx + 9, 0)\r\n data[i].insert(fidx + 10, 0)\r\n data[i].insert(fidx + 11, 0)\r\n data[i].insert(fidx + 12, 0)\r\n data[i].insert(fidx + 13, 0)\r\n data[i].insert(fidx + 14, 0)\r\n data[i].insert(fidx + 15, 0)\r\n elif \"ๅ—ๅ—ๆฑ\" in data[i][fidx]:\r\n data[i][fidx] = 0\r\n data[i].insert(fidx + 1, 1)\r\n data[i].insert(fidx + 2, 0)\r\n data[i].insert(fidx + 3, 0)\r\n data[i].insert(fidx + 4, 0)\r\n data[i].insert(fidx + 5, 0)\r\n data[i].insert(fidx + 6, 0)\r\n data[i].insert(fidx + 7, 1)\r\n data[i].insert(fidx + 8, 0)\r\n data[i].insert(fidx + 9, 0)\r\n data[i].insert(fidx + 10, 0)\r\n data[i].insert(fidx + 11, 0)\r\n data[i].insert(fidx + 12, 0)\r\n data[i].insert(fidx + 13, 0)\r\n data[i].insert(fidx + 14, 0)\r\n data[i].insert(fidx + 15, 0)\r\n elif \"ๅ—ๅ—่ฅฟ\" in data[i][fidx]:\r\n data[i][fidx] = 0\r\n data[i].insert(fidx + 1, 0)\r\n data[i].insert(fidx + 2, 0)\r\n data[i].insert(fidx + 3, 0)\r\n data[i].insert(fidx + 4, 0)\r\n data[i].insert(fidx + 5, 0)\r\n data[i].insert(fidx + 6, 0)\r\n data[i].insert(fidx + 7, 0)\r\n data[i].insert(fidx + 8, 0)\r\n data[i].insert(fidx + 9, 1)\r\n data[i].insert(fidx + 10, 0)\r\n data[i].insert(fidx + 11, 0)\r\n data[i].insert(fidx + 12, 0)\r\n data[i].insert(fidx + 13, 0)\r\n data[i].insert(fidx + 14, 0)\r\n 
data[i].insert(fidx + 15, 0)\r\n elif \"่ฅฟๅ—่ฅฟ\" in data[i][fidx]:\r\n data[i][fidx] = 0\r\n data[i].insert(fidx + 1, 0)\r\n data[i].insert(fidx + 2, 0)\r\n data[i].insert(fidx + 3, 0)\r\n data[i].insert(fidx + 4, 0)\r\n data[i].insert(fidx + 5, 0)\r\n data[i].insert(fidx + 6, 0)\r\n data[i].insert(fidx + 7, 0)\r\n data[i].insert(fidx + 8, 0)\r\n data[i].insert(fidx + 9, 0)\r\n data[i].insert(fidx + 10, 0)\r\n data[i].insert(fidx + 11, 1)\r\n data[i].insert(fidx + 12, 0)\r\n data[i].insert(fidx + 13, 0)\r\n data[i].insert(fidx + 14, 0)\r\n data[i].insert(fidx + 15, 0)\r\n elif \"่ฅฟๅŒ—่ฅฟ\" in data[i][fidx]:\r\n data[i][fidx] = 0\r\n data[i].insert(fidx + 1, 0)\r\n data[i].insert(fidx + 2, 0)\r\n data[i].insert(fidx + 3, 0)\r\n data[i].insert(fidx + 4, 0)\r\n data[i].insert(fidx + 5, 0)\r\n data[i].insert(fidx + 6, 0)\r\n data[i].insert(fidx + 7, 0)\r\n data[i].insert(fidx + 8, 0)\r\n data[i].insert(fidx + 9, 0)\r\n data[i].insert(fidx + 10, 0)\r\n data[i].insert(fidx + 11, 0)\r\n data[i].insert(fidx + 12, 0)\r\n data[i].insert(fidx + 13, 1)\r\n data[i].insert(fidx + 14, 0)\r\n data[i].insert(fidx + 15, 0)\r\n elif \"ๅŒ—ๅŒ—่ฅฟ\" in data[i][fidx]:\r\n data[i][fidx] = 0\r\n data[i].insert(fidx + 1, 0)\r\n data[i].insert(fidx + 2, 0)\r\n data[i].insert(fidx + 3, 0)\r\n data[i].insert(fidx + 4, 0)\r\n data[i].insert(fidx + 5, 0)\r\n data[i].insert(fidx + 6, 0)\r\n data[i].insert(fidx + 7, 0)\r\n data[i].insert(fidx + 8, 0)\r\n data[i].insert(fidx + 9, 0)\r\n data[i].insert(fidx + 10, 0)\r\n data[i].insert(fidx + 11, 0)\r\n data[i].insert(fidx + 12, 0)\r\n data[i].insert(fidx + 13, 0)\r\n data[i].insert(fidx + 14, 0)\r\n data[i].insert(fidx + 15, 1)\r\n elif \"ๅŒ—ๆฑ\" in data[i][fidx]:\r\n data[i][fidx] = 0\r\n data[i].insert(fidx + 1, 0)\r\n data[i].insert(fidx + 2, 1)\r\n data[i].insert(fidx + 3, 0)\r\n data[i].insert(fidx + 4, 0)\r\n data[i].insert(fidx + 5, 0)\r\n data[i].insert(fidx + 6, 0)\r\n data[i].insert(fidx + 7, 0)\r\n data[i].insert(fidx + 8, 0)\r\n data[i].insert(fidx + 9, 0)\r\n data[i].insert(fidx + 10, 0)\r\n data[i].insert(fidx + 11, 0)\r\n data[i].insert(fidx + 12, 0)\r\n data[i].insert(fidx + 13, 0)\r\n data[i].insert(fidx + 14, 0)\r\n data[i].insert(fidx + 15, 0)\r\n elif \"ๅ—ๆฑ\" in data[i][fidx]:\r\n data[i][fidx] = 0\r\n data[i].insert(fidx + 1, 0)\r\n data[i].insert(fidx + 2, 0)\r\n data[i].insert(fidx + 3, 0)\r\n data[i].insert(fidx + 4, 0)\r\n data[i].insert(fidx + 5, 0)\r\n data[i].insert(fidx + 6, 1)\r\n data[i].insert(fidx + 7, 0)\r\n data[i].insert(fidx + 8, 0)\r\n data[i].insert(fidx + 9, 0)\r\n data[i].insert(fidx + 10, 0)\r\n data[i].insert(fidx + 11, 0)\r\n data[i].insert(fidx + 12, 0)\r\n data[i].insert(fidx + 13, 0)\r\n data[i].insert(fidx + 14, 0)\r\n data[i].insert(fidx + 15, 0)\r\n elif \"ๅ—่ฅฟ\" in data[i][fidx]:\r\n data[i][fidx] = 0\r\n data[i].insert(fidx + 1, 0)\r\n data[i].insert(fidx + 2, 0)\r\n data[i].insert(fidx + 3, 0)\r\n data[i].insert(fidx + 4, 0)\r\n data[i].insert(fidx + 5, 0)\r\n data[i].insert(fidx + 6, 0)\r\n data[i].insert(fidx + 7, 0)\r\n data[i].insert(fidx + 8, 0)\r\n data[i].insert(fidx + 9, 0)\r\n data[i].insert(fidx + 10, 1)\r\n data[i].insert(fidx + 11, 0)\r\n data[i].insert(fidx + 12, 0)\r\n data[i].insert(fidx + 13, 0)\r\n data[i].insert(fidx + 14, 0)\r\n data[i].insert(fidx + 15, 0)\r\n elif \"ๅŒ—่ฅฟ\" in data[i][fidx]:\r\n data[i][fidx] = 0\r\n data[i].insert(fidx + 1, 0)\r\n data[i].insert(fidx + 2, 0)\r\n data[i].insert(fidx + 3, 0)\r\n data[i].insert(fidx + 4, 0)\r\n data[i].insert(fidx + 5, 0)\r\n 
data[i].insert(fidx + 6, 0)\r\n data[i].insert(fidx + 7, 0)\r\n data[i].insert(fidx + 8, 0)\r\n data[i].insert(fidx + 9, 0)\r\n data[i].insert(fidx + 10, 0)\r\n data[i].insert(fidx + 11, 0)\r\n data[i].insert(fidx + 12, 0)\r\n data[i].insert(fidx + 13, 0)\r\n data[i].insert(fidx + 14, 1)\r\n data[i].insert(fidx + 15, 0)\r\n elif \"ๅŒ—\" in data[i][fidx] :\r\n data[i][fidx] = 1\r\n data[i].insert(fidx + 1, 0)\r\n data[i].insert(fidx + 2, 0)\r\n data[i].insert(fidx + 3, 0)\r\n data[i].insert(fidx + 4, 0)\r\n data[i].insert(fidx + 5, 0)\r\n data[i].insert(fidx + 6, 0)\r\n data[i].insert(fidx + 7, 0)\r\n data[i].insert(fidx + 8, 0)\r\n data[i].insert(fidx + 9, 0)\r\n data[i].insert(fidx + 10, 0)\r\n data[i].insert(fidx + 11, 0)\r\n data[i].insert(fidx + 12, 0)\r\n data[i].insert(fidx + 13, 0)\r\n data[i].insert(fidx + 14, 0)\r\n data[i].insert(fidx + 15, 0)\r\n elif \"ๆฑ\" in data[i][fidx]:\r\n data[i][fidx] = 0\r\n data[i].insert(fidx + 1, 1)\r\n data[i].insert(fidx + 2, 0)\r\n data[i].insert(fidx + 3, 0)\r\n data[i].insert(fidx + 4, 1)\r\n data[i].insert(fidx + 5, 0)\r\n data[i].insert(fidx + 6, 0)\r\n data[i].insert(fidx + 7, 0)\r\n data[i].insert(fidx + 8, 0)\r\n data[i].insert(fidx + 9, 0)\r\n data[i].insert(fidx + 10, 0)\r\n data[i].insert(fidx + 11, 0)\r\n data[i].insert(fidx + 12, 0)\r\n data[i].insert(fidx + 13, 0)\r\n data[i].insert(fidx + 14, 0)\r\n data[i].insert(fidx + 15, 0)\r\n elif \"ๅ—\" in data[i][fidx]:\r\n data[i][fidx] = 0\r\n data[i].insert(fidx + 1, 0)\r\n data[i].insert(fidx + 2, 0)\r\n data[i].insert(fidx + 3, 0)\r\n data[i].insert(fidx + 4, 0)\r\n data[i].insert(fidx + 5, 0)\r\n data[i].insert(fidx + 6, 0)\r\n data[i].insert(fidx + 7, 0)\r\n data[i].insert(fidx + 8, 1)\r\n data[i].insert(fidx + 9, 0)\r\n data[i].insert(fidx + 10, 0)\r\n data[i].insert(fidx + 11, 0)\r\n data[i].insert(fidx + 12, 0)\r\n data[i].insert(fidx + 13, 0)\r\n data[i].insert(fidx + 14, 0)\r\n data[i].insert(fidx + 15, 0)\r\n elif \"่ฅฟ\" in data[i][fidx]:\r\n data[i][fidx] = 0\r\n data[i].insert(fidx + 1, 0)\r\n data[i].insert(fidx + 2, 0)\r\n data[i].insert(fidx + 3, 0)\r\n data[i].insert(fidx + 4, 0)\r\n data[i].insert(fidx + 5, 0)\r\n data[i].insert(fidx + 6, 0)\r\n data[i].insert(fidx + 7, 0)\r\n data[i].insert(fidx + 8, 0)\r\n data[i].insert(fidx + 9, 0)\r\n data[i].insert(fidx + 10, 0)\r\n data[i].insert(fidx + 11, 0)\r\n data[i].insert(fidx + 12, 1)\r\n data[i].insert(fidx + 13, 0)\r\n data[i].insert(fidx + 14, 0)\r\n data[i].insert(fidx + 15, 0)\r\n\r\n # interpolate missing values\r\n for fidx in range(len(data[0])):\r\n for i in range(len(data)):\r\n val = data[i][fidx]\r\n if val == '':\r\n if i > 0:\r\n data[i][fidx] = data[i-1][fidx]\r\n else:\r\n print(\"print here is unexpected.\")\r\n\r\n # data.insert(0,data_name)\r\n print(\"len(data): \", len(data))\r\n\r\n file_w = open(txtName, mode='w', newline=\"\")\r\n writer = csv.writer(file_w)\r\n writer.writerow(data_name)\r\n for i in range(len(data)):\r\n writer.writerow(data[i])\r\n" }, { "alpha_fraction": 0.5034079551696777, "alphanum_fraction": 0.587147057056427, "avg_line_length": 20.326086044311523, "blob_id": "282564e8badacc20070dae6dafe6017b45951ff7", "content_id": "07a18cf1dcd035d70c1115fbba95a4afd0b1b3c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1103, "license_type": "no_license", "max_line_length": 65, "num_lines": 46, "path": "/statistics/myTest_welch.py", "repo_name": "rikumiura/pyCodes", "src_encoding": "UTF-8", "text": "import numpy as np\r\nfrom 
from scipy.stats import t\r\nimport matplotlib.pyplot as plt\r\n\r\n## Welch's t-test (two samples, unequal variances)\r\n\r\nsNum1 = 30\r\npm1 = 5\r\npv1 = 10\r\nx1 = np.random.normal(pm1, pv1, sNum1)\r\nsm1 = np.mean(x1)\r\ns1 = np.std(x1,ddof=1)\r\n\r\nsNum2 = 20\r\npm2 = 6\r\npv2 = 15\r\nx2 = np.random.normal(pm2, pv2, sNum2)\r\nsm2 = np.mean(x2)\r\ns2 = np.std(x2,ddof=1)\r\n\r\n# Welch-Satterthwaite degrees of freedom (n1 + n2 - 2 is only valid for\r\n# the equal-variance pooled t-test, not for Welch's test)\r\nv1 = s1*s1/len(x1)\r\nv2 = s2*s2/len(x2)\r\ndf = (v1 + v2)**2 / ( v1*v1/(len(x1)-1) + v2*v2/(len(x2)-1) )\r\n\r\n# lower/upper bounds of the central 95% region of the t-distribution\r\nt_low, t_upp = t.ppf(q=[0.025, 0.975], df=df)\r\n\r\n# test statistic\r\nt_val = ( sm1 - sm2 ) / np.sqrt(v1 + v2)\r\n\r\nprint(\"sm1: \", sm1)\r\nprint(\"sm2: \", sm2)\r\nprint(\"t_low, t_upp: \", t_low, t_upp)\r\nprint(\"t_val: \", t_val)\r\n\r\nresult = (t_low <= t_val) and (t_upp >= t_val)\r\nprint(\"result: \", result)\r\n\r\n# p-value: probability of a value at least as extreme as the statistic\r\n# (one tail; double it for the two-sided test)\r\np_val = t.cdf(t_val, df=df)\r\np_val = 1 - p_val if p_val > 0.5 else p_val\r\nprint(\"p_val: \", p_val)\r\nres_005 = bool(p_val > 0.05)\r\nres_001 = bool(p_val > 0.01)\r\nprint(\"p_val > 0.05: \", res_005)\r\nprint(\"p_val > 0.01: \", res_001)\r\n" }, { "alpha_fraction": 0.5142517685890198, "alphanum_fraction": 0.570071280002594, "avg_line_length": 20.58974266052246, "blob_id": "ac471b81fc965dd5772b69fb9e9ee2277fce0ef9", "content_id": "87060182bb0e36fa561f67620b02d56b751c1f42", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 916, "license_type": "no_license", "max_line_length": 61, "num_lines": 39, "path": "/statistics/myTest_bin.py", "repo_name": "rikumiura/pyCodes", "src_encoding": "UTF-8", "text": "import numpy as np\r\nfrom scipy.stats import norm\r\nimport matplotlib.pyplot as plt\r\n\r\n## Test on a binomial proportion (normal approximation)\r\n\r\n# TestType = \"OneSided\"\r\nTestType = \"TwoSided\"\r\nx_test = 2200\r\n\r\nn = 12000\r\np = 1/6\r\npm = n * p\r\npv = n * p * ( 1 - p )\r\n\r\n# critical values: two-sided 5% uses +-1.96, one-sided 5% uses -1.645\r\nz_low, z_upp = norm.ppf(0.025), norm.ppf(0.975)\r\nz_low_1s = norm.ppf(0.05)\r\n\r\n# z-transformed value of x\r\nz = ( x_test - pm ) / np.sqrt(pv)\r\n\r\nprint(\"x_test: \", x_test)\r\nprint(\"z_low, z_upp: \", z_low, z_upp)\r\nprint(\"z: \", z)\r\n\r\nif TestType == \"OneSided\":\r\n    result = (z_low_1s <= z)\r\nelse:\r\n    result = (z_low <= z) and (z_upp >= z)\r\nprint(\"result: \", result)\r\n\r\n# p-value: probability of a value at least as extreme as the statistic\r\n# (one tail; double it for the two-sided test)\r\np_val = norm.cdf(z)\r\np_val = 1 - p_val if p_val > 0.5 else p_val\r\nprint(\"p_val: \", p_val)\r\nres_005 = bool(p_val > 0.05)\r\nres_001 = bool(p_val > 0.01)\r\nprint(\"p_val > 0.05: \", res_005)\r\nprint(\"p_val > 0.01: \", res_001)\r\n" }, { "alpha_fraction": 0.5122451782226562, "alphanum_fraction": 0.5631324052810669, "avg_line_length": 32.084999084472656, "blob_id": "b80c4579195fd3c3cf62264ae54befa6d4b4b774", "content_id": "f3c3fbbd2985c321226594b5910baf0e0d18ff68", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6819, "license_type": "no_license", "max_line_length": 124, "num_lines": 200, "path": "/facePartsSwap/myfacepartsSwap.py", "repo_name": "rikumiura/pyCodes", "src_encoding": "UTF-8", "text": "#!
/usr/bin/env python\r\n\r\nimport sys\r\nimport numpy as np\r\nimport cv2\r\nimport os\r\n\r\n# Read points from text file\r\ndef readPoints(path):\r\n # Create an array of points.\r\n points = [];\r\n\r\n # Read points\r\n with open(path) as file:\r\n for line in file:\r\n x, y = line.split()\r\n points.append((int(x), int(y)))\r\n\r\n return points\r\n\r\n\r\n# Apply affine transform calculated using srcTri and dstTri to src and\r\n# output an image of size.\r\ndef applyAffineTransform(src, srcTri, dstTri, size):\r\n # Given a pair of triangles, find the affine transform.\r\n warpMat = cv2.getAffineTransform(np.float32(srcTri), np.float32(dstTri))\r\n\r\n # Apply the Affine Transform just found to the src image\r\n dst = cv2.warpAffine(src, warpMat, (size[0], size[1]), None, flags=cv2.INTER_LINEAR,\r\n borderMode=cv2.BORDER_REFLECT_101)\r\n\r\n return dst\r\n\r\n# Warps triangular regions from img1 to img2\r\ndef warpTriangle(img1, img2, t1, t2):\r\n # Find bounding rectangle for each triangle\r\n r1 = cv2.boundingRect(np.float32([t1]))\r\n r2 = cv2.boundingRect(np.float32([t2]))\r\n\r\n # Offset points by left top corner of the respective rectangles\r\n t1Rect = []\r\n t2Rect = []\r\n t2RectInt = []\r\n\r\n for i in range(0, 3):\r\n t1Rect.append(((t1[i][0] - r1[0]), (t1[i][1] - r1[1])))\r\n t2Rect.append(((t2[i][0] - r2[0]), (t2[i][1] - r2[1])))\r\n t2RectInt.append(((t2[i][0] - r2[0]), (t2[i][1] - r2[1])))\r\n\r\n # Get mask by filling triangle\r\n mask = np.zeros((r2[3], r2[2], 3), dtype=np.float32)\r\n cv2.fillConvexPoly(mask, np.int32(t2RectInt), (1.0, 1.0, 1.0), 16, 0);\r\n\r\n # Apply warpImage to small rectangular patches\r\n img1Rect = img1[r1[1]:r1[1] + r1[3], r1[0]:r1[0] + r1[2]]\r\n\r\n size = (r2[2], r2[3])\r\n img2Rect = applyAffineTransform(img1Rect, t1Rect, t2Rect, size)\r\n\r\n img2Rect = img2Rect * mask\r\n img2[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] = img2[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]]*((1.0, 1.0, 1.0) - mask)\r\n img2[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] = img2[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] + img2Rect\r\n\r\ndef getFacialLandmarkImg(img = \"\", lpos = \"\", tset = \"\", color = (255, 0, 0)):\r\n imgshow = np.copy(img)\r\n for i in range(len(lpos)):\r\n cv2.circle(imgshow, lpos[i], 2, color, -1\r\n )\r\n for i in range(len(tset)):\r\n cv2.line(imgshow, tset[i][0], tset[i][1], color, 1)\r\n cv2.line(imgshow, tset[i][0], tset[i][2], color, 1)\r\n cv2.line(imgshow, tset[i][2], tset[i][1], color, 1)\r\n\r\n return imgshow\r\n\r\ndef partsSwap(filename1 = \"\", filename2 = \"\", swLeye = 0, swReye = 0, swNose = 0, swMouth = 0):\r\n img1 = cv2.imread(filename1);\r\n img2 = cv2.imread(filename2); # base img\r\n img1Warped = np.copy(img2);\r\n\r\n partsfile = \"tri_leye.txt\"\r\n\r\n partslist = []\r\n if swLeye : partslist.append(\"tri_leye.txt\")\r\n if swReye: partslist.append(\"tri_reye.txt\")\r\n if swNose: partslist.append(\"tri_nose.txt\")\r\n if swMouth: partslist.append(\"tri_mouth.txt\")\r\n\r\n # get shape info\r\n if len(img2.shape) == 3:\r\n height, width, channels = img2.shape[:3]\r\n else:\r\n height, width = img2.shape[:2]\r\n channels = 1\r\n\r\n # Read array of corresponding points\r\n name, ext = os.path.splitext(filename1)\r\n points1 = readPoints(name + '.txt')\r\n name, ext = os.path.splitext(filename2)\r\n points2 = readPoints(name + '.txt')\r\n\r\n # dt = calculateDelaunayTriangles(rect, hull2)\r\n dt = []\r\n tset1 = []\r\n tset2 = []\r\n\r\n output = np.copy(img2);\r\n # Read triangles from tri.txt\r\n for partsfile in 
partslist:\r\n        mask_parts = np.zeros(img2.shape, dtype=np.uint8)\r\n        with open(partsfile) as file:\r\n            for line in file:\r\n                x, y, z = line.split()\r\n                print(\"x, y, z: \", x, y, z)\r\n\r\n                x = int(x)\r\n                y = int(y)\r\n                z = int(z)\r\n\r\n                dt.append((x,y,z))\r\n                tset1.append([points1[x], points1[y], points1[z]])\r\n                tset2.append([points2[x], points2[y], points2[z]])\r\n\r\n                tripos = []\r\n                tripos.append(points2[x])\r\n                tripos.append(points2[y])\r\n                tripos.append(points2[z])\r\n\r\n                # create a small mask for each triangle\r\n                mask_small = np.zeros(img2.shape, dtype=np.uint8)\r\n                cv2.fillConvexPoly(mask_small, np.int32(tripos), (255,255,255))\r\n                mask_parts |= mask_small\r\n\r\n        if len(dt) == 0:\r\n            quit()\r\n\r\n        # Apply an affine transformation to each triangle\r\n        for i in range(0, len(dt)):\r\n            t1 = []\r\n            t2 = []\r\n\r\n            # get points for img1, img2 corresponding to the triangles\r\n            for j in range(0, 3):\r\n                t1.append(tset1[i][j])\r\n                t2.append(tset2[i][j])\r\n\r\n            warpTriangle(img1, img1Warped, t1, t2)\r\n\r\n        mask_c1, mask_c2, mask_c3 = cv2.split(mask_parts)\r\n        maskMoments = cv2.moments(mask_c1)\r\n        cx = int(maskMoments['m10'] / maskMoments['m00'])\r\n        cy = int(maskMoments['m01'] / maskMoments['m00'])\r\n        center = (int(cx), int(cy))\r\n\r\n        # Clone seamlessly.\r\n        output = cv2.seamlessClone(np.uint8(img1Warped), output, mask_parts, center, cv2.NORMAL_CLONE)\r\n\r\n    img1land = getFacialLandmarkImg(img1, points1, tset1)\r\n    cv2.imshow(\"img1 landmark\", img1land)\r\n    cv2.waitKey(0)\r\n    cv2.destroyAllWindows()\r\n    saveimgname = filename1 + \"_beforewarp_tri.jpg\"\r\n    cv2.imwrite(saveimgname, img1land)\r\n\r\n    img1landWarp = getFacialLandmarkImg(img1Warped, points2, tset2)\r\n    cv2.imshow(\"img1 warp landmark\", img1landWarp)\r\n    cv2.waitKey(0)\r\n    cv2.destroyAllWindows()\r\n    saveimgname = filename1 + \"_afterwarp_tri.jpg\"\r\n    cv2.imwrite(saveimgname, img1landWarp)\r\n\r\n    cv2.imshow(\"img1 warp\", img1Warped)\r\n    cv2.waitKey(0)\r\n    cv2.destroyAllWindows()\r\n    saveimgname = filename1 + \"_afterwarp.jpg\"\r\n    cv2.imwrite(saveimgname, img1Warped)\r\n\r\n    return output\r\n\r\nif __name__ == '__main__':\r\n\r\n    # Make sure OpenCV is version 3.0 or above\r\n    (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')\r\n\r\n    if int(major_ver) < 3:\r\n        print('ERROR: Script needs OpenCV 3.0 or higher', file=sys.stderr)\r\n        sys.exit(1)\r\n\r\n    # Read images\r\n    filename1 = 'facefld/model10211041_TP_V4.jpg'\r\n    filename2 = 'facefld/ookawa918IMGL1370_TP_V.jpg'\r\n\r\n    output = partsSwap(filename1, filename2, swLeye = 1, swReye = 1, swNose = 0, swMouth = 1)\r\n\r\n    cv2.imshow(\"Face Swapped\", output)\r\n    saveimgname = filename2 + \"_faceswapped.jpg\"\r\n    cv2.imwrite(saveimgname, output)\r\n    cv2.waitKey(0)\r\n\r\n    cv2.destroyAllWindows()\r\n\r\n" }, { "alpha_fraction": 0.41167664527893066, "alphanum_fraction": 0.480538934469223, "avg_line_length": 23.16867446899414, "blob_id": "210e2e163f0fee115c0bdb2c48c478f48b400e39", "content_id": "41e8c1b6fd2bf6586c6a624e96357ff10ef35693", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2004, "license_type": "no_license", "max_line_length": 109, "num_lines": 83, "path": "/calcAffineLeastSquare/myLeastSquare.py", "repo_name": "rikumiura/pyCodes", "src_encoding": "UTF-8", "text": "import numpy as np\r\nfrom matplotlib import pyplot as plt\r\nimport cv2\r\n\r\n# X = (xi, yi, 1)\r\n# X' = A * X\r\n# (xi', yi')\r\n\r\n## transform 3 points\r\n\r\ntheta = 60\r\ntx, ty = 5, 3\r\nM =
np.array([[np.cos(np.deg2rad(theta)), -np.sin(np.deg2rad(theta)), tx],\r\n [np.sin(np.deg2rad(theta)), np.cos(np.deg2rad(theta)), ty]], float)\r\n\r\n# points before transform\r\nX = np.array([[15, 5, 1],\r\n [25, 10, 1],\r\n [20, 15, 1]])\r\n\r\n# points after transform\r\nX_tfm = M.dot(X.T)\r\n\r\nfig = plt.figure()\r\nax = fig.add_subplot(111)\r\n\r\nax.set_xlabel(\"x\")\r\nax.set_ylabel(\"y\")\r\nax.set_title(\"point\")\r\nax.grid()\r\nax.set_xlim([0, 30])\r\nax.set_ylim([0, 30])\r\nfor i in range(len(X)):\r\n ax.plot(X[i][0], X[i][1], marker='.', markersize=10, color='r')\r\n ax.plot(X_tfm.T[i][0], X_tfm.T[i][1], marker='.', markersize=10, color='b')\r\n\r\nfig.show()\r\nfig.savefig(\"myLeastSquare_transformImg.jpg\")\r\n#cv2.waitKey(0)\r\n\r\nXX = np.array([[X[0][0], X[0][1], 1, 0, 0, 0],\r\n [0, 0, 0, X[0][0], X[0][1], 1],\r\n [X[1][0], X[1][1], 1, 0, 0, 0],\r\n [0, 0, 0, X[1][0], X[1][1], 1],\r\n [X[2][0], X[2][1], 1, 0, 0, 0],\r\n [0, 0, 0, X[2][0], X[2][1], 1]])\r\n\r\nXX_tfm = np.array([X_tfm.T[0][0], X_tfm.T[0][1],X_tfm.T[1][0], X_tfm.T[1][1],X_tfm.T[2][0], X_tfm.T[2][1]]).T\r\n\r\nM_slv = np.linalg.inv(XX).dot(XX_tfm)\r\n\r\nprint(\"M: \")\r\nprint(M)\r\nprint(\"M: \")\r\nprint(M_slv)\r\n\r\n## transform 4 points\r\nX = np.array([[15, 5, 1],\r\n [25, 10, 1],\r\n [20, 15, 1],\r\n [18, 18, 1]])\r\nX_tfm = M.dot(X.T)\r\n\r\nXX = []\r\nXX_tfm = []\r\nfor i in range(len(X)):\r\n XX.append([X[i][0], X[i][1], 1, 0, 0, 0])\r\n XX.append([0, 0, 0, X[i][0], X[i][1], 1])\r\n\r\n XX_tfm.append(X_tfm.T[i][0])\r\n XX_tfm.append(X_tfm.T[i][1])\r\n\r\nXX = np.array(XX)\r\nXX_tfm = np.array(XX_tfm).T\r\n\r\nMat = XX.T.dot(XX)\r\nMat_inv = np.linalg.inv(Mat)\r\nM_slv = np.linalg.inv(XX.T.dot(XX)).dot(XX.T).dot(XX_tfm)\r\n\r\nprint(\"M: \")\r\nprint(M)\r\nprint(\"M_slv: \")\r\nprint(M_slv)" }, { "alpha_fraction": 0.593909502029419, "alphanum_fraction": 0.6104117035865784, "avg_line_length": 29.433155059814453, "blob_id": "31905b89e5d6dc42242dc5adcd41a9a9a8c2ca8f", "content_id": "600971128c46ce03eb04603d6df6b3834527fe44", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5878, "license_type": "no_license", "max_line_length": 107, "num_lines": 187, "path": "/linerRegression/myLinerRegression.py", "repo_name": "rikumiura/pyCodes", "src_encoding": "UTF-8", "text": "\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom sklearn import datasets, linear_model\r\nfrom sklearn.metrics import mean_squared_error, r2_score\r\n\r\ndef normData(dataX):\r\n dataX_norm = dataX\r\n for i in range(len(dataX[0, :])):\r\n Max = max(dataX_norm[:, i])\r\n Min = min(dataX_norm[:, i])\r\n dataX_norm[:, i] = (dataX_norm[:, i] - Min) / (Max - Min)\r\n return dataX_norm\r\n\r\ndef outlierFilter(dataXorg, upper = 98, lower = 2):\r\n data = []\r\n filter = np.array([True for i in range(len(dataXorg[:,0]))])\r\n for i in range(len(dataXorg[0, :])):\r\n if i == 1:\r\n continue\r\n upper_val = np.percentile(dataXorg[:, i], upper)\r\n lower_val = np.percentile(dataXorg[:, i], lower)\r\n filter_new = (dataXorg[:, i] <= upper_val) & (dataXorg[:, i] >= lower_val)\r\n filter = filter & filter_new\r\n data = dataXorg[filter,:]\r\n return data, filter\r\n\r\nDataType = \"diab\"\r\n#DataType = \"priceData.txt\"\r\nnumTest = 20\r\n\r\nif DataType is \"diab\":\r\n # Load the diabetes dataset\r\n diabetes = datasets.load_diabetes()\r\n\r\n # diabetes_X = diabetes.data[:, np.newaxis, 4]\r\n dataNames = diabetes.feature_names\r\n diabetes_X = diabetes.data\r\n\r\n # dataX = 
diabetes_X[:,0:5]\r\n dataX = diabetes_X\r\n dataY = diabetes.target\r\nelse:\r\n # https://atarimae.biz/archives/18904\r\n data = np.loadtxt(DataType,delimiter=\"\\t\")\r\n dataNames = [\"square\",\t\"age\",\t\"distance\"]\r\n\r\n dataX = data[:, 1:len(data)]\r\n dataY = data[:, 0]\r\n\r\n## outlier filter\r\n# dataX, filter = outlierFilter(dataX)\r\n# dataY = dataY[filter]\r\n\r\ndataX = normData(dataX)\r\nminY, maxY = min(dataY), max(dataY)\r\ndataY = (dataY - minY) / (maxY - minY)\r\n\r\n## insert dummy data\r\n# arrdummy = np.array([0.5 for i in range(len(dataX[:,0]))])\r\n# dataX = np.vstack((dataX.T, arrdummy)).T\r\n# dataNames.extend(\"a\")\r\n#\r\n# arrdummy = np.array([np.random.rand() for i in range(len(dataX[:,0]))])\r\n# dataX = np.vstack((dataX.T, arrdummy)).T\r\n# dataNames.extend(\"b\")\r\n\r\n# Split the data into training/testing sets\r\ndataX_train = dataX[:-numTest]\r\ndataX_test = dataX[-numTest:]\r\n\r\n# Split the targets into training/testing sets\r\ndataY_train = dataY[:-numTest]\r\ndataY_test = dataY[-numTest:]\r\n\r\n## hist data\r\nfigAll_hist = plt.figure()\r\nfigAll_hist.subplots_adjust(wspace=0.4, hspace=0.3)\r\nfor i in range(len(dataX[0,:])):\r\n numcol = 3\r\n numrow = len(dataX[0,:]) / numcol if (len(dataX[0,:]) % numcol == 0 ) else len(dataX[0,:]) / numcol + 1\r\n ax = figAll_hist.add_subplot(int(numrow),numcol,i+1)\r\n ax.hist(dataX_train[:,i], color='blue')\r\n ttl = dataNames[i]\r\n ax.set_title(ttl)\r\n ax.set_xlabel(\"x\")\r\n ax.set_ylabel(\"y\")\r\nfigAll_hist.show()\r\n\r\n## plot data\r\nfigAll_train_bfnorm = plt.figure()\r\nfigAll_train_bfnorm.subplots_adjust(wspace=0.4, hspace=0.6)\r\nfor i in range(len(dataX[0,:])):\r\n numcol = 3\r\n numrow = len(dataNames) / numcol + 1\r\n ax = figAll_train_bfnorm.add_subplot(numrow,numcol,i+1)\r\n x = dataX_train[:,i]\r\n ax.scatter(dataX_train[:,i], dataY_train, color=\"#222222\", alpha=0.7)\r\n dataIdx = np.argsort(dataX_train[:,i])\r\n x = dataX_train[:,i][dataIdx]\r\n ttl = dataNames[i]\r\n ax.set_title(ttl)\r\n ax.set_xlabel(\"x\")\r\n ax.set_ylabel(\"y\")\r\nfigAll_train_bfnorm.show()\r\n\r\n\r\n# Create linear regression object\r\nregr = linear_model.LinearRegression()\r\n\r\n# Train the model using the training sets\r\nregr.fit(dataX_train, dataY_train)\r\n\r\n# Make predictions using the testing set\r\ndataY_pred = regr.predict(dataX_test)\r\n\r\n# The coefficients\r\nprint('Coefficients: \\n', regr.coef_)\r\n# The mean squared error\r\nprint(\"Mean squared error: %.2f\"\r\n % mean_squared_error(dataY_test, dataY_pred))\r\n# Explained variance score: 1 is perfect prediction\r\nprint('Variance score: %.2f' % r2_score(dataY_test, dataY_pred))\r\n\r\n\r\ny_train = dataX_train.dot(regr.coef_) + regr.intercept_\r\n\r\ny_pred_train = regr.predict(dataX_train)\r\nfigAll_train = plt.figure()\r\nfigAll_train.subplots_adjust(wspace=0.4, hspace=0.6)\r\nfor i in range(len(dataX[0,:])):\r\n numcol = 3\r\n numrow = len(dataNames) / numcol + 1\r\n ax = figAll_train.add_subplot(numrow,numcol,i+1)\r\n x = dataX_train[:,i]\r\n ax.scatter(dataX_train[:,i], dataY_train, color=\"#222222\", alpha=0.7)\r\n dataIdx = np.argsort(dataX_train[:,i])\r\n x = dataX_train[:,i][dataIdx]\r\n y = y_pred_train[dataIdx]\r\n ax.plot(x,y, color='blue')\r\n\r\n y = y_train[dataIdx]\r\n ax.plot(x, y, color='red')\r\n ttl = dataNames[i] + \" \" + \"{:.2f}\".format(regr.coef_[i])\r\n ax.set_title(ttl)\r\n ax.set_xlabel(\"x\")\r\n ax.set_ylabel(\"y\")\r\nfigAll_train.show()\r\n\r\nfigAll_test = 
plt.figure()\r\nfigAll_test.subplots_adjust(wspace=0.4, hspace=0.6)\r\nfor i in range(len(dataX[0,:])):\r\n numcol = 3\r\n numrow = len(dataNames) / numcol + 1\r\n ax = figAll_test.add_subplot(numrow,numcol,i+1)\r\n x = dataX_test[:,i]\r\n ax.scatter(dataX_test[:,i], dataY_test, color=\"#222222\", alpha=0.7)\r\n dataIdx = np.argsort(dataX_test[:,i])\r\n x = dataX_test[:,i][dataIdx]\r\n y = dataY_pred[dataIdx]\r\n ax.plot(x,y, color='blue')\r\n ttl = dataNames[i] + \" \" + \"{:.2f}\".format(regr.coef_[i])\r\n ax.set_title(ttl)\r\n ax.set_xlabel(\"x\")\r\n ax.set_ylabel(\"y\")\r\nfigAll_test.show()\r\n\r\nplt.show()\r\n\r\n# sortedIdx = np.argsort(abs(regr.coef_))\r\n# print(\"sorted idx: \", sortedIdx[::-1])\r\n#\r\n# numShow = 3\r\n# fig = plt.figure()\r\n# for i in range(numShow):\r\n# fIdx = sortedIdx[::-1][i]\r\n# ax = fig.add_subplot(1,numShow,i+1)\r\n# ax.scatter(diabetes_X_test[:,fIdx], diabetes_y_test, color='black')\r\n# dataIdx = np.argsort(diabetes_X_test[:,fIdx])\r\n# x = diabetes_X_test[:,fIdx][dataIdx]\r\n# y = dataY_pred[dataIdx]\r\n# ax.plot(x,y, color='blue', linewidth=3)\r\n# ttl = dataNames[fIdx] + \" \" + \"{:.2f}\".format(regr.coef_[fIdx])\r\n# ax.set_title(ttl)\r\n# ax.set_xlabel(\"x\")\r\n# ax.set_ylabel(\"y\")\r\n# fig.show()\r\n# plt.show()" }, { "alpha_fraction": 0.5987837910652161, "alphanum_fraction": 0.6341394186019897, "avg_line_length": 33.83743667602539, "blob_id": "ff06e77c9f0e4351c2319f280588c93c0ae0d470", "content_id": "f6ab6a22dac07b92b057ee3bc49691c779c4282a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7071, "license_type": "no_license", "max_line_length": 125, "num_lines": 203, "path": "/kerasPrj/myASLrecognition/myKerasFinetune_r3.py", "repo_name": "rikumiura/pyCodes", "src_encoding": "UTF-8", "text": "import glob\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nfrom keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array, array_to_img\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import OneHotEncoder\nfrom keras.optimizers import Adam\nimport pandas as pd\n\nALP2Num = {\"A\":0, \"B\":1, \"C\":2, \"D\":3, \"E\":4, \"F\":5, \"G\":6, \"H\":7, \"I\":8, \"J\":9,\n \"K\":10, \"L\":11, \"M\":12, \"N\":13, \"O\":14, \"P\":15, \"Q\":16, \"R\":17, \"S\":18, \"T\":19,\n \"U\":20, \"V\":21, \"W\":22, \"X\":23, \"Y\":24, \"Z\":25, \"del\":26, \"space\":27, 'nothing':28\n }\n\ndef load_data_lbl(fname=[],imgW=150,imgH=150,totalImgNum=84000,sampleStep=3):\n train_flds = glob.glob(fname)\n dataX = np.zeros((int(totalImgNum/sampleStep),imgH,imgW,3)).astype('float32')\n dataLbl = []\n imgIdx = 0\n fileIdx = 0\n for fld in train_flds:\n imgfiles = glob.glob(fld + '/*.jpg')\n if len(imgfiles) == 0: continue\n rootName, fldName = fld.split('\\\\')\n # skip \"nothing\"\n if fldName == 'nothing': continue\n print(\"current fld name: \", fldName)\n for fidx, imgfile in enumerate(imgfiles):\n if fileIdx % sampleStep == 0:\n dataX[imgIdx] = img_to_array(load_img(imgfile, target_size=IMG_DIM))\n dataLbl.append(fldName)\n imgIdx += 1\n fileIdx += 1\n\n # if fidx % 10 == 0: print('{:03d}'.format(2\n # fidx) + \"/ \" + '{:05d}'.format(len(imgfiles)))\n\n datay = np.zeros(len(dataLbl)).astype('int')\n for i, lbl in enumerate(dataLbl):\n datay[i] = ALP2Num[lbl]\n\n print(\"len(dataX): \", len(dataX))\n print(\"imgIdx: \", imgIdx)\n\n return dataX, datay\n\ndef load_data_lbl_old2(fname=[],imgW=150,imgH=150):\n train_flds = glob.glob(fname)\n dataX = 
[]\n dataLbl = []\n initalFlg = 1\n for fld in train_flds:\n imgfiles = glob.glob(fld + '/*.jpg')\n if len(imgfiles) == 0: continue\n rootName, fldName = fld.split('\\\\')\n # skip \"nothing\"\n if fldName == 'nothing': continue\n print(\"\")\n print(\"current fld name: \", fldName)\n for fidx, imgfile in enumerate(imgfiles):\n if initalFlg:\n dataX = np.zeros((1, imgH, imgW, 3)).astype('float32')\n initalFlg = 0\n continue\n dataX = np.vstack((dataX,img_to_array(load_img(imgfile, target_size=IMG_DIM)).reshape(1, imgH, imgW, 3)))\n dataLbl.append(fldName)\n\n # if fidx in [len(imgfiles)/4, len(imgfiles)*2/4, len(imgfiles)*3/4]:\n # print(\".\", end=' ')\n if fidx % 10 == 0: print('{:03d}'.format(fidx) + \"/ \" + '{:05d}'.format(len(imgfiles)))\n\n datay = np.zeros(len(dataLbl)).astype('int')\n for i, lbl in enumerate(dataLbl):\n datay[i] = ALP2Num[lbl]\n\n # dataX = np.array(dataX)\n # datay = np.array(datay).astype('int')\n #\n # # delete \"nothing\" img\n # idxsNotDelete = (datay != 28)\n # datay = datay[idxsNotDelete]\n # dataX = dataX[idxsNotDelete]\n\n return dataX, datay\n\ndef load_data_lbl_old(fname=[],imgW=150,imgH=150):\n train_flds = glob.glob(fname)\n dataX = []\n dataLbl = []\n for fld in train_flds:\n imgfiles = glob.glob(fld + '/*.jpg')\n if len(imgfiles) == 0: continue\n rootName, fldName = fld.split('\\\\')\n for imgfile in imgfiles:\n dataX.append(img_to_array(load_img(imgfile, target_size=IMG_DIM)))\n dataLbl.append(fldName)\n\n datay = np.zeros(len(dataLbl))\n for i, lbl in enumerate(dataLbl):\n datay[i] = ALP2Num[lbl]\n\n dataX = np.array(dataX)\n datay = np.array(datay).astype('int')\n\n # delete \"nothing\" img\n idxsNotDelete = (datay != 28)\n datay = datay[idxsNotDelete]\n dataX = dataX[idxsNotDelete]\n\n return dataX, datay\n\nimgW = 150\nimgH = 150\nIMG_DIM = (imgW, imgH)\n\nX_train, Y_train = load_data_lbl('asl_alphabet_train/*',imgW=150,imgH=150,totalImgNum=84000,sampleStep=3)\nX_test, Y_test = load_data_lbl('asl-alphabet-test/*',imgW=150,imgH=150,totalImgNum=840,sampleStep=1)\n\n# one-hot encoding\nenc = OneHotEncoder(sparse=False, dtype=np.float32)\nYtrain_ohe = enc.fit_transform(Y_train.reshape(-1,1))\nYtest_ohe = enc.fit_transform(Y_test.reshape(-1,1))\ntotal_classes = Ytrain_ohe.shape[1]\n\n# img Augmentation\nbatchSize = 30\ntrainSplSize = len(X_train)\ntrain_datagen = ImageDataGenerator(rescale=1./255, zoom_range=0.3, rotation_range=50,\n width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2,\n fill_mode='nearest')\nval_datagen = ImageDataGenerator(rescale=1./255)\ntrain_generator = train_datagen.flow(X_train, Ytrain_ohe, batch_size=batchSize)\nval_generator = val_datagen.flow(X_test, Ytest_ohe, batch_size=20)\n\ndel X_train, X_test, Y_train, Y_test, Ytrain_ohe, Ytest_ohe\n\n# build CNN\nfrom keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, InputLayer, BatchNormalization,GlobalAveragePooling2D\nfrom keras.models import Sequential\nfrom keras import optimizers\n# load VGG16\nfrom keras.applications import vgg16\nfrom keras.models import Model\nimport keras\n\nvgg = vgg16.VGG16(include_top=False, weights='imagenet',input_shape=(imgW,imgH,3))\n\nx = vgg.output\n# x = GlobalAveragePooling2D()(x)\nx = keras.layers.Flatten()(x)\nx = Dense(512, activation='relu')(x)\nx = BatchNormalization()(x)\nx = Dropout(0.3)(x)\npredictions = Dense(total_classes, activation='softmax')(x)\nmodel = Model(inputs=vgg.input, outputs=predictions)\n\n# set trainable\nfor layer in model.layers[:11]:\n layer.trainable = False\nfor layer in 
model.layers[11:]:\n layer.trainable = True\n\nlayers = [(layer, layer.name, layer.trainable) for layer in model.layers]\nprint(\"layer, layer.name, layer.trainable\")\nfor layer in layers:\n print(layer)\n\nEpochs = 15\nstepPerEpochs = int(trainSplSize/batchSize)\nmodel.compile(Adam(lr=.003), loss='categorical_crossentropy', metrics=['accuracy'])\nprint(\"model.summary()\")\nprint(model.summary())\nhistory = model.fit_generator(train_generator, steps_per_epoch=stepPerEpochs, epochs=Epochs,\n validation_data=val_generator, validation_steps=50,\n verbose=1)\n\nmodel.save('ASL_vgg16ft_r3.h5')\n\n# results\nf, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4))\nt = f.suptitle('Basic CNN Performance', fontsize=12)\nf.subplots_adjust(top=0.85, wspace=0.3)\n\nepoch_list = list(range(1,Epochs+1))\nax1.plot(epoch_list, history.history['acc'], label='Train Accuracy')\nax1.plot(epoch_list, history.history['val_acc'], label='Validation Accuracy')\nax1.set_xticks(np.arange(0, Epochs+1, 5))\nax1.set_ylabel('Accuracy Value')\nax1.set_xlabel('Epoch')\nax1.set_title('Accuracy')\nl1 = ax1.legend(loc=\"best\")\n\nax2.plot(epoch_list, history.history['loss'], label='Train Loss')\nax2.plot(epoch_list, history.history['val_loss'], label='Validation Loss')\nax2.set_xticks(np.arange(0, Epochs+1, 5))\nax2.set_ylabel('Loss Value')\nax2.set_xlabel('Epoch')\nax2.set_title('Loss')\nl2 = ax2.legend(loc=\"best\")\n\n# plt.show()\nplt.savefig('myKerasFinetune_r3.png')" }, { "alpha_fraction": 0.5192392468452454, "alphanum_fraction": 0.5751142501831055, "avg_line_length": 31.569307327270508, "blob_id": "d7587f5ca683560fef7c508f18dc257f25ce27f3", "content_id": "f3bf839bc310c5fe2437db0503f0a3f75ba0b101", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6783, "license_type": "no_license", "max_line_length": 124, "num_lines": 202, "path": "/moveFaceLandmark/myfaceMoveLandmark.py", "repo_name": "rikumiura/pyCodes", "src_encoding": "UTF-8", "text": "#! 
/usr/bin/env python\r\n\r\nimport sys\r\nimport numpy as np\r\nimport cv2\r\nimport os\r\nimport copy\r\n\r\n# Read points from text file\r\ndef readPoints(path):\r\n # Create an array of points.\r\n points = [];\r\n\r\n # Read points\r\n with open(path) as file:\r\n for line in file:\r\n if len(line) < 4:\r\n points.append((0,0))\r\n continue\r\n x, y = line.split()\r\n points.append((int(x), int(y)))\r\n\r\n return points\r\n\r\n\r\n# Apply affine transform calculated using srcTri and dstTri to src and\r\n# output an image of size.\r\ndef applyAffineTransform(src, srcTri, dstTri, size):\r\n # Given a pair of triangles, find the affine transform.\r\n warpMat = cv2.getAffineTransform(np.float32(srcTri), np.float32(dstTri))\r\n\r\n # Apply the Affine Transform just found to the src image\r\n dst = cv2.warpAffine(src, warpMat, (size[0], size[1]), None, flags=cv2.INTER_LINEAR,\r\n borderMode=cv2.BORDER_REFLECT_101)\r\n\r\n return dst\r\n\r\n# Warps triangular regions from img1 to img2\r\ndef warpTriangle(img1, img2, t1, t2):\r\n # Find bounding rectangle for each triangle\r\n r1 = cv2.boundingRect(np.float32([t1]))\r\n r2 = cv2.boundingRect(np.float32([t2]))\r\n\r\n # Offset points by left top corner of the respective rectangles\r\n t1Rect = []\r\n t2Rect = []\r\n t2RectInt = []\r\n\r\n for i in range(0, 3):\r\n t1Rect.append(((t1[i][0] - r1[0]), (t1[i][1] - r1[1])))\r\n t2Rect.append(((t2[i][0] - r2[0]), (t2[i][1] - r2[1])))\r\n t2RectInt.append(((t2[i][0] - r2[0]), (t2[i][1] - r2[1])))\r\n\r\n # Get mask by filling triangle\r\n mask = np.zeros((r2[3], r2[2], 3), dtype=np.float32)\r\n cv2.fillConvexPoly(mask, np.int32(t2RectInt), (1.0, 1.0, 1.0), 16, 0);\r\n\r\n # Apply warpImage to small rectangular patches\r\n img1Rect = img1[r1[1]:r1[1] + r1[3], r1[0]:r1[0] + r1[2]]\r\n\r\n size = (r2[2], r2[3])\r\n img2Rect = applyAffineTransform(img1Rect, t1Rect, t2Rect, size)\r\n\r\n img2Rect = img2Rect * mask\r\n img2[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] = img2[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]]*((1.0, 1.0, 1.0) - mask)\r\n img2[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] = img2[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] + img2Rect\r\n\r\ndef getFacialLandmarkImg(img = \"\", lpos = \"\", tset = \"\", color = (255, 0, 0)):\r\n imgshow = np.copy(img)\r\n for i in range(len(lpos)):\r\n cv2.circle(imgshow, lpos[i], 2, color, -1\r\n )\r\n for i in range(len(tset)):\r\n cv2.line(imgshow, tset[i][0], tset[i][1], color, 1)\r\n cv2.line(imgshow, tset[i][0], tset[i][2], color, 1)\r\n cv2.line(imgshow, tset[i][2], tset[i][1], color, 1)\r\n\r\n return imgshow\r\n\r\ndef moveLandmark(filename1 = \"\", offsetPx = -10):\r\n img1 = cv2.imread(filename1);\r\n img2 = cv2.imread(filename1);\r\n img1Warped = np.copy(img1);\r\n\r\n # Read array of corresponding points\r\n name, ext = os.path.splitext(filename1)\r\n points1 = readPoints(name + '.txt')\r\n\r\n # Read triangles from tri.txt\r\n dt = []\r\n with open(\"triface.txt\") as file:\r\n for line in file:\r\n p1_idx, p2_idx, p3_idx = line.split()\r\n print(\"p1_idx, p2_idx, p3_idx : \", p1_idx, p2_idx, p3_idx )\r\n dt.append((int(p1_idx), int(p2_idx), int(p3_idx)))\r\n\r\n # set outside points\r\n # add new triangles\r\n pointsOut = []\r\n tsetOut = []\r\n # left face line\r\n for i in range(1, 6):\r\n xpos = points1[i][0] - 20\r\n ypos = points1[i][1]\r\n points1.append((int(xpos), int(ypos)))\r\n dt.append((i, i-1, len(points1) - 1))\r\n if i != 1:\r\n dt.append((i-1, len(points1) - 2, len(points1) - 1))\r\n # right face line\r\n for i in range(11, 
16):\r\n        xpos = points1[i][0] + 20\r\n        ypos = points1[i][1]\r\n        points1.append((int(xpos), int(ypos)))\r\n        dt.append((i, i+1, len(points1) - 1))\r\n        if i != 11:\r\n            dt.append((i, len(points1) - 2, len(points1) - 1))\r\n\r\n    # make offset points\r\n    points2 = copy.deepcopy(points1)\r\n    # left face line\r\n    for i in range(1, 5):\r\n        points2[i] = (points1[i][0] - offsetPx, points1[i][1])\r\n    # right face line\r\n    for i in range(12, 16):\r\n        points2[i] = (points1[i][0] + offsetPx, points1[i][1])\r\n\r\n    # Find Delaunay triangulation for convex hull points\r\n    sizeImg = img1.shape\r\n    rect = (0, 0, sizeImg[1], sizeImg[0])\r\n\r\n    # dt = calculateDelaunayTriangles(rect, hull2)\r\n    tset1 = []\r\n    tset2 = []\r\n\r\n    for i in range(len(dt)):\r\n        p1Idx, p2Idx, p3Idx = int(dt[i][0]), int(dt[i][1]), int(dt[i][2])\r\n        tset1.append([points1[p1Idx], points1[p2Idx], points1[p3Idx]])\r\n        tset2.append([points2[p1Idx], points2[p2Idx], points2[p3Idx]])\r\n\r\n    if len(dt) == 0:\r\n        quit()\r\n\r\n    # Apply an affine transformation to the Delaunay triangles\r\n    for i in range(0, len(dt)):\r\n        t1 = []\r\n        t2 = []\r\n\r\n        # get points for img1, img2 corresponding to the triangles\r\n        for j in range(0, 3):\r\n            t1.append(tset1[i][j])\r\n            t2.append(tset2[i][j])\r\n\r\n        warpTriangle(img1, img1Warped, t1, t2)\r\n\r\n    imgshow = getFacialLandmarkImg(img1, points1, tset1)\r\n    cv2.imshow(\"img1 landmark\", imgshow)\r\n    cv2.waitKey(0)\r\n    cv2.destroyAllWindows()\r\n    saveimgname = filename1 + \"_beforewarp.jpg\"\r\n    cv2.imwrite(saveimgname, imgshow)\r\n\r\n    imgshow = getFacialLandmarkImg(img1Warped, points2, tset2)\r\n    cv2.imshow(\"img1 landmark warped\", imgshow)\r\n    cv2.waitKey(0)\r\n    cv2.destroyAllWindows()\r\n    saveimgname = filename1 + \"_afterwarp.jpg\"\r\n    cv2.imwrite(saveimgname, imgshow)\r\n\r\n    imgsbase = getFacialLandmarkImg(img1, points1, [])\r\n    imgshow = getFacialLandmarkImg(imgsbase, points2, tset2, (0,0,255))\r\n    cv2.imshow(\"img1 landmark warped (before and after)\", imgshow)\r\n    cv2.waitKey(0)\r\n    cv2.destroyAllWindows()\r\n    saveimgname = filename1 + \"_beforeafterwarp.jpg\"\r\n    cv2.imwrite(saveimgname, imgshow)\r\n\r\n    return img1Warped\r\n\r\nif __name__ == '__main__':\r\n\r\n    # Make sure OpenCV is version 3.0 or above\r\n    (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')\r\n\r\n    if int(major_ver) < 3:\r\n        print('ERROR: Script needs OpenCV 3.0 or higher', file=sys.stderr)\r\n        sys.exit(1)\r\n\r\n    # Read images\r\n    filename1 = 'imgfld/ookawa918IMGL1370_TP_V.jpg'\r\n\r\n    offset = 5\r\n    output = moveLandmark(filename1, offset)\r\n\r\n    cv2.imshow(\"Face landmark moved\", output)\r\n    cv2.waitKey(0)\r\n    cv2.destroyAllWindows()\r\n    strOffs = \"_p\" + str(abs(offset)) if (offset > 0) else \"_m\" + str(abs(offset))\r\n    saveimgname = filename1 + \"_output\" + strOffs + \".jpg\"\r\n    cv2.imwrite(saveimgname, output)\r\n\r\n    #showFacialLandmark(\"before\", img1, points1, tset1)\r\n    #showFacialLandmark(\"after\", output, points2, tset2)\r\n\r\n" }, { "alpha_fraction": 0.4580645263195038, "alphanum_fraction": 0.5311827659606934, "avg_line_length": 20.238094329833984, "blob_id": "83490474521f57a050945f9dca51f43c36a7ab9d", "content_id": "93af7990e8d65156b976c2087caa57eb4adbf21a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 543, "license_type": "no_license", "max_line_length": 72, "num_lines": 21, "path": "/statistics/myEstimation_numSample.py", "repo_name": "rikumiura/pyCodes", "src_encoding": "UTF-8", "text": "import numpy as np\r\n
from scipy.stats import t\r\nimport matplotlib.pyplot as plt\r\n\r\n## From sp - 1.96 * sqrt( sp(1-sp)/n ) <= p <= sp + 1.96 * sqrt( sp(1-sp)/n )\r\n## the width of the 95% confidence interval is\r\n## 2 * 1.96 * sqrt( sp(1-sp)/n )\r\n## With p_width the desired interval width, solving for n gives\r\n## n >= ( 2 * 1.96 * sqrt( sp * ( 1-sp )) / p_width )^2\r\n\r\nsp = 0.1\r\np_width = 0.05\r\n\r\n# lower/upper bounds of the 95% region\r\nnorm_low, norm_upp = -1.96, 1.96\r\n\r\n# required number of samples\r\nn = 2 * np.abs(norm_upp) * np.sqrt( sp* (1-sp)) / p_width\r\nn = n*n\r\n\r\nprint(\"n: \", n)" }, { "alpha_fraction": 0.515709638595581, "alphanum_fraction": 0.56446373462677, "avg_line_length": 23.89719581604004, "blob_id": "f7aec43c72272432b5040344aebd0216ee061661", "content_id": "eeeab316e89c2d00faeefa2f84a4cb976fcfa5e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3083, "license_type": "no_license", "max_line_length": 113, "num_lines": 107, "path": "/statistics/myKalmanFilter.py", "repo_name": "rikumiura/pyCodes", "src_encoding": "UTF-8", "text": "import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n# Wikipedia writes the filter out in matrix form, which is a useful reference\r\n# https://ja.wikipedia.org/wiki/%E3%82%AB%E3%83%AB%E3%83%9E%E3%83%B3%E3%83%95%E3%82%A3%E3%83%AB%E3%82%BF%E3%83%BC\r\n\r\n# the obs/est notation used here is easy to follow\r\n# https://qiita.com/IshitaTakeshi/items/740ac7e9b549eee4cc04\r\n\r\n# derivation of the Kalman gain in the one-dimensional case\r\n# http://bufonmake.blogspot.com/2015/05/test.html\r\n\r\n# ?? still unclear on the matrix-form Kalman gain K = P H^T S^-1;\r\n# when x is two-dimensional, how are the elements of K written out?\r\n\r\ntnum = 20\r\ndt = 0.1\r\nacc = 0.1\r\n\r\nx_odo = np.array([[0, 0]]).T  # state\r\nx_est = np.array([[0, 0]]).T  # state\r\nP_est = np.array([[0, 0],[0, 0]])\r\nF = np.array([[1, dt],[0, 1]])\r\nB = np.array([[0.5*dt**2, dt]]).T\r\nu = acc\r\nH = np.array([[1, 0],[0, 1]])  # maps the state into observation space\r\nR = np.array([[0.1, 0],[0, 0.1]])  # covariance of the observation noise\r\nQ = np.array([[0.01, 0],[0, 0.01]])  # covariance of the process noise\r\n\r\nx_tr = np.array([[0, 0]]).T\r\n\r\nx_odo_array = []\r\nx_est_array = []\r\nz_array = []\r\nx_klm_array = []\r\nt_array_0 = []\r\nt_array = []\r\n\r\nx_tr_array = []\r\n\r\nx_est_array.append(x_est)\r\nx_tr_array.append(x_tr)\r\nt_array_0.append(0)\r\n\r\nfor i in range(tnum):\r\n    # ground truth\r\n    x_tr = F.dot(x_tr) + B*u + np.array([[np.random.normal(0,Q[0,0]), np.random.normal(0,Q[1,1])]]).T\r\n    x_tr_array.append(x_tr)\r\n\r\n    # prediction from the motion model\r\n    x_odo = F.dot(x_odo) + B*u\r\n    x_odo_array.append(x_odo)\r\n    P_est = F.dot(P_est).dot(F.T) + Q  # ??\r\n\r\n    # observation\r\n    z = H.dot(x_tr) + np.array([[np.random.normal(0,R[0,0]), np.random.normal(0,R[1,1])]]).T\r\n    z_array.append(z)\r\n\r\n    # innovation (observation minus prediction)\r\n    e = z - H.dot(x_est)\r\n\r\n    #S = R + H.dot(P_est).dot(H.T)\r\n    #K = P_est.dot(H.T).dot(np.linalg.inv(S))\r\n\r\n    # x_klm = x_est + K.dot(e)\r\n    # P_klm = (np.matrix(np.identity(2)) - K.dot(H)).dot(P_est)\r\n\r\n    #x_klm_array.append(x_klm)\r\n\r\n    K = np.array([[0.1, 0],[0, 0.1]])\r\n    I = np.array([[1, 0],[0, 1]])\r\n    x_est = (I - K.dot(H)).dot(x_odo) + K.dot(z)\r\n    x_est_array.append(x_est)\r\n\r\n    t_array_0.append((i+1)*dt)\r\n    t_array.append((i+1)*dt)\r\n\r\n    ''' update '''\r\n    # x_est = x_klm\r\n    # P_est = P_klm\r\n\r\n# extract the position component only\r\nx_tr_array_pos = []\r\nx_est_array_pos = []\r\nfor i in range(len(x_est_array)):\r\n    x_est_array_pos.append(x_est_array[i][0,0])\r\n    x_tr_array_pos.append(x_tr_array[i][0,0])\r\n\r\nz_array_pos = []\r\nx_klm_array_pos = []\r\nfor i in range(len(z_array)):\r\n    z_array_pos.append(z_array[i][0,0])\r\n    # x_klm_array_pos.append(x_klm_array[i][0,0])\r\n\r\nfig = plt.figure()\r\nax = fig.add_subplot(111)\r\nax.plot(t_array_0,x_tr_array_pos,color='red',marker='o',label='gt')\r\nax.plot(t_array_0,x_est_array_pos,color='green',marker='o',label='est')\r\nax.plot(t_array,z_array_pos,color='blue',marker='o',label='obs')\r\n# ax.plot(t_array,x_klm_array_pos,color='orange',marker='o',label='signal filtered')\r\nax.set_xlabel('t')\r\nax.set_ylabel('x')\r\nax.legend(loc=\"lower right\")\r\n\r\nfig.show()\r\n\r\na = 1" }, { "alpha_fraction": 0.5437787771224976, "alphanum_fraction": 0.5889400839805603, "avg_line_length": 23.155555725097656, "blob_id": "b15ecdec42ead8bd67732ec31e9996e9d069f2a3", "content_id": "2db32f17b1f4a33d26db547ce213178b6d304fef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1163, "license_type": "no_license", "max_line_length": 79, "num_lines": 45, "path": "/statistics/myTest_mean.py", "repo_name": "rikumiura/pyCodes", "src_encoding": "UTF-8", "text": "import numpy as np\r\nfrom scipy.stats import t\r\nimport matplotlib.pyplot as plt\r\n\r\n## Test of the mean\r\n\r\nsNum = 1000\r\nTestType = \"OneSided\"\r\n# TestType = \"TwoSided\"\r\npm_test = 0.3\r\n\r\nx = np.random.randn(sNum)\r\npm = 0\r\npv = 1\r\nsm = np.mean(x)\r\ns = np.std(x,ddof=1)\r\n\r\n# lower/upper bounds of the central 95% region of the t-distribution\r\ndf = len(x) - 1\r\nt_low, t_upp = t.ppf(q=[0.025, 0.975], df=df)\r\n\r\n# 95% confidence interval for pm\r\npm_lower = sm + t_low * np.sqrt( s*s / len(x))\r\npm_upper = sm + t_upp * np.sqrt( s*s / len(x))\r\n\r\nprint(\"t_low, t_upp: \", t_low, t_upp)\r\nprint(\"sm: \", sm)\r\nprint(\"pm: \", pm)\r\nprint(\"pm_low, pm_upp: \", pm_lower, pm_upper)\r\nprint(\"pm_test: \", pm_test)\r\n\r\nif TestType == \"OneSided\":\r\n    result = (pm_lower <= pm_test)\r\nelse:\r\n    result = (pm_lower <= pm_test) and (pm_upper >= pm_test)\r\nprint(\"result: \", result)\r\n\r\n# p-value: probability of a value at least as extreme as the statistic\r\n# (one tail; double it for the two-sided test)\r\np_val = t.cdf(pm_test, df=df)\r\np_val = 1 - p_val if p_val > 0.5 else p_val\r\nprint(\"p_val: \", p_val)\r\nres_005 = bool(p_val > 0.05)\r\nres_001 = bool(p_val > 0.01)\r\nprint(\"p_val > 0.05: \", res_005)\r\nprint(\"p_val > 0.01: \", res_001)" }, { "alpha_fraction": 0.5861014127731323, "alphanum_fraction": 0.6293706297874451, "avg_line_length": 39.6363639831543, "blob_id": "5439a84120c023a8e3dc3c3b7bb9a6517aa414b1", "content_id": "e5e9b2c8f93d51bf9bd310d6cb43ca7ad8aca046", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2288, "license_type": "no_license", "max_line_length": 126, "num_lines": 55, "path": "/statistics/myImgCat.py", "repo_name": "rikumiura/pyCodes", "src_encoding": "UTF-8", "text": "import cv2\r\nimport numpy as np\r\n\r\ndef hconcat_resize_min(im_list, interpolation=cv2.INTER_CUBIC):\r\n    h_min = min(im.shape[0] for im in im_list)\r\n    im_list_resize = [cv2.resize(im, (int(im.shape[1] * h_min / im.shape[0]), h_min), interpolation=interpolation)\r\n                      for im in im_list]\r\n    return cv2.hconcat(im_list_resize)\r\n\r\ndef vconcat_resize_min(im_list, interpolation=cv2.INTER_CUBIC):\r\n    w_min = min(im.shape[1] for im in im_list)\r\n    im_list_resize =
[cv2.resize(im, (w_min, int(im.shape[0] * w_min / im.shape[1])), interpolation=interpolation)\r\n                      for im in im_list]\r\n    return cv2.vconcat(im_list_resize)\r\n\r\ndef concat_tile_resize(im_list_2d, interpolation=cv2.INTER_CUBIC):\r\n    im_list_v = [hconcat_resize_min(im_list_h, interpolation=cv2.INTER_CUBIC) for im_list_h in im_list_2d]\r\n    return vconcat_resize_min(im_list_v, interpolation=cv2.INTER_CUBIC)\r\n\r\nim1Name = \"imgfld/ookawa918IMGL1370_TP_V.jpg_beforeafterwarp.jpg\"\r\nim2Name = \"imgfld/ookawa918IMGL1370_TP_V.jpg_output_p5.jpg\"\r\nim3Name = \"imgfld/ookawa918IMGL1370_TP_V.jpg_beforewarp.jpg\"\r\nim4Name = \"imgfld/ookawa918IMGL1370_TP_V.jpg_afterwarp.jpg\"\r\nim5Name = \"imgfld/ookawa918IMGL1370_TP_V.jpg_output_p5.jpg\"\r\nim6Name = \"\"\r\nimgNames = [im1Name, im2Name, im3Name, im4Name, im5Name, im6Name]\r\n\r\ntilePtn = 2\r\n\r\noutName = \"myfaceMoveLandmark_movelandmark.jpg\"\r\noutWidth = 600\r\n\r\nimgSet = []\r\nfor name in imgNames:\r\n    if len(name) != 0:\r\n        im = cv2.imread(name)\r\n    else:\r\n        im = np.ones((outWidth, outWidth, 3),dtype=np.uint8)\r\n        im *= 255\r\n    imgSet.append(im)\r\n\r\n# make the tiled image\r\nif tilePtn == 6:\r\n    im_tile_resize = concat_tile_resize([[imgSet[0], imgSet[1]],\r\n                                         [imgSet[2], imgSet[3]],\r\n                                         [imgSet[4], imgSet[5]]])\r\nelif tilePtn == 4:\r\n    im_tile_resize = concat_tile_resize([[imgSet[0], imgSet[1]],\r\n                                         [imgSet[2], imgSet[3]]])\r\nelif tilePtn == 2:\r\n    im_tile_resize = concat_tile_resize([[imgSet[0], imgSet[1]]])\r\n\r\n# cv2.resize returns a new array, so keep the result\r\nw_tileImg = min([outWidth, im_tile_resize.shape[1]])\r\nim_tile_resize = cv2.resize(im_tile_resize, (w_tileImg, int(im_tile_resize.shape[0] * w_tileImg / im_tile_resize.shape[1])), interpolation=cv2.INTER_CUBIC)\r\ncv2.imwrite(outName, im_tile_resize)" }, { "alpha_fraction": 0.4384615421295166, "alphanum_fraction": 0.5179487466812134, "avg_line_length": 29.019229888916016, "blob_id": "40ab86733f3fafbf2cddb0bd0ddf17072dfe9f16", "content_id": "7f9341b9da36685d1f14d24ef862b68d6a659b79", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1600, "license_type": "no_license", "max_line_length": 97, "num_lines": 52, "path": "/kerasPrj/myASLrecognition/myCameraASLRecognition.py", "repo_name": "rikumiura/pyCodes", "src_encoding": "UTF-8", "text": "import cv2\nimport numpy as np\nfrom keras.models import load_model\n\nALP2Num = {\"A\":0, \"B\":1, \"C\":2, \"D\":3, \"E\":4, \"F\":5, \"G\":6, \"H\":7, \"I\":8, \"J\":9,\n           \"K\":10, \"L\":11, \"M\":12, \"N\":13, \"O\":14, \"P\":15, \"Q\":16, \"R\":17, \"S\":18, \"T\":19,\n           \"U\":20, \"V\":21, \"W\":22, \"X\":23, \"Y\":24, \"Z\":25, \"del\":26, \"space\":27\n           }\nNum2ALF = [\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\",\n           \"K\", \"L\", \"M\", \"N\", \"O\", \"P\", \"Q\", \"R\", \"S\", \"T\",\n           \"U\", \"V\", \"W\", \"X\", \"Y\", \"Z\", \"del\", \"space\"\n           ]\n\nmodel = load_model('ASL_vgg16ft_r5.h5')\n\n# prepare capture\ncam = cv2.VideoCapture(0)\nwhile True:\n    # grab a frame --- (*2)\n    _, img = cam.read()\n    imgflip = cv2.flip(img,1)\n\n    roi = (250, 200, 400, 350)\n\n    #rect_img = imgflip.copy()\n    rect_img = img.copy()\n    cv2.rectangle(rect_img, (roi[2],roi[3]), (roi[0],roi[1]), (255, 0, 0), 2)\n\n    # crop the ROI from the raw frame\n    s_roi = img[roi[1]: roi[3], roi[0]: roi[2]]\n\n    testImg = s_roi.reshape(-1, s_roi.shape[0], s_roi.shape[1], s_roi.shape[2]).astype('float32')\n    testImg /= 255\n    predictions = model.predict(testImg)\n    print(predictions)\n    maxArg = np.argmax(predictions[0])\n    print(\"maxArg\")\n    print(maxArg)\n    print(\"result\")\n    print(Num2ALF[maxArg])\n\n    # paste the ROI back into the same place in the output image\n    rect_img[roi[1]: roi[3], roi[0]: roi[2]] = s_roi\n\n    if predictions[0][maxArg] > 0.5:\n        text = Num2ALF[maxArg]\n        # cv2.putText takes an (x, y) origin; draw the label just above the ROI box\n        cv2.putText(rect_img, text, (roi[0], roi[1]-10), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 0))\n\n    cv2.imshow('PUSH ENTER KEY', rect_img)\n    if cv2.waitKey(1) == 13: break\ncam.release()\ncv2.destroyAllWindows()" }, { "alpha_fraction": 0.6470361948013306, "alphanum_fraction": 0.6845650672912598, "avg_line_length": 31.892404556274414, "blob_id": "75eb4fae764d3b2da69acdb2103a125fab8a0a12", "content_id": "dfac1fa98f8805dc37884b8f11e6d35b354a4028", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5196, "license_type": "no_license", "max_line_length": 125, "num_lines": 158, "path": "/kerasPrj/myASLrecognition/myKerasFinetune.py", "repo_name": "rikumiura/pyCodes", "src_encoding": "UTF-8", "text": "import glob\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nfrom keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array, array_to_img\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import OneHotEncoder\nfrom keras.optimizers import Adam\nimport pandas as pd\n\nimgW = 150\nimgH = 150\nIMG_DIM = (imgW, imgH)\ntrain_flds = glob.glob('asl-alphabet-test/*')\ndataX = []\ndataLbl = []\nfor fld in train_flds:\n    imgfiles = glob.glob(fld + '/*.jpg')\n    if len(imgfiles) == 0: continue\n    rootName, fldName = fld.split('\\\\')\n    for imgfile in imgfiles:\n        dataX.append(img_to_array(load_img(imgfile, target_size=IMG_DIM)))\n        dataLbl.append(fldName)\n\n\nALP2Num = {\"A\":0, \"B\":1, \"C\":2, \"D\":3, \"E\":4, \"F\":5, \"G\":6, \"H\":7, \"I\":8, \"J\":9,\n           \"K\":10, \"L\":11, \"M\":12, \"N\":13, \"O\":14, \"P\":15, \"Q\":16, \"R\":17, \"S\":18, \"T\":19,\n           \"U\":20, \"V\":21, \"W\":22, \"X\":23, \"Y\":24, \"Z\":25, \"del\":26, \"space\":27, 'nothing':28\n           }\n\ndatay = np.zeros(len(dataLbl))\nfor i, lbl in enumerate(dataLbl):\n    datay[i] = ALP2Num[lbl]\n\n\ndataX = np.array(dataX)\ndatay = np.array(datay).astype('int')\n\n# delete \"nothing\" img\nidxsNotDelete = (datay != 28)\ndatay = datay[idxsNotDelete]\ndataX = dataX[idxsNotDelete]\n\nX_train, X_test, Y_train, Y_test = train_test_split(dataX, datay,train_size=0.8,random_state=2)\n\n# normalization\nXtrain_scaled = X_train.astype('float32')\nXtest_scaled = X_test.astype('float32')\nXtrain_scaled /= 255\nXtest_scaled /= 255\n\n# one-hot encoding\n# Ytrain_ohe = pd.get_dummies(Y_train.reset_index(drop=True)).as_matrix()\n# Ytest_ohe = pd.get_dummies(Y_test.reset_index(drop=True)).as_matrix()\nenc = OneHotEncoder(sparse=False, dtype=np.float32)\nYtrain_ohe = enc.fit_transform(Y_train.reshape(-1,1))\nYtest_ohe = enc.fit_transform(Y_test.reshape(-1,1))\n\n# img Augmentation\ntrain_datagen = ImageDataGenerator(rescale=1./255, zoom_range=0.3, rotation_range=50,\n                                   width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2,\n                                   fill_mode='nearest')\nval_datagen = ImageDataGenerator(rescale=1./255)\ntrain_generator = train_datagen.flow(X_train, Ytrain_ohe, batch_size=30)\nval_generator = val_datagen.flow(X_test, Ytest_ohe, batch_size=20)\n\n\n# output = vgg.layers[-1].output\n# output = keras.layers.Flatten()(output)\n# vgg_model = Model(vgg.input, output)\n#\n# vgg_model.trainable = False\n# for layer in vgg_model.layers:\n#     layer.trainable = False\n\n# build CNN\nfrom keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, InputLayer, BatchNormalization,GlobalAveragePooling2D\nfrom 
keras.models import Sequential\nfrom keras import optimizers\n# load VGG16\nfrom keras.applications import vgg16\nfrom keras.models import Model\nimport keras\n\nvgg = vgg16.VGG16(include_top=False, weights='imagenet',input_shape=(imgW,imgH,3))\n\n# model = Sequential()\n# model.add(vgg_model)\n# input_shape = vgg_model.output_shape[1]\n# model.add(Dense(512, activation='relu', input_dim=input_shape))\n# model.add(Dropout(0.3))\n# model.add(Dense(512, activation='relu'))\n# model.add(Dropout(0.3))\n# model.add(Dense(28, activation='softmax'))\n\nx = vgg.output\n# x = GlobalAveragePooling2D()(x)\nx = keras.layers.Flatten()(x)\nx = Dense(512, activation='relu')(x)\n# x = BatchNormalization()(x)\nx = Dropout(0.3)(x)\nx = Dense(512, activation='relu')(x)\n# x = BatchNormalization()(x)\nx = Dropout(0.3)(x)\ntotal_classes = Ytrain_ohe.shape[1]\npredictions = Dense(total_classes, activation='softmax')(x)\nmodel = Model(inputs=vgg.input, outputs=predictions)\n\n# set trainable\nmodel.trainable = True\nset_trainable = False\nfor layer in model.layers:\n if layer.name in ['block5_conv1', 'block4_conv1']:\n set_trainable = True\n if set_trainable:\n layer.trainable = True\n else:\n layer.trainable = False\n\nlayers = [(layer, layer.name, layer.trainable) for layer in model.layers]\nprint(\"layer, layer.name, layer.trainable\")\nfor layer in layers:\n print(layer)\n\nEpochs = 30\nmodel.compile(Adam(lr=.0001), loss='categorical_crossentropy', metrics=['accuracy'])\nprint(\"model.summary()\")\nprint(model.summary())\nhistory = model.fit_generator(train_generator, steps_per_epoch=100, epochs=Epochs,\n validation_data=val_generator, validation_steps=50,\n verbose=1)\n\nmodel.save('ASL_vgg16ft.h5')\n\n# results\nf, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4))\nt = f.suptitle('Basic CNN Performance', fontsize=12)\nf.subplots_adjust(top=0.85, wspace=0.3)\n\nepoch_list = list(range(1,Epochs+1))\nax1.plot(epoch_list, history.history['acc'], label='Train Accuracy')\nax1.plot(epoch_list, history.history['val_acc'], label='Validation Accuracy')\nax1.set_xticks(np.arange(0, Epochs+1, 5))\nax1.set_ylabel('Accuracy Value')\nax1.set_xlabel('Epoch')\nax1.set_title('Accuracy')\nl1 = ax1.legend(loc=\"best\")\n\nax2.plot(epoch_list, history.history['loss'], label='Train Loss')\nax2.plot(epoch_list, history.history['val_loss'], label='Validation Loss')\nax2.set_xticks(np.arange(0, Epochs+1, 5))\nax2.set_ylabel('Loss Value')\nax2.set_xlabel('Epoch')\nax2.set_title('Loss')\nl2 = ax2.legend(loc=\"best\")\n\n# plt.show()\nplt.savefig('myKerasFinetune.png')" }, { "alpha_fraction": 0.5554187297821045, "alphanum_fraction": 0.6235632300376892, "avg_line_length": 24.35416603088379, "blob_id": "c265b0561d2f058b8aebf59cb0b39a6265107bde", "content_id": "2a202c376439b7e218f855aab4a08c6549429d90", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2440, "license_type": "no_license", "max_line_length": 61, "num_lines": 96, "path": "/SchedulePlotter/SchedulePlotter.py", "repo_name": "rikumiura/pyCodes", "src_encoding": "UTF-8", "text": "\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn import datasets, linear_model\nfrom sklearn.metrics import mean_squared_error, r2_score\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.tree import export_graphviz\n\n\n## year, month\nst_y = 2017\nst_m = 4\ned_y = 2021\ned_m = 3\n\n## o\nshape00_y = [2017,2018,2019]\nshape00_m = [5,6,4]\nshape00_lb = [\"init\",\"mid\",\"fin\"]\n## โ–ก\nshape01_y = 
[2017,2017]\nshape01_m = [6,8]\nshape01_lb = [\"s1\",\"s2\"]\n## โ–ณ\nshape02_y = [2018,2018,2019]\nshape02_m = [5,10,12]\nshape02_lb = [\"r1\",\"\",\"\"]\n\nxlbls = []\ncurrent_m = st_m\ncurrent_y = st_y\nwhile 1:\n if current_m == 1:\n xlbl = str(current_m) + \"\\n\" + str(current_y)\n elif (current_y == st_y) and (current_m == st_m):\n xlbl = str(current_m) + \"\\n\" + str(current_y)\n else:\n xlbl = str(current_m)\n xlbls.append(xlbl)\n\n current_m += 1\n if current_m == 13:\n current_m = 1\n current_y += 1\n\n if (current_y == ed_y) and (current_m == ed_m):\n break\n\n\nshape00_xpoint = []\nfor idx, year in enumerate(shape00_y):\n st_point = st_y * 12 + st_m\n tgt_point = year * 12 + shape00_m[idx]\n xpoint = tgt_point - st_point\n shape00_xpoint.append(xpoint)\n\nshape01_xpoint = []\nfor idx, year in enumerate(shape01_y):\n st_point = st_y * 12 + st_m\n tgt_point = year * 12 + shape01_m[idx]\n xpoint = tgt_point - st_point\n shape01_xpoint.append(xpoint)\n\nshape02_xpoint = []\nfor idx, year in enumerate(shape02_y):\n st_point = st_y * 12 + st_m\n tgt_point = year * 12 + shape02_m[idx]\n xpoint = tgt_point - st_point\n shape02_xpoint.append(xpoint)\n\n## figsize a4 = (11 inch, 8 inch)\nfig = plt.figure(figsize=(11,8))\n\nax = fig.add_subplot(1,1,1)\nax.set_title(\"title\")\nax.set_xlabel(\"x\")\nax.set_ylabel(\"y\")\nax.set_xticks(np.arange(0, len(xlbls)-1, 1), minor=False)\nax.set_xticklabels(xlbls)\nax.set_xlim([-1,len(xlbls)])\nax.set_yticks(np.linspace(0, 5, 0), minor=False)\nax.set_ylim([0,1])\nax.grid(b=None, which='major', axis='x')\n\nfor idx, xp in enumerate(shape00_xpoint):\n ax.plot(xp, 0.2, marker=\"o\", markersize=20, color=\"blue\")\n ax.text(xp, 0.2, shape00_lb[idx])\n\nfor idx, xp in enumerate(shape01_xpoint):\n ax.plot(xp, 0.4, marker=\"s\", markersize=20, color=\"blue\")\n ax.text(xp, 0.4, shape01_lb[idx])\n\nfor idx, xp in enumerate(shape02_xpoint):\n ax.plot(xp, 0.6, marker=\"D\", markersize=20, color=\"blue\")\n ax.text(xp, 0.6, shape02_lb[idx])\n\nplt.show()\n\n" }, { "alpha_fraction": 0.47484055161476135, "alphanum_fraction": 0.5201984643936157, "avg_line_length": 28.717391967773438, "blob_id": "d6e8c9e76dbb439f931268985c194c0a002af146", "content_id": "9cfeaf9fec3ef21cc32a3bc47f97940d8bbb61e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1437, "license_type": "no_license", "max_line_length": 74, "num_lines": 46, "path": "/mlPrediction/toTxt_sakura.py", "repo_name": "rikumiura/pyCodes", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport csv\r\nimport glob\r\nimport os\r\n\r\n# 1ๆœˆใฏใ˜ใพใ‚Š\r\ndaysInMonth = np.array([31,28,31,30,31,30,31,31,30,31,30,31])\r\n\r\nplaceRow = 65\r\n# 65.. 
Tokyo\r\n\r\nplaceRows = [65, 64, 67, 42,\r\n             44, 53, 54, 57,\r\n             59, 60, 61]\r\nplaceNames = [\"tokyo\", \"shizuoka\", \"yokohama\", \"nagano\",\r\n              \"utsunomiya\", \"kumagaya\", \"mito\", \"nagoya\",\r\n              \"kofu\", \"choshi\", \"tsu\"]\r\n\r\ndataPath = \"data_sakura/\"\r\nfor f in glob.glob(os.path.join(dataPath, \"*.csv\")):\r\n    name, ext = os.path.splitext(f)\r\n    fld, filename = name.split(\"\\\\\")\r\n    file = open(f, mode='r')\r\n    data_reader = csv.reader(file,delimiter=\",\")\r\n    data_raw = [row for row in data_reader]\r\n    name_raw = data_raw[0]\r\n\r\n    for pidx, plc in enumerate(placeRows):\r\n        data_plc = data_raw[plc]\r\n        # convert month/day into a day-of-year number\r\n        data = []\r\n        for i in range(2,len(data_plc)-1,2):\r\n            month = int(data_plc[i])\r\n            day = int(data_plc[i+1])\r\n            if month == 2 and day == 29:\r\n                day = 28\r\n            month = month - 1\r\n            dayVal = sum(daysInMonth[:month]) + (day-1)\r\n            data.append(dayVal)\r\n\r\n        print(\"placeName: \", data_plc[0])\r\n        txtName = fld + \"\\\\\" + placeNames[pidx] + \"_\" + filename + \".txt\"\r\n        file_w = open(txtName, mode='w')\r\n        writer = csv.writer(file_w)\r\n        writer.writerow(data)" }, { "alpha_fraction": 0.5052833557128906, "alphanum_fraction": 0.5494716763496399, "avg_line_length": 20.191490173339844, "blob_id": "e5f0f150b1532424732f32398ba94da3e0066d72", "content_id": "ade94c4657c4894f323e5445ad3b1eac374beda7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1159, "license_type": "no_license", "max_line_length": 67, "num_lines": 47, "path": "/statistics/myEstimation_pmean.py", "repo_name": "rikumiura/pyCodes", "src_encoding": "UTF-8", "text": "import numpy as np\r\nfrom scipy.stats import t\r\nimport matplotlib.pyplot as plt\r\n\r\n## When the sampled population follows a normal distribution,\r\n## t = (sm - pm) / sqrt( s^2 / n ) follows the t distribution with n-1 degrees of freedom,\r\n## so the 95% confidence interval is\r\n## sm + T0.025 * sqrt( s^2/n ) <= pm <= sm + T0.975 * sqrt( s^2/n )\r\n\r\nDistType = \"Norm\"\r\nDistType = \"Bin\"\r\n\r\nsNum = 1000\r\n\r\nif DistType == \"Norm\":\r\n    x = np.random.randn(sNum)\r\n    pm = 0\r\n    pv = 1\r\nelif DistType == \"Bin\":\r\n    n = 100\r\n    p = 0.2\r\n    x = np.random.binomial(n, p, sNum)\r\n    pm = n * p\r\n    pv = n * p * ( 1 - p )\r\nsm = np.mean(x)\r\ns = np.std(x,ddof=1)\r\n\r\n# upper and lower bounds of the 95% interval of the t distribution\r\nt_low, t_upp = t.ppf(q=[0.025, 0.975], df=len(x) - 1)\r\n\r\n# 95% confidence interval for pm\r\npm_lower = sm + t_low * np.sqrt( s*s / len(x))\r\npm_upper = sm + t_upp * np.sqrt( s*s / len(x))\r\n\r\nprint(\"t_low, t_upp: \", t_low, t_upp)\r\nprint(\"sm: \", sm)\r\nprint(\"pm: \", pm)\r\nprint(\"pm_low, pm_upp: \", pm_lower, pm_upper)\r\n\r\nfig = plt.figure()\r\nax = fig.add_subplot(1,1,1)\r\nax.hist(x, bins=100)\r\nax.set_title(\"histogram\")\r\nax.set_xlabel(\"x\")\r\nax.set_ylabel(\"frequency\")\r\nfig.show()\r\nplt.show()" }, { "alpha_fraction": 0.5543822646141052, "alphanum_fraction": 0.6008448004722595, "avg_line_length": 21.069766998291016, "blob_id": "f60d896ef4fd1af536cbf9784a58ec80aad948f4", "content_id": "6215e15658cdabec704288a4c9ca7882bb1b2d8e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1110, "license_type": "no_license", "max_line_length": 61, "num_lines": 43, "path": "/statistics/myTest_poisson.py", "repo_name": "rikumiura/pyCodes", "src_encoding": "UTF-8", "text": "import numpy as np\r\nfrom scipy.stats import norm\r\nimport matplotlib.pyplot as plt\r\n\r\n## 
ใƒใ‚ขใ‚ฝใƒณๅˆ†ๅธƒใฎๆคœๅฎš\r\n\r\n## 1ใƒตๆœˆใ‚ใŸใ‚Šๅนณๅ‡lamdaๅ›ž่ตทใใ‚‹ไบ‹่ฑกใŒ่ตทใ“ใ‚‹ๅ›žๆ•ฐxใฏใƒใ‚ขใ‚ฝใƒณๅˆ†ๅธƒPo(lamda)ใซๅพ“ใ†\r\n## x_=1/n *ฮฃxi (i=1,..,n)ใฏไธญๅฟƒๆฅต้™ๅฎš็†ใซใ‚ˆใ‚ŠN(lamda, lamda/n)ใซๅพ“ใ†\r\n\r\n\r\n# TestType = \"OneSided\"\r\nTestType = \"TwoSided\"\r\nn = 12\r\nx_test = 16\r\n\r\nlamda = 20\r\npm = lamda\r\npv = lamda\r\n\r\n# 95%ๅŒบ้–“ใฎไธŠ้™ใ€ไธ‹้™\r\nz_low, z_upp = -1.96, 1.96\r\n\r\n# zๅค‰ๆ›ใ—ใŸxใฎๅ€ค\r\nz = ( x_test - lamda ) / np.sqrt(lamda/n)\r\n\r\nprint(\"x_test: \", x_test)\r\nprint(\"z_low, z_upp: \", z_low, z_upp)\r\nprint(\"z: \", z)\r\n\r\nif TestType == \"OneSided\":\r\n result = True if (z_low <= z ) else False\r\nelse:\r\n result = True if (z_low <= z) and (z_upp >= z) else False\r\nprint(\"result: \", result)\r\n\r\n# pๅ€คใ€€็ตฑ่จˆ้‡ใ‚ˆใ‚Šๆฅต็ซฏใชๅ€คใ‚’ๅ–ใ‚‹็ขบ็އ\r\np_val = norm.cdf(z)\r\np_val = 1 - p_val if p_val > 0.5 else p_val\r\nprint(\"p_val: \", p_val)\r\nres_005 = True if p_val > 0.05 else False\r\nres_001 = True if p_val > 0.01 else False\r\nprint(\"p_val > 0.05: \", res_005)\r\nprint(\"p_val > 0.01: \", res_001)" }, { "alpha_fraction": 0.5705996155738831, "alphanum_fraction": 0.5948344469070435, "avg_line_length": 29.508960723876953, "blob_id": "3eff27c75683d57af6d90da35603543036539897", "content_id": "604f4c19921a73670ceb3ad813ac12f36a925798", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8805, "license_type": "no_license", "max_line_length": 115, "num_lines": 279, "path": "/mlPrediction/myMlPrediction.py", "repo_name": "rikumiura/pyCodes", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom sklearn import datasets, linear_model\r\nfrom sklearn.metrics import mean_squared_error, r2_score\r\nfrom sklearn.ensemble import RandomForestRegressor\r\nimport glob\r\nimport os\r\nimport csv\r\nimport matplotlib.cm as cm\r\n\r\ndef normData(dataX):\r\n dataX_norm = dataX\r\n for i in range(len(dataX[0, :])):\r\n Max = max(dataX_norm[:, i])\r\n Min = min(dataX_norm[:, i])\r\n if (Max - Min) > 1.0e-6:\r\n dataX_norm[:, i] = (dataX_norm[:, i] - Min) / (Max - Min)\r\n else:\r\n dataX_norm[:, i] = - Max\r\n return dataX_norm\r\n\r\ndef day2md(day):\r\n daysInMonth = np.array([31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31])\r\n day_res = 0\r\n month_res = 0\r\n day_tmp = round(day) + 1\r\n for i, days in enumerate(daysInMonth):\r\n tmp = int(day_tmp - days)\r\n if tmp <= 0:\r\n month_res = i + 1\r\n day_res = day_tmp\r\n break\r\n else:\r\n day_tmp -= days\r\n return month_res, day_res\r\n\r\n\r\ndataX = []\r\ndataXNames = []\r\ndataX_2d_list = []\r\ndataNames = []\r\ndataNames_clor = []\r\ndataY = []\r\n\r\n# load data X\r\n# 4ๆœˆ ๏ฝž 3ๆœˆใฎใƒ‡ใƒผใ‚ฟ\r\n# 333 .. 
2/28\r\n# 347 is the max index\r\ndataXPath = \"data_weather/\"\r\nnNum = 0\r\nfNum = 0\r\nfor f in glob.glob(os.path.join(dataXPath, \"*.txt\")):\r\n    # get data\r\n    data_raw = np.loadtxt(f, delimiter=\",\", dtype=None, skiprows=1)\r\n    #data_feature = data_raw[:334,:]\r\n    data_feature = data_raw[:344, :]\r\n    dataX_2d_list.append(data_feature)\r\n    fNum = len(data_feature[0,:])*len(data_feature[:,0])\r\n    data = data_feature\r\n    nNum += 1\r\n    for val in data:\r\n        dataX.append(val)\r\n\r\n    # get names\r\n    file = open(f, mode='r')\r\n    data_reader = csv.reader(file,delimiter=\",\")\r\n    data_raw = [row for row in data_reader]\r\n    dataNames = data_raw[0]\r\n\r\n    arr_clr = cm.get_cmap(\"tab20\").colors\r\n    name_clor = [arr_clr[i] for i in range(len(dataNames))]\r\n\r\n    for day in range(len(data_feature[:,0])):\r\n        dayFromJan = day + 90 if day < 275 else day - 275\r\n        md_m, md_d = day2md(dayFromJan)\r\n        dataNamesDays = [name + \"_day\" + str(dayFromJan) + \"_\" + str(md_m) + \"/\" + str(md_d) for name in dataNames]\r\n        dataXNames.extend(dataNamesDays)\r\n        dataNames_clor.extend(name_clor)\r\n\r\ndataX = np.array(dataX)\r\ndataX = np.reshape(dataX, (nNum,fNum))\r\n# dataX = dataX[:-1,:]\r\n# nNum -= 1\r\n\r\nisNanIdx = np.argwhere(np.isnan(dataX))\r\nisInfIdx = np.argwhere(np.isinf(dataX))\r\nprint(\"isNanIdx: \", isNanIdx)\r\nprint(\"isInfIdx: \", isInfIdx)\r\n\r\n# load data Y\r\ndataYPath = \"data_sakura/\"\r\nfor f in glob.glob(os.path.join(dataYPath, \"*.txt\")):\r\n    data_tgt = np.loadtxt(f, delimiter=\",\")\r\n    for val in data_tgt:\r\n        dataY.append(val)\r\ndataY = np.array(dataY)\r\n\r\n## plot 2dlist data\r\nminY, maxY = min(dataY), max(dataY)\r\ndataY_01 = (dataY - minY) / (maxY - minY)\r\nfigAll_plt = plt.figure()\r\nfigAll_plt.subplots_adjust(wspace=0.4, hspace=0.6)\r\nfor i in range(len(dataNames)):\r\n    numcol = 3\r\n    numrow = len(dataNames) // numcol + 1  # integer division: add_subplot needs an int row count\r\n    ax = figAll_plt.add_subplot(numrow,numcol,i+1)\r\n    for j in range(nNum):\r\n        y = dataX_2d_list[j][:,i]\r\n        x = range(len(y))\r\n        cval = \"\"\r\n        if dataY_01[j] >= 0.5:\r\n            cval = \"#ee0000\"\r\n        else:\r\n            cval = \"#0000ee\"\r\n        ax.plot(x,y,c=cval, alpha=0.7)\r\n        #carr = np.array([dataY_01[j] for nn in range(len(x))])\r\n        #ax.scatter(x, y, c=carr, cmap=\"jet\")\r\n    ttl = dataNames[i]\r\n    ax.set_title(ttl)\r\n    ax.set_xlabel(\"x\")\r\n    ax.set_ylabel(\"y\")\r\nfigAll_plt.show()\r\n\r\n## calc 1d stats\r\nminY, maxY = min(dataY), max(dataY)\r\ndataY_01 = (dataY - minY) / (maxY - minY)\r\nfigStat_plt = plt.figure()\r\nfigStat_plt.subplots_adjust(wspace=0.4, hspace=0.6)\r\nftoShow = [0, 1, 3, 5, 6]\r\nfor i in range(len(ftoShow)):\r\n    fIdx = ftoShow[i]  # use the selected feature index (the original indexed with the loop counter by mistake)\r\n    f_mean = []\r\n    f_max = []\r\n    f_min = []\r\n    f_var = []\r\n    for j in range(nNum):\r\n        y = dataX_2d_list[j][:,fIdx]\r\n        f_mean.append(sum(y)/float(len(y)))\r\n        f_max.append(max(y))\r\n        f_min.append(min(y))\r\n        f_var.append(np.std(y))\r\n    f_mean = np.array(f_mean)\r\n    f_max = np.array(f_max)\r\n    f_min = np.array(f_min)\r\n    f_var = np.array(f_var)\r\n\r\n    x = range(nNum)\r\n    ax = figStat_plt.add_subplot(len(ftoShow), 4, 4*i + 1)\r\n    ax.scatter(f_mean, dataY, color=\"#222222\", alpha=0.7)\r\n    ttl = dataNames[fIdx] + \"_mean\"\r\n    ax.set_title(ttl)\r\n    ax.set_xlabel(\"x\")\r\n    ax.set_ylabel(\"y\")\r\n\r\n    ax = figStat_plt.add_subplot(len(ftoShow), 4, 4 * i + 2)\r\n    ax.scatter(f_max, dataY, color=\"#222222\", alpha=0.7)\r\n    ttl = dataNames[fIdx] + \"_max\"\r\n    ax.set_title(ttl)\r\n    ax.set_xlabel(\"x\")\r\n    ax.set_ylabel(\"y\")\r\n\r\n    ax = figStat_plt.add_subplot(len(ftoShow), 4, 4 * i + 3)\r\n    ax.scatter(f_min, dataY, color=\"#222222\", alpha=0.7)\r\n    ttl = dataNames[fIdx] + \"_min\"\r\n    ax.set_title(ttl)\r\n    ax.set_xlabel(\"x\")\r\n    ax.set_ylabel(\"y\")\r\n\r\n    ax = figStat_plt.add_subplot(len(ftoShow), 4, 4 * i + 4)\r\n    ax.scatter(f_var, dataY, color=\"#222222\", alpha=0.7)\r\n    ttl = dataNames[fIdx] + \"_std\"\r\n    ax.set_title(ttl)\r\n    ax.set_xlabel(\"x\")\r\n    ax.set_ylabel(\"y\")\r\nfigStat_plt.show()\r\n\r\n# data normalization\r\ndataX = normData(dataX)\r\n# dataXminY, maxY = min(dataY), max(dataY)\r\n# dataY = (dataY - minY) / (maxY - minY)\r\n\r\n# Split the data into training/testing sets\r\nnumTest = 20\r\ndataX_train = dataX[:-numTest]\r\ndataX_test = dataX[-numTest:]\r\ndataY_train = dataY[:-numTest]\r\ndataY_test = dataY[-numTest:]\r\n\r\n# regression by RF\r\nrf = RandomForestRegressor(n_estimators=100,random_state=42)\r\nrf.fit(dataX_train, dataY_train)\r\ndataY_pred = rf.predict(dataX_test)\r\nerr_mae = abs(dataY_test - dataY_pred)\r\nave_mae = abs(dataY_test - np.mean(dataY_test))\r\n\r\nprint(\"err_mae: \", np.mean(err_mae))\r\nprint(\"ave_mae: \", np.mean(ave_mae))\r\nprint(\"importances: \", rf.feature_importances_)\r\nsortedIdx = np.argsort(abs(rf.feature_importances_))\r\nprint(\"sorted idx: \", sortedIdx[::-1])\r\n\r\nnumShow = 3\r\nfig = plt.figure()\r\nfor i in range(numShow):\r\n    fIdx = sortedIdx[::-1][i]\r\n    ax = fig.add_subplot(1,numShow,i+1)\r\n    ax.scatter(dataX_test[:,fIdx], dataY_test, color=\"#222222\", alpha=0.7)\r\n    dataIdx = np.argsort(dataX_test[:,fIdx])\r\n    x = dataX_test[:,fIdx][dataIdx]\r\n    y = dataY_pred[dataIdx]\r\n    ax.plot(x,y, color='blue', linewidth=3)\r\n    # ttl = dataNames[fIdx] + \" \" + \"{:.2f}\".format(regr.coef_[fIdx])\r\n    ttl = dataXNames[fIdx] + \" \" + \"{:.2f}\".format(rf.feature_importances_[fIdx])\r\n    ax.set_title(ttl)\r\n    ax.set_xlabel(\"x\")\r\n    ax.set_ylabel(\"y\")\r\nfig.show()\r\n\r\n# validation\r\nnumFold = 10\r\nmae_arr = []\r\naveMae_arr = []\r\ncoef_list = []\r\nr2score_arr = []\r\nif len(dataX[:,0]) != len(dataY):\r\n    print(\"err! 
dataX[:,0] != dataY\")\r\nidxes = np.random.permutation(len(dataX[:,0]))\r\nnumTsample = int(len(dataX[:,0]) / numFold)\r\nfor i in range(numFold):\r\n    idxes_test = idxes[i*numTsample: i*numTsample + numTsample]\r\n    idxes_train = np.hstack((idxes[0:i*numTsample],idxes[i*numTsample + numTsample:]))\r\n    # idxes_test = [ idxes[i*numTsample + j] for j in range(len(numTsample))]\r\n    if i == numFold - 1:\r\n        idxes_test = idxes[i * numTsample:]\r\n        idxes_train = idxes[:i * numTsample]\r\n\r\n    dataX_train = dataX[idxes_train,:]\r\n    dataX_test = dataX[idxes_test, :]\r\n    dataY_train = dataY[idxes_train]\r\n    dataY_test = dataY[idxes_test]\r\n\r\n    rf = RandomForestRegressor(n_estimators=100, random_state=42)\r\n    rf.fit(dataX_train, dataY_train)\r\n    dataY_pred = rf.predict(dataX_test)\r\n    err_mae = np.mean(abs(dataY_test - dataY_pred))\r\n    ave_mae = np.mean(abs(dataY_test - np.mean(dataY_test)))\r\n\r\n    mae_arr.append(err_mae)\r\n    aveMae_arr.append(ave_mae)\r\n    coef_list.append(rf.feature_importances_)\r\n    r2score_arr.append(r2_score(dataY_test, dataY_pred))\r\n\r\nmae_arr = np.array(mae_arr)\r\naveMae_arr = np.array(aveMae_arr)\r\nr2score_arr = np.array(r2score_arr)\r\nimportance_arr = np.zeros(coef_list[0].shape)\r\nfor i in range(len(coef_list)):\r\n    importance_arr += coef_list[i]\r\nimportance_arr /= len(coef_list)\r\n\r\n# 1 to 10th important features\r\nprint(\"err_mae: \", np.mean(mae_arr))\r\nprint(\"ave_mae: \", np.mean(aveMae_arr))\r\nprint('Importance: \\n', importance_arr)\r\nsortedIdx = np.argsort(importance_arr)\r\nprint(\"sorted idx: \", sortedIdx[::-1])\r\nfor i in range(10):\r\n    print( i, \" th important: \", dataXNames[sortedIdx[::-1][i]])\r\n    print(\"importance: \", importance_arr[sortedIdx[::-1][i]])\r\n\r\n\r\nfile_log = open(\"myMLPrediction_Result.txt\", mode='w', newline=\"\")\r\nwriter_log = csv.writer(file_log)\r\nsumImportance = 0\r\nrow = [\"fname\", \"importance\", \"Sum(importance)\"]\r\nwriter_log.writerow(row)\r\nfor i in range(len(importance_arr)):\r\n    sumImportance += importance_arr[sortedIdx[::-1][i]]\r\n    row = [dataXNames[sortedIdx[::-1][i]], importance_arr[sortedIdx[::-1][i]], sumImportance]\r\n    writer_log.writerow(row)" }, { "alpha_fraction": 0.4317047894001007, "alphanum_fraction": 0.48597970604896545, "avg_line_length": 38.94013977050781, "blob_id": "bd2c26e59b0961176880b38f4c8f185125958543", "content_id": "2165a40560a360da2052339060161bb8ca4f6a12", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 23936, "license_type": "no_license", "max_line_length": 129, "num_lines": 568, "path": "/recon3d/my3dreconstraction.py", "repo_name": "rikumiura/pyCodes", "src_encoding": "UTF-8", "text": "import cv2\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\nfrom pylab import *\r\n# from sfm import triangulate\r\n# from sfm import compute_P_from_essential\r\n# from homography import make_homog\r\nimport sfm\r\nimport homography\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nimport tkinter as Tk\r\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\r\n\r\n# 3D reconstruction\r\n# 1. from the two 2D images, find the matrix F that relates corresponding feature points (epipolar constraint)\r\n#    x2 * F * x1 = 0\r\n# 2. let E be F normalized by the camera intrinsic matrix\r\n# 3. estimate the extrinsic matrix P from E\r\n# 4. recover X from x1, x2 and P (triangulation)\r\n\r\ndef calcRmat(agl_deg, nVec):\r\n    agl = np.deg2rad(agl_deg)\r\n    Rmat = np.array([\r\n        [np.cos(agl) + 
nVec[0] * nVec[0] * (1 - np.cos(agl)), nVec[0] * nVec[1] * (1 - np.cos(agl)) - nVec[2] * np.sin(agl),\r\n nVec[0] * nVec[2] * (1 - np.cos(agl)) + nVec[1] * np.sin(agl)],\r\n [nVec[0] * nVec[1] * (1 - np.cos(agl)) + nVec[2] * np.sin(agl), np.cos(agl) + nVec[1] * nVec[1] * (1 - np.cos(agl)),\r\n nVec[1] * nVec[2] * (1 - np.cos(agl)) - nVec[0] * np.sin(agl)],\r\n [nVec[0] * nVec[2] * (1 - np.cos(agl)) - nVec[1] * np.sin(agl),\r\n nVec[1] * nVec[2] * (1 - np.cos(agl)) + nVec[0] * np.sin(agl), np.cos(agl) + nVec[2] * nVec[2] * (1 - np.cos(agl))]\r\n ])\r\n return Rmat\r\n\r\ndef calcRmatDeg(Rmat):\r\n theta = ( Rmat[0][0] + Rmat[1][1] + Rmat[2][2] - 1.0 ) * 0.5\r\n delta = 1.0e-6\r\n if (abs(theta) - 1 ) > delta: return -1, -200, -200\r\n val_rad = np.arccos(theta)\r\n val_deg = np.rad2deg(val_rad)\r\n\r\n ny = (Rmat[0][2] - Rmat[2][0]) * 0.5 * ( 1.0 / np.sin(theta))\r\n nx = (Rmat[2][1] - Rmat[1][2]) * 0.5 * ( 1.0 / np.sin(theta))\r\n nz = (Rmat[1][0] - Rmat[0][1]) * 0.5 * ( 1.0 / np.sin(theta))\r\n nvec = np.array([nx, ny, nz])\r\n\r\n Rmat2 = calcRmat(val_deg, nvec)\r\n RmatDef = Rmat - Rmat2\r\n\r\n DefNrm = np.linalg.norm(RmatDef, \"fro\")\r\n if DefNrm > delta: return -1, val_deg, DefNrm\r\n\r\n return 1, val_deg, DefNrm\r\n\r\nclass TKExample():\r\n\r\n def __init__(self):\r\n super().__init__()\r\n self.initUI()\r\n\r\n def initUI(self):\r\n # ่จญๅฎš\r\n self.Focal = 200\r\n self.Imgwidth, self.Imgheight = 640, 480\r\n self.Ang_cam2 = 0\r\n self.Trans_x_cam2 = 0\r\n self.Trans_y_cam2 = 0\r\n self.Trans_z_cam2 = 4\r\n #ใ‚ซใƒกใƒฉๅ†…้ƒจใƒ‘ใƒฉใƒกใƒผใ‚ฟ\r\n self.K = np.array([[self.Focal, 0, self.Imgwidth / 2],\r\n [0, self.Focal, self.Imgheight / 2],\r\n [0, 0, 1]], float)\r\n #ใ‚ซใƒกใƒฉๅค–้ƒจใƒ‘ใƒฉใƒกใƒผใ‚ฟ\r\n self.P1 = np.array([[1, 0, 0, 0],\r\n [0, 1, 0, 0],\r\n [0, 0, 1, 0]])\r\n\r\n nVec = np.array([1,0,0])\r\n Rmat = calcRmat(self.Ang_cam2, nVec)\r\n # Rmat, jac = cv2.Rodrigues(np.array([[aglRadx, aglRady, aglRadz]], float))\r\n self.P2 = np.array([[Rmat[0][0], Rmat[0][1], Rmat[0][2], self.Trans_x_cam2],\r\n [Rmat[1][0], Rmat[1][1], Rmat[1][2], self.Trans_y_cam2],\r\n [Rmat[2][0], Rmat[2][1], Rmat[2][2], self.Trans_z_cam2]])\r\n\r\n # 3dใ‚ชใƒ–ใ‚ธใ‚งใ‚ฏใƒˆ\r\n self.ll = [[0, 1], [0, 2], [0, 3], [1, 4], [1, 5], [2, 4],\r\n [2, 6], [3, 5], [3, 6], [4, 7], [5, 7], [6, 7]]\r\n\r\n self.Xpt = [[2, 0, 2, 3, 0.6, 3, 3, 3],\r\n [2, 2, 3, 2, 4, 2, 3, 3],\r\n [4, 1, 4, 4, 1, 1, 4, 1],\r\n [1, 1, 1, 1, 1, 1, 1, 1]]\r\n\r\n # Xp_add = [[1, 1, 2.5, 1.8, 1.8, 3, 3],\r\n # [2, 2.5, 2.5, 3, 3, 3, 2.5],\r\n # [2.5, 2.5, 4, 2.5, 1, 2.5, 2.5],\r\n # [1, 1, 1, 1, 1, 1, 1]]\r\n # self.Xpt[0].extend(Xp_add[0])\r\n # self.Xpt[1].extend(Xp_add[1])\r\n # self.Xpt[2].extend(Xp_add[2])\r\n # self.Xpt[3].extend(Xp_add[3])\r\n\r\n # self.llcolor = [\"g\",\"c\",\"c\",\"m\",\"m\",\"g\",\"c\",\"g\",\"c\",\"m\",\"m\",\"g\"]\r\n self.llcolor = [\"m\", \"m\", \"m\", \"m\", \"m\", \"m\", \"m\", \"m\", \"m\", \"m\", \"m\", \"m\"]\r\n\r\n # 3dใ‹ใ‚‰2d็”ปๅƒใ‚’ไฝœๆˆ\r\n fig2d = plt.figure(figsize=(10, 6))\r\n self.ax1 = fig2d.add_subplot(325)\r\n self.ax2 = fig2d.add_subplot(326)\r\n self.draw2d()\r\n\r\n self.ax_wld = fig2d.add_subplot(321, projection='3d')\r\n self.draw3d_obj_wld()\r\n\r\n self.ax_rcn = fig2d.add_subplot(322, projection='3d')\r\n self.draw3d_reconstracted()\r\n\r\n self.ax_rcn_est = fig2d.add_subplot(323, projection='3d')\r\n self.draw3d_reconstracted_estP()\r\n\r\n # self.ax_cam1 = fig2d.add_subplot(334, projection='3d')\r\n # self.ax_cam2 = fig2d.add_subplot(335, projection='3d')\r\n # 
self.draw3d_obj_cam()\r\n\r\n root = Tk.Tk()\r\n\r\n # Canvasใ‚’็”Ÿๆˆ\r\n self.canvas = FigureCanvasTkAgg(fig2d, master=root)\r\n # canvas.get_tk_widget().pack(side=Tk.BOTTOM, expand=0)\r\n # canvas._tkcanvas.pack(side=Tk.BOTTOM, expand=0)\r\n self.canvas.get_tk_widget().grid(row=1, column=0, columnspan=6, pady=(15, 15), padx=(25, 25),\r\n sticky=Tk.N + Tk.S + Tk.E + Tk.W)\r\n\r\n self.bt = Tk.Button(root, text='UPDATE', command=self.updatePrm)\r\n self.bt.grid(row=2, column=0, columnspan=6)\r\n self.lb00 = Tk.Label(root, text='focal')\r\n self.lb00.grid(row=3, column=0, sticky=Tk.W)\r\n self.et00 = Tk.Entry(root)\r\n self.et00.insert(Tk.END, \"200\")\r\n self.et00.grid(row=3, column=1, sticky=Tk.W)\r\n\r\n self.lb01 = Tk.Label(root, text='img width')\r\n self.lb01.grid(row=4, column=0, sticky=Tk.W)\r\n self.et01 = Tk.Entry(root)\r\n self.et01.insert(Tk.END, \"640\")\r\n self.et01.grid(row=4, column=1, sticky=Tk.W)\r\n\r\n self.lb02 = Tk.Label(root, text='img height')\r\n self.lb02.grid(row=4, column=2, sticky=Tk.W)\r\n self.et02 = Tk.Entry(root)\r\n self.et02.insert(Tk.END, \"480\")\r\n self.et02.grid(row=4, column=3, sticky=Tk.W)\r\n\r\n self.lb10 = Tk.Label(root, text='Cam2 external Param')\r\n self.lb10.grid(row=8, column=0, sticky=Tk.W)\r\n\r\n self.lb11 = Tk.Label(root, text='ang')\r\n self.lb11.grid(row=9, column=0, sticky=Tk.W)\r\n self.et11 = Tk.Entry(root)\r\n self.et11.insert(Tk.END, \"0\")\r\n self.et11.grid(row=9, column=1, sticky=Tk.W)\r\n\r\n self.lb14 = Tk.Label(root, text='trans x')\r\n self.lb14.grid(row=10, column=0, sticky=Tk.W)\r\n self.et14 = Tk.Entry(root)\r\n self.et14.insert(Tk.END, \"0\")\r\n self.et14.grid(row=10, column=1, sticky=Tk.W)\r\n\r\n self.lb15 = Tk.Label(root, text='trans y')\r\n self.lb15.grid(row=10, column=2, sticky=Tk.W)\r\n self.et15 = Tk.Entry(root)\r\n self.et15.insert(Tk.END, \"0\")\r\n self.et15.grid(row=10, column=3, sticky=Tk.W)\r\n\r\n self.lb16 = Tk.Label(root, text='trans z')\r\n self.lb16.grid(row=10, column=4, sticky=Tk.W)\r\n self.et16 = Tk.Entry(root)\r\n self.et16.insert(Tk.END, \"4\")\r\n self.et16.grid(row=10, column=5, sticky=Tk.W)\r\n\r\n root.mainloop()\r\n\r\n def updatePrm(self):\r\n focal = float(self.et00.get())\r\n imgwidth, imgheight = float(self.et01.get()), float(self.et02.get())\r\n ang_cam2 = float(self.et11.get())\r\n trans_x_cam2 = float(self.et14.get())\r\n trans_y_cam2 = float(self.et15.get())\r\n trans_z_cam2 = float(self.et16.get())\r\n self.setCamprm(focal, imgwidth, imgheight,\r\n ang_cam2, trans_x_cam2, trans_y_cam2, trans_z_cam2)\r\n self.draw2d()\r\n # self.draw3d_obj_wld()\r\n # self.draw3d_obj_cam()\r\n self.draw3d_reconstracted()\r\n self.draw3d_reconstracted_estP()\r\n self.canvas.draw()\r\n\r\n def setCamprm(self, focal, imgwidth, imgheight,\r\n ang_cam2, trans_x_cam2, trans_y_cam2, trans_z_cam2):\r\n self.Focal = focal\r\n self.Imgwidth, self.Imgheight = imgwidth, imgheight\r\n self.Ang_cam2 = ang_cam2\r\n self.Trans_x_cam2 = trans_x_cam2\r\n self.Trans_y_cam2 = trans_y_cam2\r\n self.Trans_z_cam2 = trans_z_cam2\r\n self.K = np.array([[self.Focal, 0, self.Imgwidth / 2], [0, self.Focal, self.Imgheight / 2], [0, 0, 1]], float)\r\n\r\n nVec = np.array([1,0,0])\r\n Rmat = calcRmat(self.Ang_cam2, nVec)\r\n # Rmat, jac = cv2.Rodrigues(np.array([[aglRadx, aglRady, aglRadz]], float))\r\n self.P2 = np.array([[Rmat[0][0], Rmat[0][1], Rmat[0][2], self.Trans_x_cam2],\r\n [Rmat[1][0], Rmat[1][1], Rmat[1][2], self.Trans_y_cam2],\r\n [Rmat[2][0], Rmat[2][1], Rmat[2][2], self.Trans_z_cam2]])\r\n\r\n def 
draw3d_obj_cam(self):\r\n self.ax_cam1.cla()\r\n self.ax_cam2.cla()\r\n\r\n # ใ€€ใ‚ซใƒกใƒฉๅบงๆจ™ ( X', Y', Z', 1 )\r\n x1p = self.P1.dot(self.Xpt)\r\n x2p = self.P2.dot(self.Xpt)\r\n\r\n self.ax_cam1.set_title(\"obj cam1 cam\")\r\n self.ax_cam1.set_xlim([-5, 5])\r\n self.ax_cam1.set_ylim([-5, 5])\r\n self.ax_cam1.set_zlim([-5, 5])\r\n self.ax_cam1.set_xlabel(\"x\")\r\n self.ax_cam1.set_ylabel(\"y\")\r\n self.ax_cam1.set_zlabel(\"z\")\r\n self.ax_cam1.grid(which = \"major\", axis = \"x\")\r\n self.ax_cam1.grid(which=\"major\", axis=\"y\")\r\n org = np.array([0, 0, 0])\r\n xv = np.array([1, 0, 0])\r\n yv = np.array([0, 1, 0])\r\n zv = np.array([0, 0, 1])\r\n self.ax_cam1.quiver(org[0], org[1], org[2], xv[0], xv[1], xv[2], length=1, color='orange')\r\n self.ax_cam1.quiver(org[0], org[1], org[2], yv[0], yv[1], yv[2], length=1, color='orange')\r\n self.ax_cam1.quiver(org[0], org[1], org[2], zv[0], zv[1], zv[2], length=1, color='orange')\r\n\r\n self.ax_cam2.set_title(\"obj cam2 cam\")\r\n self.ax_cam2.set_xlim([-5, 5])\r\n self.ax_cam2.set_ylim([-5, 5])\r\n self.ax_cam2.set_zlim([-5, 5])\r\n self.ax_cam2.set_xlabel(\"x\")\r\n self.ax_cam2.set_ylabel(\"y\")\r\n self.ax_cam2.set_zlabel(\"z\")\r\n self.ax_cam2.grid(which = \"major\", axis = \"x\")\r\n self.ax_cam2.grid(which=\"major\", axis=\"y\")\r\n org = np.array([0, 0, 0])\r\n xv = np.array([1, 0, 0])\r\n yv = np.array([0, 1, 0])\r\n zv = np.array([0, 0, 1])\r\n self.ax_cam2.quiver(org[0], org[1], org[2], xv[0], xv[1], xv[2], length=1, color='orange')\r\n self.ax_cam2.quiver(org[0], org[1], org[2], yv[0], yv[1], yv[2], length=1, color='orange')\r\n self.ax_cam2.quiver(org[0], org[1], org[2], zv[0], zv[1], zv[2], length=1, color='orange')\r\n\r\n for i,l in enumerate(self.ll):\r\n x = np.array([x1p[0][l[0]], x1p[0][l[1]]])\r\n y = np.array([x1p[1][l[0]], x1p[1][l[1]]])\r\n z = np.array([x1p[2][l[0]], x1p[2][l[1]]])\r\n self.ax_cam1.plot(x, y, z, marker='.', markersize=5, color=self.llcolor[i])\r\n x = np.array([x2p[0][l[0]], x2p[0][l[1]]])\r\n y = np.array([x2p[1][l[0]], x2p[1][l[1]]])\r\n z = np.array([x2p[2][l[0]], x2p[2][l[1]]])\r\n self.ax_cam2.plot(x, y, z, marker='.', markersize=5, color=self.llcolor[i])\r\n\r\n def draw2d(self):\r\n self.ax1.cla()\r\n self.ax2.cla()\r\n\r\n # ใ‚ซใƒกใƒฉ่กŒๅˆ—\r\n A1 = self.K.dot(self.P1)\r\n A2 = self.K.dot(self.P2)\r\n\r\n # ใ€€็”ปๅƒๅบงๆจ™ ( sx, sy, s )\r\n x1p = A1.dot(self.Xpt)\r\n x2p = A2.dot(self.Xpt)\r\n\r\n # sใงๆญฃ่ฆๅŒ– (x, y, 1)\r\n for i in range(3):\r\n x1p[i] /= x1p[2]\r\n x2p[i] /= x2p[2]\r\n\r\n self.ax1.set_xlim([0, self.Imgwidth])\r\n self.ax1.set_ylim([0, self.Imgheight])\r\n self.ax1.set_xlabel(\"x_img\")\r\n self.ax1.set_ylabel(\"y_img\")\r\n self.ax1.set_title(\"obj cam1 img\")\r\n self.ax1.grid(which = \"major\", axis = \"x\")\r\n self.ax1.grid(which=\"major\", axis=\"y\")\r\n\r\n self.ax2.set_xlim([0, self.Imgwidth])\r\n self.ax2.set_ylim([0, self.Imgheight])\r\n self.ax2.set_xlabel(\"x_img\")\r\n self.ax2.set_ylabel(\"y_img\")\r\n self.ax2.set_title(\"obj cam2 img\")\r\n self.ax2.grid(which=\"major\", axis=\"x\")\r\n self.ax2.grid(which=\"major\", axis=\"y\")\r\n #self.ax1.axis('scaled')\r\n for i,l in enumerate(self.ll):\r\n x = np.array([x1p[0][l[0]], x1p[0][l[1]]])\r\n y = np.array([x1p[1][l[0]], x1p[1][l[1]]])\r\n # self.ax1.plot(x, y, marker='.', markersize=5, color='red')\r\n self.ax1.plot(x, y, marker='.', markersize=5, color=self.llcolor[i])\r\n x = np.array([x2p[0][l[0]], x2p[0][l[1]]])\r\n y = np.array([x2p[1][l[0]], x2p[1][l[1]]])\r\n #self.ax2.plot(x, y, marker='.', 
markersize=5, color='blue')\r\n self.ax2.plot(x, y, marker='.', markersize=5, color=self.llcolor[i])\r\n\r\n # draw epipolar line\r\n ## calcurate F: x1 * F * x2\r\n F = sfm.compute_fundamental(x1p, x2p)\r\n ## calc epi poler\r\n e1 = sfm.compute_epipole(F)\r\n e2 = sfm.compute_epipole(F.T)\r\n\r\n print(\"F:\", F)\r\n\r\n numLine = len(x1p[0,:])\r\n for i in range(numLine):\r\n # epiline on img1\r\n line = dot(F, x2p[:,i])\r\n t = linspace(0, self.Imgwidth, 100)\r\n lt = array([(line[2] + line[0] * tt) / (-line[1]) for tt in t])\r\n # ndx = (lt >= 0) & (lt < self.Imgheight)\r\n # self.ax1.plot(t[ndx], lt[ndx], linewidth=1)\r\n self.ax1.plot(t, lt, linewidth=1)\r\n self.ax1.plot(e1[0]/e1[2], e1[1]/e1[2],'r*')\r\n\r\n # epiline on img1\r\n line = dot(F.T, x1p[:,i])\r\n t = linspace(0, self.Imgwidth, 100)\r\n lt = array([(line[2] + line[0] * tt) / (-line[1]) for tt in t])\r\n self.ax2.plot(t, lt, linewidth=1)\r\n self.ax2.plot(e2[0]/e2[2], e2[1]/e2[2],'r*')\r\n\r\n def draw3d_obj_wld(self):\r\n self.ax_wld.cla()\r\n\r\n self.ax_wld.set_xlabel(\"x_wld\")\r\n self.ax_wld.set_ylabel(\"y_wld\")\r\n self.ax_wld.set_zlabel(\"z_wld\")\r\n self.ax_wld.set_xlim([-5, 5])\r\n self.ax_wld.set_ylim([-5, 5])\r\n self.ax_wld.set_zlim([-5, 5])\r\n self.ax_wld.set_title(\"obj wld\")\r\n self.ax_wld.set_xticks(np.arange(-5, 5 + 1, 1))\r\n self.ax_wld.set_yticks(np.arange(-5, 5 + 1, 1))\r\n for i, l in enumerate(self.ll):\r\n x = np.array([self.Xpt[0][l[0]], self.Xpt[0][l[1]]])\r\n y = np.array([self.Xpt[1][l[0]], self.Xpt[1][l[1]]])\r\n z = np.array([self.Xpt[2][l[0]], self.Xpt[2][l[1]]])\r\n self.ax_wld.plot(x, y, z, marker='.', markersize=5, color=self.llcolor[i])\r\n\r\n # ใƒฏใƒผใƒซใƒ‰ๅบงๆจ™\r\n orgWld = np.array([0, 0, 0])\r\n axWld_ax1 = np.array([1, 0, 0])\r\n axWld_ax2 = np.array([0, 1, 0])\r\n axWld_ax3 = np.array([0, 0, 1])\r\n self.ax_wld.quiver(orgWld[0], orgWld[1], orgWld[2], axWld_ax1[0], axWld_ax1[1], axWld_ax1[2], length=1, color='g')\r\n self.ax_wld.quiver(orgWld[0], orgWld[1], orgWld[2], axWld_ax2[0], axWld_ax2[1], axWld_ax2[2], length=1, color='g')\r\n self.ax_wld.quiver(orgWld[0], orgWld[1], orgWld[2], axWld_ax3[0], axWld_ax3[1], axWld_ax3[2], length=1, color='g')\r\n\r\n # # ่ปธใฎ็งปๅ‹•๏ผˆๅŽŸ็‚นใ€็ต‚็‚นใ‚’้€†ๆ–นๅ‘ใซ็งปๅ‹•๏ผ‰\r\n # # cam1\r\n # orgPos_st = np.array([orgWld[0],orgWld[1],orgWld[2],1])\r\n # orgPos_edx = np.array([axWld_ax1[0], axWld_ax1[1], axWld_ax1[2], 1])\r\n # orgPos_edy = np.array([axWld_ax2[0], axWld_ax2[1], axWld_ax2[2], 1])\r\n # orgPos_edz = np.array([axWld_ax3[0], axWld_ax3[1], axWld_ax3[2], 1])\r\n # Rmat, jac = cv2.Rodrigues(np.array([[-np.rad2deg(self.Ang_x_cam1),\r\n # -np.rad2deg(self.Ang_y_cam1),\r\n # -np.rad2deg(self.Ang_z_cam1)]], float))\r\n # Pmat = np.array([[Rmat[0][0], Rmat[0][1], Rmat[0][2], -self.Trans_x_cam1],\r\n # [Rmat[1][0], Rmat[1][1], Rmat[1][2], -self.Trans_y_cam1],\r\n # [Rmat[2][0], Rmat[2][1], Rmat[2][2], -self.Trans_z_cam1]])\r\n # orgCam = Pmat.dot(orgPos_st)\r\n # orgPos_edx_cam1 = Pmat.dot(orgPos_edx)\r\n # orgPos_edy_cam1 = Pmat.dot(orgPos_edy)\r\n # orgPos_edz_cam1 = Pmat.dot(orgPos_edz)\r\n # axCam_ax1 = orgPos_edx_cam1 - orgCam\r\n # axCam_ax2 = orgPos_edy_cam1 - orgCam\r\n # axCam_ax3 = orgPos_edz_cam1 - orgCam\r\n # self.ax_wld.quiver(orgCam[0], orgCam[1], orgCam[2], axCam_ax1[0], axCam_ax1[1], axCam_ax1[2], length=1, color='orange')\r\n #\r\n # # cam1\r\n # orgPos_st = np.array([orgWld[0],orgWld[1],orgWld[2],1])\r\n # orgPos_edx = np.array([axWld_ax1[0], axWld_ax1[1], axWld_ax1[2], 1])\r\n # orgPos_edy = 
np.array([axWld_ax2[0], axWld_ax2[1], axWld_ax2[2], 1])\r\n # orgPos_edz = np.array([axWld_ax3[0], axWld_ax3[1], axWld_ax3[2], 1])\r\n # Rmat, jac = cv2.Rodrigues(np.array([[-np.rad2deg(self.Ang_x_cam2),\r\n # -np.rad2deg(self.Ang_y_cam2),\r\n # -np.rad2deg(self.Ang_z_cam2)]], float))\r\n # Pmat = np.array([[Rmat[0][0], Rmat[0][1], Rmat[0][2], -self.Trans_x_cam2],\r\n # [Rmat[1][0], Rmat[1][1], Rmat[1][2], -self.Trans_y_cam2],\r\n # [Rmat[2][0], Rmat[2][1], Rmat[2][2], -self.Trans_z_cam2]])\r\n # orgCam = Pmat.dot(orgPos_st)\r\n # orgPos_edx_cam2 = Pmat.dot(orgPos_edx)\r\n # orgPos_edy_cam2 = Pmat.dot(orgPos_edy)\r\n # orgPos_edz_cam2 = Pmat.dot(orgPos_edz)\r\n # axCam_ax1 = orgPos_edx_cam2 - orgCam\r\n # axCam_ax2 = orgPos_edy_cam2 - orgCam\r\n # axCam_ax3 = orgPos_edz_cam2 - orgCam\r\n # self.ax_wld.quiver(orgCam[0], orgCam[1], orgCam[2], axCam_ax1[0], axCam_ax1[1], axCam_ax1[2], length=1,color='orange')\r\n # self.ax_wld.quiver(orgCam[0], orgCam[1], orgCam[2], axCam_ax2[0], axCam_ax2[1], axCam_ax2[2], length=1,color='orange')\r\n # self.ax_wld.quiver(orgCam[0], orgCam[1], orgCam[2], axCam_ax3[0], axCam_ax3[1], axCam_ax3[2], length=1,color='orange')\r\n #\r\n\r\n def draw3d_reconstracted(self):\r\n self.ax_rcn.cla()\r\n\r\n # ใ‚ซใƒกใƒฉ่กŒๅˆ—๏ผˆๅ†…้ƒจใƒปๅค–้ƒจ๏ผ‰\r\n M1 = self.K.dot(self.P1)\r\n M2 = self.K.dot(self.P2)\r\n\r\n #ใ€€็”ปๅƒๅบงๆจ™ ( sx, sy, s )\r\n x1p = M1.dot(self.Xpt)\r\n x2p = M2.dot(self.Xpt)\r\n \r\n # sใงๆญฃ่ฆๅŒ– (x, y, 1)\r\n for i in range(3):\r\n x1p[i] /= x1p[2]\r\n x2p[i] /= x2p[2]\r\n\r\n # ไธ‰่ง’ๆธฌ้‡ใจๆญฃ่ฆๅŒ– (X,Y,Z,1)\r\n X = sfm.triangulate(x1p, x2p, M1, M2)\r\n\r\n self.ax_rcn.set_xlabel(\"x_wld_rcn\")\r\n self.ax_rcn.set_ylabel(\"y_wld_rcn\")\r\n self.ax_rcn.set_zlabel(\"z_wld_rcn\")\r\n self.ax_rcn.set_xlim([-5, 5])\r\n self.ax_rcn.set_ylim([-5, 5])\r\n self.ax_rcn.set_zlim([-5, 5])\r\n self.ax_rcn.set_title(\"obj wld rcn\")\r\n for l in self.ll:\r\n x = np.array([X[0][l[0]], X[0][l[1]]])\r\n y = np.array([X[1][l[0]], X[1][l[1]]])\r\n z = np.array([X[2][l[0]], X[2][l[1]]])\r\n self.ax_rcn.plot(x, y, z, marker='.', markersize=5, color='red')\r\n\r\n def draw3d_reconstracted_estP(self):\r\n self.ax_rcn_est.cla()\r\n\r\n # ใ‚ซใƒกใƒฉ่กŒๅˆ—๏ผˆๅ†…้ƒจใƒปๅค–้ƒจ๏ผ‰\r\n M1 = self.K.dot(self.P1)\r\n M2 = self.K.dot(self.P2)\r\n\r\n # ใ€€็”ปๅƒๅบงๆจ™ ( sx, sy, s )\r\n x1p = M1.dot(self.Xpt)\r\n x2p = M2.dot(self.Xpt)\r\n\r\n # sใงๆญฃ่ฆๅŒ– (x, y, 1)\r\n for i in range(3):\r\n x1p[i] /= x1p[2]\r\n x2p[i] /= x2p[2]\r\n\r\n # pts1 = []\r\n # pts2 = []\r\n # for i in range(len(x1p[0])):\r\n # pts1.append([x1p[0][i], x1p[1][i]])\r\n # pts2.append([x2p[0][i], x2p[1][i]])\r\n # pts1 = np.int32(pts1)\r\n # pts2 = np.int32(pts2)\r\n\r\n # ็”ปๅƒ1, ็”ปๅƒ2ใฎ็‰นๅพด็‚นใ‚’ๅฏพๅฟœไป˜ใ‘ใ‚‹่กŒๅˆ—Fใ‚’่จˆ็ฎ—\r\n # F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.FM_RANSAC)\r\n # mask = np.reshape(mask, (1, len(mask)))[0]\r\n # idx_mask = np.arange(len(mask))\r\n # idx_mask = idx_mask[mask == 1]\r\n\r\n x1n = np.dot(inv(self.K), x1p)\r\n x2n = np.dot(inv(self.K), x2p)\r\n\r\n # RANSACใงEใ‚’ๆŽจๅฎš\r\n # Method_EstE = \"RAN\"\r\n Method_EstE = \"\"\r\n E = \"\"\r\n inliers = \"\"\r\n if Method_EstE is \"RAN\":\r\n model = sfm.RansacModel()\r\n E, inliers = sfm.F_from_ransac(x1n, x2n, model)\r\n else:\r\n E = sfm.compute_fundamental(x1n, x2n)\r\n inliers = [ True for i in range(len(x1n[0,:]))]\r\n\r\n print(\"E: \", E)\r\n\r\n # # ใ‚ซใƒกใƒฉ่กŒๅˆ—ใ‚’่จˆ็ฎ—ใ™ใ‚‹๏ผˆP2ใฏ4ใคใฎ่งฃใฎใƒชใ‚นใƒˆ๏ผ‰\r\n P1 = array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]])\r\n P2, S, R1, R2, U, V = 
sfm.compute_P_from_essential_mod(E)\r\n\r\n print(\"S: \", S)\r\n print(\"S fro: \", np.linalg.norm(S, \"fro\"))\r\n\r\n print(\"R1: \", R1)\r\n print(\"R2: \", R2)\r\n print(\"R1 deg: \", calcRmatDeg(R1))\r\n print(\"R2 deg: \", calcRmatDeg(R2))\r\n P2_R = self.P2[:,0:3]\r\n print(\"P2_R: \", P2_R)\r\n # print(\"U: \", U)\r\n # print(\"V: \", V)\r\n # print(\"S*R1: \", S.dot(R1))\r\n # print(\"S*R2: \", S.dot(R2))\r\n\r\n\r\n # 2ใคใฎใ‚ซใƒกใƒฉใฎๅ‰ใซ็‚นใฎใ‚ใ‚‹่งฃใ‚’้ธใถ\r\n ind = 0\r\n maxres = 0\r\n for i in range(4):\r\n # triangulate inliers and compute depth for each camera\r\n # ใ‚คใƒณใƒฉใ‚คใ‚ขใ‚’ไธ‰่ง’ๆธฌ้‡ใ—ๅ„ใ‚ซใƒกใƒฉใ‹ใ‚‰ใฎๅฅฅ่กŒใใ‚’่จˆ็ฎ—ใ™ใ‚‹\r\n X = sfm.triangulate(x1n[:, inliers], x2n[:, inliers], P1, P2[i])\r\n d1 = np.dot(P1, X)[2]\r\n d2 = np.dot(P2[i], X)[2]\r\n if sum(d1 > 0) + sum(d2 > 0) > maxres:\r\n maxres = sum(d1 > 0) + sum(d2 > 0)\r\n ind = i\r\n infront = (d1 > 0) & (d2 > 0)\r\n\r\n #ๅ…จ่กจ็คบ็”จ\r\n M1 = self.K.dot(P1)\r\n M2_est = self.K.dot(P2[ind])\r\n X = sfm.triangulate(x1p, x2p, M1, M2_est)\r\n\r\n # print(\"P2 est:\", P2[ind])\r\n # print(\"P2: \", self.P2)\r\n\r\n # for i in range(4):\r\n # print(\"P2 est[ \", i, \" ]: \", P2[i])\r\n #\r\n # M2 = self.K.dot(self.P2)\r\n # print(\"M2: \", M2)\r\n\r\n self.ax_rcn_est.set_xlabel(\"x_wld_rcn\")\r\n self.ax_rcn_est.set_ylabel(\"y_wld_rcn\")\r\n self.ax_rcn_est.set_zlabel(\"z_wld_rcn\")\r\n # self.ax_rcn_est.set_xlim([-5, 5])\r\n # self.ax_rcn_est.set_ylim([-5, 5])\r\n # self.ax_rcn_est.set_zlim([-5, 5])\r\n self.ax_rcn_est.set_title(\"obj wld rcn est\")\r\n for l in self.ll:\r\n x = np.array([X[0][l[0]], X[0][l[1]]])\r\n y = np.array([X[1][l[0]], X[1][l[1]]])\r\n z = np.array([X[2][l[0]], X[2][l[1]]])\r\n self.ax_rcn_est.plot(x, y, z, marker='.', markersize=5, color='red')\r\n\r\n # # ๅฏพๅฟœใŒ่ฆ‹ใคใ‹ใ‚‰ใชใ‹ใฃใŸ็‚น\r\n # x = X[0,[ not val for val in mask]]\r\n # y = X[1,[ not val for val in mask]]\r\n # z = X[2,[ not val for val in mask]]\r\n # self.ax_rcn_est.plot(x, y, z, marker='.', markersize=5, color='black', linestyle = \"\")\r\n\r\n # ใ‚ซใƒกใƒฉใ‚ˆใ‚Šๅฅฅใซใ‚ใ‚‹ใจๆŽจๅฎšใ•ใ‚ŒใŸ็‚น\r\n # X = sfm.triangulate(x1[:, idx_mask], x2[:, idx_mask], P1, P2[i])\r\n x = X[0,[ not val for val in infront]]\r\n y = X[1,[ not val for val in infront]]\r\n z = X[2,[ not val for val in infront]]\r\n self.ax_rcn_est.plot(x, y, z, marker='.', markersize=5, color='blue', linestyle = \"\")\r\n\r\n # #\r\n # for i, inf in enumerate(infront):\r\n # if inf :\r\n # x = np.array([X[0][i], X[0][i]])\r\n # y = np.array([X[1][i], X[1][i]])\r\n # z = np.array([X[2][i], X[2][i]])\r\n # self.ax_rcn_est.scatter(x, y, z, markersize=5, color='black')\r\n\r\nif __name__ == '__main__':\r\n ex = TKExample()" }, { "alpha_fraction": 0.5384199023246765, "alphanum_fraction": 0.5646644830703735, "avg_line_length": 29.058822631835938, "blob_id": "f73b06f455c365cb7f8c3236ade9935f2e3365e2", "content_id": "02b3ebc076b5ec23aeea8e2e325b76d32d5a3b6f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3976, "license_type": "no_license", "max_line_length": 64, "num_lines": 119, "path": "/mlPrediction/toTxt_weather_pd.py", "repo_name": "rikumiura/pyCodes", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport csv\r\nimport glob\r\nimport os\r\nimport pandas as pd\r\n\r\nfNameList = 
[\r\n\"ๅนณๅ‡ๆฐ—ๆธฉ(โ„ƒ)\",\r\n\"ๆœ€้ซ˜ๆฐ—ๆธฉ(โ„ƒ)\",\r\n\"้™ๆฐด้‡ใฎๅˆ่จˆ(mm)\",\r\n\"ๆ—ฅ็…งๆ™‚้–“(ๆ™‚้–“)\",\r\n\"ๆœ€ๆทฑ็ฉ้›ช(cm)\",\r\n\"้™้›ช้‡ๅˆ่จˆ(cm)\",\r\n\"ๅนณๅ‡้ขจ้€Ÿ(m/s)\",\r\n\"ๆœ€ๅคง้ขจ้€Ÿ(m/s)\",\r\n\"ๆœ€ๅคง็žฌ้–“้ขจ้€Ÿ(m/s)\",\r\n\"ๅนณๅ‡่’ธๆฐ—ๅœง(hPa)\",\r\n\"ๅนณๅ‡ๆนฟๅบฆ(๏ผ…)\",\r\n\"ๆœ€ๅฐ็›ธๅฏพๆนฟๅบฆ(๏ผ…)\",\r\n\"ๅนณๅ‡็พๅœฐๆฐ—ๅœง(hPa)\",\r\n\"ๅนณๅ‡ๆตท้ขๆฐ—ๅœง(hPa)\",\r\n\"ๆœ€ไฝŽๆตท้ขๆฐ—ๅœง(hPa)\",\r\n\"ๅนณๅ‡้›ฒ้‡(10ๅˆ†ๆฏ”)\",\r\n\"10ๅˆ†้–“้™ๆฐด้‡ใฎๆœ€ๅคง(mm)\",\r\n#\"ๅˆ่จˆๅ…จๅคฉๆ—ฅๅฐ„้‡(MJ/ใŽก)\",\r\n]\r\n\r\nymdName = \"ๅนดๆœˆๆ—ฅ\"\r\n\r\nfNameList_new = [\r\n\"Ave(tmp)[cels]\",\r\n\"Max(tmp)[cels]\",\r\n\"Sum(rain)[mm]\",\r\n\"Len(day)[hour]\",\r\n\"Max(snow)[cm]\",\r\n\"Sum(snow)[cm]\",\r\n\"windSpeed[m/s]\",\r\n\"Max(WS)[m/s]\",\r\n\"Max(inst WS)[m/s]\",\r\n\"Ave(stream P)[hPa]\",\r\n\"Ave(humid)[๏ผ…]\",\r\n\"Min(humid)[๏ผ…]\",\r\n\"Ave(P)[hPa]\",\r\n\"Ave(Sea P)[hPa]\",\r\n\"Min(Sea P)[hPa]\",\r\n\"Ave(Clowd)[10min]\",\r\n\"Max(rain10min)[mm]\",\r\n#\"Sum(sol radiation)[MJ/ใŽก]\",\r\n]\r\n\r\nfile_log = open(\"log.txt\", mode='w', newline=\"\")\r\nwriter_log = csv.writer(file_log)\r\n\r\ndataPath = \"data_weather_test/\"\r\nfor f in glob.glob(os.path.join(dataPath, \"*.csv\")):\r\n name, ext = os.path.splitext(f)\r\n pdData_raw = pd.read_csv(f,header=2, encoding=\"cp932\")\r\n # 2/29ใ‚’ใ‚นใ‚ญใƒƒใƒ—\r\n for i, val in enumerate(pdData_raw[ymdName][2:]):\r\n year, month, day = val.split(\"/\")\r\n year = int(year)\r\n month = int(month)\r\n day = int(day)\r\n if (month == 2) and (day == 29):\r\n pdData_raw = pdData_raw.drop(i, axis=0)\r\n # print(\"ๅนณๅ‡ๆฐ—ๆธฉ(โ„ƒ)\", pdData_raw[\"ๅนณๅ‡ๆฐ—ๆธฉ(โ„ƒ)\"][2:])\r\n pdData = pd.DataFrame({\r\n fNameList_new[0]: pdData_raw[fNameList[0]][2:],\r\n fNameList_new[1]: pdData_raw[fNameList[1]][2:],\r\n fNameList_new[2]: pdData_raw[fNameList[2]][2:],\r\n fNameList_new[3]: pdData_raw[fNameList[3]][2:],\r\n fNameList_new[4]: pdData_raw[fNameList[4]][2:],\r\n fNameList_new[5]: pdData_raw[fNameList[5]][2:],\r\n fNameList_new[6]: pdData_raw[fNameList[6]][2:],\r\n fNameList_new[7]: pdData_raw[fNameList[7]][2:],\r\n fNameList_new[8]: pdData_raw[fNameList[8]][2:],\r\n fNameList_new[9]: pdData_raw[fNameList[9]][2:],\r\n fNameList_new[10]: pdData_raw[fNameList[10]][2:],\r\n fNameList_new[11]: pdData_raw[fNameList[11]][2:],\r\n fNameList_new[12]: pdData_raw[fNameList[12]][2:],\r\n fNameList_new[13]: pdData_raw[fNameList[13]][2:],\r\n fNameList_new[14]: pdData_raw[fNameList[14]][2:],\r\n fNameList_new[15]: pdData_raw[fNameList[15]][2:],\r\n fNameList_new[16]: pdData_raw[fNameList[16]][2:],\r\n# fNameList_new[17]: pdData_raw[fNameList[17]][2:],\r\n })\r\n pdData = pdData.reset_index(drop=True)\r\n\r\n # ็ฉบ็™ฝใฏ็›ดๅ‰ใฎๅ€คใง่ฃœ้–“\r\n for fnm_new in fNameList_new:\r\n for i,val in enumerate(pdData[fnm_new]):\r\n if np.isnan(val):\r\n if i > 0:\r\n pdData[fnm_new][i] = pdData[fnm_new][i-1]\r\n else:\r\n pdData[fnm_new][i] = pdData[fnm_new][i+1]\r\n\r\n pdDataList = pdData.values.tolist()\r\n pdHead = pdData.head(1)\r\n file_w = open(name +\".txt\", mode='w', newline=\"\")\r\n writer = csv.writer(file_w)\r\n writer.writerow(pdHead)\r\n for i in range(len(pdDataList)):\r\n writer.writerow(pdDataList[i])\r\n # utf8ใงไฟๅญ˜ใงใใชใ„\r\n # pdData.to_csv(name +\".txt\", index=False, encoding='utf-8')\r\n\r\n # log\r\n print(\"pdData.shape\", pdData.shape)\r\n #print(\"pdData.describe()\",pdData.describe())\r\n writer_log.writerow([name])\r\n writer_log.writerow(pdData.shape)\r\n 
writer_log.writerows(pdData.describe().values.tolist())\r\n pdData_np = np.array(pdDataList)\r\n isNanIdx = np.argwhere(np.isnan(pdData_np))\r\n isInfIdx = np.argwhere(np.isinf(pdData_np))\r\n writer_log.writerow([\"isNanIdx: \", isNanIdx])\r\n writer_log.writerow([\"isInfIdx: \", isInfIdx])\r\n" }, { "alpha_fraction": 0.5816320776939392, "alphanum_fraction": 0.6083948016166687, "avg_line_length": 45.88471984863281, "blob_id": "1ecbc583b92bf4567f394e94f804c60f156c883f", "content_id": "d8767a1cb8bf0818c9ab4e4c43999e5cd875c9fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17487, "license_type": "no_license", "max_line_length": 192, "num_lines": 373, "path": "/kerasPrj/myHandDrtection/mypartsDetectionSSD_class1.py", "repo_name": "rikumiura/pyCodes", "src_encoding": "UTF-8", "text": "import h5py\nimport numpy as np\nimport shutil\nfrom matplotlib import pyplot as plt\nfrom keras.preprocessing import image\nfrom imageio import imread\nimport os, sys\nsys.path.append('../ssd_keras')\nfrom misc_utils.tensor_sampling_utils import sample_tensors\nfrom math import ceil\n\nfrom keras.optimizers import Adam, SGD\nfrom keras import backend as K\nfrom keras.models import load_model\nfrom keras.callbacks import ModelCheckpoint, LearningRateScheduler, TerminateOnNaN, CSVLogger\n\nfrom models.keras_ssd300 import ssd_300\nfrom keras_loss_function.keras_ssd_loss import SSDLoss\nfrom keras_layers.keras_layer_AnchorBoxes import AnchorBoxes\nfrom keras_layers.keras_layer_DecodeDetections import DecodeDetections\nfrom keras_layers.keras_layer_DecodeDetectionsFast import DecodeDetectionsFast\nfrom keras_layers.keras_layer_L2Normalization import L2Normalization\n\nfrom ssd_encoder_decoder.ssd_input_encoder import SSDInputEncoder\nfrom ssd_encoder_decoder.ssd_output_decoder import decode_detections, decode_detections_fast\n\nfrom data_generator.object_detection_2d_data_generator import DataGenerator\nfrom data_generator.object_detection_2d_geometric_ops import Resize\nfrom data_generator.object_detection_2d_photometric_ops import ConvertTo3Channels\nfrom data_generator.data_augmentation_chain_original_ssd import SSDDataAugmentation\nfrom data_generator.object_detection_2d_misc_utils import apply_inverse_transforms\n\n\npyfileName = os.path.basename(__file__).split('.')[0]\ndirName = os.path.dirname(__file__)\n\n# subsample kernels and biases\nclassifier_names = ['conv4_3_norm_mbox_conf',\n 'fc7_mbox_conf',\n 'conv6_2_mbox_conf',\n 'conv7_2_mbox_conf',\n 'conv8_2_mbox_conf',\n 'conv9_2_mbox_conf']\n\nweights_source_path = 'VGG_VOC0712_SSD_300x300_iter_120000.h5'\nweights_destination_path = pyfileName + '.h5'\nshutil.copy(weights_source_path, weights_destination_path)\n\nweights_source_file = h5py.File(weights_source_path, 'r')\nweights_destination_file = h5py.File(weights_destination_path)\n\nconv4_3_norm_mbox_conf_kernel = weights_source_file[classifier_names[0]][classifier_names[0]]['kernel:0']\nconv4_3_norm_mbox_conf_bias = weights_source_file[classifier_names[0]][classifier_names[0]]['bias:0']\n\nprint(\"Shape of the '{}' weights:\".format(classifier_names[0]))\nprint()\nprint(\"kernel:\\t\", conv4_3_norm_mbox_conf_kernel.shape)\nprint(\"bias:\\t\", conv4_3_norm_mbox_conf_bias.shape)\n\nn_classes_source = 21\nclasses_of_interest = [0, 15]\n\nfor name in classifier_names:\n # Get the trained weights for this layer from the source HDF5 weights file.\n kernel = weights_source_file[name][name]['kernel:0'].value\n bias = 
weights_source_file[name][name]['bias:0'].value\n\n # Get the shape of the kernel. We're interested in sub-sampling\n # the last dimension, 'o'.\n height, width, in_channels, out_channels = kernel.shape\n\n # Compute the indices of the elements we want to sub-sample.\n # Keep in mind that each classification predictor layer predicts multiple\n # bounding boxes for every spatial location, so we want to sub-sample\n # the relevant classes for each of these boxes.\n if isinstance(classes_of_interest, (list, tuple)):\n subsampling_indices = []\n for i in range(int(out_channels / n_classes_source)):\n indices = np.array(classes_of_interest) + i * n_classes_source\n subsampling_indices.append(indices)\n subsampling_indices = list(np.concatenate(subsampling_indices))\n elif isinstance(classes_of_interest, int):\n subsampling_indices = int(classes_of_interest * (out_channels / n_classes_source))\n else:\n raise ValueError(\"`classes_of_interest` must be either an integer or a list/tuple.\")\n\n # Sub-sample the kernel and bias.\n # The `sample_tensors()` function used below provides extensive\n # documentation, so don't hesitate to read it if you want to know\n # what exactly is going on here.\n new_kernel, new_bias = sample_tensors(weights_list=[kernel, bias],\n sampling_instructions=[height, width, in_channels, subsampling_indices],\n axes=[[3]],\n # The one bias dimension corresponds to the last kernel dimension.\n init=['gaussian', 'zeros'],\n mean=0.0,\n stddev=0.005)\n\n # Delete the old weights from the destination file.\n del weights_destination_file[name][name]['kernel:0']\n del weights_destination_file[name][name]['bias:0']\n # Create new datasets for the sub-sampled weights.\n weights_destination_file[name][name].create_dataset(name='kernel:0', data=new_kernel)\n weights_destination_file[name][name].create_dataset(name='bias:0', data=new_bias)\n\n# Make sure all data is written to our output file before this sub-routine exits.\nweights_destination_file.flush()\n\nconv4_3_norm_mbox_conf_kernel = weights_destination_file[classifier_names[0]][classifier_names[0]]['kernel:0']\nconv4_3_norm_mbox_conf_bias = weights_destination_file[classifier_names[0]][classifier_names[0]]['bias:0']\n\nprint(\"Shape of the '{}' weights:\".format(classifier_names[0]))\nprint()\nprint(\"kernel:\\t\", conv4_3_norm_mbox_conf_kernel.shape)\nprint(\"bias:\\t\", conv4_3_norm_mbox_conf_bias.shape)\n\n# build SSD model\nimg_height = 300 # Height of the input images\nimg_width = 300 # Width of the input images\nimg_channels = 3 # Number of color channels of the input images\nsubtract_mean = [123, 117, 104] # The per-channel mean of the images in the dataset\nswap_channels = [2, 1, 0] # The color channel order in the original SSD is BGR, so we should set this to `True`, but weirdly the results are better without swapping.\n# TODO: Set the number of classes.\nn_classes = 1 # Number of positive classes, e.g. 
20 for Pascal VOC, 80 for MS COCO\nscales = [0.07, 0.15, 0.33, 0.51, 0.69, 0.87, 1.05] # The anchor box scaling factors used in the original SSD300 for the MS COCO datasets.\n# scales = [0.1, 0.2, 0.37, 0.54, 0.71, 0.88, 1.05] # The anchor box scaling factors used in the original SSD300 for the Pascal VOC datasets.\naspect_ratios = [[1.0, 2.0, 0.5],\n [1.0, 2.0, 0.5, 3.0, 1.0/3.0],\n [1.0, 2.0, 0.5, 3.0, 1.0/3.0],\n [1.0, 2.0, 0.5, 3.0, 1.0/3.0],\n [1.0, 2.0, 0.5],\n [1.0, 2.0, 0.5]] # The anchor box aspect ratios used in the original SSD300; the order matters\ntwo_boxes_for_ar1 = True\nsteps = [8, 16, 32, 64, 100, 300] # The space between two adjacent anchor box center points for each predictor layer.\noffsets = [0.5, 0.5, 0.5, 0.5, 0.5, 0.5] # The offsets of the first anchor box center points from the top and left borders of the image as a fraction of the step size for each predictor layer.\nclip_boxes = False # Whether or not you want to limit the anchor boxes to lie entirely within the image boundaries\nvariances = [0.1, 0.1, 0.2, 0.2] # The variances by which the encoded target coordinates are scaled as in the original implementation\nnormalize_coords = True\n\n\nK.clear_session() # Clear previous models from memory.\nmodel = ssd_300(image_size=(img_height, img_width, img_channels),\n n_classes=n_classes,\n mode='training',\n l2_regularization=0.0005,\n scales=scales,\n aspect_ratios_per_layer=aspect_ratios,\n two_boxes_for_ar1=two_boxes_for_ar1,\n steps=steps,\n offsets=offsets,\n clip_boxes=clip_boxes,\n variances=variances,\n normalize_coords=normalize_coords,\n subtract_mean=subtract_mean,\n swap_channels=swap_channels)\n\nprint(\"Model built.\")\n\n# 2: Load the sub-sampled weights into the model.\nweights_path = weights_destination_path\nmodel.load_weights(weights_path, by_name=True)\nprint(\"Weights file loaded:\", weights_path)\n# 3: Instantiate an Adam optimizer and the SSD loss function and compile the model.\nsgd = SGD(lr=0.001, momentum=0.9, decay=0.0, nesterov=False)\nssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)\nmodel.compile(optimizer=sgd, loss=ssd_loss.compute_loss)\nprint(\"model.summary()\")\nprint(model.summary())\n\n\n# load train dataset\ntrain_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)\nval_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)\n\nVOC_2012_images_dir = dirName + '/../../../pyCodes_dataset/ssd_keras/datasets/VOCdevkit/VOC2012/JPEGImages/'\n\n## The directories that contain the annotations.\nVOC_2012_annotations_dir = dirName + '/../../../pyCodes_dataset/ssd_keras/datasets/VOCdevkit/VOC2012/Annotations/'\n\nVOC_2012_trainval_image_set_filename = dirName + '/../../../pyCodes_dataset/ssd_keras/datasets/VOCdevkit/VOC2012/ImageSets/Main/person_train_new_hand.txt'\nVOC_2012_test_image_set_filename = dirName + '/../../../pyCodes_dataset/ssd_keras/datasets/VOCdevkit/VOC2012/ImageSets/Main/person_val_new_hand.txt'\n\nclasses=['background',\n 'hand', 'head', 'foot']\n\ninclude_classes = [0, 1]\n\ntrain_dataset.parse_hand_xml(images_dirs=[VOC_2012_images_dir],\n image_set_filenames=[VOC_2012_trainval_image_set_filename],\n annotations_dirs=[VOC_2012_annotations_dir],\n classes=classes,\n include_classes=include_classes,\n exclude_truncated=False,\n exclude_difficult=False,\n ret=False)\n\nval_dataset.parse_hand_xml(images_dirs=[VOC_2012_images_dir],\n image_set_filenames=[VOC_2012_test_image_set_filename],\n annotations_dirs=[VOC_2012_annotations_dir],\n classes=classes,\n 
include_classes=include_classes,\n exclude_truncated=False,\n exclude_difficult=True,\n ret=False)\n\n## Optional: Convert the dataset into an HDF5 dataset.\ntrain_dataset.create_hdf5_dataset(file_path='dataset_pascal_voc_12_train_hand.h5',\n resize=False,\n variable_image_size=True,\n verbose=True)\n\nval_dataset.create_hdf5_dataset(file_path='dataset_pascal_voc_12_val_hand.h5',\n resize=False,\n variable_image_size=True,\n verbose=True)\n\n##Set the batch size.\nbatch_size = 10\nssd_data_augmentation = SSDDataAugmentation(img_height=img_height,\n img_width=img_width,\n background=subtract_mean)\n\n# For the validation generator:\nconvert_to_3_channels = ConvertTo3Channels()\nresize = Resize(height=img_height, width=img_width)\n# The encoder constructor needs the spatial dimensions of the model's predictor layers to create the anchor boxes.\npredictor_sizes = [model.get_layer('conv4_3_norm_mbox_conf').output_shape[1:3],\n model.get_layer('fc7_mbox_conf').output_shape[1:3],\n model.get_layer('conv6_2_mbox_conf').output_shape[1:3],\n model.get_layer('conv7_2_mbox_conf').output_shape[1:3],\n model.get_layer('conv8_2_mbox_conf').output_shape[1:3],\n model.get_layer('conv9_2_mbox_conf').output_shape[1:3]]\n\nssd_input_encoder = SSDInputEncoder(img_height=img_height,\n img_width=img_width,\n n_classes=n_classes,\n predictor_sizes=predictor_sizes,\n scales=scales,\n aspect_ratios_per_layer=aspect_ratios,\n two_boxes_for_ar1=two_boxes_for_ar1,\n steps=steps,\n offsets=offsets,\n clip_boxes=clip_boxes,\n variances=variances,\n matching_type='multi',\n pos_iou_threshold=0.5,\n neg_iou_limit=0.5,\n normalize_coords=normalize_coords)\n\ntrain_generator = train_dataset.generate(batch_size=batch_size,\n shuffle=True,\n transformations=[ssd_data_augmentation],\n label_encoder=ssd_input_encoder,\n returns={'processed_images',\n 'encoded_labels'},\n keep_images_without_gt=False)\n\nval_generator = val_dataset.generate(batch_size=batch_size,\n shuffle=False,\n transformations=[convert_to_3_channels,\n resize],\n label_encoder=ssd_input_encoder,\n returns={'processed_images',\n 'encoded_labels'},\n keep_images_without_gt=False)\n\n\ntrain_dataset_size = train_dataset.get_dataset_size()\nval_dataset_size = val_dataset.get_dataset_size()\n\nprint(\"Number of images in the training dataset:\\t{:>6}\".format(train_dataset_size))\nprint(\"Number of images in the validation dataset:\\t{:>6}\".format(val_dataset_size))\n\n## Define a learning rate schedule.\ndef lr_schedule(epoch):\n if epoch < 80:\n return 0.001\n elif epoch < 100:\n return 0.0001\n else:\n return 0.00001\n\n\nmodel_checkpoint = ModelCheckpoint(filepath=pyfileName + '_epoch-{epoch:02d}_loss-{loss:.4f}_val_loss-{val_loss:.4f}.h5',\n monitor='val_loss',\n verbose=1,\n save_best_only=True,\n save_weights_only=False,\n mode='auto',\n period=1)\n#model_checkpoint.best =\n\ncsv_logger = CSVLogger(filename=pyfileName + '_training_log.csv',\n separator=',',\n append=True)\n\nlearning_rate_scheduler = LearningRateScheduler(schedule=lr_schedule,\n verbose=1)\n\nterminate_on_nan = TerminateOnNaN()\n\ncallbacks = [model_checkpoint,\n csv_logger,\n learning_rate_scheduler,\n terminate_on_nan]\n\n## If you're resuming a previous training, set `initial_epoch` and `final_epoch` accordingly.\ninitial_epoch = 0\nfinal_epoch = 5\nsteps_per_epoch = 1000\n\nhistory = model.fit_generator(generator=train_generator,\n steps_per_epoch=steps_per_epoch,\n epochs=final_epoch,\n callbacks=callbacks,\n validation_data=val_generator,\n 
validation_steps=ceil(val_dataset_size/batch_size),\n initial_epoch=initial_epoch)\n\n\norig_images = [] # Store the images here.\ninput_images = [] # Store resized versions of the images here.\n\n# We'll only load one image in this example.\nimg_path = dirName + '/../../../pyCodes_dataset/ssd_keras/fish_bike.jpg'\n\norig_images.append(imread(img_path))\nimg = image.load_img(img_path, target_size=(img_height, img_width))\nimg = image.img_to_array(img)\ninput_images.append(img)\ninput_images = np.array(input_images)\n\ny_pred = model.predict(input_images)\ny_pred_decoded = decode_detections(y_pred,\n confidence_thresh=0.5,\n iou_threshold=0.4,\n top_k=200,\n normalize_coords=normalize_coords,\n img_height=img_height,\n img_width=img_width)\n# confidence_threshold = 0.5\n#\n# y_pred_thresh = [y_pred[k][y_pred[k,:,1] > confidence_threshold] for k in range(y_pred.shape[0])]\n\nnp.set_printoptions(precision=2, suppress=True, linewidth=90)\nprint(\"Predicted boxes:\\n\")\nprint(' class conf xmin ymin xmax ymax')\nprint(y_pred_decoded[0])\n\n# Display the image and draw the predicted boxes onto it.\n\n# Set the colors for the bounding boxes\ncolors = plt.cm.hsv(np.linspace(0, 1, 2)).tolist()\nclasses=['background',\n 'hand']\n\nplt.figure(figsize=(20,12))\nplt.imshow(orig_images[0])\n\ncurrent_axis = plt.gca()\n\nfor box in y_pred_decoded[0]:\n # Transform the predicted bounding boxes for the 300x300 image to the original image dimensions.\n xmin = box[2] * orig_images[0].shape[1] / img_width\n ymin = box[3] * orig_images[0].shape[0] / img_height\n xmax = box[4] * orig_images[0].shape[1] / img_width\n ymax = box[5] * orig_images[0].shape[0] / img_height\n color = colors[int(box[0])]\n label = '{}: {:.2f}'.format(classes[int(box[0])], box[1])\n current_axis.add_patch(plt.Rectangle((xmin, ymin), xmax-xmin, ymax-ymin, color=color, fill=False, linewidth=2))\n current_axis.text(xmin, ymin, label, size='x-large', color='white', bbox={'facecolor':color, 'alpha':1.0})\n\nplt.show()" }, { "alpha_fraction": 0.556808352470398, "alphanum_fraction": 0.5809193253517151, "avg_line_length": 27.274110794067383, "blob_id": "ee4e098e02df8667c1a0d264e33013d7dc9fc18e", "content_id": "9207175847988a5a8a5113623fa0037e0db97e4e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5765, "license_type": "no_license", "max_line_length": 74, "num_lines": 197, "path": "/mlPrediction/myMlPrediction_stat_old.py", "repo_name": "rikumiura/pyCodes", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom sklearn import datasets, linear_model\r\nfrom sklearn.metrics import mean_squared_error, r2_score\r\nimport glob\r\nimport os\r\nimport csv\r\n\r\ndef normData(dataX):\r\n dataX_norm = dataX\r\n for i in range(len(dataX[0, :])):\r\n Max = max(dataX_norm[:, i])\r\n Min = min(dataX_norm[:, i])\r\n if (Max - Min) > 1.0e-6:\r\n dataX_norm[:, i] = (dataX_norm[:, i] - Min) / (Max - Min)\r\n else:\r\n dataX_norm[:, i] = - Max\r\n return dataX_norm\r\n\r\ndataX = []\r\ndataX_2d_list = []\r\ndataNames = []\r\ndataY = []\r\n\r\n# load data X\r\n# 333 .. 
2/28\r\ndataXPath = \"data_weather/\"\r\nnNum = 0\r\nfNum = 0\r\nfor f in glob.glob(os.path.join(dataXPath, \"*.txt\")):\r\n # get data\r\n data_raw = np.loadtxt(f, delimiter=\",\", dtype=None, skiprows=1)\r\n data_feature = data_raw[:334,:]\r\n dataX_2d_list.append(data_feature)\r\n fNum = len(data_feature[0,:])*len(data_feature[:,0])\r\n data = data_feature\r\n nNum += 1\r\n for val in data:\r\n dataX.append(val)\r\n\r\n # get names\r\n file = open(f, mode='r')\r\n data_reader = csv.reader(file,delimiter=\",\")\r\n data_raw = [row for row in data_reader]\r\n dataNames = data_raw[0]\r\ndataX = np.array(dataX)\r\ndataX = np.reshape(dataX, (nNum,fNum))\r\n\r\n# load data Y\r\ndataYName = \"data_sakura/sakura_2018_2001.txt\"\r\ndata_tgt = np.loadtxt(dataYName, delimiter=\",\")\r\ndataY = data_tgt\r\n\r\n\r\n## plot 2dlist data\r\nminY, maxY = min(dataY), max(dataY)\r\ndataY_01 = (dataY - minY) / (maxY - minY)\r\nfigAll_plt = plt.figure()\r\nfigAll_plt.subplots_adjust(wspace=0.4, hspace=0.6)\r\nfor i in range(len(dataNames)):\r\n numcol = 3\r\n numrow = len(dataNames) / numcol + 1\r\n ax = figAll_plt.add_subplot(numrow,numcol,i+1)\r\n for j in range(nNum):\r\n y = dataX_2d_list[j][:,i]\r\n x = range(len(y))\r\n cval = \"\"\r\n if dataY_01[j] >= 0.5:\r\n cval = \"#ee0000\"\r\n else:\r\n cval = \"#0000ee\"\r\n ax.plot(x,y,c=cval, alpha=0.7)\r\n #carr = np.array([dataY_01[j] for nn in range(len(x))])\r\n #ax.scatter(x, y, c=carr, cmap=\"jet\")\r\n ttl = dataNames[i]\r\n ax.set_title(ttl)\r\n ax.set_xlabel(\"x\")\r\n ax.set_ylabel(\"y\")\r\nfigAll_plt.show()\r\nplt.show()\r\n\r\n## calc 1d st\r\nminY, maxY = min(dataY), max(dataY)\r\ndataY_01 = (dataY - minY) / (maxY - minY)\r\nfigStat_plt = plt.figure()\r\nfigStat_plt.subplots_adjust(wspace=0.4, hspace=0.6)\r\nftoShow = [0, 1, 3, 5, 6]\r\nfor i in range(len(ftoShow)):\r\n f_mean = []\r\n f_max = []\r\n f_min = []\r\n f_var = []\r\n for j in range(nNum):\r\n y = dataX_2d_list[j][:,i]\r\n f_mean.append(sum(y)/float(len(y)))\r\n f_max.append(max(y))\r\n f_min.append(min(y))\r\n f_var.append(np.std(y))\r\n f_mean = np.array(f_mean)\r\n f_max = np.array(f_max)\r\n f_min = np.array(f_min)\r\n f_var = np.array(f_var)\r\n\r\n x = range(nNum)\r\n ax = figStat_plt.add_subplot(len(ftoShow), 4, 4*i + 1)\r\n ax.scatter(f_mean, dataY, color=\"#222222\", alpha=0.7)\r\n ttl = dataNames[i] + \"_mean\"\r\n ax.set_title(ttl)\r\n ax.set_xlabel(\"x\")\r\n ax.set_ylabel(\"y\")\r\n\r\n ax = figStat_plt.add_subplot(len(ftoShow), 4, 4 * i + 2)\r\n ax.scatter(f_max, dataY, color=\"#222222\", alpha=0.7)\r\n ttl = dataNames[i] + \"_max\"\r\n ax.set_title(ttl)\r\n ax.set_xlabel(\"x\")\r\n ax.set_ylabel(\"y\")\r\n\r\n ax = figStat_plt.add_subplot(len(ftoShow), 4, 4 * i + 3)\r\n ax.scatter(f_min, dataY, color=\"#222222\", alpha=0.7)\r\n ttl = dataNames[i] + \"_min\"\r\n ax.set_title(ttl)\r\n ax.set_xlabel(\"x\")\r\n ax.set_ylabel(\"y\")\r\n\r\n ax = figStat_plt.add_subplot(len(ftoShow), 4, 4 * i + 4)\r\n ax.scatter(f_var, dataY, color=\"#222222\", alpha=0.7)\r\n ttl = dataNames[i] + \"_std\"\r\n ax.set_title(ttl)\r\n ax.set_xlabel(\"x\")\r\n ax.set_ylabel(\"y\")\r\nfigStat_plt.show()\r\nplt.show()\r\n\r\n# calc statistics. 
Use it as feature set.\r\ndataX_stat = []\r\nfor j in range(nNum):\r\n for i in range(len(dataNames)):\r\n y = dataX_2d_list[j][:,i]\r\n dataX_stat.append(sum(y)/float(len(y)))\r\n dataX_stat.append(max(y))\r\n dataX_stat.append(min(y))\r\n dataX_stat.append(np.std(y))\r\ndataX_stat = np.array(dataX_stat)\r\ndataX_stat = np.reshape(dataX_stat, (nNum, len(dataNames)*4))\r\ndataX = dataX_stat\r\n\r\n# data normalization\r\ndataX = normData(dataX)\r\n# dataXminY, maxY = min(dataY), max(dataY)\r\n# dataY = (dataY - minY) / (maxY - minY)\r\n\r\n# Split the data into training/testing sets\r\nnumTest = 2\r\ndataX_train = dataX[:-numTest]\r\ndataX_test = dataX[-numTest:]\r\n\r\n# Split the targets into training/testing sets\r\ndataY_train = dataY[:-numTest]\r\ndataY_test = dataY[-numTest:]\r\n\r\n# data training and get results\r\nregr = linear_model.LinearRegression()\r\nregr.fit(dataX_train, dataY_train)\r\ndataY_pred = regr.predict(dataX_test)\r\n\r\nprint('Coefficients: \\n', regr.coef_)\r\nprint(\"Mean squared error: %.2f\"\r\n % mean_squared_error(dataY_test, dataY_pred))\r\nprint('Variance score: %.2f' % r2_score(dataY_test, dataY_pred))\r\n\r\nfa = dataY_test - dataY_pred\r\nfa *= fa\r\nfb = dataY_test - ( sum(dataY_test) / len(dataY_test))\r\nfb *= fb\r\nmyr2 = 1- sum(fa)/ sum(fb)\r\n\r\nsortedIdx = np.argsort(abs(regr.coef_))\r\nprint(\"sorted idx: \", sortedIdx[::-1])\r\n\r\nnumShow = 3\r\nfig = plt.figure()\r\nfor i in range(numShow):\r\n fIdx = sortedIdx[::-1][i]\r\n ax = fig.add_subplot(1,numShow,i+1)\r\n ax.scatter(dataX_test[:,fIdx], dataY_test, color=\"#222222\", alpha=0.7)\r\n dataIdx = np.argsort(dataX_test[:,fIdx])\r\n x = dataX_test[:,fIdx][dataIdx]\r\n y = dataY_pred[dataIdx]\r\n ax.plot(x,y, color='blue', linewidth=3)\r\n # ttl = dataNames[fIdx] + \" \" + \"{:.2f}\".format(regr.coef_[fIdx])\r\n ttl = str(fIdx) + \" \" + \"{:.2f}\".format(regr.coef_[fIdx])\r\n ax.set_title(ttl)\r\n ax.set_xlabel(\"x\")\r\n ax.set_ylabel(\"y\")\r\nfig.show()\r\nplt.show()" }, { "alpha_fraction": 0.5957598686218262, "alphanum_fraction": 0.6136255264282227, "avg_line_length": 29.096296310424805, "blob_id": "8572aa1e92d398e5fa4d61d6bdfe1f499a3d1cd0", "content_id": "a7707985ac75de681e7e5f7f562e00cbae5b1fb1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4198, "license_type": "no_license", "max_line_length": 76, "num_lines": 135, "path": "/mlPrediction/myMlPrediction_Test.py", "repo_name": "rikumiura/pyCodes", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom sklearn import datasets, linear_model\r\nfrom sklearn.metrics import mean_squared_error, r2_score\r\nfrom sklearn.ensemble import RandomForestRegressor\r\nimport glob\r\nimport os\r\nimport csv\r\n\r\ndef normData(dataX, fmaxes_ = [], fmins_ = ''):\r\n dataX_norm = dataX\r\n fmaxes = []\r\n fmins = []\r\n for i in range(len(dataX[0, :])):\r\n Max = max(dataX_norm[:, i]) if len(fmaxes_) == 0 else fmaxes_[i]\r\n Min = min(dataX_norm[:, i]) if len(fmins_) == 0 else fmins_[i]\r\n fmaxes.append(Max)\r\n fmins.append(Min)\r\n if (Max - Min) > 1.0e-6:\r\n dataX_norm[:, i] = (dataX_norm[:, i] - Min) / (Max - Min)\r\n else:\r\n dataX_norm[:, i] = - Max\r\n return np.array(dataX_norm), np.array(fmaxes), np.array(fmins)\r\n\r\ndef day2md(day):\r\n daysInMonth = np.array([31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31])\r\n day_res = 0\r\n month_res = 0\r\n day_tmp = round(day) + 1\r\n for i, days in enumerate(daysInMonth):\r\n tmp = int(day_tmp - 
days)\r\n if tmp <= 0:\r\n month_res = i + 1\r\n day_res = day_tmp\r\n break\r\n else:\r\n day_tmp -= days\r\n return month_res, day_res\r\n\r\n\r\ndataX_train = []\r\ndataX_test = []\r\ndataXNames = []\r\ndataX_2d_list = []\r\ndataNames = []\r\ndataY_train = []\r\n\r\n# load data X train\r\ndataXPath = \"data_weather/\"\r\nnNum = 0\r\nfNum = 0\r\nfor f in glob.glob(os.path.join(dataXPath, \"*.txt\")):\r\n # get data\r\n data_raw = np.loadtxt(f, delimiter=\",\", dtype=None, skiprows=1)\r\n #data_feature = data_raw[:334,:]\r\n data_feature = data_raw[:344, :]\r\n dataX_2d_list.append(data_feature)\r\n fNum = len(data_feature[0,:])*len(data_feature[:,0])\r\n data = data_feature\r\n nNum += 1\r\n for val in data:\r\n dataX_train.append(val)\r\n\r\n # get names\r\n file = open(f, mode='r')\r\n data_reader = csv.reader(file,delimiter=\",\")\r\n data_raw = [row for row in data_reader]\r\n dataNames = data_raw[0]\r\n\r\n for day in range(len(data_feature[:,0])):\r\n dataNamesDays = [name + \"_day\" + str(day) for name in dataNames]\r\n dataXNames.extend(dataNamesDays)\r\ndataX_train = np.array(dataX_train)\r\ndataX_train = np.reshape(dataX_train, (nNum,fNum))\r\n\r\nisNanIdx = np.argwhere(np.isnan(dataX_train))\r\nisInfIdx = np.argwhere(np.isinf(dataX_train))\r\nprint(\"isNanIdx: \", isNanIdx)\r\nprint(\"isInfIdx: \", isInfIdx)\r\n\r\n# load data X test\r\ndataXPath = \"data_weather_test/\"\r\nnNum = 0\r\nfNum = 0\r\nfor f in glob.glob(os.path.join(dataXPath, \"*.txt\")):\r\n # get data\r\n data_raw = np.loadtxt(f, delimiter=\",\", dtype=None, skiprows=1)\r\n #data_feature = data_raw[:334,:]\r\n data_feature = data_raw[:344, :]\r\n dataX_2d_list.append(data_feature)\r\n fNum = len(data_feature[0,:])*len(data_feature[:,0])\r\n data = data_feature\r\n nNum += 1\r\n for val in data:\r\n dataX_test.append(val)\r\n\r\ndataX_test = np.array(dataX_test)\r\ndataX_test = np.reshape(dataX_test, (nNum,fNum))\r\n\r\nisNanIdx = np.argwhere(np.isnan(dataX_test))\r\nisInfIdx = np.argwhere(np.isinf(dataX_test))\r\nprint(\"isNanIdx: \", isNanIdx)\r\nprint(\"isInfIdx: \", isInfIdx)\r\n\r\n# load data Y\r\ndataYPath = \"data_sakura/\"\r\nfor f in glob.glob(os.path.join(dataYPath, \"*.txt\")):\r\n data_tgt = np.loadtxt(f, delimiter=\",\")\r\n for val in data_tgt:\r\n dataY_train.append(val)\r\ndataY_train = np.array(dataY_train)\r\n\r\ndataX_train, maxes_train, mins_train = normData(dataX_train)\r\ndataX_test, tmp1, tmp2 = normData(dataX_test, maxes_train, mins_train)\r\n\r\n# regression by RF\r\nrf = RandomForestRegressor(n_estimators=100,random_state=42)\r\nrf.fit(dataX_train, dataY_train)\r\ndataY_pred = rf.predict(dataX_train)\r\nerr_mae = abs(dataY_train - dataY_pred)\r\nave_mae = abs(dataY_train - np.mean(dataY_train))\r\n\r\nprint(\"Train result\")\r\nprint(\"err_mae: \", np.mean(err_mae))\r\nprint(\"ave_mae: \", np.mean(ave_mae))\r\nprint(\"importances: \", rf.feature_importances_)\r\nsortedIdx = np.argsort(abs(rf.feature_importances_))\r\nprint(\"sorted idx: \", sortedIdx[::-1])\r\n\r\n# getResult\r\ndataY_pred = rf.predict(dataX_test)\r\nprint(\"Prediction result\")\r\nfor datay in dataY_pred:\r\n month, day = day2md(datay)\r\n print(\"pred, month, day\", datay, month, day)\r\n" }, { "alpha_fraction": 0.5882315039634705, "alphanum_fraction": 0.6164888739585876, "avg_line_length": 48.25477600097656, "blob_id": "9640811069d1bc03fc0729b7b353eb55b5b1eb95", "content_id": "a2a7d955575bfc82fb659e4efe236dfb3a57fab0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 15465, "license_type": "no_license", "max_line_length": 192, "num_lines": 314, "path": "/kerasPrj/myHandDrtection/myplotAugImg_hand.py", "repo_name": "rikumiura/pyCodes", "src_encoding": "UTF-8", "text": "import h5py\nimport numpy as np\nimport shutil\nfrom matplotlib import pyplot as plt\nfrom keras.preprocessing import image\nfrom imageio import imread\nfrom misc_utils.tensor_sampling_utils import sample_tensors\nfrom math import ceil\n\nfrom keras.optimizers import Adam, SGD\nfrom keras import backend as K\nfrom keras.models import load_model\nfrom keras.callbacks import ModelCheckpoint, LearningRateScheduler, TerminateOnNaN, CSVLogger\n\nfrom models.keras_ssd300 import ssd_300\nfrom keras_loss_function.keras_ssd_loss import SSDLoss\nfrom keras_layers.keras_layer_AnchorBoxes import AnchorBoxes\nfrom keras_layers.keras_layer_DecodeDetections import DecodeDetections\nfrom keras_layers.keras_layer_DecodeDetectionsFast import DecodeDetectionsFast\nfrom keras_layers.keras_layer_L2Normalization import L2Normalization\n\nfrom ssd_encoder_decoder.ssd_input_encoder import SSDInputEncoder\nfrom ssd_encoder_decoder.ssd_output_decoder import decode_detections, decode_detections_fast\n\nfrom data_generator.object_detection_2d_data_generator import DataGenerator\nfrom data_generator.object_detection_2d_geometric_ops import Resize\nfrom data_generator.object_detection_2d_photometric_ops import ConvertTo3Channels\nfrom data_generator.data_augmentation_chain_original_ssd import SSDDataAugmentation\nfrom data_generator.object_detection_2d_misc_utils import apply_inverse_transforms\n\n\nclassifier_names = ['conv4_3_norm_mbox_conf',\n 'fc7_mbox_conf',\n 'conv6_2_mbox_conf',\n 'conv7_2_mbox_conf',\n 'conv8_2_mbox_conf',\n 'conv9_2_mbox_conf']\n\nweights_source_path = 'VGG_VOC0712_SSD_300x300_iter_120000.h5'\nweights_destination_path = 'VGG_VOC0712_SSD_300x300_iter_120000_class2_hand.h5'\nshutil.copy(weights_source_path, weights_destination_path)\n\nweights_source_file = h5py.File(weights_source_path, 'r')\nweights_destination_file = h5py.File(weights_destination_path)\n\nconv4_3_norm_mbox_conf_kernel = weights_source_file[classifier_names[0]][classifier_names[0]]['kernel:0']\nconv4_3_norm_mbox_conf_bias = weights_source_file[classifier_names[0]][classifier_names[0]]['bias:0']\n\nprint(\"Shape of the '{}' weights:\".format(classifier_names[0]))\nprint()\nprint(\"kernel:\\t\", conv4_3_norm_mbox_conf_kernel.shape)\nprint(\"bias:\\t\", conv4_3_norm_mbox_conf_bias.shape)\n\nn_classes_source = 21\nclasses_of_interest = [0, 15]\n\nsubsampling_indices = []\nfor i in range(int(4*21/n_classes_source)):\n indices = np.array(classes_of_interest) + i * n_classes_source\n subsampling_indices.append(indices)\nsubsampling_indices = list(np.concatenate(subsampling_indices))\n\nprint(subsampling_indices)\n\nfor name in classifier_names:\n # Get the trained weights for this layer from the source HDF5 weights file.\n kernel = weights_source_file[name][name]['kernel:0'].value\n bias = weights_source_file[name][name]['bias:0'].value\n\n # Get the shape of the kernel. 
We're interested in sub-sampling\n # the last dimension, 'o'.\n height, width, in_channels, out_channels = kernel.shape\n\n # Compute the indices of the elements we want to sub-sample.\n # Keep in mind that each classification predictor layer predicts multiple\n # bounding boxes for every spatial location, so we want to sub-sample\n # the relevant classes for each of these boxes.\n if isinstance(classes_of_interest, (list, tuple)):\n subsampling_indices = []\n for i in range(int(out_channels / n_classes_source)):\n indices = np.array(classes_of_interest) + i * n_classes_source\n subsampling_indices.append(indices)\n subsampling_indices = list(np.concatenate(subsampling_indices))\n elif isinstance(classes_of_interest, int):\n subsampling_indices = int(classes_of_interest * (out_channels / n_classes_source))\n else:\n raise ValueError(\"`classes_of_interest` must be either an integer or a list/tuple.\")\n\n # Sub-sample the kernel and bias.\n # The `sample_tensors()` function used below provides extensive\n # documentation, so don't hesitate to read it if you want to know\n # what exactly is going on here.\n new_kernel, new_bias = sample_tensors(weights_list=[kernel, bias],\n sampling_instructions=[height, width, in_channels, subsampling_indices],\n axes=[[3]],\n # The one bias dimension corresponds to the last kernel dimension.\n init=['gaussian', 'zeros'],\n mean=0.0,\n stddev=0.005)\n\n # Delete the old weights from the destination file.\n del weights_destination_file[name][name]['kernel:0']\n del weights_destination_file[name][name]['bias:0']\n # Create new datasets for the sub-sampled weights.\n weights_destination_file[name][name].create_dataset(name='kernel:0', data=new_kernel)\n weights_destination_file[name][name].create_dataset(name='bias:0', data=new_bias)\n\n# Make sure all data is written to our output file before this sub-routine exits.\nweights_destination_file.flush()\n\nconv4_3_norm_mbox_conf_kernel = weights_destination_file[classifier_names[0]][classifier_names[0]]['kernel:0']\nconv4_3_norm_mbox_conf_bias = weights_destination_file[classifier_names[0]][classifier_names[0]]['bias:0']\n\nprint(\"Shape of the '{}' weights:\".format(classifier_names[0]))\nprint()\nprint(\"kernel:\\t\", conv4_3_norm_mbox_conf_kernel.shape)\nprint(\"bias:\\t\", conv4_3_norm_mbox_conf_bias.shape)\n\n\nimg_height = 300 # Height of the input images\nimg_width = 300 # Width of the input images\nimg_channels = 3 # Number of color channels of the input images\nsubtract_mean = [123, 117, 104] # The per-channel mean of the images in the dataset\nswap_channels = [2, 1, 0] # The color channel order in the original SSD is BGR, so we should set this to `True`, but weirdly the results are better without swapping.\n# TODO: Set the number of classes.\nn_classes = 1 # Number of positive classes, e.g. 
20 for Pascal VOC, 80 for MS COCO\nscales = [0.07, 0.15, 0.33, 0.51, 0.69, 0.87, 1.05] # The anchor box scaling factors used in the original SSD300 for the MS COCO datasets.\n# scales = [0.1, 0.2, 0.37, 0.54, 0.71, 0.88, 1.05] # The anchor box scaling factors used in the original SSD300 for the Pascal VOC datasets.\naspect_ratios = [[1.0, 2.0, 0.5],\n [1.0, 2.0, 0.5, 3.0, 1.0/3.0],\n [1.0, 2.0, 0.5, 3.0, 1.0/3.0],\n [1.0, 2.0, 0.5, 3.0, 1.0/3.0],\n [1.0, 2.0, 0.5],\n [1.0, 2.0, 0.5]] # The anchor box aspect ratios used in the original SSD300; the order matters\ntwo_boxes_for_ar1 = True\nsteps = [8, 16, 32, 64, 100, 300] # The space between two adjacent anchor box center points for each predictor layer.\noffsets = [0.5, 0.5, 0.5, 0.5, 0.5, 0.5] # The offsets of the first anchor box center points from the top and left borders of the image as a fraction of the step size for each predictor layer.\nclip_boxes = False # Whether or not you want to limit the anchor boxes to lie entirely within the image boundaries\nvariances = [0.1, 0.1, 0.2, 0.2] # The variances by which the encoded target coordinates are scaled as in the original implementation\nnormalize_coords = True\n\n\nK.clear_session() # Clear previous models from memory.\nmodel = ssd_300(image_size=(img_height, img_width, img_channels),\n n_classes=n_classes,\n mode='training',\n l2_regularization=0.0005,\n scales=scales,\n aspect_ratios_per_layer=aspect_ratios,\n two_boxes_for_ar1=two_boxes_for_ar1,\n steps=steps,\n offsets=offsets,\n clip_boxes=clip_boxes,\n variances=variances,\n normalize_coords=normalize_coords,\n subtract_mean=subtract_mean,\n swap_channels=swap_channels)\n\nprint(\"Model built.\")\n\n# 2: Load the sub-sampled weights into the model.\nweights_path = weights_destination_path\nmodel.load_weights(weights_path, by_name=True)\nprint(\"Weights file loaded:\", weights_path)\n# 3: Instantiate an Adam optimizer and the SSD loss function and compile the model.\nsgd = SGD(lr=0.001, momentum=0.9, decay=0.0, nesterov=False)\nssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)\nmodel.compile(optimizer=sgd, loss=ssd_loss.compute_loss)\nprint(\"model.summary()\")\nprint(model.summary())\n\n##########\n\n## make model to train\ntrain_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)\nval_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)\n\nVOC_2012_images_dir = './datasets/VOCdevkit/VOC2012/JPEGImages/'\n\n# The directories that contain the annotations.\nVOC_2012_annotations_dir = './datasets/VOCdevkit/VOC2012/Annotations/'\n\n# The paths to the image sets.\nVOC_2012_trainval_image_set_filename = './datasets/VOCdevkit/VOC2012/ImageSets/Main/person_train_new_hand.txt'\nVOC_2012_test_image_set_filename = './datasets/VOCdevkit/VOC2012/ImageSets/Main/person_val_new_hand.txt'\n\nclasses=['background',\n 'hand']\n\ninclude_classes = [0, 1]\n\ntrain_dataset.parse_hand_xml(images_dirs=[VOC_2012_images_dir],\n image_set_filenames=[VOC_2012_trainval_image_set_filename],\n annotations_dirs=[VOC_2012_annotations_dir],\n classes=classes,\n include_classes=include_classes,\n exclude_truncated=False,\n exclude_difficult=False,\n ret=False)\n\n# val_dataset.parse_hand_xml(images_dirs=[VOC_2012_images_dir],\n# image_set_filenames=[remove_flg_and_save(VOC_2012_test_image_set_filename)],\n# annotations_dirs=[VOC_2012_annotations_dir],\n# classes=classes,\n# include_classes=include_classes,\n# exclude_truncated=False,\n# exclude_difficult=True,\n# ret=False)\n\n# Optional: Convert the dataset into an 
 HDF5 dataset. This will require more disk space, but will\n# speed up the training. Doing this is not relevant in case you activated the `load_images_into_memory`\n# option in the constructor, because in that case the images are in memory already anyway. If you don't\n# want to create HDF5 datasets, comment out the subsequent two function calls.\n# train_dataset.create_hdf5_dataset(file_path='dataset_pascal_voc_12_train_hand.h5',\n#                                   resize=False,\n#                                   variable_image_size=True,\n#                                   verbose=True)\n\n# val_dataset.create_hdf5_dataset(file_path='dataset_pascal_voc_12_val_hand.h5',\n#                                 resize=False,\n#                                 variable_image_size=True,\n#                                 verbose=True)\n\n# 3: Set the batch size.\nbatch_size = 1 # Change the batch size if you like, or if you run into GPU memory issues.\n\n# 4: Set the image transformations for pre-processing and data augmentation options.\n# For the training generator:\nssd_data_augmentation = SSDDataAugmentation(img_height=img_height,\n                                            img_width=img_width,\n                                            background=subtract_mean)\n\n# For the validation generator:\nconvert_to_3_channels = ConvertTo3Channels()\nresize = Resize(height=img_height, width=img_width)\n# 5: Instantiate an encoder that can encode ground truth labels into the format needed by the SSD loss function.\n\n# The encoder constructor needs the spatial dimensions of the model's predictor layers to create the anchor boxes.\npredictor_sizes = [model.get_layer('conv4_3_norm_mbox_conf').output_shape[1:3],\n                   model.get_layer('fc7_mbox_conf').output_shape[1:3],\n                   model.get_layer('conv6_2_mbox_conf').output_shape[1:3],\n                   model.get_layer('conv7_2_mbox_conf').output_shape[1:3],\n                   model.get_layer('conv8_2_mbox_conf').output_shape[1:3],\n                   model.get_layer('conv9_2_mbox_conf').output_shape[1:3]]\n\nssd_input_encoder = SSDInputEncoder(img_height=img_height,\n                                    img_width=img_width,\n                                    n_classes=n_classes,\n                                    predictor_sizes=predictor_sizes,\n                                    scales=scales,\n                                    aspect_ratios_per_layer=aspect_ratios,\n                                    two_boxes_for_ar1=two_boxes_for_ar1,\n                                    steps=steps,\n                                    offsets=offsets,\n                                    clip_boxes=clip_boxes,\n                                    variances=variances,\n                                    matching_type='multi',\n                                    pos_iou_threshold=0.5,\n                                    neg_iou_limit=0.5,\n                                    normalize_coords=normalize_coords)\n\n# 6: Create the generator handles that will be passed to Keras' `fit_generator()` function.\n\ntrain_generator = train_dataset.generate(batch_size=batch_size,\n                                         shuffle=False,\n                                         transformations=[ssd_data_augmentation],\n                                         label_encoder=ssd_input_encoder,\n                                         returns={'processed_images',\n                                                  'processed_labels',\n                                                  'original_images',\n                                                  'original_labels'},\n                                         keep_images_without_gt=False)\n\ncount = 0\nwhile True:\n    batch_images, batch_labels, org_images, org_labels = next(train_generator)\n    print(\"count: \", count)\n    if count in [0, 1, 2]:\n        for i in range(len(batch_images)):\n            plt.figure(figsize=(20, 12))\n            plt.imshow(batch_images[i])\n            xmin = batch_labels[i][:,1]\n            ymin = batch_labels[i][:,2]\n            xmax = batch_labels[i][:,3]\n            ymax = batch_labels[i][:,4]\n            plt.gca().add_patch(plt.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin, color='green', fill=False, linewidth=2))\n\n            plt.figure(figsize=(20, 12))\n            plt.imshow(org_images[i])\n            xmin = org_labels[i][0][1]\n            ymin = org_labels[i][0][2]\n            xmax = org_labels[i][0][3]\n            ymax = org_labels[i][0][4]\n            plt.gca().add_patch( plt.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin, color='green', fill=False, linewidth=2))\n        plt.show()\n        count += 1\n\n\n\n\nval_generator = val_dataset.generate(batch_size=batch_size,\n                                     shuffle=False,\n                                     transformations=[convert_to_3_channels,\n                                                      resize],\n                                     label_encoder=ssd_input_encoder,\n                                     
returns={'processed_images',\n                                              'encoded_labels'},\n                                     keep_images_without_gt=False)\n\nbatch_images, batch_labels = next(val_generator)\nfor i in range(len(batch_images)):\n    plt.figure(figsize=(20,12))\n    plt.imshow(batch_images[i])\nplt.show()" }, { "alpha_fraction": 0.49331849813461304, "alphanum_fraction": 0.6180400848388672, "avg_line_length": 23, "blob_id": "ef254605252bfa8b16ce3afc9b18f6b5f282d3a1", "content_id": "83acbb26a07e689089b69495a4609a55b243793d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1061, "license_type": "no_license", "max_line_length": 68, "num_lines": 36, "path": "/statistics/myTest_example.py", "repo_name": "rikumiura/pyCodes", "src_encoding": "UTF-8", "text": "import numpy as np\r\nfrom scipy.stats import norm\r\nimport matplotlib.pyplot as plt\r\n\r\n## Test for a Poisson distribution\r\n\r\n## The number of occurrences x of an event that happens on average lamda times per month follows the Poisson distribution Po(lamda)\r\n## By the central limit theorem, x_ = 1/n * Σxi (i=1,..,n) follows N(lamda, lamda/n)\r\n\r\nx_base = np.array([349,261,321,309,323,264,294,328,309,376,350,420])\r\nx = np.array([282,288,303,244,282,276,314,310,299,343,372,381])\r\n\r\nn = 12\r\nx_test = x.mean()\r\n\r\nlamda = x_base.mean()\r\npm = lamda\r\npv = lamda\r\nprint(\"lamda: \", lamda)\r\n\r\n# Upper and lower limits of the 95% interval\r\nz_low, z_upp = -1.96, 1.96\r\n# z-transformed value of x\r\nz = ( x_test - lamda ) / np.sqrt(lamda/n)\r\nprint(\"x_test: \", x_test)\r\nprint(\"z_low, z_upp: \", z_low, z_upp)\r\nprint(\"z: \", z)\r\n\r\n# p-value: the probability of taking a value more extreme than the statistic\r\np_val = norm.cdf(z)\r\np_val = 1 - p_val if p_val > 0.5 else p_val\r\nprint(\"p_val: \", p_val)\r\nres_005 = True if p_val > 0.05 else False\r\nres_001 = True if p_val > 0.01 else False\r\nprint(\"p_val > 0.05: \", res_005)\r\nprint(\"p_val > 0.01: \", res_001)" }, { "alpha_fraction": 0.530558705329895, "alphanum_fraction": 0.555680513381958, "avg_line_length": 26.698925018310547, "blob_id": "a20624234157b1cb0a55a30ab94d6cabc7f5ab0d", "content_id": "5bf98cd32601cf31ed41a020339e03ccfff2543d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2745, "license_type": "no_license", "max_line_length": 104, "num_lines": 93, "path": "/statistics/myCetralLimitTheorem.py", "repo_name": "rikumiura/pyCodes", "src_encoding": "UTF-8", "text": "import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.stats import poisson\r\nfrom scipy.stats import norm\r\nimport math\r\n\r\n## Central limit theorem\r\n## The sample mean follows a normal distribution\r\n\r\nDistType = \"Norm\"\r\nDistType = \"Bin\"\r\nDistType = \"Poisson\"\r\n\r\nsNum = 10000\r\nnumSampleAve = 12\r\n\r\nx_true = []\r\nx_distb = []\r\nx_distb_ = []\r\n\r\nif DistType == \"Norm\":\r\n    pm = 0\r\n    pv = 1\r\n    x = np.random.normal(pm, pv, sNum)\r\n    x_ = np.array([np.random.normal(pm, pv, numSampleAve).mean() for i in range(sNum)])\r\n    m_ = x_.mean()\r\n    std_ = x_.std()\r\nelif DistType == \"Bin\":\r\n    n = 100\r\n    p = 0.2\r\n    x = np.random.binomial(n, p, sNum)\r\n    pm = n * p\r\n    pv = n * p * ( 1 - p )\r\n    x_ = np.array([np.random.binomial(n, p, numSampleAve).mean() for i in range(sNum)])\r\n    m_ = x_.mean()\r\n    std_ = x_.std()\r\nelif DistType == \"Poisson\":\r\n    lam = 300\r\n    x = np.random.poisson(lam,sNum)\r\n    pm = lam\r\n    pv = lam\r\n    x_ = np.array([np.random.poisson(lam,numSampleAve).mean() for i in range(sNum)])\r\n    m_ = x_.mean()\r\n    std_ = x_.std()\r\n    # Distribution of the population\r\n    x_true 
= np.arange(x.min(),x.max())\r\n    x_distb = np.array([poisson.pmf(x_true[i], lam) for i in range(len(x_true))])\r\n    # Normal distribution approximated by the central limit theorem\r\n    x_distb_ = np.array([norm.pdf(x_true[i], pm, np.sqrt(pv/numSampleAve)) for i in range(len(x_true))])\r\nsm = np.mean(x)\r\ns = np.std(x,ddof=1)\r\nsstd = np.std(x,ddof=0)\r\n\r\n\r\nfig = plt.figure()\r\nfig.subplots_adjust(top=0.85)\r\nax = fig.add_subplot(1,2,1)\r\nax.hist(x, bins=100, normed = True, label=\"sample\")\r\nax.set_title(\"histogram\")\r\nax.set_xlabel(\"x\")\r\nax.set_ylabel(\"frequency\")\r\nax.plot(x_true,x_distb,color=\"#ff0000\",label=\"population\")\r\ntext = \"population\" + \"\\n\"\r\ntext += \"mean: \" + \"{:.2f}\".format(pm) + \"\\n\"\r\ntext += \"var: \" + \"{:.2f}\".format(pv) + \"\\n\"\r\ntext += \"\\nsample\" + \"\\n\"\r\ntext += \"mean: \" + \"{:.2f}\".format(sm) + \"\\n\"\r\ntext += \"var: \" + \"{:.2f}\".format(sstd*sstd)\r\nax.text(0.1, 0.7, text,\r\n        bbox={'facecolor':'#ffffff', 'alpha':0.5, 'pad':10},\r\n        transform=ax.transAxes,)\r\nax.legend()\r\n\r\nax = fig.add_subplot(1,2,2)\r\nax.hist(x_, bins=100, normed = True, label=\"sample mean\")\r\nax.set_title(\"histogram\")\r\nax.set_xlabel(\"x\")\r\nax.set_ylabel(\"frequency\")\r\nax.set_xlim([x.min(),x.max()])\r\nax.plot(x_true,x_distb_,color=\"#ff0000\", label=\"CLT\")\r\ntext = \"CLT\" + \"\\n\"\r\ntext += \"pm: \" + \"{:.2f}\".format(pm) + \"\\n\"\r\ntext += \"pv/n: \" + \"{:.2f}\".format(pv/numSampleAve) + \"\\n\"\r\ntext += \"\\n\"\r\ntext += \"sample mean\" + \"\\n\"\r\ntext += \"mean: \" + \"{:.2f}\".format(m_) + \"\\n\"\r\ntext += \"var: \" + \"{:.2f}\".format(std_*std_)\r\nax.text(0.1, 0.7, text,\r\n        bbox={'facecolor':'#ffffff', 'alpha':0.5, 'pad':10},\r\n        transform=ax.transAxes,)\r\nax.legend()\r\nfig.show()\r\nplt.show()" }, { "alpha_fraction": 0.48697394132614136, "alphanum_fraction": 0.5440881848335266, "avg_line_length": 20.276596069335938, "blob_id": "49886be31982839a3e8a55da48ca18efaeda4e08", "content_id": "0a526c93695b7cdc160a186216ebc679b4da2a90", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1073, "license_type": "no_license", "max_line_length": 67, "num_lines": 47, "path": "/statistics/myEstimation_var.py", "repo_name": "rikumiura/pyCodes", "src_encoding": "UTF-8", "text": "import numpy as np\r\nfrom scipy.stats import t\r\nfrom scipy.stats import chi2\r\nimport matplotlib.pyplot as plt\r\n\r\n## chi2 = (n - 1)*s^2 / pv follows a chi-squared distribution with n-1 degrees of freedom (n = sample size)\r\n## The 95% confidence interval is then\r\n## (n-1) * s*s / chi2_0.025(n-1) <= pv <= (n-1) * s*s / chi2_0.975(n-1), where chi2_a(n-1) is the upper-a point\r\n\r\nDistType = \"Norm\"\r\nDistType = \"Bin\"\r\n\r\nsNum = 1000\r\n\r\nif DistType == \"Norm\":\r\n    x = np.random.randn(sNum)\r\n    pm = 0\r\n    pv = 1\r\nelif DistType == \"Bin\":\r\n    n = 100\r\n    p = 0.2\r\n    x = np.random.binomial(n, p, sNum)\r\n    pm = n * p\r\n    pv = n * p * ( 1 - p )\r\nsm = np.mean(x)\r\ns = np.std(x,ddof=1)\r\n\r\n# Upper and lower limits of the 95% interval\r\nlow, upp = chi2.ppf(q=[0.025, 0.975], df=len(x) - 1)\r\n\r\n# 95% confidence interval for pv (the n-1 factor must use the sample size sNum, not the binomial parameter n)\r\npv_lower = ( sNum - 1 ) * s*s / upp\r\npv_upper = ( sNum - 1 ) * s*s / low\r\n\r\nprint(\"low, upp: \", low, upp)\r\nprint(\"s*s: \", s*s)\r\nprint(\"pv: \", pv)\r\nprint(\"pv_low, pv_upp: \", pv_lower, pv_upper)\r\n\r\nfig = plt.figure()\r\nax = fig.add_subplot(1,1,1)\r\nax.hist(x, bins=100)\r\nax.set_title(\"histogram\")\r\nax.set_xlabel(\"x\")\r\nax.set_ylabel(\"frequency\")\r\nfig.show()\r\nplt.show()" }, { "alpha_fraction": 0.5261698365211487, 
"alphanum_fraction": 0.5545927286148071, "avg_line_length": 38.06944274902344, "blob_id": "88e73774290f983b7a1aee05be603c80e675493b", "content_id": "68f49fb92e520b849bd72522136f759045678d5c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2885, "license_type": "no_license", "max_line_length": 116, "num_lines": 72, "path": "/facePartsSwap/myfaceSaveLandmark.py", "repo_name": "rikumiura/pyCodes", "src_encoding": "UTF-8", "text": "\r\nimport sys\r\nimport os\r\nimport dlib\r\nimport glob\r\nimport cv2\r\n\r\n\r\n\r\ndef drawparts(imgMat=\"\",shape=\"\",st=\"\",ed=\"\"):\r\n for id in range(st + 1, ed + 1):\r\n cv2.line(imgMat, (shape.part(id - 1).x, shape.part(id - 1).y), (shape.part(id).x, shape.part(id).y),\r\n (255, 0, 0))\r\n # text_st = \"st:\" + str(st)\r\n # text_ed = \"ed:\" + str(ed)\r\n # cv2.putText(imgMat, text_st, (shape.part(st).x,shape.part(st).y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255))\r\n # cv2.putText(imgMat, text_ed, (shape.part(ed).x, shape.part(ed).y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))\r\n\r\ndef savelandmark(predictor_path=\"\",faces_folder_path=\"\"):\r\n detector = dlib.get_frontal_face_detector()\r\n predictor = dlib.shape_predictor(predictor_path)\r\n\r\n for f in glob.glob(os.path.join(faces_folder_path, \"*.jpg\")):\r\n name, ext = os.path.splitext(f)\r\n txtName = name + \".txt\"\r\n fp_w = open(txtName, mode='w')\r\n print(\"Processing file: {}\".format(f))\r\n img = dlib.load_rgb_image(f)\r\n\r\n imgMat = cv2.imread(f)\r\n\r\n # Ask the detector to find the bounding boxes of each face. The 1 in the\r\n # second argument indicates that we should upsample the image 1 time. This\r\n # will make everything bigger and allow us to detect more faces.\r\n dets = detector(img, 1)\r\n print(\"Number of faces detected: {}\".format(len(dets)))\r\n for k, d in enumerate(dets):\r\n print(\"Detection {}: Left: {} Top: {} Right: {} Bottom: {}\".format(\r\n k, d.left(), d.top(), d.right(), d.bottom()))\r\n shape = predictor(img, d)\r\n\r\n for ptId in range(shape.num_parts):\r\n #write landmark positions\r\n str_w = str(shape.part(ptId).x) + \" \" + str(shape.part(ptId).y) + \"\\n\"\r\n fp_w.write(str_w)\r\n #draw landmarks\r\n cv2.circle(imgMat,(shape.part(ptId).x, shape.part(ptId).y),2,(255,0,0))\r\n\r\n drawparts(imgMat, shape, 0, 16) # face line\r\n drawparts(imgMat, shape, 36, 41) # leye\r\n drawparts(imgMat, shape, 17, 21) # leyebrow\r\n drawparts(imgMat, shape, 42, 47) # reye\r\n drawparts(imgMat, shape, 22, 26) # reyebrow\r\n drawparts(imgMat, shape, 27, 30) # nose\r\n drawparts(imgMat, shape, 31, 35) # nose bottom\r\n drawparts(imgMat, shape, 48, 59) # mouth out\r\n drawparts(imgMat, shape, 60, 67) # mouth in\r\n\r\n fp_w.close()\r\n\r\n cv2.imshow(\"landmark\", imgMat)\r\n saveimgname = name + \"_landmarks.jpg\"\r\n cv2.imwrite(saveimgname, imgMat)\r\n cv2.waitKey(0)\r\n\r\n cv2.destroyAllWindows()\r\n\r\n\r\nif __name__ == '__main__':\r\n predictor_path = \"shape_predictor_68_face_landmarks.dat\"\r\n faces_folder_path = \"facefld\"\r\n\r\n savelandmark(predictor_path,faces_folder_path)" } ]
32
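The pyCodes record above ends here. Its SSD scripts transplant a 21-class Pascal VOC detector into a 1-class one by sub-sampling each `*_mbox_conf` kernel along its last (output-channel) axis. A minimal, self-contained sketch of that index arithmetic follows; the toy shapes and random arrays stand in for the real HDF5 tensors, and only the class counts and indices are taken from the scripts themselves:

import numpy as np

n_classes_source = 21          # Pascal VOC: 20 classes + background
n_boxes = 4                    # boxes predicted per spatial cell by this layer
kernel = np.random.randn(3, 3, 512, n_boxes * n_classes_source)
bias = np.random.randn(n_boxes * n_classes_source)

classes_of_interest = [0, 15]  # background + class 15 ('person' in VOC ordering)

# Each box owns a contiguous run of n_classes_source output channels, so the
# kept channel indices repeat the class offsets once per box.
indices = np.concatenate(
    [np.array(classes_of_interest) + i * n_classes_source for i in range(n_boxes)]
)
new_kernel = kernel[..., indices]  # sub-sample the last kernel dimension
new_bias = bias[indices]
print(new_kernel.shape, new_bias.shape)  # (3, 3, 512, 8) (8,)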
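Likewise, myEstimation_var.py in the same record pivots on (n-1)*s^2/pv following a chi-squared(n-1) distribution to interval-estimate a variance. The same construction in a few standalone lines, with seeded toy data and variable names of our own choosing rather than the repo's:

import numpy as np
from scipy.stats import chi2

rng = np.random.default_rng(0)
sample = rng.normal(loc=0.0, scale=2.0, size=1000)  # true variance is 4.0

n = len(sample)
s2 = np.var(sample, ddof=1)  # unbiased sample variance

# (n-1)*s2/sigma^2 ~ chi2(n-1); inverting its 2.5% and 97.5% quantiles
# yields a 95% confidence interval for the population variance.
q_low, q_upp = chi2.ppf([0.025, 0.975], df=n - 1)
ci = ((n - 1) * s2 / q_upp, (n - 1) * s2 / q_low)
print("s2 = %.3f, 95%% CI for variance: [%.3f, %.3f]" % (s2, ci[0], ci[1]))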
iiie/django-redis-sessions-fork
https://github.com/iiie/django-redis-sessions-fork
b1097ec6e1da7b7ed96abc579eaaa54450f3066d
5164b1ba58862ec117a61aff1d1d81d2cb2ead09
c457ae6a0635a949e0f6b85efab878e77d4ebd94
refs/heads/master
2021-01-09T05:11:09.482507
2014-07-22T20:48:10
2014-07-22T20:48:10
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5988371968269348, "alphanum_fraction": 0.6075581312179565, "avg_line_length": 17.849315643310547, "blob_id": "2c2cf8f1d7721125f363f1e679a75e9eb9fbe4af", "content_id": "d0474ba78d6fef98fd98daebded8e235c08e230b", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1376, "license_type": "permissive", "max_line_length": 48, "num_lines": 73, "path": "/redis_sessions_fork/settings.py", "repo_name": "iiie/django-redis-sessions-fork", "src_encoding": "UTF-8", "text": "import os\n\nfrom django.conf import settings\n\n\nSESSION_REDIS_HOST = getattr(\n settings,\n 'SESSION_REDIS_HOST',\n '127.0.0.1'\n)\nSESSION_REDIS_PORT = getattr(\n settings,\n 'SESSION_REDIS_PORT',\n 6379\n)\nSESSION_REDIS_DB = getattr(\n settings,\n 'SESSION_REDIS_DB',\n 0\n)\nSESSION_REDIS_PREFIX = getattr(\n settings,\n 'SESSION_REDIS_PREFIX',\n 'django_sessions'\n)\nSESSION_REDIS_PASSWORD = getattr(\n settings,\n 'SESSION_REDIS_PASSWORD',\n None\n)\n\nSESSION_REDIS_UNIX_DOMAIN_SOCKET_PATH = getattr(\n settings,\n 'SESSION_REDIS_UNIX_DOMAIN_SOCKET_PATH',\n None\n)\n\nSESSION_REDIS_URL = getattr(\n settings,\n 'SESSION_REDIS_URL',\n None\n)\n\nSESSION_REDIS_CONNECTION_POOL = getattr(\n settings,\n 'SESSION_REDIS_CONNECTION_POOL',\n None\n)\n\nSESSION_REDIS_JSON_ENCODING = getattr(\n settings,\n 'SESSION_REDIS_JSON_ENCODING',\n 'latin-1'\n)\n\nif SESSION_REDIS_URL is None:\n # redis clouds ENV variables\n SESSION_REDIS_ENV_URLS = getattr(\n settings,\n 'SESSION_REDIS_ENV_URLS', (\n 'REDISCLOUD_URL',\n 'REDISTOGO_URL',\n 'OPENREDIS_URL',\n 'REDISGREEN_URL',\n 'MYREDIS_URL',\n )\n )\n\n for url in SESSION_REDIS_ENV_URLS:\n redis_env_url = os.environ.get(url)\n if redis_env_url:\n SESSION_REDIS_URL = redis_env_url\n break\n" }, { "alpha_fraction": 0.6948408484458923, "alphanum_fraction": 0.6948408484458923, "avg_line_length": 18.382978439331055, "blob_id": "dac7ce225d34d900f1eca70d9dd0eb2d5e536e1c", "content_id": "c57b90630283f688ce73d7b30f268b4028d0b0a1", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 911, "license_type": "permissive", "max_line_length": 61, "num_lines": 47, "path": "/redis_sessions_fork/backend.py", "repo_name": "iiie/django-redis-sessions-fork", "src_encoding": "UTF-8", "text": "from django.contrib.sessions.backends.base import CreateError\n\nfrom . 
import utils, connection\n\n\n@utils.prefix\ndef expire(key):\n    return connection.redis_server.ttl(key)\n\n\n@utils.prefix\ndef keys(pattern):\n    return connection.redis_server.keys(pattern)\n\n\n@utils.prefix\ndef get(key):\n    value = connection.redis_server.get(key)\n\n    value = utils.force_unicode(value)\n\n    return value\n\n\n@utils.prefix\ndef exists(key):\n    return connection.redis_server.exists(key)\n\n\n@utils.prefix\ndef delete(key):\n    return connection.redis_server.delete(key)\n\n\n@utils.prefix\ndef save(key, expire, data, must_create):\n    expire = int(expire)\n\n    data = utils.force_unicode(data)\n\n    if must_create:\n        if connection.redis_server.setnx(key, data):\n            connection.redis_server.expire(key, expire)\n        else:\n            raise CreateError\n    else:\n        connection.redis_server.setex(key, expire, data)\n" }, { "alpha_fraction": 0.6642335653305054, "alphanum_fraction": 0.6716987490653992, "avg_line_length": 21.92015266418457, "blob_id": "d6516fd3c48d70354f52cbe5361f614ee5caa666", "content_id": "8455b3202ab026561714c48ff23d41f4cf3290ae", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6036, "license_type": "permissive", "max_line_length": 73, "num_lines": 263, "path": "/tests/tests.py", "repo_name": "iiie/django-redis-sessions-fork", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-.\nimport os\nimport time\nfrom imp import reload\n\nfrom django.conf import settings as django_settings\nfrom django.contrib.sessions.backends.base import CreateError\nfrom django.core import management\nfrom django.contrib.sessions.models import Session\n\nfrom redis_sessions_fork import settings as session_settings\nfrom redis_sessions_fork import connection, utils, backend\n\n\nsession_module = utils.import_module(django_settings.SESSION_ENGINE)\nsession = session_module.SessionStore()\n\n\nmanagement.call_command('syncdb', interactive=False)\n\n\ndef test_redis_prefix():\n    assert utils.add_prefix('foo') == \\\n        '%s:foo' % django_settings.SESSION_REDIS_PREFIX\n\n    assert 'foo' == utils.remove_prefix(utils.add_prefix('foo'))\n\n    session_settings.SESSION_REDIS_PREFIX = ''\n\n    assert utils.add_prefix('foo') == 'foo'\n    assert 'foo' == utils.remove_prefix(utils.add_prefix('foo'))\n\n\ndef test_modify_and_keys():\n    assert not session.modified\n\n    session['test'] = 'test_me'\n\n    assert session.modified\n\n    assert session['test'] == 'test_me'\n\n\ndef test_save_and_delete():\n    session['key'] = 'value'\n    session.save()\n\n    assert session.exists(session.session_key)\n\n    session.delete(session.session_key)\n\n    assert not session.exists(session.session_key)\n\n\ndef test_flush():\n    session['key'] = 'another_value'\n    session.save()\n\n    key = session.session_key\n\n    session.flush()\n\n    assert not session.exists(key)\n\n\ndef test_items():\n    session['item1'], session['item2'] = 1, 2\n    session.save()\n\n    # Python 3.* fix\n    assert sorted(list(session.items())) == [('item1', 1), ('item2', 2)]\n\n\ndef test_expiry():\n    session.set_expiry(1)\n\n    assert session.get_expiry_age() == 1\n\n    session['key'] = 'expiring_value'\n    session.save()\n\n    key = session.session_key\n\n    assert session.exists(key)\n\n    time.sleep(2)\n\n    assert not session.exists(key)\n\n\ndef test_save_and_load():\n    session.set_expiry(60)\n    session.setdefault('item_test', 8)\n    session.save()\n\n    session_data = session.load()\n\n    assert session_data.get('item_test') == 8\n\n\ndef test_save_and_load_nonascii():\n    session['nonascii'] = 'тест'\n    session.save()\n\n    
session_data = session.load()\n\n    assert utils.force_unicode(session_data['nonascii']) == \\\n        utils.force_unicode('тест')\n\n\ndef test_save_existing_key():\n    try:\n        session.save(must_create=True)\n\n        assert False\n    except CreateError:\n        pass\n\n\ndef test_redis_url_config():\n    reload(session_settings)\n\n    session_settings.SESSION_REDIS_URL = 'redis://localhost:6379/0'\n\n    reload(connection)\n\n    redis_server = connection.redis_server\n\n    host = redis_server.connection_pool.connection_kwargs.get('host')\n    port = redis_server.connection_pool.connection_kwargs.get('port')\n    db = redis_server.connection_pool.connection_kwargs.get('db')\n\n    assert host == 'localhost'\n    assert port == 6379\n    assert db == 0\n\n\ndef test_unix_socket():\n    # Uncomment this in `redis.conf`:\n    #\n    # unixsocket /tmp/redis.sock\n    # unixsocketperm 755\n    reload(session_settings)\n\n    session_settings.SESSION_REDIS_UNIX_DOMAIN_SOCKET_PATH = \\\n        'unix:///tmp/redis.sock'\n\n    reload(connection)\n\n    redis_server = connection.redis_server\n\n    path = redis_server.connection_pool.connection_kwargs.get('path')\n    db = redis_server.connection_pool.connection_kwargs.get('db')\n\n    assert path == session_settings.SESSION_REDIS_UNIX_DOMAIN_SOCKET_PATH\n\n    assert db == 0\n\n\ntest_connection_pool = connection.redis.ConnectionPool(\n    host=session_settings.SESSION_REDIS_HOST,\n    port=session_settings.SESSION_REDIS_PORT,\n    db=session_settings.SESSION_REDIS_DB,\n    password=session_settings.SESSION_REDIS_PASSWORD\n)\n\n\ndef test_with_connection_pool_config():\n    reload(session_settings)\n\n    session_settings.SESSION_REDIS_CONNECTION_POOL = \\\n        'tests.tests.test_connection_pool'\n\n    reload(connection)\n\n    redis_server = connection.redis_server\n\n    assert redis_server.connection_pool == test_connection_pool\n\n\ndef test_redis_url_config_from_env():\n    reload(session_settings)\n\n    os.environ['MYREDIS_URL'] = 'redis://localhost:6379/0'\n\n    reload(session_settings)\n    reload(connection)\n\n    redis_server = connection.redis_server\n\n    host = redis_server.connection_pool.connection_kwargs.get('host')\n    port = redis_server.connection_pool.connection_kwargs.get('port')\n    db = redis_server.connection_pool.connection_kwargs.get('db')\n\n    assert host == 'localhost'\n    assert port == 6379\n    assert db == 0\n\n\ndef test_serializers():\n    test_object = {'foo': 'bar'}\n\n    for class_name in (\n        'UjsonSerializer',\n    ):\n        try:\n            serializer = utils.import_by_path(\n                'redis_sessions_fork.serializers.%s' % class_name\n            )()\n        except ImportError:\n            continue\n\n        serializer_data = serializer.loads(serializer.dumps(test_object))\n\n        assert test_object == serializer_data\n\n\ndef test_flush_redis_sessions():\n    session['foo'] = 'bar'\n    session.save()\n\n    keys_before_flush = backend.keys('*')\n\n    management.call_command('flush_redis_sessions')\n\n    keys_after_flush = backend.keys('*')\n\n    assert not keys_before_flush == keys_after_flush\n\n    assert len(keys_after_flush) == 0\n\n\ndef test_migrate_to_orm():\n    session['foo'] = 'bar'\n    session.save()\n\n    management.call_command('migrate_sessions_to_orm')\n\n    orm_session = Session.objects.all()[0]\n\n    assert session.decode(orm_session.session_data)['foo'] == 'bar'\n\n\ndef test_migrate_to_redis():\n    management.call_command('flush_redis_sessions')\n\n    management.call_command('migrate_sessions_to_redis')\n\n    orm_session = Session.objects.all()[0]\n\n    check_session = session_module.SessionStore(\n        session_key=orm_session.session_key\n    )\n\n    assert check_session.load()['foo'] == 'bar'\n\n\ndef test_flush_orm_sessions():\n    
management.call_command('flush_orm_sessions')\n\n orm_session = Session.objects.all()\n\n assert orm_session.count() == 0\n" }, { "alpha_fraction": 0.6447916626930237, "alphanum_fraction": 0.6447916626930237, "avg_line_length": 27.235294342041016, "blob_id": "f2460e85cec1f2b10d7c1411a4c958ab05371090", "content_id": "a354bafe96e2a9aa2657e7f1b7c17d19e9db4167", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 960, "license_type": "permissive", "max_line_length": 76, "num_lines": 34, "path": "/redis_sessions_fork/connection.py", "repo_name": "iiie/django-redis-sessions-fork", "src_encoding": "UTF-8", "text": "import redis\n\nfrom . import settings, utils\n\n\ndef get_redis_server():\n if not settings.SESSION_REDIS_CONNECTION_POOL is None:\n return redis.StrictRedis(\n connection_pool=utils.import_by_path(\n settings.SESSION_REDIS_CONNECTION_POOL\n )\n )\n\n if not settings.SESSION_REDIS_URL is None:\n return redis.StrictRedis.from_url(\n settings.SESSION_REDIS_URL\n )\n\n if not settings.SESSION_REDIS_UNIX_DOMAIN_SOCKET_PATH is None:\n return redis.StrictRedis(\n unix_socket_path=settings.SESSION_REDIS_UNIX_DOMAIN_SOCKET_PATH,\n db=settings.SESSION_REDIS_DB,\n password=settings.SESSION_REDIS_PASSWORD\n )\n\n return redis.StrictRedis(\n host=settings.SESSION_REDIS_HOST,\n port=settings.SESSION_REDIS_PORT,\n db=settings.SESSION_REDIS_DB,\n password=settings.SESSION_REDIS_PASSWORD\n )\n\n\nredis_server = get_redis_server()\n" } ]
4
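backend.py in the record above decorates every Redis accessor with @utils.prefix, but utils.py itself is not among the four files captured in this snapshot. What follows is a plausible sketch of such a decorator, inferred from the '%s:foo' behavior that test_redis_prefix asserts; the fork's real implementation may differ, and every name here is ours:

import functools

SESSION_REDIS_PREFIX = 'django_sessions'  # default from the settings module above

def add_prefix(key):
    # An empty prefix must be a no-op, as test_redis_prefix expects.
    return '%s:%s' % (SESSION_REDIS_PREFIX, key) if SESSION_REDIS_PREFIX else key

def prefix(fn):
    """Namespace the first positional argument (the session key) before calling fn."""
    @functools.wraps(fn)
    def wrapped(key, *args, **kwargs):
        return fn(add_prefix(key), *args, **kwargs)
    return wrapped

@prefix
def get(key):
    return key  # stand-in for connection.redis_server.get(key)

assert get('abc123') == 'django_sessions:abc123'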
naveedn/cmsc417-project3
https://github.com/naveedn/cmsc417-project3
c035db6ec1770f9662bac9c675952fe0a9070e02
ae3db71cff3fe2c5709383fe8a3fd0fdf6cffcf4
49360105beaedba4f86f26f4f816eca2f0768f30
refs/heads/master
2016-03-17T09:53:51.364625
2015-05-15T22:36:31
2015-05-15T22:36:31
34,127,644
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5935913324356079, "alphanum_fraction": 0.5960427522659302, "avg_line_length": 34.805641174316406, "blob_id": "fa6b5183f5548acbf63d886aa8ce37b8f83e1394", "content_id": "2a05b344d382a5fc4f7241c736a12855c11aae6c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 22844, "license_type": "no_license", "max_line_length": 93, "num_lines": 638, "path": "/src/components/Router.rb", "repo_name": "naveedn/cmsc417-project3", "src_encoding": "UTF-8", "text": "require File.expand_path('../LSRTable', __FILE__)\nrequire File.expand_path('../VCTable', __FILE__) \nrequire File.expand_path('../Translator',__FILE__) \nrequire File.expand_path('../MessageHandler',__FILE__)\n\nrequire 'logger'\nrequire 'pp'\nrequire 'socket'\nrequire 'stringio'\nrequire 'openssl'\ninclude Socket::Constants\n\n# This class contains most of the information a node would need:\n# - a config attribute which contains information in the config file\n# - a link state routing table object\n# - a virtual circuit table object\n# - a socket for actual connections\n# - a table containing \"trust factor\" of a given node (Used for Part 3)\n\nclass Router\nattr_reader :config\nattr_accessor :link_state_table\nattr_reader :vc_table\nattr_accessor :translator\nattr_accessor :socket\n\n  def initialize(config_hash);\n    @config = config_hash;\n    @gen_log = config_hash[\"logger\"];\n    @logger = @gen_log.clone\n    @logger.level = Logger::INFO;\n    @port = @config[\"sockets\"][\"udp_port\"];\n    @socket = create_conn();\n    @rsa_key = @config[\"private_key\"];\n\n    # data structures\n    @translator = Translator.new(@config);\n    @link_state_table = LinkStateTable.new(@gen_log);\n    @vc_table = VCTable.new(@gen_log);\n    @handler = MessageHandler.new(@gen_log);\n  end\n  \n  # creates a UDP socket -- necessary for Control Packets\n  def create_conn\n    if @socket then\n      return @socket\n    else\n      socket = UDPSocket.new\n      socket.bind(\"\",@port);\n      return socket;\n    end\n  end\n\n  # main body of router, receives messages, parses them,\n  # executes response, and writes info back to socket\n  def handle_data_message(data)\n    \n    @logger.info(\"ROUTER:PARSEDATA\"){\"parse message cmd\"}\n    # figure out what kind of command we are receiving \n    # and handle accordingly\n    \n    msg = data.split(\" \");\n    command = msg[0];\n    dst_ip = msg[1];\n    \n    case command \n    when \"SENDMSG\"\n      @logger.info(\"ROUTER:SMG\"){\"handling SENDMSG command\"}\n      num_words = msg.length;\n\n      str_arr = msg.slice(2,num_words); #get all words separated by space\n      join_str = str_arr.join(\" \");\n      payload = join_str;\n\n      send_message(dst_ip,payload);\n    when \"ENCSMG\"\n      @logger.warn(\"ROUTER:ESMG\"){\"handling Encrypted SENDMSG command\"}\n      num_words = msg.length\n      str_arr = msg.slice(2,num_words);\n      join_str = str_arr.join(\" \");\n      payload = join_str;\n\n      send_encrypted_message(dst_ip,payload)\n    when \"PING\"\n      @logger.info(\"ROUTER:PNG\"){\"handling PING command\"}\n      num_ping = msg[2].to_i;\n      delay = msg[3].to_i;\n      send_ping(dst_ip,num_ping,delay)\n    when \"TRACEROUTE\"\n      @logger.info(\"ROUTER:TRC\"){\"handling TRACEROUTE command\"}\n      send_trace(dst_ip);\n    when \"DUMP\"\n      @logger.info(\"ROUTER:DUMP\"){\"internal dump method\"}\n      @link_state_table.print_routing_table();\n      @link_state_table.print_forwarding_table();\n    else\n      @logger.error(\"ROUTER\"){\"invalid command received\"}\n    end\n  end\n\n  # main body of router, receives messages, parses them,\n  # executes response, and writes info back to socket\n  def handle_control_message()\n\n    # this is 
the main response handler, will be running all the time\n begin\n # wait for flooding packet\n @logger.debug(\"ROUTER:PARSESOCK\"){\"waiting for message\"}\n msg, incoming_addr = @socket.recvfrom_nonblock(@config[\"sockets\"][\"mtu\"])\n # figure out what kind of packet we are receiving and handle \n #accordingly\n # LSP : link-state-packet :: initiate flooding protocol\n # VCP : virtual-circuit pkt :: create forwarding entry\n # VCKP : vc kill packet :: tear down circuit\n # ACK : message response :: message has been received\n # NACK : message not received:: need to resend\n \n packet = msg.split(\"||\");\n header = packet[0];\n src_ip = packet[1];\n dst_ip = packet[2];\n payload = packet[3];\n \n case header\n when \"LSP\"\n @logger.debug(\"ROUTER:LSP\"){\"handling incoming LSP packet\"}\n handle_incoming_flood_packet(payload,src_ip);\n when \"VCP\"\n @logger.debug(\"ROUTER:VCP\"){\"handling incoming VCP packet\"}\n create_virtual_circuit(src_ip,dst_ip, incoming_addr);\n when \"VCKP\"\n @logger.info(\"ROUTER:VCKP\"){\"handling incoming VCKP packet\"}\n destroy_virtual_circuit(src_ip,dst_ip, incoming_addr);\n when \"SMG\"\n @logger.info(\"ROUTER:SMG\"){\"handling incoming SENDMESSAGE packet\"}\n handle_send_message(src_ip,dst_ip,payload);\n when \"ESMG\"\n @logger.warn(\"ROUTER:ESMG\"){\"handling incoming encrypted SENDMESSAGE packet\"}\n handle_encrypted_send_message(src_ip,dst_ip,payload);\n when \"PNG\"\n @logger.info(\"ROUTER:PNG\"){\"handling incoming PING packet\"}\n handle_ping(src_ip,dst_ip);\n when \"TRC\"\n @logger.info(\"ROUTER:TRC\"){\"handling incoming TRACEROUTE packet\"}\n handle_traceroute(src_ip,dst_ip,payload);\n when \"TRCRESP\"\n @logger.info(\"ROUTER:TRCRESP\"){\"handling incoming traceroute RESPONSE packet\"}\n send_trace_response(dst_ip,payload);\n when \"ACK\"\n @logger.info(\"ROUTER:ACK\"){\"ACK Message Received\"}\n handle_ack_message();\n when \"NACK\"\n @logger.warn(\"ROUTER:NACK\"){\"NACK Message Received\"}\n handle_nack_message(src_ip,dst_ip,payload);\n else\n @logger.error(\"ROUTER\"){\"invalid packet received\"}\n end\n\n rescue Errno::EAGAIN \n select = IO.select([@socket],nil,nil,1); #poll for data\n if select\n retry # if data comes in, retry\n end\n end\n end\n\n # read the weights file, update the LSP table if necessary\n # build the packet, and then send it out on the outgoing socket\n def flood()\n weights_file = @config[\"files\"][\"weights_output\"];\n\n # get the weights for neighbors, construct the packet\n @link_state_table.get_weights_for_neighbors(weights_file,@translator);\n packet = @link_state_table.build_link_state_packet()\n\n # send it to all connected interfaces\n @translator.neighbors.values.each do |v|\n packet = packet.gsub '<src_ip>',v[:src];\n packet = packet.gsub '<dst_ip>',v[:dst];\n \n @socket.send(packet,0,v[:dst], @port);\n @logger.debug(\"ROUTER\"){\"sent flooding message to #{v[:dst]}\"}\n end\n end\n \n####################### Method Handlers ###################################\n###########################################################################\n\n def handle_incoming_flood_packet(serialized_payload, ip_addr_of_sender)\n data = Marshal::load(serialized_payload)\n \n # check if data is \"fresh\" by looking at sequence number\n fresh = true\n data.each do |lsp_entry|\n \n hostname = lsp_entry[:node];\n dest = lsp_entry[:dest];\n weight = lsp_entry[:weight];\n seq = lsp_entry[:seq];\n\n fresh = @link_state_table.check_entry(hostname,dest,weight,seq);\n entry = \"entry #{hostname} #{dest} #{weight} #{seq}\"\n \n if 
fresh == false then\n @logger.debug(\"ROUTER:LSP\"){\"#{entry} stale\"}\n elsif fresh == true then\n \n @logger.debug(\"ROUTER:LSP\"){\"#{entry} fresh\"}\n @link_state_table.add_entry(hostname,dest,weight,seq);\n \n # forward the packet on all other outgoing ifaces\n @logger.debug(\"ROUTER:LSP\"){\"about to forward packets\"}\n \n @translator.neighbors.values.each do |v|\n incoming_ip = v[:src];\n outgoing_ip = v[:dst];\n\n unless outgoing_ip == ip_addr_of_sender then\n # reconstruct packet\n packet = \"LSP||#{incoming_ip}||#{outgoing_ip}||#{serialized_payload}\"\n @logger.debug(\"ROUTER:LSP\"){ \"sender ip: #{ip_addr_of_sender}\"}\n @logger.debug(\"ROUTER:LSP\"){ \"outgoing ip: #{outgoing_ip}\" }\n @socket.send(packet,0,outgoing_ip,@port)\n end\n end\n end\n end\n end\n\n # method to create a virtual circuit for a given src and dst\n def create_virtual_circuit(src_ip,dst_ip, incoming_addr)\n \n # translate ip_addresses to hostnames\n src_node = @translator.get_hostname_for_interface(src_ip);\n dst_node = @translator.get_hostname_for_interface(dst_ip);\n @logger.info(\"ROUTER:VCP\"){\"translating IP's to hostnames\"}\n\n # if dst then send its own vc_conn request to src\n if (@config[\"hostname\"] == dst_node) then\n entry = @vc_table.get_entry(src_node,dst_node);\n if entry\n # a full circuit has been made\n puts \"a full circuit has been made for #{config[\"hostname\"]}\"\n else\n create_virtual_circuit(dst_ip,src_ip, @socket.addr);\n end\n else \n # get next hop from shortest path table\n nh_node = @link_state_table.get_next_hop_node(dst_node);\n \n @logger.info(\"ROUTER:VCP\"){\"src=#{src_node},dst=#{dst_node},nhn=#{nh_node}\"}\n # then add entry to VC table\n if nh_node.nil? == false\n entry = @vc_table.create_forwarding_entry(src_node,dst_node,nh_node);\n \n # construct new packet to forward the VC request if entry was made\n if entry.nil? 
== false then\n @logger.info(\"ROUTER:VCP\"){\"entry is not nil!\"} \n # reconstruct packet\n packet = \"VCP||#{src_ip}||#{dst_ip}\"\n next_hop_ip = @translator.get_outgoing_interface_for_hostname(nh_node);\n @logger.error(\"ROUTER:VCP\"){\"next hop ip is: #{next_hop_ip}\"}\n\n @socket.send(packet,0,next_hop_ip, @port);\n @logger.info(\"ROUTER:VCP\"){\"sending VCP packet to #{next_hop_ip}\"}\n\n else\n # otherwise send failure back to src\n send_nack_message(src_ip, incoming_addr,dst_ip,\"VCP\");\n @logger.error(\"ROUTER:NACK\"){\"failure to add entry to VC table\"}\n end\n else\n # otherwise send failure back to src\n send_nack_message(src_ip, incoming_addr,dst_ip,\"VCP\");\n @logger.error(\"ROUTER:NACK\"){\"failure to get the next hop node\"}\n end\n end\n end\n\n # method to send failure back to the previous hop\n def send_nack_message(src_ip, incoming_addr, orig_dst_ip,msg_type)\n data = [msg_type, orig_dst_ip];\n payload = Marshal::dump(data)\n\n neighbor_node = @translator.get_hostname_for_interface(incoming_addr);\n self_ip = @translator.get_interface_for_hostname(neighbor_node);\n\n packet = \"NACK||#{self_ip}||#{src_ip}||#{payload}\"\n @logger.debug(\"ROUTER:NACK\"){\"NACK packet = #{packet}\"}\n @socket.send(packet,0,incoming_addr[3],@port);\n end\n\n # method to acknowledge a packet being sent\n def send_ack_message(src_ip,incoming_addr,msg_type)\n \n neighbor_node = @translator.get_hostname_for_interface(incoming_addr);\n self_ip = @translator.get_interface_for_hostname(neighbor_node);\n\n packet = \"ACK||#{self_ip}||#{incoming_addr}||#{msg_type}\"\n @logger.debug(\"ROUTER:ACK\"){\"ACK packet = #{packet}\"}\n @socket.send(packet,0,incoming_addr[3],@port);\n end\n\n def handle_nack_message(src_ip,dst_ip,payload);\n # translate ip_addresses to hostnames\n src_node = @translator.get_hostname_for_interface(src_ip);\n dst_node = @translator.get_hostname_for_interface(dst_ip);\n @logger.info(\"ROUTER:NACK\"){\"translating IP's to hostnames\"}\n\n # if dst then retransmit\n if (@translator.get_incoming_interfaces.values.include? 
dst_ip) then\n data = Marshal::load(payload)\n msg_type = data[0];\n orig_dst_ip = data[1];\n\n @logger.error(\"ROUTER:NACK\"){\"failure to send #{msg_type} message to #{orig_dst_ip}\"}\n end\n end\n\n def send_message(dst_ip, payload)\n # get name of dst\n dst_node = @translator.get_hostname_for_interface(dst_ip);\n \n # get next hop from shortest path table\n next_hop_node = @link_state_table.get_next_hop_node(dst_node);\n next_hop_ip = @translator.get_outgoing_interface_for_hostname(next_hop_node);\n\n # get internal address for next hop node\n self_ip = @translator.get_interface_for_hostname(next_hop_node);\n \n # create packet\n packet = \"SMG||#{self_ip}||#{dst_ip}||#{payload}\"\n @logger.info(\"SENDMSG\"){\"SENDMESSAGE packet is: #{packet}\"} \n # send out on socket\n @socket.send(packet,0,next_hop_ip,@port);\n end\n\n def send_encrypted_message(dst_ip,msg);\n # get name of dst\n dst_node = @translator.get_hostname_for_interface(dst_ip);\n \n # get path for to destination node\n # represented like [src, in1, in2, in3, dst];\n path_to_dst = @link_state_table.get_path_for_node(dst_node);\n \n if path_to_dst != nil\n encryption_array = [];\n\n while path_to_dst.length > 0 \n node = path_to_dst.pop # work from dest backwards\n \n # encrypt the payload with AES cipher\n cipher = OpenSSL::Cipher::AES.new(128,:CBC)\n cipher.encrypt\n key = cipher.random_key\n iv = cipher.random_iv\n\n msg = cipher.update(msg) + cipher.final\n cipher_token = \"#{key}%#{iv}\"\n\n # use public RSA key of recipient to encode the cipher\n public_key_file = \"#{@config[\"files\"][\"key_server\"]}/#{node}.pem\"\n pkey = OpenSSL::PKey::RSA.new File.read public_key_file\n encrypted_cipher_token = pkey.public_encrypt(cipher_token);\n \n encryption_array.unshift([node, encrypted_cipher_token]);\n end\n \n next_hop_node = encryption_array[0][0];\n next_hop_ip = @translator.get_outgoing_interface_for_hostname(next_hop_node);\n self_ip = @translator.get_interface_for_hostname(next_hop_node);\n\n # then send the payload to the nhn\n enc_arr = Marshal::dump(encryption_array);\n\n payload = \"#{enc_arr}~~#{msg}\"\n packet = \"ESMG||#{self_ip}||#{next_hop_ip}||#{payload}\"\n @logger.warn(\"EMSG\"){\"EMSG: curr=#{@config[\"hostname\"]} nhn=#{next_hop_node}\"} \n \n # send out on socket\n @socket.send(packet,0,next_hop_ip,@port);\n else\n puts \"no path to destination!\"\n end\n end\n\n def send_ping(dst_ip,num_ping,delay)\n # get name of dst\n dst_node = @translator.get_hostname_for_interface(dst_ip);\n \n # get next hop from shortest path table\n next_hop_node = @link_state_table.get_next_hop_node(dst_node);\n next_hop_ip = @translator.get_outgoing_interface_for_hostname(next_hop_node);\n \n # get internal address for nex hop node\n self_ip = @translator.get_interface_for_hostname(next_hop_node);\n\n # create packet\n packet = \"PNG||#{self_ip}||#{dst_ip}||nil\"\n @logger.info(\"PING\"){\"PING packet is: #{packet}\"} \n \n # create loop\n while num_ping > 0\n\n # send packet out on socket \n @socket.send(packet,0,next_hop_ip,@port);\n num_ping = num_ping - 1; # decrement num_ping\n sleep(delay) # sleep for delay amount of time\n end\n end\n\n def send_trace(dst_ip)\n # get name of dst\n dst_node = @translator.get_hostname_for_interface(dst_ip);\n \n # get next hop from shortest path table\n next_hop_node = @link_state_table.get_next_hop_node(dst_node);\n next_hop_ip = @translator.get_outgoing_interface_for_hostname(next_hop_node);\n\n # get own name and internal address for next hop node\n hostname = @config[\"hostname\"]\n 
self_ip = @translator.get_interface_for_hostname(next_hop_node);\n \n data = [[hostname, self_ip]];\n payload = Marshal::dump(data);\n\n packet = \"TRC||#{self_ip}||#{dst_ip}||#{payload}\"\n @logger.info(\"TRACEROUTE\"){\"TRACEROUTE packet is: #{packet}\"} \n \n # send out on socket\n @socket.send(packet,0,next_hop_ip,@port);\n end\n\n def handle_send_message(src_ip,dst_ip,payload);\n # get name of dst\n dst_node = @translator.get_hostname_for_interface(dst_ip);\n \n if(config[\"hostname\"] == dst_node)\n puts \"RECEIVED MSG #{src_ip.to_s} #{payload}\"\n else\n\n # get next hop from shortest path table\n next_hop_node = @link_state_table.get_next_hop_node(dst_node);\n next_hop_ip = @translator.get_outgoing_interface_for_hostname(next_hop_node);\n\n # get internal address for nex hop node\n self_ip = @translator.get_interface_for_hostname(next_hop_node);\n \n # create packet\n packet = \"SMG||#{src_ip}||#{dst_ip}||#{payload}\"\n @logger.info(\"SENDMSG\"){\"SENDMESSAGE packet is: #{packet}\"} \n \n # send out on socket\n @socket.send(packet,0,next_hop_ip,@port);\n end\n end\n\n def handle_encrypted_send_message(src_ip,dst_ip,payload)\n # split / parse the payload\n pay_arr = payload.split(\"~~\");\n encrypted_nodes = Marshal::load(pay_arr[0]);\n encrypted_msg = pay_arr[1]\n\n encryption_pair = encrypted_nodes.shift \n node = encryption_pair[0]\n cipher_token = encryption_pair[1]\n\n # decrypt the key\n private_key = OpenSSL::PKey::RSA.new(@config[\"private_key\"]);\n cipher_str = private_key.private_decrypt(cipher_token);\n \n # break the string up\n cipher_arr = cipher_str.split(\"%\");\n cipher_key = cipher_arr[0]\n cipher_iv = cipher_arr[1]\n\n # decrypt the cipher text\n decipher = OpenSSL::Cipher::AES.new(128,:CBC);\n decipher.decrypt\n decipher.key = cipher_key\n decipher.iv = cipher_iv\n \n peeled_msg = decipher.update(encrypted_msg) + decipher.final;\n \n if encrypted_nodes.empty?\n @logger.warn(\"ESMG\"){\"no elements in the array!\"}\n puts \"[encrypted]: RECEIVED MSG #{src_ip} #{peeled_msg}\"\n else\n # get next hop information\n next_node = encrypted_nodes[0][0];\n next_hop_ip = @translator.get_outgoing_interface_for_hostname(next_node);\n self_ip = @translator.get_interface_for_hostname(next_node);\n \n # reserialize and format data for nhn\n serialized_array = Marshal::dump(encrypted_nodes);\n \n # then send the payload to the nhn\n payload = \"#{serialized_array}~~#{peeled_msg}\"\n packet = \"ESMG||#{self_ip}||#{next_hop_ip}||#{payload}\"\n @logger.warn(\"EMSG\"){\"EMSG: curr=#{@config[\"hostname\"]} nhn=#{next_node}\"} \n \n # send out on socket\n @socket.send(packet,0,next_hop_ip,@port);\n end\n end\n\n def handle_ping(src_ip,dst_ip)\n # get name of dst\n dst_node = @translator.get_hostname_for_interface(dst_ip);\n \n if(config[\"hostname\"] == dst_node)\n puts \"RECEIVED PING MSG from #{src_ip} \"\n else\n \n # get next hop from shortest path table\n next_hop_node = @link_state_table.get_next_hop_node(dst_node);\n next_hop_ip = @translator.get_outgoing_interface_for_hostname(next_hop_node);\n\n # create packet\n packet = \"PNG||#{src_ip}||#{dst_ip}||nil\"\n @logger.info(\"PING\"){\"PING packet is: #{packet}\"} \n \n # send out on socket\n @socket.send(packet,0,next_hop_ip,@port); \n end\n end\n\n def handle_traceroute(src_ip,dst_ip,payload)\n data = Marshal::load(payload);\n\n # get name of dst\n dst_node = @translator.get_hostname_for_interface(dst_ip);\n \n if(config[\"hostname\"] == dst_node)\n hostname = @config[\"hostname\"];\n entry = [hostname,dst_ip]\n 
data.push(entry);\n      payload = Marshal::dump(data);\n      \n      # send back to src\n      src_ip = data[0][1];\n      @logger.info(\"TRACEROUTE\"){\"reached destination\"}\n      send_trace_response(src_ip,payload);\n\n    else\n      # get next hop from shortest path table\n      next_hop_node = @link_state_table.get_next_hop_node(dst_node);\n      next_hop_ip = @translator.get_outgoing_interface_for_hostname(next_hop_node);\n\n      # get own name and internal address for next hop node\n      hostname = @config[\"hostname\"]\n      self_ip = @translator.get_interface_for_hostname(next_hop_node);\n      \n      # add entry for intermediate node\n      entry = [hostname, self_ip];\n      data.push(entry);\n\n      payload = Marshal::dump(data);\n      packet = \"TRC||#{self_ip}||#{dst_ip}||#{payload}\"\n      \n      @logger.info(\"TRACEROUTE\"){\"forwarding TRC pkt to #{next_hop_node} for dst:#{dst_ip}\"}\n      # send out on socket\n      @socket.send(packet,0,next_hop_ip,@port); \n    end\n  end\n\n  def send_trace_response(dst_ip,payload);\n    # get name of dst\n    dst_node = @translator.get_hostname_for_interface(dst_ip);\n    \n    # if this is the destination node\n    if(config[\"hostname\"] == dst_node)\n      data = Marshal::load(payload);\n      data.each{|a| puts \"#{a[0]}, #{a[1]}\"}\n    else\n      \n      # get next hop from shortest path table\n      next_hop_node = @link_state_table.get_next_hop_node(dst_node);\n      next_hop_ip = @translator.get_outgoing_interface_for_hostname(next_hop_node);\n\n      # get own ip address for next hop node\n      self_ip = @translator.get_interface_for_hostname(next_hop_node);\n      \n      @logger.info(\"TRACEROUTE\"){\"sending response back to #{dst_ip} from #{self_ip}\"}\n      packet = \"TRCRESP||#{self_ip}||#{dst_ip}||#{payload}\"\n      \n      # send out on socket\n      @socket.send(packet,0,next_hop_ip,@port); \n    end  \n  end\n\n####################### Auxiliary Methods #################################\n###########################################################################\n\n# prints the config file in a pretty format\n  def print_config(io = $stdout)\n    pp(@config,io)\n  end\n\n# prints out the routing table\n  def print_routing_table()\n    # build the routing table\n    @link_state_table.build_routing_table(@config[\"hostname\"]);\n    \n    # print the table and capture io stream in a string\n    str = capture_io{ @link_state_table.print_info}\n    # get the output file\n    output_file = \"#{@config[\"files\"][\"lsr_files\"]}/#{@config[\"hostname\"]}.out\"\n    # dump to output\n    dump_to_output_file(output_file,str)\n  end\n\n# prints out the virtual circuit table\n  def print_vc_table()\n    str = capture_io{ @vc_table.print_info }\n    output_file = \"#{@config[\"files\"][\"vc_files\"]}/#{@config[\"hostname\"]}.out\"\n    dump_to_output_file(output_file,str);\n  end\n\n# dump any data to a file\n  def dump_to_output_file(output_file, data)\n    File.open(output_file, \"w\"){ |file| file.puts data }\n  end\n  \n# capture stdout and store it in string\n  def capture_io\n    begin\n      old_stdio = $stdout\n      $stdout = StringIO.new('','w')\n      yield\n      $stdout.string\n    ensure\n      $stdout = old_stdio\n    end\n  end\nend\n" }, { "alpha_fraction": 0.48481881618499756, "alphanum_fraction": 0.5117532014846802, "avg_line_length": 23.60240936279297, "blob_id": "9fc48225ebe25b82847d79985cc394be9f92d04c", "content_id": "c6ed70cf52cf6320e23a74c02ec181ef3ab2a292", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 2042, "license_type": "no_license", "max_line_length": 67, "num_lines": 83, "path": "/test/test_lsrTable.rb", "repo_name": "naveedn/cmsc417-project3", 
"src_encoding": "UTF-8", "text": "require File.expand_path(\"../../src/components/LSRTable\", __FILE__)\nrequire 'logger'\nrequire 'stringio'\nrequire 'yaml'\n\n# table looks like this:\n#\n# B ------ C\n# / \\ / \\\n# A E -- F D\n# \\ / \\ /\n# G ------ H\n#\n\n# helper method to capture stdout to a string\ndef capture_stdio\n  begin\n    old_stdout = $stdout\n    $stdout = StringIO.new('','w')\n    yield\n    $stdout.string\n  ensure\n    $stdout = old_stdout\n  end\nend\n\ndef test_build_routing_table(lsr_table)\n  expected_output = \"node: A, cost: 0, prev: \\n\"\\\n  \"node: B, cost: 2, prev: A\\n\"\\\n  \"node: E, cost: 4, prev: B\\n\"\\\n  \"node: G, cost: 5, prev: E\\n\"\\\n  \"node: F, cost: 6, prev: E\\n\"\\\n  \"node: H, cost: 8, prev: F\\n\"\\\n  \"node: C, cost: 9, prev: B\\n\"\\\n  \"node: D, cost: 10, prev: H\\n\"\n  \n  lsr_table.build_routing_table(\"A\");\n  str = capture_stdio { lsr_table.print_routing_table }\n  if (str != expected_output) then\n    puts \"FAILURE in test_build_routing_table\"\n    puts \"expected output:\\n#{expected_output}\"\n    puts \"result:\\n#{str}\"\n  end\nend\n\ndef run_test_suite()\n  logger = Logger.new(STDOUT);\n  # SET UP lsrTable\n  l = LinkStateTable.new(logger);\n  \n  l.add_entry('A','B',2,1);\n  l.remove_entry('A','B'); # remove_entry takes the host and dest of the link\n  l.clear_table();\n  l.add_entry('A','B',2,1);\n  l.add_entry('A','G',6,1);\n  l.add_entry('B','A',2,1);\n  l.add_entry('B','C',7,1);\n  l.add_entry('B','E',2,1);\n  l.add_entry('C','B',7,1);\n  l.add_entry('C','D',3,1);\n  l.add_entry('C','F',3,1);\n  l.add_entry('D','C',3,1);\n  l.add_entry('D','H',2,1);\n  l.add_entry('E','B',2,1);\n  l.add_entry('E','F',2,1);\n  l.add_entry('E','G',1,1);\n  l.add_entry('F','C',3,1);\n  l.add_entry('F','E',2,1);\n  l.add_entry('F','H',2,1);\n  l.add_entry('G','A',6,1);\n  l.add_entry('G','E',1,1);\n  l.add_entry('G','H',4,1);\n  l.add_entry('H','D',2,1);\n  l.add_entry('H','F',2,1);\n  l.add_entry('H','G',4,1);\n\n  # RUN TESTS\n  test_build_routing_table(l)\n  #l.print_info\n\nend\n\nrun_test_suite\n" }, { "alpha_fraction": 0.6281884908676147, "alphanum_fraction": 0.6640726327896118, "avg_line_length": 23.606382369995117, "blob_id": "32074ebeebe04bd8faf4812edaac86b6b16ad3c9", "content_id": "2dcc48aba785e83fd2adaf7c0fb5a74997d56193", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 2313, "license_type": "no_license", "max_line_length": 70, "num_lines": 94, "path": "/test/test_translator.rb", "repo_name": "naveedn/cmsc417-project3", "src_encoding": "UTF-8", "text": "require 'yaml'\nrequire 'stringio'\nrequire 'socket'\nrequire File.expand_path(\"../../src/components/Translator\",__FILE__)\n\n# Description:\n# This class will test the functionality of the Translator class\n\n\n# auxiliary method to capture stdout and put it into string\ndef capture_io\n  begin\n    old_stdout = $stdout\n    $stdout = StringIO.new('','w')\n    yield\n    $stdout.string\n  ensure\n    $stdout = old_stdout\n  end\nend\n\n# aux method to go through the logical step of printing an error out\ndef print_error(str,expected_output)\n  if str != expected_output then\n    puts \"FAILURE: \"\n    puts \"expected value:\\n#{expected_output}\"\n    puts \"result:\\n#{str}\"\n  end\nend\n\ndef test_get_neighbors(sf)\n  expected_output = \"{ n4 = {:src = 10.0.0.20, :dst = 10.0.0.21} }\\n\"\\\n  \"{ n2 = {:src = 10.0.1.20, :dst = 10.0.1.21} }\\n\"\n  str = capture_io { sf.print_neighbors; }\n  print_error(str,expected_output);\nend\n\ndef test_get_interfaces(sf)\n  expected_output = \"{ eth0 => 10.0.0.20 }\\n\"\\\n  \"{ eth1 => 10.0.1.20 }\\n\"\n\n  str = capture_io 
{sf.print_interfaces}\n  print_error(str,expected_output)\nend\n\ndef test_get_hostname_for_interface(sf)\n  expected_output = \"n2\"\n  str = sf.get_hostname_for_interface(\"10.0.1.21\")\n  print_error(str,expected_output)\n\n  expected_output = \"n4\"\n  str = sf.get_hostname_for_interface(\"10.0.0.21\")\n  print_error(str,expected_output)\n\nend\n\ndef test_get_interface_for_hostname(sf)\n  expected_output = \"10.0.1.20\"\n  str = sf.get_interface_for_hostname(\"n2\")\n  print_error(str,expected_output)\n\n  expected_output = \"10.0.0.20\"\n  str = sf.get_interface_for_hostname(\"n4\")\n  print_error(str,expected_output)\nend\n\ndef test_get_connecting_interface(sf)\n  expected_output = \"10.0.1.21\"\n  str = sf.get_connecting_interface(\"10.0.1.20\")\n  print_error(str,expected_output)\nend\n\ndef run_test_suite()\n  #setup\n  config = YAML::load_file(\"config.yaml\")\n  config[\"logger\"] = Logger.new(STDOUT)\n\n  sf = Translator.new(config)\n  name = sf.get_hostname\n  \n  if (name == \"n1\") then\n    # run tests\n    test_get_interfaces(sf)\n    test_get_hostname_for_interface(sf)\n    test_get_interface_for_hostname(sf)\n    test_get_neighbors(sf)\n    test_get_connecting_interface(sf)\n  else\n    puts \"This test is meant to be run on node 'n1'.. Exiting\"\n    exit 0\n  end\nend\n\nrun_test_suite\n" }, { "alpha_fraction": 0.7189952731132507, "alphanum_fraction": 0.7189952731132507, "avg_line_length": 16.36111068725586, "blob_id": "02a294ee6d671711ce4251dadae547bc767e9403", "content_id": "b2920cc3dfcfcf8abd2cdd2fa35d6f5b03bdb284", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 637, "license_type": "no_license", "max_line_length": 86, "num_lines": 36, "path": "/src/components/MessageHandler.rb", "repo_name": "naveedn/cmsc417-project3", "src_encoding": "UTF-8", "text": "\nclass MessageHandler\n# This class is responsible for the low-level details of splitting a received packet,\n# adding the necessary header information to an existing packet, and passing that back\n# to the router for it to send the message over the wire\n\n  def initialize(logger)\n    @logger = logger;\n  end\n\n  def build_flood_packet_header\n  end\n\n  def build_vc_packet_header\n  end\n\n  def build_vckp_packet_header\n  end\n\n  def build_trust_node_header\n  end\n\n  def build_send_msg_packet\n  end\n\n  def build_ping_packet\n  end\n\n  def build_traceroute_packet\n  end\n\n  def build_nack_packet\n  end\n\n  def build_ack_packet\n  end\nend\n  \n" }, { "alpha_fraction": 0.5877247452735901, "alphanum_fraction": 0.5976441502571106, "avg_line_length": 27.298246383666992, "blob_id": "1b938c6bd74dbddec8a78f787e19c0d775238ee6", "content_id": "832b25abc4e32245dc4471e11fd70014d66748b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 4839, "license_type": "no_license", "max_line_length": 75, "num_lines": 171, "path": "/src/components/Translator.rb", "repo_name": "naveedn/cmsc417-project3", "src_encoding": "UTF-8", "text": "require 'socket'\nrequire 'logger'\n# This class is responsible for translating hostnames to ip addresses and\n# getting the outgoing ip addresses for an interface\n\n# Vocabulary used:\n# Incoming interface = IP Address that this router has stored for itself\n# Outgoing interface = IP Address of router on the other side of the link\n\nclass Translator \n  attr_accessor :neighbors\n\n  def initialize(config_hash)\n    @config = config_hash;\n    @hostname = config_hash[\"hostname\"];\n    @interfaces = get_incoming_interfaces();\n    @neighbors = get_neighbors();\n    
@nodenames = get_nodenames();\n \n @logger = @config[\"logger\"].clone;\n @logger.level = Logger::ERROR\n end\n\n # get the physical connections this router is connected to\n # returns a hash of interfaces to their corresponding IP addresses\n def get_incoming_interfaces()\n res_str = %x{ifconfig | grep -B1 \"inet addr\" | \\\n awk '{ if ( $1 == \"inet\") { print $2 } \\\n else if ($2 == \"Link\") {printf \"%s:\", $1 } }' | \\\n awk -F: '{print $1 \":\\t\" $3 }'};\n\n interfaces = {}; \n res_arr = res_str.split(\"\\n\");\n\n res_arr.each do |line|\n iface_arr = line.split(\":\\t\");\n iface_name = iface_arr[0];\n iface_addr = iface_arr[1];\n\n # skip local host\n next if (iface_addr == \"127.0.0.1\")\n interfaces[iface_name] = iface_addr;\n end\n \n return interfaces;\n end\n\n # get outgoing interface for a given internal interface\n def get_connecting_interface(ip_addr)\n @neighbors.each do |k,v|\n if v.values.include? ip_addr then\n if v[:src] == ip_addr\n return v[:dst]\n else\n return v[:src]\n end\n end\n end\n return nil\n end\n\n # Reads the nodes to addrs file and tracks all addresses for a nodename\n # data looks like: \"n1\" => [10.0.0.20, 10.0.1.20]\"\n def get_nodenames\n nodenames = {}\n File.foreach(@config[\"files\"][\"nodes_to_addrs\"]) do |line|\n line_arr = line.split(\"\\t\");\n node_name = line_arr[0].strip;\n node_addr = line_arr[1].chomp!;\n \n # push associated IP addresses for stored nodename\n if(nodenames.has_key? node_name)\n nodenames[node_name].push(node_addr);\n else\n nodenames[node_name] = [node_addr];\n end\n end\n\n return nodenames\n end\n\n # returns hostname of corresponding IP address\n def get_hostname_for_interface(ip_addr)\n @nodenames.each do |nodename, arr_ips| \n if arr_ips.include? ip_addr then\n return nodename\n end\n end\n \n return nil;\n end\n \n # returns the incoming IP address of a neighbors hostname\n def get_interface_for_hostname(nodename)\n @neighbors.each do |k,v|\n @logger.info(\"TRANSLATOR\"){\"nodename = #{nodename}, k = #{k}\"}\n if (k == nodename)\n return v[:src]\n end\n end\n\n return nil\n end\n\n def get_outgoing_interface_for_hostname(nodename)\n ip_addr = get_interface_for_hostname(nodename)\n outgoing_ip = get_connecting_interface(ip_addr)\n return outgoing_ip;\n end\n \n # gets all the IP addresses of connecting nodes\n # data looks like:\n # - { \"n1\" = {:src = \"10.0.0.20\", :dst = \"10.0.0.21\"}, { \"n2\" ... } }\n def get_neighbors()\n neighbors = {}\n neighbor_ips = [];\n \n interfaces = get_incoming_interfaces.values();\n \n # get the IP address associated with the other side of the link\n File.foreach(@config[\"files\"][\"addrs_to_links\"]) do |line|\n line_arr = line.split(\"\\t\");\n src_ip = line_arr[0];\n dst_ip = line_arr[1].chomp!;\n\n if interfaces.include? src_ip\n neighbor_ips.push({:src => src_ip, :dst => dst_ip})\n elsif interfaces.include? 
dst_ip\n        neighbor_ips.push({:src => dst_ip, :dst => src_ip})\n      end\n    end  \n    \n    # get the name associated with these new ip addresses\n    File.foreach(@config[\"files\"][\"nodes_to_addrs\"]) do |line|\n      line_arr = line.split(\"\\t\");\n      node_name = line_arr[0];\n      node_addr = line_arr[1].chomp!;\n\n      if (idx = neighbor_ips.index{|x| x[:dst] == node_addr })\n        neighbors[node_name] = neighbor_ips[idx]\n      end\n    end\n    return neighbors\n  end\n\n  # returns all other outgoing interfaces based on the ip_addr passed in\n  def get_other_outgoing_interfaces(outgoing_ip_addr)\n    outgoing_ips = [];\n    \n    # each value is a {:src, :dst} hash; the outgoing side of the link is :dst\n    @neighbors.values.each do |link|\n      unless outgoing_ip_addr == link[:dst]\n        outgoing_ips.push(link[:dst])\n      end\n    end\n\n    return outgoing_ips\n  end\n\n## PRINTING METHODS ##\n  def print_neighbors(io = $stdout)\n    @neighbors.each do |name,links|\n      io.puts \"{ #{name} = {:src = #{links[:src]}, :dst = #{links[:dst]}} }\"\n    end\n  end\n\n  def print_interfaces(io = $stdout)\n    @interfaces.sort.map do |k,v|\n      io.puts \"{ #{k} => #{v} }\"\n    end\n  end\nend\n" }, { "alpha_fraction": 0.5952380895614624, "alphanum_fraction": 0.5952380895614624, "avg_line_length": 26.176469802856445, "blob_id": "15cd1850bdc76fb84623b640a9e02e20c78e4855", "content_id": "5797e639fbff3d7e4a0bd852b75f2e42fc1d1fb1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 2310, "license_type": "no_license", "max_line_length": 90, "num_lines": 85, "path": "/src/components/VCTable.rb", "repo_name": "naveedn/cmsc417-project3", "src_encoding": "UTF-8", "text": "# this class will store the forwarding entries and maintain the \n# virtual circuits\n\nclass VCTable\n\n  Entry = Struct.new(:src, :dst, :path);\n\n  def initialize(logger)\n    @entries = [];\n    @logger = logger.clone;\n    @logger.level = Logger::ERROR\n  end\n\n  # create a forwarding entry from a given src to a given output link\n  def create_forwarding_entry(src,dst,path)\n    # check for conflicts with the connection identifiers\n    entry = get_entry(src,dst);\n    \n    # if there are no conflicts, then create a new entry\n    if entry.nil? 
then\n entry = Entry.new(src,dst,path);\n @entries.push(entry);\n @logger.debug(\"VCTABLE\"){\"successful entry into VC table\"}\n\n # if there is a conflict, then return the existing entry\n else\n @logger.debug(\"VCTABLE\"){\"returning stored entry for #{src} to #{dst}\"}\n end\n \n # return the new entry\n return entry;\n end\n\n # remove an entry from the table\n def remove_forwarding_entry(src,dst)\n idx = @entries.index{|x| ((x[:src] == src) && (x[:dst] == dst))}\n if idx != nil then\n @entries.delete_at(idx);\n end\n end\n \n def get_entry(src,dst)\n idx = @entries.index{|x| ((x[:src] == src) && (x[:dst] == dst))}\n if idx != nil then\n return @entries[idx];\n end\n return nil;\n end\n\n # get forwarding entry for a specific node\n def get_forwarding_entry(src_node, dst_node)\n \n @logger.debug(\"VCTABLE\"){\"src node:#{src_node}, dst node:#{dst_node}\"}\n\n next_hop_idx = @entries.index{|x| ((x[:src] == src_node) && (x[:dst] == dst_node))};\n \n # if the destination is the current node\n if next_hop_idx.nil?\n @logger.debug(\"VCTABLE\"){\"next_hop: #{dst_node}\"}\n next_hop = dst_node;\n end\n \n next_hop = @entries[next_hop_idx];\n @logger.debug(\"VCTABLE\"){\"next_hop of dst: #{next_hop.inspect}\"}\n return next_hop;\n end\n \n # clears the table of all forwarding entries\n def clear_table\n @entries = [];\n end\n\n # print the virtual circuits\n def print_entries_table(io = $stdout)\n @entries.each do |e|\n io.puts \"[src:#{e[:src]}, dst:#{e[:dst]}, path:#{e[:path]}]\"\n end\n end\n\n def print_info(io = $stdout)\n io.puts \"VIRTUAL CIRCUIT TABLE\"\n io.puts \"*********************\"\n print_entries_table(io);\n end\nend\n" }, { "alpha_fraction": 0.6916167736053467, "alphanum_fraction": 0.703592836856842, "avg_line_length": 29.363636016845703, "blob_id": "cb43f0e348e41204c2189a352849d9bce861289a", "content_id": "57ee01a109cf759fce14c937f809af6aaca0ff3e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 334, "license_type": "no_license", "max_line_length": 60, "num_lines": 11, "path": "/src/scripts/clean.sh", "repo_name": "naveedn/cmsc417-project3", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\nLOG_DIR=\"/home/core/Desktop/code/p3/output/logs\"\nROUTE_DIR=\"/home/core/Desktop/code/p3/output/routing_tables\"\nVC_DIR=\"/home/core/Desktop/code/p3/output/virtual_circuits\"\nWEIGHTS_FILE=\"/home/core/Desktop/code/p3/input/weights.txt\"\n\nrm -f \"$LOG_DIR\"/*.log\nrm -f \"$ROUTE_DIR\"/*.out\nrm -f \"$VC_DIR\"/*.out\nrm -f \"$WEIGHTS_FILE\"\n" }, { "alpha_fraction": 0.6736842393875122, "alphanum_fraction": 0.678947389125824, "avg_line_length": 17.799999237060547, "blob_id": "42468068b2b2e7340ae83ef1ea69351b9f6b693c", "content_id": "9b331c63fb1240e7a909802374cdafc03ee241a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 190, "license_type": "no_license", "max_line_length": 59, "num_lines": 10, "path": "/test/run_tests.sh", "repo_name": "naveedn/cmsc417-project3", "src_encoding": "UTF-8", "text": "\n# This script will run all the test files in this directory\n\n\nls *.rb -altrh | awk '{print $9}' > tests.txt\n\nwhile read file; do\n ruby $file config.yaml \ndone <tests.txt\n\nrm -f tests.txt\n\n" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5384615659713745, "avg_line_length": 51, "blob_id": "51545f324ee3421e2846e31126f21fe877e32b40", "content_id": "079b1aabd337748b5a9b09211e1afc4725b03f46", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Shell", "length_bytes": 104, "license_type": "no_license", "max_line_length": 52, "num_lines": 2, "path": "/src/scripts/kill_all.sh", "repo_name": "naveedn/cmsc417-project3", "src_encoding": "UTF-8", "text": "kill -9 $(ps aux | grep [r]uby | awk '{print $2}')\nkill -9 $(ps aux | grep [v]noded | awk '{print $2}')\n" }, { "alpha_fraction": 0.5840767025947571, "alphanum_fraction": 0.5889830589294434, "avg_line_length": 25.520709991455078, "blob_id": "1667d9d66eb4f666a096dc13cfb078bbb506f86c", "content_id": "aea0651c36823038b2ecd47710614121bacdb967", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 4484, "license_type": "no_license", "max_line_length": 74, "num_lines": 169, "path": "/test/test_vcTable.rb", "repo_name": "naveedn/cmsc417-project3", "src_encoding": "UTF-8", "text": "#!/usr/bin/env ruby\nrequire 'logger'\nrequire 'yaml'\nrequire 'stringio'\nrequire File.expand_path(\"../../src/components/VCTable\", __FILE__)\n\n# Description: This class will test the basic functionality of the \n# VC Table to ensure correctness. Dealing with Mocked Data from LSR table.\n#\n# table looks like this:\n#\n# B ------ C\n# / \\ / \\\n# A E -- F D\n# \\ / \\ /\n# G ------ H\n#\n# Testing connections from node E\n\nVertex = Struct.new(:node,:cost,:previous);\n\ndef compare_results(expected,actual, test_number)\n if expected != actual then\n puts \"TEST #{test_number} FAILED\"\n\n puts \"expected = #{expected}\"\n puts \"actual = #{actual}\"\n end\nend\n\ndef capture_io\n begin\n old_stdio = $stdout\n $stdout = StringIO.new('','w')\n yield\n $stdout.string\n ensure\n $stdout = old_stdio\n end\nend\n\n\ndef test_insert_one(v)\n expected_output = \"[src:A, dst:E, path:B]\\n\"\n \n v.create_forwarding_entry(\"A\",\"E\");\n str = capture_io{ v.print_entries_table }\n compare_results(expected_output,str,1); \nend\n\ndef test_insert_another(v)\n expected_output = \"[src:A, dst:E, path:B]\\n\"\\\n \"[src:A, dst:D, path:F]\\n\"\n \n\n v.create_forwarding_entry(\"A\",\"D\");\n str = capture_io{ v.print_entries_table}\n compare_results(expected_output,str,2);\nend\n\ndef test_insert_three(v)\n expected_output = \"[src:A, dst:E, path:B]\\n\"\\\n \"[src:A, dst:D, path:F]\\n\"\\\n \"[src:G, dst:C, path:F]\\n\"\\\n \"[src:E, dst:H, path:F]\\n\"\\\n \"[src:D, dst:G, path:G]\\n\"\n \n v.create_forwarding_entry(\"G\",\"C\");\n v.create_forwarding_entry(\"E\",\"H\");\n v.create_forwarding_entry(\"D\",\"G\");\n str = capture_io{ v.print_entries_table;}\n compare_results(expected_output, str,3);\n \nend\n\ndef test_delete(v)\n expected_output = \"[src:A, dst:D, path:F]\\n\"\\\n \"[src:G, dst:C, path:F]\\n\"\\\n \"[src:E, dst:H, path:F]\\n\"\n \n \n v.remove_forwarding_entry(\"A\",\"E\");\n v.remove_forwarding_entry(\"D\",\"G\");\n str = capture_io { v.print_entries_table; }\n compare_results(expected_output, str,5);\nend\n\ndef test_update(v)\n expected_output = \"[src:A, dst:D, path:G]\\n\"\\\n \"[src:G, dst:C, path:B]\\n\"\\\n \"[src:E, dst:H, path:G]\\n\"\n\n \n sp_table = [];\n sp_table.push(Vertex.new(\"E\",0,nil));\n sp_table.push(Vertex.new(\"G\",1,\"E\"));\n sp_table.push(Vertex.new(\"B\",2,\"E\"));\n sp_table.push(Vertex.new(\"F\",12,\"E\"));\n sp_table.push(Vertex.new(\"A\",4,\"B\"));\n sp_table.push(Vertex.new(\"H\",4,\"G\"));\n sp_table.push(Vertex.new(\"C\",5,\"B\"));\n sp_table.push(Vertex.new(\"D\",6,\"H\"));\n\n v.build_forwarding_table(sp_table);\n \n # internal method update should be called here\n v.create_forwarding_entry(\"A\",\"D\");\n 
v.create_forwarding_entry(\"G\",\"C\");\n  v.create_forwarding_entry(\"E\",\"H\");\n\n  str = capture_io{ v.print_entries_table;}\n  compare_results(expected_output, str,6);\nend\n\ndef test_build_forwarding_table(vc_table, shortest_path_table)\n  expected_output = \"dest_addr: D, outgoing_link: F\\n\"\\\n  \"dest_addr: C, outgoing_link: F\\n\"\\\n  \"dest_addr: H, outgoing_link: F\\n\"\\\n  \"dest_addr: A, outgoing_link: B\\n\"\\\n  \"dest_addr: F, outgoing_link: F\\n\"\\\n  \"dest_addr: B, outgoing_link: B\\n\"\\\n  \"dest_addr: G, outgoing_link: G\\n\"\\\n  \"dest_addr: E, outgoing_link: \\n\"\n\n  vc_table.build_forwarding_table(shortest_path_table);\n  \n  str = capture_io {vc_table.print_forwarding_table }\n  \n  if (str != expected_output)\n    puts \"FAILURE in test_forwarding_table\"\n    puts \"expected output:\\n#{expected_output}\"\n    puts \"result:\\n#{str}\"\n  end\nend\n\n# the mocked data to run these tests with\ndef get_mock_shortest_path_table\n  # build shortest path table \n  sp_table = [];\n  sp_table.push(Vertex.new(\"E\",0,nil));\n  sp_table.push(Vertex.new(\"G\",1,\"E\"));\n  sp_table.push(Vertex.new(\"B\",2,\"E\"));\n  sp_table.push(Vertex.new(\"F\",2,\"E\"));\n  sp_table.push(Vertex.new(\"A\",4,\"B\"));\n  sp_table.push(Vertex.new(\"H\",4,\"F\"));\n  sp_table.push(Vertex.new(\"C\",5,\"F\"));\n  sp_table.push(Vertex.new(\"D\",6,\"H\"));\n  \n  return sp_table\nend\n\ndef run_test_suite()\n  logger = Logger.new(STDOUT)\n  # SETUP\n  v = VCTable.new(logger);\n  sp_table = get_mock_shortest_path_table\n\n  test_build_forwarding_table(v,sp_table);\n  test_insert_one(v)\n  test_insert_another(v)\n  test_insert_three(v)\n  test_delete(v)\n  test_update(v)\n  #v.print_info\n  v.clear_table\n\nend\n\nrun_test_suite()\n\n\n" }, { "alpha_fraction": 0.7414810061454773, "alphanum_fraction": 0.7437745928764343, "avg_line_length": 52.543861389160156, "blob_id": "a022a374d99c94858bc0cbe841bbe909ffb48ac2", "content_id": "8d361bbe50972b95c48831f8f968ee2b9b62fd71", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3052, "license_type": "no_license", "max_line_length": 96, "num_lines": 57, "path": "/README.md", "repo_name": "naveedn/cmsc417-project3", "src_encoding": "UTF-8", "text": "DONE:\n=====\n- All of Part 1 (Link State Routing and Virtual Circuit Creation) \n- All of Part 2 (PING, SENDMSG, and TRACEROUTE commands implemented)\n- All of Part 3 (using onion-routing to encrypt messages between a sender and receiver)\n\nHOW TO RUN:\n===========\n- From the root directory, `cd` into the *src/scripts* directory\n- run the weights.sh script (i.e. 
`./weights.sh`)\n- for each virtual node, run the run.sh script (`./run.sh`)\n- that's all!\n\n### Data Messages\nSENDMSG [DST] [MSG] - deliver MSG text to process on DST\nPING [DST] [NUMPINGS] [DELAY] - send NUMPINGS ping messages to DST, DELAY seconds in between pings\nTRACEROUTE [DST] - perform a traceroute to DST\nDUMP - show the output for the current implementation of Dijkstra's\n\n### Different Control Packets\nLSP : LinkState Packet :: weights to immediate neighbors\nVCP : Virtual Circuit :: create a virtual circuit from src to dst\nVCKP: VC Kill Packet :: Tear down virtual circuit\nACK : Acknowledge Packet :: message has been received and handled\nNACK: Failure Packet :: message has been received but failed\nSMG : Send Message Packet:: a send message packet is received, forward it along\nESMG: Encrypted SMG :: follow onion-routing protocol and decrypt as you pass along\nPNG : Ping Packet :: forward the packet to the destination\nTRC : Traceroute :: Collect Intermediate node's info and pass along to destination\nTRCRESP: TRC response :: Send the packet of data containing all network hops back to host\n\nBONUS FEATURES\n==============\n\n- you can start nodes up at any time, the link-state routing is robust and can handle failure\n- there is a DUMP command that will output the current state of the Link State table\n- Dynamic Logging: you can set a variable at the top of each file to get more verbose logging\nto show what is happening at any given moment\n- there is a test folder that contains static tests for both Dijkstra's and the other classes\n- there is a script to kill all instances -- kill\\_all.sh\n- there is a script to clean all the log files -- clean.sh\n- you can add configuration settings pretty quickly with the YAML config being loaded\n\nAdversarial Routing\n===================\nI implemented Onion Routing by doing the following:\n\n* On bootup, every node generates a public/private key, and writes public key to \"server\"\n** In this case the server is a directory called public\\_keys in the input folder\n* When a src is going to send an encrypted message to the destination, it will:\n1. Get the Path to the destination from the Link State Table\n2. Working from the destination, the message will be encrypted with a cipher in an onion\n3. When the cipher encrypts the msg, the src will use the corresponding node's \n   public RSA key to encrypt the Cipher Key and IV fields.\n4. The msg will then have multiple cipher encryptions on the payload, like an onion. \n   Each node will use its own private key (stored in memory, not visible to anyone else) to\n   decrypt the cipher tokens and use the cipher tokens to \"unpeel\" a layer of the msg\n" }, { "alpha_fraction": 0.6007761359214783, "alphanum_fraction": 0.6107203364372253, "avg_line_length": 25.600000381469727, "blob_id": "059f7eb9dd2b35510f7ac3148f31422c8ed51b84", "content_id": "6505d030470af5715ea0bfc0144e61a7263445b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 4123, "license_type": "no_license", "max_line_length": 88, "num_lines": 155, "path": "/src/node.rb", "repo_name": "naveedn/cmsc417-project3", "src_encoding": "UTF-8", "text": "#!/usr/bin/env ruby\n\nrequire File.expand_path('../components/Router', __FILE__)\nrequire 'socket'\nrequire 'thread'\nrequire 'yaml'\nrequire 'date'\nrequire 'timeout'\nrequire 'logger'\nrequire 'openssl'\n\n\n# Description:\n# This is the entry-point for the application. 
Node.rb functions as a task runner; its 
Node.rb functions as a task runner, its \n# purpose is to instantiate a router and a bunch of threads to handle asynchronous tasks\n# By: Naveed Nadjmabadi\n\ndef main\n # check if correct params are called\n if (ARGV.length != 1) then\n puts \"NO CONFIG FILE SPECIFIED, EXITING\"\n exit 1;\n end\n\n # set up configuration\n hostname = %x{hostname}.chomp! # get hostname\n \n config = YAML::load_file(ARGV.shift)\n config[\"hostname\"] = hostname;\n\n logger = create_log(config); # create a logger and put it in config hash\n logger.level = Logger::WARN;\n\n r = Router.new(config);\n \n # take timestamp\n ts = DateTime.now\n\n # generate the RSA key\n key = OpenSSL::PKey::RSA.new 1024 \n \n # save private key internally in memory\n config[\"private_key\"] = key.to_pem\n\n # write public key to server file\n pub_key_file = \"#{config[\"files\"][\"key_server\"]}/#{hostname}.pem\"\n open pub_key_file, \"w\" do |file|\n file.write key.public_key.to_pem\n file.flush\n end\n\n # child process\n child_proc_boolean = false\n child_proc = nil\n serv_sock = nil\n clnt_sock = nil\n\n # pre-emptive flood \n r.flood();\n\n loop do \n # fork out the child which will listen to stdin\n if (child_proc_boolean == false) then\n child_proc = fork\n child_proc_boolean = true\n\n # create pair of sockets for communication between processes\n if (child_proc.nil? == false) then\n serv_sock = TCPServer.new(\"127.0.0.1\",config[\"sockets\"][\"tcp_port\"])\n else\n sleep(1);\n begin\n clnt_sock = TCPSocket.new(\"127.0.0.1\",config[\"sockets\"][\"tcp_port\"])\n rescue Errno::ECONNREFUSED\n retry\n end\n end\n \n else\n # parent process code to to listen from socket\n if child_proc.nil? == false\n if (clnt_sock == nil)\n clnt_sock = serv_sock.accept\n end\n begin\n data = clnt_sock.recv_nonblock(config[\"sockets\"][\"mtu\"])\n r.handle_data_message(data);\n rescue Errno::EAGAIN,Errno::EWOULDBLOCK\n # do nothing, no data from socket\n end\n\n # child process to listen to $stdin and to send it to parent proc\n else\n begin\n while line = gets\n begin\n clnt_sock.send(line, config[\"sockets\"][\"tcp_port\"])\n rescue Errno::EAGAIN, Errno::EACCES\n # do nothing, can't write\n end\n end\n clnt_sock.close\n rescue Errno::EIO\n # parent proc died\n exit\n end\n end\n end\n \n # send flooding packet outbound if flooding round has occured\n if ((seconds_elapsed(ts,DateTime.now) % config[\"round_time\"][\"flood_packet\"]) == 0)\n logger.info(\"NODE\"){\"flooding packet call\"}\n r.flood();\n end\n\n # handle message and respond\n r.handle_control_message()\n \n # create phony virtual circuit\n # if (seconds_elapsed(ts,DateTime.now) == 7)\n # if (hostname == \"n1\") then\n # puts \"virtual circuit has been sent\"\n # logger.info(\"NODE\"){\"creating virtual circuit\"}\n # r.create_virtual_circuit(\"10.0.0.20\",\"10.0.6.21\", r.socket.addr);\n # end\n # end\n\n # build and dump routing table to output if flooding table has occurred\n if ((seconds_elapsed(ts,DateTime.now) % config[\"round_time\"][\"dump_info\"]) == 0)\n logger.info(\"NODE\"){\"taking router dump\"}\n r.print_routing_table();\n r.print_vc_table();\n end\n end\nend\n\ndef seconds_elapsed(datetime_start,datetime_end)\n return ((datetime_end - datetime_start) * 24 * 60 * 60).to_i\nend\n\n\ndef create_log(config)\n # get the output log dir\n logdir = config[\"files\"][\"log_files\"];\n hostname = config[\"hostname\"];\n\n # instantiate the logger\n file = File.open(\"#{logdir}/#{hostname}.log\", \"w\")\n logger = Logger.new(file);\n\n config[\"logger\"] = logger # add to 
config hash\n \n return logger;\nend\n\nmain\n" }, { "alpha_fraction": 0.675000011920929, "alphanum_fraction": 0.675000011920929, "avg_line_length": 18.047618865966797, "blob_id": "a662cf7f79b7168e8932c502cd88abe656a610bd", "content_id": "858a06305cbb4fdbb0429fd5bd97d60f3b90a93c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 400, "license_type": "no_license", "max_line_length": 68, "num_lines": 21, "path": "/test/test_router.rb", "repo_name": "naveedn/cmsc417-project3", "src_encoding": "UTF-8", "text": "#!/usr/bin/env ruby\nrequire 'yaml'\n\nrequire File.expand_path(\"../../src/components/Router\", __FILE__)\n\n# Description:\n# This is the class that tests the functionality of the Node class. \n# This class is meant to be run in the host, NOT in CORE\n# By: Naveed Nadjmabadi\n\n\ndef run_test_suite\n\n # SET UP NODE\n config = YAML::load_file(\"config.yaml\")\n r = Router.new(config);\n\n # RUN TESTS\nend\n\nrun_test_suite()\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6754385828971863, "avg_line_length": 17.83333396911621, "blob_id": "2ee364f39a228cbb7dd2c91080de704d4367a588", "content_id": "dfc84c69a8cd28a6b28cbf9d2f92e8f504dc4fbf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 114, "license_type": "no_license", "max_line_length": 45, "num_lines": 6, "path": "/src/scripts/run.sh", "repo_name": "naveedn/cmsc417-project3", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\nHOME_DIR=\"/home/core/code/p3/src\"\nRUBY=\"/usr/bin/ruby\"\n\n$RUBY $HOME_DIR/node.rb $HOME_DIR/config.yaml \n" }, { "alpha_fraction": 0.5897671580314636, "alphanum_fraction": 0.5918537974357605, "avg_line_length": 32.280555725097656, "blob_id": "0f53ae3b7d79ce80906d083ad538763621a7f3ad", "content_id": "d468dcb61f06303c75dedc1c8eb4b44dafc2ffb8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 11981, "license_type": "no_license", "max_line_length": 91, "num_lines": 360, "path": "/src/components/LSRTable.rb", "repo_name": "naveedn/cmsc417-project3", "src_encoding": "UTF-8", "text": "require 'monitor'\nrequire File.expand_path(\"../VCTable\", __FILE__);\n\n# this class contains the adjacency list of all nodes, and their routes\n# it will construct the link-state-packets for flooding as well\nclass LinkStateTable\n\n # define struct that represents a graph node\n Vertex = Struct.new(:node, :cost, :previous);\n \n # define struct that represents link-state packet\n LSP = Struct.new(:node, :dest, :weight, :seq);\n\n def initialize(logger)\n @shortest_path_table = []; # an array of Vertex Structs\n @link_state_packet_table = []; # an array of LSP structs\n @forwarding_table = {};\n @lock = Monitor.new\n \n @logger = logger.clone;\n @logger.level = Logger::ERROR;\n end\n \n # returns the hostname of the current router\n def get_hostname()\n return %x{hostname}.chomp!\n end\n \n # method to send most up-to-date version of the link-state-table\n def build_link_state_packet()\n \n lsp_array = get_lsp_entries_for_node(get_hostname())\n @logger.debug(\"LSRTABLE\"){\"link state packet looks like: #{lsp_array}\"} \n payload = Marshal::dump(lsp_array)\n return \"LSP||<src_ip>||<dst_ip>||#{payload}\"\n end\n\n # dijkstra's implementation -- will find the shortest lengths to all nodes from the src\n def build_routing_table(src)\n \n # Step 0: Initialize Data Structures necessary\n # - a struct for each vertex (:node, :cost, :previous)\n # - an 
array containing these structs, sorted from min-to-max\n # - an array containing the marked / final nodes after they have been processed\n inf = 99999999;\n tenative_nodes = [];\n permanent_nodes = [];\n\n # Step 1:\n # - make source vertex as root (:weight = 0, :previous = null, :node = hostname)\n # - make all other vertices (:weight = inf, :previous = null, :node = nodename)\n\n origin = Vertex.new(src, 0, nil);\n tenative_nodes.push(origin);\n\n @lock.synchronize{\n uniq_nodes = [src]\n @link_state_packet_table.each do |el|\n unless (uniq_nodes.include?(el[:node])) then\n # create the vertex\n node = Vertex.new(el[:node], inf, nil)\n uniq_nodes.push(el[:node]); \n tenative_nodes.push(node);\n end\n end\n }\n\n # Step 2:\n # - While array is not empty\n # - pop the smallest element off and put it into completed_queue\n # - find all of the adjacent nodes\n # - for each adjacent node, check if it is in the minHeap\n ## - If true && adj.weight > distance between src-adj + weight of src\n ## - update weight of adj\n \n while (tenative_nodes.length != 0) do\n # pop smallest elem and put into completed\n curr_node = tenative_nodes.shift;\n permanent_nodes.push(curr_node);\n \n # get adjacents\n adjacents = get_lsp_entries_for_node(curr_node[:node]);\n # for each adjacent node, check if it is in the minHeap\n if (adjacents.nil? == false)\n adjacents.each do |el|\n @logger.debug(\"LSRTABLE\"){ \n \"adjacent node for #{curr_node[:node]}: \"\\\n \"node=#{el[:node]}, dest=#{el[:dest]}, weight=#{el[:weight]}\"\n }\n\n adj_node_name = el[:dest];\n adj_node_weight = el[:weight].to_i;\n # if the node is in the tenative queue\n if((idx = tenative_nodes.index{|x| x[:node] == adj_node_name}) != nil) then\n vertex = tenative_nodes[idx] \n # and the tenative weight is greater than the distance to it now\n if (vertex[:cost] > (adj_node_weight + curr_node[:cost])) then\n # update the tenative weight and predecessor fields\n vertex[:cost] = (adj_node_weight + curr_node[:cost]);\n vertex[:previous] = curr_node[:node];\n end\n end\n end\n end\n \n # sort the array to make sure the smallest element is at front of queue\n tenative_nodes.sort!{|a,b| a[:cost] <=> b[:cost]}\n end\n\n # Step 3: Update the shortest path && forwarding table to show final queue information\n # - completed nodes contains structs that look like - {:node, :cost, :previous}\n # - Forwarding Table should look like {:destination_node, :outgoing_node }\n @lock.synchronize {\n @shortest_path_table = permanent_nodes;\n @forwarding_table = build_forwarding_table(permanent_nodes);\n }\n end\n \n # using a completed shortest path table, get the path for every node\n def build_forwarding_table(shortest_path_table)\n forwarding_table = {};\n\n shortest_path_table.each do |v|\n node = v[:node]\n forwarding_table[node] = get_path_for_node(node, shortest_path_table).shift\n end\n\n return forwarding_table;\n end\n \n # use the results from Dijkstra's table to compute the path necessary for a node to \n # reach the destination. Returns results in an ordered array where the index corresponds\n # to the hop of the node i.e. [src, node1, node2, node3, destination]\n def get_path_for_node(dest, shortest_path_table = @shortest_path_table)\n \n if (shortest_path_table.empty? || (shortest_path_table.include? 
dest == false))\n return nil;\n end\n\n idx = shortest_path_table.index{ |x| x[:node] == dest} \n current_node = shortest_path_table[idx]\n previous_node = current_node[:previous];\n \n route_path = [];\n\n while (previous_node != nil) do\n # confusing but this method puts curr_node at beginning of route_path\n route_path.unshift(current_node[:node]) \n \n idx = shortest_path_table.index{|x| x[:node] == previous_node}\n \n if (idx != nil) then\n current_node = shortest_path_table[idx];\n previous_node = current_node[:previous]\n end\n end\n\n return route_path;\n end\n\n def get_next_hop_node(dst_node);\n return @forwarding_table[dst_node];\n end\n\n # check entry's sequence number to determine if flood packet is \"fresh\"\n def check_entry(hostname, destination, weight, sequence_number)\n # check to see if packet already exists\n idx = @link_state_packet_table.index{ |e|\n ((e[:node] == hostname) && (e[:dest] == destination))\n }\n\n if (idx.nil? == false)\n @logger.debug(\"LSRTABLE\"){\"flooding entry exists for packet\"}\n\n # check the sequence number to verify \"freshness\"\n stored_seq = @link_state_packet_table[idx][:seq];\n\n if (sequence_number > stored_seq) then\n @logger.debug(\"LSRTABLE\"){\n \"incoming sequence number:#{sequence_number} > stored value:#{stored_seq}\"\n }\n return true;\n else\n @logger.debug(\"LSRTABLE\"){\n \"incoming sequence number:#{sequence_number} < stored value:#{stored_seq}\"\n }\n return false;\n end\n end\n\n return true; # entry does not exist in our router\n end\n\n # add entries when first initializing router, or receiving a flood packet \n def add_entry(hostname, destination, weight, sequence_number)\n # check to see if packet already exists\n idx = @link_state_packet_table.index{ |x|\n ((x[:node] == hostname) && (x[:dest] == destination))\n }\n\n if (idx.nil? == false)\n if (sequence_number > @link_state_packet_table[idx][:seq]) then\n # remove old packet\n if (@link_state_packet_table[idx][:node] == hostname)\n remove_entry(hostname, destination);\n else\n remove_entry(destination,hostname);\n end\n\n # lock down the lsr_table\n @lock.synchronize {\n # add new one\n new_lsp = LSP.new(hostname,destination,weight,sequence_number);\n @link_state_packet_table.push(new_lsp);\n }\n end\n else\n # otherwise this is a new entry and should be added regardless\n @lock.synchronize {\n new_lsp = LSP.new(hostname,destination,weight,sequence_number);\n @link_state_packet_table.push(new_lsp);\n }\n end \n end\n\n # remove entry if path no longer exists :: internal method\n def remove_entry(host,dest)\n @lock.synchronize {\n idx = @link_state_packet_table.index{|x|((x[:node] == host) && (x[:dest] == dest))}\n if idx\n @link_state_packet_table[idx] = nil;\n @link_state_packet_table.compact! 
# removes the nil entry\n end\n }\n end\n\n # removes all entries\n def clear_table\n @lock.synchronize {\n @link_state_packet_table.clear()\n }\n end\n \n # same as get_entry but with seq num\n def get_lsp_entries_for_node(nodename)\n res = [];\n \n @lock.synchronize {\n @link_state_packet_table.each do |link_state_entry|\n @logger.debug(\"LSRTABLE\"){\"link state entry looks like: #{link_state_entry}\"}\n \n if link_state_entry[:node] == nodename\n res.push(link_state_entry);\n end\n end\n }\n \n if (res.length == 0) \n @logger.warn(\"LSRTABLE\"){\"no entry found\"}\n return nil\n end\n\n return res;\n end\n\n # given a set of IP addresses of adjacent nodes, find their weight and\n # populate the link-state-packet table with the hosts information\n def get_weights_for_neighbors(weights_file, translator)\n interfaces_array = translator.get_incoming_interfaces().values\n list = {};\n \n # read the weights file \n begin\n File.foreach(weights_file) do |line|\n line_arr = line.split(\",\");\n \n if line_arr.length < 4 then\n @logger.error(\"LSRTABLE\"){\"line #{line_arr.inspect}\"}\n end\n \n src_ip = line_arr[0];\n dest_ip = line_arr[1];\n weight = line_arr[2];\n seq_num = line_arr[3].chomp!;\n\n \n # get addr of the connecting interface\n if interfaces_array.include? src_ip then\n # unless its a duplicate\n unless ((list.has_key? src_ip) && (list[src_ip] == dest_ip)) then\n list[src_ip] = dest_ip;\n \n # add entry to link-state-routing table\n node = translator.get_hostname_for_interface(dest_ip)\n add_entry(get_hostname,node,weight,seq_num)\n end\n\n # do the same check against the destination IP\n elsif interfaces_array.include? dest_ip then \n unless ((list.has_key? dest_ip) && (list[dest_ip] == src_ip)) then\n list[dest_ip] = src_ip;\n \n node = translator.get_hostname_for_interface(src_ip)\n add_entry(get_hostname,node,weight,seq_num)\n end\n end\n end\n \n # this will happen if weights ruby file is updating same time as the read\n rescue NoMethodError\n sleep 1\n retry\n rescue Errno::ENOENT\n puts \"You need to run the weights.sh script in the src/scripts \"\\\n \"directory -- or put a weights.txt file in the input dir\"\n puts \"EXITING NOW\"\n exit\n end\n \n return list\n end\n \n # print just the lsr table\n def print_lsr_table(io = $stdout)\n @lock.synchronize{\n @link_state_packet_table.each do |e| \n io.puts \"[src:#{e[:node]}, dest:#{e[:dest]}, weight:#{e[:weight]}, seq:#{e[:seq]}]\"\n end\n }\n end\n\n # print just the routing table\n def print_routing_table(io = $stdout)\n @lock.synchronize{\n @shortest_path_table.each do |el|\n io.puts \"node: #{el[:node]}, cost: #{el[:cost]}, prev: #{el[:previous]}\"\n end\n }\n end\n\n # print just the forwarding table\n def print_forwarding_table(io = $stdout)\n @forwarding_table.each do |k,v|\n io.puts \"dest_addr: #{k}, outgoing_link: #{v}\"\n end\n end\n\n # print the adjacency list\n def print_info(io = $stdout)\n io.puts \"ROUTING TABLE\"\n io.puts \"*************\"\n print_routing_table(io)\n io.puts \"\\nFORWARDING TABLE\"\n io.puts \"****************\\n\"\n print_forwarding_table(io);\n io.puts \"\\nLSR TABLE ENTRIES:\"\n io.puts \"******************\\n\"\n print_lsr_table(io);\n end\nend\n" }, { "alpha_fraction": 0.5657810568809509, "alphanum_fraction": 0.6747759580612183, "avg_line_length": 43.949642181396484, "blob_id": "b429365a2b86aee0cfd9552812cbe46bcea10da0", "content_id": "082e3a195fec155a3259669b0bf6e18d202fb654", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 
6248, "license_type": "no_license", "max_line_length": 66, "num_lines": 139, "path": "/src/scripts/run.py", "repo_name": "naveedn/cmsc417-project3", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nfrom core import pycore\nfrom core.misc import ipaddr\nfrom core.constants import *\n\n\n\ndef add_to_server(session):\n global server\n try:\n server.addsession(session)\n return True\n except NameError:\n return False\n\ndef main():\n # create session\n session = pycore.Session(persistent=True)\n add_to_server(session)\n\n # add objects to the session\n n1 = session.addobj(cls = pycore.nodes.CoreNode, name=\"n1\")\n n1.setposition(x=164.0,y=158.0)\n\n n2 = session.addobj(cls = pycore.nodes.CoreNode, name=\"n2\")\n n2.setposition(x=163.0,y=377.0)\n\n n3 = session.addobj(cls = pycore.nodes.CoreNode, name=\"n3\")\n n3.setposition(x=170.0,y=640.0)\n\n n4 = session.addobj(cls = pycore.nodes.CoreNode, name=\"n4\")\n n4.setposition(x=436.0,y=145.0)\n\n n5 = session.addobj(cls = pycore.nodes.CoreNode, name=\"n5\")\n n5.setposition(x=441.0,y=359.0)\n\n n6 = session.addobj(cls = pycore.nodes.CoreNode, name=\"n6\")\n n6.setposition(x=438.0,y=638.0)\n\n n7 = session.addobj(cls = pycore.nodes.CoreNode, name=\"n7\")\n n7.setposition(x=707.0,y=125.0)\n \n n8 = session.addobj(cls = pycore.nodes.CoreNode, name=\"n8\")\n n8.setposition(x=712.0,y=341.0)\n\n n9 = session.addobj(cls = pycore.nodes.CoreNode, name=\"n9\")\n n9.setposition(x=712.0,y=645.0)\n\n n10 = session.addobj(cls = pycore.nodes.CoreNode, name=\"n10\")\n n10.setposition(x=911.0,y=111.0)\n\n n11 = session.addobj(cls = pycore.nodes.CoreNode, name=\"n11\")\n n11.setposition(x=919.0,y=330.0)\n\n n12 = session.addobj(cls = pycore.nodes.CoreNode, name=\"n12\")\n n12.setposition(x=923.0,y=662.0)\n\n eth0 = session.addobj(cls = pycore.nodes.PtpNet, name=\"eth0\");\n eth1 = session.addobj(cls = pycore.nodes.PtpNet, name=\"eth1\");\n eth2 = session.addobj(cls = pycore.nodes.PtpNet, name=\"eth2\");\n eth3 = session.addobj(cls = pycore.nodes.PtpNet, name=\"eth3\");\n eth4 = session.addobj(cls = pycore.nodes.PtpNet, name=\"eth4\");\n eth5 = session.addobj(cls = pycore.nodes.PtpNet, name=\"eth5\");\n eth6 = session.addobj(cls = pycore.nodes.PtpNet, name=\"eth6\");\n eth7 = session.addobj(cls = pycore.nodes.PtpNet, name=\"eth7\");\n eth8 = session.addobj(cls = pycore.nodes.PtpNet, name=\"eth8\");\n eth9 = session.addobj(cls = pycore.nodes.PtpNet, name=\"eth9\");\n eth10 = session.addobj(cls = pycore.nodes.PtpNet, name=\"eth10\");\n eth11 = session.addobj(cls = pycore.nodes.PtpNet, name=\"eth11\");\n eth12 = session.addobj(cls = pycore.nodes.PtpNet, name=\"eth12\");\n eth13 = session.addobj(cls = pycore.nodes.PtpNet, name=\"eth13\");\n eth14 = session.addobj(cls = pycore.nodes.PtpNet, name=\"eth14\");\n eth15 = session.addobj(cls = pycore.nodes.PtpNet, name=\"eth15\");\n eth16 = session.addobj(cls = pycore.nodes.PtpNet, name=\"eth16\");\n eth17 = session.addobj(cls = pycore.nodes.PtpNet, name=\"eth17\");\n eth18 = session.addobj(cls = pycore.nodes.PtpNet, name=\"eth18\");\n \n ptpnet = session.addobj(cls = pycore.nodes.PtpNet);\n # add interfaces between the nodes\n n1.newnetif(net=eth0, addrlist=[\"10.0.0.20/24\"], ifindex=0)\n n4.newnetif(net=eth0, addrlist=[\"10.0.0.21/24\"], ifindex=0)\n n1.newnetif(net=eth1, addrlist=[\"10.0.1.20/24\"], ifindex=1)\n n2.newnetif(net=eth1, addrlist=[\"10.0.1.21/24\"], ifindex=0)\n n2.newnetif(net=eth2, addrlist=[\"10.0.2.20/24\"], ifindex=1)\n n5.newnetif(net=eth2, addrlist=[\"10.0.2.21/24\"], ifindex=0)\n 
n4.newnetif(net=eth3, addrlist=[\"10.0.3.20/24\"], ifindex=1)\n n5.newnetif(net=eth3, addrlist=[\"10.0.3.21/24\"], ifindex=1)\n n2.newnetif(net=eth4, addrlist=[\"10.0.4.20/24\"], ifindex=2)\n n3.newnetif(net=eth4, addrlist=[\"10.0.4.21/24\"], ifindex=0)\n n5.newnetif(net=eth5, addrlist=[\"10.0.5.20/24\"], ifindex=2)\n n6.newnetif(net=eth5, addrlist=[\"10.0.5.21/24\"], ifindex=0)\n n3.newnetif(net=eth6, addrlist=[\"10.0.6.20/24\"], ifindex=1)\n n6.newnetif(net=eth6, addrlist=[\"10.0.6.21/24\"], ifindex=1)\n n4.newnetif(net=eth7, addrlist=[\"10.0.7.20/24\"], ifindex=2)\n n7.newnetif(net=eth7, addrlist=[\"10.0.7.21/24\"], ifindex=0)\n n5.newnetif(net=eth8, addrlist=[\"10.0.8.20/24\"], ifindex=3)\n n8.newnetif(net=eth8, addrlist=[\"10.0.8.21/24\"], ifindex=0)\n n6.newnetif(net=eth9, addrlist=[\"10.0.9.20/24\"], ifindex=2)\n n9.newnetif(net=eth9, addrlist=[\"10.0.9.21/24\"], ifindex=0)\n n4.newnetif(net=eth10, addrlist=[\"10.0.10.20/24\"], ifindex=3)\n n8.newnetif(net=eth10, addrlist=[\"10.0.10.21/24\"], ifindex=1)\n n5.newnetif(net=eth11, addrlist=[\"10.0.11.20/24\"], ifindex=4)\n n9.newnetif(net=eth11, addrlist=[\"10.0.11.21/24\"], ifindex=1)\n n6.newnetif(net=eth12, addrlist=[\"10.0.12.20/24\"], ifindex=3)\n n8.newnetif(net=eth12, addrlist=[\"10.0.12.21/24\"], ifindex=2)\n n7.newnetif(net=eth13, addrlist=[\"10.0.13.20/24\"], ifindex=1)\n n8.newnetif(net=eth13, addrlist=[\"10.0.13.21/24\"], ifindex=3)\n n5.newnetif(net=eth14, addrlist=[\"10.0.14.20/24\"], ifindex=5)\n n7.newnetif(net=eth14, addrlist=[\"10.0.14.21/24\"], ifindex=2)\n n9.newnetif(net=eth15, addrlist=[\"10.0.15.20/24\"], ifindex=2)\n n8.newnetif(net=eth15, addrlist=[\"10.0.15.21/24\"], ifindex=4)\n n9.newnetif(net=eth16, addrlist=[\"10.0.16.20/24\"], ifindex=3)\n n12.newnetif(net=eth16, addrlist=[\"10.0.16.21/24\"], ifindex=0)\n n8.newnetif(net=eth17, addrlist=[\"10.0.17.20/24\"], ifindex=5)\n n11.newnetif(net=eth17, addrlist=[\"10.0.17.21/24\"], ifindex=0)\n n7.newnetif(net=eth18, addrlist=[\"10.0.18.20/24\"], ifindex=3)\n n10.newnetif(net=eth18, addrlist=[\"10.0.18.21/24\"], ifindex=0)\n\n # start the session\n session.instantiate()\n\n # start a terminal with the run command \n n1.shcmd(\"/home/core/Desktop/code/p3/src/scripts/run.sh &\")\n n2.shcmd(\"/home/core/Desktop/code/p3/src/scripts/run.sh &\")\n n3.shcmd(\"/home/core/Desktop/code/p3/src/scripts/run.sh &\")\n n4.shcmd(\"/home/core/Desktop/code/p3/src/scripts/run.sh &\")\n n5.shcmd(\"/home/core/Desktop/code/p3/src/scripts/run.sh &\")\n n6.shcmd(\"/home/core/Desktop/code/p3/src/scripts/run.sh &\")\n n7.shcmd(\"/home/core/Desktop/code/p3/src/scripts/run.sh &\")\n n8.shcmd(\"/home/core/Desktop/code/p3/src/scripts/run.sh &\")\n n9.shcmd(\"/home/core/Desktop/code/p3/src/scripts/run.sh &\")\n n10.shcmd(\"/home/core/Desktop/code/p3/src/scripts/run.sh &\")\n n11.shcmd(\"/home/core/Desktop/code/p3/src/scripts/run.sh &\")\n n12.shcmd(\"/home/core/Desktop/code/p3/src/scripts/run.sh &\")\n\n\nif __name__ == \"__main__\" or __name__ == \"__builtin__\":\n main()\n" } ]
16
pitchireddy4a3/basic-linux-cmd-day-1
https://github.com/pitchireddy4a3/basic-linux-cmd-day-1
9c82f6bdeaa9dfea13aa03e4806de0d087a0dfc8
679e0d62305b801459ccf9c52680fd67ed4c7cdd
e3f27d1e3881f3d4ebbfbfd4bb9e207820c27f96
refs/heads/master
2023-03-07T10:41:33.217435
2021-02-09T10:52:46
2021-02-09T10:52:46
337,307,886
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6785714030265808, "alphanum_fraction": 0.6785714030265808, "avg_line_length": 26, "blob_id": "b24f80bfa700161fd30e58828cfa30bb90a5ddc0", "content_id": "27b1693bbda4406a3d0a1e858c1d829828bc0f41", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 28, "license_type": "no_license", "max_line_length": 26, "num_lines": 1, "path": "/ram.py", "repo_name": "pitchireddy4a3/basic-linux-cmd-day-1", "src_encoding": "UTF-8", "text": "print(\"go to lunch break\")\n\n" }, { "alpha_fraction": 0.6363636255264282, "alphanum_fraction": 0.6363636255264282, "avg_line_length": 15.5, "blob_id": "ca46cb010645fee308e2b4f12cb91ffc2be1b82f", "content_id": "2bcfc024f370ce2db29f14a3b6b0d9ddcb6baeb2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 33, "license_type": "no_license", "max_line_length": 19, "num_lines": 2, "path": "/hii.py", "repo_name": "pitchireddy4a3/basic-linux-cmd-day-1", "src_encoding": "UTF-8", "text": "print(\"hello nnN\")\nprint(\"hii\")\n" } ]
2
jachicao/easycancha
https://github.com/jachicao/easycancha
942df473c4b29e8b7c577f193bba176d5db06a87
55dcc04c42f2f4df1e07db96e486cd0714120f87
6d670e7bdd235f6d7766c2a8afb8b46278615db3
refs/heads/master
2020-03-28T09:55:28.345905
2018-11-19T02:10:30
2018-11-19T02:10:30
148,069,733
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6387622356414795, "alphanum_fraction": 0.6543973684310913, "avg_line_length": 24.37190055847168, "blob_id": "d84be65ebc0bfbb1e58726413966fc64b944fbe1", "content_id": "bb3abd359636dcbfdd57d4a5fb07db621b9a08b7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3070, "license_type": "no_license", "max_line_length": 76, "num_lines": 121, "path": "/app/easycancha/models.py", "repo_name": "jachicao/easycancha", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.db.models.signals import pre_save\nfrom django.utils.timezone import localtime\nfrom .cipher import AESCipher\n\nLENGTH_16 = 16\nLENGTH_32 = 32\nLENGTH_64 = 64\nLENGTH_128 = 128\nLENGTH_256 = 256\n\n# Create your models here.\n\n\nclass Platform(models.Model):\n NAME_EASYCANCHA = 'easycancha'\n SEED_LIST = [\n NAME_EASYCANCHA\n ]\n name = models.CharField(unique=True, max_length=LENGTH_32)\n\n def __str__(self):\n return f'{self.name}'\n\n\nclass PlatformUser(models.Model):\n username = models.CharField(unique=True, max_length=LENGTH_32)\n password = models.CharField(max_length=LENGTH_256)\n\n platform = models.ForeignKey(Platform, on_delete=models.CASCADE)\n\n def __str__(self):\n return f'{self.username} - {self.platform}'\n\n\ndef encrypt(string):\n aes = AESCipher()\n return aes.encrypt(string).decode()\n\n\ndef platformuser_pre_save(sender, instance, *args, **kwargs):\n instance.password = encrypt(instance.password)\n\n\npre_save.connect(platformuser_pre_save, sender=PlatformUser)\n\n\nclass Weekday(models.Model):\n NAME_DICT = {\n 1: 'Monday',\n 2: 'Tuesday',\n 3: 'Wednesday',\n 4: 'Thursday',\n 5: 'Friday',\n 6: 'Saturday',\n 7: 'Sunday',\n }\n\n number = models.IntegerField(unique=True)\n name = models.CharField(max_length=LENGTH_32)\n\n def __str__(self):\n return f'{self.name}'\n\n\nclass Sport(models.Model):\n NAME_TENNIS = 'Tenis'\n SEED_LIST = [\n NAME_TENNIS\n ]\n name = models.CharField(unique=True, max_length=LENGTH_128)\n\n def __str__(self):\n return f'{self.name}'\n\n\nclass Club(models.Model):\n name = models.CharField(unique=True, max_length=LENGTH_128)\n easycancha_id = models.IntegerField(unique=True)\n\n def __str__(self):\n return f'{self.name}'\n\n\nclass ClubSport(models.Model):\n club = models.ForeignKey(Club, on_delete=models.CASCADE)\n sport = models.ForeignKey(Sport, on_delete=models.CASCADE)\n\n def __str__(self):\n return f'{self.club} - {self.sport}'\n\n class Meta:\n unique_together = ('club', 'sport')\n\n\nclass RecurrentReservation(models.Model):\n clubsport = models.ForeignKey(ClubSport, on_delete=models.CASCADE)\n weekday = models.ForeignKey(Weekday, on_delete=models.CASCADE)\n\n platformuser = models.ForeignKey(PlatformUser, on_delete=models.CASCADE)\n\n hour = models.IntegerField()\n minute = models.IntegerField()\n duration = models.IntegerField()\n\n def __str__(self):\n return f'{self.clubsport} - {self.weekday} - ' \\\n f'{str(self.hour).zfill(2)}:{str(self.minute).zfill(2)} - '\\\n f'{self.duration} minutes'\n\n\nclass OneTimeReservation(models.Model):\n clubsport = models.ForeignKey(ClubSport, on_delete=models.CASCADE)\n datetime = models.DateTimeField()\n duration = models.IntegerField()\n\n platformuser = models.ForeignKey(PlatformUser, on_delete=models.CASCADE)\n\n def __str__(self):\n return f'{self.clubsport} - {localtime(self.datetime)} '\\\n f'- {self.duration} minutes'\n" }, { "alpha_fraction": 0.7684210538864136, "alphanum_fraction": 0.7684210538864136, "avg_line_length": 18, 
"blob_id": "057bb7321fa0984c11412478f0c4f1b41e192fe9", "content_id": "1c3d9d6860602e02655ff82a5960e091e2dba1e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 95, "license_type": "no_license", "max_line_length": 34, "num_lines": 5, "path": "/app/easycancha/apps.py", "repo_name": "jachicao/easycancha", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\n\n\nclass EasycanchaConfig(AppConfig):\n name = 'easycancha'\n" }, { "alpha_fraction": 0.6869983673095703, "alphanum_fraction": 0.6934189200401306, "avg_line_length": 26.086956024169922, "blob_id": "d9b6fb966480a6813705d1bc9e59d17a98cf5324", "content_id": "8ca6a5d608e116b39ffbb9e9792e8831e07b025f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 623, "license_type": "no_license", "max_line_length": 92, "num_lines": 23, "path": "/Dockerfile", "repo_name": "jachicao/easycancha", "src_encoding": "UTF-8", "text": "FROM python:3.6-alpine\n\nRUN apk update && apk upgrade && \\\n echo @edge http://nl.alpinelinux.org/alpine/edge/community >> /etc/apk/repositories && \\\n echo @edge http://nl.alpinelinux.org/alpine/edge/main >> /etc/apk/repositories\n\nRUN apk update \\\n && apk add --virtual build-deps g++ gcc python3-dev musl-dev git libffi-dev \\\n && apk add postgresql-dev postgresql-client libxslt-dev \\\n && pip install psycopg2 pycrypto \\\n && apk del build-deps \\\n && rm -rf /var/cache/apk/*\n\nRUN mkdir /code\nWORKDIR /code\n\nCOPY requirements.txt ./\n\nRUN pip install --no-cache-dir -r requirements.txt\n\nCOPY . .\n\nWORKDIR /code/app\n" }, { "alpha_fraction": 0.6917613744735718, "alphanum_fraction": 0.6917613744735718, "avg_line_length": 36.05263137817383, "blob_id": "3517b05f8fbecc289b76837f70545e9db9027db7", "content_id": "82297a820bea4fc1e12eadfc636c38ba96688b2c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 704, "license_type": "no_license", "max_line_length": 77, "num_lines": 19, "path": "/app/seed.py", "repo_name": "jachicao/easycancha", "src_encoding": "UTF-8", "text": "from app import wsgi # noqa\nfrom os import environ\nfrom easycancha.models import Weekday, Sport, Platform, PlatformUser\n\nfor name in Sport.SEED_LIST:\n Sport.objects.get_or_create(name=name)\n\nfor number, name in Weekday.NAME_DICT.items():\n Weekday.objects.get_or_create(number=number, defaults=dict(name=name))\n\nfor name in Platform.SEED_LIST:\n platform, _ = Platform.objects.get_or_create(name=name)\n\n if 'EASYCANCHA_USERNAME' in environ and 'EASYCANCHA_PASSWORD' in environ:\n PlatformUser.objects.get_or_create(\n username=environ['EASYCANCHA_USERNAME'], defaults=dict(\n password=environ['EASYCANCHA_PASSWORD'],\n platform_id=platform.id\n ))\n" }, { "alpha_fraction": 0.6363528966903687, "alphanum_fraction": 0.6445021629333496, "avg_line_length": 31.071969985961914, "blob_id": "64268f90c8e11738a14b6be7cbebcb63c5b62380", "content_id": "da74ea2a01d6f9189346443ac20427bd99e74adc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8472, "license_type": "no_license", "max_line_length": 79, "num_lines": 264, "path": "/app/easycancha/tasks.py", "repo_name": "jachicao/easycancha", "src_encoding": "UTF-8", "text": "# coding=utf-8\nfrom time import sleep\nfrom pytz import timezone\nfrom datetime import datetime as datetime_datetime, timedelta\nfrom selenium.webdriver.support.ui import Select, WebDriverWait\nfrom 
selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions\n\nHEADLESS = True\n\nURL_LOGIN = 'https://www.easycancha.com/login'\nURL_RESERVATIONS = 'https://www.easycancha.com/bookings'\nURL_CLUB = 'https://www.easycancha.com/book/clubs/{club_id}/sports'\n\nLOGIN_USERNAME_XPATH = '//input[@type=\"email\"]'\nLOGIN_PASSWORD_XPATH = '//input[@type=\"password\"]'\nLOGIN_BUTTON_XPATH = '//button[text()=\"Ingresar\"]'\n\nRESERVATIONS_XPATH = '//div[contains(text(), \"Mis reservas\")]'\nHAS_RESERVATION_XPATH = '//span[contains(text(), \"reserva activa\")]'\nSHOW_MORE_RESERVATIONS = '//button[contains(text(), \"Cargar más reservas\")]'\nRESERVATION_CARDS_XPATH = '//div[@class=\"bookingContainer\"]'\nRESERVATION_RELATIVE_DATE_XPATH = './div[3]/div[1]'\nRESERVATION_RELATIVE_HOUR_XPATH = './div[3]/div[2]'\n\nLOADING_XPATH = '//div[@class=\"loading\"]'\n\nSPORT_TYPE_XPATH = '//div[contains(text(), \"{type}\")]/../../..'\n\nMODAL_XPATH = '//div[@class=\"modal-dialog modal-md\"]'\nMONTH_YEAR_XPATH = '//th[@colspan=\"5\"]'\nMONTH_XPATH = '//span[text()=\"{month_name}\"]/..'\nDAY_XPATH = '//span[text()=\"{day_number}\"]/..'\nSELECT_TIME_XPATH = '//select[@id=\"time\"]'\nOPTION_TIME_VALUE_FORMAT = 'string:{hour}:{minute}:{second}'\nSELECT_DURATION_XPATH = '//*[@id=\"timespan\"]'\nOPTION_DURATION_VALUE_FORMAT = 'number:{duration}'\nSEARCH_XPATH = '//button[text()=\"Buscar\"]'\n\nNOT_FOUND_XPATH = '//div[contains(text(), \"No se encontraron resultados\")]'\n\nCOURT_OPTIONS_XPATH = '//strong[text()=\"RESERVAR\"]/..'\nRESERVE_XPATH = '//button[text()=\"Reservar\"]'\n\nRESERVE_DONE_XPATH = '//*[contains(text(), \"¡ Tu reserva ya está lista !\")]'\n\nWAIT_TIME = 60\n\nchile_timezone = timezone('America/Santiago')\n\nMONTH_TRANSFORMATION = {\n    1: 'enero',\n    2: 'febrero',\n    3: 'marzo',\n    4: 'abril',\n    5: 'mayo',\n    6: 'junio',\n    7: 'julio',\n    8: 'agosto',\n    9: 'septiembre',\n    10: 'octubre',\n    11: 'noviembre',\n    12: 'diciembre'\n}\n\nMONTH_NAME_TO_NUMBER = {\n    'enero': 1,\n    'febrero': 2,\n    'marzo': 3,\n    'abril': 4,\n    'mayo': 5,\n    'junio': 6,\n    'julio': 7,\n    'agosto': 8,\n    'septiembre': 9,\n    'octubre': 10,\n    'noviembre': 11,\n    'diciembre': 12,\n}\n\nWEEKDAY_TRANSFORMATION = {\n    'Lunes': 0,\n    'Martes': 1,\n    'Miércoles': 2,\n    'Jueves': 3,\n    'Viernes': 4,\n    'Sábado': 5,\n    'Domingo': 6\n}\n\n\ndef has_element_by_xpath(driver, xpath):\n    try:\n        driver.find_element_by_xpath(xpath)\n        return True\n    except Exception as e:\n        return False\n\n\ndef is_displayed(element):\n    if element.is_displayed():\n        return True\n    try:\n        attribute = element.get_attribute('style')\n        return 'display: block' in attribute\n    except Exception as e:\n        return False\n\n\ndef has_element_displayed_by_xpath(driver, xpath):\n    try:\n        element = driver.find_element_by_xpath(xpath)\n        return is_displayed(element)\n    except Exception as e:\n        return False\n\n\ndef wait_element_displayed(element):\n    counter = 0\n    while (not is_displayed(element)) and counter < WAIT_TIME:\n        sleep(0.1)\n        counter += 0.1\n    return is_displayed(element)\n\n\ndef wait_loading_by_xpath(driver, xpath):\n    element = driver.find_element_by_xpath(xpath)\n    counter = 0\n    started = False\n    while counter < WAIT_TIME:\n        is_disp = is_displayed(element)\n        if not started:\n            started = is_disp\n\n        if started:\n            if not is_disp:\n                break\n\n        sleep(0.1)\n        counter += 0.1\n\n    sleep(2)\n\n\ndef wait_element_displayed_by_xpath(driver, xpath):\n    element = WebDriverWait(driver, WAIT_TIME).until(\n        expected_conditions.presence_of_element_located((\n            By.XPATH, xpath)))\n    
return wait_element_displayed(element)\n\n\ndef click_element_by_xpath(driver, xpath):\n element = driver.find_element_by_xpath(xpath)\n try:\n element.click()\n except Exception as e:\n driver.execute_script('arguments[0].click();', element)\n\n\ndef select_option_by_xpath(driver, xpath, value):\n element = driver.find_element_by_xpath(xpath)\n Select(element).select_by_value(value)\n\n\ndef login(driver, username, password):\n user_element = driver.find_element_by_xpath(LOGIN_USERNAME_XPATH)\n user_element.clear()\n user_element.send_keys(username)\n password_element = driver.find_element_by_xpath(LOGIN_PASSWORD_XPATH)\n password_element.clear()\n password_element.send_keys(password)\n click_element_by_xpath(driver, LOGIN_BUTTON_XPATH)\n wait_loading_by_xpath(driver, LOADING_XPATH)\n\n\ndef reserve_date(\n driver, username, password, sport_type, club_id, datetime, duration):\n print(sport_type, club_id, datetime, duration)\n if datetime < datetime_datetime.now(chile_timezone):\n return\n if datetime - datetime_datetime.now(chile_timezone) > timedelta(\n weeks=1, days=1):\n return\n\n # login\n driver.get(URL_LOGIN)\n\n while has_element_by_xpath(driver, LOGIN_USERNAME_XPATH):\n login(driver, username, password)\n\n # check for old reservations\n driver.get(URL_RESERVATIONS)\n if has_element_displayed_by_xpath(driver, LOADING_XPATH):\n wait_loading_by_xpath(driver, LOADING_XPATH)\n\n sleep(2)\n\n if has_element_by_xpath(driver, SHOW_MORE_RESERVATIONS):\n click_element_by_xpath(driver, SHOW_MORE_RESERVATIONS)\n\n for element in driver.find_elements_by_xpath(RESERVATION_CARDS_XPATH):\n date_text = element.find_element_by_xpath(\n RESERVATION_RELATIVE_DATE_XPATH).get_attribute('innerText').strip()\n day_name, day_number, month_name, year_number = date_text.split(' ')\n month_number = MONTH_NAME_TO_NUMBER.get(month_name)\n hour_text = element.find_element_by_xpath(\n RESERVATION_RELATIVE_HOUR_XPATH).get_attribute('innerText').strip()\n hour, minute = hour_text.split(':')\n reservation_datetime = datetime_datetime(\n int(year_number), int(month_number), int(day_number), int(hour),\n int(minute), 0, 0, chile_timezone)\n if reservation_datetime - datetime < timedelta(minutes=duration) or \\\n datetime - reservation_datetime < timedelta(minutes=duration):\n return\n\n # reserve new\n driver.get(URL_CLUB.format(club_id=club_id))\n\n sport_xpath = SPORT_TYPE_XPATH.format(type=sport_type)\n wait_element_displayed_by_xpath(driver, sport_xpath)\n click_element_by_xpath(driver, sport_xpath)\n wait_element_displayed_by_xpath(driver, MODAL_XPATH)\n click_element_by_xpath(driver, MONTH_YEAR_XPATH)\n month_xpath = MONTH_XPATH.format(\n month_name=MONTH_TRANSFORMATION.get(datetime.month))\n wait_element_displayed_by_xpath(driver, month_xpath)\n click_element_by_xpath(driver, month_xpath)\n day_xpath = DAY_XPATH.format(\n day_number=str(datetime.day).zfill(2))\n wait_element_displayed_by_xpath(driver, day_xpath)\n click_element_by_xpath(driver, day_xpath)\n select_option_by_xpath(driver, SELECT_TIME_XPATH,\n OPTION_TIME_VALUE_FORMAT.format(\n hour=str(datetime.hour).zfill(2),\n minute=str(datetime.minute).zfill(2),\n second=str(datetime.second).zfill(2)\n ))\n select_option_by_xpath(driver, SELECT_DURATION_XPATH,\n OPTION_DURATION_VALUE_FORMAT.format(\n duration=duration\n ))\n\n click_element_by_xpath(driver, SEARCH_XPATH)\n wait_loading_by_xpath(driver, LOADING_XPATH)\n if has_element_by_xpath(driver, LOGIN_USERNAME_XPATH):\n login(driver, username, password)\n wait_loading_by_xpath(driver, 
LOADING_XPATH)\n\n if has_element_by_xpath(driver, NOT_FOUND_XPATH):\n return\n\n wait_element_displayed_by_xpath(driver, COURT_OPTIONS_XPATH)\n click_element_by_xpath(driver, COURT_OPTIONS_XPATH)\n wait_element_displayed_by_xpath(driver, RESERVE_XPATH)\n click_element_by_xpath(driver, RESERVE_XPATH)\n wait_element_displayed_by_xpath(driver, RESERVE_DONE_XPATH)\n\n\ndef get_next_weekday(date, weekday):\n difference = weekday - date.weekday()\n days_ahead = difference % 7\n if difference == 0:\n days_ahead = 7\n return date + timedelta(days_ahead)\n" }, { "alpha_fraction": 0.8127853870391846, "alphanum_fraction": 0.8127853870391846, "avg_line_length": 30.285715103149414, "blob_id": "817332c2da80b95ee611e499aad5cc96499006f7", "content_id": "060b7b9a1ad3252909bc0bdcdb07e83c3f5a6832", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 438, "license_type": "no_license", "max_line_length": 79, "num_lines": 14, "path": "/app/easycancha/admin.py", "repo_name": "jachicao/easycancha", "src_encoding": "UTF-8", "text": "from django.contrib import admin\n\n# Register your models here.\nfrom .models import Club, ClubSport, RecurrentReservation, OneTimeReservation,\\\n Platform, PlatformUser\n\n# admin.site.register(Weekday)\n# admin.site.register(Sport)\nadmin.site.register(Platform)\nadmin.site.register(PlatformUser)\nadmin.site.register(Club)\nadmin.site.register(ClubSport)\nadmin.site.register(RecurrentReservation)\nadmin.site.register(OneTimeReservation)\n" }, { "alpha_fraction": 0.859375, "alphanum_fraction": 0.875, "avg_line_length": 8.142857551574707, "blob_id": "1133ff99d9b8426bc359edea6681676ec0f85236", "content_id": "377362d212a7488c320dab0feea44feb0822a9f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 64, "license_type": "no_license", "max_line_length": 13, "num_lines": 7, "path": "/requirements.txt", "repo_name": "jachicao/easycancha", "src_encoding": "UTF-8", "text": "selenium\npython-dotenv\npytz\ndjango\npsycopg2\nwhitenoise\npycrypto\n" }, { "alpha_fraction": 0.6963939666748047, "alphanum_fraction": 0.6971694231033325, "avg_line_length": 31.64556884765625, "blob_id": "3c04cc2a06617abc3dfec4197bc2ab60cbdd58d2", "content_id": "3547271a3267cfc24bbc11bdd444be4cc0f3646d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2579, "license_type": "no_license", "max_line_length": 72, "num_lines": 79, "path": "/app/main.py", "repo_name": "jachicao/easycancha", "src_encoding": "UTF-8", "text": "from app import wsgi # noqa\nfrom pytz import timezone\nfrom traceback import print_exc\nfrom django.utils.timezone import localtime\nfrom datetime import datetime as datetime_datetime\nfrom selenium.webdriver import Chrome\nfrom selenium.webdriver.chrome.options import Options as ChromeOptions\nfrom easycancha.models import RecurrentReservation, OneTimeReservation\nfrom easycancha.tasks import reserve_date, get_next_weekday\nfrom easycancha.cipher import AESCipher\n\n\ndef decrypt(string):\n aes = AESCipher()\n return aes.decrypt(string)\n\n\nHEADLESS = True\n\nchile_timezone = timezone('America/Santiago')\n\n\nchrome_options = ChromeOptions()\n\nif HEADLESS:\n chrome_options.add_argument('--no-sandbox')\n chrome_options.add_argument('--disable-dev-shm-usage')\n chrome_options.add_argument('--headless')\n chrome_options.add_argument('--disable-gpu')\n\ndriver = Chrome(chrome_options=chrome_options)\n\nfor recurrentreservation in 
RecurrentReservation.objects.select_related(\n 'clubsport', 'clubsport__club',\n 'clubsport__sport', 'weekday', 'platformuser').iterator():\n clubsport = recurrentreservation.clubsport\n sport = clubsport.sport\n club = clubsport.club\n platformuser = recurrentreservation.platformuser\n weekday = recurrentreservation.weekday\n now = datetime_datetime.now(chile_timezone)\n next_date = \\\n get_next_weekday(now, weekday.number).date()\n next_datetime = datetime_datetime(\n next_date.year, next_date.month, next_date.day,\n recurrentreservation.hour,\n recurrentreservation.minute, 0, 0, chile_timezone)\n try:\n reserve_date(\n driver,\n platformuser.username,\n decrypt(platformuser.password),\n sport.name, club.easycancha_id,\n next_datetime, recurrentreservation.duration)\n except Exception as e:\n print(e)\n print_exc()\n\nfor onetimereservation in OneTimeReservation.objects.select_related(\n 'clubsport', 'clubsport__club',\n 'clubsport__sport', 'platformuser').iterator():\n clubsport = onetimereservation.clubsport\n sport = clubsport.sport\n club = clubsport.club\n platformuser = onetimereservation.platformuser\n try:\n reserve_date(\n driver,\n platformuser.username,\n decrypt(platformuser.password),\n sport.name, club.easycancha_id,\n localtime(\n onetimereservation.datetime, chile_timezone),\n onetimereservation.duration)\n except Exception as e:\n print(e)\n print_exc()\n\ndriver.quit()\n" }, { "alpha_fraction": 0.6461538672447205, "alphanum_fraction": 0.6615384817123413, "avg_line_length": 9.833333015441895, "blob_id": "8d6a387cf7963c17b3c44f1f9c2526dd8a3b3b79", "content_id": "ad6d2acda7e8f3ccadc9234cae5dc870b13220fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 65, "license_type": "no_license", "max_line_length": 38, "num_lines": 6, "path": "/execute.sh", "repo_name": "jachicao/easycancha", "src_encoding": "UTF-8", "text": "#!/bin/sh\nset -e\n\ndocker-compose exec django python ${@}\n\nexit 0\n" }, { "alpha_fraction": 0.4888226389884949, "alphanum_fraction": 0.5275707840919495, "avg_line_length": 18.171428680419922, "blob_id": "c40bf1eca6c7ee96b7bb857d62dd82ec074279cf", "content_id": "31db568d00b254798c5068443e7fe33d98baee5e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "YAML", "length_bytes": 671, "license_type": "no_license", "max_line_length": 52, "num_lines": 35, "path": "/docker-compose.yml", "repo_name": "jachicao/easycancha", "src_encoding": "UTF-8", "text": "version: '3'\n\nservices:\n postgres:\n image: postgres:alpine\n ports:\n - '5432:5432'\n volumes:\n - ./postgres-data:/var/lib/postgresql/data\n env_file: .env\n environment:\n TZ: 'America/Santiago'\n PGTZ: 'America/Santiago'\n restart: always\n django:\n build: .\n command: >\n /bin/sh -c \"\n until psql -h postgres -U \"develop\" -c '\\q';\n do\n sleep 1;\n done;\n sleep 10;\n python manage.py runserver 0.0.0.0:8000;\n \"\n restart: always\n links:\n - postgres\n depends_on:\n - postgres\n ports:\n - '80:8000'\n env_file: .env\n environment:\n IS_DOCKER: 'true'\n" }, { "alpha_fraction": 0.6135265827178955, "alphanum_fraction": 0.6388888955116272, "avg_line_length": 30.846153259277344, "blob_id": "d28df832c9eb3278ea0b58af3efe903f56e9e1f4", "content_id": "9b88a29021091cbb862f316a55d2e683f98dc441", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 828, "license_type": "no_license", "max_line_length": 122, "num_lines": 26, "path": 
"/app/easycancha/migrations/0003_auto_20181118_2300.py", "repo_name": "jachicao/easycancha", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.2 on 2018-11-19 02:00\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('easycancha', '0002_platform_platformuser'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='onetimereservation',\n name='platformuser',\n field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='easycancha.PlatformUser'),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='recurrentreservation',\n name='platformuser',\n field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='easycancha.PlatformUser'),\n preserve_default=False,\n ),\n ]\n" }, { "alpha_fraction": 0.5467440485954285, "alphanum_fraction": 0.554158627986908, "avg_line_length": 38.769229888916016, "blob_id": "2ea49d0b3d9fc5f9be74e56aca1deb054d9bf332", "content_id": "e754f9a9491fe2bad88e783e1f7f50c2e95d9f97", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3102, "license_type": "no_license", "max_line_length": 121, "num_lines": 78, "path": "/app/easycancha/migrations/0001_initial.py", "repo_name": "jachicao/easycancha", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.2 on 2018-10-28 23:39\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Club',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=128, unique=True)),\n ('easycancha_id', models.IntegerField(unique=True)),\n ],\n ),\n migrations.CreateModel(\n name='ClubSport',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('club', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='easycancha.Club')),\n ],\n ),\n migrations.CreateModel(\n name='OneTimeReservation',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('datetime', models.DateTimeField()),\n ('duration', models.IntegerField()),\n ('clubsport', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='easycancha.ClubSport')),\n ],\n ),\n migrations.CreateModel(\n name='RecurrentReservation',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('hour', models.IntegerField()),\n ('minute', models.IntegerField()),\n ('duration', models.IntegerField()),\n ('clubsport', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='easycancha.ClubSport')),\n ],\n ),\n migrations.CreateModel(\n name='Sport',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=128, unique=True)),\n ],\n ),\n migrations.CreateModel(\n name='Weekday',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('number', models.IntegerField(unique=True)),\n ('name', models.CharField(max_length=32)),\n ],\n ),\n migrations.AddField(\n model_name='recurrentreservation',\n name='weekday',\n 
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='easycancha.Weekday'),\n ),\n migrations.AddField(\n model_name='clubsport',\n name='sport',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='easycancha.Sport'),\n ),\n migrations.AlterUniqueTogether(\n name='clubsport',\n unique_together={('club', 'sport')},\n ),\n ]\n" }, { "alpha_fraction": 0.5519230961799622, "alphanum_fraction": 0.5769230723381042, "avg_line_length": 33.66666793823242, "blob_id": "ca5b51cdf4f04c370a379b9ff943626a1d182e8d", "content_id": "67773d8a94c1b21a3cca105adfb3dcad2f09be14", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1040, "license_type": "no_license", "max_line_length": 119, "num_lines": 30, "path": "/app/easycancha/migrations/0002_platform_platformuser.py", "repo_name": "jachicao/easycancha", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.2 on 2018-11-19 01:40\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('easycancha', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Platform',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=32, unique=True)),\n ],\n ),\n migrations.CreateModel(\n name='PlatformUser',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('username', models.CharField(max_length=32, unique=True)),\n ('password', models.CharField(max_length=256)),\n ('platform', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='easycancha.Platform')),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.5785440802574158, "alphanum_fraction": 0.5957854390144348, "avg_line_length": 29.705883026123047, "blob_id": "0affc35590127e3ac3374f2ea86192c440797f15", "content_id": "5d2fb47614c443dbf149fa1621f43a0c4dd8a026", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1044, "license_type": "no_license", "max_line_length": 72, "num_lines": 34, "path": "/app/easycancha/cipher.py", "repo_name": "jachicao/easycancha", "src_encoding": "UTF-8", "text": "import base64\nimport hashlib\nimport struct\n\nfrom Crypto import Random\nfrom Crypto.Cipher import AES\nfrom django.conf import settings\n\n\nclass AESCipher(object):\n def __init__(self):\n self.bs = AES.block_size\n self.key = hashlib.sha256(settings.SECRET_KEY.encode()).digest()\n\n def _pad(self, s):\n return s + (self.bs - len(s) % self.bs) * '0'\n\n def encrypt(self, raw):\n raw_size = len(raw)\n raw_bytes = self._pad(raw)\n raw_size_bytes = struct.pack('<i', raw_size)\n iv = Random.new().read(AES.block_size)\n cipher = AES.new(self.key, AES.MODE_CBC, iv)\n return base64.b64encode(\n iv + raw_size_bytes + cipher.encrypt(raw_bytes))\n\n def decrypt(self, enc):\n enc = base64.b64decode(enc)\n iv = enc[:self.bs]\n raw_size = struct.unpack('<i', enc[self.bs:self.bs + 4])[0]\n cipher = AES.new(self.key, AES.MODE_CBC, iv)\n raw_bytes = cipher.decrypt(enc[self.bs + 4:])\n raw = raw_bytes[:raw_size].decode('utf_8')\n return raw\n" } ]
14
dmarcus-wire/image-data-pipeline
https://github.com/dmarcus-wire/image-data-pipeline
252e18bf9f22e5a69f6b972991b36a88f9e2998a
45f0c03d079f7dcb710c156d6dfc697f62d0b5e3
46e0cbf3ef1bc0ad4bdf1178d95b3317ac949e9f
refs/heads/master
2023-04-14T18:58:11.117594
2021-04-26T19:50:49
2021-04-26T19:50:49
361,875,705
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6973865032196045, "alphanum_fraction": 0.7070151567459106, "avg_line_length": 30.65217399597168, "blob_id": "fa41a07a6974bb187c583015dca69336627142fb", "content_id": "313c31cfce751c9f93967460b47ca04013a0fd98", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 727, "license_type": "no_license", "max_line_length": 133, "num_lines": 23, "path": "/scripts/load-display-save.py", "repo_name": "dmarcus-wire/image-data-pipeline", "src_encoding": "UTF-8", "text": "# USAGE\n# python scripts/load-display-save.py -i images/input/uss-freedom.jpg\n\n# import necessary packages\nimport argparse\nimport imutils\nimport cv2\n\n# construct the argument parser\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--image\", required=True, default=\"./images/input/uss-freedom.jpg\", # single switch image path to image on disk\n help=\"path to input image\")\nargs = vars(ap.parse_args())\n\n# load, dimensions, display and save\nimage = cv2.imread(args[\"image\"])\n(h, w, c) = image.shape[:3]\nprint(\"width: {} pixels\".format(w))\nprint(\"height: {} pixels\".format(h))\nprint(\"channels: {}\".format(c))\ncv2.imshow(\"Image\", image)\ncv2.imwrite(\"images/output/uss-freedom-original.jpg\", image)\ncv2.waitKey(0)" }, { "alpha_fraction": 0.7105262875556946, "alphanum_fraction": 0.7105262875556946, "avg_line_length": 8.75, "blob_id": "834bac75a73a07bbedf2a30dec1821cc889834d5", "content_id": "46c67564fc515740ef78391d8cfcff65406c8eaa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 38, "license_type": "no_license", "max_line_length": 22, "num_lines": 4, "path": "/production.py", "repo_name": "dmarcus-wire/image-data-pipeline", "src_encoding": "UTF-8", "text": "# USAGE\n# python production.py\n\n# TODO" }, { "alpha_fraction": 0.8461538553237915, "alphanum_fraction": 0.8461538553237915, "avg_line_length": 12.11111068725586, "blob_id": "d14091ce0f7ee80ae3c062c102526ef0d271d4a7", "content_id": "0211f5020d9e51fbb5a1a36bd5b4d28f561867fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 117, "license_type": "no_license", "max_line_length": 23, "num_lines": 9, "path": "/requirements.txt", "repo_name": "dmarcus-wire/image-data-pipeline", "src_encoding": "UTF-8", "text": "# necessary packages\nargparse\nimutils\nopencv-python\nopencv-contrib-python\n\n# for jupyter notebooks\njupyter\nmatplotlib" }, { "alpha_fraction": 0.7086494565010071, "alphanum_fraction": 0.7564491629600525, "avg_line_length": 33.71052551269531, "blob_id": "db43a4e8dd6d5fe07771f1a0cb2ea3535caf5203", "content_id": "65626787c4e26fb75ebc88a4dbed9c31a99b62ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1318, "license_type": "no_license", "max_line_length": 91, "num_lines": 38, "path": "/scripts/rotate-in-bounds.py", "repo_name": "dmarcus-wire/image-data-pipeline", "src_encoding": "UTF-8", "text": "# USAGE\n# python scripts/rotate-in-bounds.py -i images/output/uss-freedom-cropped.jpg\n\n# import necessary packages\nimport argparse\nimport imutils\nimport cv2\n\n# construct the argument parser\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--image\", required=True, # single switch image path to image on disk\n help=\"path to input image\")\nargs = vars(ap.parse_args())\n\n# load and display original\nimage = cv2.imread(args[\"image\"]) # load image from disk\ncv2.imshow(\"Original\", 
image)\n\n# rotate and save\nrotate33cc = imutils.rotate_bound(image, -33) # rotate in bounds\ncv2.imshow(\"Rotated without cropping\", rotate33cc)\ncv2.imwrite(\"images/output/uss-freedom-rotate-33cc.jpg\", rotate33cc)\ncv2.waitKey(0)\n\nrotate45c = imutils.rotate_bound(image, 45) # rotate in bounds\ncv2.imshow(\"Rotated without cropping\", rotate45c)\ncv2.imwrite(\"images/output/uss-freedom-rotate-45c.jpg\", rotate45c)\ncv2.waitKey(0)\n\nrotate90cc = imutils.rotate_bound(image, -90) # rotate in bounds\ncv2.imshow(\"Rotated without cropping\", rotate90cc)\ncv2.imwrite(\"images/output/uss-freedom-rotate-90cc.jpg\", rotate90cc)\ncv2.waitKey(0)\n\nrotate180c = imutils.rotate_bound(image, 180) # rotate in bounds\ncv2.imshow(\"Rotated without cropping\", rotate180c)\ncv2.imwrite(\"images/output/uss-freedom-rotate-180c.jpg\", rotate180c)\ncv2.waitKey(0)" }, { "alpha_fraction": 0.664643406867981, "alphanum_fraction": 0.696509838104248, "avg_line_length": 27.69565200805664, "blob_id": "0b4ac56cffcb6662d5eeae65c009297e0b6ddb56", "content_id": "467d6c61e3029dd847a9988f86a80bc82d898c20", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 659, "license_type": "no_license", "max_line_length": 76, "num_lines": 23, "path": "/scripts/crop-to-object.py", "repo_name": "dmarcus-wire/image-data-pipeline", "src_encoding": "UTF-8", "text": "# USAGE\n# python scripts/crop-to-object.py -i images/output/uss-freedom-original.jpg\n\n# import necessary packages\nimport argparse\nimport cv2\n\n# construct the argument parser\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\",\"--image\", type=str,\n                help=\"path to input image\")\nargs = vars(ap.parse_args())\n\n# load, crop, save and show\nimage = cv2.imread(args[\"image\"])\ncropped = image[700:1050, 300:1400]\n(h, w, c) = cropped.shape[:3]\nprint(\"width: {} pixels\".format(w))\nprint(\"height: {} pixels\".format(h))\nprint(\"channels: {}\".format(c))\ncv2.imshow(\"Cropped\", cropped)\ncv2.imwrite(\"images/output/uss-freedom-cropped.jpg\", cropped)\ncv2.waitKey(0)" }, { "alpha_fraction": 0.6981339454650879, "alphanum_fraction": 0.7244786024093628, "avg_line_length": 29.399999618530273, "blob_id": "757b69cda888f2ed6da8326d5b146a7d6c08ef19", "content_id": "87bb34d32898a03ffc650f28e68642c9a0e9196f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 911, "license_type": "no_license", "max_line_length": 91, "num_lines": 30, "path": "/scripts/image-arithmetic.py", "repo_name": "dmarcus-wire/image-data-pipeline", "src_encoding": "UTF-8", "text": "# USAGE\n# python scripts/image-arithmetic.py -i images/output/uss-freedom-rotate-90cc.jpg\n\n# import necessary packages\nimport numpy as np\nimport argparse\nimport cv2\n\n# construct the argument parser\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--image\", required=True, # single switch image path to image on disk\n                help=\"path to input image\")\nargs = vars(ap.parse_args())\n\n# load and display original\nimage = cv2.imread(args[\"image\"]) # load image from disk\ncv2.imshow(\"Original\", image)\n\n# lighten, save\nM = np.ones(image.shape, dtype=\"uint8\") * 100\nadded = cv2.add(image, M)\ncv2.imshow(\"Lighter\", added)\ncv2.imwrite(\"images/output/uss-freedom-rotate-90cc-lt.jpg\", added)\n\n# darken, save\nM = np.ones(image.shape, dtype=\"uint8\") * 70\nsubtracted = cv2.subtract(image, M)\ncv2.imshow(\"Darker\", subtracted)\ncv2.imwrite(\"images/output/uss-freedom-rotate-90cc-dk.jpg\", 
subtracted)\ncv2.waitKey(0)" }, { "alpha_fraction": 0.7555555701255798, "alphanum_fraction": 0.7734767198562622, "avg_line_length": 33.04878234863281, "blob_id": "0bd833202924dd43996b5c5824d925e8138a9587", "content_id": "8f717454019322a4f9eabb70d6cfe3cd0c8e487c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1395, "license_type": "no_license", "max_line_length": 131, "num_lines": 41, "path": "/README.md", "repo_name": "dmarcus-wire/image-data-pipeline", "src_encoding": "UTF-8", "text": "# Data Pipeline\nBased on single image I/O. Could write for N image I/O.\n\n## Project Structure\n|name|item|description|\n|-|-|-|\n|images|image buckets|stores input images (ingest), writes to output images (normalized/generated)|\n|notebooks|notebooks per action|experimenting with pipeline phases and data|\n|test.py|test python script|generalized from the notebook, but not production|\n|production.py|production python script|lean and mean|\n\n## Use Case: iterating over a folder of updated imagery or video\n- create training data\n- control the data fed to the model\n\n## Pipeline (phase N because these can go in any order)\n- PHASE 00: load from disk, display and save original\n- PHASE 01: crop to object \n- PHASE 01: resize to 600 pixels (wide)\n- PHASE 02: rotate in bounds\n- PHASE N: flip\n- PHASE N: crop\n- PHASE N: arithmetic\n- PHASE N: cleanup output image\n\n## Use case: generate computer vision training data for identifying Littoral combat ships from aerial imagery\nCould be used for training data or data ingest normalization of healthcare (pills / blood cells), automotive (license plates), etc.\n1. Aircraft carriers\n1. Amphibious warfare ships\n1. Amphibious assault ships\n1. Amphibious command ships\n1. Amphibious transport docks\n1. Dock landing ships\n1. Expeditionary sea base\n1. Cruisers\n1. Destroyers\n1. Frigates\n1. **Littoral combat ships**\n1. Mine countermeasures ships\n1. Patrol ships\n1. Submarines" }, { "alpha_fraction": 0.7052096724510193, "alphanum_fraction": 0.7179161310195923, "avg_line_length": 31.83333396911621, "blob_id": "f324958f7b5c561394536d80c36fd54d10a7537d", "content_id": "7803afdc499705f5d4839acadd94b320334492a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 787, "license_type": "no_license", "max_line_length": 91, "num_lines": 24, "path": "/scripts/resize-image.py", "repo_name": "dmarcus-wire/image-data-pipeline", "src_encoding": "UTF-8", "text": "# USAGE\n# python scripts/resize-image.py -i images/output/uss-freedom-cropped.jpg\n\n# import necessary packages\nimport argparse\nimport imutils\nimport cv2\n\n# construct the argument parser\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--image\", required=True, # single switch image path to image on disk\n                help=\"path to input image\")\nargs = vars(ap.parse_args())\n\n# load, calculate aspect ratio, resize, dimensions, display and save\nimage = cv2.imread(args[\"image\"])\nresized = imutils.resize(image, width=300)\n(h, w, c) = resized.shape[:3]\nprint(\"width: {} pixels\".format(w))\nprint(\"height: {} pixels\".format(h))\nprint(\"channels: {}\".format(c))\ncv2.imshow(\"Resized using imutils\", resized)\ncv2.imwrite(\"images/output/uss-freedom-resized.jpg\", resized)\ncv2.waitKey(0)" } ]
8
hhost-madsen/prg105-madlibs2_acceptingInput
https://github.com/hhost-madsen/prg105-madlibs2_acceptingInput
3cfbe00b21436155971d7703ca8072caa5d63bba
3e3fad8d788c6bd584bc36ff5230e08c37bb5e4d
eeab2907c2df434606317a951ece25f6ad5a7682
refs/heads/master
2020-04-14T04:48:18.291079
2016-09-12T18:24:36
2016-09-12T18:24:36
68,035,723
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.575927197933197, "alphanum_fraction": 0.575927197933197, "avg_line_length": 22.63793182373047, "blob_id": "0e05d56a4781bf5ea270f6116013111c6edce99b", "content_id": "19eb71198a58287ece0de8160236b4def4582a98", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1429, "license_type": "no_license", "max_line_length": 64, "num_lines": 58, "path": "/Madlibs2_accepting_input.py", "repo_name": "hhost-madsen/prg105-madlibs2_acceptingInput", "src_encoding": "UTF-8", "text": "# Variable/Input\r\n\r\nprint \"Please enter a thing, it can be anything.\"\r\nthing = raw_input()\r\nif thing == \"\":\r\n thing = \"iPhone\"\r\n\r\nprint \"Please enter an adjective.\"\r\nadjective = raw_input()\r\nif adjective == \"\":\r\n adjective = \"on roam\"\r\n\r\nprint \"Please enter an accessory.\"\r\naccessory = raw_input()\r\nif accessory == \"\":\r\n accessory = \"case\"\r\n\r\nprint \"Please enter a noun.\"\r\ndisplay = raw_input()\r\nif display == \"\":\r\n display = \"face\"\r\n\r\nprint \"Please enter an emotion.\"\r\nemotion = raw_input()\r\nif emotion == \"\":\r\n emotion = \"lighted\"\r\n\r\nprint \"Please enter any type of action.\"\r\naction = raw_input()\r\nif action == \"\":\r\n action = \"ring\"\r\n\r\nprint \"Please enter another adjective.\"\r\nspin = raw_input()\r\nif spin == \"\":\r\n spin = \"rotate\"\r\n\r\nprint \"Please enter another noun.\"\r\nscreen = raw_input()\r\nif screen == \"\":\r\n screen = \"display\"\r\n\r\nprint \"Please enter a type of emoji.\"\r\nicon = raw_input()\r\nif icon == \"\":\r\n icon = \"emoji\"\r\n\r\n# Poem\r\nprint(\"I'm a little \" + thing + \",\")\r\nprint(\"Short and \" + adjective + \",\")\r\nprint(\"Here is my \" + accessory + \",\")\r\nprint(\"Here is my \" + display + \",\")\r\nprint(\"When I get all \" + emotion + \",\")\r\nprint(\"Hear me \" + action + \",\")\r\nprint(\"Tip me over and \" + spin + \" \" + screen + \",\")\r\nprint(\"Here's an \" + icon + \" of what I can do,\")\r\nprint(\"I can turn my \" + accessory + \" into a \" + display + \",\")\r\nprint(\"Tip me over and \" + spin + \" \" + screen + \".\")\r\n" } ]
1
HandiSutriyan/Atoma-Tasks
https://github.com/HandiSutriyan/Atoma-Tasks
d89aee0bac3712711ada29669754d23c4f4e0340
c30fc76145ff91ee394251c7c4e47d930bd5bee4
cbb7d3181e13a3f627634353e88de96d54ec8c75
refs/heads/master
2020-03-24T03:21:27.565226
2018-07-26T10:58:28
2018-07-26T10:58:28
142,415,879
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.40325865149497986, "alphanum_fraction": 0.4297352433204651, "avg_line_length": 20.34782600402832, "blob_id": "582bd612ae103749c8816b3ca691a5f7008cd36a", "content_id": "e69f3979ee19d07d6737995aee0a07fa4200ba2d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 491, "license_type": "no_license", "max_line_length": 51, "num_lines": 23, "path": "/backend/task1.py", "repo_name": "HandiSutriyan/Atoma-Tasks", "src_encoding": "UTF-8", "text": "def binary_gap(n):\n    max_gap = 0\n    current_gap = 0\n\n    # Skip the trailing zero(s)\n    while n > 0 and n % 2 == 0:\n        n //= 2\n\n    while n > 0:\n        remainder = n % 2\n        if remainder == 0:\n            # Inside a gap\n            current_gap += 1\n        else:\n            # Gap ends\n            if current_gap != 0:\n                max_gap = max(current_gap, max_gap)\n                current_gap = 0\n        n //= 2\n    print (max_gap)\n\nx = int(input(''))\nbinary_gap(int(x))\n" }, { "alpha_fraction": 0.6623376607894897, "alphanum_fraction": 0.6709956526756287, "avg_line_length": 27.75, "blob_id": "d14d512f1a6a0e0f3eecfeed4bf615cfe50297f9", "content_id": "38fe0697ffc1c178c9ed9afbd57169c7c7e72050", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 231, "license_type": "no_license", "max_line_length": 66, "num_lines": 8, "path": "/backend/task2.py", "repo_name": "HandiSutriyan/Atoma-Tasks", "src_encoding": "UTF-8", "text": "x = input('Enter the array elements, separated by commas: \\n ')\ny = input('Multiplier: ')\n# convert the user's input string into a list\na = x.split(',')\n# rotate the list elements\nfor i in range(0, int(y)):\n\ta.append(a.pop(0))\nprint(a)\n\n" }, { "alpha_fraction": 0.5384615659713745, "alphanum_fraction": 0.5384615659713745, "avg_line_length": 18.66666603088379, "blob_id": "ec86554d1e5306ec002aaec9cc782029c84ff3b3", "content_id": "61997f49d2a6cdb872a4c3fceec725668c77d876", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 117, "license_type": "no_license", "max_line_length": 30, "num_lines": 6, "path": "/backend/task3.py", "repo_name": "HandiSutriyan/Atoma-Tasks", "src_encoding": "UTF-8", "text": "x = float(input(''))\ny = float(input(''))\nd = float(input(''))\n\nresult = ((y+(d-x))/d)\nprint (\"Output: \",int(result))" }, { "alpha_fraction": 0.7790697813034058, "alphanum_fraction": 0.7790697813034058, "avg_line_length": 27.66666603088379, "blob_id": "27ce85529535ec52f955b3f635e4090b7ee98d28", "content_id": "835b725b26d353146b0849c9f0318d868474c48a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 86, "license_type": "no_license", "max_line_length": 48, "num_lines": 3, "path": "/README.md", "repo_name": "HandiSutriyan/Atoma-Tasks", "src_encoding": "UTF-8", "text": "# Atoma-Tasks\n- Answers to the developer selection tasks for PT. Atoma\n- Built with Python\n" } ]
4
youup99/Recipe_Scraper
https://github.com/youup99/Recipe_Scraper
b1a54821ec8085f2a18357803365d5ec945690a9
655848e2efc3abf85b5b54bae927ca3e5f823741
f96ff878d8e2442b0774249e39dd264d3c9df031
refs/heads/main
2023-02-04T00:10:11.708291
2020-12-29T06:00:31
2020-12-29T06:00:31
323,793,730
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5925925970077515, "alphanum_fraction": 0.6613756418228149, "avg_line_length": 20, "blob_id": "f58b3d114a252dbdd5565273d8ad0a1aec42056a", "content_id": "aa8c459dfeb1cff346ade509074cb3d48e0a1e23", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 189, "license_type": "no_license", "max_line_length": 61, "num_lines": 9, "path": "/README.md", "repo_name": "youup99/Recipe_Scraper", "src_encoding": "UTF-8", "text": "# Recipe_Scraper\n\nAn API to grab recipes for top # of dishes.\n\n## Routes\n\n**1. /recipes?searchName={...}&num={# of recipes}**\n\n- E.g. http://127.0.0.1:5000/recipes?searchName=korean&num=20\n" }, { "alpha_fraction": 0.6180665493011475, "alphanum_fraction": 0.6234548091888428, "avg_line_length": 30.549999237060547, "blob_id": "aad4398d380ae43d906020ddf769b78ac2f91da3", "content_id": "6abc845a2b6ba1344fa59065cd15711f81c050fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3155, "license_type": "no_license", "max_line_length": 83, "num_lines": 100, "path": "/main.py", "repo_name": "youup99/Recipe_Scraper", "src_encoding": "UTF-8", "text": "import os\nimport selenium\nfrom selenium import webdriver\nimport time\nfrom PIL import Image\nfrom webdriver_manager.chrome import ChromeDriverManager\n\nos.chdir('C:/Projects/Recipe_Scraper')\n\n# Install driver\nopts = webdriver.ChromeOptions()\nopts.headless = True\n\ndriver = webdriver.Chrome(ChromeDriverManager().install(), options=opts)\ndriver.get('https://www.epicurious.com/search/?content=recipe')\nprint('READY')\n\n\ndef scroll_to_end(driver):\n driver.execute_script('window.scrollTo(0, document.body.scrollHeight);')\n time.sleep(5) # sleep_between_interactions\n\n\ndef get_recipes_by_name(name, totalRecipes):\n search_url = 'https://www.epicurious.com/search/{name}?content=recipe'\n search_url = search_url.format(name=name)\n\n return search(search_url, totalRecipes)\n\n\ndef get_recipes_by_name_filtered(name, totalRecipes, include, exclude):\n search_url = 'https://www.epicurious.com/search/{name}?content=recipe'\n search_url = search_url.format(name=name)\n\n # include\n for i in range(len(include)):\n if i == 0:\n search_url += '&include=' + include[0]\n else:\n search_url += '%2C' + include[i]\n # exclude\n for i in range(len(exclude)):\n if i == 0:\n search_url += '&exclude=' + exclude[0]\n else:\n search_url += '%2C' + exclude[i]\n\n return search(search_url, totalRecipes)\n\n\ndef get_recipes_by_ingredients(totalRecipes, include, exclude):\n search_url = 'https://www.epicurious.com/search/?content=recipe'\n\n # include\n for i in range(len(include)):\n if i == 0:\n search_url += '&include=' + include[0]\n else:\n search_url += '%2C' + include[i]\n # exclude\n for i in range(len(exclude)):\n if i == 0:\n search_url += '&exclude=' + exclude[0]\n else:\n search_url += '%2C' + exclude[i]\n\n return search(search_url, totalRecipes)\n\n\n# Generic search function\ndef search(search_url, totalRecipes):\n print('URL: ', search_url)\n driver.get(search_url)\n recipes = []\n recipe_count = 0\n\n # scroll_to_end(driver)\n\n results = driver.find_elements_by_xpath(\n '//article[contains(@class,\"recipe-content-card\")]')\n\n while recipe_count < totalRecipes:\n # name, reviews, make it again, url\n recipe = {\n 'id': recipe_count + 1,\n 'name': results[recipe_count].find_element_by_css_selector(\n 'a.view-complete-item').get_attribute('title'),\n 'review_count': 
results[recipe_count].find_element_by_css_selector(\n 'dl.recipes-ratings-summary').get_attribute('data-reviews-count'),\n 'rating': results[recipe_count].find_element_by_css_selector(\n 'dl.recipes-ratings-summary').get_attribute('data-reviews-rating'),\n 'make_it_again': results[recipe_count].find_element_by_css_selector(\n 'dd.make-again-percentage').text,\n 'url': results[recipe_count].find_element_by_css_selector(\n 'a.view-complete-item').get_attribute('href')\n }\n recipes.append(recipe)\n recipe_count += 1\n\n return recipes\n" }, { "alpha_fraction": 0.6677713394165039, "alphanum_fraction": 0.6694283485412598, "avg_line_length": 26.43181800842285, "blob_id": "2cd17b8ba12403d7e287bf2bbf25848eaeac9a45", "content_id": "091a6b8b0814c21e99638a9accbf35755bfd841b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1207, "license_type": "no_license", "max_line_length": 94, "num_lines": 44, "path": "/api.py", "repo_name": "youup99/Recipe_Scraper", "src_encoding": "UTF-8", "text": "import flask\nfrom flask import request, jsonify\nfrom main import get_recipes_by_name, get_recipes_by_name_filtered, get_recipes_by_ingredients\n\napp = flask.Flask(__name__)\napp.config[\"DEBUG\"] = True\n\n\[email protected]('/', methods=['GET'])\ndef home():\n return \"<h1>Recipe Scraper</h1>\"\n\n\[email protected]('/recipesByName', methods=['POST'])\ndef search_by_name():\n req_data = request.get_json()\n search_name = req_data['name']\n num = req_data['num']\n recipes = get_recipes_by_name(search_name, num)\n return jsonify(recipes)\n\n\[email protected]('/recipesByNameFiltered', methods=['POST'])\ndef search_by_name_with_filter():\n req_data = request.get_json()\n search_name = req_data['name']\n num = req_data['num']\n include = req_data['include']\n exclude = req_data['exclude']\n recipes = get_recipes_by_name_filtered(search_name, num, include, exclude)\n return jsonify(recipes)\n\n\[email protected]('/recipesByIngredients', methods=['POST'])\ndef search_by_ingredients():\n req_data = request.get_json()\n num = req_data['num']\n include = req_data['include']\n exclude = req_data['exclude']\n recipes = get_recipes_by_ingredients(num, include, exclude)\n return jsonify(recipes)\n\n\napp.run()\n" } ]
3
danwilliams34/Controlled_Assessment-1
https://github.com/danwilliams34/Controlled_Assessment-1
65e7cfb063b9b2dd639f3abf882484e68372a664
b1ff73fdb3aa714c3c192242ee5020b3e89f5bc5
5668a30e54660b236611d822a663f8d03d86cf21
refs/heads/master
2020-04-05T22:55:13.519382
2014-06-25T11:36:35
2014-06-25T11:36:35
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5233074426651001, "alphanum_fraction": 0.532371461391449, "avg_line_length": 34.80132293701172, "blob_id": "06c8b3d114a03cb6c28c13a70eeb552d0bcc3ac7", "content_id": "994fd18bd933a32c779ba9cfdb57596a4d1361da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5406, "license_type": "no_license", "max_line_length": 244, "num_lines": 151, "path": "/README.md", "repo_name": "danwilliams34/Controlled_Assessment-1", "src_encoding": "UTF-8", "text": "#Controlled Assessment: Report\n===============================\n##Task 1-\n-------------------------------\n###Design:\n-------------------------------\n####What I Will Need to Include-\n\n1) The exchange rates need to be able to be changed by the user.\n\n2) The user should be able to enter an amount.\n\n3) The user should be able to select the currencies to exchange between.\n\n4) The printed figure should be to two decimal places.\n\n---------------------------------------------------------------------------------------------------\n####Pseudocode-\n\n```\nBEGIN\n\nINPUT currency to be converted, currency converting to (Pound Sterling/Euro/US Dollar/Japanese Yen)\nASSIGN to variables: c_type1, c_type2\nINPUT numb1 as c_type1[key]\nMATCH c_type1, c_type2 to key in dictionary\nIF c_type1 != Pound Sterling and c_type2 != Pound Sterling:\n    CONVERT c_type1 into Pound Sterling\n    CONVERT Pound Sterling into c_type2\n    RETURN int of c_type2\nELSE:\n    IDENTIFY Pound Sterling as c_type1 or c_type2\n    CHANGE this value to or from Pound Sterling\n    RETURN int of c_type2\n    \nEND\n```\n----------------------------------------------------------------------------------------------------\n####Variables-\n\n|Variables Used | Type of Variable | Discussion|\n|:---|:---:|---:|\n|currencies | Dictionary | Used to store the easily changeable exchange rates for each: GBP, EUR, USD and JPY.|\n|c_type1 | String | Used to store the 'converting from' input text.|\n|c_type2 | String | Used to store the 'converting to' input text.|\n|answer | String | Used to temporarily store c_type1 and c_type2 within the function.|\n|numb1 | Float | Used to store how many of the currency you wish to convert.|\n----------------------------------------------------------------------------------------------------\n###Development:\n----------------------\nWhile developing Task1, I amended my code because I started with the following 'if' statements: \n\n    '''if c_type1 == \"EUR\":\n    print \"exchange rate is\", currencies[\"Euro\"] \n    \n    if c_type1 == \"USD\":\n    print \"exchange rate is\", currencies[\"US Dollar\"] \n    \n    if c_type1 == \"JPY\":\n    print \"exchange rate is\", currencies[\"Japanese Yen\"] \n    \n    if c_type2 == \"GBP\":\n    print \"to\", currencies[\"Pound Sterling\"] \n    \n    if c_type2 == \"EUR\":\n    print \"to\", currencies[\"Euro\"] \n    \n    if c_type2 == \"USD\":\n    print \"to\", currencies[\"US Dollar\"] \n    \n    if c_type2 == \"JPY\":\n    print \"to\", currencies[\"Japanese Yen\"]''' \n    \n    \nThese could have worked, but they were a waste of time because I realised it would be much easier to iterate through in a 'for' loop with a while statement, making sure that c_type1 and c_type2 are equal to a valid currency at all times.\n\n----------------------------------------------------------------------------------------------------\n###Evaluation:\n----------------------\nLooking back on what I have done, I think that I managed to get a good quality, working end result but if my pseudocode had been better, I would have been able to 
achieve this quicker. But apart from that, the whole task went successfully.\n\n----------------------------------------------------------------------------------------------------\n##Task 2-\n----------------------\n###Design:\n----------------------\n####What I Will Need to Include-\n\n1) A system that stores:\n a) Surname and First name.\n b) Two lines of address and postcode.\n c) A telephone number.\n d) date of birth.\n e) email address.\n\n2) A search feature which iterates through the entries.\n\n3) Can search through the surname entries and displays a contact.\n\n4) Can search through the date of birth entries by month and display entries within this month.\n\n----------------------------------------------------------------------------------------------------\n####Pseudocode-\n\n```\nBEGIN\n\nCHOOSE either search by surname or search by birthday month\nIF by surname:\n INPUT surname to be searched\n FOR each surname:\n IF surname == surname entered:\n PRINT whole address entry\n\nELSE IF by birthday month:\n INPUT birthday month to be searched (number format)\n FOR each birthday month digits (mm of dd/mm/yy) in each address entry:\n IF birthday month == birthday month entered:\n PRINT whole address entry\n \nEND\n```\n-----------------------------------------------------------------------------------------------------\n####Variables-\n\n|Variables Used | Type of Variable | Discussion|\n|:---|:---:|---:|\n|choice | String | Used to store the decision text, deciding on which to search by.|\n|surname_search | String | Used to store which surname wishes to be searched.|\n|month_search | Integer | Used to store which month number wishes to be searched.|\n|count | Integer | Used to count how many entries fit in the search criteria.|\n-----------------------------------------------------------------------------------------------------\n###Development:\n----------------------\n\n----------------------------------------------------------------------------------------------------\n##Task 3-\n----------------------\n####Design:\n----------------------\n####What I Will Need to Include-\n\n1) The inputted number should be only 10 digits long.\n\n2) It should only contain numbers when inputted.\n\n3) It should convert any 10-digit number into an 11-digit number.\n\n\n============================\nMADE BY GLEN HEBBERD\n" }, { "alpha_fraction": 0.5321391224861145, "alphanum_fraction": 0.572181224822998, "avg_line_length": 34.57692337036133, "blob_id": "81488534594a735b7287f5e0f202cf734c60ebd2", "content_id": "7c963af93c25fbe0fd28aa6ca3b4228a789fc31a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 949, "license_type": "no_license", "max_line_length": 123, "num_lines": 26, "path": "/Task1Tests.py", "repo_name": "danwilliams34/Controlled_Assessment-1", "src_encoding": "UTF-8", "text": "import Task1\r\nimport unittest\r\n\r\nclass TestSequenceFunctions(unittest.TestCase):\r\n \r\n def setUp(self):\r\n pass\r\n\r\n def test1(self):\r\n assert(Task1.c_type1 in [\"GBP\", \"EUR\", \"USD\", \"JPY\"]) #Test 1 Fails, currency from does not equal a valid currency.\r\n \r\n def test2(self):\r\n assert(Task1.c_type2 in [\"GBP\", \"EUR\", \"USD\", \"JPY\"]) #Test 1 Fails, currency to does not equal a valid currency.\r\n \r\n def test3(self):\r\n if Task1.c_type1 == \"GBP\" and Task1.c_type2 == \"EUR\":\r\n assert(Task1.conversion(\"GBP\", \"EUR\", 10) == 12) #Test 2 Fails, converting from GBP does not work.\r\n \r\n def test4(self):\r\n if 
Task1.c_type1 == \"EUR\" and Task1.c_type2 == \"GBP\":\r\n numb1 = 12\r\n Task1.conversion(Task1.c_type1, Task1.c_type2, Task1.numb1)\r\n assert(z == 10) #Test 3 fails, converting to GBP does not work.\r\n\r\nif __name__ == '__main__':\r\n unittest.main()" } ]
2
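
The `Task1` module exercised by the tests above is not included in this dump, so the following is only a minimal, hypothetical sketch of the pivot-through-GBP conversion implied by the README's pseudocode and by the assertions in Task1Tests.py; the `conversion` signature and the `currencies` rates are illustrative assumptions, not the repository's actual code:

```python
# Hypothetical sketch, not the repo's Task1.py. Rates are stored as
# "units of this currency per 1 GBP", so GBP serves as the pivot.
currencies = {"GBP": 1.0, "EUR": 1.2, "USD": 1.5, "JPY": 180.0}  # assumed rates

def conversion(c_type1, c_type2, numb1):
    """Convert numb1 units of c_type1 into c_type2 via Pound Sterling."""
    in_gbp = numb1 / currencies[c_type1]           # normalise to GBP first
    return round(in_gbp * currencies[c_type2], 2)  # then convert to the target

assert conversion("GBP", "EUR", 10) == 12  # expectation asserted in test3
assert conversion("EUR", "GBP", 12) == 10  # expectation asserted in test4
```

Storing one rate per currency and always pivoting through GBP is the shortcut the README's `IF c_type1 != Pound Sterling and c_type2 != Pound Sterling` branch describes.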
gogosanka/questionoftheday
https://github.com/gogosanka/questionoftheday
28dbc8761461121e8bf1ccf35952e22398ec0dd6
9f60c67c506c21ecba4d61407f9253a6b887e9a8
66459dee6fcfa45dc396dd04b4d7b0063e34fe97
refs/heads/master
2021-01-10T10:50:51.830921
2015-10-12T21:24:34
2015-10-12T21:24:34
43,079,837
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6976743936538696, "alphanum_fraction": 0.7137746214866638, "avg_line_length": 25.66666603088379, "blob_id": "3b75c90324ef1250a73c971ac42559a6671b643a", "content_id": "87c96c4d4a8780cb4eda3d4941b1d88714269b97", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 559, "license_type": "no_license", "max_line_length": 69, "num_lines": 21, "path": "/run.py", "repo_name": "gogosanka/questionoftheday", "src_encoding": "UTF-8", "text": "from smh import db\nfrom smh.models.models import *\nfrom datetime import datetime\nimport os\nfrom smh import app\nnow = datetime.utcnow()\n\ntry:\n\tif User.query.filter_by(nickname=\"admin\").first() != None:\n\t\tapp.run(debug=True, threaded=True, port=5000)\n\telse:\n\t\tu = User(nickname=\"admin\", password=\"shinobi1\", created=now)\n\t\tq = Question(body=\"No further Questions.\", timestamp=now, author=u)\n\t\tqotd = QOTD(qotd=1)\n\t\tdb.session.add(q)\n\t\tdb.session.add(qotd)\n\t\tdb.session.add(u)\n\t\tdb.session.commit()\n\t\tapp.run(debug=True, threaded=True, port=5000)\nexcept:\n\tpass" }, { "alpha_fraction": 0.6779741048812866, "alphanum_fraction": 0.678209662437439, "avg_line_length": 33.795082092285156, "blob_id": "c7a3dc05277c33252c47ac20347d41ced242d9c1", "content_id": "83d319b936fb804105dd1b267d9cfb446c694a2e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4245, "license_type": "no_license", "max_line_length": 148, "num_lines": 122, "path": "/smh/blogic/__init__.py", "repo_name": "gogosanka/questionoftheday", "src_encoding": "UTF-8", "text": "from smh import db\nfrom smh.models.models import *\nfrom flask import session, redirect, url_for\nfrom datetime import datetime\n\ndefault_titles = ['Lily', 'Jeanna', 'Angelika', 'Ronald', 'Brandie', 'Doreatha', 'Leann', 'Vivienne', 'Sabina', 'Elois', 'Bernita', 'Londa', 'Rosa',\n'Alba', 'Blanche', 'Doug', 'Mana', 'Sherrill', 'Masako', 'Rod', 'Herb', 'Myriam', 'Ciara', 'Katy', 'Kisha', 'Kym', 'Xochitl', 'Flo',\n 'Sherill', 'Anika', 'Jannie', 'Patti', 'Jamar', 'Delilah', 'Maris', 'Glenna', 'Ling', 'Roselyn', 'Beatris', 'Rae']\n\n#post transactions\n\ndef update(body,author,postid,title=\"Untitled\"):\n '''user and data scope is for the database to understand\n who the user is, and then create a Post db object\n containing the author of the post, and the body of\n the post.'''\n post_record = Post.query.filter_by(id=postid).first()\n post_record.body = body\n post_record.title = title\n db.session.commit()\n\ndef delete(post):\n '''delete a post.'''\n db.session.delete(post)\n db.session.commit()\n \ndef recycle(post):\n '''recycle a post'''\n if post.rebin == 'true':\n post.rebin = 'false'\n elif post.rebin == 'false':\n post.rebin = 'true'\n db.session.commit()\n\ndef new(post,author,title=\"Untitled\"):\n '''create a new post.'''\n created_time = datetime.utcnow()\n user = User.query.filter_by(nickname=author).first()\n entry = Post(body=post, author=user, title=title, timestamp=created_time, rebin='false', public='true')\n db.session.add(entry)\n db.session.commit()\n\n#user transactions\n\ndef check_username(username,password):\n user = User.query.filter_by(nickname=username).first()\n passw = user.password\n if passw == password:\n session['current_user'] = username\n return redirect(url_for('posts'))\n\ndef add_user(user):\n db.session.add(user)\n db.session.commit()\n #make user follow themself\n db.session.add(user.follow(user))\n db.session.commit()\n\n#vibe 
transactions \ndef send_vibe(sender, vibe, recipient):\n created_time = datetime.utcnow()\n vibe.created_by = sender.nickname\n vibe.created = created_time\n if vibe.private != True:\n sender.vibes_to_date += 1\n recipient.vibes.append(vibe)\n db.session.add(recipient, vibe)\n db.session.commit()\n\ndef delete_vibe(user, vibe):\n user.vibes.remove(vibe)\n db.session.add(user)\n db.session.commit()\n\n#recipient is me, the one accepting the user's vibe\ndef accept_vibe(recipient, user, vibe):\n recipient.accept_vibe(user, vibe)\n recipient.follow_vibe(vibe)\n db.session.add(recipient)\n db.session.commit()\n\ndef push_vibe(self, user, vibe):\n creator = User.query.filter_by(nickname=user.nickname).first()\n note = (str(self.nickname) + \" is vibing with you \" + \"<placeholder for 'with-for-on-during-etc>: \" + vibe.message)\n message = Message(message=note, sent_by=self.nickname)\n creator.inbox.append(message)\n db.session.add(creator, message)\n db.session.commit()\n \n#make sure that User objects are objects and not fields. specify the field FOR the transaction\n#also make sure the message is a MESSAGE OBJECT\n#also alter this to take in multiple recipients if need be. probably not, now that i think about it\ndef send_message(sender, message, recipient):\n message.sent_by = sender.nickname\n recipient.messages.append(message)\n db.session.add(recipient, message)\n db.session.commit()\n\n\n#QOTD LOGIC\n\ndef add_question(body, author):\n timestamp = datetime.utcnow()\n user = User.query.filter_by(nickname=author).first()\n entry = body\n question = Question(body=entry, author=user, timestamp=timestamp)\n db.session.add(question)\n db.session.commit()\n\ndef delete_question(question_id):\n question = Question.query.filter_by(id=question_id).first()\n db.session.delete(question)\n db.session.commit()\n\ndef add_response(question_id,responder,response):\n '''create a new response to a question.'''\n created_time = datetime.utcnow()\n user = User.query.filter_by(nickname=responder).first()\n question = Question.query.get(question_id)\n entry = Response(body=response, author=user, question=question, timestamp=created_time)\n db.session.add(entry)\n db.session.commit()\n" }, { "alpha_fraction": 0.6964824199676514, "alphanum_fraction": 0.6984924674034119, "avg_line_length": 28.264705657958984, "blob_id": "2239cb5534c79017f910ffa8f633d971e69755fb", "content_id": "9ff411b9c1f93cc8687ff77306c1f7e2dd8ec1eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 995, "license_type": "no_license", "max_line_length": 68, "num_lines": 34, "path": "/db_repository/versions/002_migration.py", "repo_name": "gogosanka/questionoftheday", "src_encoding": "UTF-8", "text": "from sqlalchemy import *\nfrom migrate import *\n\n\nfrom migrate.changeset import schema\npre_meta = MetaData()\npost_meta = MetaData()\nquestion_tag = Table('question_tag', post_meta,\n Column('id', Integer, primary_key=True, nullable=False),\n Column('text', String(length=20)),\n Column('timestamp', DateTime),\n)\n\nquestion_tags = Table('question_tags', post_meta,\n Column('question_tag_id', Integer),\n Column('question_id', Integer),\n)\n\n\ndef upgrade(migrate_engine):\n # Upgrade operations go here. 
Don't create your own engine; bind\n # migrate_engine to your metadata\n pre_meta.bind = migrate_engine\n post_meta.bind = migrate_engine\n post_meta.tables['question_tag'].create()\n post_meta.tables['question_tags'].create()\n\n\ndef downgrade(migrate_engine):\n # Operations to reverse the above upgrade go here.\n pre_meta.bind = migrate_engine\n post_meta.bind = migrate_engine\n post_meta.tables['question_tag'].drop()\n post_meta.tables['question_tags'].drop()\n" }, { "alpha_fraction": 0.596819281578064, "alphanum_fraction": 0.5997257828712463, "avg_line_length": 42.626792907714844, "blob_id": "00fb241df4e6f182a2f779660936a2448b757d59", "content_id": "7004aae19a162f3ce5925c3a49c71d6968943d73", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18235, "license_type": "no_license", "max_line_length": 193, "num_lines": 418, "path": "/smh/views/__init__.py", "repo_name": "gogosanka/questionoftheday", "src_encoding": "UTF-8", "text": "from flask import render_template, flash, redirect, session, url_for, request, abort\nfrom flask.ext.login import login_user, logout_user, current_user, login_required\nfrom smh import app, db, lm, blogic\nfrom smh.forms import LoginForm, NameForm, SignupForm, AskForm, VibeMeForm, QuestionForm, ResponseForm, ChangeQuestion\nfrom smh.models.models import User, Post, Vibe, Question, QOTD, Response\nfrom datetime import datetime\nfrom smh.auth import *\nimport time\n\[email protected]('/admin', methods=['GET'])\ndef jeffadmin():\n return render_template('admindashboard.html')\n\[email protected]('/', methods=['GET', 'POST'])\n#@app.route('/homepage', methods=['GET'])\ndef questions():\n form = ResponseForm()\n title = \"Exploring Social Issues Through Anonymity\"\n question_id = QOTD.query.get(1)\n question = Question.query.filter_by(id=question_id.qotd).first()\n user = 'Unregistered'\n if current_user.is_authenticated():\n user = User.query.filter_by(nickname=current_user.nickname).first()\n if request.method == 'POST':\n question_id = request.form['question_id']\n responder = request.form['responder']\n response = request.form['response']\n blogic.add_response(question_id, responder, response)\n flash('Responded successfully!')\n return redirect(url_for('questions'))\n if question:\n return render_template('smh/qotd.html', title=title, user=user, question=question, form=form)\n return render_template('smh/qotd.html',\n title=title,\n user=user)\n\[email protected]('/questions', methods=['GET', 'POST'])\n@login_required\ndef send():\n body = request.form['question']\n author = request.form['author']\n if body:\n if author:\n blogic.ask(body,author,title)\n flash(\"submitted question successfully!\")\n return redirect(url_for('questions', nickname=current_user.nickname))\n else:\n return render_template('404.html')\n\n\[email protected]('/xyadmin', methods=['GET', 'POST'])\n@login_required\ndef admindash():\n user = User.query.filter_by(nickname='admin').first()\n form = QuestionForm()\n questions = Question.query.all()\n question_id = QOTD.query.get(1)\n current_q = Question.query.filter_by(id=question_id.qotd).first()\n if user and (current_user.nickname == user.nickname):\n if request.method == 'POST':\n if form.validate_on_submit():\n body = form.question.data\n author = current_user.nickname\n blogic.add_question(body, author)\n flash('Added question to database!')\n return redirect(url_for('admindash', user=user, form=form, questions=questions, current=current_q))\n elif request.method == 'GET':\n 
return render_template('admin/admin.html', user=user, form=form, questions=questions, current=current_q)\n return redirect(url_for('nopage'))\n\[email protected]('/set_question/<question_id>', methods=['GET'])\n@login_required\ndef admindash2(question_id):\n user = User.query.filter_by(nickname='admin').first()\n question_id = int(question_id)\n if user and (current_user.nickname == user.nickname):\n qotd = QOTD.query.get(1)\n qotd.set(question_id)\n flash('Changed question!')\n return redirect(url_for('admindash'))\n return redirect(url_for('nopage'))\n\[email protected]('/delete_question/<question_id>', methods=['GET'])\n@login_required\ndef admindash3(question_id):\n user = User.query.filter_by(nickname='admin').first()\n if user and (current_user.nickname == user.nickname):\n blogic.delete_question(question_id)\n flash('Changed question!')\n return redirect(url_for('admindash'))\n return redirect(url_for('nopage'))\n\n\[email protected]('/404')\n@login_required\ndef nopage():\n message = 'four-oh-4-0-y-does-Dis-h4ppen-ta-MI'\n return render_template('404.html', message=message)\n\[email protected]('/<nickname>/accept/<int:vibeid>', methods=['POST'])\n@login_required\ndef accept_vibe(nickname, vibeid):\n vibe = Vibe.query.get(vibeid)\n user = User.query.filter_by(nickname=nickname).first()\n recipient = User.query.filter_by(nickname=current_user.nickname).first()\n if request.method == 'POST':\n if current_user.is_authenticated and current_user.nickname != nickname:\n if user:\n if vibe:\n if vibe in current_user.vibes:\n blogic.accept_vibe(recipient, user, vibe)\n return render_template('profile', nickname=current_user.nickname)\n\[email protected]('/<nickname>', methods=['GET', 'POST'])\ndef profile(nickname):\n form = AskForm()\n user = User.query.filter_by(nickname=current_user.nickname).first()\n if current_user.is_authenticated and nickname == current_user.nickname:\n #if the page is the current user's, load the dashboard. otherwise load the profile pages\n #this way we keep people from changing or seeing other people's information\n return render_template('smh/dashboard.html',\n title=\"Dashboard\",\n user=user,\n form=form)\n else:\n #open the generic viewing of profile pages, not the dashboard. will need to create this template and remove the \"profile.html\" below\n user = User.query.filter_by(nickname=nickname).first()\n form = VibeMeForm()\n if user:\n return render_template('smh/dashboard.html', title=(str(nickname) + \"'s Activity\"), user=user, form=form)\n else:\n flash('Could not find user %s!' 
% (nickname))\n return redirect(url_for('questions'))\n\[email protected]('/update', methods=['POST'])\n@login_required\ndef update_post():\n body = request.form['body']\n author = request.form['author']\n postid = request.form['postid']\n title = request.form['title']\n post = Post.query.get(postid)\n if post.author.nickname == current_user.nickname:\n blogic.update(body,author,postid,title)\n return redirect(url_for('posts', nickname=current_user.nickname))\n return redirect(url_for('posts', nickname=current_user.nickname))\n\[email protected]('/edit/<int:postid>/', methods=['GET'])\n@login_required\ndef edit(postid):\n if current_user.is_authenticated():\n user = User.query.filter_by(nickname=current_user.nickname).first()\n posts_count = Post.query.filter_by(author=user, rebin='false').count()\n post = Post.query.get(postid)\n if post:\n if post.author.nickname == current_user.nickname:\n return render_template('edit.html',\n title=\"Edit Post\",\n user=user,\n post=post, #recognize that it is written singular tense here, as we are showing 1 post not multiple\n posts_count=posts_count,\n bin_posts=bin_posts,\n bin_count=bin_count,\n follower=follower)\n else:\n return redirect(url_for('posts', nickname=current_user.nickname))\n return render_template('404.html')\n else:\n return render_template('404.html')\n\[email protected]('/show/<int:postid>/', methods=['GET'])\n@login_required\ndef show(postid):\n feed = Post.query.filter_by(rebin='false').all()\n follower = '0 for now' #count for followers. will need to update the db model\n user = 'Stranger'\n if current_user.is_authenticated():\n user = User.query.filter_by(nickname=current_user.nickname).first()\n posts_count = Post.query.filter_by(author=user, rebin='false').count()\n bin_posts = Post.query.filter_by(author=user, rebin='true').all() #all recycled posts object \n bin_count = Post.query.filter_by(author=user, rebin='true').count() #recycled posts count\n post = Post.query.get(postid)\n if post:\n return render_template('show.html',\n title=\"View Post\",\n user=user,\n post=post, #recognize that it is written singular tense here, as we are showing 1 post not multiple\n posts_count=posts_count,\n bin_posts=bin_posts,\n bin_count=bin_count,\n follower=follower)\n else:\n return render_template('404.html')\n\[email protected]('/delete/<vibeid>/', methods=['GET'])\n@login_required\ndef delete_vibe(vibeid):\n vibe = Vibe.query.filter_by(id=vibeid).first()\n if vibe:\n if current_user.is_authenticated and current_user.is_following_vibe(vibe):\n user = User.query.filter_by(nickname=current_user.nickname).first()\n blogic.delete_vibe(user, vibe)\n flash(\"Deleted Vibe!\")\n return redirect(url_for('profile', nickname=current_user.nickname))\n else:\n return render_template('404.html')\n else:\n return render_template('404.html')\n\n\[email protected]('/delete/<postid>/', methods=['GET'])\n@login_required\ndef delete(postid):\n post = Post.query.filter_by(id=postid).first()\n if post:\n blogic.delete(post)\n flash(\"Deleted post!\")\n return redirect(url_for('bin', nickname=current_user.nickname))\n else:\n return render_template('404.html')\n\[email protected]('/recycle/<postid>/', methods=['GET'])\n@login_required\ndef recycle(postid):\n post = Post.query.filter_by(id=postid).first()\n if post:\n blogic.recycle(post)\n if post.rebin == 'true':\n flash(\"Post was sent to recycling bin!\")\n return redirect(url_for('profile', nickname=current_user.nickname))\n flash(\"Your post was restored!\")\n return redirect(url_for('bin', 
nickname=current_user.nickname))\n else:\n return render_template('404.html')\n\[email protected]('/<nickname>/bin', methods=['GET', 'POST'])\n@login_required\ndef bin(nickname=current_user):\n feed = Post.query.filter_by(rebin='false').all()\n follower = '0 for now' #count for followers. will need to update the db model\n user = 'Stranger'\n if current_user.is_authenticated():\n user = User.query.filter_by(nickname=current_user.nickname).first()\n posts = Post.query.filter_by(author=user).all()\n posts_count = Post.query.filter_by(author=user, rebin='false').count()\n bin_posts = Post.query.filter_by(author=user, rebin='true').all() #all recycled posts object \n bin_count = Post.query.filter_by(author=user, rebin='true').count() #recycled posts count\n #hidden_posts = Post.query.filter_by(author=user, rebin='false', hidden='true').all() #make sure to change blogic so that when hidden items are deleted their status goes back to visible\n return render_template('bin.html',\n title=\"Recycling Bin\",\n user=user,\n post=posts,\n posts_count=posts_count,\n bin_posts=bin_posts,\n bin_count=bin_count,\n follower=follower,\n feed=feed)\n return render_template('auth/login.html',\n title=\"Discover\",\n feed=feed,\n user=user,\n follower=follower)\n\[email protected]('/visible/<postid>/', methods=['GET'])\n@login_required\ndef visible(postid):\n post = Post.query.filter_by(id=postid).first()\n if post:\n if post.public == 'true':\n post.hide()\n flash(\"Vibe was hidden.\")\n return redirect(url_for('profile', nickname=current_user.nickname))\n post.unhide()\n flash(\"Now sharing vibe.\")\n return redirect(url_for('profile', nickname=current_user.nickname))\n else:\n return render_template('404.html')\n\[email protected]('/create', methods=['POST'])\n@login_required\ndef create():\n user = User.query.filter_by(nickname=current_user.nickname).first()\n post = request.form['body']\n author = request.form['author']\n title = request.form['title']\n if post:\n if author:\n blogic.new(post,author,title)\n flash(\"created post successfully!\")\n return redirect(url_for('posts', nickname=current_user.nickname))\n else:\n return render_template('404.html')\n\[email protected]('/nopage')\ndef no_page():\n return render_template('404.html')\n\[email protected]('/<nickname>/posts/new')\n@login_required\ndef new(nickname):\n feed = Post.query.filter_by(rebin='false').all()\n follower = '0 for now' #count for followers. 
will need to update the db model\n user = 'Stranger'\n if current_user.is_authenticated():\n user = User.query.filter_by(nickname=current_user.nickname).first()\n posts = Post.query.filter_by(author=user).all()\n posts_count = Post.query.filter_by(author=user, rebin='false').count()\n bin_posts = Post.query.filter_by(author=user, rebin='true').all() #all recycled posts object \n bin_count = Post.query.filter_by(author=user, rebin='true').count() #recycled posts count\n #hidden_posts = Post.query.filter_by(author=user, rebin='false', hidden='true').all() #make sure to change blogic so that when hidden items are deleted their status goes back to visible\n return render_template('create.html',\n title=\"Discover\",\n user=user,\n posts_count=posts_count,\n bin_posts=bin_posts,\n bin_count=bin_count,\n follower=follower,\n feed=feed)\n return render_template('posts.html',\n title=\"New Post\",\n user=user,\n follower=follower)\n\[email protected]('/login', methods=['GET', 'POST'])\[email protected]('/login', methods=['GET', 'POST'])\ndef login():\n form = LoginForm()\n user = 'Stranger'\n if form.validate_on_submit():\n user = User.query.filter_by(nickname=form.nickname.data).first()\n if user is not None and user.verify_password(form.password.data):\n login_user(user, form.remember_me.data)\n if request.args.get('next') is url_for('auth.login'):\n return redirect(url_for('questions'))\n return redirect(request.args.get('next') or url_for('questions'))\n flash('Invalid username or password.')\n return render_template('auth/login.html',\n title=\"Log In\",\n form=form,\n user=user)\n return render_template('auth/login.html',\n title=\"Log In\",\n form=form,\n user=user)\[email protected]('/logout')\[email protected]('/logout')\ndef logout():\n logout_user()\n flash('You are now logged out.')\n return redirect(url_for('questions'))\n\[email protected]('/signup', methods=['GET', 'POST'])\ndef signup():\n form = SignupForm()\n user = 'Stranger'\n created_time = datetime.utcnow()\n check_email = User.query.filter_by(email=form.email.data).first()\n check_nickname = User.query.filter_by(nickname=form.nickname.data).first()\n if form.validate_on_submit():\n if not check_email and not check_nickname:\n user = User(nickname=form.nickname.data, created=created_time, email=form.email.data, password=form.password.data, catchphrase=form.catchphrase.data, vibes_to_date=0)\n blogic.add_user(user)\n login_user(user, form.remember_me.data)\n flash('Account created successfully!')\n return redirect(request.args.get('next') or url_for('questions'))\n flash('Username or password is already taken. If this is you please sign in.')\n return render_template('signup.html',\n title=\"Log In\",\n form=form,\n user=user)\n\[email protected]('/follow/<nickname>')\n@login_required\ndef follow(nickname):\n #user is the db object of the nickname argument\n user = User.query.filter_by(nickname=nickname).first()\n #current is the db object of the current logged in user\n current = User.query.filter_by(nickname=current_user.nickname).first()\n if user is None:\n flash('User %s not found.' 
% nickname)\n return redirect(url_for('discover'))\n #check if the current logged in user is the same as the one we are trying to follow\n if user.id == current_user.id:\n flash('You can\\'t follow yourself!')\n return redirect(url_for('profile', nickname=nickname))\n #otherwise, let's follow the user!\n u = current.follow(user)\n if u is None:\n flash('Already following ' + nickname + '.')\n return redirect(url_for('profile', nickname=nickname))\n db.session.add(u)\n db.session.commit()\n flash('You are now following ' + nickname + '!')\n return redirect(url_for('profile', nickname=nickname))\n\[email protected]('/unfollow/<nickname>')\n@login_required\ndef unfollow(nickname):\n user = User.query.filter_by(nickname=nickname).first()\n #current is the db object of the current logged in user\n current = User.query.filter_by(id=current_user.id).first()\n if user is None:\n flash('User %s not found.' % nickname)\n return redirect(url_for('discover'))\n if user.id == current_user.id:\n flash('You can\\'t unfollow yourself!')\n return redirect(url_for('profile', nickname=nickname))\n u = current.unfollow(user)\n if u is None:\n flash('You are not following ' + nickname + '.')\n return redirect(url_for('profile', nickname=nickname))\n db.session.add(u)\n db.session.commit()\n flash('You have stopped following ' + nickname + '.')\n return redirect(url_for('profile', nickname=nickname))\n\nif __name__ == '__main__':\n app.run()" }, { "alpha_fraction": 0.6385733485221863, "alphanum_fraction": 0.6429167985916138, "avg_line_length": 41.154930114746094, "blob_id": "ee9c3c32bca6b7322dd5b7a6c5b98b13951d8c90", "content_id": "250dff33e6ef1d687373c904548c21139f88eedf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11972, "license_type": "no_license", "max_line_length": 198, "num_lines": 284, "path": "/smh/models/models.py", "repo_name": "gogosanka/questionoftheday", "src_encoding": "UTF-8", "text": "from smh import db\nfrom werkzeug.security import generate_password_hash, check_password_hash\nfrom flask.ext.login import UserMixin\nfrom smh import lm\nfrom smh.blogic import *#imports depend on where you're importing from, specifically if it's from the app or within another folder\nfrom hashlib import md5\n\[email protected]_loader\ndef load_user(user_id):\n return User.query.get(int(user_id))\n\n'''the db.Model class has a all() method which queries the db\n and returns all the db rows created. 
For example,\n users = User.query.get(1) #returns the 1st user object\n users.posts.all() #will return all the posts associated with user 1\n the Post class is defined below, and has a relationship within\n the User class, which is why this works.'''\n\nfollowers = db.Table('followers',\n db.Column('follower_id', db.Integer, db.ForeignKey('user.id')),\n db.Column('followed_id', db.Integer, db.ForeignKey('user.id'))\n)\n\nvibes = db.Table('vibes',\n db.Column('user_id', db.Integer, db.ForeignKey('user.id')),\n db.Column('vibe_id', db.Integer, db.ForeignKey('vibe.id'))\n)\n\nvibes_accepted = db.Table('vibes_accepted',\n db.Column('user_id', db.Integer, db.ForeignKey('user.id')),\n db.Column('vibe_id', db.Integer, db.ForeignKey('vibe.id'))\n)\n\nmessages = db.Table('messages',\n db.Column('user_id', db.Integer, db.ForeignKey('user.id')),\n db.Column('message_id', db.Integer, db.ForeignKey('message.id'))\n)\n\ntags = db.Table('tags',\n db.Column('tag_id', db.Integer, db.ForeignKey('tag.id')),\n db.Column('post_id', db.Integer, db.ForeignKey('post.id'))\n)\n\nquestion_tags = db.Table('question_tags',\n db.Column('question_tag_id', db.Integer, db.ForeignKey('question_tag.id')),\n db.Column('question_id', db.Integer, db.ForeignKey('question.id'))\n) \n\nclass User(db.Model, UserMixin):\n __tablename__ = 'user'\n id = db.Column(db.Integer, primary_key=True)\n nickname = db.Column(db.String(32), index=True, unique=True)\n email = db.Column(db.String(64), index=True, unique=True)\n post = db.relationship('Post', backref='author', lazy='dynamic')\n question = db.relationship('Question', backref='author', lazy='dynamic')\n responses = db.relationship('Response', backref='author', lazy='dynamic')\n album = db.relationship('Album', backref='author', lazy='dynamic')\n images = db.relationship('Image', backref='author', lazy='dynamic')\n cover = db.relationship('Cover', backref='author', lazy='dynamic')\n vibes_to_date = db.Column(db.Integer)\n catchphrase = db.Column(db.String(32))\n created = db.Column(db.DateTime)\n about_me = db.Column(db.String(140))\n last_seen = db.Column(db.DateTime)\n followed = db.relationship('User',\n secondary=followers,\n primaryjoin=(followers.c.follower_id == id),\n secondaryjoin=(followers.c.followed_id == id),\n backref=db.backref('followers', lazy='dynamic'),\n lazy='dynamic')\n vibes = db.relationship('Vibe',\n secondary=vibes,\n backref=db.backref('recipients', lazy='dynamic'),\n lazy='dynamic')\n vibes_accepted = db.relationship('Vibe',\n secondary=vibes_accepted,\n backref=db.backref('was_accepted_by', lazy='dynamic'),\n lazy='dynamic')\n inbox = db.relationship('Message',\n secondary=messages,\n backref=db.backref('recipient_inbox', lazy='dynamic'),\n lazy='dynamic')\n \n def followed_posts(self):\n return Post.query.join(followers, (followers.c.followed_id == Post.user_id)).filter(followers.c.follower_id == self.id).order_by(Post.timestamp.desc()) #read this thoroughly to understand it\n password_hash = db.Column(db.String(128))\n @property\n def password(self):\n raise AttributeError('password is not a readable attribute')\n @password.setter\n def password(self,password):\n self.password_hash = generate_password_hash(password)\n def verify_password(self,password):\n return check_password_hash(self.password_hash,password)\n def is_anonymous():\n return False\n def get_id(self):\n return (self.id)\n def __repr__(self):\n return (self.nickname)\n #handle following a user\n def follow(self, user):\n if not self.is_following(user):\n self.followed.append(user)\n 
return self\n #handle unfollowing a user\n def unfollow(self, user):\n if self.is_following(user):\n self.followed.remove(user)\n return self\n #handle checking if a user is being followed\n def is_following(self, user):\n return self.followed.filter(followers.c.followed_id == user.id).count() > 0\n #handle if a vibe is to be followed\n def follow_vibe(self, vibe):\n if not self.is_following_vibe(vibe):\n self.vibes.append(vibe)\n return self\n def unfollow_vibe(self, vibe):\n if self.is_following_vibe(vibe):\n self.vibes.remove(vibe)\n self.vibes_accepted.remove(vibe)\n return self\n def is_following_vibe(self, vibe):\n return self.vibes.filter(vibes.c.vibe_id == vibe.id).count() > 0\n #takes in a user so that the message shows who accepted the vibe\n def accept_vibe(self, user, vibe):\n if not self.has_accepted_vibe(vibe):\n self.vibes_accepted.append(vibe)\n blogic.push_vibe(self, creator, message, vibe)\n return self\n #alert all watchers of vibe, ie vibe followers, of updates. in other words, mass message them\n #the below code is crap. refer to accept_vibe for help\n def alert_watchers(self, user, vibe):\n if not self.has_accepted_vibe(vibe):\n creator = User.query.filter_by(nickname=vibe.created_by.nickname).first()\n #message should check if a message label is created, and return the label as a placeholder instead\n self.vibes_accepted.append(vibe)\n blogic.push_vibe(self, user, vibe)\n return self\n def has_accepted_vibe(self, vibe):\n return self.vibes.filter(vibes.c.vibe_id == vibe.id).count() > 0\n def send_message(self, message):\n self.messages.append(message)\n return self\n def push_vibes(self, vibe):\n if not self.has_accepted_vibe(vibe):\n self.vibes_accepted.append(vibe)\n blogic.push_vibe(self, vibe)\n return self\n def has_pushed_vibe(self, vibe):\n return self.vibes.filter(vibes.c.vibe_id == vibe.id).count() > 0\n def avatar(self, size):\n return 'http://www.gravatar.com/avatar/%s?d=mm&s=%d' % (md5(self.email.encode('utf-8')).hexdigest(), size)\n\n#Vibe Vibes are the easiest way for people to get in touch to do something in particular.\n#user vibes are vibes that follow a user\n#user vibing are vibes that the user follows\nclass Vibe(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n image = db.Column(db.LargeBinary)\n message = db.Column(db.String(77))\n accepted_by = db.Column(db.Boolean)\n public = db.Column(db.Boolean)\n private = db.Column(db.Boolean)\n created_by = db.Column(db.String(32))\n created = db.Column(db.DateTime)\n seen_timestamp = db.Column(db.DateTime)\n def __repr__(self):\n return (self.message)\n\nclass Message(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n message = db.Column(db.Text)\n sent_by = db.Column(db.String(32))\n created_timestamp = db.Column(db.DateTime)\n seen_timestamp = db.Column(db.DateTime)\n #be cute! 
randomize the <says> to say other things like \"vibes, explains, announces, mumbles, bellows\" etc.\n #I left a placeholder for now, but try randomizing those words and replace <says> with a variable\n def __repr__(self):\n return (self.sent_by + \" <says>: \"+ self.message)\n\nclass Image(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n image = db.Column(db.LargeBinary)\n timestamp = db.Column(db.DateTime)\n user_id = db.Column(db.Integer, db.ForeignKey('user.id'))\n post_id = db.Column(db.String(500), db.ForeignKey('post.id'))\n\nclass Tag(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n text = db.Column(db.String(20))\n timestamp = db.Column(db.DateTime)\n\nclass QuestionTag(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n text = db.Column(db.String(20))\n timestamp = db.Column(db.DateTime)\n\nclass Album(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String(20))\n timestamp = db.Column(db.DateTime)\n user_id = db.Column(db.Integer, db.ForeignKey('user.id'))\n #create an association table for the many-to-many relationship\n\nclass Cover(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n cover = db.Column(db.LargeBinary, index=True)\n user_id = db.Column(db.Integer, db.ForeignKey('user.id'))\n\nclass Post(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n rebin = db.Column(db.String(5))\n public = db.Column(db.String(8))\n body = db.Column(db.String(500))\n title = db.Column(db.String(32))\n timestamp = db.Column(db.DateTime)\n user_id = db.Column(db.Integer, db.ForeignKey('user.id'))\n album_id = db.Column(db.Integer, db.ForeignKey('album.id'))\n tags = db.relationship('Tag', secondary = tags, backref=db.backref('posts', lazy='dynamic'))\n image = db.relationship('Image', backref='post', lazy='dynamic')\n rating = db.Column(db.Boolean)\n def hide(self):\n self.public = 'false'\n db.session.commit()\n def unhide(self):\n self.public = 'true'\n db.session.commit()\n def __repr__(self):\n repre = \"%r\" % self.body\n return str(repre)\n\n#Upon database creation, make sure to generate the first question in your instance scripts\n#Have the question say something like, \"no further questions\"\n#Make sure to instruct those who use this boiler plate to change the default question\nclass Question(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n approved = db.Column(db.String(5))\n body = db.Column(db.String(500))\n timestamp = db.Column(db.DateTime)\n user_id = db.Column(db.Integer, db.ForeignKey('user.id'))\n responses = db.relationship('Response', backref='question', lazy='dynamic')\n question_tags = db.relationship('QuestionTag', secondary = question_tags, backref=db.backref('questions', lazy='dynamic'))\n def hide(self):\n self.approved = 'false'\n db.session.commit()\n def unhide(self):\n self.approved = 'true'\n db.session.commit()\n def __repr__(self):\n repre = \"%r\" % self.body\n return str(repre)\n\n#to generate responses, create a Response instance that includes \"question\" as one of the attributes.\n#Make sure the \"question\" is a Question instance object.\nclass Response(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n body = db.Column(db.String(500))\n timestamp = db.Column(db.DateTime)\n user_id = db.Column(db.Integer, db.ForeignKey('user.id'))\n question_id = db.Column(db.Integer, db.ForeignKey('question.id'))\n def hide(self):\n self.public = 'false'\n db.session.commit()\n def unhide(self):\n self.public = 'true'\n db.session.commit()\n def __repr__(self):\n repre = 
\"%r\" % self.body\n return str(repre)\n\n#make sure to create an instance of QOTD and set the qotd variable to \"1\"\n#the qotd variable stands for the question id that will be displayed by default\nclass QOTD(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n qotd = db.Column(db.Integer)\n timestamp = db.Column(db.DateTime)\n def set(self, identifier):\n self.qotd = int(identifier)\n db.session.commit()\n def __repr__(self):\n repre = \"%r\" % self.qotd\n return str(repre)\n" }, { "alpha_fraction": 0.6842650175094604, "alphanum_fraction": 0.6994478702545166, "avg_line_length": 45.015872955322266, "blob_id": "66bd8a4ffdb6f66aff01710900dc9bbbaa45f973", "content_id": "e40aa16480817d2b3088c767e91218eb00972258", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2898, "license_type": "no_license", "max_line_length": 134, "num_lines": 63, "path": "/smh/forms/__init__.py", "repo_name": "gogosanka/questionoftheday", "src_encoding": "UTF-8", "text": "from flask.ext.wtf import Form\nfrom wtforms import StringField, SubmitField, PasswordField, TextField, BooleanField, SelectField, HiddenField\nfrom wtforms.validators import Required, Length, Email, EqualTo\nfrom datetime import datetime\nfrom smh.models.models import User as User\n\nclass LoginForm(Form):\n nickname = StringField('Username', validators=[Required(), Length(1, 64)])\n password = PasswordField('Password', validators=[Required()])\n remember_me = BooleanField('Keep me logged in')\n submit = SubmitField('Log In')\n\nclass NameForm(Form):\n name = StringField('What is your name?', validators=[Required()])\n submit = SubmitField('Submit')\n\nclass AskForm(Form):\n question = StringField('Submit a question:', validators=[Required()])\n submit = SubmitField('Submit')\n\nclass SignupForm(Form):\n email = StringField('Email', validators=[Required(), Length(1, 64), Email()])\n catchphrase = StringField('Catchphrase', validators=[Length(1, 32)])\n password = PasswordField('Password', validators=[Required()])\n confirm = PasswordField('Password', validators=[Required(), EqualTo('password', message='Passwords do not match!')])\n nickname = StringField('Username', validators=[Required(), Length(1, 16)]) #confirm if making name longer than 16 handles properly\n remember_me = BooleanField('Keep me logged in')\n submit = SubmitField('Sign Up')\n\nclass VibeMeForm(Form):\n message = StringField('Vibe', validators=[Required(), Length(1, 77)])\n recipient = TextField('Send to:', validators=[Required()])\n private = BooleanField('Private vibe')\n submit = SubmitField('Send')\n\n'''class VibeBroadcast(Form):\n followed = User.query.all()\n vibe = StringField('Vibe', validators=[Required(), Length(1, 77)])\n followed_list = SelectField('Send to:', choices=[(f,f) for f in followed], validators=[Required()])\n submit = SubmitField('Send')'''\n\nclass UserInformation(Form):\n first_name = TextField('First Name', validators=[Required(), Length(1, 77)])\n last_name = TextField('First Name', validators=[Required(), Length(1, 77)])\n mobile_number = TextField('First Name', validators=[Required(), Length(1, 77)])\n age = TextField('First Name', validators=[Required(), Length(1, 77)])\n city = TextField('First Name', validators=[Required(), Length(1, 77)])\n state = TextField('First Name', validators=[Required(), Length(1, 77)])\n submit = SubmitField('Send')\n\nclass QuestionForm(Form):\n question = StringField('Question', validators=[Required(), Length(1, 77)])\n submit = SubmitField('Send')\n\nclass 
ResponseForm(Form):\n    response = StringField('Response', validators=[Required(), Length(1, 77)])\n    question_id = HiddenField()\n    responder = HiddenField()\n    submit = SubmitField('Share Anonymously')\n\nclass ChangeQuestion(Form):\n    question_id = StringField('Enter Question ID')\n    submit = SubmitField('Send')" }, { "alpha_fraction": 0.725806474685669, "alphanum_fraction": 0.7419354915618896, "avg_line_length": 20, "blob_id": "6b46693cfd690677ec531e019b3c4fd3f4d66647", "content_id": "a60390e7faacb29fe99fe5177cdd36e912d898bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 62, "license_type": "no_license", "max_line_length": 40, "num_lines": 3, "path": "/flaskserver.py", "repo_name": "gogosanka/questionoftheday", "src_encoding": "UTF-8", "text": "import os\nwhile True:\n    os.system('python newrun.py|color 0b')" }, { "alpha_fraction": 0.800000011920929, "alphanum_fraction": 0.800000011920929, "avg_line_length": 18, "blob_id": "099516e5979466ce62d5094cda9973007ce2d64d", "content_id": "a7d985c2827b89e42e0d49f4365f24482a67e0b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 18, "num_lines": 1, "path": "/README.md", "repo_name": "gogosanka/questionoftheday", "src_encoding": "UTF-8", "text": "# questionoftheday \n" } ]
8
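
The models.py record above builds its follower graph with SQLAlchemy's self-referential many-to-many pattern; here is a stripped-down, self-contained sketch of just that pattern. The modern `flask_sqlalchemy` import and the in-memory SQLite URI are assumptions for illustration, while the table, column, and method names come from the record itself:

```python
# Minimal sketch of the follower pattern used in smh/models/models.py.
# Assumptions: modern flask_sqlalchemy import, in-memory SQLite for demo use.
from flask import Flask
from flask_sqlalchemy import SQLAlchemy

app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///:memory:"
db = SQLAlchemy(app)

# One association table; both columns point back at user.id.
followers = db.Table(
    "followers",
    db.Column("follower_id", db.Integer, db.ForeignKey("user.id")),
    db.Column("followed_id", db.Integer, db.ForeignKey("user.id")),
)

class User(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    nickname = db.Column(db.String(32), unique=True)
    # primaryjoin/secondaryjoin tell SQLAlchemy which foreign key means
    # "me" and which means "the account I follow".
    followed = db.relationship(
        "User",
        secondary=followers,
        primaryjoin=(followers.c.follower_id == id),
        secondaryjoin=(followers.c.followed_id == id),
        backref=db.backref("followers", lazy="dynamic"),
        lazy="dynamic",
    )

    def follow(self, user):
        # Deliberately returns None when already following; the views in
        # this repo treat that None as the "already following" signal.
        if not self.is_following(user):
            self.followed.append(user)
            return self

    def is_following(self, user):
        return self.followed.filter(followers.c.followed_id == user.id).count() > 0
```

Note that `follow` only mutates the relationship in memory; the caller remains responsible for `db.session.add(...)` and `db.session.commit()`, which is exactly what the `follow`/`unfollow` views in views/__init__.py do.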
myracheng/bread
https://github.com/myracheng/bread
0bde2702850ce4740d916938325ab87367ab8154
1c31a2c44db828a1796fcce236a05dd76b7d3005
6ba7f7d5000c082613e77652e3523310d21c3f83
refs/heads/main
2023-03-24T18:41:10.668135
2021-03-23T06:31:05
2021-03-23T06:31:05
316,401,663
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7221908569335938, "alphanum_fraction": 0.7481648921966553, "avg_line_length": 54.375, "blob_id": "7c1db5cbac2c0081a75aca077f0140ea8b693991", "content_id": "305f111a0cc5f9c1c0abe7969f957834b5b94bf0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1775, "license_type": "no_license", "max_line_length": 328, "num_lines": 32, "path": "/cake/redbeanniangao.md", "repo_name": "myracheng/bread", "src_encoding": "UTF-8", "text": "# Red Bean Nian Gao\n\n<center><img src=\"../img/niangao.jpg\" alt=\"nian gao\" style=\"width:40%;\"/></center>\n\nSource: [https://whattocooktoday.com/nian-gao.html](What to Cook Today)\n\nTime:\n\nNotes: This made too much, would probably do 2/3 next time. and also it was reallyyyy sticky so would try a recipe with rice flour, and less sugar. want to try [this](https://dusdoughs.com/2020/10/23/baked-sweet-nian-gao-mochi-cake/) next time.\n\nStory: I finally got my hands on some sweet rice flour by going to Ranch 99 with Nora! I wanted to make some form of ็ฒ˜็ณ• to celebrate the new year, and I've never made a steamed version before (always the butter mochi version). I think I still like that version better, but this was cool to try out.\n\n## Ingredients\n\n|Ingredient|Amount|\n|---|---:|\n|sweet rice flour|420 g|\n|water|420 g|\n|sugar|250 g|\n|molasses|30 g|\n|bamboo leaves|4|\n|jujubes (decoration)|4|\n|dried red beans|3/4 cup|\n## Instructions\n1. Soak the red beans for 8 hours and then cook in instant pot for 30 minutes.\n2. Blanch the banana leaves in hot boiling water for about 5 minutes to soften it. Then pat dry and set aside\nCut the banana leaves into 7-inch width and about 7-8 inch length. Line the leaf horizontally and then vertically like a \"+\" (overlapping at the bottom of the dish) and then diagonally like an \"X\" to make sure I cover all the side and bottom of the dish. Repeat this pattern for 2 more times. (Lol or just freestyle line it...)\n3. Place both sugar and water in a saucepan and bring to a gentle simmer or until sugar is melted. Remove from the heat and let it cool down completely.\n4. Gradually pour the sugar mixture into the flour and stir until smooth.\n5. Add cooked, drained red beans.\n6. Steam in instant pot for 90 minutes\n7. Let cool overnight" }, { "alpha_fraction": 0.7348503470420837, "alphanum_fraction": 0.7590897679328918, "avg_line_length": 97.63414764404297, "blob_id": "ca64b1fa19b124eac184c99d8c169f9b583246dd", "content_id": "50392440f5770224db3380ebdd00897a5de66e45", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4063, "license_type": "no_license", "max_line_length": 580, "num_lines": 41, "path": "/bread/bagels.md", "repo_name": "myracheng/bread", "src_encoding": "UTF-8", "text": "# Bagels bagels bagels\n\n<center><img src=\"../img/title.jpg\" alt=\"title\" style=\"width:40%;\"/></center>\n\nSource: [Artisan Breads Every Day](https://www.epicurious.com/recipes/food/views/bagels-366757)\n\nTime: 2 days\n\nNotes: The normal ones turned out great (a bit small because I made 8) but the raisin ones just looked totally wonky - possibly because of the sugar/water/yeast that the broken raisins released, or the over-kneading to incorporate raisins into the dough? I would also put less molasses next time. But my stomach kinda hurt and the only thing that I wanted to eat is bagels, so I ate like 1.5 demented cinnamon raisin bagels. 
There are so many variations I want to try: whole-wheat, rye, cinnamon-raisin properly, matcha/turmeric/cocoa/rainbow, chili flakes.... maybe a steam bath?\n\nStory: idk man i just wanted to make bagels. \n\n## Ingredients\n### Dough\n|Ingredient|Amount|\n|---|---:|\n|all-purpose flour|440 g|\n|vital wheat gluten|14 g|\n|molasses|21 g (too much!)|\n|water|255 g|\n|yeast|3 g|\n|salt|10.5 g|\n### Poaching liquid\n|Ingredient|Amount|\n|---|---:|\n|lye water|100 g|\n|salt|7 g|\n|water|medium sauce/stew pot to the two dot line|\n\n## Instructions\n1. Combine dough ingredients for 3 minutes until well blended. The dough should form a stiff, coarse ball, and the flour should be fully hydrated; if it isn’t, stir in a little more water. Let the dough rest for 5 minutes.\n2. Resume mixing with the dough hook on the lowest speed for another 3 minutes. The dough should be stiff yet supple, with a satiny, barely tacky feel. If the dough seems too soft or overly tacky, mix or knead in a little more flour.\nPlace the dough in a clean, lightly oiled bowl, cover the bowl tightly with plastic wrap, and stick in fridge overnight or up to 2 days.\n3. When you’re ready to shape the bagels, prepare a sheet pan by lining it with parchment paper. Divide the dough into 6 to *8* equal pieces. Form each piece into a loose ball by rolling it on a clean, dry work surface with a cupped hand. (Don’t use any flour on the work surface. If the dough slides around and won’t ball up, wipe the surface with a damp paper towel and try again; the slight bit of moisture will provide enough traction for the dough to form into a ball.)\n4. Two methods to shape: The first method is to poke a hole through the center of the ball to create a donut shape. Holding the dough with both thumbs in the hole, rotate the dough with your hands, gradually stretching it to create a hole about 2 inches in diameter. The second method, preferred by professional bagel makers, didn’t work for me...maybe making the ropes more even would have been [good](https://www.kingarthurbaking.com/blog/2020/01/15/how-to-shape-bagels).\n5. Proof 60 to 90 minutes, and check whether the bagels are ready for baking using the “float test”: Place one of the bagels in a small bowl of cold water. If it sinks and doesn’t float back to the surface, shake it off, return it to the pan, and wait for another 15 to 20 minutes, then test it again. If they pass the float test before you are ready to boil and bake them, return them to the refrigerator so they don’t overproof. \n6. Preheat oven to 500°F (260°C) and gather and prepare your garnishes (seeds, onions, garlic, and so on).\n7. To make the poaching liquid, fill a pot with 2 to 3 quarts (64 to 96 oz / 1,814 to 2,722 g) of water, making sure the water is at least 4 inches deep. Cover, bring to a boil, then lower the heat to maintain at a simmer. Stir in the GOODS.\n8. Gently lower each bagel into the simmering poaching liquid, adding as many as will comfortably fit in the pot. They should all float to the surface within 15 seconds. After 1 minute, use a slotted spoon to turn each bagel over. Poach for another 30 to 60 seconds, then use the slotted spoon to transfer it back to the pan, domed side up. \n9. Sprinkle on a generous amount of whatever toppings you like as soon as the bagels come out of the water.\n10. Transfer the pan of bagels to the oven, then lower the oven heat to 450°F (232°C). Bake for 15 minutes."
}, { "alpha_fraction": 0.6694970726966858, "alphanum_fraction": 0.7318745851516724, "avg_line_length": 44.04411697387695, "blob_id": "a167df647597c7f4ae15367afbba412a19084afa", "content_id": "fd26a514e78cc5eb46f84819bd96bc7f5556e661", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3062, "license_type": "no_license", "max_line_length": 203, "num_lines": 68, "path": "/README.md", "repo_name": "myracheng/bread", "src_encoding": "UTF-8", "text": "# BREADME: A Recipe Log\n\nHi! Here are some of the things I've baked and the narratives surrounding them. Asterisks denote recipes I would make again (and again and again).\n\n### Values\n- Slow down and maximize resting; allow time for flavor development\n- Minimize waste (use up random ingredients that have been lying around), bowl-washing and sifting\n- Pizza dough setting in bread machine\n- Taste over looks, intuition over precise measurements\n- Less sugar, more nuts\n- Question, explore, and develop origins and narratives\n\n## Bread\n**Jan 07, 2021:** [Seafood Pancake](bread/seafoodpancake.md)\n\n**Jan 01, 2021:** [Whole wheat milk bread](bread/wholewheatmilkbread.md)\n\n**Dec 29, 2020:** [Bagels bagels bagels](bread/bagels.md)\n\n**Dec 23, 2020**: Peter Reinhart's 100% WW sandwich bread, Artisan Breads Everyday\n\n**Dec 17, 2020**: Rustic italian ciabatta, [King Arthur](https://www.kingarthurbaking.com/recipes/rustic-italian-ciabatta-recipe)*\n\n**Dec 14, 2020**: Pita bread, Hot Bread Kitchen\n\n**Dec 12, 2020:** Russian rye bread, [King Arthur](https://www.kingarthurbaking.com/recipes/russian-rye-bread-rizhsky-khleb-recipe)\n\n**Dec 9, 2020:** Rustic batard, Hot Bread Kitchen\n\n**Finals week:** Turtle (9 g matcha + 9 g water) challah, Hot Bread Kitchen\n\n**Nov 30, 2020:** Xian bing, [Omnivore's Cookbook](https://omnivorescookbook.com/chinese-beef-meat-pie/) (needed more water)\n\n**Nov 29, 2020:** Homemade dumpling wrappers, [Omnivore's Cookbook](https://omnivorescookbook.com/recipes/how-to-make-chinese-dumplings) (make 10g instead of 12-14, steam)\n\n**Nov 28 - Dec 2, 2020:** 100% whole wheat oat bread, [SK](https://smittenkitchen.com/2015/09/oat-and-wheat-sandwich-bread/) (proved 24 hrs & 72 hrs)\n\n## Cake\n**Dec 31, 2020:** [Red Bean Nian Gao](cake/redbeanniangao.md)\n\n**Dec 27, 2020:** Gingerbread, [Smitten Kitchen](https://smittenkitchen.com/2015/12/gingerbread-layer-cake/) (1/3 in 6\" pan)\n\n**Dec 23, 2020:** [Chocolate babka](cake/chobab.md)\n\n**Dec 12, 2020:** Persimmon cake, [David Lebovitz](https://www.davidlebovitz.com/persimmon-bread/)\n\n**Nov 27, 2020:** [Orange-cardamom chiffon cake](cake/orangecard.md)\n\n## Cookies\n**Jan 06, 2020:** Cat icebox cookies, [Delish](https://www.delish.com/holiday-recipes/christmas/a30210300/easy-icebox-cookies-recipe/) and Jennifer Xiao (chill in freezer and make the cocoa part lighter)\n\n**Dec 21, 2020:** Tiger stripe sables, [Susan Spugen](https://cherrybombe.com/recipes/susan-spungen-tiger-striped-sables) (more like tree logs...)\n\n**Dec 19, 2020:** Chocolate rye cookies, [Pastry Love](https://www.epicurious.com/recipes/food/views/double-chocolate-rye-cookies)\n\n## Dinner\n**Jan 04, 2020**: Seafood pancake\n**Dec 28, 2020**: Oysters with garlic sauce, [China Sichuan Food](https://www.chinasichuanfood.com/oyster-with-garlic-sauce/)\n\n**Dec 14, 2020**: Hummus, Hot Bread Kitchen*\n\n**Dec 11, 2020**: Chickpea stew\n\n## Real Estate (!)\n**Dec 22, 2020**: [Gingerbread house](cake/gingerbreadhouse.md)\n\n## MISC\n**Jan 05, 
2021:** Almond butter, [Minimalist Baker](https://minimalistbaker.com/how-to-make-nut-butter/)" }, { "alpha_fraction": 0.7230392098426819, "alphanum_fraction": 0.7490196228027344, "avg_line_length": 51.33333206176758, "blob_id": "52b722623fb1b69c75203ddf46a6e6c16edcb7e8", "content_id": "b709461a5c97a1f32c4d920a568515d7226f1eeb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2044, "license_type": "no_license", "max_line_length": 308, "num_lines": 39, "path": "/bread/old.md", "repo_name": "myracheng/bread", "src_encoding": "UTF-8", "text": "# Seafood Pancake\n\n<center><img src=\"../img/seafpancake0.jpg\" alt=\"title\" style=\"width:40%;\"/></center>\n<center><img src=\"../img/seafpancake1.jpg\" alt=\"title\" style=\"width:40%;\"/></center>\n\nSource: [Hooni Kim](https://food52.com/recipes/83213-korean-scallion-pancakes-pajeon-recipe), [Korean Bapsang](https://www.koreanbapsang.com/haemul-pajeon/), [Omnivore's Cookbook](https://omnivorescookbook.com/korean-seafood-pancake/)\n\nTime: 30 min\n\nNotes: I mixed some of the seafood into the batter, which made it kind of uneven. I also kinda skimped on the scallions.\n\nStory:\n\n## Ingredients\n\n|Ingredient|Amount|\n|---|---:|\n|scallions|many bunches|\n|chopped seafood|2 cups|\n|all-purpose flour|3/4 cup|\n|tapioca flour|1/4 cup|\n|sugar|pinch|\n|salt|pinch|\n|ice-cold water|1 cup + 2 tbsp|\n|eggs|1|\n|oil|2 tbsp|\n\n\n## Instructions\n1. Combine the dry ingredients of the batter in a medium-sized bowl. Slowly stir in the water and mix with a spatula until it forms a smooth, runny batter that can just coat the back of a spoon (thinner than pancake, thicker than crepe). Add a bit more water if the batter is still a bit thick and mix again.\n2. Heat 2 tablespoons of oil over medium-high heat in a large skillet until hot.\n3. Add half the green onions to the pan in a single even layer, forming a rectangle about 7” by 9” (18 cm by 23 cm).\n4. Drizzle about 1/4 cup of batter evenly over the green onions.\n5. Spread 1/3 cup of seafood mix evenly across the pancake and drizzle another 1/4 cup of batter evenly over the seafood.\n6. Turn the heat to medium and allow the pancake to cook for 1 minute.\n7. Add half of the beaten egg over the pancake and spread it with a brush or a spoon to form an even layer.\n8. Cook the pancake for another 1 to 2 minutes, occasionally checking underneath, until it is browned and crisp.\n9. In one fluid motion, flip the pancake using a spatula (or two spatulas). It may be necessary to add another tablespoon of oil to the pan.\n10. Cook the pancake until the seafood side is browned and cooked through, another 2 to 4 minutes." 
}, { "alpha_fraction": 0.7430762648582458, "alphanum_fraction": 0.7590541243553162, "avg_line_length": 78.5762710571289, "blob_id": "0938d1c30139eec1a8606e1193cd25277f4c8a44", "content_id": "b16cc8883adb2bbfafd54f14f06a644b12ecfe60", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4713, "license_type": "no_license", "max_line_length": 744, "num_lines": 59, "path": "/cake/chobab.md", "repo_name": "myracheng/bread", "src_encoding": "UTF-8", "text": "# Chocolate babka\n<center><img src=\"../img/babka.jpg\" alt=\"babka\" style=\"width:40%;\"/></center>\n\nSource: [Smitten Kitchen](https://smittenkitchen.com/2014/10/better-chocolate-babka/), [Melissa Weller](https://cherrybombe.com/recipes/melissa-weller-chocolate-babka)\n\nTime: 2 days\n\nNotes: The filling was too crumbly since I didn't want to make it too sweet with the molasses. BUt this is sooo delicious. It would be good to roll it out thinner and make sure the filling is spread all the way to the edge for the most elaborate spiral, and I'd probably add half the amount of cracker crumbs so that the filling is more spreadable and less crumbly. Or maybe add some heavy cream? But it's already so rich...\n\nStory: I've long been hesitant about babka - I already feel satisfied by the red bean swirl/milk bread combination, and have a hard time imagining a sweet bread better than that. But I love finding recipes that allow me to use up random almost-empty boxes of cheerios and graham crackers, and the swirls in the Food and Wine magazine looked really beautiful and clean. In retrospect, I would decidedly classify this as a (yeasted) cake, but it's so yummy! I totally get why this would be appropriate as a birthday cake. My sister says \"it looks like red bean paste from afar, but then you realize it's chocolate and get excited\" - the opposite of the raisins-masquerading-as-chocolate phenomenon. So I guess here's yet another Jewish recipe... \n\n## Ingredients\n### Dough\n|Ingredient|Amount|\n|---|---:|\n|all-purpose flour|265 g|\n|sugar|50 g|\n|water|60 g|\n|yeast|1 tsp|\n|salt|3/8 tsp|\n|eggs|2|\n|zest|half a small orange|\n|butter|75 g, room temp (accidentally salted)|\n### Filling\n|Ingredient|Amount|\n|---|---:|\n|molasses|10 g|\n|chocolate|120 g|\n|cheerios and graham crackers (lol)|70 g|\n|butter|62 g|\n\n### Glaze\n\n|Ingredient|Amount|\n|---|---:|\n|water|40 g|\n|sugar|40 g|\n\n\n## Instructions\n1. Make the dough: Activate the yeast with a bit of warm water. Combine the flour, sugar, yeast and zest. Add eggs and 1/2 cup water, mixing with the dough hook until it comes together; this may take a couple minutes. Itโ€™s okay if itโ€™s on the dry side, but if it doesnโ€™t come together at all, add extra water, 1 tablespoon at a time, until the dough forms a mass. \n\n2. Add the salt, then the butter, a spoonful at a time, mixing until itโ€™s incorporated into the dough. Then, mix on medium speed for 10 minutes until dough is completely smooth; youโ€™ll need to scrape the bowl down a few times. I usually found that after 10 minutes, the dough began to pull away from the sides of the bowl. If it doesnโ€™t, you can add 1 tablespoon extra flour to help this along.\n\n3. Put dough in airtight container and refrigerate for half a day, preferably overnight. (Dough will not fully double)\n\n4. Make filling: Melt chocolate and butter. Stir in cookie crumbs and molasses. Let cool until spreadable consistency (this got way too dense and crumbly :( ))\n\n5. Line 8x4 pan with parchment paper. 
Roll out on a well-floured counter to about a 10-inch width (the side closest to you) and as long in length (away from you) as you can when rolling it thin, likely 10 to 12 inches.\n\n6. Use an offset spatula (or hands lol) to spread two-thirds of one portion of the filling evenly over the surface of the dough, *taking care to spread the filling to the edges*. Gently roll the dough away from you to form a fat roll. Pick the babka roll up with two hands and give it a gentle pull on both ends to elongate it slightly. Set the roll down and cut it in half through the middle so you have 2 rolls, each approximately 10 inches long.\n\n7. Spread the remaining portion of filling across the top of one of the 2 halves and place the second half on top of the filling to form an X with the 2 rolls of dough. Twist the ends of the bottom roll up and over the top roll to cover the filling. (basically just twist the two strands around each other)\n\n8. Cover and leave to rise another 3 hours (?) at room temperature. (The dough will not double, but it will look lighter)\n\n9. Heat oven to 350°F. Bake for 40 minutes, but there's no harm in checking for doneness at 25 minutes. A skewer inserted into an underbaked babka will feel stretchy/rubbery inside and may come back with dough on it. When fully baked, you'll feel almost no resistance. \n\n10. While babkas are baking, make syrup: Bring sugar and water to a simmer until sugar dissolves. Remove from heat and set aside to cool somewhat. As soon as the babkas leave the oven, brush the syrup all over each. It will seem like too much, but will taste just right: glossy and moist. Let cool about halfway in pan, then transfer to a cooling rack to cool the rest of the way before eating." }, { "alpha_fraction": 0.7328072190284729, "alphanum_fraction": 0.7559188008308411, "avg_line_length": 49.71428680419922, "blob_id": "61d84d1fd661b88289596e72293163803c807d78", "content_id": "ee678a83fc114b9b15b52eb5ce1a1af34140b99a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1777, "license_type": "no_license", "max_line_length": 197, "num_lines": 35, "path": "/cake/orachicak.md", "repo_name": "myracheng/bread", "src_encoding": "UTF-8", "text": "# Orange-Cardamom Chiffon Cake\n\nSource: [Just One Cookbook](https://www.justonecookbook.com/meyer-lemon-chiffon-cake/)\n\nTime: 1 hour\n\nNotes: made as a last-minute Thanksgiving dessert\n\n\n## Ingredients\n \n|Ingredient|Amount|\n|---|---:|\n|all-purpose flour |69 g|\n|tapioca starch |6 g|\n|sugar|85 g|\n|baking powder|1 tsp|\n|eggs|3|\n|oil|40 g|\n|navel orange|1 large|\n|ground cardamom|1 tsp|\n\n## Instructions\n1. Gather all the ingredients and a deep 6-inch cake pan. Grease the bottom of the cake pan only.\n2. Preheat the oven to 325°F. Separate 3 eggs into yolks and whites. \n3. Add 1/3 of the sugar to the egg yolks. Add oil, cardamom, and orange zest.\n4. Juice the orange to obtain 60 g of juice, and add it to the egg mixture. Also add the flour, tapioca starch, and baking powder. Whisk until totally incorporated and make sure there are no lumps.\n5. Whip the egg whites till opaque and foamy, then add the rest of the sugar, and whisk until stiff peaks. (until the tip is soft enough that it folds over, like taking a bow.)\n6. Using a whisk, fold ⅓ of the egg whites into the batter until the mixture is homogeneous.\n7. Fold in the rest of egg whites in 2-3 increments and mix gently until the mixture is homogeneous.\n8. 
Pour into pan, and drop the pan gently to reduce uneven air pockets. Do not scrape the bowl! (Pour the extras into a separate vessel)\n9. Bake for 30 minutes.\n10. Drop it gently on the kitchen counter to stop shrinking. The cake must be cooled upside down in its pan so that it stretches downward instead of collapsing. Let it cool completely.\n11. To extract the cake, run a thin sharp knife or thin offset spatula around both the outer and inner edge of the cake.\n12. Remove the cake from the pan and run the knife on the bottom. Move onto a serving plate." }, { "alpha_fraction": 0.6242038011550903, "alphanum_fraction": 0.6337579488754272, "avg_line_length": 10.25, "blob_id": "8fae06e66a3b52d367b654158460ae666fe8ccd1", "content_id": "c5f4a7ef7eaf1944b4e6e5ca10cb507b09021efe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 314, "license_type": "no_license", "max_line_length": 77, "num_lines": 28, "path": "/bread/template.md", "repo_name": "myracheng/bread", "src_encoding": "UTF-8", "text": "# TITLE HERE\n\n<center><img src=\"../img/title.jpg\" alt=\"title\" style=\"width:40%;\"/></center>\n\nSource: [TODO](link)\n\nTime:\n\nNotes:\n\nStory:\n\n## Ingredients\n\n|Ingredient|Amount|\n|---|---:|\n|all-purpose flour|g|\n|wholewheat flour|g|\n|sugar|g|\n|water|g|\n|yeast|g|\n|salt|g|\n|eggs|g|\n|oil|g|\n\n\n## Instructions\n1. Something" }, { "alpha_fraction": 0.7052631378173828, "alphanum_fraction": 0.7401914000511169, "avg_line_length": 51.275001525878906, "blob_id": "aeee1bcfdceb5bdc5083fdbb7bd3a3754f7e6132", "content_id": "1d768cbaa20637e1467df217ea5ac005d97e501d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2092, "license_type": "no_license", "max_line_length": 375, "num_lines": 40, "path": "/bread/wholewheatmilkbread.md", "repo_name": "myracheng/bread", "src_encoding": "UTF-8", "text": "# Whole wheat milk bread\n\n<center><img src=\"../img/wwpdm.png\" alt=\"ww pdm\" style=\"width:40%;\"/></center>\n\nSource: [King Arthur Flour](https://www.kingarthurbaking.com/recipes/100-whole-wheat-pain-de-mie-recipe)\n\nTime: few hours\n\nNotes: This made an 8\" round of rolls and an 8\" pain de mie. I think I'll try making exactly 60% for the PDM loaf and making sure the loaf is really long and even to get the full PDM effect:\n52 apf, 318 wwf, 21 sugar, 270 milk, 4 yeast, 7 salt, 51 oil. and make sure to grease the aluminum foil lid!\nOr maybe even 50%?\n\nStory: This is my favorite all-whole-wheat bread, at least on day 1! I'll have to see how long it lasts...\n\n## Ingredients\n\n|Ingredient|Amount|\n|---|---:|\n|all-purpose flour|87 g|\n|wholewheat flour|530 g|\n|sugar|35 g|\n|milk|450 g|\n|yeast|7 g|\n|salt|12 g|\n|(coconut) oil|85 g|\n\n## Instructions\n1. Combine all of the ingredients, and form a smooth, supple dough.\n\n2. Allow the dough to rise until puffy though not necessarily doubled in bulk, about 1 1/2 hours.\n\n3. Lightly grease an 8x4 pain de mie (pullman) pan. Transfer the risen dough to a lightly greased work surface, shape it into a log, and fit it into the pan. Flatten the top as much as possible. Cover the pan with lightly greased plastic wrap, and allow the dough to rise until it's about 1/2\" below the lip of the pan, about 45 minutes. > mine rose way more than that... hmmm\n\n4. Preheat your oven 350°F. Carefully slip the cover onto the pan, and let it rest an additional 15 minutes while the oven heats.\n\n5. Bake the bread for 25 minutes. 
Remove the rolls from the oven. PDM: Remove the pan from the oven, carefully remove the lid, and return the bread to the oven to bake for an additional 10 to 15 minutes, until it's golden-brown on top and tests done; an instant-read thermometer inserted into the center will register 190°F.\n\n6. Remove the bread from the oven, and turn it out of the pan onto a rack to cool completely. For a soft, flavorful crust, brush the loaf with melted butter while warm.\n\n7. Store, well-wrapped, on the counter for 5 days, or freeze for up to 3 months." }, { "alpha_fraction": 0.7600089907646179, "alphanum_fraction": 0.7744039297103882, "avg_line_length": 87.94000244140625, "blob_id": "3c6b45f5eb0af23ac15f5b9efb87fdd4501ae6d4", "content_id": "e94d5a37a40f3ac04bf2916774e04bd44cd35284", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4448, "license_type": "no_license", "max_line_length": 1130, "num_lines": 50, "path": "/cake/gingerbreadhouse.md", "repo_name": "myracheng/bread", "src_encoding": "UTF-8", "text": "# Gingerbread house\n\n<center><img src=\"../img/gingerbreadhouse.jpg\" alt=\"title\" style=\"width:40%;\"/></center>\n<center><img src=\"../img/gingerhouse2.jpg\" alt=\"title\" style=\"width:40%;\"/></center>\n\nSource: [Stella Parks](https://www.seriouseats.com/recipes/2016/10/construction-gingerbread-recipe.html), [Template](http://www.gingerbread-house-heaven.com/support-files/free-gingerbread-house-pattern.pdf)\n\nTime: tooooo long\n\nNotes: First I only made one batch of the gingerbread dough, and then I realized that I had printed out a REALLY BIG template - in general, we followed a philosophy of \"go big or go home\" since decorating a pre-made gingerbread house is so easy lol. Also, I broke our hand mixer trying to mix the cement-like royal icing. The hardest part of this was probably figuring out the ratios of how much gingerbread, powdered sugar, and egg white we actually needed; the below are estimates of the amount we actually used.\n\nStory: After seeing pretty gingerbread houses on r/baking, I found myself browsing through different templates online when I had trouble falling asleep. I don't have the patience to decorate and work with icing, but I know my sister does, so we took a bit of a [Homemade Gingerbread House Kit](https://www.youtube.com/watch?v=mc4fBTpB6eQ) approach where I made the cookie component and then allowed her the creative freedom to do the \"fun part.\" I had the most fun putting a ceramic pig figurine inside and coming up with metaphors about arresting the police - next time, I would like to put a gingerbread man inside to continue the \"trapped in my flesh house\" theme. There is also something incredibly satisfying about building a house from scratch, and knowing that each of the six pieces, with all their jagged edges and cracks, was made from raw ingredients. 
Due to COVID, we haven't been going grocery shopping, so it was much harder to buy small boxes of candy like gumdrops; I'm proud of our ingenuity in repurposing random candies that have been sitting on our counter, like a pack of Altoids, some Japanese gummies, etc.\n\n## Ingredients\n### Gingerbread\n|Ingredient|Amount|\n|---|---:|\n|sugar|138 g|\n|molasses|30 g|\n|cinnamon|2 tbsp|\n|ground ginger|4 tsp|\n|salt|1/4 tsp|\n|corn syrup|345 g|\n|very soft butter|140 g|\n|all-purpose flour|535 g|\n\n### Royal icing\n|Ingredient|Amount|\n|---|---:|\n|egg whites|3|\n|powdered sugar|765 g|\n\n### For decorating\nM&M's, candy canes, frosted wheats, random candy that we've collected over the years (altoids, Japanese gummies, jelly beans...)\n\n## Instructions\n### Gingerbread\n1. Trim a sheet of parchment paper to fit the interior of a half sheet pan. Adjust oven rack to lower-middle position and preheat to 350ยฐF (175ยฐC). Combine everything but the flour and mix on low until smooth, then sprinkle in flour and continue mixing to form a stiff dough. Knead until smooth. Proceed immediately, or wrap in plastic and set aside at room temperature until needed, up to 24 hours. (Larger batches should be divided into 14-ounce portions.)\n\n2. Sprinkle prepared parchment with oil, place dough on top, and flatten into a rectangle. Sprinkle with more oil and roll to fit just within edges of parchment, leaving dough about 3/16 inch thick. Using both hands, transfer parchment to a half sheet pan. Cut according to your gingerbread house template, using an X-Acto or knife. Leave a narrow border of dough around cutouts to minimize spreading, but trim away larger areas of excess dough to gather and re-roll.\n\n3. Bake gingerbread until dry to the touch and golden brown, about 20-25 minutes. Immediately cut along pre-scored lines with a sharp knife or pizza wheel. Cool completely in pan, then transfer cutouts to a safe place. Scraps can be nibbled, or ground in a food processor to use in recipes that call for cookie crumbs.\n\n### Royal icing\n1. Combine egg white with half the powdered sugar. Mix at low speed until sugar disappears into a paste, then gradually sprinkle in the rest. Increase speed to medium and beat until smooth and light, about 2 minutes. If too stiff, thin icing with a few drops of water. Use immediately; keep covered tightly in plastic to prevent icing from drying in bowl. Royal icing can be kept in an airtight container at room temperature up to 12 hours.\n\n### Construction\n1. Construct the base first, and let it dry for at least 30 minutes.\n2. Add lights/figurines inside, then add the roof. \n3. Let sit overnight to dry, and then decorate!" 
}, { "alpha_fraction": 0.6187542676925659, "alphanum_fraction": 0.6235455274581909, "avg_line_length": 20.82089614868164, "blob_id": "e625f9c9c8b17f3a220af446f35785811ceeda4e", "content_id": "372291b6b57f59cb6ff445fac0e55596b6aaff3f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1461, "license_type": "no_license", "max_line_length": 88, "num_lines": 67, "path": "/publish.py", "repo_name": "myracheng/bread", "src_encoding": "UTF-8", "text": "# from irene\nimport argparse\nimport time\nimport pdb\nimport re, string\nimport shutil\nimport os\nfrom urllib.request import urlopen\nfrom os.path import join\n\nPDF_DIR = 'pdfs/'\n# WRITEUP_DIR = 'recipes/'\n\ndef main():\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\"--folder\", required = True, default=\"bread\",help=\"writeup folder\")\n\targs = parser.parse_args()\n\n\tfolder = args.folder\n\tf = open(os.path.join(folder, 'today.md'), 'r')\n\twriteup = f.readlines()\n\tf.close()\n\n\tinfo_line = writeup[2]\n\tif 'TODO' in info_line:\n\t\traise ValueError('today.md has no new information.')\n\t\n\tmd_title = writeup[0][2:-1].lower().replace(\" \", \"\") \n\n\ttitle = writeup[0][2:-1]\n\tmd_fname = join(folder, md_title + '.md')\n\n\tg = open(md_fname, 'w')\n\tg.writelines(writeup)\n\tg.close()\n\n\tprint('Wrote file to %s' % md_fname)\n\n\tf = open('README.md', 'r')\n\treadme = f.readlines()\n\tf.close()\n\n\tg = open('README_new.md', 'w')\n\tfound = False\n\tfor i, line in enumerate(readme):\n\t\tg.write(line)\n\t\tif '## %s' % folder in line.lower():\n\t\t\tfound = True\n\t\t\tdate = time.strftime(\"%b %d, %Y\")\n\t\t\t\n\t\t\tnew_line = '**%s:** [%s](%s)' % (\n\t\t\t\tdate, title, md_fname\n\t\t\t\t)\n\t\t\tg.write(new_line + '\\n')\n\t\t\tg.write('\\n')\n\t\t\t\n\tg.close()\n\n\tshutil.copy('README.md', 'README_old.md')\n\tshutil.move('README_new.md', 'README.md')\n\n\tshutil.copy('%s/today.md'% folder, '%s/old.md'%folder)\n\tshutil.copy('%s/template.md' %folder, '%s/today.md'%folder)\n\tprint('Updated README.md')\n\nif __name__ == '__main__':\n\tmain()" } ]
10
jpscaletti/proper-form
https://github.com/jpscaletti/proper-form
d40daf6a8e56ffe6a666bb1a80321e8188b6cfca
d7bbce962d806e0053be3695dbc9aa046794746a
dcd8beb53e4e67b5301345c6fd0f2a8cc2c06d0c
refs/heads/master
2020-06-17T06:33:54.879033
2020-04-06T23:00:32
2020-04-06T23:00:32
195,831,781
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.7567567825317383, "alphanum_fraction": 0.7567567825317383, "avg_line_length": 17.5, "blob_id": "77d56d9aeb5ec66482cb1ef38e167cddb2b833b2", "content_id": "0ae93aee229b2e94837f57708761368e06298520", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 37, "license_type": "permissive", "max_line_length": 20, "num_lines": 2, "path": "/tests/conftest.py", "repo_name": "jpscaletti/proper-form", "src_encoding": "UTF-8", "text": "# import proper_form\n# import pytest\n" }, { "alpha_fraction": 0.5128498077392578, "alphanum_fraction": 0.5276984572410583, "avg_line_length": 21.44871711730957, "blob_id": "1ed3f1aa0424e6d0d9cdcd656275fc8cb5e117e7", "content_id": "a80063a5f7d2e81d8252988dbaacf7120323361b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1751, "license_type": "permissive", "max_line_length": 74, "num_lines": 78, "path": "/mm.py", "repo_name": "jpscaletti/proper-form", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\"\"\"\nThis file generates all the necessary files for packaging for the project.\nRead more about it at https://github.com/jpscaletti/mastermold/\n\"\"\"\ndata = {\n \"title\": \"Proper Form\",\n \"name\": \"proper_form\",\n \"pypi_name\": \"proper_form\",\n \"version\": \"0.200406\",\n \"author\": \"Juan-Pablo Scaletti\",\n \"author_email\": \"[email protected]\",\n \"description\": \"A not-terrible Python form library.\",\n \"copyright\": \"2019\",\n \"repo_name\": \"jpscaletti/proper-form\",\n \"home_url\": \"https://github.com/jpscaletti/proper-form\",\n # Displayed in the pypi project page\n \"project_urls\": {\n \"Documentation\": \"https://jpscaletti.github.io/proper-form\",\n },\n\n \"development_status\": \"4 - Beta\",\n \"minimal_python\": 3.6,\n \"install_requires\": [\n \"email-validator ~= 1.0\",\n \"idna ~= 2.8\",\n \"markupsafe ~= 1.1\",\n \"python-slugify ~= 3.0\",\n ],\n \"testing_requires\": [\n \"pytest\",\n \"pytest-cov\",\n 'pony;python_version<\"3.8\"',\n \"sqlalchemy\",\n ],\n \"development_requires\": [\n \"pytest-flake8\",\n \"flake8\",\n \"ipdb\",\n \"tox\",\n \"mkdocs\",\n \"mkdocs-material\",\n \"pymdown-extensions\",\n \"pygments\",\n \"pygments-github-lexers\",\n ],\n \"entry_points\": \"\",\n\n \"coverage_omit\": [],\n}\n\nexclude = [\n \"hecto.yml\",\n \"README.md\",\n \".git\",\n \".git/*\",\n \".venv\",\n \".venv/*\",\n \".DS_Store\",\n \"CHANGELOG.md\",\n]\n\n\ndef do_the_thing():\n import hecto\n\n hecto.copy(\n # \"gh:jpscaletti/mastermold.git\",\n \"../mastermold\", # Path to the local copy of Master Mold\n \".\",\n data=data,\n exclude=exclude,\n force=False,\n )\n\n\nif __name__ == \"__main__\":\n do_the_thing()\n" }, { "alpha_fraction": 0.7614313960075378, "alphanum_fraction": 0.7614313960075378, "avg_line_length": 54.88888931274414, "blob_id": "2429ae9c034ca88a02e18ffcecccc0d53400e6ce", "content_id": "2667fdbd9737bd963ba577f208b9172beb734715", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 503, "license_type": "permissive", "max_line_length": 168, "num_lines": 9, "path": "/README.md", "repo_name": "jpscaletti/proper-form", "src_encoding": "UTF-8", "text": "# Proper Form\n\nA *not-terrible* Python form library.\n\n[![Coverage 
Status](https://coveralls.io/repos/github/jpscaletti/proper-form/badge.svg?branch=master)](https://coveralls.io/github/jpscaletti/proper-form?branch=master)\n[![](https://travis-ci.org/jpscaletti/proper-form.svg?branch=master)](https://travis-ci.org/jpscaletti/proper-form/) \n[![](https://img.shields.io/pypi/pyversions/proper-form.svg)](https://pypi.python.org/pypi/proper-form)\n\nDocumentation: https://jpscaletti.github.io/proper-form/\n" } ]
3
juhosa/RasPiTimelapse
https://github.com/juhosa/RasPiTimelapse
a34ade92f1c04ab7969d076584a5ae933ebf4258
4e188cbfd1162f8a14a7541e44cd9b3fbf96265e
3a3953f4069f5f23adbcf5251e89d4f0c2472103
refs/heads/master
2021-01-20T05:14:14.465937
2017-08-25T17:52:18
2017-08-25T17:52:18
101,423,098
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7293233275413513, "alphanum_fraction": 0.7518796920776367, "avg_line_length": 37, "blob_id": "797ff80e56831754753aa089eb30b68dd9bab3fc", "content_id": "28b69cdc1fe2bf0c2cd9781c95ad6f4e4147f756", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 266, "license_type": "permissive", "max_line_length": 169, "num_lines": 7, "path": "/README.md", "repo_name": "juhosa/RasPiTimelapse", "src_encoding": "UTF-8", "text": "# Raspberry Pi Timelapse\n\n## Setup\nTo install the camera and code depencies see [https://picamera.readthedocs.io/en/release-1.13/install.html](https://picamera.readthedocs.io/en/release-1.13/install.html)\n\n## Running\nTo run the code, just run ```python script.py```\n" }, { "alpha_fraction": 0.6728110313415527, "alphanum_fraction": 0.7073732614517212, "avg_line_length": 17.08333396911621, "blob_id": "199ec52f8b8d125beacbccb153c55f9d22332a08", "content_id": "10e5f5aa7632f2fe066f47c49f5973d34ab4f544", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 434, "license_type": "permissive", "max_line_length": 67, "num_lines": 24, "path": "/script.py", "repo_name": "juhosa/RasPiTimelapse", "src_encoding": "UTF-8", "text": "from time import sleep\nfrom picamera import PiCamera\n\ncamera = PiCamera(resolution = (1280, 720))\n\ncamera.iso = 100\n\nsleep(3)\n\ncamera.shutter_speed = camera.exposure_speed\n\ncamera.exposure_mode = 'off'\n\ng = camera.awb_gains\n\ncamera.awb_mode = 'off'\ncamera.awb_gains = g\n\ncamera.start_preview()\n\nfor fn in camera.capture_continuous('images/img{counter:03d}.jpg'):\n print('Captured %s' % fn)\n # take pic every 2 secs\n sleep(2)\n" } ]
2
FernandoMarcon/learning_notes
https://github.com/FernandoMarcon/learning_notes
c1d04f9f326fc94662c52b40b335c98acc136ff4
092b65a63c6ff3544ec65759f801200f7700d7e0
73d7ba788ad6ebb8abe5e75fcaa470a439dc538a
refs/heads/main
2023-08-07T20:31:18.268871
2021-09-19T21:09:38
2021-09-19T21:09:38
353,436,381
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7259615659713745, "alphanum_fraction": 0.745192289352417, "avg_line_length": 40.599998474121094, "blob_id": "316e96dd8f306ca81cf535b72c15eb8c292b5a42", "content_id": "869566ac40dfa4bb20a05710ac599ab1170f7d9c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 832, "license_type": "no_license", "max_line_length": 199, "num_lines": 20, "path": "/finances/Algorithmic Trading/VIX_moving_average.py", "repo_name": "FernandoMarcon/learning_notes", "src_encoding": "UTF-8", "text": "'''\nTrying to understand wheter any of the prices seem artificially high or low, over a particular period of time.\n\nEx: Maybe the stock seems to have an artifical level of exuberance around it: people have been very excited about it, and buying pressures have pushed the prices up a little too much, and vice versa.\n\nThe simplest way to convey this idea is through moving average: smooths out some inter-day fluctuations in the stock, and gives us a better idea of where the trend is over time.\n'''\n\nimport pandas as pd\n\ndata = pd.read_excel('finances/data/02_01_Begin.xlsx')\ndata = data.drop(index=[6841,6842], axis=0)\ndata.head()\n\n#--- Transform Date varible in datetime\ndata['Date'] = pd.to_datetime(data['Date'])\n\ndata['MA'] = data['Adj Close'].rolling(window=14).mean()\n\ndata['MA'].plot(title='VIX\\nMoving Average of 2 Weeks')\n" }, { "alpha_fraction": 0.787162184715271, "alphanum_fraction": 0.787162184715271, "avg_line_length": 62.081966400146484, "blob_id": "6f0171d09b78a279e7ca9c3c9376e5c97a8f2abc", "content_id": "77d6c572341ff785812970e7b88edd2ad58bea7a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3866, "license_type": "no_license", "max_line_length": 314, "num_lines": 61, "path": "/finances/crypto/investment_strategies.md", "repo_name": "FernandoMarcon/learning_notes", "src_encoding": "UTF-8", "text": "# Types of strategies:\n* __HODL Strategy__: Long-term investor with a couple of trades per year\n* __Swing Trader__: A medium-term investor with a couple of trades per month.\n* __Day trader__: High-frequency trader with several trades per week or even per day.\n\n# Choosing the Cryptocurrency\nBased on:\n- Liquidity and Trading Volume\n- Volatility\n- Availability\n- Spread and Fees\n- Potential\n\n## Best Cryptocurrency to trade based on Liquidity\n> Liquidity: how much value is exchanged during a certain time frame.\n\n- high liquidity in the market โ†’ creates less volatility and the risk for a large market gap decreases\n- liquidity correlates well with market capitalization โ†’ the larger the market cap is, the higher the liquidity.\n\n__Pros__: High liquidity reduces the risk of the market gap\n__Cons__: Low liquidity creates larger turns and entails higher risk\n_Example_: Bitcoin and the top cryptocurrencies offers high liquidity and high trading volume.\n\n## Best Cryptocurrency to trade based on Volatility\n> Volatility: how much an asset or a cryptocurrency moves at a certain time interval.\n\n- volatility โ†’ change in price โ†’ what creates a potential for profitable trades\n- Bitcoin (BTC) is the least volatile cryptocurrency.\n- Volatility often correlates with total market value โ†’ the higher the market value is, the lower the volatility.\n\n__Pros__: High volatility creates prerequisite for profit\n__Cons__: High volatility increases the risk of large losses\n_Example_: Bitcoin and the top cryptocurrencies offers low volatility compared to 
\ndata['MA'].plot(title='VIX\\nMoving Average of 2 Weeks')\n" }, { "alpha_fraction": 0.787162184715271, "alphanum_fraction": 0.787162184715271, "avg_line_length": 62.081966400146484, "blob_id": "6f0171d09b78a279e7ca9c3c9376e5c77a8f2aa8", "content_id": "77d6c572341ff785812970e7b88edd2ad58bea7a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3866, "license_type": "no_license", "max_line_length": 314, "num_lines": 61, "path": "/finances/crypto/investment_strategies.md", "repo_name": "FernandoMarcon/learning_notes", "src_encoding": "UTF-8", "text": "# Types of strategies:\n* __HODL Strategy__: Long-term investor with a couple of trades per year\n* __Swing Trader__: A medium-term investor with a couple of trades per month.\n* __Day trader__: High-frequency trader with several trades per week or even per day.\n\n# Choosing the Cryptocurrency\nBased on:\n- Liquidity and Trading Volume\n- Volatility\n- Availability\n- Spread and Fees\n- Potential\n\n## Best Cryptocurrency to trade based on Liquidity\n> Liquidity: how much value is exchanged during a certain time frame.\n\n- high liquidity in the market → creates less volatility, and the risk of a large market gap decreases\n- liquidity correlates well with market capitalization → the larger the market cap is, the higher the liquidity.\n\n__Pros__: High liquidity reduces the risk of a market gap\n__Cons__: Low liquidity creates larger swings and entails higher risk\n_Example_: Bitcoin and the top cryptocurrencies offer high liquidity and high trading volume.\n\n## Best Cryptocurrency to trade based on Volatility\n> Volatility: how much an asset or a cryptocurrency moves at a certain time interval.\n\n- volatility → change in price → what creates a potential for profitable trades\n- Bitcoin (BTC) is the least volatile cryptocurrency.\n- Volatility often correlates with total market value → the higher the market value is, the lower the volatility.\n\n__Pros__: High volatility creates the prerequisite for profit\n__Cons__: High volatility increases the risk of large losses\n_Example_: Bitcoin and the top cryptocurrencies offer low volatility compared to the altcoins with lower market cap.\n\n## Best Cryptocurrency to trade based on Availability\n__Pros__: If accessibility is high, you can choose between several platforms\n__Cons__: You may need to choose a platform that lacks trading features you are looking for\n_Example_: Bitcoin and the top cryptocurrencies offer high availability compared to the altcoins with lower market cap.\n\n## Best Cryptocurrency to trade based on Spread and Fees\n> spread: difference between a buy and a sell order in the order book.\n- For some trading platforms, this is the only \"fee\" you pay while using their service.\n- It can vary between different trading platforms\n- very important for high-frequency traders since they might have several hundred trades during the day.\n\n__Pros__: Tight spreads reduce costs and increase your trading margin\n__Cons__: High spread and fees can eat up your winning margin if you are a high-frequency trader.\n_Example_: Bitcoin and the top cryptocurrencies offer a tighter spread compared to the altcoins with lower market cap.\n\n## Best Cryptocurrency to trade based on Potential\n__Pros__: With great potential, you can make a higher profit in percentage terms.\n__Cons__: When you are looking for small altcoins with high potential you need to conduct thorough research. It is very easy to fall for ICO scams or promising projects that will, most likely, fail. This is borderline a lottery.\n_Example_: Bitcoin and the top cryptocurrencies offer lower potential than the most promising altcoins. Nevertheless, many altcoins don't have any potential at all.\n\n# Best Cryptocurrency for Day Trading and Swing Trading\n\n__HODL Strategy__: You should look for availability and great potential. The spread and fees are not important at all. The liquidity and volatility are nothing to consider either. If the cryptocurrency is very volatile you should split your investment into several deposits to minimize the risk of buying at the peak.\n\n__Swing Trader__: You should look for high liquidity, trading volume, availability and low fees. You don't need extremely low fees but still, they will cut your margin.\n\n__Day trader__: You should look for high liquidity, very high trading volume, high volatility, and extremely low spread or fees. 
The long-term potential is not highly important.\n" }, { "alpha_fraction": 0.738095223903656, "alphanum_fraction": 0.738095223903656, "avg_line_length": 40.5, "blob_id": "2a2ea905d904de31b406125e642cf6edca88bc61", "content_id": "02312662cc4eee75a83bc0ebfbb95816f19995a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 84, "license_type": "no_license", "max_line_length": 45, "num_lines": 2, "path": "/Security/README.md", "repo_name": "FernandoMarcon/learning_notes", "src_encoding": "UTF-8", "text": "\n# Tools for testing network security\n- [Pentest-Tools](https://pentest-tools.com/)\n" }, { "alpha_fraction": 0.6764018535614014, "alphanum_fraction": 0.6985981464385986, "avg_line_length": 18.212766647338867, "blob_id": "fbd1d8766cd8e69f84fa8dba77858df2bedbe01b", "content_id": "0ea392aa557d2df381954c464d3753f8c6a30dff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 856, "license_type": "no_license", "max_line_length": 112, "num_lines": 47, "path": "/finances/Algorithmic Trading/r_and_bond_trading.R", "repo_name": "FernandoMarcon/learning_notes", "src_encoding": "UTF-8", "text": "##########\n#= Bond Trading with R\n#= source: Algorithmic Trading and Finance Models with Python, R, and Stata Essential Training (LinkedIn Course)\n##########\n\n#--- Load Libraries\nlibrary(quantmod)\n\n#--- Data Retrieval and Visualization with `quantmod`\n# Get Apple (AAPL) Stock Prices\ngetSymbols('AAPL')\nhead(AAPL)\n\nbarChart(AAPL)\n\nbarChart(AAPL, subset='last 28 days')\n\n# Get Microsoft (MSFT) Stock Prices\nrm(AAPL)\ngetSymbols('MSFT')\nchartSeries(MSFT)\n\naddMACD()\n\naddBBands()\n\nchartSeries(MSFT, subset='last 60 days')\n\naddMACD()\n\naddBBands()\n\n#--- Data Analysis\nsummary(MSFT)\n\nlibrary(BatchGetSymbols)\n\nfirst.date <- Sys.Date() - 60\nlast.date <- Sys.Date()\nfreq.data <- 'daily'\ntickers <- c('MSFT','AAPL','SPY')\ndf.SP500 <- GetSP500Stocks()\ntickers <- df.SP500$Tickers\n\n#--- Regressions\nlinearMod <- lm(Cash ~ EBITDA, data = x03_05_Start_R)\nprint(linearMod)\n" }, { "alpha_fraction": 0.792119562625885, "alphanum_fraction": 0.7948369383811951, "avg_line_length": 65.90908813476562, "blob_id": "0218dc43b4f67f556998dfe6706a60587013e848", "content_id": "8248c5a12a385cf9edc10ad1588dbfa0fe5f1fcc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 736, "license_type": "no_license", "max_line_length": 115, "num_lines": 11, "path": "/README.md", "repo_name": "FernandoMarcon/learning_notes", "src_encoding": "UTF-8", "text": "# Learning Notes\n\n## Subjects:\n- [Probability](https://github.com/FernandoMarcon/learning_notes/tree/main/probability/intro_to_probability_notes)\n- [Machine Learning](https://github.com/FernandoMarcon/learning_notes/tree/main/machine_learning)\n- Programming Languages\n\t+ [Python](https://github.com/FernandoMarcon/learning_notes/tree/main/python)\n\t+ [Julia](https://github.com/FernandoMarcon/learning_notes/tree/main/julia/julia_academy)\n- [Finances](https://github.com/FernandoMarcon/learning_notes/tree/main/finances)\n\t+ [Capital Markets](https://github.com/FernandoMarcon/learning_notes/blob/main/finances/README.md)\n\t+ [Algorithmic Trading](https://github.com/FernandoMarcon/learning_notes/tree/main/finances/Algorithmic%20Trading)\n" }, { "alpha_fraction": 0.7140864729881287, "alphanum_fraction": 0.7196652889251709, "avg_line_length": 26.576923370361328, "blob_id": 
"78e4bff780e4f87c6ba055aac15f697048669446", "content_id": "c65cd38779dffd9d2ce679a65dc5820c8094ec1a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 717, "license_type": "no_license", "max_line_length": 88, "num_lines": 26, "path": "/finances/Algorithmic Trading/technical_analysis/supertrend_bot/bot.py", "repo_name": "FernandoMarcon/learning_notes", "src_encoding": "UTF-8", "text": "import ccxt\nimport ta\nimport config\nimport schedule\nfrom ta.volatility import BollingerBands, AverageTrueRange\nimport pandas as pd\n\nexchange = ccxt.binance({\n 'apiKey':config.API_KEY,\n 'secret':config.API_SECRET\n})\n\n# markets = exchange.load_markets()\nbars = exchange.fetch_ohlcv('BTC/USDT',limit=100)\n\ndf = pd.DataFrame(bars[:-1], columns=['timestamp','open','high','low','close','volume'])\nprint(df)\n\nbb_indicator=BollingerBands(df['close'])\ndf['upper_band'] = bb_indicator.bollinger_hband()\ndf['lower_band'] = bb_indicator.bollinger_lband()\ndf['moving_avg'] = bb_indicator.bollinger_mavg()\n\natr_indicator = AverageTrueRange(df['high'],df['low'],df['close'])\ndf['atr'] = atr_indicator.average_true_range()\ndf\n" }, { "alpha_fraction": 0.7257575988769531, "alphanum_fraction": 0.7378787994384766, "avg_line_length": 25.399999618530273, "blob_id": "132b67e95738c8027b87319fcaa039ee28eaaf3e", "content_id": "db83b9443f48da3e4daaea7dc1a70e4bee54c191", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 660, "license_type": "no_license", "max_line_length": 113, "num_lines": 25, "path": "/python/README.md", "repo_name": "FernandoMarcon/learning_notes", "src_encoding": "UTF-8", "text": "> **_The Zen of Python_** by _Tim Peters_\n\n``import this``\n\n## Python Philosophy\n- Beautiful is better than ugly\n- Explicit is better than implicit\n- Simple is better than complex\n- Complex is better than complicated\n- Readability counts\n\n\n## Python 3 Main Characteristics\n- Not backward compatible\n- Everything is an object\n- Print is a function\n- One integer type\n- All text is now Unicode. The Unicode character set in the editor should match the Unicode standard of Python 3.\n\n**Shebang line:**\n- `#!/usr/bin/env python3`\n- `#!/usr/local/bin/python3`\n\n## Sources:\n- [Python Essential Training - LinkedIn](linkedin.com/learning/python-essential-training-2018/)\n" }, { "alpha_fraction": 0.6908240914344788, "alphanum_fraction": 0.7106955051422119, "avg_line_length": 46.52777862548828, "blob_id": "09a15139a37263ce3710ec3207a4b10de135844e", "content_id": "e10aab25528c59adebd9c41d7b107ff01b0f8f8a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1711, "license_type": "no_license", "max_line_length": 233, "num_lines": 36, "path": "/finances/Algorithmic Trading/return_mean_algorithm.py", "repo_name": "FernandoMarcon/learning_notes", "src_encoding": "UTF-8", "text": "'''\nData: VIX product from 2010-2017\nThe VIX is the market's fear gauge, is a stationary measure: over time, while it fluctuates up and down, it will always return to its' mean value. 
\ndf\n" }, { "alpha_fraction": 0.7257575988769531, "alphanum_fraction": 0.7378787994384766, "avg_line_length": 26.399999618530273, "blob_id": "132b67e95738c8027b87319fcaa039ee28eaaf3e", "content_id": "db83b9443f48da3e4daaea7dc1a70e4bee54c191", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 660, "license_type": "no_license", "max_line_length": 113, "num_lines": 25, "path": "/python/README.md", "repo_name": "FernandoMarcon/learning_notes", "src_encoding": "UTF-8", "text": "> **_The Zen of Python_** by _Tim Peters_\n\n``import this``\n\n## Python Philosophy\n- Beautiful is better than ugly\n- Explicit is better than implicit\n- Simple is better than complex\n- Complex is better than complicated\n- Readability counts\n\n\n## Python 3 Main Characteristics\n- Not backward compatible\n- Everything is an object\n- Print is a function\n- One integer type\n- All text is now Unicode. The Unicode character set in the editor should match the Unicode standard of Python 3.\n\n**Shebang line:**\n- `#!/usr/bin/env python3`\n- `#!/usr/local/bin/python3`\n\n## Sources:\n- [Python Essential Training - LinkedIn](linkedin.com/learning/python-essential-training-2018/)\n" }, { "alpha_fraction": 0.6908240914344788, "alphanum_fraction": 0.7106955051422119, "avg_line_length": 46.52777862548828, "blob_id": "09a15139a37263ce3710ec3207a4b10de135844e", "content_id": "e10aab25528c59adebd9c41d7b107ff01b0f8f8a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1711, "license_type": "no_license", "max_line_length": 233, "num_lines": 36, "path": "/finances/Algorithmic Trading/return_mean_algorithm.py", "repo_name": "FernandoMarcon/learning_notes", "src_encoding": "UTF-8", "text": "'''\nData: VIX product from 2010-2017\nThe VIX, the market's fear gauge, is a stationary measure: over time, while it fluctuates up and down, it will always return to its mean value. Even when it spikes, it ultimately returns, or reverts, to its mean value.\nYou can't buy and sell the VIX directly.\nCase: trade products that are associated with the VIX.\nGoal: develop a trading strategy that tries to capitalize on this.\n'''\nimport pandas as pd\nimport matplotlib.pyplot as plt\nplt.rcParams['figure.figsize'] = 20, 10\n\ndata = pd.read_excel('finances/data/03_02_Begin.xlsx', index_col='Date')\ndata.head()\n\n#--- Design an algorithm\ndata['Buy Column'] = data.apply(lambda x: 1 if x['Moving Avg. VIX'] > x['VIX'] else 0, axis=1)\ndata['L-5 Mean Reversion'] = data.apply(lambda x: x['Long Vol Ret'] if x['Buy Column'] == 1 else 0, axis=1)\n\n#--- Test algorithm accuracy\ndata = pd.read_excel('finances/data/03_03_Begin.xlsx', index_col='Date')\ndata.head()\n\ndata['Stand Dev.'] = data['Adj Close'].std()\ndata['Moving Ave'] = data['Adj Close'].rolling(14).mean()\n\ndata[['Open','High','Low','Adj Close']].plot()\n\n\ndata[['Adj Close','Moving Ave']].plot(color={'Adj Close':'yellow', 'Moving Ave':'blue'},linewidth=2)\n\n\n#--- One way to ensure that we're really buying the VIX at a time when it's particularly undervalued or overvalued, and thus, likely to revert to its mean, is to ensure that there is a significant deviation away from its mean over time.\n# Only buy the VIX if it is at least one standard deviation below its moving average, and sell if it's at least one standard deviation above its moving average.\n\ndef buy(data):\n    # (completed sketch; the original file ended mid-definition) Buy when price is at\n    # least one standard deviation below the moving average, per the note above.\n    return (data['Adj Close'] < data['Moving Ave'] - data['Stand Dev.']).astype(int)\n" }, { "alpha_fraction": 0.6538655757904053, "alphanum_fraction": 0.6700353622436523, "avg_line_length": 25.059406280517578, "blob_id": "03bd4ba55c65620b01fa768f0dda814fe2e30c5d", "content_id": "e28a8d7944b974578d7c3a1124f88892e80e0284", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1979, "license_type": "no_license", "max_line_length": 112, "num_lines": 76, "path": "/finances/Algorithmic Trading/triangular_arbritage.py", "repo_name": "FernandoMarcon/learning_notes", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport matplotlib.pyplot as plt\nplt.rcParams['figure.figsize'] = 20,10\n\n#--- Read and clean data\ndata = pd.read_excel('finances/data/04_02_Begin.xlsx')\ndata = data.drop(labels=['Unnamed: 4'], axis=1)\n\n# Parse Date column\ndata[['Date', 'DoW']] = data.Date.str.split(expand = True)\n\n# Clean Date column\ndow = ['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday','Sunday']\ndow = dict(zip(dow, range(1, len(dow) + 1)))  # a set() here would scramble the weekday numbering\ndata['DoW'] = data['DoW'].map(dow)\ndata.head()\n\n# Convert Date column to datetime\ndata['Date'] = pd.to_datetime(data['Date'])\n\n# Rename columns\ndata.rename(columns = {'USD to JPY': 'USD-JPY', 'JPY to THB':'JPY-THB', 'USD to THB':'USD-THB'}, inplace = True)\ndata.head()\n\n#--- Visualization\ndata['USD-JPY'].hist()\n\ndata['JPY-THB'].hist()\n\ndata['USD-THB'].hist()\n\n\n#--- Triangular Arbitrage\ndef calc_triangleArb(c1, c2):\n    return c1 * c2\n\ndef calc_arbDelta(original, arbitrage):\n    return original - arbitrage\n\ndef calc_arbProfit(investment, arbProfit, trading_cost):\n    return investment*abs(arbProfit) - trading_cost\n\ndef set_exposureLimit(profit, limit = 50):\n    '''\n    only trade if the expected profit is > limit\n    '''\n    return [0 if x < limit else x for x in profit]\n\ndef buy(profit, limit = 50):\n    '''\n    buy if expected profit is > limit\n    '''\n    return [1 if x > limit else 0 for x in profit]\n
\ninvestment = 1000000\ntrading_cost = 9\n\ndata['triangleArb'] = calc_triangleArb(data['USD-JPY'],data['JPY-THB'])\ndata.head()\ndata[['USD-THB','triangleArb']].plot()\n\ndata['ArbProfit'] = calc_arbDelta(original=data['USD-THB'], arbitrage=data['triangleArb'])\ndata['profits'] = calc_arbProfit(investment, data['ArbProfit'], trading_cost)\ndata['profits'] = set_exposureLimit(data['profits'])\ndata['BuyDecision'] = buy(data['profits'])\n\ndef plot_returns(data):\n    data.plot(x='Date', y='profits')\n    plt.title('Profits using Triangle Arbitrage')\n    plt.show()\n\n\nplot_returns(data)\n" }, { "alpha_fraction": 0.7661449313163757, "alphanum_fraction": 0.7802183628082275, "avg_line_length": 59.341270446777344, "blob_id": "64816dee26416e83d53db844c08e70ec5de39338", "content_id": "f0489365021a89ee0567ee766012886af03c5312", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 15236, "license_type": "no_license", "max_line_length": 431, "num_lines": 252, "path": "/finances/Financial Markets/intro_financial_markets.md", "repo_name": "FernandoMarcon/learning_notes", "src_encoding": "UTF-8", "text": "# Introduction to Financial Markets\n\nFinancial institutions are a pillar of civilized society, directing resources across space and time to their best uses,\nsupporting and incentivizing people in their productive ventures, and managing the economic risks they take on. The workings\nof these institutions are important to comprehend if we are to predict their actions today and their evolution in the\ncoming information age.\n\n# VaR\n- \"Value at risk\"\n- Invented after stock market crash of 1987\n- Usually quoted in units of $ for a given probability and time horizon\n- 1% one-year VaR of $10 million means 1% chance that a portfolio will lose $10 million in a year\n\n# Stress Tests\n- Originally, term referred to a medical procedure to test for cardiovascular fitness\n- OFHEO was stress testing firms' ability to withstand economic crisis before the 2008 crisis, but those tests failed\n- Dodd Frank Act 2010 requires the Federal Reserve to do annual stress tests for nonbank financial institutions it supervises for at least three different economic scenarios.\n- European Banking Authority, created 2011\n- UK, China, etc.\n- Critics of stress tests such as Anat Admati find them inadequate.\n\n# S&P 500\n- Used as a benchmark for return\n\n# Beta\n- The CAPM implies that the expected return on the ith asset is determined from its beta\n- Beta ($\\beta_i$) is the regression slope coefficient when the return on the ith asset is regressed on the return on the market.\n$$\\beta_i = \\frac{Cov (r_i, r_{market})}{Var (r_{market})}$$\nwhere $r$ stands for _return_\n
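\nA quick numeric sketch of this definition; the return series below are made-up, for illustration only:\n\n```python\nimport numpy as np\n\nr_market = np.array([0.010, -0.020, 0.015, 0.003, -0.007])  # hypothetical market returns\nr_asset = np.array([0.012, -0.025, 0.020, 0.001, -0.010])  # hypothetical asset returns\n\n# beta_i = Cov(r_i, r_market) / Var(r_market)\nbeta = np.cov(r_asset, r_market, ddof=1)[0, 1] / np.var(r_market, ddof=1)\nprint(round(beta, 2))  # regression slope of the asset's return on the market's return\n```\n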
\n# Market Risk versus Idiosyncratic risk\n- By construction, the residuals or error terms in a regression are uncorrelated with the fitted or predicted value\n- So, the variance of the return of a stock is equal to its beta squared times the variance of the market return (systematic risk) plus the variance of the residual in the regression (idiosyncratic risk)\n\n# Distributions and outliers\n- In finance, things tend to not follow the normal distribution\n- Normal vs. Cauchy (Fat-tailed) distributions\n\n## Central Limit Theorem\n- Averages of a large number of independent identically distributed shocks (whose variance is finite) are approximately normally distributed\n- Can fail if the underlying shocks are fat tailed\n- Can fail if the underlying shocks lose their independence\n\n# Insurance\n## Fundamental Insurance Principles and Issues\n- __Risk Pooling__ is the source of all value in insurance\n- If _n_ policies, each has independent probability _p_ of a claim, then the number of claims follows the binomial distribution. The standard deviation of the fraction of policies that result in a claim is\n$$\\sqrt{p(1-p)/n}$$\n- _Law of large numbers_: as _n_ gets large, standard deviation approaches zero.\n- _Moral Hazards_ dealt with partially by deductions and co-insurance.\n- _Selection Bias_ dealt with by group policies, by testing and referrals, and by mandatory government insurance.\n## Radical Financial Innovation - Example I: Insurance\n- Burial societies ancient Rome, true insurance policies appeared in Italy in 14th century\n- Rapid development of actuarial theory starting in 1600s with notion of probability\n- Morris Robinson Mutual Life of NY 1840: highly-paid salesmen (agency theory)\n- Henry Hyde Equitable Life Assurance Society 1880s: large cash value (psychological framing)\n- Viviana Zelizer: challenging God, tempting fate (psychological framing)\n- Inventions copied around the world\n- Life insurance is a relic of a day when people died young\n\n# Portfolio Management: An Alternative to Insurance\n- Investment core idea: investing is inherently risky; if it were not risky it wouldn't give you any return\n```\n Don't put all your eggs in one basket\n```\n- Diversification of ownership\n\n## A Later Insight\n- If people are all like me, all calculating with the same data, all wanting to hold portfolios on the frontier, then they all want to hold the same portfolio (and cash)\n- so __THAT HAS TO BE THE MARKET PORTFOLIO__\n\n## Portfolio Diversification\n- All that should matter to an investor is the performance of the entire portfolio\n- Mean and variance of portfolio matter\n- Law of large numbers means that spreading over many independent assets reduces risk, has no effect on expected return\n\n# Capital Asset Pricing Model (CAPM)\n- CAPM asserts that all investors hold their optimal portfolio\n- Consequence of the mutual fund theorem: all investors hold the same portfolio of risky assets, the tangency portfolio\n- Therefore the CAPM says that the tangency portfolio equals the market portfolio\n\n## Beta\n- The CAPM implies that the expected return on the ith asset is determined from its beta\n- Beta ($\\beta_i$) is the regression slope coefficient when the return on the ith asset is regressed on the\nreturn on the market\n\n## Investment Companies as Providers of Diversification\n- Investment trust (before 1940s)\n- Mutual funds (especially index funds)\n- Closed end investment companies\n- Unit investment trust\n - All these institutions can enable small investors to overcome transactions cost and lumpiness problems in achieving diversified portfolios\n\n## Doubts about Diversification\n- Complete diversification would imply holding much in fixed incomes, real estate, etc. But hasn't the stock market outperformed these?\n- [Risk/Return Pyramid](https://www.investopedia.com/thmb/F1dQqU5OOp9p1KpkBsNgito7J6Y=/6250x0/filters:no_upscale():max_bytes(150000):strip_icc():format(webp)/DeterminingRiskandtheRiskPyramid3-1cc4e411548c431aa97ac24bea046770.png)\n\n# Short Sales\n- How do you own a negative quantity of a stock? → you borrow the shares and sell them; now you owe the shares to someone else\n- Brokers can enable you to hold a negative quantity of a tradable asset: they borrow the security and sell it, escrow the proceeds, you receive the proceeds, owe the security\n- [Short sales in the United States were briefly abolished in September 2008 for a list of 799 stocks.](https://www.sec.gov/news/press/2008/2008-211.htm)\n- Short selling, which is defined as the sale of a security that the seller has borrowed, is motivated by the belief that: The price of the security will decline. Buying back the security at a lower price will allow you to make a profit.\n\n## In the Capital Asset Pricing Model (CAPM):\n- A stock cannot have an optimal holding value which is negative. Otherwise, everyone would be shorting, which cannot happen in equilibrium since you need an investor to provide the stock to be shorted.\n- The optimal portfolio is on the \"efficient portfolio frontier\"\n\n# Gordon Growth Model\n- Myron Gordon\n- a formula to calculate the present value of an asset that yields an infinite amount of value in the future\n$$PV = \\frac{x}{1 + r} + \\frac{x(1 + g)}{(1 + r)^2} + \\frac{x(1 + g)^2}{(1 + r)^3} + ... $$\n$$PV = \\frac{x}{r - g}$$\n
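\nA small sanity check of the closed form against a long partial sum (the values of x, r, g below are arbitrary illustrative choices with r > g):\n\n```python\nx, r, g = 5.0, 0.08, 0.03  # hypothetical cash flow, discount rate, growth rate\n\n# partial sum of x*(1+g)^(t-1) / (1+r)^t for t = 1..2000\npv_sum = sum(x * (1 + g)**(t - 1) / (1 + r)**t for t in range(1, 2001))\n\npv_closed = x / (r - g)  # the growing-consol formula above\nprint(round(pv_sum, 4), round(pv_closed, 4))  # both come out near 100.0\n```\n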
\n# Efficient Frontier\n- An efficient portfolio is a combination of assets which achieves the highest return for a given risk.\n\n## [Expected Return and Variance for a Two Asset Portfolio](https://financetrain.com/expected-return-and-variance-for-a-two-asset-portfolio/)\n### Expected Return for a Two Asset Portfolio\nThe expected return of a portfolio is equal to the weighted average of the returns on individual assets in the portfolio.\n\n$$R_p = w_1R_1 + w_2R_2$$\n$R_p$ = expected return for the portfolio\n$w_1$ = proportion of the portfolio invested in asset 1\n$R_1$ = expected return of asset 1\n\n### Expected Variance for a Two Asset Portfolio\nThe variance of the portfolio is calculated as follows:\n\n$$\\sigma_p^2 = w_1^2\\sigma_1^2 + w_2^2\\sigma_2^2 + 2w_1w_2Cov_{1,2}$$\n\n$Cov_{1,2}$ = covariance between assets 1 and 2\n$Cov_{1,2} = \\rho_{1,2}\\sigma_1\\sigma_2$; where $\\rho$ = correlation between assets 1 and 2\n\nThe above equation can be rewritten as:\n$$\\sigma_p^2 = w_1^2\\sigma_1^2 + w_2^2\\sigma_2^2 + 2w_1w_2\\rho_{1,2}\\sigma_1\\sigma_2$$\n
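\nPlugging made-up numbers into these formulas shows the diversification effect (all values below are illustrative assumptions):\n\n```python\nw1, w2 = 0.6, 0.4  # portfolio weights\nr1, r2 = 0.08, 0.03  # expected returns\ns1, s2 = 0.20, 0.10  # standard deviations\nrho = 0.25  # correlation between the two assets\n\nrp = w1 * r1 + w2 * r2\nvar_p = w1**2 * s1**2 + w2**2 * s2**2 + 2 * w1 * w2 * rho * s1 * s2\nprint(rp, var_p**0.5)  # portfolio expected return and standard deviation\n```\n\nWith these numbers the portfolio standard deviation is about 13.6%, below the 16% weighted average of the two assets' standard deviations; that gap is the payoff from imperfect correlation.\n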
\n# Limited Liability\n- Limited Liability New York State 1811\n- Divided up an enterprise into shares, and no shareholder is liable for more than he or she put in\n- Other states were very skeptical\n- New York produced many failed corporations, a few spectacular successes\n- Investors, in order to be encouraged to invest in businesses, should be protected against liability for what the managers of the business do.\n- Investors in stocks cannot be pursued for the mistakes of the company they are investing in.\n\n## Inflation Indexed Debt\nIndexing the value of debt to an index:\n- Is a better indexation method than indexing debt to a single commodity with a potentially unstable price evolution.\n- Protects investors from currency instability. Currency instability can become a concern, especially if the government prints the currency.\n\n# Real Estate Risk Management Devices\n- Value of homes is a major source of risk\n- Casualty insurance\n- Securitized mortgages\n- Home price futures and options 2006, housing.cme.com\n- Equity-protected mortgages\n\n# Limited Liability\nIn his work, David Moss describes how investors' psychology favored limited liability after the early 19th century New York experiment. In fact, the comparison between investors' psychologies in the context of unlimited liability and lottery tickets is asymmetrical. Unlimited liability investors tend to overestimate the minimum probability of loss, whereas in lottery tickets, they overestimate the minimum probability of win.\n\n# Inflation indexed debt\nThe introduction of inflation indexed debt was motivated by:\n- An incentive to hedge from inflation volatility.\n- Historical examples of nominal debt being wiped out in real terms by high inflation.\n- An incentive to have a debt contract fixed in real terms.\n\n# Unidad de Fomento\nChile introduced the Unidad de Fomento to create a unit of account indexed to inflation, in order to counteract the impact of hyperinflation.\n\n# Equity-protected mortgages\nThe concept of equity-protected mortgages consists in mortgages that include house price insurance. As an example, if the house price falls below the amount you owe, the mortgage debt will be corrected down.\n\n# Forecasting - The Efficient Markets Hypothesis\nAn efficient market is defined as one in which asset prices quickly and fully reflect all available information.\n\n## Random Walk & AR-I Models\n- Random Walk:\n$$x_t = x_{t-1} + \\epsilon_t$$\n- First-order autoregressive (AR-I) Model:\n$$x_t = 100 + \\rho(x_{t-1} - 100) + \\epsilon_t$$\nMean reverting (to 100), $-1 < \\rho < 1$\n- Random walk as approximate implication of unpredictability of returns\n- Similarity of both random walk and AR-I to actual stock prices\n
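\nA minimal simulation sketch of the two models above (the shock scale and the choice rho = 0.9 are arbitrary, for illustration):\n\n```python\nimport numpy as np\n\nrng = np.random.default_rng(0)\neps = rng.normal(0.0, 1.0, 250)  # i.i.d. shocks\n\n# random walk: x_t = x_{t-1} + eps_t, started at 100\nrw = 100 + np.cumsum(eps)\n\n# AR(1), mean-reverting to 100: x_t = 100 + rho*(x_{t-1} - 100) + eps_t\nrho = 0.9\nar = [100.0]\nfor e in eps:\n    ar.append(100 + rho * (ar[-1] - 100) + e)\n```\n\nPlotted side by side, the two paths are often hard to tell apart by eye, which is the point of the comparison with actual stock prices.\n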
\n## Intuition of Efficiency\n- Reuter's pigeons and the telegraph\n- Beepers & the internet\n- Must be hard to get rich\n\n## Price as PDV of Expected Dividends\nThe Dividend Discount Model (or Gordon Growth Model) can be stated as follows: let the investor's discount rate be equal to r. If earnings equal dividends, and if dividends grow at the long-run rate g, then the price of the stock P can be written as follows: $P = E/(r-g)$\n- Gordon Model: If earnings equal dividends and if dividends grow at long-run rate $g$, then by the growing consol model\n$$P=E/(r-g)$$\n$$P/E = 1 / (r-g)$$\n- So, efficient markets theory purports to explain why P/E varies across stocks\n- Low P/E does not mean that the stock is a \"bargain\"; it only means that earnings are rationally forecasted to decrease in future\n- Efficient markets theory denies that any rule works\n- The price-to-earnings ratio (P/E) tells you how much investors are willing to pay per unit of a company's earnings.\n\n## Reasons to Think Markets Ought to be Efficient\n- Marginal investor determines prices\n- Smart money dominates trading\n- Survival of the fittest\n\n# [Hedging](https://www.investopedia.com/terms/h/hedge.asp)\n- A hedge is an investment that is made with the intention of reducing the risk of adverse price movements in an asset. Normally, a hedge consists of taking an offsetting or opposite position in a related security.\n- Hedging is a strategy that tries to limit risks in financial assets.\n- Popular hedging techniques involve taking offsetting positions in derivatives that correspond to an existing position.\n- Other types of hedges can be constructed via other means like diversification. An example could be investing in both cyclical and counter-cyclical stocks.\n- Most common way of hedging in the market is through derivatives.\n\n## Derivatives\n- The underlying assets can be stocks, bonds, commodities, currencies, indices or interest rates.\n- Derivatives can be effective hedges against their underlying assets, since the relationship between the two is more or less clearly defined.\n- It's possible to use derivatives to set up a trading strategy in which a loss for one investment is mitigated or offset by a gain in a comparable derivative.\n- The effectiveness of a derivative hedge is expressed in terms of delta, sometimes called the \"hedge ratio.\" Delta is the amount the price of a derivative moves per $1 movement in the price of the underlying asset.\n\n# Elliott Wave Theory\n- Ralph Nelson Elliott (1930)\n- Elliott believed that stock markets, generally thought to behave in a somewhat random and chaotic manner, in fact, traded in repetitive patterns.\n- Elliott proposed that financial price trends result from investors' predominant psychology.\n- He found that swings in mass psychology always showed up in the same recurring fractal patterns, or \"waves,\" in financial markets\n- Elliott's theory somewhat resembles the Dow theory in that both recognize that stock prices move in waves. Because Elliott additionally recognized the \"fractal\" nature of markets, however, he was able to break down and analyze them in much greater detail.\n## Impulse and Corrective Waves\n- _impulse wave_: net travels in the same direction as the larger trend, always shows five waves in its pattern\n- Five waves move in the direction of the main trend, followed by three waves in a correction (totaling a 5-3 move). This 5-3 move then becomes two subdivisions of the next higher wave move.\n- The underlying 5-3 pattern remains constant, though the time span of each wave may vary.\n- _corrective wave_: net travels in the opposite direction of the main trend.\n- On a smaller scale, within each of the impulsive waves, five waves can again be found.\n- This next pattern repeats itself _ad infinitum_ at ever-smaller scales.\n\n## Wave Degrees\nElliott identified nine degrees of waves, which he labeled as follows, from largest to smallest:\n\n- Grand Super Cycle\n- Super Cycle\n- Cycle\n- Primary\n- Intermediate\n- Minor\n- Minute\n- Minuette\n- Sub-Minuette\n\nSince Elliott waves are a fractal, wave degrees theoretically expand ever-larger and ever-smaller beyond those listed above.\n\nTo use the theory in everyday trading, a trader might identify an upward-trending impulse wave, go long and then sell or short the position as the pattern completes five waves and a reversal is imminent.\n\nPrice action is divided into trends and corrections. 
\n\n# Elliott Wave Theory\n- Ralph Nelson Elliott (1930s)\n- Elliott believed that stock markets, generally thought to behave in a somewhat random and chaotic manner, in fact traded in repetitive patterns.\n- Elliott proposed that financial price trends result from investors' predominant psychology.\n- He found that swings in mass psychology always showed up in the same recurring fractal patterns, or \"waves,\" in financial markets\n- Elliott's theory somewhat resembles the Dow theory in that both recognize that stock prices move in waves. Because Elliott additionally recognized the \"fractal\" nature of markets, however, he was able to break down and analyze them in much greater detail.\n## Impulse and Corrective Waves\n- _impulse wave_: its net movement is in the same direction as the larger trend, and it always shows five waves in its pattern\n- Five waves move in the direction of the main trend, followed by three waves in a correction (totaling a 5-3 move). This 5-3 move then becomes two subdivisions of the next higher wave move.\n- The underlying 5-3 pattern remains constant, though the time span of each wave may vary.\n- _corrective wave_: its net movement is in the opposite direction of the main trend.\n- On a smaller scale, within each of the impulsive waves, five waves can again be found.\n- This pattern repeats itself _ad infinitum_ at ever-smaller scales.\n\n## Wave Degrees\nElliott identified nine degrees of waves, which he labeled as follows, from largest to smallest:\n\n- Grand Super Cycle\n- Super Cycle\n- Cycle\n- Primary\n- Intermediate\n- Minor\n- Minute\n- Minuette\n- Sub-Minuette\n\nSince Elliott waves are a fractal, wave degrees theoretically expand ever-larger and ever-smaller beyond those listed above.\n\nTo use the theory in everyday trading, a trader might identify an upward-trending impulse wave, go long, and then sell or short the position as the pattern completes five waves and a reversal is imminent.\n\nPrice action is divided into trends and corrections. Trends show the main direction of prices, while corrections move against the trend.\n" }, { "alpha_fraction": 0.7201645970344543, "alphanum_fraction": 0.748971164226532, "avg_line_length": 26, "blob_id": "6ecf1dc81a1ff6a109e7fc99817517379ae7d8ef", "content_id": "d08e990e30d526475f6403a77d20c590003c31ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 486, "license_type": "no_license", "max_line_length": 109, "num_lines": 18, "path": "/machine_learning/README.md", "repo_name": "FernandoMarcon/learning_notes", "src_encoding": "UTF-8", "text": "https://pub.towardsai.net/machine-learning-algorithms-for-beginners-with-python-code-examples-ml-19c6afd60daa\n\n## Major Machine Learning Algorithms\n1. **Regression (Prediction)**\n - Linear Regression\n - Polynomial Regression\n - Exponential Regression\n - Logistic Regression\n - Logarithmic Regression\n\n2. **Classification**\n3. **Clustering**\n4. **Association**\n5. **Anomaly Detection**\n6. **Sequence Pattern Mining**\n7. **Dimensionality Reduction**\n8. **Recommendation Systems**\n" }, { "alpha_fraction": 0.6412485241889954, "alphanum_fraction": 0.6795732975006104, "avg_line_length": 24.059406280517578, "blob_id": "30d2ef8258b0defd6309a8e43883aaa343056ce3", "content_id": "a41bbd1c776b30b1f1283aeda8661f972a8f2ba3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2531, "license_type": "no_license", "max_line_length": 78, "num_lines": 101, "path": "/finances/sentdex_tutorial/intro_and_getting_stock_price_data.py", "repo_name": "FernandoMarcon/learning_notes", "src_encoding": "UTF-8", "text": "import datetime as dt\nimport matplotlib.pyplot as plt\nimport mplfinance as mpf\n# candlestick_ohlc was removed from matplotlib; it now lives in mplfinance\nfrom mplfinance.original_flavor import candlestick_ohlc\n\nimport matplotlib.dates as dates\nfrom matplotlib import style\nimport pandas as pd\nimport pandas_datareader.data as web\n\nstyle.use('ggplot')\n\n# start = dt.datetime(2000,1,1)\n# end = dt.datetime(2016, 12, 31)\n# df = web.DataReader('TSLA','yahoo', start, end)\n# df.to_csv('sentdex_tutorial/tsla.csv')\n\n#--- Read data\ndf = pd.read_csv('sentdex_tutorial/tsla.csv', parse_dates=True, index_col = 0)\ndf.plot()\n\ndf['100ma'] = df['Adj Close'].rolling(window=100, min_periods=0).mean()\ndf.head()\n\nax1 = plt.subplot2grid((6,1), (0,0), rowspan=5, colspan=1)\nax2 = plt.subplot2grid((6,1), (5,0), rowspan=1, colspan=1, sharex = ax1)\n\nax1.plot(df.index, df['Adj Close'])\nax1.plot(df.index, df['100ma'])\nax2.plot(df.index, df['Volume'])\n\ndf_ohlc = df['Adj Close'].resample('10D').ohlc()\ndf_volume = df['Volume'].resample('10D').sum()\n\ndf_ohlc.head()\ndf_ohlc.reset_index(inplace=True)\ndf_ohlc['Date'] = df_ohlc['Date'].map(dates.date2num)\n# plot the resampled OHLC data as candlesticks on the price axis\ncandlestick_ohlc(ax1, df_ohlc.values, width=2, colorup='g')\n\nmpf.plot(df.head(100), type='candle', style='charles',\n title='TSLA, first 100 rows',\n ylabel='Price ($)',\n ylabel_lower='Shares \\nTraded',\n volume=True,\n mav=(3,6,9))\n\n\n# import required packages\nimport matplotlib.pyplot as plt\nimport mplfinance as mpf\nimport pandas as pd\nimport matplotlib.dates as mpdates\n\nplt.style.use('dark_background')\n\n# extracting Data for plotting\ndf = pd.read_csv('sentdex_tutorial/tsla.csv')\ndf = df[['Date', 'Open', 'High', 'Low', 'Close']]\ndf['Date'] = pd.to_datetime(df['Date']) # convert into datetime object\ndf['Date'] = 
df['Date'].map(mpdates.date2num) # apply map function\n\n# mpf.plot(df) # would fail here: mpf.plot expects a DatetimeIndex\n\n# creating Subplots\nfig, ax = plt.subplots()\n\n# plotting the data (candlestick_ohlc is imported from mplfinance.original_flavor above)\ncandlestick_ohlc(ax, df.values, width = 0.6,\n colorup = 'green', colordown = 'red',\n alpha = 0.8)\n\n# allow grid\nax.grid(True)\n\n# Setting labels\nax.set_xlabel('Date')\nax.set_ylabel('Price')\n\n# setting title\nplt.title('Prices For the Period 01-07-2020 to 15-07-2020')\n\n# Formatting Date\ndate_format = mpdates.DateFormatter('%d-%m-%Y')\nax.xaxis.set_major_formatter(date_format)\nfig.autofmt_xdate()\n\nfig.tight_layout()\n\n# show the plot\nplt.show()\n\n\nlen(df)\n\n# note: this call expects a DatetimeIndex and a Volume column\nmpf.plot(df[1400:1640], type='candle', volume = True, mav=(20,40))\n" }, { "alpha_fraction": 0.5855966806411743, "alphanum_fraction": 0.6205761432647705, "avg_line_length": 31.83783721923828, "blob_id": "8ce1c62aa9a5cce052f0c1b0e2ffa297dd94563d", "content_id": "d380243cf049815e6402b44e2d4716e6604b4257", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2430, "license_type": "no_license", "max_line_length": 102, "num_lines": 74, "path": "/finances/crypto/get_data/ccxt_binance.py", "repo_name": "FernandoMarcon/learning_notes", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nimport numpy as np\nimport pandas as pd\nimport ccxt\nimport calendar\nfrom datetime import datetime, date, timedelta\n\nbinance = ccxt.binance()\n\ndef min_ohlcv(dt, pair, limit):\n # UTC native object\n since = calendar.timegm(dt.utctimetuple())*1000\n ohlcv1 = binance.fetch_ohlcv(symbol=pair, timeframe='1m', since=since, limit=limit)\n # second fetch starts `limit` minutes later so the two windows are consecutive\n ohlcv2 = binance.fetch_ohlcv(symbol=pair, timeframe='1m', since=since + limit*60*1000, limit=limit)\n ohlcv = ohlcv1 + ohlcv2\n return ohlcv\n\ndef ohlcv(dt, pair, period='1d'):\n ohlcv = []\n limit = 1000\n if period == '1m':\n limit = 720\n elif period == '1d':\n limit = 365\n elif period == '1h':\n limit = 24\n elif period == '5m':\n limit = 288\n for i in dt:\n start_dt = datetime.strptime(i, \"%Y%m%d\")\n since = calendar.timegm(start_dt.utctimetuple())*1000\n if period == '1m':\n ohlcv.extend(min_ohlcv(start_dt, pair, limit))\n else:\n ohlcv.extend(binance.fetch_ohlcv(symbol=pair, timeframe=period, since=since, limit=limit))\n df = pd.DataFrame(ohlcv, columns = ['Time', 'Open', 'High', 'Low', 'Close', 'Volume'])\n df['Time'] = [datetime.fromtimestamp(float(time)/1000) for time in df['Time']]\n df['Open'] = df['Open'].astype(np.float64)\n df['High'] = df['High'].astype(np.float64)\n df['Low'] = df['Low'].astype(np.float64)\n df['Close'] = df['Close'].astype(np.float64)\n df['Volume'] = df['Volume'].astype(np.float64)\n df.set_index('Time', inplace=True)\n return df\n\n# pair = 'BTC/USDT'\n# period = '1h'\n# start_date ='20190101'\n# end_date = '20200101'\n# df = ohlcv([start_date,end_date], pair, period)\n# print(df.head())\n# df.to_csv('data/ccxt_binance_'+pair.replace('/','-')+'_'+period+'_'+start_date+'-'+end_date+'.csv')\n\ndef get_datelist(start_day, end_day):\n start_dt = datetime.strptime(start_day, \"%Y%m%d\")\n end_dt = datetime.strptime(end_day, \"%Y%m%d\")\n days_num = (end_dt - start_dt).days + 1\n datelist = [start_dt + timedelta(days=x) for x in range(days_num)]\n datelist = [date.strftime(\"%Y%m%d\") for date in datelist]\n return datelist\n\n\npair='BTC/USDT'\nstart_day = \"20210101\"\nend_day = \"20210913\"\nperiod='1h'\n\ndatelist = get_datelist(start_day, end_day)\ndf = ohlcv(datelist, pair, 
period)\nprint(df.head())\nprint(df.tail())\n\nfname='data/ccxt_binance_'+pair.replace('/','-')+'_'+period+'_'+start_day+'-'+end_day+'.csv'\ndf.to_csv(fname)\n" }, { "alpha_fraction": 0.6976950168609619, "alphanum_fraction": 0.7039006948471069, "avg_line_length": 27.200000762939453, "blob_id": "bb74befd9ffdfaa1841e752e81effc307940d2ba", "content_id": "8edc2b6e568bf22c6863f7dbd5126c554491384e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1128, "license_type": "no_license", "max_line_length": 124, "num_lines": 40, "path": "/Web Scraping/README.md", "repo_name": "FernandoMarcon/learning_notes", "src_encoding": "UTF-8", "text": "## How the Internet Works? A Brief Summary\n\n1. **Physical Layer**\n - Actual electrons on a wire\n - High/low voltages\n\n2. **Data Link Layer**\n - Frames\n - MAC addresses and physical machines on a local network\n\n3. **Network Layer**\n - Router to router\n - Creates network IP addresses\n - Variable-length packets\n\n4. **Transport Layer**\n - Persistent communication channels\n - TCP, UDP, ports\n\n5. **Session Layer**\n - Open, Close, manage sessions\n - AppleTalk, SCP\n\n6. **Presentation Layer**\n - String encoding, encryption/decryption\n - Object serialization, files, compression\n\n7. **Application Layer**\n - HTTP\n - POST and GET requests\n - REST APIs\n\n### How to think about the internet\n- Each request goes through many layers of wrapping and unwrapping to get to its destination and back\n- These requests do not require a web browser\n- Requests can be examined, replicated, and saved\n- Anything the browser can do, we can do with a web scraper.\n\n## Sources\n- [Web Scraping with Python (LinkedIn)](https://www.linkedin.com/learning/web-scraping-with-python/hello-world-with-scrapy)\n" }, { "alpha_fraction": 0.5975494980812073, "alphanum_fraction": 0.6512723565101624, "avg_line_length": 23.674419403076172, "blob_id": "759a60568fd940d1bdbec2253dd3cc64aa617fbf", "content_id": "0ec64f103baa2352b66048874d3e4801d759ea61", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1061, "license_type": "no_license", "max_line_length": 111, "num_lines": 43, "path": "/finances/Algorithmic Trading/Backtrading/tests/backtrader_test1.py", "repo_name": "FernandoMarcon/learning_notes", "src_encoding": "UTF-8", "text": "import backtrader as bt\nimport backtrader.feeds as btfeed\nfrom datetime import datetime\n\n# NOTE: this CSV feed is overwritten by the Yahoo feed below\ndata = btfeed.GenericCSVData(\n dataname='finances/crypto/data/ccxt_binance_BTC-USDT_1h_20210101-20210913.csv',\n\n dtformat=('%Y-%m-%d %H:%M:%S'),\n datetime=0,\n time=-1,\n open=1,\n high=2,\n low=3,\n close=4,\n volume=5,\n openinterest=-1\n)\n\nclass PrintClose(bt.Strategy):\n\n def __init__(self):\n self.dataclose = self.datas[0].close\n\n def log(self, txt, dt=None):\n # date(0) converts the current bar's float date into a datetime.date\n dt = dt or self.datas[0].datetime.date(0)\n print(f'{dt.isoformat()} {txt}')\n\n # def next(self):\n # self.log('Close: ',self.dataclose[0])\n\n def next(self):\n self.log('Close, %.2f' % self.dataclose[0])\n\ncerebro = bt.Cerebro()\n\n# Create a data feed\n# data = bt.feeds.YahooFinanceData(dataname='MSFT',fromdate=datetime(2011, 1, 1),todate=datetime(2012, 12, 31))\ndata = bt.feeds.YahooFinanceData(dataname='TSLA', fromdate=datetime(2021, 1, 1),todate=datetime(2021, 6, 1))\ncerebro.adddata(data)\n\ncerebro.addstrategy(PrintClose)\n\ncerebro.run()\n" }, { "alpha_fraction": 0.5634556412696838, "alphanum_fraction": 0.5993883609771729, "avg_line_length": 20.983192443847656, 
"blob_id": "9208b064a1727f42a997268995f5c2d75172e489", "content_id": "1de990314ea7fcfccc3006f911eded6ef8328d40", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2616, "license_type": "no_license", "max_line_length": 112, "num_lines": 119, "path": "/finances/crypto/analysis/time_to_1perc.py", "repo_name": "FernandoMarcon/learning_notes", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nimport pandas as pd\nplt.rcParams['figure.figsize']=15,10\n\nperiod_name='hours'\npair='BTC/USDT'\nfname='finances/crypto/data/ccxt_binance_BTC-USDT_1h_20210101-20210913.csv'\n\ndata = pd.read_csv(fname,index_col='Time')\n\ndef test_perc(high, low_ref, perc=1):\n temp = ((high/low_ref*100)-100) >= perc\n return next((i for i,e in enumerate(temp) if e), False)\n\ndef calc_perct(df,perc=1):\n return pd.Series(\n [test_perc(df.High[i:],value, perc) for i, value in enumerate(df['Low'])],\n dtype='int64',index=df.index)\n\nperc=1\ncname='tperc'+str(perc)\ndata[cname] = calc_perct(data,perc)\n\nperc=2\ncname='tperc'+str(perc)\ndata[cname] = calc_perct(data,perc)\n\nperc=3\ncname='tperc'+str(perc)\ndata[cname] = calc_perct(data,perc)\n\ndef plot_freq(df):\n tfreq =df[['tperc1','tperc2','tperc3']].apply(lambda x: x.value_counts())#.T.stack()\n ax = tfreq.head(20).plot(kind='barh', xlabel=period_name, ylabel='Num. of cases',\n title='How long does it take until price increase in {}% after buy-order?'.format(perc))\n ax.invert_yaxis()\n ax.legend(['1%','2%','3%'])\n # ax.show()\n\npd.starts\n\na=1\ndv = 0.02\nc=1.1\n\ndef d(a):\n return (a + a*dv*c)\na=1\ninitial = a + a*dv\nd(initial)\nd(d(initial))\nd(d(d(initial)))\nd(d(d(d(initial))))\nd(d(d(d(d(initial)))))\nd(d(d(d(d(d(initial))))))\nfinal = d(d(d(d(d(d(d(initial)))))))\n(1- final)*100\n\nfinal/initial\n\n\n\n\nso=.02\nso_scale=1.1\nmax_so=7\nSOs=[]\nprices=[]\nso_flag=0\nbase = 1000\nstart = base -1\n\nfor i in reversed(range(500,base)):\n prices.append(i)\n deviation = (start - i)/start\n if deviation >= so:\n so*=so_scale\n if so_flag <= max_so:\n start=i\n SOs.append(start)\n so_flag+=1\n else:\n SOs.append(start)\n else:\n SOs.append(start)\ndf = pd.DataFrame({'price':prices,'SOs':SOs})\n\np = df.price.plot()\n[p.axhline(y=l, linestyle='--',color='red') for l in df.SOs.unique()]\n\n\n\n\n\nso=.02\nso_scale=1.1\nmax_so=7\nso_flag=0\ndata = pd.read_csv('finances/crypto/data/ccxt_binance_BTC-USDT_1h_20190101-20200831.csv',index_col='Time')\nstart, base = data.Close[0],data.Close[0]\n\nSOs=[]\nprices=[]\n\nfor i in reversed(data.Close):\n deviation = (start - i)/start\n if deviation >= so:\n so*=so_scale\n if so_flag <= max_so:\n start=i\n SOs.append(start)\n so_flag+=1\n else:\n SOs.append(start)\n else:\n SOs.append(start)\ndf = pd.DataFrame({'price':data.Close,'SOs':SOs})\np = df.price.plot()\n[p.axhline(y=l, linestyle='--',color='red') for l in df.SOs.unique()]\n" }, { "alpha_fraction": 0.7309481501579285, "alphanum_fraction": 0.734704852104187, "avg_line_length": 59.10752868652344, "blob_id": "78eaea6e4478e5b03f21a6062fa872a1613c4662", "content_id": "0501fa23e6b6349b7fb9282737f25aa2d2e7c6ba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5590, "license_type": "no_license", "max_line_length": 443, "num_lines": 93, "path": "/finances/data/definitions.md", "repo_name": "FernandoMarcon/learning_notes", "src_encoding": "UTF-8", "text": "> **_Share_**: An ownership share in a company\n\n> **_Price_**: Cost of buyin one share of 
stock.\n\n> **_Market Capitalization_**: Market cap represents the total value of all the stock that is available in a particular company. It's essentially a measure of the equity in the firm, and equals the price of a share multiplied by the number of shares outstanding.\n\n> **_Dividend_**: A share of profits that is paid out to shareholders of a particular company.\n\n> **_Measure for risk_**: volatility or standard deviation\n\n> **_Price-to-earnings ratio_**: is just the stock's price in the market, divided by its earnings per share. So if a stock costs \\$20 per share, and it earns \\$2 per share in profit, then it has a $PE\\ ratio = \\frac{\\$20}{\\$2} = 10\\ times$.\n\n> **_Price-to-book ratio_**: If the stock is trading for \\$20 per share, but the book value from an accounting point of view is only \\$5 per share, then the stock's price-to-book ratio is $\\frac{\\$20}{\\$5} = 4\\ times$\n\n> **_Market Efficiency_**: a stock already incorporates all known information into its value. That means that there are no stocks that are undervalued or overvalued. All of the prices out there are fair given what we know at a particular point in time.\n\n> **_Listed_**: Stocks trade on the exchange that buyers and sellers go through\n\n> **_Bonds_**: Form of debt companies sell to investors in exchange for interest payments over time\n\n> **_IPO_**: Initial Public Offering, the first time a stock is sold to the general public\n\n> **_Diversification_**: Owning a variety of stocks to minimize the risk of your portfolio\n\n> **_ETF_**: Exchange traded fund; a basket of stocks that trade through an exchange and can be bought, as a basket, by investors\n\n> **_zero-coupon bonds_**: Bonds that do not pay interest but are instead issued below par value, which is the face value of the bond returned to investors at maturity\n\n> **_EPS_**: Earnings per share; the amount of money a company earns for every share of stock\n\n> **_equity_**: Another term for stock, which is ownership shares in a company\n\n> **_ADR_**: American Depositary Receipts; foreign firms trading in the U.S.\n\n> **_Deposit Ratio_**: is a regulatory banking metric.\n\n> **_Taylor Rule_**: Tells what the level of interest rates should be in the economy; used to evaluate the appropriate level of interest rates in financial markets\n\n> **_Prime Rate_**: is a benchmark interest rate that banks charge their most creditworthy customers.\n\n> **_CPI_**: is a measure of inflation.\n\n> **_Going Long_**: hope the price will rise\n\n> **_Going short_**: hope the price will fall\n\n> **_Required Rate of Return_**: how much stockholders expect to receive in returns from a given firm.\n\n> **_Earnings_**: are the amount of money a company earns over time. Reported on a quarterly basis. 
Because of seasonality, some companies will naturally earn more in some quarters than others.\n\n> **_Treasury Bills (T-Bills)_**: Short term bonds issued by the US Treasury that mature in less than a year\n\n> **_Call Option_**: Gives you the right but not the obligation to **buy** a stock at a particular price until some time in the future.\n\n> **_Put Option_**: Gives you the right but not the obligation to **sell** a stock at a particular price until some time in the future.\n\n> **_Bond_**: Long-term promissory note, issued by a borrower, promising to its holder a predetermined and fixed amount of interest per year and repayment of principal at maturity.\n\n> **_Par value_**: Face value of the bond, returned to the bondholder at maturity\n\n> **_Price_**: Represented as a percentage of face value or par\n\n> **_Coupon_**: Percentage of the par value of the bond that will be paid periodically in the form of interest.\n\n> **_Maturity_**: Length of time until the bond issuer returns the par value to the bondholder and terminates or redeems the bond\n\n> **_Yield_**: Rate of return the investor will earn on the bond after taking into account the price paid for the bond and the coupon on the bond\n\n> **_Call Provision_**: Gives a corporation the option to redeem the bonds before the maturity date\n\n> **_Bond Rating_**: Reflects the future risk potential of bonds\n\n> **_Junk Bonds_**: High-risk bonds with ratings of BB or below from Standard & Poor's (Ba or below from Moody's)\n\n> **_Treasury Bond_**: Debt securities issued by the US government\n\n> **_T-Bills_**: Short-term obligations with maturities of 13, 26, or 52 weeks.\n\n> **_Treasury Bonds (T-Bonds)_**: Longer-term bonds that pay semiannual coupons in addition to their face value at maturity\n\n> **_Treasury auctions_**: essentially the government is selling bonds to the general public and to institutional investors; you put in a higher price, which is equivalent to a lower yield, if you want to buy these bonds. The highest prices get awarded the bonds, and people who put in lower prices do not get awarded them. The prices that are set for the bonds are determined based on all the bids, across all the participants in the market.\n\n> **_Municipal (Muni) Bonds_**: Bonds issued by state and local governments and other nonprofit entities (e.g. 
universities).\n\n> **_Triangular Arbitrage_**: Capitalizing on differences in FX trading across three currencies\n\n> **_The Shrinking Stock Market_**: The number of publicly traded firms over time has shrunk.\n\n> **_Four-factor Model_**: Identifies characteristics of stocks or firms that do well on average over time\n\n> **_Market Making_**: Trading that attempts to capitalize on the bid-ask spread\n\n> **_TRACE_**: Trade Reporting and Compliance Engine, used to track corporate bond sales over time.\n" }, { "alpha_fraction": 0.6616702079772949, "alphanum_fraction": 0.6616702079772949, "avg_line_length": 17.68000030517578, "blob_id": "140e143b1f2438ceb8dd585a77bf7abab18302fe", "content_id": "a5a456f5c6f3013479197f7e9ed71506bb0383a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 467, "license_type": "no_license", "max_line_length": 52, "num_lines": 25, "path": "/finances/Algorithmic Trading/building_algorithms.py", "repo_name": "FernandoMarcon/learning_notes", "src_encoding": "UTF-8", "text": "from diagrams import Diagram, Cluster\nfrom diagrams.aws.database import Redshift\nfrom diagrams.gcp.iot import IotCore\nfrom diagrams.gcp.compute import AppEngine, Functions\n\ngraph_attr = {\"fontsize\": \"45\",\n \"bgcolor\": \"transparent\",\n # 'center': 'true'\n # 'concentrate':'false'\n 'labelloc':\"t\"\n }\n\nwith Diagram('Algorithmic Trading General Process', direction='LR', filename='finances/Algorithmic Trading/algo_trading_general_process_diagram',graph_attr=graph_attr) as d:\n with Cluster('Research'):\n data = IotCore('Data')\n data_time = IotCore('Real-time/Historical')\n data_type = IotCore('Market/Non-market Data')\n\n data_time - data\n data_type - data\n\n with Cluster('Pre-trade Analysis'):\n pretrade_analysis = [Redshift('Alpha Model'),\n Redshift('Risk Model'),\n Redshift('Transaction Cost Model')]\n data >> pretrade_analysis\n\n with Cluster('Trading Signal'):\n trading_signal = AppEngine('Portfolio Construction Model')\n\n data >> trading_signal\n pretrade_analysis >> trading_signal\n\n with Cluster('Trade Execution'):\n trade_execution = Functions('Execution Model')\n\n data >> trade_execution\n trading_signal >> trade_execution\n\n post_trade_analysis = Redshift('Post-trade Analysis')\n\n trade_execution >> post_trade_analysis\n\nd\n" }, { "alpha_fraction": 0.7313916087150574, "alphanum_fraction": 0.7475188970565796, "avg_line_length": 41.620689392089844, "blob_id": "87fab0f360194ead69a4885993e82b75bb1b735e", "content_id": "7ec93991d8a5b782a3bffae79b96a6a61907015b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 18547, "license_type": "no_license", "max_line_length": 274, "num_lines": 435, "path": "/finances/README.md", "repo_name": "FernandoMarcon/learning_notes", "src_encoding": "UTF-8", "text": "# Capital Markets\n\n## Basics\nMain questions before investing:\n 1. Am I being realistic about how much I should earn?\n 2. Am I prepared for the __risk__?\n 3. Am I comfortable with __daily changes__ in my portfolio?\n\n**Types of Investors**\n- Individual\n- Institutional\n\n+ Stock returns have an arithmetic mean of 11.3% for the period 1928 to 2018.\n+ Bonds are not traded on an exchange; they are purchased from the issuer and traded between investors.\n+ The average IPO today is from companies older and more stable than IPOs offered in the past. 
Companies are waiting longer to go public, with the downside to investors that the company has already seen its highest growth.\n\n### Exchanges\n- **NASDAQ**: National Association of Securities Dealers Automated Quotation, an over-the-counter (OTC) exchange\n- **NYSE**: Wall Street (New York City), stocks physically and electronically traded\n\n### The shrinking stock market\nUS Publicly traded companies:\n - 1998 -> ~10,000, 2018 -> ~ 4,000\n - Large companies buying smaller ones\n - Fewer going public\n\nIPOs per Year\n - Historically: 300 to 500\n - Now: 100 or less\n\nIPOs Now\n - Older\n - More stable (Facebook, Uber)\n - Today's IPOs have already seen strongest growth before going public.\n - Fastest growth is early on\n\n### Regulations\n+ [SEC](www.sec.gov)\n - writes rules for the investment markets based on laws passed by Congress\n - inform and protect investors\n - facilitate capital formation\n - enforce federal securities laws\n - regulate securities markets\n - provide data\n\n+ [FINRA](www.finra.org)\n - regulates industries\n\n+ [CFTC](cftc.gov) (Commodity Futures Trading Commission)\n - Industry oversight\n - law & regulation\n - ensure the integrity of the futures & swaps markets\n\n## Stock Markets\nSystem that enables companies to sell partial ownership in themselves to people in exchange for cash.\n\n**Common stocks**\n- A certificate that indicates ownership in part of a corporation\n- have the highest rate of return\n- Your liability is limited to the amount of your investment.\n\n**Shareholder Rights**\n- Elect board of directors\n- Change corporate charter\n\n**Investment Returns**\n- Earning part of a company's profit after getting an ownership stake via stock.\n- Companies also raise money by **borrowing** it.\n- Companies **borrow** money by selling **bonds**.\n\n**Stocks** (ownership stake in a firm) + **Bonds** (loan to a firm) = Capital Markets\n\n**The bid-ask spread**\n- Bid: how much a buyer is willing to pay for a given stock\n- Ask: how much an owner of a given stock is willing to sell for\n- Bid-ask spread: the difference between the best ask and bid of a given stock\n- stock price: midpoint between the best bid and ask prices for a given stock\n\n**Types of orders**\n- Market order: pay for a stock based on the current best ask price\n- Limit order: the buy order for a given stock is only executed if its price reaches some specified level\n\n**Going long and going short**\n- _Going Long_: hope the price will rise\n - Buy stock *now* for $61.54\n - Sell in the future for $80 - make money ($80 - $61.54 = $18.46 per share profit)\n - Sell in the *future* for $40 - lose money ($40 - $61.54 = $21.54 per share loss)\n\n- _Going short_: hope the price will fall\n - Sell borrowed stock *now* for $61.54\n - Buy it back in the future for $50 - make money ($61.54 - $50 = $11.54 per share *profit*)\n - Buy it back in the *future* for $70 - lose money ($61.54 - $70 = $8.46 per share *loss*)\n\n**Mutual Funds and Exchange Traded Funds (ETFs)**\n> Basket of individual stocks that can be bought to allow instant diversification\n\n_Russell 1000 Value ETF_\n- Buy online or via a broker\n- Buy one share\n- Fraction of a share of 1000 firms\n- Instant diversification\n\n_Mutual Fund_\n- is a common choice made by people investing on their own.\n- Investing in a Mutual Fund\n - Select a fund\n - Send money\n - Money is pooled\n - Managers invest pooled money\n- Focus on asset classes\n - Domestic equities\n - Bonds\n - International equities\n- Cost of Fund\n\n**Basics of stock valuation**\n$$Valuation = \\frac{Expected\\ Dividend}{Required\\ Rate\\ of\\ Return\\ -\\ Growth\\ Rate}$$\n\n$Growth\\ Rate = \\frac{This\\ Year's\\ Revenue\\ -\\ Last\\ Year's\\ Revenue}{Last\\ Year's\\ Revenue}$
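\n\nAs a quick worked sketch of this formula (the numbers are illustrative assumptions): a stock expected to pay a $2.00 dividend, with a 10% required rate of return and 6% growth, is valued at 2 / (0.10 - 0.06) = $50.\n\n```Python\nexpected_dividend = 2.00 # illustrative\nrequired_return = 0.10 # illustrative\ngrowth_rate = 0.06 # illustrative\n\nvaluation = expected_dividend / (required_return - growth_rate)\nprint(valuation) # 50.0\n```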
\n\n**How often do stocks do poorly?**\n- Do stocks outperform Treasury Bills (Bessembinder research)?\n\n**Treasury Bills (T-Bills)**\nShort term bonds issued by the US Treasury that mature in less than a year\n- People buy T-Bills as a way to earn just a very small return on their cash with no risk.\n- Pretty close to leaving your money in a savings account\n- Traditionally, T-bills have very low returns\n- Often times in recent years, it's been less than 2%\n- About 50% of stocks do _not_ outperform T-bills\n- About 50% of the stocks in your portfolio will be _break-even_ investments or even _lose money_.\n- How can the returns on the S&P 500 be almost 11% per year?\n - The performance of the US stock market is powered by a handful of superstar stocks\n - We don't know which stocks these will be in any given year, but they will be there\n - Picking stocks is _risky_\n - That's why it's often a good idea for small investors to buy a low cost exchange traded fund or mutual fund product and own the market as a whole.\n\n**Building your portfolio**\n1. Value companies\n2. Growth companies\n\n+ _Value companies_\n + Modestly priced\n + Mature firms\n + More stable\n + Industries that aren't rapidly growing\n + Value Stocks\n + Railroad\n + Utility\n + Industrial equipment makers\n + Energy providers\n+ _Growth companies_\n + Fast growing\n + Lower profit levels\n + Relatively expensive\n + Growth Stocks\n + Information technology\n + Internet companies\n + Popular consumer goods\n\n- Historically, _growth stocks_ have **lower average returns** than value stocks do.\n - Growth stocks: ~9% average returns\n - Value stocks: ~13% average returns\n\n- _Value stocks_ tend to be _more volatile_ than the overall market.\n - Go through slumps\n - Negative returns for long periods\n - Fast appreciation over short period\n\n- _Growth Stocks_\n - More frequent positive returns\n - Lower returns\n - Smaller dividends paid\n\n+ Stocks should only be a part of your portfolio\n\n- Only institutional investors can trade post-close and pre-open.\n- The median price for stock is usually higher than the mean (the average).\n- The market opens at 9:30 a.m. 
Eastern on all non-holiday weekdays.\n- While growth stocks often do not pay dividends, they more often have positive returns than value stocks.\n\n## Bond Markets\n\nIssued by corporations, governments, states, and local municipalities\n\n**Major characteristics of bonds:**\n- _Par value_\n- _Price_\n- _Coupon_\n - $1,000 par value x 5% annual coupon rate = $50 annually\n - Zero Coupon Bonds:\n - Zero or low coupon rate\n - Don't pay interest\n - Issued at discount below par value\n- _Maturity_\n- _Yield_\n- _Call Provision_\n\n**Bond Rating**\n- Moody's: Aaa, Aa1, Aa2, Aa3, A1, A2, A3, Baa1, Baa2, Baa3, Ba1, Ba2, Ba3, B1, B2, B3, Caa1, Caa2, Caa3, Ca, C\n- The lower the bond rating is, the higher the probability of default.\n- When the bond defaults, that means that it stops paying coupon payments and interest to the investor, and investors may not get all their money back.\n- Bond rating agencies are judging how risky the bond is and how likely the company is to pay back the money the investor has lent to them by buying that company's bonds.\n- Riskier bonds mean an investor won't get paid (all) their money back, and also that the rate of return demanded by capital markets is going to be higher.\n- If a bond is riskier, you're going to need more in yield, or coupon payments, in order to buy that bond in the first place.\n\n**Junk Bonds**\n- High-yield\n- Higher interest (3-5% more) than AAA-rated bonds\n\n**Bond evaluation**\nEx:\n- Maturity: 10\n- Par: $1000\n- Coupon: Par x 0.08 = 80\n- Yield: 0.06\n- **Value of Bond**: $1,147.20\n- **Price**: -$1,147.20 (Excel's PV returns a negative number: the cash outflow to buy the bond)\n\n[Present Value](https://www.investopedia.com/ask/answers/040315/how-do-you-calculate-present-value-excel.asp): =PV(Yield, Maturity, Coupon, Par, 0)\n\n**Bond Yield Calculations**\n- Maturity: 10\n- Coupon: 80\n- Par Value: 1000\n- Price: 1000\n- **Yield**: 8%\n\n[RATE](https://www.investopedia.com/ask/answers/051815/how-can-i-calculate-bonds-coupon-rate-excel.asp): =RATE(Maturity, Coupon, -Price, Par Value)\n\n### Three important relationships in bonds\n+ The value of a bond is inversely related to changes in the level of interest rates\n+ The coupon ties directly to the yield. The market value of the bond is less than par value if the yield on similar bonds is above the coupon interest rate.\n+ Long term bonds have greater interest rate risks than short term bonds do.
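\n\nA minimal sketch of the bond-evaluation arithmetic above, with the same inputs (10-year maturity, $1,000 par, $80 coupon, 6% yield):\n\n```Python\nmaturity, par, coupon, yield_ = 10, 1000, 80, 0.06\n\n# value = PV of the coupon stream plus PV of the par repayment\nvalue = sum(coupon / (1 + yield_)**t for t in range(1, maturity + 1)) \\\n    + par / (1 + yield_)**maturity\nprint(round(value, 2)) # 1147.2\n```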
\n\n### Treasury bond markets\nYour income level and tax circumstances play a major role in determining how you should allocate the bond portion of your portfolio.\n\n**Treasury Bond**\n- Safest securities\n- About 4% returns over last 50 years.\n- Exempt from state and local income taxes\n - Especially useful in states like California, Oregon, Iowa, and New Jersey where the state income taxes are high.\n - Some major cities, like New York and Washington DC, have local income taxes, which also make treasury bonds an exceptionally good investment choice.\n- The US Treasury Department finances government debt by issuing **short- and long-term** bonds.\n- _T-Bills_:\n - Don't pay coupon payments; instead pay face value at maturity.\n - Denominations as small as $1,000.\n - Sold on a discount basis\n- _T-Bonds_:\n - Pay coupons, which are semiannual\n - The interest rate on these bonds is the coupon rate. And that's paid in addition to their face value.\n - That face value is paid out at maturity.\n - Denominations as small as $1,000\n - Very safe and risk free\n - Substitute for CDs and savings accounts.\n - Liquid, easy to sell\n\n[Treasury Direct](www.treasurydirect.gov)\n\n### Municipal bond markets\nFor investors in the **highest income tax brackets**, the best choice of bonds is often **municipal bonds**.\n\n**Muni Bonds**\n- Safe\n- Odds of default less than 1%\n- Returns average 4-6% of amount invested\n\n**Tax-Backed Bonds**\n- Issued by states, counties, districts, cities, towns, school districts\n- Secured by tax revenue\n- Revenue Bonds\n- Issued for special projects (such as a new water treatment plant)\n- Bondholders paid back from revenues generated by the project financed\n\n**Tax Risk for Muni Bonds**\n1. Individual federal income tax rate reduced. The lower the federal tax rate is, the less valuable that muni tax exemption is.\n\n2. May eventually be taxable by the IRS:\n - **_1898 Supreme Court Decision_**: Muni bond coupon payments -> No federal income taxes\n - The US Supreme Court had a landmark 1898 decision which said municipal bonds are not taxable.\n - However, that doesn't mean that the decision couldn't be challenged or reviewed in the future by the IRS, and ultimately by the Supreme Court.\n\n**Tax Yield Valuation**\n$$Equivalent\\ Tax\\ Yield = \\frac{Tax\\ Exempt\\ Yield}{1 - Marginal\\ tax\\ rate}$$
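\n\nFor instance (illustrative numbers, not from the course): a 4% tax-exempt muni yield, for an investor with a 37% marginal tax rate, is equivalent to a taxable yield of about 6.35%.\n\n```Python\ntax_exempt_yield = 0.04 # illustrative\nmarginal_tax_rate = 0.37 # illustrative\n\nequivalent_taxable_yield = tax_exempt_yield / (1 - marginal_tax_rate)\nprint(round(equivalent_taxable_yield, 4)) # 0.0635\n```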
\n\nThe municipal securities market is characterized by **low liquidity**.\n - Municipal securities trade rarely.\n - There are more municipal bonds out there than any other form of security.\n - There are over 80,000 municipal bond issuers, and over 800,000 municipal bonds.\n - In 2017, about 99% of municipal securities didn't trade on any given day.\n - For those bonds that do trade, the numbers are very low:\n - Average of 14 customer trades during first 60 days after issuance.\n - Newly issued municipal bonds are the most actively traded\n - It's a buy-and-hold investment rather than a trading investment.\n - Almost all municipal bonds do trade in the first month after issuance, but by the time we get to the second month it's only about 15% of them trading, and then after that it's even less.\n\nThe market for municipal debt is called the tax-exempt market.\n\n**Pricing Information on Trades**\n\nThe secondary market, where investors trade bonds with one another, is just not very transparent for municipal bonds.\n\nWe do have price information coming from the MSRB, but pre-trade information is not broadly available.\n\nThe MSRB is the Municipal Securities Rulemaking Board:\n - Muni bond regulatory group\n - Since 1995\n\n### Bond ratings and municipal bonds\n- [Invesco](www.invesco.com)\n- Municipal bonds have a default rate of 0.00%, whereas corporate bonds have a rate of 0.38%\n- All rated municipal bonds have a lower 10-year cumulative default rate than AAA-rated corporate bonds.\n\n### Trading in municipal bonds\n[EMMA](emma.msrb.org)\n\n- The annual interest payments over the term of the bond, plus the money paid at redemption, will be more than what was paid for the bond. An investor pays more than par value for a bond when the annual interest payments on the bond yield more than the par value at maturity\n- Long-term bonds have greater interest rates than short-term bonds. The higher interest rates reflect the greater risk associated with long-term bonds.\n- What is the term for money an investor receives from a bond issuer at maturity? Par is the term for the face value of the bond received at maturity.\n- This is the future value you will receive from the bond.\n\n## Other Capital Markets\n### FX markets\n- Foreign Exchange Market\n- Many large companies use the foreign exchange market to hedge their risks against changes in exchange rates, which are driven by the countries' economies.\n- FX traders often use what are called futures contracts, which let them lever up: they can trade a million dollars at a time, even if they only have a few thousand dollars in their own personal accounts.\n- Trading FX is about trading expectations on the economy.\n- It's very risky, and small changes get magnified a lot through the power of futures.\n- When you are dealing with FX trading you are often capitalizing on very, very small moves over time.\n- The bid-ask spread can be significant if you're not careful.\n\n**Buying and selling in FX**\n- [FOREX](www.forex.com)\n\n#### Trading Strategies\n**FX arbitrage**\n- _Triangular Arbitrage_ (a numerical sketch follows below)\n - _Option 1_: USD to JPY: 1 USD = 108 JPY\n - _Option 2_: USD to EUR to JPY: 1 USD = 110 JPY
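\n\nA minimal sketch of the triangular-arbitrage check above (the EUR legs are illustrative assumptions chosen so the cross rates imply 1 USD = 110 JPY):\n\n```Python\nusd_jpy = 108.0 # direct rate, from the example\nusd_eur = 0.88 # illustrative\neur_jpy = 125.0 # illustrative; 0.88 * 125 = 110\n\ncross_usd_jpy = usd_eur * eur_jpy # USD -> EUR -> JPY\nprofit_per_usd = cross_usd_jpy - usd_jpy # 2 JPY per USD, before costs\nprint(cross_usd_jpy, profit_per_usd)\n```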
ฮฑ')\n\nIn the early 90's, the average mutual fund was able to produce 50 to 100 basis points per year in excess returns. In returns over and above the stock market for its investors.\n\nAs we've gotten more and more mutual funds, competition has heaated up and today ฮฑ are actually negative. The average mutual fund today not only can't beat the stock market, they actually underperform:\n\n![fig2](figs/disappearing_alpha_fig2.jpeg 'Proportion of Unskilled and Skilled Funds')\n- 75.4% of the mutual funds have zero ฮฑ.\n- Of those with non-zero ฮฑ, the remaining 24.6%:\n - 24 of the 24.6 are unskilled, meaning that they can't consistently produce positive alpha.\n - Only 0.6% of mutual funds can consistently produce positive ฮฑ.\n\n### Exchange-Traded Funds (ETFs)\n- Group of stocks trading on exchange under a ticker symbol (eg., SPY)\n- \"New\" financial innovation: started in the 1990s, but growing rapidly today\n- More ETFs than stocks today\n- ETFs cover US and foreign markets\n\n## Bid, Ask, Spread\n$Bid\\ Ask\\ Spread = Ask - Bid$\n\nThe difference between the bid and the ask determines if a trade happens, you have to meet in the middle somewhere.\n\nThe bigger and wider the spread is, the less trading goes on.\n\nThe spread matter a lot for liquidity, and vice versa. The more often a stock trades, the smaller and narrower that bid-ask spread will be.\n\n## Market Orders\nBuying and selling stocks involves placing order\n\n### Market orders\n- Involves buying a stock at the best available price.\n- Orders routed by price and time placed.\n\n### Limit Orders\n- are like haggling\n- I'm willing to buy stock XYZ at a price no higher than $100\n - If price hits that level, my order is automatically executed\n - If does not hit that level, my order does not go through\n\n## Sources:\n- [Understanding Capital Markets (LinkedIn)](https://www.linkedin.com/learning/understanding-capital-markets/)\n- [Algorithmic Trading and Stocks Essential Training](https://www.linkedin.com/learning/algorithmic-trading-and-stocks-essential-training/)\n- [Introduction to Financial Markets Course](https://www.coursera.org/learn/financial-markets-global)\n" }, { "alpha_fraction": 0.6550765037536621, "alphanum_fraction": 0.6680575013160706, "avg_line_length": 29.380281448364258, "blob_id": "5e6e491ba3b75b8d09c5c6a2f302fa63475156fe", "content_id": "66fa6d7b0eecc05d71a4c6a63bad3721ab6c6f7c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2157, "license_type": "no_license", "max_line_length": 118, "num_lines": 71, "path": "/finances/crypto/binance/bot_supertrend.py", "repo_name": "FernandoMarcon/learning_notes", "src_encoding": "UTF-8", "text": "import pandas as pd\nfrom binance import Client, ThreadedWebsocketManager, ThreadedDepthCacheManager\nimport mplfinance as mpf\nimport sys\nsys.path.insert(1, '/home/marcon/Documents/exchange/')\nimport config\n\n# Authenticate\nclient = Client(config.API_KEY, config.API_SECRET)\n\n# Settings\nsymbol = 'BTCUSDT'\ninterval = client.KLINE_INTERVAL_15MINUTE\nsince = \"1 day ago UTC\"\n\n#--- FUNCTIONS\n# Account Balance\ndef getBTC():\n balance = client.get_asset_balance('BTC')['free']\n return float(balance)\n\ndef getUSDT():\n balance = client.get_asset_balance('USDT')['free']\n return float(balance)\n\n# Buy/Sell\ndef buy(symbol = 'BTCUSDT', quant = 100):\n order = client.order_market_buy(symbol=symbol,quantity=quant)\n return order\n\ndef sell(symbol = 'BTCUSDT'):\n quant = getBTC()\n order = 
client.order_market_sell(symbol=symbol,quantity=quant)\n return order\n\n# Historical Data\ndef get_data(symbol, interval, since):\n hist_df=pd.DataFrame(client.get_historical_klines(symbol,interval, since),\n columns = ['Open Time','Open','High','Low','Close','Volume','Close Time','Quote Asset Volume',\n 'Number of Trades','TB Base Volume','TB Quote Volume','Ignore'])\n\n hist_df['Open Time'] = pd.to_datetime(hist_df['Open Time']/1000, unit = 's')\n hist_df['Close Time'] = pd.to_datetime(hist_df['Close Time']/1000, unit = 's')\n numeric_columns = ['Open','High','Low','Close','Volume','Quote Asset Volume','TB Base Volume','TB Quote Volume']\n hist_df[numeric_columns] = hist_df[numeric_columns].apply(pd.to_numeric, axis=1)\n\n return hist_df\n\n#--- RUN\ndata = get_data(symbol, interval, since)\ndata.head()\ndata.tail()\n\n# Viz\nmpf.plot(data.set_index('Close Time'),\n type = 'candle',style='charles',volume=True,\n title = 'BTCUSDT, last day of 15-minute candles', mav = (10,20,30))\n\n\nfrom tradingview_ta import TA_Handler, Interval, Exchange\n\n# tradingview_ta uses the 'crypto' screener for crypto pairs\nhandler = TA_Handler(\n symbol=\"BTCUSDT\",\n screener=\"crypto\",\n exchange=\"BINANCE\",\n interval=Interval.INTERVAL_1_DAY\n)\nprint(handler.get_analysis().summary)\n# Example output: {\"RECOMMENDATION\": \"BUY\", \"BUY\": 8, \"NEUTRAL\": 6, \"SELL\": 3}\n" }, { "alpha_fraction": 0.761800229549408, "alphanum_fraction": 0.7747530341148376, "avg_line_length": 46.94736862182617, "blob_id": "b2c9272e8698918c93bb94ee832dee9de41f3bb0", "content_id": "6e09845b4e7e41839d73b70b4f382ead227f1289", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4675, "license_type": "no_license", "max_line_length": 171, "num_lines": 95, "path": "/finances/crypto/essentials.md", "repo_name": "FernandoMarcon/learning_notes", "src_encoding": "UTF-8", "text": "source:\n[🔴 O Guia Básico do Bitcoin: o que é, como funciona, como comprar e guardar os seus 🔴](https://www.youtube.com/watch?v=ptU7uEFKFOU&list=PLEejHiwxEoccvQ7y2EpamjnS12Sz4R3Ml)\n\n# Economy 4.0\n## Phases\n - Phase 1: Steam powering the industrial revolution (18th century)\n - Phase 2: Electricity and assembly lines\n - Phase 3: 1970s automation, computers, and connected networks\n - Phase 4: An economy where the digital and the real blend inseparably (A.I.)\n- Internet and decentralization\n\n# Bitcoin\nThe first decentralized worldwide currency\n## Fundamentals:\n1) P2P distributed database\n2) Cryptography\n\n## What is Bitcoin?\n- A 100% digital currency that is not issued by any central bank\n- It is an electronic, decentralized, and scarce currency\n- P2P\n- It is \"created\" through mining\n\n## Characteristics\n1) Scarcity: only 21 million units of Bitcoin can ever be mined\n2) Each Bitcoin can be fractioned or divided into up to 8 decimal places (0.00000001); this fractional value is known as a \"satoshi\"\n3) Innovative technology that provides security and transparency for transactions, besides making Bitcoin \"decentralized\"\n4) Transaction fees tend to be much lower, and it works 24 hours a day, every day of the week\n\n## Bitcoin Technology - Blockchain\n- Every 10 minutes a new block appears on the network\n- Information about the people sending and receiving the transaction\n- Transaction data:\n - Public key: wallet and transactions\n - Private key: authenticates the movement of Bitcoins\n- The public and private keys are connected, and the private key signs all transactions that are 
realizadas\n\n## Mineraรงรฃo\n- Sistema de recompensa\n- A cada 4 anos o sistema de recompensa de mineraรงรฃo diminui pela metade a recompensa em Bitcoins\n\n# Wallets\n- [Exodus](https://www.exodus.com/)\n- [Mycelium Bitcoin Wallet](https://wallet.mycelium.com/)\n- [Uniswap](https://app.uniswap.org)\n\n# Geraรงรฃo de Criptomoedas\n1. Bitcoin\n2. Ethereum\n3. Cardano, criada para resolverem 3 problemas principais:\n- Escalabilidade\n- Interoperabilidade\n- Sustentabilidade\n\n# Altcoin\n## CARDANO\n- Cardano foi lanรงada em setembro de 2017, apรณs 2 anos de desenvolvimento\n- Cardano foi conceituado por Charles Hoskinson, co-fundador da Etherum\n- A Fundaรงรฃo Cardano รฉ uma entidade regulamentada sem fins lucrativos que รฉ a organizaรงรฃo de custรณdia de Cardano\n- Caracterรญsticas รบnicas:\n - Considerada a terceira geraรงรฃo das criptomoedas e a รบnica desenvolvida por pesquisa acadรชmica revisada por pares\n- Utilizam a tecnologia como Proof-of-Stake ao invรฉs do Proof-of-Work (bitcoin)\n- Transaรงรตes por banda larga\n - No caso da cardano, eles pretendem subdividir a rede em vรกrias sub redes โ†’ RINA (Recursive Internetwork Architecture)\n - Cada nรณ farรก parte de uma sub-rede especรญfica e pode comunicar com outras sub-redes se assim for necessรกrio.\n- Armazenamento\n - Utiliza 3 tรฉcnicas p/ tornar o armazenamento menor:\n - Poda\n - Assinaturas\n - Compressรฃo\n- Interoperabilidade\n - Problemas a serem superados:\n 1. Existem muitas moedas digitais\n 2. Os Bancos e Governos tรชm receio das criptomoedas por perder o controle\n 3. KYC (Know Your Customer)\n 4. Complience e Atomic Swaps entre as criptomoedas\n 5. Controle de metadados entre as Blockchains, levando transparencia das transaรงรตes entre as diversas moedas se assim o desejar\n 6. Quantum Resistance\n- Sistema de Tesouro\n - Toda vez que um bloco รฉ adicionado ร  cadeia, uma parte dessa recompensa de bloco serรก adicionado ร  tesouraria.\n - Entรฃo, se alguem quiser desenvover e tranzer algumas mudanรงas para o ecossistema, ele envia uma proposta para o Tesouro para pedir doaรงรตes.\n - As partes interessadas do ecossistema de Cardano votam e decidem se4 a cรฉdula deve ser concedida ou nรฃo\n - Se o fizerem, o remetente da cรฉdula recebera a concessรฃo para o desenvolvimento.\n - Vantagens:\n - o tesouro continua a encher-se ร  medida que mais e mais blocos sรฃo descobertos\n - ร‰ diretamente proporcional ao tamanho da rede. Quanto maior a rede, mais os recursos disponรญveis e o sistema de votaรงรฃo tambรฉm se torna mais descentralizado\n- [Cardano Calculator](https://antipalos.github.io/cardano-calculator/#calculator)\n\nO processo de Proof-of-Work รฉ lento, demanda muita energia computacional e desperdiรงa muita energia elรฉtrica\nSistema PoS escolhe nรณs da rede para fazer a validaรงรฃo das transaรงรตes. 
Estes nรณs sรฃo conhecidos como \"slot lรญderes\"\n\nStake Pool remuneram quem possui tokens da momeda para quem esta na rede\n\n# Masternode\n- [MasterNode.Online](https://masternodes.online/)\n" }, { "alpha_fraction": 0.7698151469230652, "alphanum_fraction": 0.7743732333183289, "avg_line_length": 63.73770523071289, "blob_id": "dc66d7e1016887befec0eef5735dae927bd8bef2", "content_id": "fd3ab29e9bb10ba2ad0eb6e6fb0b17fd8cfe3f05", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3981, "license_type": "no_license", "max_line_length": 415, "num_lines": 61, "path": "/finances/Algorithmic Trading/Backtrading/README.md", "repo_name": "FernandoMarcon/learning_notes", "src_encoding": "UTF-8", "text": "# Backtrading\n\nTo backtest a trading strategy using Python, you can:\n 1) run your backtests with pre-existing libraries,\n 2) 2) build your own backtester, or\n 3) 3) use a cloud trading platform.\n\nThere are 2 popular libraries for backtesting:\n 1. [Backtrader](https://www.backtrader.com/)\n 2. [Zipline](https://www.zipline.io/)\n\n## Backtrader\nIt's a Python library that aids in strategy development and testing for traders of the financial markets.\n\nIt is an open-source framework that allows for strategy testing on historical data.\n\nIt can be used to optimize strategies, create visual plots, and can even be used for live trading.\n\n### Pros:\n- **_Backtesting_** โ€“ This might seem like an obvious one but Backtrader removes the tedious process of cleaning up your data and iterating through it to test strategies. It has built-in templates to use for various data sources to make importing data easier.\n- **_Optimizing_** โ€“ Adjusting a few parameters can sometimes be the difference between a profitable strategy and an unprofitable one. After running a backtest, optimizing is easily done by changing a few lines of code.\n- **_Plotting_** โ€“ If youโ€™ve worked with a few Python plotting libraries, youโ€™ll know these are not always easy to configure, especially the first time around. A complex chart can be created with a single line of code.\n- **_Indicators_** โ€“ Most of the popular indicators are already programmed in the Backtrader platform. This is especially useful if you want to test out an indicator but youโ€™re not sure how effective it will be. Rather than trying to figure out the math behind the indicator, and how to code it, you can test it out first in Backtrader, probably with one line of code.\n- **_Support for Complex Strategies_** โ€“ Want to take a signal from one dataset and execute a trade on another? Does your strategy involve multiple timeframes? Or do you need to resample data? Backtrader has accounted for the various ways traders approach the markets and has extensive support.\n- **_Open Source_** โ€“ There is a lot of benefit to using open-source software\n- **_Active Development_** โ€“ This might be one area where Backtrader especially stands out. The framework was originally developed in 2015 and constant improvements have been made since then. Just a few weeks ago, a pandas-based technical analysis library was released to address issues in the popular and commonly used TA-Lib framework. Further, with a wide user base, there is also active third-party development.\n- **_Live Trading_** โ€“ If youโ€™re happy with your backtesting results, it is easy to migrate to a live environment within Backtrader. 
\n\n## Overview\nThe library's most basic functionality is to iterate through historical data and to simulate the execution of trades based on signals given by your strategy.\n\n- A Backtrader \"analyzer\" can be added to provide useful statistics.\n\n- Strategy: This is where all the logic goes in determining and executing your trade signals. It is also where indicators can be created or called, and where you can determine what gets logged or printed to screen.\n - The cerebro engine is the core of Backtrader. This is the main class, and we will add our data and strategies to it before eventually calling the cerebro.run() command.\n\nBasic Backtrader setup\n\n```Python\nimport backtrader as bt\n\nclass MyStrategy(bt.Strategy):\n def next(self):\n pass # do something\n\n# Instantiate Cerebro\ncerebro = bt.Cerebro()\n\n# Add strategy to Cerebro\ncerebro.addstrategy(MyStrategy)\n\n# Run Cerebro Engine\ncerebro.run()\n```\n\n- `log()` allows us to pass in data via the txt variable that we want to output to the screen.\n- `next()` gets called every time Backtrader iterates over the next new data point.\n\n## Source\n[algotrading101 - Backtrader for backtesting](https://algotrading101.com/learn/backtrader-for-backtesting/)\n" }, { "alpha_fraction": 0.6577986478805542, "alphanum_fraction": 0.667188286781311, "avg_line_length": 37.34000015258789, "blob_id": "658f7b6bf12b9a3173a7df3aa9bb04f1628762d6", "content_id": "164b176dbbd65aacefa9cb418eb784bcd6ccdab3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1917, "license_type": "no_license", "max_line_length": 198, "num_lines": 50, "path": "/finances/Algorithmic Trading/dual_share_class_pairs_trading.py", "repo_name": "FernandoMarcon/learning_notes", "src_encoding": "UTF-8", "text": "'''\nDual share class pairs trading\n\nViacom Class A and B shares - both refer to the same company: they give you ownership in the same underlying firm and the same share of the profits.\n\nAssumption:\n - In theory, these two stocks ought to have exactly the same value.\n - In practice, because Viacom B shares have a larger float (that is, there are more shares outstanding), they tend to trade more frequently than Viacom Class A.\n\nSo, there's a relationship between these two classes of shares; we might want to try and capitalize on that relationship.\n'''\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nplt.rcParams['figure.figsize'] = 20,10\n\ndata = pd.read_excel('finances/data/02_03_Begin.xls', index_col='Date',usecols=['Date','VIA','VIA.B'])\ndata.head()\n\n#--- Ratio between VIA and VIA.B\ndata['Ratio'] = data['VIA'] / data['VIA.B']\n\nprint('Average Ratio: ', round(data['Ratio'].mean(), 3))\nprint('Ratio Variation: ', round(data['Ratio'].var(), 3))\n\ndata['Ratio'].plot(title='Ratio between Viacom Class A and Class B')\n\ndata['MA'] = data['Ratio'].rolling(14).mean()\n\n# buy the class that looks cheap relative to the 14-day moving average of the ratio\ndata['buyVIA'] = data.apply(lambda x: 1 if x['Ratio'] < x['MA'] else 0,axis=1)\ndata['buyVIA.B'] = data.apply(lambda x: 1 if x['Ratio'] > x['MA'] else 0, axis=1)\n\ndef calc_returnLongOnly(data):\n via = data['buyVIA'] * (data['VIA'].diff()/ data['VIA'])\n via_b = data['buyVIA.B'] * (data['VIA.B'].diff()/data['VIA.B'])\n return via + via_b\n\ndef calc_returnLongShort(data):\n via = data['buyVIA'] * ((data['VIA'].diff()/data['VIA']) - (data['VIA.B'].diff()/data['VIA.B']))\n via_b = 
data['buyVIA.B'] * ((data['VIA.B'].diff()/data['VIA.B']) - (data['VIA'].diff()/data['VIA']))\n return via + via_b\n\ndata['Return - Long Only'] = calc_returnLongOnly(data)\ndata['Return - Long/Short'] = calc_returnLongShort(data)\n\ndata.head()\n\n\n# data['Profit - Long Only']\n# data['Profit - Long/Short']\n" }, { "alpha_fraction": 0.6616702079772949, "alphanum_fraction": 0.6616702079772949, "avg_line_length": 18.68000030517578, "blob_id": "140e143b1f2438ceb8dd585a77bf7abab18302fe", "content_id": "a5a456f5c6f3013479197f7e9ed71506bb0383a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 467, "license_type": "no_license", "max_line_length": 52, "num_lines": 25, "path": "/finances/Algorithmic Trading/building_algorithms.py", "repo_name": "FernandoMarcon/learning_notes", "src_encoding": "UTF-8", "text": "'''\nGather data from FRED using the FRED API\nsource: https://lvngd.com/blog/fred-api-python/\n\nFRED API:\n Sources - data sources\n Releases - release of data from a source\n Series (time series)\n Series observation values\n Categories\n Tags\n'''\n\nimport requests\n\n# NOTE: prefer loading the API key from an environment variable instead of hard-coding it\nfred_key = 'a1ae8901952eee6f396bb591555c0687'\n\nendpoint = 'https://api.stlouisfed.org/fred/sources'\n\nparams = {'api_key':fred_key,\n 'file_type':'json'\n }\n\nres = requests.get(endpoint, params)\nres.json()\n" }, { "alpha_fraction": 0.6127409934997559, "alphanum_fraction": 0.6571667790412903, "avg_line_length": 30.394737243652344, "blob_id": "a726f3938b30bc1195a7baafcfdbd97680acc8b1", "content_id": "5a9643fafc868a922cf1ff3cadbcd0e628a82571", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1193, "license_type": "no_license", "max_line_length": 126, "num_lines": 38, "path": "/finances/analysis/causality_test.py", "repo_name": "FernandoMarcon/learning_notes", "src_encoding": "UTF-8", "text": "# How to measure statistical causality: A Transfer Entropy Approach with Financial Applications\n# source: https://towardsdatascience.com/causality-931372313a1c\n# Open Quant Book: https://www.ebook.openquants.com/index.html\n# GOAL: Use statistics and information theory to uncover complex causal relationships from observational data\n# OBJECTIVES:\n# *\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Random-like non-linear 2-dimensional system:\n# x1(n) = 0.441*x1(n-1) + eps, x2(n) = 0.51*x1(n-1)**2 + eps\n# (built iteratively so both series share the same x1 realization\n# and deep recursion is avoided)\ndef nonlinear_sys(n_max = 1000):\n x1_vec = [0.0]\n x2_vec = [0.0]\n for n in range(1, n_max):\n x1_vec.append(0.441 * x1_vec[n-1] + np.random.normal(0,1))\n x2_vec.append(0.51 * (x1_vec[n-1]**2) + np.random.normal(0,1))\n\n plt.scatter(x1_vec, x2_vec, alpha = .5)\n plt.title('2D Non-linear System')\n plt.xlabel('x1')\n plt.ylabel('x2')\n\n return x1_vec, x2_vec\n\nx1_vec, x2_vec = nonlinear_sys()\n\n#### =============== Statistical Causality =============== ####\n# Granger causality:\n# X Granger-causes Y if the future realizations of Y are better explained using the past information from X and Y rather than Y alone.
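\n\n# A minimal illustration (an addition, not part of the original notes):\n# statsmodels' grangercausalitytests checks whether the SECOND column of the\n# passed data Granger-causes the FIRST one, here whether x1 drives x2.\nfrom statsmodels.tsa.stattools import grangercausalitytests\nimport pandas as pd\n\ngc_data = pd.DataFrame({'x2': x2_vec, 'x1': x1_vec})\ngranger_res = grangercausalitytests(gc_data[['x2', 'x1']], maxlag=2)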
"repo_name": "FernandoMarcon/learning_notes", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport matplotlib.pyplot as plt\n\n#########\ndef SO(max_so=7,start_so=1,last_so=0,so=.02, so_step=1.1):\n if start_so == 1:\n last_so = so\n else:\n last_so = so + last_so*so_step\n\n if start_so < max_so:\n start_so+=1\n return SO(max_so=max_so,start_so=start_so,last_so=last_so)\n else:\n return last_so\n\ndef getSOperc(max_so=7):\n return [SO(i) for i in range(1,max_so+1)]\n\n#########\ndata = pd.read_csv('finances/crypto/data/ccxt_binance_BTC-USDT_1h_20190101-20200101.csv',index_col='Time')\ndata.head()\n\ndef getPriceLeves(base_order, percentages):\n return [(base_order - base_order*i) for i in percentages]\n\n\n# prices = data['Close'].tolist()\n\norder_size=100\nposition=0\ntake_profit=0.01\nso_flag=0\n\nfinal_price = base_order\nbought=0\n\nwallet=pd.DataFrame([{'USDT':800,'BTC':0}])\nmaxSO=int(wallet['USDT']/amount)\n\nfor i in range(len(data)):\n price = data.Close[i]\n\n base_order = prices[0]\n percentages = getSOperc()\n price_levels=getPriceLeves(base_order, percentages)\n\n\n if so_flag==0 and position == 0:\n print('buy: '+str(order_size))\n wallet['BTC']+=order_size/price\n wallet['USDT'] -= order_size\n\n if price > (final_price + final_price*take_profit):\n print('Sell')\n wallet['USDT']+=wallet['BTC']*price\n wallet['BTC']=0\n\n elif price <= price_levels[so_flag]:\n so_flag+=1\n print('SO [{}] - buy: '.format(so_flag,amount))\n final_price=(price + final_price)/2\n else:\n print('Hold!!!')\n pass\n" }, { "alpha_fraction": 0.7554970979690552, "alphanum_fraction": 0.761439859867096, "avg_line_length": 42.62592697143555, "blob_id": "72d528d1c756e2d9655d5099b020782f7779bb10", "content_id": "e0418b75fc4f60cfd6a55d20d7d6cd7b670c7b70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 11781, "license_type": "no_license", "max_line_length": 281, "num_lines": 270, "path": "/finances/Algorithmic Trading/README.md", "repo_name": "FernandoMarcon/learning_notes", "src_encoding": "UTF-8", "text": "# Algorithmic Trading\nFinancial industry is evolving, with computers playing a bigger roles. 
" }, { "alpha_fraction": 0.7554970979690552, "alphanum_fraction": 0.761439859867096, "avg_line_length": 42.62592697143555, "blob_id": "72d528d1c756e2d9655d5099b020782f7779bb10", "content_id": "e0418b75fc4f60cfd6a55d20d7d6cd7b670c7b70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 11781, "license_type": "no_license", "max_line_length": 281, "num_lines": 270, "path": "/finances/Algorithmic Trading/README.md", "repo_name": "FernandoMarcon/learning_notes", "src_encoding": "UTF-8", "text": "# Algorithmic Trading\nThe financial industry is evolving, with computers playing a bigger role. Algorithmic, or computer-driven, trading now makes up the large majority of trades - more than 90% of orders by some metrics and estimates.\n\n## General Process\n![diagram](algo_trading_general_process_diagram.png \"Algorithmic Trading General Process\")\n\n## Example algorithm\n- Get Time, Price, Index\n- Calculate **Natural Price**\n$$ Natural\\ Price = Last\\ Price - Relative\\ Index\\ Change * Std.\\ Dev.$$\n- **Buy/No Buy**\n    - if( Natural Price > Price ): _Buy_\n    - else: _No Buy_\n
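\nA minimal sketch of that buy rule (the function and variable names are illustrative, not from the course):\n\n```python\ndef natural_price(last_price, relative_index_change, std_dev):\n    return last_price - relative_index_change * std_dev\n\ndef should_buy(price, last_price, relative_index_change, std_dev):\n    # buy when the model's natural price sits above the observed market price\n    return natural_price(last_price, relative_index_change, std_dev) > price\n```\n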
\n## Basics\nAlgo Trading relies on programmable objective criteria, and essentially comes in two flavors:\n\n- Market making: Market-making trades attempt to capitalize on the bid-ask spread - typically associated with high-frequency traders\n\n- Data mining: data-mining trades based on patterns in data, including stock prices and outside information\n    - We look for correlations between stock prices and other data points\n\n### Market making\nMarket making is based on bid-ask spreads.\n\n**Centralized Order Book: Orders, Stacks, and Matching**\n**Order types**:\n    - _Market order_ - immediately\n    - _Limit order_ - specific price\n    - _Iceberg order_ - large single order that has been divided into smaller lots\n\n**Order conditions**:\n    - _Time in force_\n    - _Day order_ - valid only for less than a day\n    - _Good till cancelled_ - valid until executed or cancelled\n    - _Fill or kill_ - immediately execute or cancel\n\n**Conditional Orders**:\n    - _Stop order_ - to sell/buy when the price of a security falls/rises to a designated level\n    - _Stop limit order_ - executed at the exact price or better\n\n**Discretionary order**:\n    - Traditional orders\n    - Broker decides when and at what price\n\n## Steps in Building an Algo\n- Define trading hypothesis and goal\n- Set operating time horizon and constraints\n- Algo testing\n\n**Maintaining an Algorithm**\n- Continual monitoring and maintenance\n    - Monitor performance\n    - Monitor market conditions\n- Maintenance and rejuvenation\n\n**Algorithmic Trading Requirements**\n- Centralized order book\n- Access to the (highly liquid) markets\n- Systems (three types):\n    - In-house systems\n    - Client systems\n    - Vendor systems\n- Information exchange\n\n## An Algo Trading Example\n- Renaissance Technologies is one of the most famous hedge funds pursuing algorithmic trading. RenTech gave an [example](www.bloomberg.com/news/articles/2016-11-21/how-renaissance-s-medallion-fund-became-finance-s-blackest-box) of the type of trade they pursue:\n    - When skies are cloudy, equities markets tend to perform worse than when skies are clear\n    - In theory, then, we can buy or sell based on data about weather forecasts\n    - Practically speaking it's hard to trade on weather patterns - they are imprecise and the correlations are low\n    - Correlations between stock prices and weather are low\n- Another [example](www.econ.yale.edy/~shiller/behfin/2006-04/cohen-frazzini.pdf) of algo trading is supplier/customer relationships\n    - When a small firm reports earnings, it has implications for other large companies that are customers or suppliers of that small company\n    - Think Apple and some of the small vendors it uses\n\n## Types of trading\n- Long-term trading, spanning several days before a trade is made.\n- Intraday trading, a high-frequency trade done in a single day, between the market opens and closes.\n\n## Big Data in Finance\n- Two ways of making investment decisions:\n    - Intuition\n    - Data\n- Each method has adherents\n\n### Data\n#### Quantitative\n> refers to using data to try and identify attractive investment opportunities.\n\n- Smart beta vs. fundamentals vs. technicals\n\n**Stock Price Data**\nUsually comes with the following information:\n- _Date_\n- _Open_: the price that a stock opens at on any given day.\n- _High_: the highest price for a stock in any given day.\n- _Low_: the lowest price for a stock in any given day.\n- _Close_: the price that a stock closes at on any given day.\n- _Adj. Close_: This price represents not just the close on a particular day, but the close after taking into account any dividends that are paid out. Not all stocks pay out dividends, but if one does pay out a dividend the stock price is automatically adjusted to compensate for that.\n\n#### Qualitative\n**Textual Data**\nKnowledge field known as Textual Analysis, Natural language processing, Sentiment analysis, content analysis, computational linguistics.\n\nPull qualitative information and transform into quantitative signals that can be used in a model\n\nA dictionary/list of words and their associated content (positive, negative, etc.) is required for sentiment analysis in text.\n\nIncreased interest attributable to:\n    - Bigger, faster computers\n    - Availability of large quantities of text\n    - New technologies derived from search engines\n\nExamples of textual data sources:\n- EDGAR (1994-2011) - 22.7 million filings\n- WSJ News Archive (2000 to present) - XML encapsulated\n- Audio transcripts (such as conference calls) - [SeekingAlpha](Seekingalpha.com)\n- Websites\n- Google searches\n- Twitter/StockTwits\n\nTextual Analysis Software:\n- Black boxes, such as WordStat, Lexalytics, Diction\n- Two critical components:\n    - Ability to download data and convert into string/character variable\n    - Ability to parse large quantities of text\n    - Most modern languages provide for both of these functions (Perl, Python, SAS Text Miner, VB.NET)\n
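\nA minimal dictionary-count sketch of that idea (the word lists and scoring are illustrative assumptions, not from the course):\n\n```python\nPOSITIVE = {'gain', 'growth', 'beat', 'strong'}\nNEGATIVE = {'loss', 'miss', 'weak', 'lawsuit'}\n\ndef sentiment_score(text):\n    # net count of positive vs. negative words, scaled by document length\n    words = text.lower().split()\n    pos = sum(w in POSITIVE for w in words)\n    neg = sum(w in NEGATIVE for w in words)\n    return (pos - neg) / max(len(words), 1)\n```\n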
\n### Why is Big Data Important?\n**Avoid the HiPPO**\nIn the absence of data, business decisions are often made by the HiPPO (highest paid person's opinion).\n\n### Big Data Project Steps\nAll big data projects require the same set of steps:\n1. Gather and clean data\n2. Analyze data\n3. Test choice with data\n4. Make a decision\n\n## Regression Analysis\nRegression analysis can be used as a business tool for prediction\n\n$$y = ax + b$$\n\nA regression predicts the dependent variable ($y$) based upon values of the independent variables ($x$)\n- Simple regression --> fits straight line to the data\n- Multiple regression --> fits a straight line using several independent variables\n\n### Predictions and Errors\nSteps to prediction\n1. Run regression\n2. Save coefficients (e.g., impact of each inch of insulation)\n3. Use _coefficients_ and _expected values_ for the future to get prediction\n\nEx:\n$$Estimated\\ Heating\\ Oil = 526.15 - 5.432 (Temperature) - 20.012 (Insulation)$$\n\nFor each observation, the variation can be described as:\n$$y = \hat{y} + \epsilon$$\n$y$: actual, $\hat{y}$: explained, $\epsilon$: error\n\n\n## Algorithmic Strategies\n- _Mean Reversion_\n    - Simplest strategies are based on mean reversion\n    - Pairs trades: Walgreens and CVS, Ford and GM, etc.\n\n- _Four-Factor Model_\n    - Four-factor model of Fama and French\n    - Identifies characteristics of firms that do well on average over time\n    - Uses portfolios of companies with these characteristics\n    - Too much noise in individual stocks\n    - Stock returns predicted by factors\n    - _Size_: market cap of the firm\n        - Small firms have higher returns than large firms over time\n    - _Book-to-market (B/M) value_: proxy for firm valuation\n        - Firms with higher B/M ratios have higher returns\n        - Book value is an accounting metric\n        - B/M appears to be an indicator for financial distress\n        - Distressed firms sometimes bounce back strongly\n    - _Beta_: measures volatility of a stock\n        - A beta of 1 means correlation of 1 between stock market and the firm. When market rises 1%, the stock on average rises 1% as well\n        - A beta of 2 means a market rise of 1% is associated with a stock rise of 2%\n    - _Momentum_: related to the concept of a \"hot hand\"\n        - Companies that have done well in the recent past continue to do well in the future\n        - Momentum can be measured by earnings or stock price\n\n## Building Algorithms\n### Data Gathering\n- [FRED Data](https://fred.stlouisfed.org/)\n    - [FRED API](https://fred.stlouisfed.org/docs/api/api_key.html)\n    - R\n        - https://github.com/jcizel/FredR\n        - https://github.com/markushhh/FredApi\n        - https://github.com/onnokleen/alfred\n        - https://github.com/sboysel/fredr\n    - Python\n        - https://github.com/7astro7/full_fred\n        - https://github.com/avelkoski/FRB\n        - https://github.com/jjotterson/datapungi_fed\n        - https://github.com/letsgoexploring/fredpy\n        - https://github.com/zachwill/fred\n        - https://github.com/mortada/fredapi\n\n## Algorithmic Trading with Python\n- [Stock Trading with Python](https://github.com/FernandoMarcon/learning_notes/blob/main/finance/stock_trading_with_python.py)\n- [Trying to understand whether any of the prices seem artificially high or low, over a particular period of time.](https://github.com/FernandoMarcon/learning_notes/blob/main/finances/Algorithmic%20Trading/VIX_moving_average.py)\n- [ETF Pairs Trading](https://github.com/FernandoMarcon/learning_notes/blob/main/finances/Algorithmic%20Trading/ETF_pairs_trading_with_algorithms.py)\n- [Dual share class pairs trading](https://github.com/FernandoMarcon/learning_notes/blob/main/finances/Algorithmic%20Trading/dual_share_class_pairs_trading.py)\n\n## Algorithmic Trading with R\n- [R and Bond Trading](https://github.com/FernandoMarcon/learning_notes/blob/main/finance/r_and_bond_trading.R)\n\n## Algo Trading as a Career\n- What do algo traders do?\n- What skills 
do they need?\n- How does the job/team work?\n- [LinkedIn](https://www.linkedin.com/jobs/search/?keywords=algorithmic%20trading&location=Worldwide)\n\nTypical Job Description\n- Design of frameworks and functionality for development of trading algos\n- Implementation, testing, and production\n- System tuning and optimization\n- Calibration and optimization of parameters\n- Proactive identification of problems and issues and resolution of them\n\n## Buying and Selling with Algorithms\n- Strategies and rules for an algorithm are just the beginning\n- Process for buying and selling varies significantly\n- High-frequency trading firms tap directly into exchange networks to trade\n- Most quant firms have separate systems for research versus trading\n\n### Trade Considerations\n- Automating an algorithm to make trade decisions directly can be dangerous\n- Raft of factors to take into account\n    - Liquidity in a stock\n    - Bid-ask spread imbalances\n    - Removal of human \"brake\"\n\n### Make-or-Take Liquidity\n- At large asset managers, reducing trading costs is a key consideration\n- Make-or-take liquidity plays an important role\n- Providing liquidity in markets can reduce trade cost\n- Will your firm be providing liquidity by buying when everyone else sells?\n\n### Expanding Algorithms\n- Diversification is key to any portfolio\n- Look for algo strategies that complement one another\n- Consider evaluating returns based on days when capital is used rather than absolute annual returns\n\n### Correlations and Bonds\n- Bonds are a good source of low-correlation trades with equities\n- Bond trading is harder but not impossible\n- Remember: Correlations across all assets go to 1 in times of stress\n\n### TRACE and MSRB\n- Looking for data on bonds\n- Consider two free sources of pricing\n- TRACE: Trade Reporting and Compliance Engine\n- MSRB: Municipal Securities Rulemaking Board\n\n### Scenario Analysis\n- Risk management in algo trading is crucial\n- Consider these two tools:\n    - Value at Risk (VaR): a statistical measure of the expected loss under ordinary market conditions.\n    - Expected shortfall: captures tail risks\n\n## Sources:\n- [Algorithmic Trading and Finance Models with Python, R, and Stata Essential Training (LinkedIn Course)](https://www.linkedin.com/learning/algorithmic-trading-and-finance-models-with-python-r-and-stata-essential-training/)\n- [Algorithmic Trading and Stocks Essential Training](https://www.linkedin.com/learning/algorithmic-trading-and-stocks-essential-training/)\n" }, { "alpha_fraction": 0.6616702079772949, "alphanum_fraction": 0.6616702079772949, "avg_line_length": 17.68000030517578, "blob_id": "140e143b1f2438ceb8dd585a77bf7abab18302fe", "content_id": "a5a456f5c6f3013479197f7e9ed71506bb0383a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 467, "license_type": "no_license", "max_line_length": 52, "num_lines": 25, "path": "/finances/Algorithmic Trading/building_algorithms.py", "repo_name": "FernandoMarcon/learning_notes", "src_encoding": "UTF-8", "text": "'''\nGather data from FRED using the FRED API\nsource: https://lvngd.com/blog/fred-api-python/\n\nFRED API:\n    Sources - data sources\n    Releases - release of data from a source\n    Series (time series)\n    Series observation values\n    Categories\n    Tags\n'''\n\nimport requests\n\nfred_key = 'a1ae8901952eee6f396bb591555c0687'\n\nendpoint = 'https://api.stlouisfed.org/fred/sources'\n\nparams = {'api_key':fred_key,\n          'file_type':'json'\n         }\n\nres = requests.get(endpoint, params)\nres.json()\n" }, { "alpha_fraction": 0.6127409934997559, "alphanum_fraction": 0.6571667790412903, "avg_line_length": 30.394737243652344, "blob_id": "a726f3938b30bc1195a7baafcfdbd97680acc8b1", "content_id": "5a9643fafc868a922cf1ff3cadbcd0e628a82571", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1193, "license_type": "no_license", "max_line_length": 126, "num_lines": 38, "path": "/finances/analysis/causality_test.py", "repo_name": "FernandoMarcon/learning_notes", "src_encoding": "UTF-8", "text": "# How to measure statistical causality: A Transfer Entropy Approach with Financial Applications\n# source: https://towardsdatascience.com/causality-931372313a1c\n# Open Quant Book: https://www.ebook.openquants.com/index.html\n# GOAL: Use statistics and information theory to uncover complex causal relationships from observational data\n# OBJECTIVES:\n# *\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Random-like non-linear 2-dimensional system\ndef x1(n):\n    if n > 0:\n        return 0.441 * x1(n - 1) + np.random.normal(0,1)\n    else:\n        return 0\n\ndef x2(n):\n    if n > 0:\n        return 0.51 * (x1(n-1)**2) + np.random.normal(0,1)\n    else:\n        return 0\n\ndef nonlinear_sys(n_max = 1000):\n    x1_vec = [x1(a) for a in range(0,n_max)]\n    x2_vec = [x2(a) for a in range(0,n_max)]\n\n    plt.scatter(x1_vec, x2_vec, alpha = .5)\n    plt.title('2D Non-linear System')\n    plt.xlabel('x1')\n    plt.ylabel('x2')\n\n    return x1_vec, x2_vec\n\nx1_vec, x2_vec = nonlinear_sys()\n\n#### =============== Statistical Causality =============== ####\n# Granger causality:\n#     X Granger-causes Y if the future realizations of Y are better explained using the past information from X and Y rather than Y alone.\n" }, { "alpha_fraction": 0.6723042296409607, "alphanum_fraction": 0.684619665145874, "avg_line_length": 23.928571701049805, "blob_id": "29cb925bcb04cae359f344b4aef8429cd0e6f793", "content_id": "e4f87a84d066525f8f6b5012555f13efc1178f4f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1047, "license_type": "no_license", "max_line_length": 60, "num_lines": 42, "path": "/finances/crypto/biscoint/api_test.py", "repo_name": "FernandoMarcon/learning_notes", "src_encoding": "UTF-8", "text": "# Source\n# https://pypi.org/project/biscoint-api-python/\n# https://github.com/Biscoint/biscoint-api-python\n\nimport json\nimport requests\n\nfrom biscoint_api_python import Biscoint\n\napi_data = {\n    'api_key': '',\n    'api_secret': '',\n}\nbsc = Biscoint(api_data['api_key'], api_data['api_secret'])\n\ntry:\n    ticker = bsc.get_ticker()\n    print(json.dumps(ticker, indent=4))\n\n    fees = bsc.get_fees()\n    print(json.dumps(fees, indent=4))\n\n    meta = 
bsc.get_meta()\n    print(json.dumps(meta, indent=4))\n\n    balance = bsc.get_balance()\n    print(json.dumps(balance, indent=4))\n\n    trades = bsc.get_trades(op='buy', length=1)\n    print(json.dumps(trades, indent=4))\n\n    offer = bsc.get_offer('buy', '0.002', False)\n    print(json.dumps(offer, indent=4))\n\n    # WARNING: this will actually execute the buy operation!\n    offerConfirmation = bsc.confirm_offer(offer['offerId'])\n    print(json.dumps(offerConfirmation, indent=4))\n\nexcept requests.exceptions.HTTPError as error:\n    print(error)\n    print(json.dumps(error.response.json(), indent=4))\n" }, { "alpha_fraction": 0.7672584056854248, "alphanum_fraction": 0.7712031602859497, "avg_line_length": 30.040817260742188, "blob_id": "66be902fe467426992da740533ed704bfa4e08b6e64", "content_id": "0fb12e467426992da740533ed704bfa4e08b6e64", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1521, "license_type": "no_license", "max_line_length": 152, "num_lines": 49, "path": "/finances/Financial Forecasting/README.md", "repo_name": "FernandoMarcon/learning_notes", "src_encoding": "UTF-8", "text": "# Financial Forecasting with Big Data\n\n## Business Intelligence (BI)\n- Term coined in 1993\n- Not just software\n- BI statistical tools\n- Takes you from raw data to business insight\n\n### How BI Works\nCombines data and models to help make choices.\n\n![bi works](fig/image18.png \"How BI Works\")\n\n### BI Advantages\n- Avoid guessing\n- Improve forecasting\n- Ensure business continuity\n- Reduce subjectivity\n\n### BI Disadvantages\n- Complexity\n- Upfront investment - BI is a process!\n- Analysis paralysis\n- Risk of black boxes\n\n## Conventional Financial Forecasting\n### The Percent of Sales Method\n- This is the most common method\n- It begins with the sales forecast at an annual growth rate in revenue\n- Balance sheet and income statement change proportionally with sales\n- Long-term forecasts are based on the compound annual growth rate (CAGR)\n\n### Income Statement Forecasts\n- Income statement is generated by finding costs that change directly with sales\n- Plowback: the percentage of net income that is reinvested by the firm as retained earnings\n- Income statement forecast determines the growth rate of future sales\n\n### Balance Sheet Forecasts\n- Balance sheet items - some vary with sales; some do not\n- To determine variation with sales, review historical accounts\n- Often varying directly with sales:\n    - Working Capital Accounts\n    - Inventory\n    - A/R\n    - A/P\n\n## Source\n- [ Financial Forecasting with Big Data ](https://www.linkedin.com/learning/financial-forecasting-with-big-data/basics-of-financial-regression-analysis)\n" }, { "alpha_fraction": 0.6900489330291748, "alphanum_fraction": 0.7101685404777527, "avg_line_length": 36.42856979370117, "blob_id": "2cc50ec478dde5b776c9899f33313020e80e24d3", "content_id": "1fd785f883232c3cbcb940ccb117d720ecc3a1c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1839, "license_type": "no_license", "max_line_length": 112, "num_lines": 49, "path": "/finances/analysis/binance_EDA.py", "repo_name": "FernandoMarcon/learning_notes", "src_encoding": "UTF-8", "text": "import pandas as pd\nfrom binance import Client, ThreadedWebsocketManager, ThreadedDepthCacheManager\nimport sys\nsys.path.insert(1, '/home/marcon/Documents/exchange/')\nimport config\n\n# Authenticate\nclient = Client(config.API_KEY, config.API_SECRET)\n\n# Get Tickers\ntickers = 
client.get_all_tickers()\nticker_df = pd.DataFrame(tickers)\nticker_df.set_index('symbol', inplace=True)\nticker_df.head()\nticker_df.tail()\n\nticker_df.loc['BTCUSDT']\n# Market Depth\ndepth = client.get_order_book(symbol='BTCUSDT')\ndepth_df = pd.DataFrame(depth['bids'])\ndepth_df.columns = ['Price', 'Volume']\ndepth_df.head()\n\n# Get Historical Data\nhistorical = client.get_historical_klines('BTCUSDT',Client.KLINE_INTERVAL_1DAY, '1 Jan 2011')\nhistorical\nhist_df = pd.DataFrame(historical)\nhist_df.columns = ['Open Time','Open','High','Low','Close','Volume','Close Time','Quote Asset Volume',\n 'Number of Trades','TB Base Volume','TB Quote Volume','Ignore']\nhist_df.head()\n\n# Preprocess Historical Data\nhist_df.dtypes\nhist_df['Open Time'] = pd.to_datetime(hist_df['Open Time']/1000, unit = 's')\nhist_df['Close Time'] = pd.to_datetime(hist_df['Close Time']/1000, unit = 's')\nnumeric_columns = ['Open','High','Low','Close','Volume','Quote Asset Volume','TB Base Volume','TB Quote Volume']\nhist_df[numeric_columns] = hist_df[numeric_columns].apply(pd.to_numeric, axis=1)\nhist_df.head()\nhist_df.tail()\nhist_df.describe()\nhist_df.info()\n\n# Viz\nimport mplfinance as mpf\nmpf.plot(hist_df.set_index('Close Time').tail(100))\nmpf.plot(hist_df.set_index('Close Time').tail(100), type = 'candle',style='charles')\nmpf.plot(hist_df.set_index('Close Time').tail(100), type = 'candle',style='charles',volume=True)\nmpf.plot(hist_df.set_index('Close Time').tail(120), type = 'candle',style='charles',volume=True,\n title = 'BTCUSDT Last 120 Days', mav = (10,20,30))\n \n" }, { "alpha_fraction": 0.6724042296409607, "alphanum_fraction": 0.684619665145874, "avg_line_length": 32.98113250732422, "blob_id": "235ccb70db0f10bc2ad92d29ca088b712bd9fc05", "content_id": "8fbe9b6614d58f40e704ea0e39dbf7f3c2399b31", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1801, "license_type": "no_license", "max_line_length": 185, "num_lines": 53, "path": "/finances/Algorithmic Trading/ETF_pairs_trading_with_algorithms.py", "repo_name": "FernandoMarcon/learning_notes", "src_encoding": "UTF-8", "text": "'''\nETF Pairs Trading\nOIH: ETF that's focused on oil producers\nXOP: Also an oil company-based ETF\nUSO: The price of oil\n\nAssumption: These two ETFs should move in sync with one another.\n'''\nimport pandas as pd\nimport matplotlib.pyplot as plt\nplt.rcParams['figure.figsize'] = 20,10\n\ndata = pd.read_excel('finances/data/02_02_Begin.xlsx',index_col='Date',usecols=[0,1,2,3])\ndata.head()\n\n#--- Visualize ETFs\ndata[['OIH','XOP']].plot(title='Oil Company-based ETFs')\n\n#--- Visualize ETFs with Oil prices\ndata[['OIH','USO']].plot(title='OIH ETF and Oil Price (USO)')\n\ndata[['XOP','USO']].plot(title='XOP (ETF) and Oil Price (USO)')\n\n#--- Correlations\ncor_matrix = data[['OIH','XOP','USO']].corr()\ncor_matrix.style.background_gradient(cmap='coolwarm')\n\nprint('Correlations ( OIH & XOP ): ', round(cor_matrix.loc['OIH','XOP'], 3))\nprint('Correlations ( OIH & USO ): ', round(cor_matrix.loc['OIH','USO'], 3))\nprint('Correlations ( XOP & USO ): ', round(cor_matrix.loc['XOP','USO'], 3))\n\n# Correlations between ETFs are higher than correlations between ETFs and oil prices: while OIH and XOP do tend to move in sync with one another, other factors probably drive the price.\n\n# How to capitalize on this?\n# - look at the ratio between OIH and XOP\n\n#--- OIH/XOP Ratio\ndata['Ratio'] = data['OIH'] / data['XOP']\ndata['Ratio'].plot(title = 'Ratio between OIH and 
XOP')\n\nprint('Average Ratio: ', round(data['Ratio'].mean(), 3))\nprint('Ratio Variation:', round(data['Ratio'].var(),3))\n\n# Is there some way we could determine if we should buy OIH or XOP?\n# - Buy OIH when the ratio is relatively low, and XOP and its relatively high.\ndef buy(ratio):\n if ratio < 0.65: return 'OIH'\n elif ratio > 0.8: return 'XOP'\n else: return None\n\ndata['Buy'] = data['Ratio'].apply(lambda x: buy(x))\n\ndata.Buy.value_counts()\n" }, { "alpha_fraction": 0.7821782231330872, "alphanum_fraction": 0.7821782231330872, "avg_line_length": 78.35713958740234, "blob_id": "7101a2c87d196d64f0937fe1efba814873a62150", "content_id": "9185c4b32ac0e66f41698fa1e1fe7d50afe52852", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1111, "license_type": "no_license", "max_line_length": 309, "num_lines": 14, "path": "/finances/Financial Markets/definitions.md", "repo_name": "FernandoMarcon/learning_notes", "src_encoding": "UTF-8", "text": "# Definitions\n\n> __RSI__: Relative Strength Index is a techinical indicator intended to chart the current and historical stregth or weakness of a stock or market based on the closing prices of a recent trading period. Measures the magnitude of recent price changes to evaluate overbought or oversold conditions in the price.\n\n> __Risk__: standar deviation of return.\n\n> __Efficient/portfolio frontier__: It is the set of portfolios which satisfy the condition that no other portfolio exists with a higher expected return but with the same standard deviation of return (e.g. the risk)\n\n> __Markowitz bullet__: the hyperbola in efficient frontier, and its upward sloped portion is the efficient frontier if no risk-free asset is available. With a risk-free asset, the straight line is the efficient frontier.\n\n> __Money__: In economics, money is often defined in terms of the three functions it provides: a store of value, a unit of account and a medium of transaction\n\n> __Derivatives__: securities that move in correspondance to one or more underlying assets.\n - options, swaps, futures and forward contracts\n" }, { "alpha_fraction": 0.6337209343910217, "alphanum_fraction": 0.6744186282157898, "avg_line_length": 14.636363983154297, "blob_id": "c5ba0c12550fd3bdff7aecb4426c6ffeba062972", "content_id": "62675cf3fe8477acf4e87c3bbd769dcceb5989a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 172, "license_type": "no_license", "max_line_length": 43, "num_lines": 11, "path": "/JSON.md", "repo_name": "FernandoMarcon/learning_notes", "src_encoding": "UTF-8", "text": "# JSON - JavaScript Object Notation\n\n_Notation_: a way of presenting information\nObjects in JavaScript:\n\nlet cart = {\n items: 0,\n total: 0,\n tax: 0,\n taxRate: 0.075\n};\n" }, { "alpha_fraction": 0.7202796936035156, "alphanum_fraction": 0.7202796936035156, "avg_line_length": 34.75, "blob_id": "4f8206e405b79128a2105b94c2878d868c905874", "content_id": "a4beb0b588e2dc0811f9311214675f7da928205b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 300, "license_type": "no_license", "max_line_length": 85, "num_lines": 8, "path": "/finances/Financial Markets/literature.md", "repo_name": "FernandoMarcon/learning_notes", "src_encoding": "UTF-8", "text": "# Literature\n\n## Books:\n- The Black Swan: The Impact of the Highly Improbable. 
(Nassim Nicholas Nicholas Taleb)\n- โ€˜Personal Finance for Dummiesโ€™ by Eric Tyson\n- โ€˜Broke Millennial: Stop Scraping By and Get Your Financial Life Togetherโ€™ by Erin Lowry\n- โ€˜Why Didnโ€™t They Teach Me This in School?โ€™ by Cary Siegel\n- Antifrigile\n" }, { "alpha_fraction": 0.7542997598648071, "alphanum_fraction": 0.7542997598648071, "avg_line_length": 49.875, "blob_id": "43484b0d63acf0039c25337559295d385882ab37", "content_id": "af7ca1c583402666476366472ca2f99415292839", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 411, "license_type": "no_license", "max_line_length": 150, "num_lines": 8, "path": "/finances/Financial Markets/questions.md", "repo_name": "FernandoMarcon/learning_notes", "src_encoding": "UTF-8", "text": "- How to calculate the correlation between two stocks?\n- How to infer if two firms are independent?\n- Which is better: invest in correlated business or independent? โ†’ independent, you need to look for low covariance. Risk is determined by covariance.\n- What are hedge funds?\n- What are bubles? What characteristics define them?\n- Systemic risk?\n- Stock broker?\n- What are the measures of risk? โ†’ VaR, ... ?\n" }, { "alpha_fraction": 0.780415415763855, "alphanum_fraction": 0.781899094581604, "avg_line_length": 73.88888549804688, "blob_id": "8ad92c192888f5c96eacd6f4d8765f70819f8f69", "content_id": "e333adf88acf0f06530130dacb107602e37d81d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 674, "license_type": "no_license", "max_line_length": 210, "num_lines": 9, "path": "/pinescript/README.md", "repo_name": "FernandoMarcon/learning_notes", "src_encoding": "UTF-8", "text": "# Pine Script\n\n[Pine Script](https://www.tradingview.com/pine-script-docs/en/v4/Introduction.html) is the programming language used on the [TradingView](https://www.tradingview.com/) charting platform.\n\n## Resources\n- [PineCoders](https://www.pinecoders.com/learning_pine_roadmap/)\n- [Kodify.net](https://kodify.net/tradingview-programming-articles/): the largest repository of Pine-related articles.\n- [Pine Videos](https://www.pinecoders.com/resources/#videos)\n- [Backtest Rookies](https://backtest-rookies.com/category/tradingview/) also has some articles on Pine. They produce quality material illustrating many of the typical thinigs Pine coders want to do or explore.\n" }, { "alpha_fraction": 0.5315408706665039, "alphanum_fraction": 0.5915201902389526, "avg_line_length": 29.21875, "blob_id": "2c0aafcef4acd563d5cec75241ea867e82805a60", "content_id": "6faf8d064a6e9a767bb1f778bcf5d69e2a550fa2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 967, "license_type": "no_license", "max_line_length": 141, "num_lines": 32, "path": "/finances/crypto/3commas/helper_functions.py", "repo_name": "FernandoMarcon/learning_notes", "src_encoding": "UTF-8", "text": "'''\nCalculate all N price-drop percentages for placing Safety Orders (SOs).\nThe percentage levels are based on a constant value (e.g. 
SO=2%), combined with a stepping factor that scale up the levels at each iteration:\n\nFORMULA => SO + SO-STEP * PREVIOUS_%_DROP (or, if its the first: SO)\nEx: For SO = 2% and SO-STEP=1.1%, this would give:\n\nSO(1): 2% (only SO)\nSO(2): 4,2% (+step)\nSO(3): 6.62% (+step)\nSO(4): 9.282% (+step)\nSO(5): 12.2102% (+step)\nSO(6): 15.43122% (+step)\nSO(7): 18.974342% (+step)\n\nThis would cover a drop of 18.97% in the coin price.\n'''\n\ndef SO(max_so=7,start_so=1,last_so=0,so=.02, so_step=1.1):\n if start_so == 1:\n last_so = so\n else:\n last_so = so + last_so*so_step\n\n if start_so < max_so:\n start_so+=1\n return SO(max_so=max_so,start_so=start_so,last_so=last_so)\n else:\n return last_so\n\ndef getSOperc(max_so=7):\n return [SO(i) for i in range(1,max_so+1)]\n" }, { "alpha_fraction": 0.6613110303878784, "alphanum_fraction": 0.7075835466384888, "avg_line_length": 30.1200008392334, "blob_id": "0861b98317c4208e4647f3fa73b399b0296dda6c", "content_id": "d1a7d89efce9f365d580d0b4746419def47f2494", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1556, "license_type": "no_license", "max_line_length": 109, "num_lines": 50, "path": "/finances/Algorithmic Trading/stock_trading_with_python.py", "repo_name": "FernandoMarcon/learning_notes", "src_encoding": "UTF-8", "text": "'''\nStock Trading with Python\nsource: Algorithmic Trading and Finance Models with Python, R, and Stata Essential Training (LinkedIn Course)\n'''\nimport pandas as pd\npd.core.common.is_list_like = pd.api.types.is_list_like\nfrom pandas_datareader import data\n\n#### --------------- Data Retrieval --------------- ####\n#--- Yahoo Finances\ndf = data.get_data_yahoo('MSFT', '2018-01-01', '2019-01-01')\ndf.head()\n\n#--- Quandl\n# !pip install quandl\nimport quandl\naapl = quandl.get('WIKI/AAPL',start_date='2014-01-01', end_date='2016-01-01')\naapl.describe()\n\n#--- Financial Data Manipulation, Storage and Visualization\nimport matplotlib.pyplot as plt\nplt.rcParams['figure.figsize'] = 20,10\nimport pandas_datareader as reader\nimport datetime\n\n# Store data as CSV file\naapl=reader.get_data_yahoo('AAPL', start=datetime.datetime(2008,1,1), end=datetime.datetime(2012,1,1))\naapl.describe()\n\naapl.to_csv('finances/data/aapl_02_04.csv')\ndownload_aapl='02_04CSV.csv'\n\n# Create new variables\naapl['Change'] = aapl.Open - aapl.Close\naapl['Pct_Chg'] = aapl.Change/aapl.Open\n\n# Plot Percent Change\naapl['Pct_Chg'].plot(grid=True)\n\n#--- Building financial databases\ndef get(tickers, startdate, enddate):\n def data(ticker):\n return(reader.get_data_yahoo(ticker, start=startdate, end=enddate))\n datas=map(data, tickers)\n return pd.concat(datas, keys=tickers, names=['Ticker', 'Date'])\n\ntickers = ['MSFT', 'CRM','GE','MMM']\nall_data = get(tickers, datetime.datetime(2015,1,1), datetime.datetime(2019,1,1))\nall_data\nall_data.to_csv('finances/data/alldata_02_06.csv')\n" }, { "alpha_fraction": 0.7204968929290771, "alphanum_fraction": 0.7515528202056885, "avg_line_length": 39.25, "blob_id": "d94983705b6da388807550724cb33388bb9c514e", "content_id": "04c50921f4140cb67cbb4be8470d8222765db513", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 324, "license_type": "no_license", "max_line_length": 101, "num_lines": 8, "path": "/finances/Algorithmic Trading/data_acquisition/README.md", "repo_name": "FernandoMarcon/learning_notes", "src_encoding": "UTF-8", "text": "# Data Acquisition\n\n- [Quandl: A Step-by-Step 
Guide](https://algotrading101.com/learn/quandl-guide/)\n- [Google Finance API and 9 Alternatives](https://algotrading101.com/learn/google-finance-api-guide/)\n- [Yahoo Finance API โ€“ A Complete Guide](https://algotrading101.com/learn/yahoo-finance-api-guide/)\n- CCXT\n- Binance\n-\n" }, { "alpha_fraction": 0.6198347210884094, "alphanum_fraction": 0.6556473970413208, "avg_line_length": 24.928571701049805, "blob_id": "5d35fd665e5c6a42758de7a7980def02e0bc5910", "content_id": "25c227fc9a3765bb26f56ab59e85c5c4ff720fff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 363, "license_type": "no_license", "max_line_length": 98, "num_lines": 14, "path": "/finances/Algorithmic Trading/golden_cross_strategy/moving_average.py", "repo_name": "FernandoMarcon/learning_notes", "src_encoding": "UTF-8", "text": "import os\n\nclosing_price_sum=0\nwindow = 200\nwith open('finances/Algorithmic Trading/golden_cross_and_backtrader/data/spy_2000-2020.csv') as f:\n content = f.readlines()[-window:]\n for line in content:\n print(line)\n tokens = line.split(',')\n close = tokens[4]\n\n closing_price_sum += float(close)\n\nprint(closing_price_sum / window)\n" }, { "alpha_fraction": 0.7285557389259338, "alphanum_fraction": 0.7453016638755798, "avg_line_length": 52.41796875, "blob_id": "3d38cc96761c4233f0b309dd979ab87424e9f6b4", "content_id": "67f72be01699e14b6edd9edca00b53c8fdbb33a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13675, "license_type": "no_license", "max_line_length": 475, "num_lines": 256, "path": "/finances/analysis/mean_variance_analysis.py", "repo_name": "FernandoMarcon/learning_notes", "src_encoding": "UTF-8", "text": "# https://towardsdatascience.com/cryptocurrencies-the-new-frontier-part-1-940e787c7ab9\n# https://towardsdatascience.com/cryptocurrencies-the-new-frontier-part-2-7218c6a489f9\n# https://github.com/JoBe10/Mean_Variance_Portfolio_Optimisation/blob/master/Efficient_Frontiers_Cryptos.ipynb\n\n# Measures in finance\n## return: the % change in the stock price over a given ime interval\n### - rate of return\n### - logarithmic return\n\n## rate of return vs. 
logarithmic return: https://chandlerfang.com/2017/01/09/arithmetic-vs-logarithmic-rates-of-return/\n\n## volatility: standard deviation of the return\n\n## expected return of the total portfolio is the weighted average of the expected returns of the individual stocks in the portfolio.\n\n## expected variance of the portifolio is a product of the variance of the individual stocks, their respective weights in the overall portfolio and the correlation between each pair of stocks.\n\n# Mean-Variance Analysis\n## aka, Modern Portfolio Theory (MPT)\n## Harry Markowitz, 1952\n## main ideia: by tweaking the weights of individual assets in a portfolio it is possible to construct optimal portfolios, which offer the maximum possible expected return for a given level of risk.\n## key insight: an individual asset's return and volatility should not be assessed by itself, but rather by how it contributes to a portfolio's overall return and volatility.\n## The optimal portofolios can be plotted on a graph where the line that connects the optimial portfolios will be an upward sloping hyperbola, which is called the Efficient Frontier.\n## \"Efficient\" because the portfolios that lie on it provide the highest expected return for a given level of risk.\n\n\n#### =============== Extract Stock Prices from Yahoo Finance =============== ####\n# Gathering the data\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\nimport pandas_datareader as dr\nfrom pandas_datareader import data\nfrom datetime import datetime\nimport cvxopt as opt\nfrom cvxopt import blas, solvers\n\n# Define start and end date\nend = datetime(2020, 7, 9)\nstart = datetime(2015, 8, 6)\n\n# Create a list of the ticker symbols to be used in this project\ntickers = ['AMZN', 'GOOGL', 'JNJ', 'V', 'PG', 'UNH', 'JPM', 'HD', 'VZ', 'NFLX', 'DIS', 'MRK', 'PEP', 'BAC', 'KO',\n 'WMT','CVX', 'ABT', 'AMGN', 'MCD', 'COST', 'NKE', 'PM', 'QCOM', 'LOW', 'BA', 'LMT', 'SBUX', 'UPS', 'CAT']\n\n# Obtain the adjusted closing prices from Yahoo Finance\nprices = pd.DataFrame()\nfor tick in tickers:\n prices[tick] = data.DataReader(tick, data_source = 'yahoo', start = start, end = end)['Adj Close']\nprices.columns = tickers\n\nprices\n\n# Plot the time series\nnormalised = prices / prices.iloc[0] * 100\nnormalised.plot(figsize=(20, 10))\nplt.title('Stock Time Series 2015 - 2020', fontsize=20)\n\n#--- Mean-Variance Portfolio Allocation\n# Calculate the log returns\nlog_r = np.log(prices / prices.shift(1))\n\n# Compute the annualised returns\nannual_r = log_r.mean() * 255\nannual_r\n# Under the assumptions of independent and identically distributed returns we can also annualise the covariance matrix using trading days.\n\ncov_matrix = log_r.cov() * 255\nvar = log_r.var() * 255\n\n# Next, I will generate random weights for all of the 30 stocks, which will make up the randomly generated portfolios, under a combination of assumptions. The assumptions are that only long positions are allowed, which ultimately means that the investor's wealth has to be divided among all available stocks through positive positions, and the positions have to add up to 100%, i.e. 
no additional borrowing and investing more than 100% of wealth.\n# Get the total number of stocks used\nnum_stocks = len(tickers)\n\n# Generate 30 random weights between 0 and 1\nweights = np.random.random(num_stocks)\n\n# Constrain these weights to add up to 1\nweights /= np.sum(weights)\nweights\n# Assuming that historical mean performance of the stocks making up the portfolio is the best estimator for future, i.e. expected, performance, expected portfolio return can be calculated as a product of the transpose of the weights vector and the expected returns vector of the stocks making up the portfolio.\n# Example of what the portfolio return would look like given the above weights\nptf_r = np.sum(annual_r * weights)\nptf_r\n\n# Given the portfolio covariance matrix computed above, the expected portfolio variance can be calculatd as the dot product of the transpose of the weights vector, the covariance matrix and the weights vector.\n# Compute portfolio variance\nptf_var = np.dot(weights.T, np.dot(cov_matrix, weights))\nptf_var\n\n# Using the computational concepts introduced so far we can generate many random portfolios and plot their returns against their risk (standard deviation), often referred to as volatility.\n# Define a function to generate N number of random portfolios given a DataFrame of log returns\ndef generate_ptfs(returns, N):\n ptf_rs = []\n ptf_stds = []\n for i in range(N):\n weights = np.random.random(len(returns.columns))\n weights /= np.sum(weights)\n ptf_rs.append(np.sum(returns.mean() * weights) * 252)\n ptf_stds.append(np.sqrt(np.dot(weights.T, np.dot(returns.cov() * 252, weights))))\n ptf_rs = np.array(ptf_rs)\n ptf_stds = np.array(ptf_stds)\n return ptf_rs, ptf_stds\n\n# Comparing portfolio returns and volatilities across portfolios is made a lot easier by computing a ratio of the two measures. The most common ratio that takes into consideration is the Sharpe ratio, which is a measure of the amount of excess return an investor can expect per unit of volatility (remember this is a measure of risk) that a portfolio provides. Because we assume that investors want to maximise returns while minimising risk, the higher this ratio the better.\n# Generate the return and volatility of 5000 random portfolios\nptf_rs, ptf_stds = generate_ptfs(log_r, 5000)\n\n# Plot the 5000 randomly generated portfolio returns and volatilities and colormark the respective Sharpe ratios\nplt.figure(figsize=(15,8))\nplt.scatter(ptf_stds, ptf_rs, c = (ptf_rs - 0.01)/ptf_stds, marker = 'o')\nplt.grid(True)\nplt.xlabel('Expected Volatility')\nplt.ylabel('Expected Return')\nplt.colorbar(label = 'Sharpe Ratio')\nplt.title('5000 Randomly Generated Portfolios In the Risk-Return Space')\n\n# Finding the optimal portfolios requires a constrained optimisation in which we maximise the Sharpe ratio. 
To begin, we need a function that returns the portfolio statistics that we computed previously, namely weights, portflio return, portfolio volatility and, based on the latter two, the portfolio Sharpe ratio.\n# Define a function that returns the portfolio statistics\ndef ptf_stats(weights):\n weights = np.array(weights)\n ptf_r = np.sum(log_r.mean() * weights) * 252\n ptf_std = np.sqrt(np.dot(weights.T, np.dot(log_r.cov() * 252, weights)))\n return np.array([ptf_r, ptf_std, (ptf_r - 0.01) / ptf_std])\n\n# Import the optimize sublibrary\nimport scipy.optimize as sco\n\n# Minimize the negative value of the Sharpe ratio\ndef min_sharpe(weights):\n return -ptf_stats(weights)[2]\n\n# Write the constraint that the wights have to add up to 1\ncons = ({'type':'eq', 'fun': lambda x: np.sum(x) - 1})\n\n# Bound the weights (parameter inputs) to be within 0 and 1\nbnds = tuple((0,1) for x in range(num_stocks))\n\n# Starting parameter (weights) list as equal distibution\nstarting_ws = num_stocks * [1. / num_stocks, ]\n\n# Call the minimisation function\nopts = sco.minimize(min_sharpe, starting_ws, method='SLSQP', bounds = bnds, constraints=cons)\nopts\n# In the results of the optimisation, the variable x stores the weights for the stocks making up the optimal portfolio. In the case of the 30 US stocks, there seem to be quite a few stocks with weights of zero, i.e. no capital allocated to them.\n# Obtain the optimal weights\nweights_opt = opts['x'].round(3)\nweights_opt\n# Plugging these weights into the portfolio statistics function above we can get the expected return, expected volatility and Sharpe ratio of the portfolio with the optimal weights.\n# Plug optimal weights into the statistics function\nptf_stats(weights_opt)\n# expected return:\nptf_stats(weights_opt)[0]\n\n# expected volatility:\nptf_stats(weights_opt)[1]\n\n# Sharpe ratio:\nptf_stats(weights_opt)[2]\n\n# Next, we can obtain the absolute minimum variance portfolio. As the name suggests, in order to obtain this portfolio, we minimise the portfolio variance.\n# Define a function that minimises portfolio variance\ndef min_var(weights):\n return ptf_stats(weights)[1]**2\n\n# Call the optimisation function\nopt_var = sco.minimize(min_var, starting_ws, method = 'SLSQP', bounds = bnds, constraints=cons)\nopt_var\n\n# For the absolute minimum variance portfolio, more portflios are invested in or, put differently, there are less stocks with weighst of zero.\n# Obtain the optimal weigths\nweights_opt_var = opt_var['x'].round(3)\nweights_opt_var\n\n# Get the statistics for the absolute minimum variance portfolio\nptf_stats(weights_opt_var)\n\n\n\n\n# Using the same logic applied previously, we can compute all optimal portfolios, i.e. 
all portflios with the maximum return for a given risk level, by iterating over multiple starting conditions.\n# Set up two conditions, one for the target return level and one for the sum of the portfolio weights\ncons2 = ({'type':'eq', 'fun':lambda x: ptf_stats(x)[0] - r},\n {'type':'eq', 'fun':lambda x: np.sum(x) - 1})\n# The boundary condition stays the same\nbnds2 = tuple((0,1) for x in weights)\n\n# return the volatility of a portfolio given a vector of weights\ndef min_port(weights):\n return ptf_stats(weights)[1]\n\n# Get the target and volatilities given a range of returns\ndef efficient_frontier(start_r, end_r, steps):\n target_rs = np.linspace(start_r, end_r, steps)\n target_stds = []\n for r in target_rs:\n cons2 = ({'type':'eq', 'fun': lambda x: ptf_stats(x)[0] - r},\n {'type':'eq', 'fun': lambda x: np.sum(x) - 1})\n bnds2 = tuple((0,1) for x in weights)\n res = sco.minimize(min_port, starting_ws, method = 'SLSQP', bounds = bnds2, constraints = cons2)\n target_stds.append(res['fun'])\n target_stds = np.array(target_stds)\n return target_rs, target_stds\n\n# Based on the random portfolio visualisation above it seems as though a target return of 30% would be a good upper bound\n# Obtain the target returns and volatilities based on 50 target returns\ntarget_rs, target_stds = efficient_frontier(0.0, 0.30, 50)\n\n# Plot the efficient frontier in the same visualisation as the randomly generted portfolios\nplt.figure(figsize=(15, 8))\nplt.scatter(ptf_stds, ptf_rs, c=(ptf_rs - 0.01) / ptf_stds, marker = 'o')\nplt.scatter(target_stds, target_rs, c = (target_rs - 0.01)/target_stds, marker = 'x')\nplt.plot(ptf_stats(opts['x'])[1], ptf_stats(opts['x'])[0], 'r*', markersize=20.0)\nplt.plot(ptf_stats(opt_var['x'])[1], ptf_stats(opt_var['x'])[0], 'b*', markersize=20.0)\nplt.grid(True)\nplt.xlabel('Expected Volatility')\nplt.ylabel('Expected Return')\nplt.xlim(0.14, 0.24)\nplt.colorbar(label = 'Sharpe Ratio')\nplt.title('Efficient Frontier Using 30 US Stocks')\n# Blue star: the absolute minimum variance portfolio.\n# Red star: the absolute maximum Sharpe ratio portfolio.\n# Any portfolio that lies on the frontier but is below the blue star is not an optimal or efficient portfolio as it does not dominate all other portfolios in terms of expected return givven a certain risk level but rather is dominated by the others.\n\n# why is the efficient frontier so far away from the cluster of randomly selected portfolios?\n# the portfolio weights used to randomly generate the random portfolios lie between 0 and 1. This means that every stock in the portfolio has at least some positive weight.\n# in both the absolute minimum variance portflio as well as the maximum Sharpe ratio portfolio a lot of the stocks in the portflio have a weight of zero. This is because the minimisation function determined the optimal weights for each stock in the portfolio based on the stocks expected return and covariance with all other stocks. Due to the expected return and covariance profiles of some stocks, the optimal weight for those just happened to be zero.\n\n# In order to understand this further, I will use the very first set of randomly selected weights from early in this project and include its expected return and expected volatility in the above visualisation. 
In the visualisation below, the white star represents the portfolio based on the initially generated random weights.\n# Include the initially generated random weights\nplt.figure(figsize=(15, 8))\nplt.scatter(ptf_stds, ptf_rs, c = (ptf_rs - 0.01)/ptf_stds, marker = 'o')\nplt.scatter(target_stds, target_rs, c=( target_rs - 0.01)/target_stds, marker = 'x')\nplt.plot(ptf_stats(opts['x'])[1], ptf_stats(opts['x'])[0], 'r*', markersize=20.0)\nplt.plot(ptf_stats(opt_var['x'])[1], ptf_stats(opt_var['x'])[0], 'b*', markersize=20.0)\nplt.plot(ptf_stats(weights)[1], ptf_stats(weights)[0], 'w*', markersize=20.0)\nplt.grid(True)\nplt.xlabel('Expected Volatility')\nplt.ylabel('Expected Return')\nplt.xlim(0.14, 0.24)\nplt.colorbar(label='Sharpe Ratio')\nplt.title('Efficient Frontier Using 30 US Stocks')\n\n# portfolio composition of the maximum Sharpe ratio portfolio and the one represented by the white star.\n# Create DataFrame of the weights assigned to each ticker\ncomposition = {'Expected Return': annual_r.round(3), 'Maximum Sharpe':weights_opt, 'White Star':weights.round(3)}\ncomp = pd.DataFrame(composition, columns = ['Expected Return', 'Maximum Sharpe', 'White Star'], index = tickers)\ncomp.head()\n\n# Inspect the correlation matrix\ncorr_matrix = log_r.corr()\ncorr_matrix\n\n\n# Capital Market Line\n" }, { "alpha_fraction": 0.7352185249328613, "alphanum_fraction": 0.7493573427200317, "avg_line_length": 24.09677505493164, "blob_id": "9adc60277cc8987d5a117cda4c4952c7e5c908be", "content_id": "3173081646c98d5090a771386a0b967904bfc3e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 778, "license_type": "no_license", "max_line_length": 74, "num_lines": 31, "path": "/finances/Algorithmic Trading/golden_cross_strategy/run.py", "repo_name": "FernandoMarcon/learning_notes", "src_encoding": "UTF-8", "text": "import os, sys, argparse\nimport pandas as pd\nimport backtrader as bt\nfrom strategies.GoldenCross import GoldenCross\nfrom strategies.BuyHold import BuyHold\n\nstrategies = {\n 'golden_cross':GoldenCross,\n 'buy_hold':BuyHold\n}\n\nparser = argparse.ArgumentParser()\nparser.add_argument('strategy', help='which strategy to run', type=str)\nargs = parser.parse_args()\n\nif not args.strategy in strategies:\n print('Invalid strategy, must be one of {}'.format(strategies.keys()))\n sys.exit()\n\ncerebro = bt.Cerebro()\ncerebro.broker.setcash(800)\n\nfname='data/spy_2000-2020.csv'\ndata = pd.read_csv(fname, index_col='Date', parse_dates=True)\n\nfeed = bt.feeds.PandasData(dataname=data)\ncerebro.adddata(feed)\n\ncerebro.addstrategy(strategies[args.strategy])\ncerebro.run()\ncerebro.plot()\n" }, { "alpha_fraction": 0.8034188151359558, "alphanum_fraction": 0.8290598392486572, "avg_line_length": 38, "blob_id": "5ca961b977e7ebc23d66b1f8ac75178ddb9131a6", "content_id": "63137bcab936aba40455585101e597918fa44941", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 117, "license_type": "no_license", "max_line_length": 82, "num_lines": 3, "path": "/finances/sentdex_tutorial/README.md", "repo_name": "FernandoMarcon/learning_notes", "src_encoding": "UTF-8", "text": "# Python Programming for Finance\n\n[source](https://www.youtube.com/playlist?list=PLQVvvaa0QuDcOdF96TBtRtuQksErCEBYZ)\n" }, { "alpha_fraction": 0.6838487982749939, "alphanum_fraction": 0.7030927538871765, "avg_line_length": 32.068180084228516, "blob_id": "30a09f470aa55b2ec0eb7bce7fe07f51c255c14f", "content_id": 
"ea2bf5625053f4decc8b9ab8dabb3bb2cc630d69", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1455, "license_type": "no_license", "max_line_length": 88, "num_lines": 44, "path": "/finances/crypto/money_management.md", "repo_name": "FernandoMarcon/learning_notes", "src_encoding": "UTF-8", "text": "# Money Management\n\n## Risk\n- 2% risk = 2% is the most you can lose on a trade\n- When you lose, you usually won't lose 2%\n### ATR\n- How many pips, top to bottom a currency pair moves per candle, on average\n- 1.5X the ATR for a currency pair\n- Your Stop Loss should be 1.5x the ATR away from price is now.\n- ATR to find pip value\n * Find out what 2% of your account is. Call this your __RISK__.\n * Figure out 1.5X ATR of the currency pair\n * __RISK__/1.5 ATR = Pip Value\n### Over-leverage\n#### How to\n- Do NOT trade the same currency more than once at 2% risk\n- Don't:\n - EUR/USD short, AUD/USD short, USD/JPY long all at 2% risk\n - you now have 6% of your trade on the USD!!\n#### How Not to\n- Go with the first trade entry for that currency and ride it\n- Go half/half (1% and 1%)\n- Go half-risk, and leave the door open for another trade later\n\n---\n> PIP\n- Percentage in point/price interest point\n- Forex currency pairs are quoted in terms of 'pips,' short for percentage in points.\n- a pip is one-hundredth of one percent, or the fourth decimal place (0.0001).\n- Currency base pairs are typically quoted where the bid-ask spread is measured in pips.\n---\n\n## Forex Risk Ratios\nCommon Ratios\n- 2:1\n- 3:1\n- TP is 2 or 3 x SL\n\nBig Mistakes\n- Setting your TP by using a tool in the Dirty Dozen video\n- Not scaling out\n- Capping your upside\n * You _WANT_ big runs as a trend trader\n * This is what actually goes into your pocket at the end of the year\n" } ]
### Over-leverage\n#### How to\n- Do NOT trade the same currency more than once at 2% risk\n- Don't:\n    - EUR/USD short, AUD/USD short, USD/JPY long all at 2% risk\n    - you now have 6% of your trade on the USD!!\n#### How Not to\n- Go with the first trade entry for that currency and ride it\n- Go half/half (1% and 1%)\n- Go half-risk, and leave the door open for another trade later\n\n---\n> PIP\n- Percentage in point/price interest point\n- Forex currency pairs are quoted in terms of 'pips,' short for percentage in points.\n- a pip is one-hundredth of one percent, or the fourth decimal place (0.0001).\n- Currency base pairs are typically quoted where the bid-ask spread is measured in pips.\n---\n\n## Forex Risk Ratios\nCommon Ratios\n- 2:1\n- 3:1\n- TP is 2 or 3 x SL\n\nBig Mistakes\n- Setting your TP by using a tool in the Dirty Dozen video\n- Not scaling out\n- Capping your upside\n    * You _WANT_ big runs as a trend trader\n    * This is what actually goes into your pocket at the end of the year\n" } ]
44
shichaogeng/get-fund-data
https://github.com/shichaogeng/get-fund-data
ec802bf7606f8b0bb3ed638f3badc26fde9ad718
63b99493e22cdd10b189723f2222488674df982e
3ccc74b3aec3fab961476d73ce30719d500e2515
refs/heads/master
2020-03-23T10:00:58.936252
2018-04-15T09:32:39
2018-04-15T09:32:39
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6724709868431091, "alphanum_fraction": 0.676616907119751, "avg_line_length": 25.217391967773438, "blob_id": "c5c5af9fe378a366dbff4ecb429fbee0ee3d8338", "content_id": "67825c69b3278c87f38c97584f73e7f0662db60b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1206, "license_type": "no_license", "max_line_length": 68, "num_lines": 46, "path": "/get_fund.py", "repo_name": "shichaogeng/get-fund-data", "src_encoding": "UTF-8", "text": "import urllib.request\nfrom setting import Setting\nimport os\nimport re\nimport threadpool\n\nclass Get_fund:\n\tdef __init__(self,url,regex):\n\t\tself.url = url\n\t\tself.regex = regex\n\n\tdef download(self,user_agent='wswp'):\n\t\tprint('Downloading:',self.url)\n\t\theaders = {'User-agnet':user_agent}\n\t\trequest = urllib.request.Request(self.url,headers=headers)\n\t\ttry:\n\t\t\thtml = urllib.request.urlopen(self.url).read().decode(\"UTF-8\")\n\t\texcept urllib.request.URLError as e:\n\t\t\tprint('Download error:',e.reason)\n\t\treturn html\n\n\tdef get_data(self, html):\n\t\treturn re.findall(self.regex, html)\n\n\ndef get_fund_data(url, regex, result_data):\n\tfund = Get_fund(list(url.values())[0], regex)\n\tdata = fund.download()\n\tresult = re.findall('[-+]\\d+\\.\\d+%', fund.get_data(data)[0])\n\tresult_data[list(url.keys())[0]] = result[0]\n\treturn result\n\nif __name__ == '__main__':\n\tsettings = Setting()\n\turl_queue = settings.get_url()\n\tregex = settings.regex\n\tresult_data = {}\n\tthreadpool.threadpool(get_fund_data, url_queue, regex, result_data)\n\tprint(result_data)\n\tkeys = list(result_data.keys())\n\tfp = open(settings.result_path, 'w')\n\tfor key in keys:\n\t\tline = key + ',' + result_data[key] + '\\n'\n\t\tfp.write(line)\n\tfp.close()\n\t# print(data)\n" } ]
1
p9s/flasky
https://github.com/p9s/flasky
d48dbd954bed53439031a94df6572d29fd9ab239
cf381b198fb9b8ac067e72fdd7eda1b0c701872b
7f05067d527227a6c369711d2dbd59969ba2bcba
refs/heads/master
2020-03-31T20:59:05.137025
2018-10-12T15:53:51
2018-10-12T15:53:51
152,562,767
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5865834355354309, "alphanum_fraction": 0.595943808555603, "avg_line_length": 19.03125, "blob_id": "54d5c5199738fb75781f38a6c25b1c2f3dbe1980", "content_id": "1ad54e48fa2664c9992cadf2d8192dad0667e5b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 665, "license_type": "no_license", "max_line_length": 54, "num_lines": 32, "path": "/app.py", "repo_name": "p9s/flasky", "src_encoding": "UTF-8", "text": "from flask import Flask\nfrom flask import request\nfrom flask import current_app\n\napp = Flask(__name__)\n\n\n\[email protected]( '/' )\ndef index():\n user_agent = request.headers.get( 'User-Agent' )\n\n response = make_response( '<h1>Hello Flask</h1>' )\n response.set_cookie( 'anawer', '42' )\n return response\n\n\[email protected]( '/user/<name>' )\ndef user(name):\n return 'User: {}!'.format( name )\n\n\n# app.add_url_rule( '/', 'index', index )\n\n\n# @app.route( '/user/<name>' )\n# def user( name ):\n# return '<h1>Hello, {}!</h1>'.format( name )\n# \n# # int string float path\n# # path ๅฏไปฅๅŒ…ๅซๆญฃๆ–œ็บฟ, ๅ…ถๅฎƒไธŽ string ็ฑปไผผ\n# @app.route( '/user/<int:id>' )\n" } ]
1
kavishme/spamfilter_tf-idf
https://github.com/kavishme/spamfilter_tf-idf
b23219cd63a2ba5d73fed57566f6c1064384224f
d813eb492c81058024c3b80c6f268f2bbd1ef319
6d640f8be0646cd8edd29b149119fc153391e090
refs/heads/master
2020-07-15T01:13:34.625167
2016-11-16T03:26:42
2016-11-16T03:26:42
73,871,602
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5746170878410339, "alphanum_fraction": 0.58293217420578, "avg_line_length": 21.19417381286621, "blob_id": "a8fdfc10958f9fc7af3257eaa374246c36d704be", "content_id": "bc0573a962b192ddb43dd742aeeae1dfd224b3b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2285, "license_type": "no_license", "max_line_length": 105, "num_lines": 103, "path": "/spamfilter.py", "repo_name": "kavishme/spamfilter_tf-idf", "src_encoding": "UTF-8", "text": "#!python3\n\nimport os\nfrom math import log\nfrom collections import OrderedDict\n\nDICTFILE = 'dict.txt'\nMAILFOLDER = './mails/'\nSEP = 150\nfformat = '.3f'\n\ndef getSpamDictionary(filename):\n\tf = open(filename, 'r')\n\twords = f.read()\n\twords = [word.strip().lower() for word in words.split('\\n')]\n\treturn words\n\n\ndef main():\n\n\tsdict = getSpamDictionary(DICTFILE)\n\t\n\tdocs = OrderedDict()\n\tidf = OrderedDict()\n\n\tfor fi in os.listdir(MAILFOLDER):\n\t\tif fi.endswith(\".txt\"):\n\t\t\tf = open(MAILFOLDER + fi, 'r')\n\t\t\tdocs[fi] = OrderedDict()\n\t\t\tdocs[fi]['data'] = f.read().strip().lower().replace('-', ' ').replace('\\n', ' ')\n\n\tfor word in sdict:\n\t\tfor doc in docs:\n\t\t\tdata = docs[doc]['data'].split(' ')\n\t\t\tcount = []\n\t\t\tfor w in word.split(' '):\n\t\t\t\tcount.append(data.count(w))\n\n\t\t\tif 'count' not in docs[doc].keys():\n\t\t\t\tdocs[doc]['count'] = OrderedDict()\n\n\t\t\tdocs[doc]['count'][word] = min(count)\n\n\n\tfor doc in docs:\n\t\tif 'tf' not in docs[doc].keys():\n\t\t\tdocs[doc]['tf'] = OrderedDict()\n\t\tfor word in sdict:\n\t\t\tcount = docs[doc]['count'][word]\n\t\t\tif count:\n\t\t\t\tdocs[doc]['tf'][word] = 1 + log( 1 + log(count, 10), 10)\n\t\t\telse:\n\t\t\t\tdocs[doc]['tf'][word] = 0\n\n\n\tfor word in sdict:\n\t\tfreq = 0\n\t\tfor d in docs:\n\t\t\tif docs[d]['count'][word] > 0:\n\t\t\t\tfreq += 1\n\t\tif freq:\n\t\t\tidf[word] = log((1+len(docs))/freq, 10)\n\t\telse:\n\t\t\tidf[word] = 0\n\n\n\tfor doc in docs:\n\t\tif 'tfidf' not in docs[doc].keys():\n\t\t\tdocs[doc]['tfidf'] = OrderedDict()\n\t\tfor word in sdict:\n\t\t\tdocs[doc]['tfidf'][word] = idf[word] * docs[doc]['tf'][word]\n\n\n\tprint('-'*SEP)\n\tprint('***Spam dictionary***', ','.join(sdict), sep='\\n\\n')\n\n\tprint('-'*SEP)\n\tprint('***Word frequency table***')\n\tprint('\\n')\n\tfor doc in docs:\n\t\tprint(doc, \" \\t\".join([str(docs[doc]['count'][word]) for word in docs[doc]['count']]))\n\n\n\tprint('-'*SEP)\n\tprint('***IDF***')\n\tprint('\\n')\n\tprint(\" \\t\".join([str(format(idf[word],fformat)) for word in sdict]))\n\n\tprint('-'*SEP)\n\tprint('***TF table***')\n\tprint('\\n')\n\tfor doc in docs:\n\t\tprint(doc, \" \\t\".join([str(format(docs[doc]['tf'][word], fformat)) for word in docs[doc]['tf']]))\n\n\tprint('-'*SEP)\n\tprint('***TF-IDF table***')\n\tprint('\\n')\n\tfor doc in docs:\n\t\tprint(doc, \" \\t\".join([str(format(docs[doc]['tfidf'][word], fformat)) for word in docs[doc]['tfidf']]))\n\tprint('-'*SEP)\n\nif __name__ == '__main__':\n\tmain()" } ]
1
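The `spamfilter.py` above weights each dictionary word with a double-logarithm damped term frequency, tf = 1 + log10(1 + log10(count)), and an IDF smoothed as log10((1 + N) / df). Below is a minimal sketch of those two formulas in isolation, using hypothetical counts rather than the mail corpus.

```python
# The TF and IDF weighting used in spamfilter.py above, applied to
# hypothetical counts. tf is the double-log damped term frequency;
# idf is smoothed with (1 + number_of_docs) in the numerator.
from math import log

def tf(count):
    return 1 + log(1 + log(count, 10), 10) if count else 0

def idf(doc_freq, n_docs):
    return log((1 + n_docs) / doc_freq, 10) if doc_freq else 0

# e.g. a word appearing 5 times in one mail, found in 2 of 10 mails:
print(format(tf(5) * idf(2, 10), '.3f'))
```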
whosonfirst-data/whosonfirst-data-venue
https://github.com/whosonfirst-data/whosonfirst-data-venue
5bb37d04815aa0d221a5089f853deb2e85a6d17b
b1c52c581137f9251682fac86eac9bffe5fe2827
8ced73c351b38c9143766d3de3bb894b68a7b67a
refs/heads/master
2020-12-24T06:49:19.214768
2020-05-01T15:40:27
2020-05-01T15:40:27
58,162,595
4
1
null
null
null
null
null
[ { "alpha_fraction": 0.5763418674468994, "alphanum_fraction": 0.5784262418746948, "avg_line_length": 23.922077178955078, "blob_id": "d5673b417c7c3044144c73304473dcc1993d30ab", "content_id": "8700f96daa7bc65465fe7c7b41391b7e28042655", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1919, "license_type": "no_license", "max_line_length": 133, "num_lines": 77, "path": "/bin/mk-data-markdown.py", "repo_name": "whosonfirst-data/whosonfirst-data-venue", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport os\nimport sys\nimport logging\n\nimport csv\nimport json\n\nimport datetime\n\nimport mapzen.whosonfirst.utils\n\nif __name__ == '__main__':\n\n import optparse\n opt_parser = optparse.OptionParser()\n\n opt_parser.add_option('-s', '--data', dest='data', action='store', default=None, help='The path to your data.json file')\n opt_parser.add_option('-o', '--out', dest='out', action='store', default=None, help='Where to write data (default is STDOUT)')\n\n opt_parser.add_option('-v', '--verbose', dest='verbose', action='store_true', default=False, help='Be chatty (default is false)')\n options, args = opt_parser.parse_args()\n\n if options.verbose:\t\n logging.basicConfig(level=logging.DEBUG)\n else:\n logging.basicConfig(level=logging.INFO)\n\n data = os.path.abspath(options.data)\n fh = open(data, 'r')\n\n data = json.load(fh)\n codes = {}\n\n for details in data:\n name = details['name']\n parts = name.split('-')\n country = parts[-1]\n codes[country] = details\n\n countries = codes.keys()\n countries.sort()\n\n fh = sys.stdout\n\n if options.out:\n out = os.path.abspath(options.out)\n fh = open(out, 'w')\n\n fh.write(\"# whosonfirst-data-venue stats\\n\\n\")\n\n dt = datetime.date.today()\n ymd = dt\n\n fh.write(\"_This file was generated by robots on %s, derived from the [data.json](data.json) file_\\n\\n\" % (ymd))\n\n for code in countries:\n\n details = codes[code]\n\n count = details['count']\n count = int(count)\n\n fh.write(\"## %s\\n\\n\" % details['description'])\n \n fh.write(\"* %s\\n\" % details['url'])\n\n if count == 0:\n fh.write(\"* _0 venues_\\n\")\n elif count == 1:\n fh.write(\"* one venue\\n\")\n else:\n count = \"{:,}\".format(details['count'])\n fh.write(\"* %s venues\\n\" % count)\n\n fh.write(\"\\n\")\n" }, { "alpha_fraction": 0.7772455215454102, "alphanum_fraction": 0.7772455215454102, "avg_line_length": 36.95454406738281, "blob_id": "b9952a02c7eeee901a4fc6b57954e08abf5b92f4", "content_id": "471f905621875b6ff9bdf86766eeedd987b7638e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 835, "license_type": "no_license", "max_line_length": 186, "num_lines": 22, "path": "/README.md", "repo_name": "whosonfirst-data/whosonfirst-data-venue", "src_encoding": "UTF-8", "text": "# whosonfirst-data-venue\n\nVenues in Who's On First.\n\n## Important\n\n_There is no data in this repository._\n\nThis respository is a starting point for venue data in Who's On First. 
All of the actual data has been stored in per-country (and in some cases per country-region) repositories.\n\nDetails for the various repositories that contain WOF data are available in human-readable and machine-readable forms through the [DATA.md](DATA.md) and [data.json](data.json) files.\n\nAlso, it's probably worth having a quick look at the [\"whosonfirst-data versus whosonfirst-data-SOMETHING repositories\"](https://whosonfirst.mapzen.com/data/#github-repos) documentation.\n\n## License\n\nhttps://github.com/whosonfirst/whosonfirst-data/blob/master/LICENSE.md\n\n## See also\n\n* https://whosonfirst.org/\n* https://spelunker.whosonfirst.org/placetypes/venue/\n" }, { "alpha_fraction": 0.7096773982048035, "alphanum_fraction": 0.7096773982048035, "avg_line_length": 26.799999237060547, "blob_id": "d810a7e5aacc8484a4fa425465b523fe72f066c1", "content_id": "81d4cf7211aae9075ea44adb6ace8b532d50b1e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 279, "license_type": "no_license", "max_line_length": 116, "num_lines": 10, "path": "/Makefile", "repo_name": "whosonfirst-data/whosonfirst-data-venue", "src_encoding": "UTF-8", "text": "count:\n\tfind ./data -name '*.geojson' -print | wc -l\n\ndata:\tdata-json data-markdown\n\ndata-json:\n\tbin/mk-data-json.py -c /usr/local/data/whosonfirst-data/meta/wof-country-latest.csv -r /usr/local/data/ > data.json\n\ndata-markdown:\n\tbin/mk-data-markdown.py -s data.json -o DATA.md \n" }, { "alpha_fraction": 0.5669912099838257, "alphanum_fraction": 0.5683820247650146, "avg_line_length": 22.955554962158203, "blob_id": "70373518dfc48805ffcb8bd50c4100cd325c6209", "content_id": "4c45389f041ed9836260e6d86368795ef6a16495", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2157, "license_type": "no_license", "max_line_length": 133, "num_lines": 90, "path": "/bin/mk-data-json.py", "repo_name": "whosonfirst-data/whosonfirst-data-venue", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport os\nimport sys\nimport logging\n\nimport csv\nimport json\n\nimport mapzen.whosonfirst.utils\n\nif __name__ == '__main__':\n\n    import optparse\n    opt_parser = optparse.OptionParser()\n\n    opt_parser.add_option('-c', '--countries', dest='countries', action='store', default=None, help='')\n    opt_parser.add_option('-r', '--root', dest='root', action='store', default=None, help='')\n    opt_parser.add_option('-o', '--out', dest='out', action='store', default=None, help='')\n\n    opt_parser.add_option('-v', '--verbose', dest='verbose', action='store_true', default=False, help='Be chatty (default is false)')\n    options, args = opt_parser.parse_args()\n\n    if options.verbose:\t\n        logging.basicConfig(level=logging.DEBUG)\n    else:\n        logging.basicConfig(level=logging.INFO)\n\n    countries = os.path.abspath(options.countries)\n    root = os.path.abspath(options.root)\n\n    fh = open(countries, 'r')\n    reader = csv.DictReader(fh)\n\n    stats = []\n\n    for row in reader:\n\n        country = row['wof_country']\n        country = country.lower()\n\n        if country == \"\":\n            continue\n\n        repo = \"whosonfirst-data-venue-%s\" % country\n        local = os.path.join(root, repo)\n\n        if not os.path.exists(local):\n            continue\n\n        data_json = os.path.join(local, \"data.json\")\n\n        if os.path.exists(data_json):\n\n            fh_local = open(data_json, \"r\")\n            stats_local = json.load(fh_local)\n\n            stats.extend(stats_local)\n            continue\n\n        remote = \"https://github.com/whosonfirst-data/%s\" % repo\n\n        count = 0\n\n        iter = 
mapzen.whosonfirst.utils.crawl(local)\n\n for i in iter:\n count += 1\n\n wofid = row['id']\n name = row['name']\n\n stats.append({\n 'name': repo,\n 'description': \"Who's On First venue data for %s (%s)\" % (name, country.upper()),\n 'url': remote,\n 'count': count,\n\n })\n\n fh.close()\n\n fh = sys.stdout\n\n if options.out:\n out = os.path.abspath(options.out)\n fh = open(out, 'w')\n\n json.dump(stats, fh, indent=2)\n sys.exit()\n\n" }, { "alpha_fraction": 0.7073847055435181, "alphanum_fraction": 0.7250813245773315, "avg_line_length": 24.434711456298828, "blob_id": "2ee26d6522719d4a74c055cf78293f0b48b9c98e", "content_id": "ed4ed8a49d0db48e92f545c761e6bd009462de3c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 31362, "license_type": "no_license", "max_line_length": 95, "num_lines": 1233, "path": "/DATA.md", "repo_name": "whosonfirst-data/whosonfirst-data-venue", "src_encoding": "UTF-8", "text": "# whosonfirst-data-venue stats\n\n_This file was generated by robots on 2016-09-22, derived from the [data.json](data.json) file_\n\n## Who's On First venue data for Andorra (AD)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-ad\n* _0 venues_\n\n## Who's On First venue data for United Arab Emirates (AE)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-ae\n* 4 venues\n\n## Who's On First venue data for Afghanistan (AF)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-af\n* one venue\n\n## Who's On First venue data for Antigua and Barbuda (AG)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-ag\n* _0 venues_\n\n## Who's On First venue data for Ak (US)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-us-ak\n* 21,849 venues\n\n## Who's On First venue data for Al (US)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-us-al\n* 154,037 venues\n\n## Who's On First venue data for Armenia (AM)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-am\n* _0 venues_\n\n## Who's On First venue data for Netherlands (AN)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-an\n* _0 venues_\n\n## Who's On First venue data for Angola (AO)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-ao\n* _0 venues_\n\n## Who's On First venue data for Antarctica (AQ)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-aq\n* _0 venues_\n\n## Who's On First venue data for Ar (US)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-us-ar\n* 134,534 venues\n\n## Who's On First venue data for Austria (AT)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-at\n* 286,613 venues\n\n## Who's On First venue data for Australia (AU)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-au\n* 697,614 venues\n\n## Who's On First venue data for Aruba (AW)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-aw\n* _0 venues_\n\n## Who's On First venue data for Az (US)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-us-az\n* 218,066 venues\n\n## Who's On First venue data for Bosnia and Herzegovina (BA)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-ba\n* _0 venues_\n\n## Who's On First venue data for Barbados (BB)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-bb\n* _0 venues_\n\n## Who's On First venue data for Bangladesh (BD)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-bd\n* 3 venues\n\n## Who's On First venue data for Belgium 
(BE)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-be\n* 770,124 venues\n\n## Who's On First venue data for Burkina Faso (BF)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-bf\n* _0 venues_\n\n## Who's On First venue data for Bulgaria (BG)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-bg\n* 4 venues\n\n## Who's On First venue data for Bahrain (BH)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-bh\n* _0 venues_\n\n## Who's On First venue data for Burundi (BI)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-bi\n* _0 venues_\n\n## Who's On First venue data for Benin (BJ)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-bj\n* _0 venues_\n\n## Who's On First venue data for Brunei (BN)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-bn\n* _0 venues_\n\n## Who's On First venue data for Bolivia (BO)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-bo\n* _0 venues_\n\n## Who's On First venue data for Brazil (BR)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-br\n* 19 venues\n\n## Who's On First venue data for The Bahamas (BS)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-bs\n* _0 venues_\n\n## Who's On First venue data for Bhutan (BT)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-bt\n* _0 venues_\n\n## Who's On First venue data for Botswana (BW)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-bw\n* _0 venues_\n\n## Who's On First venue data for Belarus (BY)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-by\n* _0 venues_\n\n## Who's On First venue data for Belize (BZ)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-bz\n* _0 venues_\n\n## Who's On First venue data for Ca (US)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-us-ca\n* 1,517,959 venues\n\n## Who's On First venue data for Indian Ocean Territories (CC)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-cc\n* _0 venues_\n\n## Who's On First venue data for Democratic Republic of the Congo (CD)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-cd\n* _0 venues_\n\n## Who's On First venue data for Central African Republic (CF)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-cf\n* _0 venues_\n\n## Who's On First venue data for Republic of Congo (CG)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-cg\n* _0 venues_\n\n## Who's On First venue data for Switzerland (CH)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-ch\n* 589,050 venues\n\n## Who's On First venue data for Ivory Coast (CI)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-ci\n* _0 venues_\n\n## Who's On First venue data for Chile (CL)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-cl\n* one venue\n\n## Who's On First venue data for Cameroon (CM)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-cm\n* _0 venues_\n\n## Who's On First venue data for China (CN)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-cn\n* 12 venues\n\n## Who's On First venue data for Co (US)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-us-co\n* 241,176 venues\n\n## Who's On First venue data for Costa Rica (CR)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-cr\n* _0 venues_\n\n## Who's On First venue data for Ct (US)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-us-ct\n* 147,491 
venues\n\n## Who's On First venue data for Cuba (CU)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-cu\n* _0 venues_\n\n## Who's On First venue data for Cape Verde (CV)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-cv\n* _0 venues_\n\n## Who's On First venue data for Curacao (CW)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-cw\n* _0 venues_\n\n## Who's On First venue data for Cyprus (CY)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-cy\n* _0 venues_\n\n## Who's On First venue data for Czech Republic (CZ)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-cz\n* 15 venues\n\n## Who's On First venue data for Dc (US)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-us-dc\n* 664 venues\n\n## Who's On First venue data for De (US)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-us-de\n* 38,172 venues\n\n## Who's On First venue data for Djibouti (DJ)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-dj\n* _0 venues_\n\n## Who's On First venue data for Denmark (DK)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-dk\n* 170,552 venues\n\n## Who's On First venue data for Dominica (DM)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-dm\n* _0 venues_\n\n## Who's On First venue data for Dominican Republic (DO)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-do\n* _0 venues_\n\n## Who's On First venue data for Algeria (DZ)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-dz\n* _0 venues_\n\n## Who's On First venue data for Ecuador (EC)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-ec\n* _0 venues_\n\n## Who's On First venue data for Estonia (EE)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-ee\n* 10 venues\n\n## Who's On First venue data for Egypt (EG)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-eg\n* _0 venues_\n\n## Who's On First venue data for Western Sahara (EH)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-eh\n* _0 venues_\n\n## Who's On First venue data for Eritrea (ER)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-er\n* _0 venues_\n\n## Who's On First venue data for Spain (ES)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-es\n* 955,132 venues\n\n## Who's On First venue data for Ethiopia (ET)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-et\n* _0 venues_\n\n## Who's On First venue data for Finland (FI)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-fi\n* 39 venues\n\n## Who's On First venue data for Fiji (FJ)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-fj\n* _0 venues_\n\n## Who's On First venue data for Fl (US)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-us-fl\n* 841,886 venues\n\n## Who's On First venue data for Federated States of Micronesia (FM)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-fm\n* _0 venues_\n\n## Who's On First venue data for France (FR)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-fr\n* 189,355 venues\n\n## Who's On First venue data for Ga (US)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-us-ga\n* 474,938 venues\n\n## Who's On First venue data for United Kingdom (GB)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-gb\n* 1,769,025 venues\n\n## Who's On First venue data for Grenada (GD)\n\n* 
https://github.com/whosonfirst-data/whosonfirst-data-venue-gd\n* _0 venues_\n\n## Who's On First venue data for Georgia (GE)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-ge\n* _0 venues_\n\n## Who's On First venue data for French Guiana (GF)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-gf\n* _0 venues_\n\n## Who's On First venue data for Guernsey (GG)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-gg\n* _0 venues_\n\n## Who's On First venue data for Ghana (GH)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-gh\n* _0 venues_\n\n## Who's On First venue data for Greenland (GL)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-gl\n* _0 venues_\n\n## Who's On First venue data for Gambia (GM)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-gm\n* _0 venues_\n\n## Who's On First venue data for Guinea (GN)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-gn\n* _0 venues_\n\n## Who's On First venue data for Guadeloupe (GP)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-gp\n* _0 venues_\n\n## Who's On First venue data for Equatorial Guinea (GQ)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-gq\n* _0 venues_\n\n## Who's On First venue data for Greece (GR)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-gr\n* 13 venues\n\n## Who's On First venue data for Guatemala (GT)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-gt\n* _0 venues_\n\n## Who's On First venue data for Guinea Bissau (GW)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-gw\n* _0 venues_\n\n## Who's On First venue data for Guyana (GY)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-gy\n* _0 venues_\n\n## Who's On First venue data for Hi (US)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-us-hi\n* 50,740 venues\n\n## Who's On First venue data for Hong Kong S.A.R. 
(HK)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-hk\n* 3 venues\n\n## Who's On First venue data for Honduras (HN)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-hn\n* _0 venues_\n\n## Who's On First venue data for Croatia (HR)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-hr\n* 7 venues\n\n## Who's On First venue data for Haiti (HT)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-ht\n* _0 venues_\n\n## Who's On First venue data for Hungary (HU)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-hu\n* 8 venues\n\n## Who's On First venue data for Ia (US)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-us-ia\n* 133,145 venues\n\n## Who's On First venue data for Id (US)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-us-id\n* 55,442 venues\n\n## Who's On First venue data for Ireland (IE)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-ie\n* 38 venues\n\n## Who's On First venue data for Il (US)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-us-il\n* 359,372 venues\n\n## Who's On First venue data for Isle of Man (IM)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-im\n* _0 venues_\n\n## Who's On First venue data for In (US)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-us-in\n* 218,922 venues\n\n## Who's On First venue data for Iraq (IQ)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-iq\n* _0 venues_\n\n## Who's On First venue data for Iran (IR)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-ir\n* _0 venues_\n\n## Who's On First venue data for Iceland (IS)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-is\n* one venue\n\n## Who's On First venue data for Italy (IT)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-it\n* 314,521 venues\n\n## Who's On First venue data for Jersey (JE)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-je\n* _0 venues_\n\n## Who's On First venue data for Jamaica (JM)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-jm\n* one venue\n\n## Who's On First venue data for Jordan (JO)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-jo\n* _0 venues_\n\n## Who's On First venue data for Japan (JP)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-jp\n* 12 venues\n\n## Who's On First venue data for Kenya (KE)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-ke\n* _0 venues_\n\n## Who's On First venue data for Kyrgyzstan (KG)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-kg\n* _0 venues_\n\n## Who's On First venue data for Cambodia (KH)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-kh\n* _0 venues_\n\n## Who's On First venue data for Kiribati (KI)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-ki\n* _0 venues_\n\n## Who's On First venue data for Comoros (KM)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-km\n* _0 venues_\n\n## Who's On First venue data for Saint Kitts and Nevis (KN)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-kn\n* _0 venues_\n\n## Who's On First venue data for North Korea (KP)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-kp\n* _0 venues_\n\n## Who's On First venue data for South Korea (KR)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-kr\n* one venue\n\n## Who's On First venue data for Ks (US)\n\n* 
https://github.com/whosonfirst-data/whosonfirst-data-venue-us-ks\n* 106,727 venues\n\n## Who's On First venue data for Kuwait (KW)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-kw\n* _0 venues_\n\n## Who's On First venue data for Ky (US)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-us-ky\n* 122,335 venues\n\n## Who's On First venue data for Kazakhstan (KZ)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-kz\n* _0 venues_\n\n## Who's On First venue data for La (US)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-us-la\n* 150,574 venues\n\n## Who's On First venue data for Lebanon (LB)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-lb\n* _0 venues_\n\n## Who's On First venue data for Saint Lucia (LC)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-lc\n* _0 venues_\n\n## Who's On First venue data for Liechtenstein (LI)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-li\n* one venue\n\n## Who's On First venue data for Sri Lanka (LK)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-lk\n* one venue\n\n## Who's On First venue data for Liberia (LR)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-lr\n* _0 venues_\n\n## Who's On First venue data for Lesotho (LS)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-ls\n* _0 venues_\n\n## Who's On First venue data for Lithuania (LT)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-lt\n* 5 venues\n\n## Who's On First venue data for Luxembourg (LU)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-lu\n* 3 venues\n\n## Who's On First venue data for Latvia (LV)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-lv\n* 4 venues\n\n## Who's On First venue data for Libya (LY)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-ly\n* _0 venues_\n\n## Who's On First venue data for Ma (US)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-us-ma\n* 318,674 venues\n\n## Who's On First venue data for Monaco (MC)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-mc\n* _0 venues_\n\n## Who's On First venue data for Md (US)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-us-md\n* 226,981 venues\n\n## Who's On First venue data for Me (US)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-us-me\n* 43,082 venues\n\n## Who's On First venue data for Madagascar (MG)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-mg\n* _0 venues_\n\n## Who's On First venue data for Marshall Islands (MH)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-mh\n* _0 venues_\n\n## Who's On First venue data for Mi (US)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-us-mi\n* 392,876 venues\n\n## Who's On First venue data for Macedonia (MK)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-mk\n* _0 venues_\n\n## Who's On First venue data for Mali (ML)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-ml\n* _0 venues_\n\n## Who's On First venue data for Myanmar (MM)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-mm\n* _0 venues_\n\n## Who's On First venue data for Mn (US)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-us-mn\n* 276,970 venues\n\n## Who's On First venue data for Mo (US)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-us-mo\n* 231,399 venues\n\n## Who's On First venue data for Martinique (MQ)\n\n* 
https://github.com/whosonfirst-data/whosonfirst-data-venue-mq\n* _0 venues_\n\n## Who's On First venue data for Mauritania (MR)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-mr\n* _0 venues_\n\n## Who's On First venue data for Ms (US)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-us-ms\n* 91,860 venues\n\n## Who's On First venue data for Mt (US)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-us-mt\n* 36,436 venues\n\n## Who's On First venue data for Mauritius (MU)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-mu\n* _0 venues_\n\n## Who's On First venue data for Maldives (MV)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-mv\n* _0 venues_\n\n## Who's On First venue data for Malawi (MW)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-mw\n* _0 venues_\n\n## Who's On First venue data for Mexico (MX)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-mx\n* 197 venues\n\n## Who's On First venue data for Malaysia (MY)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-my\n* 5 venues\n\n## Who's On First venue data for Mozambique (MZ)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-mz\n* _0 venues_\n\n## Who's On First venue data for Namibia (NA)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-na\n* _0 venues_\n\n## Who's On First venue data for Nc (US)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-us-nc\n* 417,900 venues\n\n## Who's On First venue data for Nd (US)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-us-nd\n* 35,676 venues\n\n## Who's On First venue data for Ne (US)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-us-ne\n* 88,850 venues\n\n## Who's On First venue data for Nigeria (NG)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-ng\n* _0 venues_\n\n## Who's On First venue data for Nh (US)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-us-nh\n* 62,840 venues\n\n## Who's On First venue data for Nicaragua (NI)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-ni\n* _0 venues_\n\n## Who's On First venue data for Nj (US)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-us-nj\n* 481,777 venues\n\n## Who's On First venue data for Netherlands (NL)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-nl\n* 363,304 venues\n\n## Who's On First venue data for Nm (US)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-us-nm\n* 78,015 venues\n\n## Who's On First venue data for Norway (NO)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-no\n* 24 venues\n\n## Who's On First venue data for Nepal (NP)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-np\n* _0 venues_\n\n## Who's On First venue data for Nauru (NR)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-nr\n* _0 venues_\n\n## Who's On First venue data for Nv (US)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-us-nv\n* 109,860 venues\n\n## Who's On First venue data for Ny (US)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-us-ny\n* 840,995 venues\n\n## Who's On First venue data for New Zealand (NZ)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-nz\n* 69,488 venues\n\n## Who's On First venue data for Oh (US)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-us-oh\n* 520,908 venues\n\n## Who's On First venue data for Ok (US)\n\n* 
https://github.com/whosonfirst-data/whosonfirst-data-venue-us-ok\n* 123,376 venues\n\n## Who's On First venue data for Oman (OM)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-om\n* one venue\n\n## Who's On First venue data for Or (US)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-us-or\n* 221,207 venues\n\n## Who's On First venue data for Pa (US)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-us-pa\n* 573,435 venues\n\n## Who's On First venue data for Peru (PE)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-pe\n* 2 venues\n\n## Who's On First venue data for Papua New Guinea (PG)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-pg\n* _0 venues_\n\n## Who's On First venue data for Philippines (PH)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-ph\n* one venue\n\n## Who's On First venue data for Pakistan (PK)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-pk\n* 3 venues\n\n## Who's On First venue data for Poland (PL)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-pl\n* 23 venues\n\n## Who's On First venue data for Saint Pierre and Miquelon (PM)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-pm\n* _0 venues_\n\n## Who's On First venue data for Palestine (PS)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-ps\n* _0 venues_\n\n## Who's On First venue data for Portugal (PT)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-pt\n* 21 venues\n\n## Who's On First venue data for Palau (PW)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-pw\n* _0 venues_\n\n## Who's On First venue data for Paraguay (PY)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-py\n* _0 venues_\n\n## Who's On First venue data for Qatar (QA)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-qa\n* _0 venues_\n\n## Who's On First venue data for Reunion (RE)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-re\n* _0 venues_\n\n## Who's On First venue data for Ri (US)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-us-ri\n* 54,974 venues\n\n## Who's On First venue data for Romania (RO)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-ro\n* _0 venues_\n\n## Who's On First venue data for Serbia (RS)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-rs\n* _0 venues_\n\n## Who's On First venue data for Russia (RU)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-ru\n* 8 venues\n\n## Who's On First venue data for Rwanda (RW)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-rw\n* _0 venues_\n\n## Who's On First venue data for Saudi Arabia (SA)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-sa\n* one venue\n\n## Who's On First venue data for Solomon Islands (SB)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-sb\n* _0 venues_\n\n## Who's On First venue data for Sc (US)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-us-sc\n* 143,778 venues\n\n## Who's On First venue data for Sd (US)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-us-sd\n* 39,398 venues\n\n## Who's On First venue data for Sweden (SE)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-se\n* 74 venues\n\n## Who's On First venue data for Singapore (SG)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-sg\n* 29 venues\n\n## Who's On First venue data for Slovenia (SI)\n\n* 
https://github.com/whosonfirst-data/whosonfirst-data-venue-si\n* 9 venues\n\n## Who's On First venue data for Slovakia (SK)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-sk\n* _0 venues_\n\n## Who's On First venue data for Sierra Leone (SL)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-sl\n* _0 venues_\n\n## Who's On First venue data for San Marino (SM)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-sm\n* _0 venues_\n\n## Who's On First venue data for Senegal (SN)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-sn\n* _0 venues_\n\n## Who's On First venue data for Somalia (SO)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-so\n* _0 venues_\n\n## Who's On First venue data for Suriname (SR)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-sr\n* _0 venues_\n\n## Who's On First venue data for South Sudan (SS)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-ss\n* _0 venues_\n\n## Who's On First venue data for Sao Tome and Principe (ST)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-st\n* _0 venues_\n\n## Who's On First venue data for El Salvador (SV)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-sv\n* _0 venues_\n\n## Who's On First venue data for Sint Maarten (SX)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-sx\n* _0 venues_\n\n## Who's On First venue data for Syria (SY)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-sy\n* _0 venues_\n\n## Who's On First venue data for Swaziland (SZ)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-sz\n* _0 venues_\n\n## Who's On First venue data for Chad (TD)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-td\n* _0 venues_\n\n## Who's On First venue data for Togo (TG)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-tg\n* _0 venues_\n\n## Who's On First venue data for Thailand (TH)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-th\n* 4 venues\n\n## Who's On First venue data for Tajikistan (TJ)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-tj\n* _0 venues_\n\n## Who's On First venue data for East Timor (TL)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-tl\n* _0 venues_\n\n## Who's On First venue data for Turkmenistan (TM)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-tm\n* _0 venues_\n\n## Who's On First venue data for Tn (US)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-us-tn\n* 276,423 venues\n\n## Who's On First venue data for Tonga (TO)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-to\n* _0 venues_\n\n## Who's On First venue data for Turkey (TR)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-tr\n* 56,173 venues\n\n## Who's On First venue data for Trinidad and Tobago (TT)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-tt\n* _0 venues_\n\n## Who's On First venue data for Tuvalu (TV)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-tv\n* _0 venues_\n\n## Who's On First venue data for Taiwan (TW)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-tw\n* _0 venues_\n\n## Who's On First venue data for Tx (US)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-us-tx\n* 1,122,821 venues\n\n## Who's On First venue data for Tanzania (TZ)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-tz\n* _0 venues_\n\n## Who's On First venue data for 
Ukraine (UA)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-ua\n* _0 venues_\n\n## Who's On First venue data for Uganda (UG)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-ug\n* _0 venues_\n\n## Who's On First venue data for United Nations (UN)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-un\n* _0 venues_\n\n## Who's On First venue data for Ut (US)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-us-ut\n* 97,146 venues\n\n## Who's On First venue data for Uruguay (UY)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-uy\n* 5 venues\n\n## Who's On First venue data for Uzbekistan (UZ)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-uz\n* _0 venues_\n\n## Who's On First venue data for Va (US)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-us-va\n* 329,937 venues\n\n## Who's On First venue data for Saint Vincent and the Grenadines (VC)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-vc\n* _0 venues_\n\n## Who's On First venue data for Venezuela (VE)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-ve\n* _0 venues_\n\n## Who's On First venue data for Vietnam (VN)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-vn\n* _0 venues_\n\n## Who's On First venue data for Vt (US)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-us-vt\n* 31,247 venues\n\n## Who's On First venue data for Vanuatu (VU)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-vu\n* _0 venues_\n\n## Who's On First venue data for Wa (US)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-us-wa\n* 348,342 venues\n\n## Who's On First venue data for Wi (US)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-us-wi\n* 259,568 venues\n\n## Who's On First venue data for Samoa (WS)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-ws\n* _0 venues_\n\n## Who's On First venue data for Wv (US)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-us-wv\n* 41,930 venues\n\n## Who's On First venue data for Wy (US)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-us-wy\n* 27,251 venues\n\n## Who's On First venue data for Kosovo (XK)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-xk\n* _0 venues_\n\n## Who's On First venue data for Null Island (XN)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-xn\n* _0 venues_\n\n## Who's On First venue data for Somaliland (XS)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-xs\n* _0 venues_\n\n## Who's On First venue data for Yemen (YE)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-ye\n* _0 venues_\n\n## Who's On First venue data for Mayotte (YT)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-yt\n* _0 venues_\n\n## Who's On First venue data for South Africa (ZA)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-za\n* 9 venues\n\n## Who's On First venue data for Zambia (ZM)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-zm\n* _0 venues_\n\n## Who's On First venue data for Zimbabwe (ZW)\n\n* https://github.com/whosonfirst-data/whosonfirst-data-venue-zw\n* _0 venues_\n\n" }, { "alpha_fraction": 0.4873417615890503, "alphanum_fraction": 0.49367088079452515, "avg_line_length": 7.315789699554443, "blob_id": "e17d7128e229741df16ac6ccfe6b68cfa72a95b0", "content_id": "47cd5b2bbc4ed0f3ec729bca3bf7de3ed184dc10", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Shell", "length_bytes": 158, "license_type": "no_license", "max_line_length": 21, "num_lines": 19, "path": "/bin/mk-concordances.sh", "repo_name": "whosonfirst-data/whosonfirst-data-venue", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\nfor REPO in \"$@\"\ndo\n\n cd ${REPO}\n\n if [ ! -d data ]\n then\n\tcontinue\n fi\n\n echo ${REPO}\n make concordances\n\n cd -\ndone\n\nexit 0\n" } ]
6
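The whosonfirst-data-venue record's Makefile counts venues with `find ./data -name '*.geojson' -print | wc -l`, and its `mk-data-json.py` derives the same per-repo counts by crawling each checkout. Below is a minimal pure-Python sketch of that count, assuming a hypothetical local checkout under `./data`, so the counts in data.json can be reproduced without the mapzen.whosonfirst.utils crawler.

```python
# Pure-Python equivalent of the Makefile's `count` target above:
# walk a checkout and count the *.geojson records it contains.
import os

def count_geojson(root="./data"):
    total = 0
    for _dirpath, _dirnames, filenames in os.walk(root):
        total += sum(1 for f in filenames if f.endswith(".geojson"))
    return total

print(count_geojson())
```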
pshuan0226/happy_birthday
https://github.com/pshuan0226/happy_birthday
960c91d914cac795d777f25b51a629e7b839dd11
27208cd0e08493bb5dda7b5661ee286f8989a6d0
3d1ff42f129834c640ab06fb728f896a769e7bee
refs/heads/master
2020-04-11T19:53:27.432885
2018-12-17T01:00:02
2018-12-17T01:00:02
162,051,518
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7660818696022034, "alphanum_fraction": 0.7777777910232544, "avg_line_length": 56.33333206176758, "blob_id": "83d2e5ffa51e38f72a0b46236ca05b263d63ff7c", "content_id": "ce740340801f4136ea2adeb09953fd415d02c1e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 171, "license_type": "no_license", "max_line_length": 71, "num_lines": 3, "path": "/convert.py", "repo_name": "pshuan0226/happy_birthday", "src_encoding": "UTF-8", "text": "from pydub import AudioSegment\nsound = AudioSegment.from_mp3(\"/root/Documents/dev/hi_cathleen/hp.mp3\")\nsound.export(\"/root/Documents/dev/hi_cathleen/hp.wav\", format=\"wav\")" }, { "alpha_fraction": 0.7445255517959595, "alphanum_fraction": 0.7445255517959595, "avg_line_length": 16.125, "blob_id": "d5753522c5245d8e78d96276b32b098a3b5b7aee", "content_id": "89c5edfad28f0519d58d54c085965f1fdb165d68", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 137, "license_type": "no_license", "max_line_length": 65, "num_lines": 8, "path": "/README.md", "repo_name": "pshuan0226/happy_birthday", "src_encoding": "UTF-8", "text": "# happy_birthday\nAn option menu created meant to congratulate my friend's birthday\n\n\n## Before you run:\n```python\npip install pygame\n```\n" } ]
2
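The `happy_birthday` record converts `hp.mp3` to `hp.wav` with pydub, and its README asks for pygame, so presumably a main script (not included in the record) plays the converted file. Below is a minimal sketch of that playback step, reusing the path hard-coded in `convert.py`; the playback loop is an assumption, not the repo's own code.

```python
# Play the hp.wav produced by convert.py above with pygame's mixer.
import time
import pygame

pygame.mixer.init()
pygame.mixer.music.load("/root/Documents/dev/hi_cathleen/hp.wav")
pygame.mixer.music.play()
while pygame.mixer.music.get_busy():   # block until playback finishes
    time.sleep(0.1)
```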
OrangeUtang/GibberishGibbernimGibbs
https://github.com/OrangeUtang/GibberishGibbernimGibbs
d152a1f3beaae59c023db777f83d4491f1f8cd35
5e6872cbeee33dade5da1f62edfce1117d2d9e31
3c85c5256bd8409656dd66377b923da617f87008
refs/heads/master
2020-04-23T14:00:14.165073
2019-02-22T20:29:19
2019-02-22T20:29:19
171,216,508
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7641509175300598, "alphanum_fraction": 0.7641509175300598, "avg_line_length": 29.285715103149414, "blob_id": "acb5ec9e96979473634080b33ded03743ed8c3ef", "content_id": "d17fc3f94231e2a9375d4492effedb53c2e87fa5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 212, "license_type": "no_license", "max_line_length": 76, "num_lines": 7, "path": "/ImgManager/manage.py", "repo_name": "OrangeUtang/GibberishGibbernimGibbs", "src_encoding": "UTF-8", "text": "from main import app\nfrom ImgManager.models import db, Person, Album, Picture\n\n\[email protected]_context_processor\ndef make_shell_context():\n return dict(app=app, db=db, Person=Person, Album=Album, Picture=Picture)\n" }, { "alpha_fraction": 0.6025853753089905, "alphanum_fraction": 0.6229146122932434, "avg_line_length": 32.26893997192383, "blob_id": "9edbdefee2c6629a785f23d41d0c17d6e50438b7", "content_id": "f20110ab6131914799d4d79b702cdab0ac9976e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9051, "license_type": "no_license", "max_line_length": 112, "num_lines": 264, "path": "/ImgManager/routes.py", "repo_name": "OrangeUtang/GibberishGibbernimGibbs", "src_encoding": "UTF-8", "text": "from flask_sqlalchemy import SQLAlchemy\r\nimport sqlalchemy\r\nimport secrets\r\nimport os\r\nfrom PIL import Image\r\nfrom flask import jsonify, make_response, request\r\nfrom ImgManager import app, db, bcrypt\r\nfrom ImgManager.models import row2dict, Person, Album, Picture\r\nfrom flask_login import login_user, current_user, logout_user, login_required\r\n\r\n\r\[email protected](404)\r\ndef page_not_found(e):\r\n return make_response(jsonify({\"code\": 404, \"msg\": \"404: Not Found\"}), 404)\r\n\r\n\r\[email protected]('/')\r\ndef soen487_a1():\r\n return jsonify({\"title\": \"SOEN487 Assignment 1\",\r\n \"student\": {\"id\": \"40035704\", \"name\": \"Joel Dusablon Senรฉcal\"}})\r\n\r\n\r\[email protected](\"/person\")\r\ndef get_all_person():\r\n person_list = Person.query.all()\r\n return jsonify([row2dict(person) for person in person_list])\r\n\r\n\r\[email protected](\"/person/<person_id>\")\r\ndef get_person(person_id):\r\n # id is a primary key, so we'll have max 1 result row\r\n person = Person.query.filter_by(id=person_id).first()\r\n if person:\r\n return jsonify(row2dict(person))\r\n else:\r\n return make_response(jsonify({\"code\": 404, \"msg\": \"Cannot find this person id.\"}), 404)\r\n\r\n\r\[email protected](\"/register\", methods=['GET', 'POST'])\r\ndef register():\r\n if current_user.is_authenticated:\r\n return make_response(jsonify({\"code\": 403, \"msg\": \"You are logged in please log out to register\"}), 403)\r\n\r\n # getting request info\r\n name = request.form.get(\"name\")\r\n pw = request.form.get(\"password\")\r\n\r\n # check info for errors\r\n if not name or not pw:\r\n return make_response(jsonify({\"code\": 403, \"msg\": \"Cannot put person. Missing mandatory fields.\"}), 403)\r\n\r\n if Person.query.filter_by(name=name).first():\r\n return make_response(jsonify({\"code\": 403, \"msg\": \"Cannot put person. 
Name is already taken.\"}), 403)\r\n\r\n # if valid, add to db\r\n hpw = bcrypt.generate_password_hash(pw).decode('utf-8')\r\n user = Person(name=name, password=hpw)\r\n db.session.add(user)\r\n db.session.commit()\r\n\r\n return jsonify({\"code\": 200, \"msg\": \"success\"})\r\n\r\n\r\[email protected](\"/login\", methods=['GET', 'POST'])\r\ndef login():\r\n # checking if already logged in\r\n if current_user.is_authenticated:\r\n return make_response(jsonify({\"code\": 403, \"msg\": \"Already logged in\"}), 403)\r\n\r\n name = request.form.get(\"name\")\r\n pw = request.form.get(\"password\")\r\n\r\n # data field check\r\n if not name or not pw:\r\n return make_response(jsonify({\"code\": 403, \"msg\": \"Cannot Login, invalid fields\"}), 403)\r\n\r\n user = Person.query.filter_by(name=name).first()\r\n\r\n # existing user check\r\n if not user:\r\n return make_response(jsonify({\"code\": 403, \"msg\": \"Cannot login, invalid account\"}), 403)\r\n\r\n # password and username combination check\r\n if user and bcrypt.check_password_hash(user.password, pw):\r\n user.id = str(getattr(user, 'id'))\r\n login_user(user)\r\n return jsonify({\"code\": 200, \"msg\": \"success\"})\r\n\r\n else:\r\n return make_response(jsonify({\"code\": 403, \"msg\": \"Cannot login, invalid password\"}), 403)\r\n\r\n\r\[email protected](\"/logout\", methods=['GET', 'POST'])\r\n@login_required\r\ndef logout():\r\n logout_user()\r\n return jsonify({\"code\": 200, \"msg\": \"success\"})\r\n\r\n\r\[email protected](\"/album\", methods={'GET'})\r\ndef get_all_album():\r\n album_list = Album.query.all()\r\n return jsonify([row2dict(album) for album in album_list])\r\n\r\n\r\[email protected](\"/album/<album_id>\", methods={'GET'})\r\ndef get_album(album_id):\r\n # id is a primary key, so we'll have max 1 result row\r\n album = Album.query.filter_by(id=album_id).first()\r\n if album:\r\n return jsonify(row2dict(album))\r\n else:\r\n return make_response(jsonify({\"code\": 404, \"msg\": \"Cannot find this album id.\"}), 404)\r\n\r\n\r\[email protected](\"/pictures\", methods={'GET'})\r\ndef get_all_pictures():\r\n picture_list = Picture.query.all()\r\n return jsonify([row2dict(picture) for picture in picture_list])\r\n\r\n\r\[email protected](\"/picture/<picture_id>\", methods={'GET'})\r\ndef get_picture(picture_id):\r\n # id is a primary key, so we'll have max 1 result row\r\n picture = Picture.query.filter_by(id=picture_id).first()\r\n if picture:\r\n return jsonify(row2dict(picture))\r\n else:\r\n return make_response(jsonify({\"code\": 404, \"msg\": \"Cannot find this picture id.\"}), 404)\r\n\r\n\r\[email protected](\"/createAlbum\", methods={'POST'})\r\n@login_required\r\ndef create_new_album():\r\n # getting request info\r\n name = request.form.get(\"name\")\r\n person_id = current_user.id\r\n\r\n if not name or not person_id:\r\n return make_response(jsonify({\"code\": 403, \"msg\": \"Cannot put person. 
Missing mandatory fields.\"}), 403)\r\n\r\n    if Album.query.filter_by(name=name).first():\r\n        return make_response(jsonify({\"code\": 403, \"msg\": \"Cannot create a second album with that name.\"}), 403)\r\n\r\n    # if valid, add to db\r\n    album = Album(name=name, person_id=person_id)\r\n    db.session.add(album)\r\n    db.session.commit()\r\n\r\n    return jsonify({\"code\": 200, \"msg\": \"success\"})\r\n\r\n\r\[email protected](\"/Album/<album_id>/addPicture\", methods=['POST', 'GET'])\r\n@login_required\r\ndef add_pic(album_id):\r\n    # make sure it's adding a pic to one of its own albums\r\n    # ('!=' rather than 'is not': identity comparison on ids is unreliable)\r\n    album_owner_id = Album.query.filter_by(id=album_id).first()\r\n    if album_owner_id.person_id != current_user.id:\r\n        return make_response(jsonify({\"code\": 403, \"msg\": \"Cannot add picture to albums you don't own\"}), 403)\r\n\r\n    form_picture = request.files['image']\r\n    name = request.form.get(\"name\")\r\n\r\n    if not name or not form_picture:\r\n        return make_response(jsonify({\"code\": 403, \"msg\": \"Invalid fields\"}), 403)\r\n\r\n    # taken from Corey M. Schafer code snippets\r\n    random_hex = secrets.token_hex(8)\r\n    _, f_ext = os.path.splitext(form_picture.filename)\r\n    picture_fn = random_hex + f_ext\r\n    picture_path = os.path.join(app.root_path + '\pictures', picture_fn)\r\n\r\n    # making sure the random hex hasn't already been produced\r\n    while os.path.exists(picture_path):\r\n        print(\"Creating new hex\")\r\n        random_hex = secrets.token_hex(8)\r\n        _, f_ext = os.path.splitext(form_picture.filename)\r\n        picture_fn = random_hex + f_ext\r\n        picture_path = os.path.join(app.root_path + '\pictures', picture_fn)\r\n\r\n    # if valid save picture with a valid path\r\n    i = Image.open(form_picture)\r\n    i.save(picture_path)\r\n    new_pic = Picture(name=name, album_id=album_id, path=picture_path)\r\n    db.session.add(new_pic)\r\n    db.session.commit()\r\n\r\n    return jsonify({\"code\": 200, \"msg\": \"success\"})\r\n\r\n\r\[email protected](\"/picture/<pic_id>\", methods={'GET'})\r\ndef show_one_pic(pic_id):\r\n    picture = Picture.query.filter_by(id=pic_id).first()\r\n    if picture:\r\n        return jsonify(row2dict(picture))\r\n    else:\r\n        return make_response(jsonify({\"code\": 404, \"msg\": \"Cannot find this picture id.\"}), 404)\r\n\r\n\r\[email protected](\"/picture/Album/<album_id>\", methods={'GET'})\r\ndef get_pic_by_album(album_id):\r\n    album = Album.query.filter_by(id=album_id).first()\r\n    pictures = Picture.query.filter_by(album_id=album_id)\r\n\r\n    if not pictures or not album:\r\n        return make_response(jsonify({\"code\": 404, \"msg\": \"Cannot find this album id.\"}), 404)\r\n    else:\r\n        return jsonify([row2dict(picture) for picture in pictures])\r\n\r\n\r\[email protected](\"/deletePic/<pic_id>\", methods={'GET'})\r\n@login_required\r\ndef delete_pic(pic_id):\r\n    target_pic = Picture.query.filter_by(id=pic_id).first()\r\n\r\n    # check if picture exists\r\n    if not target_pic:\r\n        return make_response(jsonify({\"code\": 404, \"msg\": \"Cannot find this picture id.\"}), 404)\r\n\r\n    # find the picture album\r\n    target_pic_albid = target_pic.album_id\r\n    target_pic_album = Album.query.filter_by(id=target_pic_albid).first()\r\n\r\n    # make sure the album belongs to the user\r\n    if not target_pic_album.person_id == current_user.id:\r\n        return make_response(jsonify({\"code\": 404, \"msg\": \"Cannot delete pictures from an album you don't own.\"}), 404)\r\n\r\n    if os.path.exists(target_pic.path):\r\n        try:\r\n            os.remove(target_pic.path)\r\n        except OSError:\r\n            pass\r\n\r\n    db.session.delete(target_pic)\r\n    db.session.commit()\r\n    return 
jsonify({\"code\": 200, \"msg\": \"success\"})\r\n\r\n\r\[email protected](\"/deleteAlbum/<album_id>\", methods={'POST'})\r\n@login_required\r\ndef delete_alb(album_id):\r\n album = Album.query.filter_by(id=album_id).first()\r\n\r\n if not album:\r\n return make_response(jsonify({\"code\": 404, \"msg\": \"Cannot find this person id.\"}), 404)\r\n\r\n if not album.person_id == current_user.id:\r\n return make_response(jsonify({\"code\": 404, \"msg\": \"Cannot comply.\"}), 404)\r\n\r\n pictures = Picture.query.filter_by(album_id=album_id)\r\n\r\n if pictures:\r\n for picture in pictures:\r\n print(picture.path)\r\n if os.path.exists(picture.path):\r\n try:\r\n os.remove(picture.path)\r\n except OSError:\r\n pass\r\n db.session.delete(picture)\r\n\r\n db.session.delete(album)\r\n db.session.commit()\r\n return jsonify({\"code\": 200, \"msg\": \"success\"})\r\n\r\n\r\n" }, { "alpha_fraction": 0.7589454054832458, "alphanum_fraction": 0.7589454054832458, "avg_line_length": 29.235294342041016, "blob_id": "ae558a9721a5dbec949fb3dacb693e1f40b594de", "content_id": "74fe1b9e539888eae2a6df75cc15abeb759a8f68", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 531, "license_type": "no_license", "max_line_length": 60, "num_lines": 17, "path": "/ImgManager/__init__.py", "repo_name": "OrangeUtang/GibberishGibbernimGibbs", "src_encoding": "UTF-8", "text": "from flask import Flask\r\nfrom ImgManager.config import DevConfig\r\nfrom flask_sqlalchemy import SQLAlchemy\r\nfrom flask_bcrypt import Bcrypt\r\nfrom flask_login import LoginManager\r\n\r\n# need an app before we import models because models need it\r\napp = Flask(__name__)\r\napp.config['SECRET_KEY'] = '5791628bb0b13ce0c676dfde280ba245'\r\napp.config.from_object(DevConfig)\r\ndb = SQLAlchemy(app)\r\nbcrypt = Bcrypt(app)\r\nlogin_manager = LoginManager(app)\r\nlogin_manager.login_view = 'login'\r\nlogin_manager.login_message_category = 'info'\r\n\r\nfrom ImgManager import routes\r\n" }, { "alpha_fraction": 0.6545827388763428, "alphanum_fraction": 0.6614227294921875, "avg_line_length": 34.65853500366211, "blob_id": "f6fc05824122ddf9bd85e8833e9561ffaa8599fe", "content_id": "06f9cb07cf6244b6f38c1d464c0e2f8243a64f5e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1462, "license_type": "no_license", "max_line_length": 105, "num_lines": 41, "path": "/ImgManager/models.py", "repo_name": "OrangeUtang/GibberishGibbernimGibbs", "src_encoding": "UTF-8", "text": "from ImgManager import db, login_manager\nfrom flask_login import UserMixin\n\n\ndef row2dict(row):\n return {c.name: str(getattr(row, c.name)) for c in row.__table__.columns if c.name is not \"password\"}\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n return Person.query.get(int(user_id))\n\n\nclass Person(db.Model, UserMixin):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.Text(), unique=True, nullable=False)\n password = db.Column(db.String(120), nullable=False)\n albums = db.relationship('Album', backref='owner', lazy=True)\n\n def __repr__(self):\n return \"<Person {}: {}>\".format(self.id, self.name)\n\n\nclass Album(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(20), unique=False, nullable=False)\n person_id = db.Column(db.Integer, db.ForeignKey('person.id'), nullable=False)\n pictures = db.relationship('Picture', backref='album', lazy=True)\n\n def __repr__(self):\n return \"<Album {}: {}, {}>\".format(self.id, 
self.name, self.person_id)\n\n\nclass Picture(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(20), unique=True, nullable=False)\n album_id = db.Column(db.Integer, db.ForeignKey('album.id'), nullable=False)\n path = db.Column(db.String(20), nullable=False, default='default.jpg')\n\n def __repr__(self):\n return \"<Album {}: {}, {}, {}>\".format(self.id, self.name, self.album_id, self.path)\n" }, { "alpha_fraction": 0.5893292427062988, "alphanum_fraction": 0.6095284819602966, "avg_line_length": 43.039878845214844, "blob_id": "f149acb8682cea6f9f544fafa7697aeeb1948286", "content_id": "30d2704cd6773d4f2b0fecf496323ba3731a8ad9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14357, "license_type": "no_license", "max_line_length": 176, "num_lines": 326, "path": "/tests/test_person.py", "repo_name": "OrangeUtang/GibberishGibbernimGibbs", "src_encoding": "UTF-8", "text": "import unittest\nimport json\nimport io\nimport os\nfrom ImgManager import app as tested_app\nfrom ImgManager import db as tested_db\nfrom ImgManager.models import Person, Album, Picture\n\n# tested_app.config.from_object(TestConfig)\n\n\nclass TestPerson(unittest.TestCase):\n def setUp(self):\n # set up the test DB\n self.db = tested_db\n self.db.create_all()\n self.app = tested_app.test_client()\n\n # Setting up some testing Data\n self.app.post(\"/register\", data={\"name\": \"Alice\", \"password\": \"Alice123\"})\n self.app.post(\"/register\", data={\"name\": \"Bob\", \"password\": \"Bob123\"})\n\n self.app.post(\"/login\", data={\"name\": \"Bob\", \"password\": \"Bob123\"})\n self.app.post(\"/createAlbum\", data={\"name\": \"Album1\"})\n self.app.post(\"/logout\")\n picture_path = os.path.join(tested_app.root_path + '\\pictures', 'test_img.jpg')\n picture_path2 = os.path.join(tested_app.root_path + '\\pictures', 'test_img2.jpg')\n\n new_pic = Picture(name='tst_img', album_id=1, path=picture_path)\n new_pic2 = Picture(name='tst_img2', album_id=1, path=picture_path2)\n self.db.session.add(new_pic)\n self.db.session.add(new_pic2)\n self.db.session.commit()\n\n def tearDown(self):\n # clean up the DB after the tests\n Person.query.delete()\n Album.query.delete()\n Picture.query.delete()\n self.db.session.commit()\n\n def test_get_all_person(self):\n # send the request and check the response status code\n response = self.app.get(\"/person\")\n self.assertEqual(response.status_code, 200)\n\n # convert the response data from json and call the asserts\n person_list = json.loads(str(response.data, \"utf8\"))\n self.assertEqual(type(person_list), list)\n self.assertDictEqual(person_list[0], {\"id\": \"1\", \"name\": \"Alice\"})\n self.assertDictEqual(person_list[1], {\"id\": \"2\", \"name\": \"Bob\"})\n\n def test_get_person_with_valid_id(self):\n # send the request and check the response status code\n response = self.app.get(\"/person/1\")\n self.assertEqual(response.status_code, 200)\n\n # convert the response data from json and call the asserts\n person = json.loads(str(response.data, \"utf8\"))\n self.assertDictEqual(person, {\"id\": \"1\", \"name\": \"Alice\"})\n\n def test_get_person_with_invalid_id(self):\n # send the request and check the response status code\n response = self.app.get(\"/person/1000000\")\n self.assertEqual(response.status_code, 404)\n\n # convert the response data from json and call the asserts\n body = json.loads(str(response.data, \"utf8\"))\n self.assertDictEqual(body, {\"code\": 404, \"msg\": \"Cannot find 
this person id.\"})\n\n def test_register(self):\n # do we really need to check counts?\n initial_count = Person.query.count()\n\n # send a valid register request\n response = self.app.post(\"/register\", data={\"name\": \"bobby\", \"password\": \"bobby123\"})\n self.assertEqual(response.status_code, 200)\n\n # send an invalid register request\n response = self.app.post(\"/register\", data={\"name\": \"bobby\", \"password\": \"bobby123\"})\n self.assertEqual(response.status_code, 403)\n\n # assert db updated correctly\n # check if the DB was updated correctly\n updated_count = Person.query.count()\n self.assertEqual(updated_count, initial_count + 1)\n\n def test_login(self):\n # send an invalid login\n response = self.app.post(\"/login\", data={\"name\": \"Alce\", \"password\": \"Alice13\"})\n self.assertEqual(response.status_code, 403)\n\n # send an invalid password\n response = self.app.post(\"/login\", data={\"name\": \"Alice\", \"password\": \"b\"})\n self.assertEqual(response.status_code, 403)\n\n response = self.app.post(\"/login\", data={\"name\": \"Alice\", \"password\": \"Alice123\"})\n self.assertEqual(response.status_code, 200)\n\n def test_logout(self):\n response = self.app.post(\"/login\", data={\"name\": \"Bob\", \"password\": \"Bob123\"})\n self.assertEqual(response.status_code, 200)\n\n response = self.app.post(\"/logout\")\n self.assertEqual(response.status_code, 200)\n\n def test_logout_notLogged(self):\n response = self.app.post(\"/logout\")\n self.assertEqual(response.status_code, 302)\n\n def test_add_Album(self):\n response = self.app.post(\"/login\", data={\"name\": \"Bob\", \"password\": \"Bob123\"})\n self.assertEqual(response.status_code, 200)\n\n init_alb_count = Album.query.count()\n\n response = self.app.post(\"/createAlbum\", data={\"name\": \"Summer\"})\n self.assertEqual(response.status_code, 200)\n\n updated_count = Album.query.count()\n self.assertEqual(updated_count, init_alb_count + 1)\n\n def test_add_Album_invalid(self):\n response = self.app.post(\"/login\", data={\"name\": \"Alice\", \"password\": \"Alice123\"})\n self.assertEqual(response.status_code, 200)\n\n init_alb_count = Album.query.count()\n\n # lacking data\n response = self.app.post(\"/createAlbum\")\n self.assertEqual(response.status_code, 403)\n\n # should succeed\n response = self.app.post(\"/createAlbum\", data={\"name\": \"Summer\"})\n self.assertEqual(response.status_code, 200)\n\n # should be invalid because same person has 2 album with same name\n response = self.app.post(\"/createAlbum\", data={\"name\": \"Summer\"})\n self.assertEqual(response.status_code, 403)\n\n # should have only 1 added album\n updated_count = Album.query.count()\n self.assertEqual(updated_count, init_alb_count + 1)\n\n def test_display_all_album(self):\n response = self.app.post(\"/register\", data={\"name\": \"Paul\", \"password\": \"Paul123\"})\n self.assertEqual(response.status_code, 200)\n\n response = self.app.post(\"/login\", data={\"name\": \"Paul\", \"password\": \"Paul123\"})\n self.assertEqual(response.status_code, 200)\n\n response = self.app.post(\"/createAlbum\", data={\"name\": \"Album2\"})\n self.assertEqual(response.status_code, 200)\n\n response = self.app.post(\"/createAlbum\", data={\"name\": \"Album3\"})\n self.assertEqual(response.status_code, 200)\n\n response = self.app.get(\"/album\")\n album_list = json.loads(str(response.data, \"utf8\"))\n self.assertEqual(album_list[0], {\"id\": \"1\", \"name\": \"Album1\", \"person_id\": \"2\"})\n self.assertEqual(album_list[1], {\"id\": \"2\", 
\"name\": \"Album2\", \"person_id\": \"3\"})\n\n def test_display_one_album(self):\n response = self.app.post(\"/register\", data={\"name\": \"testAlb\", \"password\": \"testAlb123\"})\n self.assertEqual(response.status_code, 200)\n\n response = self.app.post(\"/login\", data={\"name\": \"testAlb\", \"password\": \"testAlb123\"})\n self.assertEqual(response.status_code, 200)\n\n response = self.app.post(\"/createAlbum\", data={\"name\": \"Album3\"})\n self.assertEqual(response.status_code, 200)\n\n response = self.app.get(\"/album/2\")\n album = json.loads(str(response.data, \"utf8\"))\n self.assertEqual(album, {\"id\": \"2\", \"name\": \"Album3\", \"person_id\": \"3\"})\n\n def test_add_pic(self):\n init_pic_count = Picture.query.count()\n response = self.app.post(\"/register\", data={\"name\": \"PicTest\", \"password\": \"PicTest123\"})\n self.assertEqual(response.status_code, 200)\n\n response = self.app.post(\"/login\", data={\"name\": \"PicTest\", \"password\": \"PicTest123\"})\n self.assertEqual(response.status_code, 200)\n\n response = self.app.post(\"/createAlbum\", data={\"name\": \"Album2\"})\n self.assertEqual(response.status_code, 200)\n\n with open('test_img.jpg', 'rb') as img1:\n try:\n imgBytesIO = io.BytesIO(img1.read())\n except:\n print(\"File can't be read.\")\n\n response = self.app.post('/Album/2/addPicture', content_type='multipart/form-data',\n data={'image': (imgBytesIO, 'test_img.jpg'), 'name': 'testImg1'})\n self.assertEqual(response.status_code, 200)\n\n pic_count = Picture.query.count()\n self.assertEqual(init_pic_count+1, pic_count)\n\n def test_display_all_pic(self):\n response = self.app.get(\"/pictures\")\n picture_list = json.loads(str(response.data, \"utf8\"))\n self.assertEqual(picture_list[0], {\"id\": \"1\", \"name\": \"tst_img\", \"album_id\": \"1\", \"path\": 'C:\\\\Users\\\\joedu\\\\Desktop\\SOEN487_A1\\\\ImgManager\\\\pictures\\\\test_img.jpg'})\n self.assertEqual(picture_list[1], {\"id\": \"2\", \"name\": \"tst_img2\", \"album_id\": \"1\", \"path\": 'C:\\\\Users\\joedu\\\\Desktop\\\\SOEN487_A1\\\\ImgManager\\\\pictures\\\\test_img2.jpg'})\n\n def test_get_picture(self):\n # send the request and check the response status code\n response = self.app.get(\"/picture/1\")\n self.assertEqual(response.status_code, 200)\n\n # convert the response data from json and call the asserts\n picture = json.loads(str(response.data, \"utf8\"))\n self.assertDictEqual(picture, {\"id\": \"1\", \"name\": \"tst_img\", \"album_id\": \"1\", \"path\": 'C:\\\\Users\\\\joedu\\\\Desktop\\SOEN487_A1\\\\ImgManager\\\\pictures\\\\test_img.jpg'})\n\n def test_add_pic_OtherAlbum(self):\n response = self.app.post(\"/register\", data={\"name\": \"PicTest\", \"password\": \"PicTest123\"})\n self.assertEqual(response.status_code, 200)\n\n response = self.app.post(\"/login\", data={\"name\": \"PicTest\", \"password\": \"PicTest123\"})\n self.assertEqual(response.status_code, 200)\n\n response = self.app.post(\"/createAlbum\", data={\"name\": \"Album2\"})\n self.assertEqual(response.status_code, 200)\n\n with open('test_img.jpg', 'rb') as img1:\n try:\n imgBytesIO = io.BytesIO(img1.read())\n except:\n print(\"File can't be read.\")\n\n response = self.app.post('/Album/1/addPicture', content_type='multipart/form-data',\n data={'image': (imgBytesIO, 'test_img.jpg'), 'name': 'testImg1'})\n self.assertEqual(response.status_code, 403)\n\n def test_delete_pic(self):\n init_pic_count = Picture.query.count()\n response = self.app.post(\"/login\", data={\"name\": \"Bob\", \"password\": 
\"Bob123\"})\n self.assertEqual(response.status_code, 200)\n\n response = self.app.get(\"/deletePic/1\")\n self.assertEqual(response.status_code, 200)\n\n final_pic_count = Picture.query.count()\n self.assertEqual(init_pic_count - 1, final_pic_count)\n self.assertEqual(False, os.path.exists('C:\\\\Users\\\\joedu\\\\Desktop\\SOEN487_A1\\\\ImgManager\\\\pictures\\\\test_img.jpg'))\n\n def test_delete_invalid_pic(self):\n response = self.app.post(\"/register\", data={\"name\": \"deletePicTest\", \"password\": \"deletePicTest123\"})\n self.assertEqual(response.status_code, 200)\n\n response = self.app.post(\"/login\", data={\"name\": \"deletePicTest\", \"password\": \"deletePicTest123\"})\n self.assertEqual(response.status_code, 200)\n\n response = self.app.get(\"/deletePic/1\")\n self.assertEqual(response.status_code, 404)\n\n def test_delete_pic_dont_exists(self):\n response = self.app.post(\"/register\", data={\"name\": \"deletePicTest\", \"password\": \"deletePicTest123\"})\n self.assertEqual(response.status_code, 200)\n\n response = self.app.post(\"/login\", data={\"name\": \"deletePicTest\", \"password\": \"deletePicTest123\"})\n self.assertEqual(response.status_code, 200)\n\n response = self.app.get(\"/deletePic/10000000\")\n self.assertEqual(response.status_code, 404)\n\n def test_display_album(self):\n response = self.app.get(\"/picture/Album/1\")\n picture_list = json.loads(str(response.data, \"utf8\"))\n self.assertEqual(picture_list[0], {\"id\": \"1\", \"name\": \"tst_img\", \"album_id\": \"1\",\n \"path\": 'C:\\\\Users\\\\joedu\\\\Desktop\\SOEN487_A1\\\\ImgManager\\\\pictures\\\\test_img.jpg'})\n self.assertEqual(picture_list[1], {\"id\": \"2\", \"name\": \"tst_img2\", \"album_id\": \"1\",\n \"path\": 'C:\\\\Users\\joedu\\\\Desktop\\\\SOEN487_A1\\\\ImgManager\\\\pictures\\\\test_img2.jpg'})\n\n def test_display_album_invalid_id(self):\n response = self.app.get(\"/picture/Album/100000\")\n self.assertEqual(response.status_code, 404)\n\n def test_delete_album_invalid_id(self):\n response = self.app.post(\"/register\", data={\"name\": \"deletePicTest\", \"password\": \"deletePicTest123\"})\n self.assertEqual(response.status_code, 200)\n\n response = self.app.post(\"/login\", data={\"name\": \"deletePicTest\", \"password\": \"deletePicTest123\"})\n self.assertEqual(response.status_code, 200)\n\n response = self.app.post(\"/deleteAlbum/1\")\n self.assertEqual(response.status_code, 404)\n\n def test_delete_album(self):\n response = self.app.post(\"/register\", data={\"name\": \"deletePicTest\", \"password\": \"deletePicTest123\"})\n self.assertEqual(response.status_code, 200)\n\n response = self.app.post(\"/login\", data={\"name\": \"deletePicTest\", \"password\": \"deletePicTest123\"})\n self.assertEqual(response.status_code, 200)\n\n response = self.app.post(\"/createAlbum\", data={\"name\": \"Album2\"})\n self.assertEqual(response.status_code, 200)\n\n with open('test_img.jpg', 'rb') as img1:\n try:\n imgBytesIO = io.BytesIO(img1.read())\n except:\n print(\"File can't be read.\")\n\n response = self.app.post('/Album/2/addPicture', content_type='multipart/form-data',\n data={'image': (imgBytesIO, 'test_img.jpg'), 'name': 'testImg1'})\n self.assertEqual(response.status_code, 200)\n\n with open('test_img.jpg', 'rb') as img1:\n try:\n imgBytesIO = io.BytesIO(img1.read())\n except:\n print(\"File can't be read.\")\n\n response = self.app.post('/Album/2/addPicture', content_type='multipart/form-data',\n data={'image': (imgBytesIO, 'test_img.jpg'), 'name': 'testImg2'})\n 
self.assertEqual(response.status_code, 200)\n\n init_pic_count = Picture.query.count()\n\n response = self.app.post(\"/deleteAlbum/2\")\n self.assertEqual(response.status_code, 200)\n\n pic_count = Picture.query.count()\n self.assertEqual(init_pic_count - 2, pic_count)\n" }, { "alpha_fraction": 0.4726687967777252, "alphanum_fraction": 0.6881029009819031, "avg_line_length": 14.473684310913086, "blob_id": "a5b84caf150c84459d8fc85f3c28a229d2482c80", "content_id": "fa3c76bdcd2f891ad88b19d90f0298c2e3ceb19e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 311, "license_type": "no_license", "max_line_length": 22, "num_lines": 19, "path": "/requirement.txt", "repo_name": "OrangeUtang/GibberishGibbernimGibbs", "src_encoding": "UTF-8", "text": "Click\t7.0\r\nFlask\t1.0.2\r\nFlask-Bcrypt\t0.7.1\r\nFlask-Login\t0.4.1\r\nFlask-SQLAlchemy\t2.3.2\r\nFlask-WTF\t0.14.2\r\nJinja2\t2.10\r\nMarkupSafe\t1.1.0\r\nPillow\t5.4.1\r\nSQLAlchemy\t1.2.17\r\nWTForms\t2.2.1\r\nWerkzeug\t0.14.1\r\nbcrypt\t3.1.6\r\ncffi\t1.11.5\r\nitsdangerous\t1.1.0\r\npip\t9.0.3\t19.0.2\r\npycparser\t2.19\r\nsetuptools\t39.0.1\r\nsix\t1.12.0" } ]
6
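The delete_alb route above enforces ownership before cascading the delete: each Picture row in the album has its file unlinked from disk (with OSError swallowed for already-missing files) before the Picture rows and finally the Album row are removed in one commit. A minimal client-side sketch of that flow, assuming the Flask app runs locally on its default port 5000 and reusing the Bob/Bob123 account from the test fixtures; the base URL and the requests dependency are assumptions, not part of the repository:

import requests

BASE = "http://127.0.0.1:5000"  # assumed local dev server; the repo does not pin a host or port

with requests.Session() as s:
    # Log in first so the @login_required guard on /deleteAlbum passes;
    # the Session object carries the Flask login cookie between calls.
    s.post(BASE + "/login", data={"name": "Bob", "password": "Bob123"})

    # The owner deletes album 1: picture files are removed from disk,
    # then the Picture rows and the Album row go away in one commit.
    r = s.post(BASE + "/deleteAlbum/1")
    print(r.status_code, r.json())  # 200 {"code": 200, "msg": "success"}

    # An unknown album id, or one owned by someone else, hits a 404 branch.
    r = s.post(BASE + "/deleteAlbum/999")
    print(r.status_code)            # 404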
MATE-Programming/7_Functions
https://github.com/MATE-Programming/7_Functions
707b814b9e3a0407335126cf580492dbb063bdc1
e38c0599ce1d4453a606e52bb45bd5cbfa365a4d
0493ffe9cfc46b64928a6fd0dc05668735ca5662
refs/heads/main
2023-05-21T03:18:10.721989
2021-07-14T07:49:22
2021-07-14T07:49:22
354,830,805
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6496034860610962, "alphanum_fraction": 0.6741167902946472, "avg_line_length": 36.486488342285156, "blob_id": "83fc145dbd3b4fbe4b4ae592c667f7df5e23bff0", "content_id": "6e8170331743835a87e244bd60b308f541b76b43", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2131, "license_type": "no_license", "max_line_length": 158, "num_lines": 37, "path": "/README.md", "repo_name": "MATE-Programming/7_Functions", "src_encoding": "UTF-8", "text": "![alt MATE Programming Lab](https://github.com/MATE-Programming/Lab_logo/blob/main/lab_6.svg?raw=true)\n\n# ะŸั€ะฐะบั‚ะธั‡ะตัะบะพะต ะทะฐะดะฐะฝะธะต ะฝะฐ ั‚ะตะผัƒ ะคัƒะฝะบั†ะธะธ\n\n#### 1. ะŸะพะปัƒั‡ะธั‚ะต ะดะฒะฐ ั†ะตะปั‹ั… ั‡ะธัะปะฐ (a ะธ b) ะพั‚ ะฟะพะปัŒะทะพะฒะฐั‚ะตะปั, ะฒั‹ั‡ะธัะปะธั‚ะต ััƒะผะผัƒ ั‡ะธัะตะป ะพั‚ a ะดะพ b. \n -ะŸั€ะตะดะฟะพะปะพะถะธะผ, ั‡ั‚ะพ a <= b. \n -ะกะพะทะดะฐะนั‚ะต ั„ัƒะฝะบั†ะธัŽ sum_ab, ะบะพั‚ะพั€ะฐั ะฟั€ะธะฝะธะผะฐะตั‚ ะดะฒะฐ ั†ะตะปั‹ั… ั‡ะธัะปะฐ ะธ ะฒะพะทะฒั€ะฐั‰ะฐะตั‚ ััƒะผะผัƒ ั‡ะธัะตะป ะพั‚ a ะดะพ b.\n\n| 1 10 | 55 |\n\n{% spoiler \"ะŸะพะดัะบะฐะทะบะฐ\" %}\nะ˜ัะฟะพะปัŒะทัƒะน ั†ะธะบะป for ะธะปะธ while ะดะปั ัั‚ะพะน ะทะฐะดะฐั‡ะธ, ั‚ะฐะบะถะต ั‚ะตะฑะต ะฟั€ะธะณะพะดะธั‚ัั if;)\n{% endspoiler %}\n\n{% next \"ะžั‚ะบั€ั‹ั‚ัŒ ะทะฐะดะฐะฝะธะต 2\" %}\n#### 2. ะกะพะทะดะฐะนั‚ะต ะฟั€ะพะณั€ะฐะผะผัƒ, ะบะพั‚ะพั€ะฐั ะฟะพะปัƒั‡ะฐะตั‚ ะพั‚ ะฟะพะปัŒะทะพะฒะฐั‚ะตะปั ะฟะพะปะพะถะธั‚ะตะปัŒะฝะพะต ั†ะตะปะพะต ั‡ะธัะปะพ X ะธ ัะพะทะดะฐะนั‚ะต ั„ัƒะฝะบั†ะธัŽ, ะบะพั‚ะพั€ะฐั ะฒั‹ั‡ะธัะปัะตั‚ ั€ะตะทัƒะปัŒั‚ะฐั‚ ัะปะตะดัƒัŽั‰ะตะณะพ ัƒั€ะฐะฒะฝะตะฝะธั.\n - 1+(1+2)+(1+2+3)+(1+2+3+4)+....+(1+2+...+X)\n\n| Given X | OUTPUT |\n\n\n{% next \"ะžั‚ะบั€ั‹ั‚ัŒ ะทะฐะดะฐะฝะธะต 3\" %}\n#### 3.ะŸะพะปัƒั‡ะธั‚ะต ะพั‚ ะฟะพะปัŒะทะพะฒะฐั‚ะตะปั ั‡ะตั‚ั‹ั€ะต ั†ะตะปั‹ั… ั‡ะธัะปะฐ ะดะปั ะฟะตั€ะตะผะตะฝะฝั‹ั… a, b, c, x ะธ ะฟะพะดัั‚ะฐะฒัŒั‚ะต ะฟะพะด ั„ะพั€ะผัƒะปัƒ.\n -ะŸะพะปัƒั‡ะธั‚ะต ั‡ะตั‚ั‹ั€ะต ะฒะฒะพะดะฐ ะพั‚ ะฟะพะปัŒะทะพะฒะฐั‚ะตะปั ะธ ะฒั‹ะทะพะฒะธั‚ะต ั„ัƒะฝะบั†ะธัŽ, ะฟะตั€ะตะดะฐะฒ ัั‚ะธ ะฐั€ะณัƒะผะตะฝั‚ั‹\n -ะกะพะทะดะฐะนั‚ะต ั„ัƒะฝะบั†ะธัŽ, ะบะพั‚ะพั€ะฐั ะฟั€ะธะฝะธะผะฐะตั‚ ั‡ะตั‚ั‹ั€ะต ะฐั€ะณัƒะผะตะฝั‚ะฐ x, a, b, c. ะ’ะตั€ะฝัƒั‚ัŒ ั€ะตะทัƒะปัŒั‚ะฐั‚ a * x ^ 2 + b * x + c\n \n{% next \"ะžั‚ะบั€ั‹ั‚ัŒ ะทะฐะดะฐะฝะธะต 4\" %}\n#### 4. ะกะพะทะดะฐะนั‚ะต ะฟั€ะพะณั€ะฐะผะผัƒ, ะบะพั‚ะพั€ะฐั ะฟะตั€ะตะฒะพะดะธั‚ ะฒะฒะตะดั‘ะฝะฝะพะต ั‡ะธัะปะพ (ะพั‚ 1 ะดะพ 20) ะฒ ัะปะพะฒะพ. 
\n\n ะ’ะฒะตะดะธั‚ะต ั‡ะธัะปะพ: 4\n ะ ะตะทัƒะปัŒั‚ะฐั‚: ะงะตั‚ั‹ั€ะต\n \n ะ’ะฒะตะดะธั‚ะต ั‡ะธัะปะพ: 10\n ะ ะตะทัƒะปัŒั‚ะฐั‚: ะ”ะตััั‚ัŒ\n \n ะ’ะฒะตะดะธั‚ะต ั‡ะธัะปะพ: 20\n ะ ะตะทัƒะปัŒั‚ะฐั‚: ะ”ะฒะฐะดั†ะฐั‚ัŒ\n" }, { "alpha_fraction": 0.8125, "alphanum_fraction": 0.8125, "avg_line_length": 47, "blob_id": "08486e741f218f41fad91a160a04e07d7ed8aa18", "content_id": "7e30a9e6c4b5407c45ebbb5dccf57d320aa4f31d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 48, "license_type": "no_license", "max_line_length": 47, "num_lines": 1, "path": "/problem2.py", "repo_name": "MATE-Programming/7_Functions", "src_encoding": "UTF-8", "text": "#Write the solution of the second exercise here\n" }, { "alpha_fraction": 0.8085106611251831, "alphanum_fraction": 0.8085106611251831, "avg_line_length": 46, "blob_id": "994d014d6c5137ee75e7cde8ffb6155d59063623", "content_id": "dd8fdd165bc02f81d6baab68f06ab6e48a7e341c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 47, "license_type": "no_license", "max_line_length": 46, "num_lines": 1, "path": "/problem3.py", "repo_name": "MATE-Programming/7_Functions", "src_encoding": "UTF-8", "text": "#Write the solution of the third exercise here\n" } ]
3
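problem2.py and problem3.py above are empty stubs, so a sketch of what exercise 1 of the README asks for may help; this is one possible solution under the stated a <= b assumption, not the course's reference answer:

# Exercise 1 sketch: sum_ab returns the sum of the integers from a to b inclusive.
def sum_ab(a, b):
    total = 0
    for n in range(a, b + 1):  # range excludes the right endpoint, hence b + 1
        total += n
    return total

a = int(input())
b = int(input())
print(sum_ab(a, b))  # for the sample row | 1 10 | 55 | this prints 55

Exercise 2 then composes directly on top of it: sum(sum_ab(1, i) for i in range(1, x + 1)) evaluates 1 + (1+2) + ... + (1+2+...+X).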
bmath3w/aws_lambdaToDynamo
https://github.com/bmath3w/aws_lambdaToDynamo
32f67225f5e95e54c7940ef023a581b69d4d509d
cb588e751d37e82e1d5415e76dbcedb95a9cafd7
c3d4fa20273d1124ae0dde682482d44a5da14c6a
refs/heads/master
2020-05-23T19:34:23.320410
2019-05-15T23:21:22
2019-05-15T23:21:22
186,915,000
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5962508916854858, "alphanum_fraction": 0.6085075736045837, "avg_line_length": 23.76785659790039, "blob_id": "040b06cbe2681110fdf599f29f44d050b2c887e0", "content_id": "8eb83623be6cf83afe43f9294f7927998af72403", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1387, "license_type": "no_license", "max_line_length": 64, "num_lines": 56, "path": "/lambda_function.py", "repo_name": "bmath3w/aws_lambdaToDynamo", "src_encoding": "UTF-8", "text": "from __future__ import print_function # Python 2/3 compatibility\nimport boto3\nimport json\nimport decimal\nfrom boto3.dynamodb.conditions import Key, Attr\nfrom botocore.exceptions import ClientError\n\ndynamodb = boto3.resource('dynamodb')\n\ndef lambda_handler(event, context):\n response = read_data_all(event)\n return {\n 'statusCode': 200,\n 'body': json.dumps(response)\n }\n \ndef insert_data():\n table = dynamodb.Table('employee')\n record = {}\n record['username']='bobby5'\n record['lastname']='mathew5'\n table.put_item(\n Item={\n 'username': record['username'],\n 'lastname': record['lastname']\n }\n )\n return 50*50\n\ndef read_data_primary():\n table = dynamodb.Table('employee')\n response = table.get_item(\n Key={\n 'username': 'bobby'\n }\n )\n return response\n\ndef read_data_all(event):\n table = dynamodb.Table('employee')\n filter_key = 'lastname'\n try:\n filter_value = event[\"queryStringParameters\"]['name']\n except Exception as e:\n return {\n 'statusCode': 400,\n 'body': 'invalid parameter'\n }\n\n if filter_key and filter_value:\n filtering_exp = Key(filter_key).eq(filter_value)\n response = table.scan(FilterExpression=filtering_exp)\n else:\n response = table.scan()\n \n return response['Items']\n" } ]
1
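lambda_handler above delegates to read_data_all, which scans the employee table with a Key('lastname').eq(...) filter taken from the request's query string, so a local smoke test only needs an event dict shaped like an API Gateway proxy request. The invocation below is a sketch, not part of the repo, and running it for real still requires AWS credentials and an existing employee DynamoDB table:

# Hypothetical local invocation; ?name=mathew5 arrives as queryStringParameters.
fake_event = {"queryStringParameters": {"name": "mathew5"}}

result = lambda_handler(fake_event, None)  # the context argument is unused above
print(result["statusCode"])  # 200
print(result["body"])        # JSON-encoded list of items whose lastname matches

One quirk worth noting: when the name parameter is missing, read_data_all returns its own {'statusCode': 400, ...} dict, which lambda_handler then wraps inside a 200 response body instead of propagating the 400.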
TrackPickUniMannheim/Server-TrackPick
https://github.com/TrackPickUniMannheim/Server-TrackPick
26b51ebc513f0dea9ff145dcd214bf790c46f637
84879810c42b8dfedd45c1bca878b35462a27cd2
cc91ec72ae338f9a4b0d52303f3d9eaa816b4bf3
refs/heads/master
2021-01-17T10:02:13.893441
2017-10-24T21:21:45
2017-10-24T21:21:45
84,002,009
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5137270092964172, "alphanum_fraction": 0.5230709910392761, "avg_line_length": 39.55078125, "blob_id": "6ff0c357c9cafdb4ed3b793c9d3af47f4b66b00c", "content_id": "057f612378e4de631456c0438d59faafe01531bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10381, "license_type": "no_license", "max_line_length": 324, "num_lines": 256, "path": "/Server-TrackPick/DatabaseController.py", "repo_name": "TrackPickUniMannheim/Server-TrackPick", "src_encoding": "UTF-8", "text": "import pika\nimport sys\nimport time\nimport json\nimport csv\nfrom pika import exceptions\nimport logging\nimport os\nfrom collections import deque\n\n#@Developers: Niranjan Basnet, Zuli Wu\n\n#Module Description: Handling incoming streams for database interaction.\n\n# Expected Stream format: \"{\"deviceid\":\"d37825daa97041dd\",\"sensortype\":\"accelerometer\",\"\n# data\":[{\"timestamp\":\"1496078282698\",\"x\":\"2.23517e-7\",\"y\":\"9.77631\",\"z\":\"0.812348\"},{\"timestamp\":\"1496078282698\",\"x\":\"2.23517e-7\",\"y\":\"9.77631\",\"z\":\"0.812348\"}]}\n\nclass _CallbackResult(object):\n \"\"\" CallbackResult is a non-thread-safe implementation for receiving\n callback results; INTERNAL USE ONLY!\n \"\"\"\n __slots__ = ('_value_class', '_ready', '_values')\n def __init__(self, value_class=None):\n\n\n\n self._value_class = value_class\n self._ready = None\n self._values = None\n self.reset()\n\n def reset(self):\n\n self._ready = False\n self._values = None\n\nLOGGER = logging.getLogger(__name__)\n\nuserid = \" \".join(sys.argv[1:]) # Userid from the terminal signifies the collection name for the user\ndatenow = time.strftime('%Y.%m.%d-%H.%M.%S') # Date time signifies the unique timestamp recorded in session\ndatenow = str(datenow)\ncollname = userid+'-'+datenow # Collection name when new instance is created\n\ntry:\n from pymongo import MongoClient\n client = MongoClient('localhost', 27017)\n db = client['test_database']\n collection = db['test_database'] # test_database to mock up and test instead of the actual database 'trackpick'\n if collection is not None: # See whether the instance of database is created or not\n\n print('Connecting with Database...')\n\n db.create_collection(collname)\n getcoll = db.get_collection(collname)\n getcoll = str(getcoll)\n getcoll = getcoll[129:-2]\n if getcoll == collname: # See whether collection is created or not\n print(\"Collection with name \"+ collname + \"successfully created\")\n else:\n print(\"Collection not created\")\nexcept:\n print(\"Database not found...Please start the MongoDB and try again...\") # Checks exception for no database\n exit()\n\n\nprint (\"Fetching data and inserting into database...\")\n\nconnection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost')) # Connection with the designated queue\nchannel = connection.channel()\n\nchannel.queue_declare(queue='trackPick')\n\ndef close(self,reply_code=200,reply_text='Normal Shutdown'):\n try:\n if self.is_closed:\n LOGGER.debug('Close called on closed connection (%s): %s',\n reply_code, reply_text)\n return\n LOGGER.info('Closing connection (%s): %s', reply_code, reply_text)\n self._user_initiated_close = True\n for impl_channel in pika.compat.dictvalues(self._impl._channels):\n channel = impl_channel._get_cookie()\n if channel.is_open:\n try:\n channel.close(reply_code, reply_text)\n except exceptions.ChannelClosed as exc:\n # Log and suppress broker-closed channel\n LOGGER.warning('Got Channel Closed exception 
while closing channel '\n                                       'from connection.close: %r', exc)\n            finally:\n                self._cleanup()\n\n        self._impl.close(reply_code, reply_text)\n        self._flush_output(self._closed_result.is_ready)\n\n    except:\n        print(\"Connection could not be closed\")\ndef cleanup(self):\n        self._impl.ioloop.deactivate_poller()\n        self._ready_events.clear()\n        self._opened_result.reset()\n        self._open_error_result.reset()\n        self._closed_result.reset()\n\ndef queue_purge(self, queue=''):\n\n    with _CallbackResult(self._MethodFrameCallbackResultArgs) as \\\n            purge_ok_result:\n        self._impl.queue_purge(callback=purge_ok_result.set_value_once,\n                               queue=queue,\n                               nowait=False)\n\n        self._flush_output(purge_ok_result.is_ready)\n    return purge_ok_result.value.method_frame\n\ndef queue_delete(self, queue='', if_unused=False, if_empty=False):\n\n    with _CallbackResult(self._MethodFrameCallbackResultArgs) as \\\n            delete_ok_result:\n        self._impl.queue_delete(callback=delete_ok_result.set_value_once,\n                                queue=queue,\n                                if_unused=if_unused,\n                                if_empty=if_empty,\n                                nowait=False)\n\n        self._flush_output(delete_ok_result.is_ready)\n    return delete_ok_result.value.method_frame\n\n\ndef callback(ch, method, properties, indata):\n    if indata is None:\n        print(\"No data in the queue. Please restart the session to fill the queue first\")\n    else:\n\n        indata = indata.decode('utf-8')\n        #Dequeue from incoming data stream here\n        try:\n            DONE = False\n            if(indata=='sessionclosed'):\n                print(\"Session close signal received\")\n                DONE = True\n            #Loading data from the incoming data\n            else:\n                data = json.loads(indata)\n                #Passing onto the dictionary to parse first level data\n                dict = data\n                serverTime = dict['servertime']\n                # Passing onto the dictionary to parse second level data\n                clientData = dict['cdata']\n                # Passing onto the dictionary to parse third level data\n                dict = clientData\n                sensorType = dict['sensortype']\n                deviceId = dict['deviceid']\n                mainClientDataAll = dict['data']\n                #Removing the extra brackets to make it parsable for the data\n\n                #Loop over buffered data and insert each value separately into mongo\n                for x in range(0,len(mainClientDataAll)):\n                    mainClientData = mainClientDataAll[x]\n                    dict = mainClientData\n                    #Main data from the client\n\n                    if sensorType == 'video':\n                        videoname = dict['videoname']\n                        insertData = '{\"servertime\":' + '\"' + serverTime + '\",' + '\"sensortype\":' + '\"' + sensorType +'\",' + '\"deviceid\":' + '\"' + deviceId + '\",' + '\"clienttime\":\"null\",\"x\":\"null\",\"y\":\"null\",\"z\":\"null\",\"videofilename\":' + '\"' + videoname + '\"}'\n                    else :\n                        clientTime = dict['timestamp']\n                        xData = dict['x']\n                        yData = dict['y']\n                        zData = dict['z']\n                        #Preparation of the data for efficient insertion\n                        insertData = '{\"servertime\":' + '\"' + serverTime + '\",' + '\"sensortype\":' + '\"' + sensorType +'\",' + '\"deviceid\":' + '\"' + deviceId + '\",' + '\"clienttime\":' + '\"' + clientTime + '\",' + '\"x\":' + '\"' + xData + '\",' + '\"y\":' + '\"' + yData + '\",' + '\"z\":' + '\"' + zData + '\",' + '\"videofilename\":\"null\"}'\n\n                    print(insertData)\n\n                    db.get_collection(name=getcoll).insert_one( # Database instance finaldata as \"Key\"\n                                                                # [insertdata] as \"Value\" as documents\n                        {\n\n                            \"session\": [insertData]\n\n                        }\n                    )\n            #DONE = True\n        except:\n            print(\"Could not parse the incoming data. Please try again...\")\n\n        #DONE = True\n        print('Data successfully stored into MongoDB')\n\n        if DONE is True:\n            print(\"Creating CSV now\")\n            try:\n                coll_sensors = []\n                coll_devices = []\n\n                #from pymongo import MongoClient\n                #client = MongoClient('localhost', 27017)\n                # db = client['test_database']\n                for x in db[collname].find():\n                    coll_record = []\n                    dict = json.loads(''.join(map(str, x['session'])))\n\n                    if not dict['deviceid'] in coll_devices:\n                        coll_devices.append(dict['deviceid'])\n\n                    if not dict['sensortype'] in coll_sensors:\n                        coll_sensors.append(dict['sensortype'])\n\n                for y in coll_devices :\n                    for z in coll_sensors :\n                        coll_records = []\n                        for x in db[collname].find():\n                            coll_record = []\n                            dict = json.loads(''.join(map(str, x['session'])))\n\n                            if dict['deviceid'] == y and dict['sensortype'] == z:\n                                coll_record.append(dict['servertime'])\n                                coll_record.append(dict['sensortype'])\n                                coll_record.append(dict['deviceid'])\n                                coll_record.append(dict['clienttime'])\n                                coll_record.append(dict['x'])\n                                coll_record.append(dict['y'])\n                                coll_record.append(dict['z'])\n                                coll_records.append(coll_record)\n                        \n                        if not len(coll_records) == 0:\n                            csvname = collname + y + '-' + z + '.csv'\n                            csvname = str(csvname)\n\n                            with open(csvname, \"w\") as f:\n                                fields = ['servertime','sensortype','deviceid','clienttime','x','y','z']\n                                writer = csv.DictWriter(f, fieldnames=fields)\n                                writer.writeheader()\n                                writer = csv.writer(f)\n                                writer.writerows(coll_records)\n\n\n                print('Data Extraction was successful!')\n                try:\n                    channel.queue_delete(queue='trackPick')\n                    print(\"Queue is deleted\")\n                except:\n                    print(\"Queue was not cleared\")\n                connection.close()\n\n            except Exception as e: print(e)\n\n\nchannel.basic_consume(callback, queue='trackPick')\n\nchannel.queue_declare(exclusive=True)\n\nchannel.start_consuming()\n\nconnection.close()\n" }, { "alpha_fraction": 0.5216480493545532, "alphanum_fraction": 0.5264199376106262, "avg_line_length": 38.05454635620117, "blob_id": "ebbeacf6d60fc3b4e928a5c0c3cf0f6a590614dc", "content_id": "3180e37d22e497116795", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8592, "license_type": "no_license", "max_line_length": 123, "num_lines": 220, "path": "/Server-TrackPick/ServerTCPReceiver.py", "repo_name": "TrackPickUniMannheim/Server-TrackPick", "src_encoding": "UTF-8", "text": "import sys\nimport socket\nimport threading\nimport time\nimport pika\nfrom pika import exceptions\nimport logging\n\n#@Developers: Niranjan Basnet, Zuli Wu\n\n#Module Description: Handling incoming streams for pushing into the RabbitMQ queue.\n\nQUIT = False\nLOGGER = logging.getLogger(__name__)\n\nclass ClientThread(threading.Thread): # Class that implements the client threads in this server\n\n    def __init__(self, client_sock): # Initialize the object, save the socket that this thread will use\n\n        threading.Thread.__init__(self)\n        self.client = client_sock\n\n    def run(self): # Thread's main loop; when this function returns, the thread is finished and terminated\n\n        connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost')) # Connect via pika with localhost\n\n        global QUIT # Declare QUIT as global, and method can change it\n        done = False\n        data = self.readline() # Read data from the socket and process it\n        if data is None:\n            #exit()\n            sys.exit()\n\n\n        else:\n            while not done:\n                # Case where data was received\n                if data == \"disconnect\": # Check whether stream should be stopped or not\n                    done = True\n                    #print(\"Found disconnect\")\n                    self.client.close()\n                    #connection.close()\n                    return\n                elif data.strip() == '': # Check for whitespace-only data in the incoming streams\n                    #print(\"Found disconnect again\")\n                    self.client.close()\n                    channel = connection.channel() # Open a channel before publishing the session-closed marker\n                    channel.basic_publish(exchange='',\n                                          routing_key='trackPick', # Routing with key \"trackPick\"\n                                          body=\"sessionclosed\")\n                    print(\"Session closed message sent\")\n                    connection.close()\n\n                    print(\"Connection Closed\")\n                    QUIT = True\n                    #channel.close()\n                    #exit()\n                    sys.exit()\n                    return\n                elif data == \"connect\" or data is not None: # Start streaming on a connect message or whenever data is available\n                    # Main loop for the incoming data stream\n\n                    servertime = int(round(time.time() * 1000)) # Arrival time taken on each\n                                                                # thread and concatenated with the data\n                    # Wrapping with external JSON for server time\n\n                    for d in data.split(\"\\n\"): # For line wise data in incoming streams\n                        data = d\n                        outdata = '{\"servertime\":' + '\"' + str(servertime) + '\",\"cdata\":' + str(data) + '}'\n                        #print(outdata +\"\\n\")\n                        print(outdata)\n                        #connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))\n                        channel = connection.channel() # Connect channel through connection\n\n                        channel.queue_declare(queue='trackPick') # Queue declaration with \"trackPick\"\n                        channel.basic_publish(exchange='',\n                                              routing_key='trackPick', # Routing with key \"trackPick\"\n                                              body=outdata)\n\n                        print(\"Sending Data to queue...\")\n\n\n                    #connection.close()\n\n                    data = self.readline()\n\n        return\n\n    def readline(self):\n        # Helper function, reads chars from the socket, and returns\n        # them as a lowercase string (to make string uniform for all types)\n        data = True\n        buffer = ''\n        while data:\n            data = self.client.recv(1024)\n            buffer += data.decode('utf-8')\n\n            if (buffer.find('\\n') != -1):\n                data = False\n        buffer = buffer.strip().lower()\n        return buffer\n\n    def writeline(self, text):\n\n        self.client.send((text.strip() + '\\n').encode('utf-8'))\n    def close(self,reply_code=200,reply_text='Normal Shutdown'):\n        try:\n            if self.is_closed:\n                LOGGER.debug('Close called on closed connection (%s): %s',\n                             reply_code, reply_text)\n                return\n            LOGGER.info('Closing connection (%s): %s', reply_code, reply_text)\n            self._user_initiated_close = True\n            for impl_channel in pika.compat.dictvalues(self._impl._channels):\n                channel = impl_channel._get_cookie()\n                if channel.is_open:\n                    try:\n                        channel.close(reply_code, reply_text)\n                    except exceptions.ChannelClosed as exc:\n                        # Log and suppress broker-closed channel\n                        LOGGER.warning('Got Channel Closed exception while closing channel '\n                                       'from connection.close: %r', exc)\n\n            self._impl.close(reply_code, reply_text)\n        except:\n            print(\"Connection could not be closed\")\n\n        #self._flush_output(self._closed_result.is_ready)\n\n\n\nclass Server:\n\n    # Server class that opens up a socket and listens for incoming connections.\n    # Every time a new connection arrives, it creates a new ClientThread thread\n    # object and defers the processing of the connection to it.\n\n    def __init__(self):\n        self.sock = None\n        self.thread_list = []\n\n    def run(self):\n\n        # Server main loop that creates the server (incoming) socket, and listens on it for incoming\n        # connections. Once an incoming connection is detected, creates a\n        # \"ClientThread\" to handle it, and goes back to listening mode.\n\n        all_good = False\n        try_count = 0\n\n        while not all_good: # Attempt to open the socket if execution is fine\n\n            if 3 < try_count: # Give up after more than 3 failed attempts; the port is likely occupied.\n\n                sys.exit(1)\n            try:\n                self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Create the socket\n\n                self.sock.bind(('0.0.0.0', 9999)) # Bind it to the interface and port we want to listen on\n\n                self.sock.listen(5) # Listen for up to 5 simultaneous connections\n                all_good = True\n                break\n            except socket.error: # Handling Binding Error\n\n                print('Socket connection error... Waiting 5 seconds to retry.') # Short 5 seconds\n                                                                                # time for socket replenishment\n\n                del self.sock\n                time.sleep(5)\n                try_count += 1\n\n        print(\"Server is up and listening for stream...\")\n\n        try:\n\n            while not QUIT:\n                try:\n\n                    self.sock.settimeout(0.500)\n                    client = self.sock.accept()[0] # Timeout handling for connection error\n                except socket.timeout:\n\n                    # No connection detected, sleep for one second, then check\n                    # if the global QUIT flag has been set\n\n                    time.sleep(1)\n                    if QUIT:\n                        print(\"Received quit command. Shutting down...\")\n                        break\n                    continue\n\n                # Create the ClientThread object and let it handle the incoming\n                # connection and data\n\n                new_thread = ClientThread(client)\n                #print('Incoming Connection. Started thread ', ) # Current thread information\n                #print(new_thread.getName())\n                self.thread_list.append(new_thread)\n                new_thread.start()\n\n                for thread in self.thread_list: # Thread loop in the thread list\n                    if not thread.is_alive():\n                        self.thread_list.remove(thread)\n                        thread.join()\n\n        except KeyboardInterrupt: # Handling keyboard interrupt for user-initiated shutdown\n            print('Ctrl+C pressed... Shutting Down')\n        except Exception as err:\n            print('Exception caught: %s\\nClosing...' % err) # Clear the list of threads, giving each\n                                                            # thread 1 second to finish\n            for thread in self.thread_list:\n                thread.join(1.0)\n        self.sock.close()\n\n\nif \"__main__\" == __name__:\n    server = Server()\n    server.run()\n\n    print(\"Terminated\")\n" }, { "alpha_fraction": 0.7260241508483887, "alphanum_fraction": 0.736214280128479, "avg_line_length": 57.33598327636719, "blob_id": "3accc8336d1113ab9511a643d6ed2935bb675110", "content_id": "7055905739e5edd175dd8a736603fb495d7107fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 29342, "license_type": "no_license", "max_line_length": 206, "num_lines": 503, "path": "/Server-TrackPick/Evaluation.py", "repo_name": "TrackPickUniMannheim/Server-TrackPick", "src_encoding": "UTF-8", "text": "import csv\nimport json\nimport numpy as np\nimport pymongo\nimport matplotlib.pyplot as plt\nfrom pymongo import MongoClient\n\n#Connection to MongoDB with specified Collection\nclient = MongoClient('localhost', 27017)\ndb = client['test_database']\ncollection = 'NIRANJANPICKINGHOME2-2017.10.24-20.37.16'\n\n# Client Timestamp Variables\ntimeold_client_phone_accelerometer = 0\ntimenew_client_phone_accelerometer = 0\ncoll_eval_client_phone_accelerometer = []\n\ntimeold_client_phone_gyroscope = 0\ntimenew_client_phone_gyroscope = 0\ncoll_eval_client_phone_gyroscope = []\n\ntimeold_client_phone_magnetic = 0\ntimenew_client_phone_magnetic = 0\ncoll_eval_client_phone_magnetic = []\n\ntimeold_client_watch_accelerometer = 0\ntimenew_client_watch_accelerometer = 0\ncoll_eval_client_watch_accelerometer = []\n\ntimeold_client_watch_gyroscope = 0\ntimenew_client_watch_gyroscope = 0\ncoll_eval_client_watch_gyroscope = []\n\ntimeold_client_watch_magnetic = 0\ntimenew_client_watch_magnetic = 0\ncoll_eval_client_watch_magnetic = []\n\ntimeold_client_glass_accelerometer = 0\ntimenew_client_glass_accelerometer = 0\ncoll_eval_client_glass_accelerometer = []\n\ntimeold_client_glass_gyroscope = 0\ntimenew_client_glass_gyroscope = 0\ncoll_eval_client_glass_gyroscope = []\n\ntimeold_client_glass_magnetic = 0\ntimenew_client_glass_magnetic = 0\ncoll_eval_client_glass_magnetic = []\n\n# Server Timestamp Variables\ntimeold_server_phone_accelerometer = 0\ntimenew_server_phone_accelerometer = 0\ncoll_eval_server_phone_accelerometer = []\n\ntimeold_server_phone_gyroscope = 0\ntimenew_server_phone_gyroscope = 0\ncoll_eval_server_phone_gyroscope = []\n\ntimeold_server_phone_magnetic = 0\ntimenew_server_phone_magnetic = 0\ncoll_eval_server_phone_magnetic = []\n\ntimeold_server_watch_accelerometer = 0\ntimenew_server_watch_accelerometer = 0\ncoll_eval_server_watch_accelerometer = []\n\ntimeold_server_watch_gyroscope = 0\ntimenew_server_watch_gyroscope = 0\ncoll_eval_server_watch_gyroscope = []\n\ntimeold_server_watch_magnetic = 0\ntimenew_server_watch_magnetic = 0\ncoll_eval_server_watch_magnetic = []\n\ntimeold_server_glass_accelerometer = 0\ntimenew_server_glass_accelerometer = 0\ncoll_eval_server_glass_accelerometer = []\n\ntimeold_server_glass_gyroscope = 0\ntimenew_server_glass_gyroscope = 0\ncoll_eval_server_glass_gyroscope = []\n\ntimeold_server_glass_magnetic = 0\ntimenew_server_glass_magnetic = 0\ncoll_eval_server_glass_magnetic = []\n\n# Time Difference Variables\ncoll_eval_difference_phone_accelerometer = []\ncoll_eval_difference_phone_gyroscope = []\ncoll_eval_difference_phone_magnetic = []\ncoll_eval_difference_watch_accelerometer = 
[]\ncoll_eval_difference_watch_gyroscope = []\ncoll_eval_difference_watch_magnetic = []\ncoll_eval_difference_glass_accelerometer = []\ncoll_eval_difference_glass_gyroscope = []\ncoll_eval_difference_glass_magnetic = []\n\nall_dicts = []\n# Analyze Timestamps\nfor x in db[collection].find():\n all_dicts.append(json.loads(''.join(map(str, x['session']))))\n\nnewlist = sorted(all_dicts, key=lambda k: k['clienttime']) \n\nfor dict in newlist:\n #PHONE\n if dict['sensortype'] == 'accelerometer' and dict['deviceid'] == 'ddd830aa7d688be5':\n timenew_client_phone_accelerometer = int(float(dict['clienttime']))\n if timeold_client_phone_accelerometer != 0:\n coll_eval_client_phone_accelerometer.append(timenew_client_phone_accelerometer - timeold_client_phone_accelerometer)\n timeold_client_phone_accelerometer = timenew_client_phone_accelerometer\n \n elif dict['sensortype'] == 'gyroscope' and dict['deviceid'] == 'ddd830aa7d688be5':\n timenew_client_phone_gyroscope = int(float(dict['clienttime']))\n if timeold_client_phone_gyroscope != 0:\n coll_eval_client_phone_gyroscope.append(timenew_client_phone_gyroscope - timeold_client_phone_gyroscope)\n timeold_client_phone_gyroscope = timenew_client_phone_gyroscope\n \n elif dict['sensortype'] == 'magneticfield' and dict['deviceid'] == 'ddd830aa7d688be5':\n timenew_client_phone_magnetic = int(float(dict['clienttime']))\n if timeold_client_phone_magnetic != 0:\n coll_eval_client_phone_magnetic.append(timenew_client_phone_magnetic - timeold_client_phone_magnetic)\n timeold_client_phone_magnetic = timenew_client_phone_magnetic\n\n #WATCH\n elif dict['sensortype'] == 'accelerometer' and dict['deviceid'] == 'f7c3857f13a0234f':\n timenew_client_watch_accelerometer = int(float(dict['clienttime']))\n if timeold_client_watch_accelerometer != 0:\n coll_eval_client_watch_accelerometer.append(timenew_client_watch_accelerometer - timeold_client_watch_accelerometer)\n timeold_client_watch_accelerometer = timenew_client_watch_accelerometer\n\n elif dict['sensortype'] == 'gyroscope' and dict['deviceid'] == 'f7c3857f13a0234f':\n timenew_client_watch_gyroscope = int(float(dict['clienttime']))\n if timeold_client_watch_gyroscope != 0:\n coll_eval_client_watch_gyroscope.append(timenew_client_watch_gyroscope - timeold_client_watch_gyroscope)\n timeold_client_watch_gyroscope = timenew_client_watch_gyroscope\n \n elif dict['sensortype'] == 'magneticfield' and dict['deviceid'] == 'f7c3857f13a0234f':\n timenew_client_watch_magnetic = int(float(dict['clienttime']))\n if timeold_client_watch_magnetic != 0:\n coll_eval_client_watch_magnetic.append(timenew_client_watch_magnetic - timeold_client_watch_magnetic)\n timeold_client_watch_magnetic = timenew_client_watch_magnetic\n\n #GLASS\n elif dict['sensortype'] == 'accelerometer' and dict['deviceid'] == '62c7c4a8aa33b123':\n timenew_client_glass_accelerometer = int(float(dict['clienttime']))\n if timeold_client_glass_accelerometer != 0:\n coll_eval_client_glass_accelerometer.append(timenew_client_glass_accelerometer - timeold_client_glass_accelerometer)\n timeold_client_glass_accelerometer = timenew_client_glass_accelerometer\n\n elif dict['sensortype'] == 'gyroscope' and dict['deviceid'] == '62c7c4a8aa33b123':\n timenew_client_glass_gyroscope = int(float(dict['clienttime']))\n if timeold_client_glass_gyroscope != 0:\n coll_eval_client_glass_gyroscope.append(timenew_client_glass_gyroscope - timeold_client_glass_gyroscope)\n timeold_client_glass_gyroscope = timenew_client_glass_gyroscope\n\n elif dict['sensortype'] == 'magneticfield' and 
dict['deviceid'] == '62c7c4a8aa33b123':\n\n timenew_client_glass_magnetic = int(float(dict['clienttime']))\n if timeold_client_glass_magnetic != 0:\n coll_eval_client_glass_magnetic.append(timenew_client_glass_magnetic - timeold_client_glass_magnetic)\n timeold_client_glass_magnetic = timenew_client_glass_magnetic\n\nfor dict in all_dicts:\n #PHONE\n if dict['sensortype'] == 'accelerometer' and dict['deviceid'] == 'ddd830aa7d688be5':\n timenew_server_phone_accelerometer = int(float(dict['servertime']))\n if timeold_server_phone_accelerometer != 0 and (timenew_server_phone_accelerometer - timeold_server_phone_accelerometer) != 0:\n coll_eval_server_phone_accelerometer.append(timenew_server_phone_accelerometer - timeold_server_phone_accelerometer)\n coll_eval_difference_phone_accelerometer.append(timeold_server_phone_accelerometer-timeold_client_phone_accelerometer)\n timeold_server_phone_accelerometer = timenew_server_phone_accelerometer\n timeold_client_phone_accelerometer = int(float(dict['clienttime']))\n \n elif dict['sensortype'] == 'gyroscope' and dict['deviceid'] == 'ddd830aa7d688be5':\n timenew_server_phone_gyroscope = int(float(dict['servertime']))\n if timeold_server_phone_gyroscope != 0 and (timenew_server_phone_gyroscope - timeold_server_phone_gyroscope) != 0:\n coll_eval_server_phone_gyroscope.append(timenew_server_phone_gyroscope - timeold_server_phone_gyroscope)\n coll_eval_difference_phone_gyroscope.append(timeold_server_phone_gyroscope-timeold_client_phone_gyroscope)\n timeold_server_phone_gyroscope = timenew_server_phone_gyroscope\n timeold_client_phone_gyroscope = int(float(dict['clienttime']))\n \n elif dict['sensortype'] == 'magneticfield' and dict['deviceid'] == 'ddd830aa7d688be5':\n timenew_server_phone_magnetic = int(float(dict['servertime']))\n if timeold_server_phone_magnetic != 0 and (timenew_server_phone_magnetic - timeold_server_phone_magnetic) != 0:\n coll_eval_server_phone_magnetic.append(timenew_server_phone_magnetic - timeold_server_phone_magnetic)\n coll_eval_difference_phone_magnetic.append(timeold_server_phone_magnetic-timeold_client_phone_magnetic)\n timeold_server_phone_magnetic = timenew_server_phone_magnetic\n timeold_client_phone_magnetic = int(float(dict['clienttime']))\n\n #WATCH\n elif dict['sensortype'] == 'accelerometer' and dict['deviceid'] == 'f7c3857f13a0234f':\n timenew_server_watch_accelerometer = int(float(dict['servertime']))\n if timeold_server_watch_accelerometer != 0 and (timenew_server_watch_accelerometer - timeold_server_watch_accelerometer) != 0:\n coll_eval_server_watch_accelerometer.append(timenew_server_watch_accelerometer - timeold_server_watch_accelerometer)\n coll_eval_difference_watch_accelerometer.append(timeold_server_watch_accelerometer-timeold_client_watch_accelerometer)\n timeold_server_watch_accelerometer = timenew_server_watch_accelerometer\n timeold_client_watch_accelerometer = int(float(dict['clienttime']))\n\n elif dict['sensortype'] == 'gyroscope' and dict['deviceid'] == 'f7c3857f13a0234f':\n timenew_server_watch_gyroscope = int(float(dict['servertime']))\n if timeold_server_watch_gyroscope != 0 and (timenew_server_watch_gyroscope - timeold_server_watch_gyroscope) != 0:\n coll_eval_server_watch_gyroscope.append(timenew_server_watch_gyroscope - timeold_server_watch_gyroscope)\n coll_eval_difference_watch_gyroscope.append(timeold_server_watch_gyroscope-timeold_client_watch_gyroscope)\n timeold_server_watch_gyroscope = timenew_server_watch_gyroscope\n timeold_client_watch_gyroscope = int(float(dict['clienttime']))\n 
\n    elif dict['sensortype'] == 'magneticfield' and dict['deviceid'] == 'f7c3857f13a0234f':\n        timenew_server_watch_magnetic = int(float(dict['servertime']))\n        if timeold_server_watch_magnetic != 0 and (timenew_server_watch_magnetic - timeold_server_watch_magnetic) != 0:\n            coll_eval_server_watch_magnetic.append(timenew_server_watch_magnetic - timeold_server_watch_magnetic)\n            coll_eval_difference_watch_magnetic.append(timeold_server_watch_magnetic-timeold_client_watch_magnetic)\n            timeold_server_watch_magnetic = timenew_server_watch_magnetic\n            timeold_client_watch_magnetic = int(float(dict['clienttime']))\n\n    #GLASS\n    elif dict['sensortype'] == 'accelerometer' and dict['deviceid'] == '62c7c4a8aa33b123':\n        timenew_server_glass_accelerometer = int(float(dict['servertime']))\n        if timeold_server_glass_accelerometer != 0 and (timenew_server_glass_accelerometer - timeold_server_glass_accelerometer) != 0:\n            coll_eval_server_glass_accelerometer.append(timenew_server_glass_accelerometer - timeold_server_glass_accelerometer)\n            coll_eval_difference_glass_accelerometer.append(timeold_server_glass_accelerometer-timeold_client_glass_accelerometer)\n            timeold_server_glass_accelerometer = timenew_server_glass_accelerometer\n            timeold_client_glass_accelerometer = int(float(dict['clienttime']))\n\n    elif dict['sensortype'] == 'gyroscope' and dict['deviceid'] == '62c7c4a8aa33b123':\n        timenew_server_glass_gyroscope = int(float(dict['servertime']))\n        if timeold_server_glass_gyroscope != 0 and (timenew_server_glass_gyroscope - timeold_server_glass_gyroscope) != 0:\n            coll_eval_server_glass_gyroscope.append(timenew_server_glass_gyroscope - timeold_server_glass_gyroscope)\n            coll_eval_difference_glass_gyroscope.append(timeold_server_glass_gyroscope-timeold_client_glass_gyroscope)\n            timeold_server_glass_gyroscope = timenew_server_glass_gyroscope\n            timeold_client_glass_gyroscope = int(float(dict['clienttime']))\n\n    elif dict['sensortype'] == 'magneticfield' and dict['deviceid'] == '62c7c4a8aa33b123':\n        timenew_server_glass_magnetic = int(float(dict['servertime']))\n        if timeold_server_glass_magnetic != 0 and (timenew_server_glass_magnetic - timeold_server_glass_magnetic) != 0:\n            coll_eval_server_glass_magnetic.append(timenew_server_glass_magnetic - timeold_server_glass_magnetic)\n            coll_eval_difference_glass_magnetic.append(timeold_server_glass_magnetic-timeold_client_glass_magnetic)\n            timeold_server_glass_magnetic = timenew_server_glass_magnetic\n            timeold_client_glass_magnetic = int(float(dict['clienttime'])) # was timenew_..., which left the client baseline stale\n\n# Delete last Server Timestamp Difference due to buffer flush in the end\nif (len(coll_eval_server_phone_accelerometer) != 0): del coll_eval_server_phone_accelerometer[-1]\nif (len(coll_eval_server_phone_gyroscope) != 0): del coll_eval_server_phone_gyroscope[-1]\nif (len(coll_eval_server_phone_magnetic) != 0): del coll_eval_server_phone_magnetic[-1]\nif (len(coll_eval_server_watch_accelerometer) != 0): del coll_eval_server_watch_accelerometer[-1]\nif (len(coll_eval_server_watch_gyroscope) != 0): del coll_eval_server_watch_gyroscope[-1]\nif (len(coll_eval_server_watch_magnetic) != 0): del coll_eval_server_watch_magnetic[-1]\nif (len(coll_eval_server_glass_accelerometer) != 0): del coll_eval_server_glass_accelerometer[-1]\nif (len(coll_eval_server_glass_gyroscope) != 0): del coll_eval_server_glass_gyroscope[-1]\nif (len(coll_eval_server_glass_magnetic) != 0): del coll_eval_server_glass_magnetic[-1]\n\n# Show general statistics\nprint('----------------Number of Measurements-------------------------------------')\nprint 
('Measurements for Phone Accelerometer: \\t', len(coll_eval_client_phone_accelerometer) +1)\nprint ('Measurements for Phone Gyroscope: \\t', len(coll_eval_client_phone_gyroscope) +1) \nprint ('Measurements for Phone MagneticField: \\t', len(coll_eval_client_phone_magnetic) +1)\nprint ('Measurements for Watch Accelerometer: \\t', len(coll_eval_client_watch_accelerometer) +1)\nprint ('Measurements for Watch Gyroscope: \\t', len(coll_eval_client_watch_gyroscope) +1)\nprint ('Measurements for Watch MagneticField: \\t', len(coll_eval_client_watch_magnetic) +1)\nprint ('Measurements for Glass Accelerometer: \\t', len(coll_eval_client_glass_accelerometer) +1)\nprint ('Measurements for Glass Gyroscope: \\t', len(coll_eval_client_glass_gyroscope) +1)\nprint ('Measurements for Glass MagneticField: \\t', len(coll_eval_client_glass_magnetic) +1) \n \n\n# 1. Show Evaluation Client Timestamps\nplt.plot(coll_eval_client_phone_accelerometer)\nplt.ylabel('timedifference in ms')\nplt.title('Client Phone Accelerometer')\nplt.savefig('Images/Client_Phone_Accelerometer.png')\nplt.show()\nplt.clf()\n\nplt.plot(coll_eval_client_phone_gyroscope)\nplt.ylabel('timedifference in ms')\nplt.title('Client Phone Gyroscope')\nplt.savefig('Images/Client_Phone_Gyroscope.png')\nplt.show()\nplt.clf()\n\nplt.plot(coll_eval_client_phone_magnetic)\nplt.ylabel('timedifference in ms')\nplt.title('Client Phone MagneticField')\nplt.savefig('Images/Client_Phone_MagneticField.png')\nplt.show()\nplt.clf()\n\nplt.plot(coll_eval_client_watch_accelerometer)\nplt.ylabel('timedifference in ms')\nplt.title('Client Watch Accelerometer')\nplt.savefig('Images/Client_Watch_Accelerometer.png')\nplt.show()\nplt.clf()\n\nplt.plot(coll_eval_client_watch_gyroscope)\nplt.ylabel('timedifference in ms')\nplt.title('Client Watch Gyroscope')\nplt.savefig('Images/Client_Watch_Gyroscope.png')\nplt.show()\nplt.clf()\n\nplt.plot(coll_eval_client_watch_magnetic)\nplt.ylabel('timedifference in ms')\nplt.title('Client Watch MagneticField')\nplt.savefig('Images/Client_Watch_MagneticField.png')\nplt.show()\nplt.clf()\n\nplt.plot(coll_eval_client_glass_accelerometer)\nplt.ylabel('timedifference in ms')\nplt.title('Client Glass Accelerometer')\nplt.savefig('Images/Client_Glass_Accelerometer.png')\nplt.show()\nplt.clf()\n\nplt.plot(coll_eval_client_glass_gyroscope)\nplt.ylabel('timedifference in ms')\nplt.title('Client Glass Gyroscope')\nplt.savefig('Images/Client_Glass_Gyroscope.png')\nplt.show()\nplt.clf()\n\nplt.plot(coll_eval_client_glass_magnetic)\nplt.ylabel('timedifference in ms')\nplt.title('Client Glass MagneticField')\nplt.savefig('Images/Client_Glass_MagneticField.png')\nplt.show()\nplt.clf()\n\nprint('----------------Client Timestamp Average------------------------------------------')\nif (len(coll_eval_client_phone_accelerometer) != 0): print('Client Phone Accelerometer AVG: \\t', (sum(coll_eval_client_phone_accelerometer)/float(len(coll_eval_client_phone_accelerometer))))\nif (len(coll_eval_client_phone_gyroscope) != 0): print('Client Phone Gyroscope AVG: \\t\\t', (sum(coll_eval_client_phone_gyroscope)/float(len(coll_eval_client_phone_gyroscope))))\nif (len(coll_eval_client_phone_magnetic) != 0): print('Client Phone MagneticField AVG: \\t', (sum(coll_eval_client_phone_magnetic)/float(len(coll_eval_client_phone_magnetic))))\nif (len(coll_eval_client_watch_accelerometer) != 0): print('Client Watch Accelerometer AVG: \\t', (sum(coll_eval_client_watch_accelerometer)/float(len(coll_eval_client_watch_accelerometer))))\nif 
(len(coll_eval_client_watch_gyroscope) != 0): print('Client Watch Gyroscope AVG: \\t\\t', (sum(coll_eval_client_watch_gyroscope)/float(len(coll_eval_client_watch_gyroscope))))\nif (len(coll_eval_client_watch_magnetic) != 0): print('Client Watch MagneticField AVG: \\t', (sum(coll_eval_client_watch_magnetic)/float(len(coll_eval_client_watch_magnetic))))\nif (len(coll_eval_client_glass_accelerometer) != 0): print('Client Glass Accelerometer AVG: \\t', (sum(coll_eval_client_glass_accelerometer)/float(len(coll_eval_client_glass_accelerometer))))\nif (len(coll_eval_client_glass_gyroscope) != 0): print('Client Glass Gyroscope AVG: \\t', (sum(coll_eval_client_glass_gyroscope)/float(len(coll_eval_client_glass_gyroscope))))\nif (len(coll_eval_client_glass_magnetic) != 0): print('Client Glass MagneticField AVG: \\t', (sum(coll_eval_client_glass_magnetic)/float(len(coll_eval_client_glass_magnetic))))\n\nprint('----------------Client Timestamp Variance------------------------------------------')\nif (len(coll_eval_client_phone_accelerometer) != 0): print('Client Phone Accelerometer VAR: \\t', (np.var(coll_eval_client_phone_accelerometer)))\nif (len(coll_eval_client_phone_gyroscope) != 0): print('Client Phone Gyroscope VAR: \\t\\t', (np.var(coll_eval_client_phone_gyroscope)))\nif (len(coll_eval_client_phone_magnetic) != 0): print('Client Phone MagneticField VAR: \\t', (np.var(coll_eval_client_phone_magnetic)))\nif (len(coll_eval_client_watch_accelerometer) != 0): print('Client Watch Accelerometer VAR: \\t', (np.var(coll_eval_client_watch_accelerometer)))\nif (len(coll_eval_client_watch_gyroscope) != 0): print('Client Watch Gyroscope VAR: \\t\\t', (np.var(coll_eval_client_watch_gyroscope)))\nif (len(coll_eval_client_watch_magnetic) != 0): print('Client Watch MagneticField VAR: \\t', (np.var(coll_eval_client_watch_magnetic)))\nif (len(coll_eval_client_glass_accelerometer) != 0): print('Client Glass Accelerometer VAR: \\t', (np.var(coll_eval_client_glass_accelerometer)))\nif (len(coll_eval_client_glass_gyroscope) != 0): print('Client Glass Gyroscope VAR: \\t', (np.var(coll_eval_client_glass_gyroscope)))\nif (len(coll_eval_client_glass_magnetic) != 0): print('Client Glass MagneticField VAR: \\t', (np.var(coll_eval_client_glass_magnetic)))\n\n\n# 2. 
Show Evaluation Server Timestamps\nplt.plot(coll_eval_server_phone_accelerometer)\nplt.ylabel('time difference in ms')\nplt.title('Server Phone Accelerometer')\nplt.savefig('Images/Server_Phone_Accelerometer.png')\nplt.clf()\n\nplt.plot(coll_eval_server_phone_gyroscope)\nplt.ylabel('time difference in ms')\nplt.title('Server Phone Gyroscope')\nplt.savefig('Images/Server_Phone_Gyroscope.png')\nplt.clf()\n\nplt.plot(coll_eval_server_phone_magnetic)\nplt.ylabel('time difference in ms')\nplt.title('Server Phone MagneticField')\nplt.savefig('Images/Server_Phone_MagneticField.png')\nplt.clf()\n\nplt.plot(coll_eval_server_watch_accelerometer)\nplt.ylabel('time difference in ms')\nplt.title('Server Watch Accelerometer')\nplt.savefig('Images/Server_Watch_Accelerometer.png')\nplt.clf()\n\nplt.plot(coll_eval_server_watch_gyroscope)\nplt.ylabel('time difference in ms')\nplt.title('Server Watch Gyroscope')\nplt.savefig('Images/Server_Watch_Gyroscope.png')\nplt.clf()\n\nplt.plot(coll_eval_server_watch_magnetic)\nplt.ylabel('time difference in ms')\nplt.title('Server Watch MagneticField')\nplt.savefig('Images/Server_Watch_MagneticField.png')\nplt.clf()\n\nplt.plot(coll_eval_server_glass_accelerometer)\nplt.ylabel('time difference in ms')\nplt.title('Server Glass Accelerometer')\nplt.savefig('Images/Server_Glass_Accelerometer.png')\nplt.clf()\n\nplt.plot(coll_eval_server_glass_gyroscope)\nplt.ylabel('time difference in ms')\nplt.title('Server Glass Gyroscope')\nplt.savefig('Images/Server_Glass_Gyroscope.png')\nplt.clf()\n\nplt.plot(coll_eval_server_glass_magnetic)\nplt.ylabel('time difference in ms')\nplt.title('Server Glass MagneticField')\nplt.savefig('Images/Server_Glass_MagneticField.png')\nplt.clf()\n\nprint('----------------Server Timestamp Average------------------------------------------')\nif (len(coll_eval_server_phone_accelerometer) != 0): print('Server Phone Accelerometer AVG: \\t', (sum(coll_eval_server_phone_accelerometer)/float(len(coll_eval_server_phone_accelerometer))))\nif (len(coll_eval_server_phone_gyroscope) != 0): print('Server Phone Gyroscope AVG: \\t\\t', (sum(coll_eval_server_phone_gyroscope)/float(len(coll_eval_server_phone_gyroscope))))\nif (len(coll_eval_server_phone_magnetic) != 0): print('Server Phone MagneticField AVG: \\t', (sum(coll_eval_server_phone_magnetic)/float(len(coll_eval_server_phone_magnetic))))\nif (len(coll_eval_server_watch_accelerometer) != 0): print('Server Watch Accelerometer AVG: \\t', (sum(coll_eval_server_watch_accelerometer)/float(len(coll_eval_server_watch_accelerometer))))\nif (len(coll_eval_server_watch_gyroscope) != 0): print('Server Watch Gyroscope AVG: \\t\\t', (sum(coll_eval_server_watch_gyroscope)/float(len(coll_eval_server_watch_gyroscope))))\nif (len(coll_eval_server_watch_magnetic) != 0): print('Server Watch MagneticField AVG: \\t', (sum(coll_eval_server_watch_magnetic)/float(len(coll_eval_server_watch_magnetic))))\nif (len(coll_eval_server_glass_accelerometer) != 0): print('Server Glass Accelerometer AVG: \\t', (sum(coll_eval_server_glass_accelerometer)/float(len(coll_eval_server_glass_accelerometer))))\nif (len(coll_eval_server_glass_gyroscope) != 0): print('Server Glass Gyroscope AVG: \\t\\t', (sum(coll_eval_server_glass_gyroscope)/float(len(coll_eval_server_glass_gyroscope))))\nif (len(coll_eval_server_glass_magnetic) != 0): print('Server Glass MagneticField AVG: \\t', (sum(coll_eval_server_glass_magnetic)/float(len(coll_eval_server_glass_magnetic))))\n\nprint('----------------Server Timestamp Variance------------------------------------------')\nif (len(coll_eval_server_phone_accelerometer) != 0): print('Server Phone Accelerometer VAR: \\t', (np.var(coll_eval_server_phone_accelerometer)))\nif (len(coll_eval_server_phone_gyroscope) != 0): print('Server Phone Gyroscope VAR: \\t\\t', (np.var(coll_eval_server_phone_gyroscope)))\nif (len(coll_eval_server_phone_magnetic) != 0): print('Server Phone MagneticField VAR: \\t', (np.var(coll_eval_server_phone_magnetic)))\nif (len(coll_eval_server_watch_accelerometer) != 0): print('Server Watch Accelerometer VAR: \\t', (np.var(coll_eval_server_watch_accelerometer)))\nif (len(coll_eval_server_watch_gyroscope) != 0): print('Server Watch Gyroscope VAR: \\t\\t', (np.var(coll_eval_server_watch_gyroscope)))\nif (len(coll_eval_server_watch_magnetic) != 0): print('Server Watch MagneticField VAR: \\t', (np.var(coll_eval_server_watch_magnetic)))\nif (len(coll_eval_server_glass_accelerometer) != 0): print('Server Glass Accelerometer VAR: \\t', (np.var(coll_eval_server_glass_accelerometer)))\nif (len(coll_eval_server_glass_gyroscope) != 0): print('Server Glass Gyroscope VAR: \\t\\t', (np.var(coll_eval_server_glass_gyroscope)))\nif (len(coll_eval_server_glass_magnetic) != 0): print('Server Glass MagneticField VAR: \\t', (np.var(coll_eval_server_glass_magnetic)))\n\n# 3. Evaluate Time Difference Client vs. Server\nplt.plot(coll_eval_difference_phone_accelerometer)\nplt.ylabel('time difference in ms')\nplt.title('Difference Phone Accelerometer')\nplt.savefig('Images/Difference_Phone_Accelerometer.png')\nplt.clf()\n\nplt.plot(coll_eval_difference_phone_gyroscope)\nplt.ylabel('time difference in ms')\nplt.title('Difference Phone Gyroscope')\nplt.savefig('Images/Difference_Phone_Gyroscope.png')\nplt.clf()\n\nplt.plot(coll_eval_difference_phone_magnetic)\nplt.ylabel('time difference in ms')\nplt.title('Difference Phone MagneticField')\nplt.savefig('Images/Difference_Phone_MagneticField.png')\nplt.clf()\n\nplt.plot(coll_eval_difference_watch_accelerometer)\nplt.ylabel('time difference in ms')\nplt.title('Difference Watch Accelerometer')\nplt.savefig('Images/Difference_Watch_Accelerometer.png')\nplt.clf()\n\nplt.plot(coll_eval_difference_watch_gyroscope)\nplt.ylabel('time difference in ms')\nplt.title('Difference Watch Gyroscope')\nplt.savefig('Images/Difference_Watch_Gyroscope.png')\nplt.clf()\n\nplt.plot(coll_eval_difference_watch_magnetic)\nplt.ylabel('time difference in ms')\nplt.title('Difference Watch MagneticField')\nplt.savefig('Images/Difference_Watch_MagneticField.png')\nplt.clf()\n\nplt.plot(coll_eval_difference_glass_accelerometer)\nplt.ylabel('time difference in ms')\nplt.title('Difference Glass Accelerometer')\nplt.savefig('Images/Difference_Glass_Accelerometer.png')\nplt.clf()\n\nplt.plot(coll_eval_difference_glass_gyroscope)\nplt.ylabel('time difference in ms')\nplt.title('Difference Glass Gyroscope')\nplt.savefig('Images/Difference_Glass_Gyroscope.png')\nplt.clf()\n\nplt.plot(coll_eval_difference_glass_magnetic)\nplt.ylabel('time difference in ms')\nplt.title('Difference Glass MagneticField')\nplt.savefig('Images/Difference_Glass_MagneticField.png')\nplt.clf()\n\nprint('----------------Difference Timestamp Average------------------------------------------')\nif (len(coll_eval_difference_phone_accelerometer) != 0): print('Difference Phone Accelerometer AVG: \\t', (sum(coll_eval_difference_phone_accelerometer)/float(len(coll_eval_difference_phone_accelerometer))))\nif (len(coll_eval_difference_phone_gyroscope) != 0): print('Difference Phone Gyroscope AVG: \\t', (sum(coll_eval_difference_phone_gyroscope)/float(len(coll_eval_difference_phone_gyroscope))))\nif (len(coll_eval_difference_phone_magnetic) != 0): print('Difference Phone MagneticField AVG: \\t', (sum(coll_eval_difference_phone_magnetic)/float(len(coll_eval_difference_phone_magnetic))))\nif (len(coll_eval_difference_watch_accelerometer) != 0): print('Difference Watch Accelerometer AVG: \\t', (sum(coll_eval_difference_watch_accelerometer)/float(len(coll_eval_difference_watch_accelerometer))))\nif (len(coll_eval_difference_watch_gyroscope) != 0): print('Difference Watch Gyroscope AVG: \\t', (sum(coll_eval_difference_watch_gyroscope)/float(len(coll_eval_difference_watch_gyroscope))))\nif (len(coll_eval_difference_watch_magnetic) != 0): print('Difference Watch MagneticField AVG: \\t', (sum(coll_eval_difference_watch_magnetic)/float(len(coll_eval_difference_watch_magnetic))))\nif (len(coll_eval_difference_glass_accelerometer) != 0): print('Difference Glass Accelerometer AVG: \\t', (sum(coll_eval_difference_glass_accelerometer)/float(len(coll_eval_difference_glass_accelerometer))))\nif (len(coll_eval_difference_glass_gyroscope) != 0): print('Difference Glass Gyroscope AVG: \\t', (sum(coll_eval_difference_glass_gyroscope)/float(len(coll_eval_difference_glass_gyroscope))))\nif (len(coll_eval_difference_glass_magnetic) != 0): print('Difference Glass MagneticField AVG: \\t', (sum(coll_eval_difference_glass_magnetic)/float(len(coll_eval_difference_glass_magnetic))))\n\nprint('----------------Difference Timestamp Variance------------------------------------------')\nif (len(coll_eval_difference_phone_accelerometer) != 0): print('Difference Phone Accelerometer VAR: \\t', (np.var(coll_eval_difference_phone_accelerometer)))\nif (len(coll_eval_difference_phone_gyroscope) != 0): print('Difference Phone Gyroscope VAR: \\t', (np.var(coll_eval_difference_phone_gyroscope)))\nif (len(coll_eval_difference_phone_magnetic) != 0): print('Difference Phone MagneticField VAR: \\t', (np.var(coll_eval_difference_phone_magnetic)))\nif (len(coll_eval_difference_watch_accelerometer) != 0): print('Difference Watch Accelerometer VAR: \\t', (np.var(coll_eval_difference_watch_accelerometer)))\nif (len(coll_eval_difference_watch_gyroscope) != 0): print('Difference Watch Gyroscope VAR: \\t', (np.var(coll_eval_difference_watch_gyroscope)))\nif (len(coll_eval_difference_watch_magnetic) != 0): print('Difference Watch MagneticField VAR: \\t', (np.var(coll_eval_difference_watch_magnetic)))\nif (len(coll_eval_difference_glass_accelerometer) != 0): print('Difference Glass Accelerometer VAR: \\t', (np.var(coll_eval_difference_glass_accelerometer)))\nif (len(coll_eval_difference_glass_gyroscope) != 0): print('Difference Glass Gyroscope VAR: \\t', (np.var(coll_eval_difference_glass_gyroscope)))\nif (len(coll_eval_difference_glass_magnetic) != 0): print('Difference Glass MagneticField VAR: \\t', (np.var(coll_eval_difference_glass_magnetic)))" }, { "alpha_fraction": 0.8108108043670654, "alphanum_fraction": 0.8108108043670654, "avg_line_length": 73, "blob_id": "0104ab3b9de92e7f8e72c5a1e9cd43dd24d544ff", "content_id": "662d7f81e88b62c0b3c90530cf4b4b583f7f8e62", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 148, "license_type": "no_license", "max_line_length": 128, "num_lines": 2, "path": "/README.md", "repo_name": "TrackPickUniMannheim/Server-TrackPick", "src_encoding": "UTF-8", "text": "# Server-TrackPick\nThis is the server-side application: it takes in the data from the Android client, attaches a server timestamp, and saves the data.
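\n\nA minimal, illustrative sketch of that flow (the socket setup, port, and output format here are assumptions for illustration, not the actual implementation in this repository):\n\n```python\nimport socket\nimport time\n\n# Accept one client and prepend the server's arrival time (in ms) to each received line.\ndef serve(host=\"0.0.0.0\", port=9000, out_path=\"data.log\"):\n    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:\n        srv.bind((host, port))\n        srv.listen(1)\n        conn, _ = srv.accept()\n        with conn, conn.makefile(\"r\") as lines, open(out_path, \"a\") as out:\n            for line in lines:\n                out.write(\"%d,%s\\\n\" % (int(time.time() * 1000), line.strip()))\n```\n" } ]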
4
bsao/caipyra
https://github.com/bsao/caipyra
92eb133d39010cbc2ae88551d8a0308a206e0f16
952bfd5ee459409cc91b2e044ca96fbbb1c52cdc
33780622dcb2ad84bbe01671bce93ec3e7a61bb8
refs/heads/master
2021-01-20T19:05:55.070824
2016-06-24T22:05:12
2016-06-24T22:05:12
61,901,410
3
1
null
null
null
null
null
[ { "alpha_fraction": 0.6447507739067078, "alphanum_fraction": 0.6484623551368713, "avg_line_length": 30.966102600097656, "blob_id": "e879cf5c18ff596ab903cd33f10da9979cec6624", "content_id": "800a488f91a1f270a23e92b56c21baa0de179da1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1886, "license_type": "no_license", "max_line_length": 118, "num_lines": 59, "path": "/beatles/scrap.py", "repo_name": "bsao/caipyra", "src_encoding": "UTF-8", "text": "import json\n\nimport spotipy\nfrom PyLyrics import PyLyrics\n\nBEATLES_URN = 'spotify:artist:3WrFJ7ztbogyGnTHbHJFl2'\n\n# connection to PyLyrics service\nlyrics = PyLyrics()\n\n# connection to Spotify public API\nspotify = spotipy.Spotify()\n\n# get artists information\nartist = spotify.artist(BEATLES_URN)\nartist_name = artist.get('name')\n\n# get all artist albums\nartist_albums = spotify.artist_albums(artist.get('uri'), limit=50)\n\n# get all albums from artists\nalbums = artist_albums.get('items')\n\n# open file\nf = open('artists.json', 'w')\n\nfor album in albums:\n if 'US' not in album.get('available_markets'):\n continue\n\n # album good information\n album_tracks = spotify.album_tracks(album.get('uri'))\n album_id = album.get('id')\n album_name = album.get('name').replace(' (Remastered)', '')\n album_type = album.get('type')\n album_image = album.get('images')[0].get('url')\n\n # get album tracks\n tracks = album_tracks.get('items')\n for track in tracks:\n track_id = track.get('id')\n track_name = track.get('name').split(' -')[0].strip()\n track_disc_number = track.get('disc_number')\n track_duration = track.get('duration_ms')\n track_explicit = track.get('explicit')\n track_number = track.get('track_number')\n\n try:\n track_lyrics = lyrics.getLyrics(artist_name, track_name)\n except ValueError:\n track_lyrics = 'not found'\n continue\n\n line = dict(artist_name=artist_name, album_id=album_id, album_name=album_name, album_type=album_type,\n album_image=album_image, track_id=track_id, track_name=track_name,\n track_disc_number=track_disc_number, track_duration=track_duration, track_explicit=track_explicit,\n track_number=track_number, track_lyrics=track_lyrics)\n f.write(json.dumps(line) + '\\n')\nf.close()\n" } ]
1
lucianoplouvier/KnightTourGeneticIls
https://github.com/lucianoplouvier/KnightTourGeneticIls
a03123b59ddc1b10c5b16355cec71a7a58063ca6
367346e5bf20fba32bcd98346bb81876942fc0c6
59335f2ffab4c0da0a1ddcfc8e212d3ea0093d37
refs/heads/master
2023-07-27T05:14:51.971446
2021-09-11T13:48:36
2021-09-11T13:48:36
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.837837815284729, "alphanum_fraction": 0.837837815284729, "avg_line_length": 74, "blob_id": "617e9d199c831c7a76bbdab4320f2bb932b0e0a6", "content_id": "9802c7f982203db9178831ec49f52de457de5be5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 76, "license_type": "no_license", "max_line_length": 74, "num_lines": 1, "path": "/README.txt", "repo_name": "lucianoplouvier/KnightTourGeneticIls", "src_encoding": "UTF-8", "text": "Soluรงรฃo para o problema do Passeio do Cavalo com ILS e Simulated Annealing" }, { "alpha_fraction": 0.6289222240447998, "alphanum_fraction": 0.6400142312049866, "avg_line_length": 38.39018630981445, "blob_id": "59141672ba4c2ca8a12c71ef15a73ea3ecae8501", "content_id": "bc685da0b9aea7d8142b441549d6623217157cd6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16879, "license_type": "no_license", "max_line_length": 159, "num_lines": 428, "path": "/main.py", "repo_name": "lucianoplouvier/KnightTourGeneticIls", "src_encoding": "UTF-8", "text": "import copy\nimport random\nimport math\nimport time\nimport xlsxwriter\nfrom multiprocessing import Pool, cpu_count\nfrom datetime import timedelta\n\n# random.seed(111)\n\ntamanho_tabuleiro = 8\ninicial_x = 2\ninicial_y = 2\niteracoes_simulated = 100 # 200\niteracoes_ils = 500 # 1000\nvalor_temperatura = 0.1\navaliacao_maxima = 64\nexecutar_ils_meio = False\n\n\ndef pega_posicao_pulo(x_atual: int, y_atual: int, movimento: int):\n x_destino = 0\n y_destino = 0\n if movimento == 0:\n x_destino = x_atual + 1\n y_destino = y_atual + 2\n elif movimento == 1:\n x_destino = x_atual + 2\n y_destino = y_atual + 1\n elif movimento == 2:\n x_destino = x_atual + 2\n y_destino = y_atual - 1\n elif movimento == 3:\n x_destino = x_atual + 1\n y_destino = y_atual - 2\n elif movimento == 4:\n x_destino = x_atual - 1\n y_destino = y_atual - 2\n elif movimento == 5:\n x_destino = x_atual - 2\n y_destino = y_atual - 1\n elif movimento == 6:\n x_destino = x_atual - 2\n y_destino = y_atual + 1\n elif movimento == 7:\n x_destino = x_atual - 1\n y_destino = y_atual + 2\n return x_destino, y_destino\n\n\ndef posicao_valida(x: int, y: int, matriz_avaliacao):\n if 0 <= x < tamanho_tabuleiro:\n if 0 <= y < tamanho_tabuleiro:\n if matriz_avaliacao[x][y] == 0: # Se ainda nรฃo foi visitado, valido\n return 1\n return 0\n\n\n# Avalia a solucao atual, retornando um score de 0 a tamanho_tabuleiro*tamanho_tabuleiro\n# indicando quantos pulos no total foram corretos\n# Cria-se uma matriz que guarda quantas vezes uma casa foi visitada.\n# Se a casa foi visitada exatamente uma vez, +1 no score se nรฃo for a casa final\n# Se for a casa final, verificar antes se รฉ o รบltimo movimento\ndef avalia_solucao(solucao: []):\n x = inicial_x\n y = inicial_y\n resultado_avaliacao = 1\n matriz_avaliacao = [[0 for x in range(tamanho_tabuleiro)] for y in range(tamanho_tabuleiro)]\n matriz_avaliacao[x][y] = 1\n for index in range(len(solucao)):\n x_destino, y_destino = pega_posicao_pulo(x, y, solucao[index])\n if (posicao_valida(x_destino, y_destino, matriz_avaliacao)) or (x_destino == inicial_x and y_destino == inicial_y and index == (avaliacao_maxima - 1)):\n resultado_avaliacao = resultado_avaliacao + 1\n x = x_destino\n y = y_destino\n matriz_avaliacao[x][y] = matriz_avaliacao[x][y] + 1\n return resultado_avaliacao\n\n\n# Criaรงรฃo aleatรณria de solucao\ndef cria_solucao_inicial():\n solucao_inicial = [random.randint(0, 7) for x in 
range(tamanho_tabuleiro * tamanho_tabuleiro)]\n return solucao_inicial\n\n\ndef altera_aleatorio(solucao: []):\n copia = copy.deepcopy(solucao)\n index = random.randint(0, len(copia) - 1)\n copia[index] = random.randint(0, 7)\n return copia\n\n\n# Executa a busca local do ILS. Simulated Annealing\ndef simulated_annealing(resposta_atual: []):\n melhor_global = copy.deepcopy(resposta_atual)\n melhor_avaliacao = avalia_solucao(melhor_global)\n for i in range(iteracoes_simulated):\n melhor_local = altera_aleatorio(melhor_global)\n avaliacao_local = avalia_solucao(melhor_local)\n if avaliacao_local > melhor_avaliacao:\n melhor_global = melhor_local\n melhor_avaliacao = avaliacao_local\n else:\n diferenca_avaliacao = melhor_avaliacao - avaliacao_local\n temp = valor_temperatura / float(i + 1)\n trocar_pior = math.exp(-diferenca_avaliacao/temp)\n if random.randint(0, 1) < trocar_pior:\n melhor_global = melhor_local\n melhor_avaliacao = avaliacao_local\n return melhor_global\n\n\n# Rola um dado. Se der 6, muda o pulo para um aleatรณrio.\ndef perturbacao(resposta_atual: [], avaliacao):\n if avaliacao == avaliacao_maxima:\n return resposta_atual\n resposta = copy.deepcopy(resposta_atual)\n for index in range(len(resposta)):\n rola_dado = random.randint(1, 5)\n if rola_dado > 5:\n resposta[index] = random.randint(0, 7)\n return resposta\n\n\ndef executa_ils(resposta_atual: []):\n melhor_global = copy.deepcopy(resposta_atual)\n avaliacao_global = avalia_solucao(melhor_global)\n iteracoes = 0\n while iteracoes < iteracoes_ils:\n melhor_local = perturbacao(melhor_global, avaliacao_global)\n melhor_local = simulated_annealing(melhor_local)\n avaliacao_iteracao = avalia_solucao(melhor_local)\n if avaliacao_iteracao > avaliacao_global:\n melhor_global = melhor_local\n avaliacao_global = avaliacao_iteracao\n iteracoes = iteracoes + 1\n\n return melhor_global\n\n\ndef avalia_populacao(populacao):\n avaliacoes = []\n for j in range(len(populacao)):\n avaliacoes.append(avalia_solucao(populacao[j]))\n return avaliacoes\n\n\ndef altera_cromossomo(cromossomo, qtd_genes_mutaveis):\n indices = random.sample(range(0, len(cromossomo) - 1), qtd_genes_mutaveis)\n for indice in indices:\n novo_movimento = random.randint(0, 7)\n cromossomo[indice] = novo_movimento\n\n\ndef mutacao_populacao(populacao, avaliacoes, taxa_mutacao, qtd_genes_mutaveis):\n for i in range(taxa_mutacao):\n individuos_escolhidos = random.sample(range(0, len(populacao) - 1), taxa_mutacao)\n for j in range(qtd_genes_mutaveis):\n cromossomo_atual = populacao[individuos_escolhidos[j]]\n if avaliacoes[j] != avaliacao_maxima:\n altera_cromossomo(cromossomo_atual, qtd_genes_mutaveis)\n\n\n# Escolhe n melhores individuos para reproduzir\ndef escolher_populacao(populacao_atual, avaliacoes, n_reproducoes):\n populacao = []\n avaliacoes_escolhidas = []\n indices_ordenados = sorted(range(len(avaliacoes)), key=avaliacoes.__getitem__)\n indices_ordenados.reverse()\n for i in range(min(n_reproducoes, len(indices_ordenados))):\n populacao.append(populacao_atual[indices_ordenados[i]])\n avaliacoes_escolhidas.append(avaliacoes[indices_ordenados[i]])\n\n return populacao, avaliacoes_escolhidas\n\n\ndef weighted_random_choice(populacao, choices):\n max_val = sum(choices.values())\n pick = random.uniform(0, max_val)\n current = 0\n for key, value in choices.items():\n current += value\n if current > pick:\n return populacao[key]\n\n\ndef crossover(cromossomo_a, cromossomo_b):\n ponto_corte = random.randint(0, avaliacao_maxima - 1)\n novo_cromossomo = []\n for i in 
range(0, ponto_corte):\n novo_cromossomo.append(cromossomo_a[i])\n\n for i in range(ponto_corte, avaliacao_maxima - 1):\n novo_cromossomo.append(cromossomo_b[i])\n\n return novo_cromossomo\n\n\ndef reproducoes(populacao, avaliacoes):\n nova_populacao = copy.deepcopy(populacao)\n\n dicionario_fitness = {}\n for i in range(len(populacao)):\n dicionario_fitness[i] = avaliacoes[i]\n\n for i in range(len(populacao)):\n cromossomo_a = weighted_random_choice(populacao, dicionario_fitness)\n cromossomo_b = weighted_random_choice(populacao, dicionario_fitness)\n nova_populacao.append(crossover(cromossomo_a, cromossomo_b))\n return nova_populacao\n\n\ndef pega_melhor_avaliacao(populacao):\n avaliacao = avalia_populacao(populacao)\n maior_valor = 0\n indice = 0\n for i in range(len(avaliacao)):\n aval = avaliacao[i]\n if aval > maior_valor:\n maior_valor = aval\n indice = i\n return maior_valor, populacao[indice]\n\n\ndef ils_paralelizado(populacao):\n with Pool(6) as p:\n resultado_ils = p.map(executa_ils, populacao)\n for indvs in range(len(populacao)):\n novo_individuo = resultado_ils[indvs]\n populacao[indvs] = novo_individuo\n\n\ndef algoritmo_genetico(tam_populacao, n_reproducoes, taxa_mutacao, qtd_genes_mutaveis, iteracoes, executar_local):\n populacao = [cria_solucao_inicial() for i in range(tam_populacao)]\n\n melhor_individuo = []\n maior_valor_global = 0\n\n for itrs in range(iteracoes):\n avaliacao_rodada = avalia_populacao(populacao)\n mutacao_populacao(populacao, avaliacao_rodada, taxa_mutacao, qtd_genes_mutaveis)\n populacao_escolhida, avaliacao_escolhidos = \\\n escolher_populacao(populacao, avalia_populacao(populacao), n_reproducoes)\n populacao = reproducoes(populacao_escolhida, avaliacao_escolhidos)\n\n maior_valor, individuo = pega_melhor_avaliacao(populacao)\n if maior_valor == avaliacao_maxima:\n maior_valor_global = maior_valor\n melhor_individuo = copy.deepcopy(individuo)\n break\n elif maior_valor > maior_valor_global:\n melhor_individuo = copy.deepcopy(individuo)\n maior_valor_global = maior_valor\n if (itrs + 1) == iteracoes/2:\n if executar_ils_meio and executar_local == 1: # Executa ILS na metade do caminho\n print(\"Comeรงando ILS. Como estava antes: \" + str(maior_valor_global) + \" Tam_populacao: \" + str(len(populacao)))\n populacao_local = []\n # Colocar o melhor individuo na populacao caso ele nรฃo esteja lรก.\n if populacao.__contains__(melhor_individuo) == 0:\n populacao_local.append(copy.deepcopy(melhor_individuo))\n ils_paralelizado(populacao)\n elif not executar_ils_meio:\n print(\"Pulando ILS no meio\")\n\n maior_valor, individuo = pega_melhor_avaliacao(populacao)\n\n if maior_valor == avaliacao_maxima:\n maior_valor_global = maior_valor\n melhor_individuo = copy.deepcopy(individuo)\n break\n elif maior_valor > maior_valor_global:\n maior_valor_global = maior_valor\n melhor_individuo = copy.deepcopy(individuo)\n print(\"Melhoria com ILS! foi para \" + str(maior_valor_global))\n if (itrs + 1) % 100 == 0:\n print(str(itrs + 1))\n\n if executar_local == 1: # Executa ILS no final\n print(\"Comeรงando ILS. 
Como estava antes: \" + str(maior_valor_global) + \" Tam_populacao: \" + str(len(populacao)))\n populacao_local = []\n # Colocar o melhor individuo na populacao caso ele nรฃo esteja lรก.\n if populacao.__contains__(melhor_individuo) == 0:\n populacao_local.append(copy.deepcopy(melhor_individuo))\n ils_paralelizado(populacao)\n maior_valor, individuo = pega_melhor_avaliacao(populacao)\n if maior_valor > maior_valor_global:\n maior_valor_global = maior_valor\n melhor_individuo = copy.deepcopy(individuo)\n print(\"Melhoria com ILS! foi para \" + str(maior_valor_global))\n\n return melhor_individuo, maior_valor_global\n\n\ndef tabuleiro_final(resposta):\n print(\"Melhor resposta: \" + str(avalia_solucao(resposta)))\n print(resposta)\n print(\"Tabuleiro:\")\n matriz_avaliacao = [[0 for x in range(tamanho_tabuleiro)] for y in range(tamanho_tabuleiro)]\n x = inicial_x\n y = inicial_y\n matriz_avaliacao[x][y] = -1000\n for r in range(len(resposta)):\n x_mov, y_mov = pega_posicao_pulo(x, y, resposta[r])\n if posicao_valida(x_mov, y_mov, matriz_avaliacao):\n x = x_mov\n y = y_mov\n matriz_avaliacao[x][y] = matriz_avaliacao[x][y] + r + 1\n\n matriz_avaliacao[inicial_x][inicial_y] = \"I\"\n for pos in range(tamanho_tabuleiro):\n print(matriz_avaliacao[pos])\n return matriz_avaliacao\n\n\ndef passeio_cavalo(populacao: int, n_reproducoes: int, taxa_mutacao: int, qtd_genes_mutaveis: int,\n iteracoes: int, executar_local, worksheet, rodadas: int, bold_format, cell_format):\n resposta_global = []\n avaliacao_melhor = 0\n soma_melhores = 0\n tempo_inicio_global = time.time()\n ini_x = \"X\" + str(inicial_x + 1)\n ini_y = \"Y\" + str(inicial_y + 1)\n pos_inicial = ini_x + \", \" + ini_y\n\n itrs_ils = 0\n itrs_simulated = 0\n vlr_temp = 0\n if executar_local == 1:\n itrs_ils = iteracoes_ils\n itrs_simulated = iteracoes_simulated\n vlr_temp = valor_temperatura\n\n for rodada in range(rodadas):\n print(str(rodada + 1))\n tempo_inicio = time.time()\n\n resposta, avaliacao = algoritmo_genetico(tam_populacao=populacao, n_reproducoes=n_reproducoes,\n taxa_mutacao=taxa_mutacao, qtd_genes_mutaveis=qtd_genes_mutaveis,\n iteracoes=iteracoes, executar_local=executar_local)\n if avaliacao > avaliacao_melhor:\n resposta_global = copy.deepcopy(resposta)\n avaliacao_melhor = avaliacao\n\n soma_melhores += avaliacao\n print(\"Avaliacao final: \" + str(avaliacao))\n print(str(resposta))\n tempo_fim = time.time()\n duracao = tempo_fim - tempo_inicio\n dur_delta = timedelta(seconds=duracao)\n print(\"Duracao: \" + str(dur_delta))\n porcentagem_acertos_atual = avaliacao*100/avaliacao_maxima\n linha_execucao = [str(populacao), str(n_reproducoes), str(taxa_mutacao), str(qtd_genes_mutaveis),\n str(iteracoes), pos_inicial, str(avaliacao), str(porcentagem_acertos_atual), str(rodada + 1),\n str(dur_delta), str(executar_local), str(itrs_ils), str(itrs_simulated), str(vlr_temp),\n str(resposta)]\n worksheet.write_row(1 + rodada, 0, linha_execucao, cell_format)\n\n tempo_fim_global = time.time()\n duracao_global = str(timedelta(seconds=(tempo_fim_global - tempo_inicio_global)))\n\n media_correta = soma_melhores/rodadas\n porcentagem_acerto = (media_correta*100/avaliacao_maxima)\n\n txt_exc_local = \"Nรฃo\"\n if executar_local == 1:\n txt_exc_local = \"Sim\"\n\n resultado_final = [str(populacao), str(n_reproducoes), str(taxa_mutacao), str(qtd_genes_mutaveis), str(iteracoes),\n pos_inicial, str(media_correta), str(porcentagem_acerto), str(rodadas), duracao_global,\n txt_exc_local, str(itrs_ils), str(itrs_simulated), str(vlr_temp), 
str(resposta_global)]\n\n linha_header_media = ['Populacao', 'Numero Reproducoes', 'Taxa Mutacao', 'Genes Mutaveis', 'Iteracoes',\n 'Posicao Inicial', 'Media Passos Corretos', '% Acerto Mรฉdia', 'Total de Execuรงรตes', 'Tempo Total',\n 'Executou Ils', 'Iteracoes Ils', 'Iteracoes Simulated', 'Temperatura', 'Movimentos da melhor soluรงรฃo']\n\n worksheet.write_row(1 + rodadas + 1, 0, linha_header_media, bold_format)\n\n # header + rodadas + linha nova agora\n worksheet.write_row(1 + rodadas + 2, 0, resultado_final, bold_format)\n\n # Desenha o tabuleiro\n offset_x = 1 + rodadas + 4\n\n for y in range(tamanho_tabuleiro):\n worksheet.write_row(offset_x + y, 0, str(y + 1), bold_format)\n\n tabuleiro = tabuleiro_final(resposta_global)\n\n for x in range(tamanho_tabuleiro):\n for y in range(tamanho_tabuleiro):\n t = str(tabuleiro[y][x])\n worksheet.write(offset_x + y, x + 2, t, bold_format)\n\n worksheet.write(offset_x + 9, 2, \"A\", bold_format)\n worksheet.write(offset_x + 9, 3, \"B\", bold_format)\n worksheet.write(offset_x + 9, 4, \"C\", bold_format)\n worksheet.write(offset_x + 9, 5, \"D\", bold_format)\n worksheet.write(offset_x + 9, 6, \"E\", bold_format)\n worksheet.write(offset_x + 9, 7, \"F\", bold_format)\n worksheet.write(offset_x + 9, 8, \"G\", bold_format)\n worksheet.write(offset_x + 9, 9, \"H\", bold_format)\n\n for y in range(tamanho_tabuleiro):\n worksheet.write(offset_x + 10, y + 2, str(y + 1), bold_format)\n\n\n# Press the green button in the gutter to run the script.\nif __name__ == '__main__':\n\n workbook = xlsxwriter.Workbook('C:/Users/frien/Documents/Cavalo Conjunto 2 SEM LOCAL.xlsx')\n resultados_sheet = workbook.add_worksheet(name='Resultados')\n\n linha_header = ['Populacao', 'Numero Reproducoes', 'Taxa Mutacao', 'Genes Mutaveis', 'Iteracoes',\n 'Posicao Inicial', 'Passos Corretos', '% Acerto', 'Teste', 'Tempo Testes',\n 'Executou Ils', 'Iteracoes Ils', 'Iteracoes Simulated', 'Temperatura', 'Movimentos']\n\n cell_format_bold = workbook.add_format({'bold': True})\n cell_format_bold.set_align('center')\n cell_format_bold.set_font('Times New Roman')\n cell_format_bold.set_font_size(12)\n cell_format_normal = workbook.add_format()\n cell_format_normal.set_align('center')\n cell_format_normal.set_font('Times New Roman')\n cell_format_normal.set_font_size(12)\n resultados_sheet.write_row(0, 0, linha_header, cell_format=cell_format_normal)\n passeio_cavalo(populacao=200, n_reproducoes=180, taxa_mutacao=100, qtd_genes_mutaveis=1,\n iteracoes=1000, executar_local=1, worksheet=resultados_sheet, rodadas=10, bold_format=cell_format_bold, cell_format=cell_format_normal)\n workbook.close()\n" } ]
2
rdppathak/cython-cpp-wrapper
https://github.com/rdppathak/cython-cpp-wrapper
8f6b8b78e86a981230608177d46f167a0d76648e
def58ec48bacc6b57832fabbed7cab019c7a0fe5
287dfebb1a4db5054bcc94443de39241f4ffbbb1
refs/heads/master
2021-01-10T05:03:30.124046
2015-12-09T03:43:51
2015-12-09T03:43:51
47,635,655
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7419354915618896, "alphanum_fraction": 0.7419354915618896, "avg_line_length": 19.5, "blob_id": "56625c00e33309b9bfe91388f2fa2d3c1a689908", "content_id": "21f2ce8527efe2229bacc2e24e783406ba8563a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 124, "license_type": "no_license", "max_line_length": 44, "num_lines": 6, "path": "/setup.py", "repo_name": "rdppathak/cython-cpp-wrapper", "src_encoding": "UTF-8", "text": "from distutils.core import setup\nfrom Cython.Build import cythonize\n\nsetup(\n ext_modules = cythonize(['pyclass.pyx'])\n)\n\n" } ]
1
cmcahoon01/PongNeuralNet
https://github.com/cmcahoon01/PongNeuralNet
262cbe25937ab2e09c6026408c44626eab75e28e
babee31082cc62997a4742d25365bad07d1b031f
ed07b935f927221269c149ed9bb449de06d12994
refs/heads/master
2020-04-05T18:48:34.187123
2018-11-11T19:25:06
2018-11-11T19:25:06
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6344812512397766, "alphanum_fraction": 0.6484307050704956, "avg_line_length": 39.96428680419922, "blob_id": "520189a1d364bfb9213e5d7f1a2072ad63a8bbbd", "content_id": "43a08ff65a100fbfd0c730a11bdb721d9cef2e32", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4588, "license_type": "no_license", "max_line_length": 122, "num_lines": 112, "path": "/Neural/NeuralNet.py", "repo_name": "cmcahoon01/PongNeuralNet", "src_encoding": "UTF-8", "text": "import numpy as np\nimport pylab as plt\nimport copy\n\nclass NeuralNetwork():\n\n def __init__(self,numInputs,numHiddenNodes,numHiddenLayers,numOutputs):\n self.inputs=np.zeros(numInputs)\n self.HiddenNodes=np.zeros([numHiddenLayers,numHiddenNodes])\n self.outputs=np.zeros(numOutputs)\n self.inWeights=np.zeros([numHiddenNodes,numInputs])\n self.hiddenWeights=np.zeros([numHiddenLayers-1,numHiddenNodes,numHiddenNodes])\n self.outWeights=np.zeros([numOutputs,numHiddenNodes])\n np.set_printoptions(precision=2)\n np.set_printoptions(suppress=True)\n self.score=0\n\n def randomizeWeights(self):\n self.inWeights=np.random.rand(len(self.HiddenNodes[0]),len(self.inputs))*2-1\n self.hiddenWeights=np.random.rand(len(self.HiddenNodes)-1,len(self.HiddenNodes[0]),len(self.HiddenNodes[0]))*2-1\n self.outWeights=np.random.rand(len(self.outputs),len(self.HiddenNodes[-1]))*2-1\n\n def propagate(self,givenInputs):\n self.inputs=np.array(givenInputs)\n self.HiddenNodes[0]=self.inWeights.dot(self.inputs)\n for i in range(1,len(self.HiddenNodes)):\n self.HiddenNodes[i]=self.hiddenWeights[i-1].dot(self.HiddenNodes[i-1])\n self.outputs=self.outWeights.dot(self.HiddenNodes[-1])\n return self.outputs.tolist()\n\n def mutate(self,mutationChance,mutationRate):\n for i in range(len(self.inWeights)):\n for j in range(len(self.inWeights[i])):\n if np.random.random()<mutationChance:\n self.inWeights[i][j]=self.adjust(self.inWeights[i][j],mutationRate)\n for i in range(len(self.hiddenWeights)):\n for j in range(len(self.hiddenWeights[i])):\n if np.random.random()<mutationChance:\n self.hiddenWeights[i][j]=self.adjust(self.hiddenWeights[i][j],mutationRate)\n for i in range(len(self.outWeights)):\n for j in range(len(self.outWeights[i])):\n if np.random.random()<mutationChance:\n self.outWeights[i][j]=self.adjust(self.outWeights[i][j],mutationRate)\n\n def adjust(self,toAdj,mutationRate):\n randChange=np.random.random()**(1/mutationRate)\n if np.random.random()>0.5:\n randChange*=-1\n return toAdj+randChange\n\n def clone(self):\n new=copy.deepcopy(self)\n new.clrScore()\n return new\n\n def setScore(self,n):\n self.score=n\n\n def addScore(self,n):\n self.score+=n\n\n def clrScore(self):\n self.score=0\n\n def getScore(self):\n return self.score\n\nclass sigmaNet(NeuralNetwork):\n def __init__(self,numInputs,numHiddenNodes,numHiddenLayers,numOutputs):\n self.inputs=np.zeros(numInputs)\n self.HiddenNodes=np.zeros([numHiddenLayers,numHiddenNodes])\n self.outputs=np.zeros(numOutputs)\n self.inWeights=np.zeros([numHiddenNodes,numInputs+1])\n self.hiddenWeights=np.zeros([numHiddenLayers-1,numHiddenNodes,numHiddenNodes+1])\n self.outWeights=np.zeros([numOutputs,numHiddenNodes+1])\n np.set_printoptions(precision=2)\n np.set_printoptions(suppress=True)\n self.score=0\n\n def randomizeWeights(self):\n self.inWeights=np.random.rand(len(self.HiddenNodes[0]),len(self.inputs)+1)*2-1\n self.hiddenWeights=np.random.rand(len(self.HiddenNodes)-1,len(self.HiddenNodes[0]),len(self.HiddenNodes[0])+1)*2-1\n 
self.outWeights=np.random.rand(len(self.outputs),len(self.HiddenNodes[-1])+1)*2-1\n\n    def sigmoid(self,arr):\n        # despite the name, this applies a ReLU-style activation: max(arr, 0)\n        return (arr+abs(arr))/2\n\n    def propagate(self,givenInputs,p=False):\n        self.inputs=np.array(givenInputs)\n        self.inputs=np.append(self.inputs,1)  # append the bias input\n        if p:\n            print(self.inputs)\n        self.HiddenNodes[0]=self.inWeights.dot(self.inputs)\n        if p:\n            print(self.HiddenNodes[0])\n        self.HiddenNodes[0]=self.sigmoid(self.HiddenNodes[0])\n        if p:\n            print(self.HiddenNodes[0])\n        for i in range(1,len(self.HiddenNodes)):\n            self.HiddenNodes[i]=self.hiddenWeights[i-1].dot(np.append(self.HiddenNodes[i-1],[1]))\n            self.HiddenNodes[i]=self.sigmoid(self.HiddenNodes[i])\n        self.outputs=self.outWeights.dot(np.append(self.HiddenNodes[-1],[1]))\n        if p:\n            print(self.outputs)\n        self.outputs=self.sigmoid(self.outputs)\n        return self.outputs.tolist()\n\n    def adjust(self,toAdj,mutationRate):\n        # keep the base of the fractional power non-negative; a negative base\n        # would make the result complex\n        randChange=np.random.random()**(1/mutationRate)\n        return toAdj+randChange*np.random.choice([-1,1])\n" }, { "alpha_fraction": 0.49511781334877014, "alphanum_fraction": 0.5417263507843018, "avg_line_length": 29.847389221191406, "blob_id": "9104a2d60b5f29a177c2c372d79f42566168c918", "content_id": "85f46896085a1aa78b45d03f4b42ec09118062af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7681, "license_type": "no_license", "max_line_length": 129, "num_lines": 249, "path": "/playPong.py", "repo_name": "cmcahoon01/PongNeuralNet", "src_encoding": "UTF-8", "text": "from Neural import NeuralNet as nn\nimport pygame\nimport random\nimport time\n\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\n\nclass Ball():\n    def __init__(self, r, paddle1, paddle2, x=250, y=250, xS=0, yS=0):\n        self.r=r\n        self.x=x\n        self.y=y\n        self.xSpeed = xS\n        self.ySpeed = yS\n        self.paddle1 = paddle1\n        self.paddle2 = paddle2\n\n    def changeSpeed(self, dx, dy):\n        self.xSpeed = dx\n        self.ySpeed = dy\n\n    def getXSpeed(self):\n        return self.xSpeed\n\n    def getYSpeed(self):\n        return self.ySpeed\n\n    def move(self,dx,dy):\n        self.x+=dx\n        self.y+=dy\n\n    def step(self):\n        # bounce off the side walls, the top/bottom walls, and the paddles\n        if self.x - self.r<=0:\n            self.xSpeed=abs(self.xSpeed)\n        if self.x + self.r>=500:\n            self.xSpeed=-abs(self.xSpeed)\n        if self.y - self.r<=0:\n            self.ySpeed=abs(self.ySpeed)\n        if self.y + self.r>=600:\n            self.ySpeed=-abs(self.ySpeed)\n        if 550<=self.y + self.r<=560:\n            if self.paddle1.x-self.paddle1.getWidth()/2<self.x+self.r and self.paddle1.x+self.paddle1.getWidth()/2>self.x-self.r:\n                self.ySpeed=-abs(self.ySpeed)\n                self.xSpeed+=(self.x-self.paddle1.x)/5\n        if 40<=self.y - self.r<=50:\n            if self.paddle2.x-self.paddle2.getWidth()/2<self.x+self.r and self.paddle2.x+self.paddle2.getWidth()/2>self.x-self.r:\n                self.ySpeed=abs(self.ySpeed)\n                self.xSpeed+=(self.x-self.paddle2.x)/5\n        self.move(self.xSpeed, self.ySpeed)\n\nclass Paddle():\n    def __init__(self,x,y,net,width=100,height=10):\n        self.x=x\n        self.y=y\n        self.width=width\n        self.height=height\n        self.net=net\n\n    def getWidth(self):\n        return self.width\n\n    def act(self, inputs):\n        move = self.net.propagate(inputs)\n        move = move.index(max(move))  # strongest output wins: 0=left, 1=stay, 2=right\n        if move == 0:\n            self.left()\n        elif move == 2:\n            self.right()\n\n    def left(self):\n        if self.x>self.width/2:\n            self.x-=10\n\n    def right(self):\n        if self.x<500-self.width/2:\n            self.x+=10\n\ndef resetPlayers(new=[]):\n    players=[]\n    for i in range(numPlayers):\n\n        if len(new)==0:\n            net1 = nn.NeuralNetwork(5,6,5,3)\n            net1.randomizeWeights()\n            net2 = nn.NeuralNetwork(5,6,5,3)\n            net2.randomizeWeights()\n        else:\n            net1 = new[i*2]\n            net2 = new[i*2+1]\n\n        paddle1 = Paddle(250,555,net1)\n        paddle2 = Paddle(250,45,net2)\n\n        ball = Ball(10,paddle1,paddle2,255,300,random.randint(-10,10),random.choice([10,-10]))\n\n        players.append([paddle1,paddle2,ball])\n    return players\n\npygame.init()\nsize = [500, 600]\nscreen = pygame.display.set_mode(size)\npygame.display.set_caption(\"Play Pong\")\ndone = False\nclock = pygame.time.Clock()\n\nnumPlayers=100\n\nplayers=resetPlayers()\nenemyPaddle= nn.NeuralNetwork(1,1,1,1)\nn=0\nwhile not done:\n\n    remaining = [x for x in range(numPlayers)]\n    score=0\n    nextGen=[]\n    while len(remaining)>0:\n        screen.fill(BLACK)\n        for i in list(remaining):  # iterate over a copy so players can be removed safely\n            player = players[i]\n            paddle1=player[0]\n            paddle2=player[1]\n            ball=player[2]\n            inputs1 = [\n                ball.y/300-1,\n                ball.getXSpeed()/20,\n                #paddle2.y/300-1,\n                #paddle2.x/250-1,\n                paddle1.y/300-1,\n                paddle1.x/250-1,\n                ball.x/250-1\n            ]\n            paddle1.act(inputs1)\n            inputs2 = [\n                ball.y/300-1,\n                ball.getXSpeed()/20,\n                #paddle1.y/300-1,\n                #paddle1.x/250-1,\n                paddle2.y/300-1,\n                paddle2.x/250-1,\n                ball.x/250-1\n            ]\n            paddle2.act(inputs2)\n            ball.step()\n            if score > 1000:\n                remaining.remove(i)\n                nextGen.append(random.choice([paddle1.net,paddle2.net]))\n            elif (ball.y+10>570 or ball.y-10<30) and len(remaining)>0:\n                if ball.y+10>570:\n                    nextGen.append(paddle2.net)\n                else:\n                    nextGen.append(paddle1.net)\n                remaining.remove(i)\n            w=paddle1.width/2\n            h=paddle1.height/2\n            pygame.draw.circle(screen, WHITE, [round(ball.x), round(ball.y)], ball.r)\n            pygame.draw.rect(screen, WHITE, [round(paddle1.x-w), round(paddle1.y-h), paddle1.width, paddle1.height])\n            pygame.draw.rect(screen, WHITE, [round(paddle2.x-w), round(paddle2.y-h), paddle2.width, paddle2.height])\n        score+=1\n        if score > 50000:  # rendering is effectively disabled during training; rounds end long before this\n            pygame.display.flip()\n            #clock.tick(100)\n\n        for event in pygame.event.get():\n            if event.type == pygame.QUIT:\n                done = True\n                remaining=[]\n            elif event.type == pygame.KEYDOWN:\n                if event.key == pygame.K_SPACE:\n                    done = True\n                    remaining=[]\n    if not done:\n        mutationChance=1\n        mutationScale=1\n        if score>100:\n            mutationChance=0.5\n            mutationScale=0.5\n        if score>500:\n            mutationChance=0.35\n            mutationScale=0.4\n        if score > 900:\n            mutationChance=0.2\n            mutationScale=0.3\n        for org in nextGen:\n            org.addScore(1)\n        for _ in range(numPlayers*2-len(nextGen)):\n            newOrg=random.choice(nextGen).clone()\n            newOrg.mutate(mutationChance,mutationScale)\n            nextGen.append(newOrg)\n        print(nextGen[4].inWeights,nextGen[4].hiddenWeights,nextGen[4].outWeights)\n        print(n+1)\n        players=resetPlayers(nextGen)\n        n+=1\n    else:\n        for org in nextGen:\n            if org.getScore()>enemyPaddle.getScore():\n                enemyPaddle=org\n        print(enemyPaddle.getScore())\n\n\nplayerPaddle = Paddle(250,555,nn.NeuralNetwork(1,1,1,1))\nenemyPaddle = Paddle(250,45,enemyPaddle)\nball = Ball(10,playerPaddle,enemyPaddle,250,300,random.randint(-10,10),random.choice([10,-10]))\nleft=False\nright=False\nwhile done:\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            done=False\n        elif event.type == pygame.KEYDOWN:\n            if event.key == pygame.K_LEFT: # left arrow turns left\n                left = True\n            elif event.key == pygame.K_RIGHT:\n                right = True\n            elif event.key == pygame.K_SPACE:\n                done = False\n        elif event.type == pygame.KEYUP: # check for key releases\n            if event.key == pygame.K_LEFT: # left arrow turns left\n                left = False\n            elif event.key == pygame.K_RIGHT: # right arrow turns right\n                right = False\n    screen.fill(BLACK)\n    inputs = [\n        ball.y/300-1,\n        ball.getXSpeed()/20,\n        #playerPaddle.y/300-1,\n        #playerPaddle.x/250-1,\n        enemyPaddle.y/300-1,\n        enemyPaddle.x/250-1,\n        ball.x/250-1\n    ]\n    enemyPaddle.act(inputs)\n    ball.step()\n    if left:\n        playerPaddle.left()\n    elif right:\n        playerPaddle.right()\n    w=playerPaddle.width/2\n    h=playerPaddle.height/2\n    pygame.draw.circle(screen, WHITE, [round(ball.x), round(ball.y)], ball.r)\n    pygame.draw.rect(screen, WHITE, [round(playerPaddle.x-w), round(playerPaddle.y-h), 2*w, 2*h])\n    pygame.draw.rect(screen, WHITE, [round(enemyPaddle.x-w), round(enemyPaddle.y-h), enemyPaddle.width, enemyPaddle.height])\n    if abs(ball.getXSpeed())>20:\n        ball.xSpeed=abs(ball.getXSpeed())/ball.getXSpeed()*20\n    pygame.display.flip()\n    clock.tick(30)\n\npygame.quit()\n" } ]
2
raja456881/Lms3
https://github.com/raja456881/Lms3
617d3654b349265b71d365d06699f116b382048d
a71ae0cd5af8cfb1c49ce9db4c34e9f9388c5318
2c7aa6bfca1f9374434435feac2f168dfcba0288
refs/heads/main
2022-12-30T23:47:01.296996
2020-10-17T16:43:27
2020-10-17T16:43:27
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6900115013122559, "alphanum_fraction": 0.7089552283287048, "avg_line_length": 37.59090805053711, "blob_id": "e0d3e0595167077bc11b2ef2ac873426a587f1c5", "content_id": "81b387706c79b3d5aa148ef799851cff89834045", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1742, "license_type": "no_license", "max_line_length": 88, "num_lines": 44, "path": "/student/models.py", "repo_name": "raja456881/Lms3", "src_encoding": "UTF-8", "text": "from django.db import models\r\nfrom inventory.models import CourseDetails\r\nfrom multiuser.models import User\r\nfrom phone_field import PhoneField\r\nfrom mptt.models import MPTTModel, TreeForeignKey\r\nfrom phonenumber_field.modelfields import PhoneNumberField\r\nfrom django.contrib.auth.models import AbstractUser\r\n\r\ngender=(('Male','Male'),('Female','Female'),('others','Transgender'))\r\n\r\n\r\nclass Student(models.Model):\r\n\r\n user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='student')\r\n student_FullName = models.CharField(max_length=150, blank=True)\r\n student_About=models.CharField(blank=True,null=False,max_length=2500)\r\n student_gender = models.CharField(choices=gender,max_length=50)\r\n\r\n student_Image = models.FileField(upload_to='Student_image/', null=True , blank=True)\r\n stdent_Email = models.EmailField(max_length=111)\r\n student_created_at = models.DateTimeField(auto_now_add=True)\r\n\r\n course_id = models.ManyToManyField(CourseDetails, default=1)\r\n student_PhoneNo1 = PhoneNumberField(null=False, blank=False, unique=False)\r\n student_PhoneNo2 = PhoneNumberField(null=False, blank=False, unique=False)\r\n student_Address=models.CharField(max_length=250,default='')\r\n student_City = models.CharField(max_length=100, default='')\r\n student_Zipcode= models.IntegerField(default=273003)\r\n student_State = models.CharField(max_length=100, default='')\r\n student_Country = models.CharField(max_length=100, default='')\r\n objects = models.Manager()\r\n\r\n def __str__(self):\r\n return self.student_FullName\r\n\r\n\r\n\r\n @property\r\n def getImageURL(self):\r\n try:\r\n url = self.Student_Image.url\r\n except:\r\n url = ''\r\n return url\r\n" }, { "alpha_fraction": 0.681219220161438, "alphanum_fraction": 0.6939196586608887, "avg_line_length": 43.615943908691406, "blob_id": "9114beb4e4d24d3d218b0cae8445440192609cdf", "content_id": "de69a8f5844b3c03869700f68fd4b8a3df6f089b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6299, "license_type": "no_license", "max_line_length": 102, "num_lines": 138, "path": "/multiuser/models.py", "repo_name": "raja456881/Lms3", "src_encoding": "UTF-8", "text": "from django.contrib.auth.models import AbstractBaseUser,BaseUserManager, PermissionsMixin,AbstractUser\r\nfrom django.db import models\r\nfrom django.utils import timezone\r\nfrom django.db import models\r\nfrom phone_field import PhoneField\r\nfrom phonenumber_field.modelfields import PhoneNumberField\r\nfrom django.contrib.auth import get_user_model\r\n\r\n\r\napprovalChoice = (\r\n ('Verified', 'Verified'),\r\n ('Pending', 'Pending'),\r\n ('Discarded', 'Discarded'),\r\n )\r\n\r\ngenderChoice = (\r\n ('Male', 'Male'),\r\n ('Female', 'Female'),\r\n ('Other', 'Other'),\r\n )\r\n\r\nclass User(AbstractUser):\r\n\r\n is_trainer = models.BooleanField(default=False)\r\n is_admin = models.BooleanField(default=False)\r\n is_student=models.BooleanField(default=False)\r\n is_institute = 
models.BooleanField(default=False)\r\n is_franchise = models.BooleanField(default=False)\r\n\r\n\r\nclass AdminProfile(models.Model):\r\n\r\n user = models.OneToOneField(User , on_delete=models.CASCADE,primary_key=True,related_name='admin')\r\n adminFullName = models.CharField(max_length=150 , blank=True)\r\n adminGender = models.CharField(max_length = 30 , choices =genderChoice , default=\"\")\r\n adminAbout= models.TextField(blank=True , null=True)\r\n adminEmail = models.EmailField(max_length = 100 , blank = True)\r\n adminImage = models.FileField(upload_to='Admin_image/', null=True , blank=True)\r\n adminPhoneNo1 = PhoneNumberField(null=False, blank=False, unique=False)\r\n adminPhoneNo2 = PhoneNumberField(null=False, blank=False, unique=False)\r\n adminAddress = models.CharField(max_length= 500 , blank = True)\r\n adminCity = models.CharField(blank=True,max_length=50)\r\n adminPostalCode = models.IntegerField(blank=True,null=True)\r\n adminState = models.CharField(blank=True,max_length=50)\r\n adminCountry = models.CharField(max_length=20,blank=False,default='')\r\n adminStatus = models.CharField(max_length = 30 , choices = approvalChoice, default=\"Pending\")\r\n adminAddedDate = models.DateTimeField(auto_now_add=True)\r\n def __str__(self):\r\n return self.adminFullName\r\n\r\n @property\r\n def getImageURL(self):\r\n try:\r\n url = self.adminImage.url\r\n except:\r\n url = ''\r\n return url\r\n\r\nclass TrainerProfile(models.Model):\r\n user = models.OneToOneField(User , on_delete=models.CASCADE,primary_key=True)\r\n trainerFullName = models.CharField(max_length=150 , blank=True)\r\n trainerGender = models.CharField(max_length = 30 , choices =genderChoice , default=\"\")\r\n trainerAbout= models.TextField(blank=True , null=True)\r\n trainerEmail = models.EmailField(max_length = 100 , blank = True)\r\n trainerImage = models.FileField(upload_to='Trainer_image/', null=True , blank=True)\r\n trainerPhoneNo1 = PhoneNumberField(null=False, blank=False, unique=False)\r\n trainerPhoneNo2 = PhoneNumberField(null=False, blank=False, unique=False)\r\n trainerAddress = models.CharField(max_length= 500 , blank = True)\r\n trainerCity = models.CharField(blank=True,max_length=50)\r\n trainerPostalCode = models.IntegerField(blank=True,null=True)\r\n trainerState = models.CharField(blank=True,max_length=50)\r\n trainerCountry = models.CharField(max_length=20,blank=False,default='')\r\n trainerStatus = models.CharField(max_length = 30 , choices = approvalChoice, default=\"Pending\")\r\n trainerAddedDate = models.DateTimeField(auto_now_add=True)\r\n def __str__(self):\r\n return self.trainerFullName\r\n\r\n @property\r\n def getImageURL(self):\r\n try:\r\n url = self.trainerImage.url\r\n except:\r\n url = ''\r\n return url\r\n\r\nclass InstituteProfile(models.Model):\r\n\r\n user = models.OneToOneField(User , on_delete=models.CASCADE,primary_key=True)\r\n instituteName = models.CharField(max_length = 200 , blank = True)\r\n instituteAbout= models.TextField(blank=True , null=True)\r\n instituteEmail = models.EmailField(max_length = 100 , blank = True)\r\n instituteImage = models.FileField(upload_to='Institute_image/', null=True , blank=True)\r\n institutePhoneNo1 = PhoneNumberField(null=True, blank=True, unique=False)\r\n institutePhoneNo2 = PhoneNumberField(null=True, blank=True, unique=False)\r\n instituteAddress = models.CharField(max_length= 500 , blank = True)\r\n instituteCity = models.CharField(blank=True,max_length=50)\r\n institutePostalCode = models.IntegerField(blank=True,null=True)\r\n 
instituteState = models.CharField(blank=True,max_length=50)\r\n instituteCountry = models.CharField(max_length=20,blank=False,default='')\r\n instituteStatus = models.CharField(max_length = 30 , choices = approvalChoice,default=\"Pending\")\r\n instituteAddedDate = models.DateTimeField(auto_now_add=True)\r\n def __str__(self):\r\n return self.instituteName\r\n\r\n @property\r\n def getImageURL(self):\r\n try:\r\n url = self.instituteImage.url\r\n except:\r\n url = ''\r\n return url\r\n\r\nclass FranchiseProfile(models.Model):\r\n\r\n user = models.OneToOneField(User , on_delete=models.CASCADE,primary_key=True)\r\n franchiseName = models.CharField(max_length = 200 , blank = True)\r\n franchiseAbout= models.TextField(blank=True , null=True)\r\n franchiseEmail = models.EmailField(max_length =100 , blank = True)\r\n franchiseImage = models.FileField(upload_to='Franchise_image/', null=True , blank=True)\r\n franchisePhoneNo1 = PhoneNumberField(null=True, blank=True, unique=False)\r\n franchisePhoneNo2 = PhoneNumberField(null=True, blank=True, unique=False)\r\n franchiseAddress = models.CharField(max_length= 500 , blank = True)\r\n franchiseCity = models.CharField(blank=True,max_length=50)\r\n franchisePostalCode =models.IntegerField(blank=True,null=True)\r\n franchiseState = models.CharField(blank=True,max_length=50)\r\n franchiseCountry = models.CharField(max_length=20,blank=False,default='')\r\n franchiseStatus = models.CharField(max_length = 30 , choices = approvalChoice, default=\"Pending\")\r\n franchiseAddedDate = models.DateTimeField(auto_now_add=True)\r\n def __str__(self):\r\n return self.franchiseName\r\n\r\n @property\r\n def getImageURL(self):\r\n try:\r\n url = self.franchiseImage.url\r\n except:\r\n url = ''\r\n return url\r\n\r\n\r\n" }, { "alpha_fraction": 0.5432141423225403, "alphanum_fraction": 0.7198889255523682, "avg_line_length": 18.206666946411133, "blob_id": "5551ae13fe32fa31f13310799053a46e74d742d0", "content_id": "d57e6098363bea6905a075a96fce190d922bb2ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 2881, "license_type": "no_license", "max_line_length": 39, "num_lines": 150, "path": "/requirements.txt", "repo_name": "raja456881/Lms3", "src_encoding": "UTF-8", "text": 
"alembic==1.1.0.dev0\nappdirs==1.4.3\napturl==0.5.2\narabic-reshaper==2.1.0\nasgiref==3.2.10\nBabel==2.6.0\nbcrypt==3.1.7\nblinker==1.4\nBrlapi==0.7.0\ncertifi==2019.11.28\nchardet==3.0.4\nClick==7.0\ncolorama==0.4.3\ncommand-not-found==0.3\ncoreapi==2.3.3\ncoreschema==0.0.4\ncryptography==2.8\ncupshelpers==1.0\ndbus-python==1.2.16\ndefer==1.0.6\ndistlib==0.3.0\ndistro==1.4.0\ndistro-info===0.23ubuntu1\nDjango==2.2.15\ndjango-address==0.2.5\ndjango-better-admin-arrayfield==1.3.0\ndjango-bootstrap-datepicker-plus==3.0.5\ndjango-ckeditor==6.0.0\ndjango-cors-headers==3.5.0\ndjango-crispy-forms==1.9.2\ndjango-extensions==3.0.9\ndjango-filter==2.3.0\ndjango-js-asset==1.2.2\ndjango-meta==1.7.0\ndjango-mptt==0.11.0\ndjango-multiselectfield==0.1.12\ndjango-phone-field==1.8.1\ndjango-phonenumber-field==5.0.0\ndjango-phonenumbers==1.0.1\ndjango-rest-swagger==2.2.0\ndjango-restframework==0.0.1\ndjango-widget-tweaks==1.4.8\ndjangorestframework==3.12.1\ndjangorestframework-simplejwt==4.4.0\ndnspython==2.0.0\ndrf-yasg==1.17.1\nduplicity==0.8.12.0\nemail-validator==1.1.1\nentrypoints==0.3\nfasteners==0.14.1\nfilelock==3.0.12\nFlask==1.0.2\nFlask-BabelEx==0.9.3\nFlask-Compress==1.4.0\nFlask-Gravatar==0.5.0\nFlask-Login==0.4.1\nFlask-Mail==0.9.1\nFlask-Migrate==2.4.0\nFlask-Paranoid==0.2.0\nFlask-Principal==0.4.0\nFlask-Security-Too==3.4.4\nFlask-SQLAlchemy==2.4.1\nFlask-WTF==0.14.3\nfuture==0.18.2\nhtml5lib==1.1\nhttplib2==0.14.0\nidna==2.8\nimportlib-metadata==1.5.0\ninflection==0.5.1\nitsdangerous==1.1.0\nitypes==1.2.0\nJinja2==2.11.2\nkeyring==18.0.1\nlanguage-selector==0.1\nlaunchpadlib==1.10.13\nlazr.restfulclient==0.14.2\nlazr.uri==1.0.3\nldap3==2.8.1\nlockfile==0.12.2\nlouis==3.12.0\nmacaroonbakery==1.3.1\nMako==1.1.0\nMarkupSafe==1.1.0\nmonotonic==1.5\nmore-itertools==4.2.0\nnetifaces==0.10.4\noauthlib==3.1.0\nolefile==0.46\nopenapi-codec==1.3.2\npackaging==20.4\nparamiko==2.6.0\npasslib==1.7.2\npexpect==4.6.0\npgadmin4==4.26\nphonenumbers==8.12.11\nPillow==7.2.0\npipenv==11.9.0\nprotobuf==3.6.1\npsutil==5.7.2\npsycopg2==2.8.5\npsycopg2-binary==2.8.6\npyasn1==0.4.8\npycairo==1.16.2\npycups==1.9.73\nPyGObject==3.36.0\npyinotify==0.9.6\nPyJWT==1.7.1\npymacaroons==0.13.0\nPyNaCl==1.3.0\npyOpenSSL==19.0.0\npyparsing==2.4.7\nPyPDF2==1.26.0\npyRFC3339==1.1\npython-apt==2.0.0+ubuntu0.20.4.1\npython-bidi==0.4.2\npython-dateutil==2.8.1\npython-debian===0.1.36ubuntu1\npython-decouple==3.3\npytz==2018.9\npyxdg==0.26\nPyYAML==5.3.1\nreportlab==3.5.34\nrequests==2.22.0\nrequests-unixsocket==0.2.0\nruamel.yaml==0.16.12\nruamel.yaml.clib==0.2.2\nSecretStorage==2.3.1\nsimplejson==3.16.0\nsix==1.14.0\nspeaklater==1.3\nSQLAlchemy==1.3.19\nsqlparse==0.3.1\nsshtunnel==0.1.5\nsystemd-python==234\nubuntu-advantage-tools==20.3\nubuntu-drivers-common==0.0.0\nufw==0.36\nunattended-upgrades==0.1\nuritemplate==3.0.1\nurllib3==1.25.8\nusb-creator==0.3.7\nvirtualenv==20.0.17\nvirtualenv-clone==0.3.0\nwadllib==1.3.3\nwebencodings==0.5.1\nWerkzeug==0.16.1\nWTForms==2.2.1\nxhtml2pdf==0.2.5\nxkit==0.0.0\nzipp==1.0.0\n" }, { "alpha_fraction": 0.6261682510375977, "alphanum_fraction": 0.637244701385498, "avg_line_length": 36.45333480834961, "blob_id": "9a7399e4acf8d70d60d86263aa977d567d9c76d4", "content_id": "aeaed4533d30915c0e408305753d3ea72bd6bd6d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5778, "license_type": "no_license", "max_line_length": 109, "num_lines": 150, "path": "/inventory/models.py", "repo_name": "raja456881/Lms3", "src_encoding": "UTF-8", "text": "from 
django.db import models\r\nfrom multiuser.models import *\r\nfrom mptt.models import MPTTModel, TreeForeignKey\r\nimport datetime\r\nfrom django.utils import timezone\r\nfrom multiselectfield import MultiSelectField\r\nfrom django.db.models.signals import pre_save\r\n\r\n\r\n\r\n \r\n\r\nclass Categories(MPTTModel):\r\n\r\n statusChoice = (\r\n ('Active', 'Active'),\r\n ('Inactive', 'Inactive'),\r\n ('Available', 'Available')\r\n )\r\n \r\n name = models.CharField(max_length = 250 , unique =True)\r\n parent = TreeForeignKey('self', on_delete=models.CASCADE, null=True, blank=True, related_name='children')\r\n seo_title = models.CharField(max_length=70)\r\n seo_description =models.CharField(max_length=160)\r\n slug = models.SlugField(max_length=255 , unique =True)\r\n categoryStatus = models.CharField(max_length = 30 , choices = statusChoice, null=True , blank=True)\r\n categoryCreated = models.DateTimeField(auto_now_add=True)\r\n seo_keywords = models.CharField(max_length=160)\r\n class MPTTMeta:\r\n order_insertion_by = ['name']\r\n \r\n def __str__(self):\r\n return self.name\r\n\r\n\r\nclass CourseDetails(models.Model):\r\n \r\n statusChoice = (\r\n ('Active', 'Active'),\r\n ('Inactive', 'Inactive'),\r\n ('Available', 'Available')\r\n )\r\n\r\n belongChoice = (\r\n ('Trainer','Trainer'),\r\n ('Intitute','Institute'),\r\n ('Franchise','Franchise'),\r\n )\r\n\r\n weekdayChoice = (\r\n ('Monday','Monday'),\r\n ('Tuesday','Tuesday'),\r\n ('Wednesday','Wednesday'),\r\n ('Thursday','Thursday'),\r\n ('Friday','Friday'),\r\n ('Saturday','Saturday'),\r\n ('Sunday','Sunday')\r\n )\r\n \r\n trainer = models.ForeignKey(TrainerProfile , on_delete=models.SET_NULL , blank = True , null = True)\r\n institute = models.ForeignKey(InstituteProfile, on_delete=models.SET_NULL , blank = True , null = True)\r\n franchise = models.ForeignKey(FranchiseProfile, on_delete=models.SET_NULL , blank = True , null = True)\r\n courseName = models.CharField(max_length = 400 , null = True , blank=True)\r\n courseDescription = models.TextField(null = True , blank =True)\r\n courseCategory = models.ForeignKey(Categories , on_delete=models.CASCADE , blank = True , null = True)\r\n courseOnline = models.BooleanField(blank = True , null =True , default = False)\r\n courseImage = models.FileField(upload_to='images/', null=True , blank=True)\r\n courseLive = models.BooleanField(blank = True , null =True,default = False)\r\n courseOffline = models.BooleanField(blank = True , null =True,default = False)\r\n courseStatus = models.CharField(max_length = 30 , choices = statusChoice, null=True , blank=True)\r\n coursePrice = models.DecimalField(max_digits=10,decimal_places=2 , null=True , blank=True)\r\n promocode =models.CharField(max_length=160 , null=True , blank=True)\r\n priceDiscount = models.DecimalField(max_digits=10,decimal_places=2,null=True , blank=True)\r\n totalPriceDiscount = models.DecimalField(max_digits=10,decimal_places=2,null=True , blank=True)\r\n courseCity = models.CharField(max_length = 200 , null = True , blank=True)\r\n courseState = models.CharField(max_length = 200 , null = True , blank=True)\r\n courseCountry = models.CharField(max_length = 200 , null = True,blank=True)\r\n offlineAddress = models.TextField(null=True , blank = True)\r\n offlineClassStrength = models.IntegerField(null=True , blank=True)\r\n offlineTiming = models.DateTimeField(auto_now_add=False,null=True , blank=True)\r\n offlineWeekday = MultiSelectField(choices=weekdayChoice,null=True , blank=True)\r\n onlineWeekday = 
MultiSelectField(choices=weekdayChoice,null=True , blank=True)\r\n onlineTiming = models.DateTimeField(auto_now_add=False,null=True , blank=True)\r\n courseBelong = models.CharField(max_length = 30 , choices = belongChoice, null=True , blank=True)\r\n seo_title = models.CharField(max_length=70)\r\n seo_description =models.CharField(max_length=160)\r\n seo_keywords =models.CharField(max_length=160)\r\n slug = models.SlugField(max_length=255 , unique =True)\r\n introVideo= models.FileField(upload_to='IntroVideos/', null=True,blank=True)\r\n courseCreated = models.DateTimeField(auto_now_add=True)\r\n \r\n \r\n class Meta:\r\n ordering = ['-courseCreated'] \r\n\r\n def __str__(self):\r\n return self.courseName\r\n\r\n\r\n def convert_to_ruppes(self,_id):\r\n return self.coursePrice * 70\r\n\r\n def get_total_price(self,_id):\r\n total = self.coursePrice\r\n if self.priceDiscount == 0:\r\n return total\r\n else:\r\n total = self.coursePrice - (self.coursePrice * (self.priceDiscount/100) )\r\n return total\r\n\r\n @property\r\n def getImageURL(self):\r\n try:\r\n url = self.courseImage.url\r\n except:\r\n url = ''\r\n return url\r\n\r\n @property\r\n def getIntoVideoURL(self):\r\n try:\r\n url = self.introVideo.url\r\n except:\r\n url = ''\r\n return url\r\n\r\n\r\n \r\n \r\n \r\n\r\n \r\n\r\nclass CourseVideos(models.Model):\r\n course = models.ForeignKey(CourseDetails , on_delete=models.SET_NULL , blank = True , null = True)\r\n courseVideoName= models.CharField(max_length=500,null=True , blank = True)\r\n courseVideoFile= models.FileField(upload_to='videos/', null=True,blank=True)\r\n courseVideoDescription= models.TextField(null=True , blank = True)\r\n courseVideoCreated = models.DateTimeField(auto_now_add=True)\r\n\r\n def __str__(self):\r\n return str(self.id)\r\n\r\n @property\r\n def videoURL(self):\r\n try:\r\n url = self.courseVideoFile.url\r\n except:\r\n url = ''\r\n return url\r\n\r\n\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.7947761416435242, "alphanum_fraction": 0.7947761416435242, "avg_line_length": 27.55555534362793, "blob_id": "ebdd40d7559b506d6fe8d231818a41beef4eb3e7", "content_id": "578b3f3c21a7ba601aa6b4623cf581acfc650435", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 268, "license_type": "no_license", "max_line_length": 37, "num_lines": 9, "path": "/multiuser/admin.py", "repo_name": "raja456881/Lms3", "src_encoding": "UTF-8", "text": "from django.contrib import admin\r\nfrom .models import *\r\n# Register your models here.\r\n\r\nadmin.site.register(User)\r\nadmin.site.register(TrainerProfile)\r\nadmin.site.register(AdminProfile)\r\nadmin.site.register(InstituteProfile)\r\nadmin.site.register(FranchiseProfile)\r\n\r\n" }, { "alpha_fraction": 0.5330227017402649, "alphanum_fraction": 0.5536872148513794, "avg_line_length": 49.41666793823242, "blob_id": "d6c95b9ae0403978a490530f4cb3a732fdb81dca", "content_id": "d742ba329a143c97232f4ca38616adfe9dea748c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4936, "license_type": "no_license", "max_line_length": 135, "num_lines": 96, "path": "/crm/migrations/0001_initial.py", "repo_name": "raja456881/Lms3", "src_encoding": "UTF-8", "text": "# Generated by Django 3.0.7 on 2020-09-25 12:33\r\n\r\nimport datetime\r\nfrom django.db import migrations, models\r\nimport django.db.models.deletion\r\nimport django_extensions.db.fields\r\nimport phonenumber_field.modelfields\r\n\r\n\r\nclass 
Migration(migrations.Migration):\r\n\r\n initial = True\r\n\r\n dependencies = [\r\n ]\r\n\r\n operations = [\r\n migrations.CreateModel(\r\n name='Address',\r\n fields=[\r\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\r\n ('contact_name', models.CharField(max_length=23)),\r\n ('town', models.CharField(max_length=34)),\r\n ('postcode', models.CharField(default='43701', max_length=5, verbose_name='zip code')),\r\n ('state', models.CharField(max_length=34)),\r\n ],\r\n ),\r\n migrations.CreateModel(\r\n name='Currency',\r\n fields=[\r\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\r\n ('code', models.CharField(max_length=3, unique=True)),\r\n ('pre_symbol', models.CharField(blank=True, max_length=1)),\r\n ('post_symbol', models.CharField(blank=True, max_length=1)),\r\n ],\r\n ),\r\n migrations.CreateModel(\r\n name='customer',\r\n fields=[\r\n ('id', models.AutoField(primary_key=True, serialize=False)),\r\n ('Customer_gender', models.CharField(choices=[('M', 'Male'), ('F', 'Female'), ('TS', 'Transgender')], max_length=50)),\r\n ('Customer_profilepic', models.FileField(upload_to='')),\r\n ('Customer_Email', models.EmailField(max_length=111)),\r\n ('Customer_created_at', models.DateTimeField(auto_now_add=True)),\r\n ('Customer_PhoneNo1', phonenumber_field.modelfields.PhoneNumberField(max_length=128, region=None)),\r\n ('Customer_Street', models.CharField(default='', max_length=250)),\r\n ('Customer_Landmark', models.CharField(default='', max_length=100)),\r\n ('Customer_Zipcode', models.IntegerField(default='')),\r\n ('Customer_State', models.CharField(default='', max_length=100)),\r\n ('Customer_Country', models.CharField(default='', max_length=100)),\r\n ],\r\n ),\r\n migrations.CreateModel(\r\n name='location1',\r\n fields=[\r\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\r\n ('country', models.CharField(max_length=30)),\r\n ('state', models.CharField(max_length=23)),\r\n ('city', models.CharField(max_length=23)),\r\n ],\r\n ),\r\n migrations.CreateModel(\r\n name='Order',\r\n fields=[\r\n ('order_id', models.AutoField(primary_key=True, serialize=False)),\r\n ('items_json', models.CharField(max_length=5000)),\r\n ('name', models.CharField(max_length=90)),\r\n ('email', models.CharField(max_length=111)),\r\n ('amount', models.IntegerField(default=0)),\r\n ('phone', models.CharField(default='', max_length=111)),\r\n ('coursename', models.CharField(max_length=5000)),\r\n ('prices', models.CharField(max_length=500)),\r\n ('qty', models.CharField(max_length=400)),\r\n ('date', models.DateTimeField(blank=True, default=datetime.datetime(2020, 9, 25, 18, 3, 13, 630139))),\r\n ],\r\n ),\r\n migrations.CreateModel(\r\n name='Invoice',\r\n fields=[\r\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\r\n ('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),\r\n ('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),\r\n ('address', models.CharField(max_length=34)),\r\n ('invoice_id', models.CharField(blank=True, editable=False, max_length=6, null=True, unique=True)),\r\n ('invoice_date', models.DateField(default=datetime.date.today)),\r\n ('invoiced', models.BooleanField(default=False)),\r\n ('draft', models.BooleanField(default=False)),\r\n ('paid_date', models.DateField(blank=True, 
null=True)),\r\n ('currency', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='crm.Currency')),\r\n ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='crm.customer')),\r\n ],\r\n options={\r\n 'ordering': ('-invoice_date', 'id'),\r\n },\r\n ),\r\n ]\r\n" }, { "alpha_fraction": 0.5786713361740112, "alphanum_fraction": 0.5921328663825989, "avg_line_length": 66.0952377319336, "blob_id": "7298aa082243178f6d99fec29e8db00f1acff2f6", "content_id": "c123d6da247e4ffb9ab6b059759bbb6de789bb56", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5720, "license_type": "no_license", "max_line_length": 298, "num_lines": 84, "path": "/inventory/migrations/0001_initial.py", "repo_name": "raja456881/Lms3", "src_encoding": "UTF-8", "text": "# Generated by Django 3.0.7 on 2020-09-27 11:34\r\n\r\nfrom django.db import migrations, models\r\nimport django.db.models.deletion\r\nimport multiselectfield.db.fields\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n initial = True\r\n\r\n dependencies = [\r\n ]\r\n\r\n operations = [\r\n migrations.CreateModel(\r\n name='Categories',\r\n fields=[\r\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\r\n ('name', models.CharField(max_length=250, unique=True)),\r\n ('seo_title', models.CharField(max_length=70)),\r\n ('seo_description', models.CharField(max_length=160)),\r\n ('slug', models.SlugField(max_length=255, unique=True)),\r\n ('categoryStatus', models.CharField(blank=True, choices=[('Active', 'Active'), ('Inactive', 'Inactive'), ('Available', 'Available')], max_length=30, null=True)),\r\n ('categoryCreated', models.DateTimeField(auto_now_add=True)),\r\n ('seo_keywords', models.CharField(max_length=160)),\r\n ('lft', models.PositiveIntegerField(editable=False)),\r\n ('rght', models.PositiveIntegerField(editable=False)),\r\n ('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),\r\n ('level', models.PositiveIntegerField(editable=False)),\r\n ],\r\n options={\r\n 'abstract': False,\r\n },\r\n ),\r\n migrations.CreateModel(\r\n name='CourseDetails',\r\n fields=[\r\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\r\n ('courseName', models.CharField(blank=True, max_length=400, null=True)),\r\n ('courseDescription', models.TextField(blank=True, null=True)),\r\n ('courseOnline', models.BooleanField(blank=True, default=False, null=True)),\r\n ('courseImage', models.FileField(blank=True, null=True, upload_to='images/')),\r\n ('courseLive', models.BooleanField(blank=True, default=False, null=True)),\r\n ('courseOffline', models.BooleanField(blank=True, default=False, null=True)),\r\n ('courseStatus', models.CharField(blank=True, choices=[('Active', 'Active'), ('Inactive', 'Inactive'), ('Available', 'Available')], max_length=30, null=True)),\r\n ('coursePrice', models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),\r\n ('promocode', models.CharField(blank=True, max_length=160, null=True)),\r\n ('priceDiscount', models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),\r\n ('totalPriceDiscount', models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),\r\n ('courseCity', models.CharField(blank=True, max_length=200, null=True)),\r\n ('courseState', models.CharField(blank=True, max_length=200, null=True)),\r\n ('courseCountry', models.CharField(blank=True, 
max_length=200, null=True)),\r\n                ('offlineAddress', models.TextField(blank=True, null=True)),\r\n                ('offlineClassStrength', models.IntegerField(blank=True, null=True)),\r\n                ('offlineTiming', models.DateTimeField(blank=True, null=True)),\r\n                ('offlineWeekday', multiselectfield.db.fields.MultiSelectField(blank=True, choices=[('Monday', 'Monday'), ('Tuesday', 'Tuesday'), ('Wednesday', 'Wednesday'), ('Thursday', 'Thursday'), ('Friday', 'Friday'), ('Saturday', 'Saturday'), ('Sunday', 'Sunday')], max_length=56, null=True)),\r\n                ('onlineWeekday', multiselectfield.db.fields.MultiSelectField(blank=True, choices=[('Monday', 'Monday'), ('Tuesday', 'Tuesday'), ('Wednesday', 'Wednesday'), ('Thursday', 'Thursday'), ('Friday', 'Friday'), ('Saturday', 'Saturday'), ('Sunday', 'Sunday')], max_length=56, null=True)),\r\n                ('onlineTiming', models.DateTimeField(blank=True, null=True)),\r\n                ('courseBelong', models.CharField(blank=True, choices=[('Trainer', 'Trainer'), ('Institute', 'Institute'), ('Franchise', 'Franchise')], max_length=30, null=True)),\r\n                ('seo_title', models.CharField(max_length=70)),\r\n                ('seo_description', models.CharField(max_length=160)),\r\n                ('seo_keywords', models.CharField(max_length=160)),\r\n                ('slug', models.SlugField(max_length=255, unique=True)),\r\n                ('introVideo', models.FileField(blank=True, null=True, upload_to='IntroVideos/')),\r\n                ('courseCreated', models.DateTimeField(auto_now_add=True)),\r\n                ('courseCategory', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='inventory.Categories')),\r\n            ],\r\n            options={\r\n                'ordering': ['-courseCreated'],\r\n            },\r\n        ),\r\n        migrations.CreateModel(\r\n            name='CourseVideos',\r\n            fields=[\r\n                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\r\n                ('courseVideoName', models.CharField(blank=True, max_length=500, null=True)),\r\n                ('courseVideoFile', models.FileField(blank=True, null=True, upload_to='videos/')),\r\n                ('courseVideoDescription', models.TextField(blank=True, null=True)),\r\n                ('courseVideoCreated', models.DateTimeField(auto_now_add=True)),\r\n                ('course', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='inventory.CourseDetails')),\r\n            ],\r\n        ),\r\n    ]\r\n" }, { "alpha_fraction": 0.6098888516426086, "alphanum_fraction": 0.6117184162139893, "avg_line_length": 41.86921691894531, "blob_id": "3f542f50922b26b1b1f1c85dc737aaa3aa662e45", "content_id": "48352c97a3551a8800d1a04e4cef88e7779bb747", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21863, "license_type": "no_license", "max_line_length": 193, "num_lines": 497, "path": "/customerManagement/views.py", "repo_name": "raja456881/Lms3", "src_encoding": "UTF-8", "text": "from django.shortcuts import render , redirect\r\nfrom .forms import *\r\nfrom multiuser.models import *\r\nfrom django.http import HttpResponse\r\nfrom django.db import IntegrityError\r\nfrom django.contrib.auth import get_user_model\r\nfrom django.contrib import messages\r\nfrom django.contrib.auth.forms import UserCreationForm\r\n#from itertools import chain\r\n# Create your views here.\r\n\r\n\r\ndef Customer(request):\r\n\r\n    #get details from all multiuser models\r\n    adminDetails = AdminProfile.objects.all()\r\n    trainerDetails = TrainerProfile.objects.all()\r\n    instituteDetails = InstituteProfile.objects.all()\r\n    franchiseDetails = FranchiseProfile.objects.all()\r\n\r\n    context = {'franchiseDetails':franchiseDetails,'adminDetails':adminDetails,\r\n               
'instituteDetails':instituteDetails,'trainerDetails':trainerDetails}\r\n\r\n    return render(request,'crm/view_customer.html',context)\r\n\r\ndef addCustomer(request):\r\n    adminForm = AdminForm()\r\n    userForm = UserForm()\r\n    trainerForm = TrainerForm()\r\n    studentForm = StudentForm()\r\n    context = {'adminForm':adminForm,'userForm':userForm,'trainerForm':trainerForm,'studentForm':studentForm}\r\n    return render(request,'crm/addUser.html',context)\r\n\r\ndef addAdmin(request):\r\n    if request.method == 'POST':\r\n        first_name = request.POST['first_name']\r\n        last_name = request.POST['last_name']\r\n        username = request.POST['username']\r\n        if ' ' in username:\r\n            messages.info(request , \"Error: there is a space in the username\")\r\n            return redirect('view_Customer')\r\n        password = request.POST['password']\r\n        email= request.POST['email']\r\n        # optional fields default to an empty string\r\n        about = request.POST.get('about', '')\r\n        middle_name = request.POST.get('middle_name', '')\r\n        if middle_name == '':\r\n            full_name = first_name +\" \"+ last_name\r\n        else:\r\n            full_name = first_name+\" \"+ middle_name+\" \"+ last_name\r\n        #image= request.FILES['image']\r\n        gender=request.POST['gender']\r\n        image=''\r\n        phone1 = request.POST['phone1']\r\n        phone2 = request.POST['phone2']\r\n        address = request.POST['address']\r\n        city = request.POST['city']\r\n        postal_code = request.POST['postal_code']\r\n        country = request.POST['country']\r\n        state = request.POST['state']\r\n        status = request.POST['status']\r\n\r\n        if User.objects.filter(username = username).exists():\r\n            messages.info(request,'Username already exists')\r\n            return redirect(\"view_Customer\")\r\n        elif User.objects.filter(email = email).exists():\r\n            messages.info(request,'Email already exists')\r\n            return redirect(\"view_Customer\")\r\n        else:\r\n            user=get_user_model()\r\n            new_user = user.objects.create_user(username = username, first_name = first_name , last_name =last_name, password = password ,is_admin=True)\r\n            new_user.save()\r\n\r\n            new_admin = AdminProfile.objects.create(user = new_user,adminFullName= full_name, adminGender =gender, adminAbout = about ,adminImage =image , adminPhoneNo1 = phone1 ,\r\n                                                adminPhoneNo2 = phone2 , adminAddress = address , adminCountry = country, adminCity = city,\r\n                                                adminPostalCode = postal_code ,adminState = state , adminEmail = email, adminStatus = status\r\n                                                )\r\n            new_admin.save()\r\n\r\n        messages.info(request,'Admin is added successfully')\r\n        return redirect(\"view_Customer\")\r\n\r\ndef addTrainer(request):\r\n    if request.method == 'POST':\r\n        first_name = request.POST['first_name']\r\n        last_name = request.POST['last_name']\r\n        username = request.POST['username']\r\n        if ' ' in username:\r\n            messages.info(request , \"Error: there is a space in the username\")\r\n            return redirect('view_Customer')\r\n        password = request.POST['password']\r\n        email= request.POST['email']\r\n        about = request.POST.get('about', '')\r\n        middle_name = request.POST.get('middle_name', '')\r\n        if middle_name == '':\r\n            full_name = first_name +\" \"+ last_name\r\n        else:\r\n            full_name = first_name+\" \"+ middle_name+\" \"+ last_name\r\n        gender=request.POST['gender']\r\n        #image= request.FILES['image']\r\n        image=''\r\n        phone1 = request.POST['phone1']\r\n        phone2 = request.POST['phone2']\r\n        address = request.POST['address']\r\n        city = request.POST['city']\r\n        postal_code = request.POST['postal_code']\r\n        country = request.POST['country']\r\n        state = request.POST['state']\r\n        status = 
request.POST['status']\r\n        if User.objects.filter(username = username).exists():\r\n            messages.info(request,'Username already exists')\r\n            return redirect(\"view_Customer\")\r\n        elif User.objects.filter(email = email).exists():\r\n            messages.info(request,'Email already exists')\r\n            return redirect(\"view_Customer\")\r\n        else:\r\n            user=get_user_model()\r\n            new_user = user.objects.create_user(username = username, first_name = first_name , last_name =last_name, password = password ,is_trainer=True)\r\n            new_user.save()\r\n\r\n            new_trainer = TrainerProfile.objects.create(user = new_user,trainerFullName= full_name, trainerGender =gender,trainerAbout = about , trainerImage =image , trainerPhoneNo1 = phone1 ,\r\n                                                trainerPhoneNo2 = phone2 , trainerAddress = address , trainerCountry = country, trainerCity = city,\r\n                                                trainerPostalCode = postal_code,trainerState = state , trainerEmail = email, trainerStatus = status\r\n                                                )\r\n            new_trainer.save()\r\n\r\n        messages.info(request,'Trainer is added successfully')\r\n        return redirect(\"view_Customer\")\r\n\r\ndef addInstitute(request):\r\n    if request.method == 'POST':\r\n        username = request.POST['username']\r\n        if ' ' in username:\r\n            messages.info(request , \"Error: there is a space in the username\")\r\n            return redirect('view_Customer')\r\n        password = request.POST['password']\r\n        email= request.POST['email']\r\n        full_name = request.POST['full_name']\r\n        about = request.POST.get('about', '')\r\n        #image= request.FILES['image']\r\n        image=''\r\n        phone1 = request.POST['phone1']\r\n        phone2 = request.POST['phone2']\r\n        address = request.POST['address']\r\n        city = request.POST['city']\r\n        postal_code = request.POST['postal_code']\r\n        country = request.POST['country']\r\n        state = request.POST['state']\r\n        status = request.POST['status']\r\n        if User.objects.filter(username = username).exists():\r\n            messages.info(request,'Username already exists')\r\n            return redirect(\"view_Customer\")\r\n        elif User.objects.filter(email = email).exists():\r\n            messages.info(request,'Email already exists')\r\n            return redirect(\"view_Customer\")\r\n        else:\r\n            user=get_user_model()\r\n            new_user = user.objects.create_user(username = username, password = password ,is_institute=True)\r\n            new_user.save()\r\n\r\n            new_institute = InstituteProfile.objects.create(user = new_user,instituteName= full_name ,instituteAbout = about ,instituteImage =image , institutePhoneNo1 = phone1 ,\r\n                                                institutePhoneNo2 = phone2 , instituteAddress = address , instituteCountry = country,instituteCity = city,\r\n                                                institutePostalCode = postal_code,instituteState = state , instituteEmail = email, instituteStatus = status\r\n                                                )\r\n            new_institute.save()\r\n\r\n        messages.info(request,'Institute is added successfully')\r\n        return redirect(\"view_Customer\")\r\n\r\n\r\ndef addFranchise(request):\r\n    if request.method == 'POST':\r\n        username = request.POST['username']\r\n        if ' ' in username:\r\n            messages.info(request , \"Error: there is a space in the username\")\r\n            return redirect('view_Customer')\r\n        password = request.POST['password']\r\n        email= request.POST['email']\r\n        full_name = request.POST['full_name']\r\n        about = request.POST.get('about', '')\r\n        #image= request.FILES['image']\r\n        image=''\r\n        phone1 = request.POST['phone1']\r\n        phone2 = request.POST['phone2']\r\n        address = request.POST['address']\r\n        country = request.POST['country']\r\n        city = request.POST['city']\r\n        postal_code = request.POST['postal_code']\r\n        state = request.POST['state']\r\n        status = 
request.POST['status']\r\n        user=get_user_model()\r\n        if User.objects.filter(username = username).exists():\r\n            messages.info(request,'Username already exists')\r\n            return redirect(\"view_Customer\")\r\n        elif User.objects.filter(email = email).exists():\r\n            messages.info(request,'Email already exists')\r\n            return redirect(\"view_Customer\")\r\n        else:\r\n            new_user = user.objects.create_user(username = username, password = password ,is_franchise=True)\r\n            new_user.save()\r\n\r\n            new_franchise = FranchiseProfile.objects.create(user = new_user,franchiseName= full_name ,franchiseAbout = about , franchiseImage =image , franchisePhoneNo1 = phone1 ,\r\n                                                franchisePhoneNo2 = phone2 , franchiseAddress = address , franchiseCountry = country,franchiseCity = city,\r\n                                                franchisePostalCode = postal_code,franchiseState = state , franchiseEmail = email, franchiseStatus = status\r\n                                                )\r\n            new_franchise.save()\r\n\r\n        messages.info(request,'Franchise is added successfully')\r\n        return redirect(\"view_Customer\")\r\n\r\n\r\n\r\ndef updateAdmin(request , username):\r\n    user = User.objects.get(username =username)\r\n    admin = AdminProfile.objects.get(user = user)\r\n    adminForm = AdminForm(instance = admin)\r\n    if request.method == 'POST':\r\n        adminForm = AdminForm(request.POST , request.FILES,instance = admin)\r\n        username1 = request.POST['username']\r\n        first_name = request.POST['first_name']\r\n        last_name = request.POST['last_name']\r\n        if adminForm.is_valid():\r\n            try:\r\n                User.objects.filter(username=username).update(username=username1 , first_name = first_name , last_name=last_name )\r\n            except IntegrityError:\r\n                messages.info(request,'Username already exists')\r\n                return redirect(\"view_Customer\")\r\n            adminForm.save()\r\n            messages.info(request,\"Changes are made successfully\")\r\n            return redirect('view_Customer')\r\n        else:\r\n            messages.info(request,adminForm.errors)\r\n            return redirect('view_Customer')\r\n    context = {'adminForm':adminForm, 'admin':admin , 'user':user}\r\n    return render(request,'crm/updateAdmin.html',context)\r\n\r\n\r\ndef updateTrainer(request , username):\r\n    user = User.objects.get(username =username)\r\n    trainer = TrainerProfile.objects.get(user = user)\r\n    trainerForm = TrainerForm(instance = trainer)\r\n    if request.method == 'POST':\r\n        trainerForm = TrainerForm(request.POST , request.FILES,instance = trainer)\r\n        username1 = request.POST['username']\r\n        first_name = request.POST['first_name']\r\n        last_name = request.POST['last_name']\r\n        if trainerForm.is_valid():\r\n            try:\r\n                User.objects.filter(username=username).update(username=username1 , first_name = first_name , last_name=last_name )\r\n            except IntegrityError:\r\n                messages.info(request,'Username already exists')\r\n                return redirect(\"view_Customer\")\r\n            trainerForm.save()\r\n            messages.info(request,\"Changes are made successfully\")\r\n            return redirect('view_Customer')\r\n        else:\r\n            messages.info(request,trainerForm.errors)\r\n            return redirect('view_Customer')\r\n    context = {'trainerForm':trainerForm, 'trainer':trainer , 'user':user}\r\n    return render(request,'crm/updateTrainer.html',context)\r\n\r\ndef updateInstitute(request , username):\r\n    user = User.objects.get(username =username)\r\n    institute = InstituteProfile.objects.get(user = user)\r\n    instituteForm = InstituteForm(instance = institute)\r\n    if request.method == 'POST':\r\n        instituteForm = InstituteForm(request.POST , request.FILES,instance = institute)\r\n        username1 = request.POST['username']\r\n        if instituteForm.is_valid():\r\n            try:\r\n                User.objects.filter(username=username).update(username=username1)\r\n            except IntegrityError:\r\n                messages.info(request,'Username already exists')\r\n                return redirect(\"view_Customer\")\r\n            instituteForm.save()\r\n            messages.info(request,\"Changes are made successfully\")\r\n            return redirect('view_Customer')\r\n        else:\r\n            messages.info(request,instituteForm.errors)\r\n            return redirect('view_Customer')\r\n    context = {'instituteForm':instituteForm, 'institute':institute , 'user':user}\r\n    return render(request,'crm/updateInstitute.html',context)\r\n\r\n\r\ndef updateFranchise(request , username):\r\n    user = User.objects.get(username =username)\r\n    franchise = FranchiseProfile.objects.get(user = user)\r\n    franchiseForm = FranchiseForm(instance = franchise)\r\n    if request.method == 'POST':\r\n        franchiseForm = FranchiseForm(request.POST , request.FILES,instance = franchise)\r\n        username1 = request.POST['username']\r\n        if franchiseForm.is_valid():\r\n            try:\r\n                User.objects.filter(username=username).update(username=username1)\r\n            except IntegrityError:\r\n                messages.info(request,'Username already exists')\r\n                return redirect(\"view_Customer\")\r\n            franchiseForm.save()\r\n            messages.info(request,\"Changes are made successfully\")\r\n            return redirect('view_Customer')\r\n        else:\r\n            messages.info(request,franchiseForm.errors)\r\n            return redirect('view_Customer')\r\n    context = {'franchiseForm':franchiseForm, 'franchise':franchise , 'user':user}\r\n    return render(request,'crm/updateFranchise.html',context)\r\n\r\n\r\n\r\ndef deleteAdmin(request , uniqueId):\r\n    if request.method in ('POST', 'GET'):\r\n        user = User.objects.get(id=uniqueId)\r\n        admin = AdminProfile.objects.get(user=user)\r\n        name = user.username\r\n        user.delete()\r\n        admin.delete()\r\n        messages.info(request ,\"{} username of admin is deleted successfully\".format(name))\r\n    return redirect('view_Customer')\r\n\r\ndef deleteTrainer(request , uniqueId):\r\n    if request.method in ('POST', 'GET'):\r\n        user = User.objects.get(id=uniqueId)\r\n        trainer = TrainerProfile.objects.get(user=user)\r\n        name = user.username\r\n        user.delete()\r\n        trainer.delete()\r\n        messages.info(request ,\"{} username of trainer is deleted successfully\".format(name))\r\n    return redirect('view_Customer')\r\n\r\ndef deleteInstitute(request , uniqueId):\r\n    if request.method in ('POST', 'GET'):\r\n        user = User.objects.get(id=uniqueId)\r\n        institute = InstituteProfile.objects.get(user=user)\r\n        name = user.username\r\n        user.delete()\r\n        institute.delete()\r\n        messages.info(request ,\"{} username of institute is deleted successfully\".format(name))\r\n    return redirect('view_Customer')\r\n\r\ndef deleteFranchise(request , uniqueId):\r\n    if request.method in ('POST', 'GET'):\r\n        user = User.objects.get(id=uniqueId)\r\n        franchise = FranchiseProfile.objects.get(user=user)\r\n        name = user.username\r\n        user.delete()\r\n        franchise.delete()\r\n        messages.info(request ,\"{} username of franchise is deleted successfully\".format(name))\r\n    return redirect('view_Customer')\r\n\r\n\r\ndef pendingUser(request):\r\n\r\n    #to display pending users which are yet to be verified\r\n\r\n    adminDetails = AdminProfile.objects.all().filter(adminStatus = 'Pending')\r\n    trainerDetails = TrainerProfile.objects.all().filter(trainerStatus = 'Pending')\r\n    instituteDetails = InstituteProfile.objects.all().filter(instituteStatus = 'Pending')\r\n    franchiseDetails = FranchiseProfile.objects.all().filter(franchiseStatus = 'Pending')\r\n    context = {'franchiseDetails':franchiseDetails,'adminDetails':adminDetails,\r\n               
'instituteDetails':instituteDetails,'trainerDetails':trainerDetails}\r\n\r\n    return render(request,'crm/pendingUser.html',context)\r\n\r\n\r\n#to verify admin trainer franchise and institute\r\n\r\n\r\ndef verifyAdmin(request , uniqueId):\r\n    if request.method in ('POST', 'GET'):\r\n        user = User.objects.get(id=uniqueId)\r\n        AdminProfile.objects.filter(user=user).update(adminStatus = 'Verified')\r\n        obj = AdminProfile.objects.get(user=user)\r\n        name = obj.adminFullName\r\n        messages.info(request,\"{} is approved\".format(name))\r\n    return redirect('view_Customer')\r\n\r\ndef verifyTrainer(request , uniqueId):\r\n    if request.method in ('POST', 'GET'):\r\n        user = User.objects.get(id=uniqueId)\r\n        TrainerProfile.objects.filter(user=user).update(trainerStatus = 'Verified')\r\n        obj = TrainerProfile.objects.get(user=user)\r\n        name = obj.trainerFullName\r\n        messages.info(request,\"{} is approved\".format(name))\r\n    return redirect('view_Customer')\r\n\r\ndef verifyFranchise(request , uniqueId):\r\n    if request.method in ('POST', 'GET'):\r\n        user = User.objects.get(id=uniqueId)\r\n        FranchiseProfile.objects.filter(user=user).update(franchiseStatus = 'Verified')\r\n        obj = FranchiseProfile.objects.get(user=user)\r\n        name = obj.franchiseName\r\n        messages.info(request,\"{} is approved\".format(name))\r\n    return redirect('view_Customer')\r\n\r\n\r\ndef verifyInstitute(request , uniqueId):\r\n    if request.method in ('POST', 'GET'):\r\n        user = User.objects.get(id=uniqueId)\r\n        InstituteProfile.objects.filter(user=user).update( instituteStatus = 'Verified')\r\n        obj = InstituteProfile.objects.get(user=user)\r\n        name = obj.instituteName\r\n        messages.info(request,\"{} is approved\".format(name))\r\n    return redirect('view_Customer')\r\n\r\n    #to discard admin trainer franchise and institute\r\n\r\ndef discardAdmin(request , username):\r\n    if request.method in ('POST', 'GET'):\r\n        user = User.objects.get(username=username)\r\n        AdminProfile.objects.filter(user=user).update(adminStatus = 'Discarded')\r\n        obj = AdminProfile.objects.get(user=user)\r\n        name = obj.adminFullName\r\n        messages.info(request,\"{} is discarded\".format(name))\r\n    return redirect('view_Customer')\r\n\r\n\r\ndef discardTrainer(request , username):\r\n    if request.method in ('POST', 'GET'):\r\n        user = User.objects.get(username=username)\r\n        TrainerProfile.objects.filter(user=user).update(trainerStatus = 'Discarded')\r\n        obj = TrainerProfile.objects.get(user=user)\r\n        name = obj.trainerFullName\r\n        messages.info(request,\"{} is discarded\".format(name))\r\n    return redirect('view_Customer')\r\n\r\n\r\ndef discardFranchise(request , username):\r\n    if request.method in ('POST', 'GET'):\r\n        user = User.objects.get(username=username)\r\n        FranchiseProfile.objects.filter(user=user).update(franchiseStatus = 'Discarded')\r\n        obj = FranchiseProfile.objects.get(user=user)\r\n        name = obj.franchiseName\r\n        messages.info(request,\"{} is discarded\".format(name))\r\n    return redirect('view_Customer')\r\n\r\ndef discardInstitute(request , username):\r\n    if request.method in ('POST', 'GET'):\r\n        user = User.objects.get(username=username)\r\n        InstituteProfile.objects.filter(user=user).update( instituteStatus = 'Discarded')\r\n        obj = InstituteProfile.objects.get(user=user)\r\n        name = obj.instituteName\r\n        messages.info(request,\"{} is discarded\".format(name))\r\n    return redirect('view_Customer')\r\n\r\n    #to display profile of various users\r\n\r\ndef adminProfile(request , username):\r\n    user = User.objects.get(username = username)\r\n    admin = 
AdminProfile.objects.get(user = user)\r\n profile = 'admin'\r\n context = {'admin':admin,'profile':profile}\r\n return render(request,'crm/profile-view.html',context)\r\n\r\n\r\ndef trainerProfile(request , username):\r\n user = User.objects.get(username = username)\r\n trainer = TrainerProfile.objects.get(user = user)\r\n profile = 'trainer'\r\n context = {'trainer':trainer,'profile':profile}\r\n return render(request,'crm/profile-view.html',context)\r\n\r\ndef instituteProfile(request , username):\r\n user = User.objects.get(username = username)\r\n institute = InstituteProfile.objects.get(user = user)\r\n profile = 'institute'\r\n context = {'institute':institute,'profile':profile}\r\n return render(request,'crm/profile-view.html',context)\r\n\r\ndef franchiseProfile(request , username):\r\n user = User.objects.get(username = username)\r\n franchise = FranchiseProfile.objects.get(user = user)\r\n profile = 'franchise'\r\n context = {'franchise':franchise,'profile':profile}\r\n return render(request,'crm/profile-view.html',context)\r\n \r\n \r\n \r\n \r\n \r\n" }, { "alpha_fraction": 0.4789029657840729, "alphanum_fraction": 0.5843881964683533, "avg_line_length": 22.947368621826172, "blob_id": "d9b246e020e7c6c8739b5ebb2de8745241624130", "content_id": "35a8fc16a000652a393bb78da04f7ef85f8c9292", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 474, "license_type": "no_license", "max_line_length": 112, "num_lines": 19, "path": "/crm/migrations/0008_auto_20201011_1539.py", "repo_name": "raja456881/Lms3", "src_encoding": "UTF-8", "text": "# Generated by Django 3.1 on 2020-10-11 10:09\r\n\r\nimport datetime\r\nfrom django.db import migrations, models\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('crm', '0007_auto_20200927_1704'),\r\n ]\r\n\r\n operations = [\r\n migrations.AlterField(\r\n model_name='order',\r\n name='date',\r\n field=models.DateTimeField(blank=True, default=datetime.datetime(2020, 10, 11, 15, 39, 49, 878899)),\r\n ),\r\n ]\r\n" }, { "alpha_fraction": 0.4789029657840729, "alphanum_fraction": 0.5822784900665283, "avg_line_length": 22.947368621826172, "blob_id": "42a4241691c1923fac43d4d6b1e522550d074c54", "content_id": "75040d71bd344e1c43bd39c7dd7edb61a89d389a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 474, "license_type": "no_license", "max_line_length": 110, "num_lines": 19, "path": "/crm/migrations/0006_auto_20200925_2134.py", "repo_name": "raja456881/Lms3", "src_encoding": "UTF-8", "text": "# Generated by Django 3.0.7 on 2020-09-25 16:04\r\n\r\nimport datetime\r\nfrom django.db import migrations, models\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('crm', '0005_auto_20200925_1829'),\r\n ]\r\n\r\n operations = [\r\n migrations.AlterField(\r\n model_name='order',\r\n name='date',\r\n field=models.DateTimeField(blank=True, default=datetime.datetime(2020, 9, 25, 21, 34, 9, 214266)),\r\n ),\r\n ]\r\n" }, { "alpha_fraction": 0.639689564704895, "alphanum_fraction": 0.6405518651008606, "avg_line_length": 24.429967880249023, "blob_id": "2d628be860cdfd27ec254bc4c41078c3b557e0b6", "content_id": "0ef07ccec185d716ceea8c38d39178e45a94c22a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8118, "license_type": "no_license", "max_line_length": 110, "num_lines": 307, "path": "/multiuser/views.py", "repo_name": "raja456881/Lms3", "src_encoding": 
"UTF-8", "text": "from .models import *\r\nfrom django.shortcuts import render, redirect\r\nfrom django.http import HttpResponse, HttpResponseRedirect, JsonResponse\r\nfrom multiuser.models import AdminProfile,FranchiseProfile,InstituteProfile,TrainerProfile, User\r\nfrom student.models import Student\r\nfrom inventory.models import CourseDetails\r\nfrom django.contrib import messages\r\nfrom django.contrib.auth.forms import UserCreationForm\r\nfrom django.contrib.auth.decorators import login_required\r\nfrom django.contrib.auth import authenticate, login, logout\r\nfrom django.contrib.auth import login as auth_login\r\nfrom django.views.generic import CreateView\r\nfrom .form import StudentSignUpForm,AdminSignUpForm,FranchiseSignUpForm,InstituteSignUpForm,TrainerSignUpForm\r\nfrom .models import User\r\n\r\n\r\ndef Registerchoice(request):\r\n return render(request, 'register_choice.html')\r\n\r\nclass AdminView(CreateView):\r\n model = User\r\n form_class = AdminSignUpForm\r\n template_name = 'admin_reg.html'\r\n\r\n def form_valid(self, form):\r\n user = form.save()\r\n\r\n return redirect('login')\r\n\r\n\r\n\r\nclass TrainerSignUpView(CreateView):\r\n model = User\r\n form_class = TrainerSignUpForm\r\n template_name = 'trainer_reg.html'\r\n\r\n\r\n\r\n def form_valid(self, form):\r\n user = form.save()\r\n\r\n return redirect('login')\r\n\r\nclass StudentSignUpView(CreateView):\r\n model = User\r\n form_class = StudentSignUpForm\r\n template_name = 'student_reg.html'\r\n\r\n\r\n def form_valid(self, form):\r\n user = form.save()\r\n\r\n return redirect('login')\r\n\r\nclass InstituteSignUpView(CreateView):\r\n model = User\r\n form_class = InstituteSignUpForm\r\n template_name = 'institute_reg.html'\r\n\r\n\r\n def form_valid(self, form):\r\n user = form.save()\r\n\r\n return redirect('login')\r\n\r\n\r\nclass FranchiseSignUpView(CreateView):\r\n model = User\r\n form_class = FranchiseSignUpForm\r\n template_name = 'franchise_reg.html'\r\n\r\n\r\n def form_valid(self, form):\r\n user = form.save()\r\n\r\n return redirect('login')\r\n\r\n\r\n\r\n'''Login page'''\r\ndef login(request):\r\n if request.method == 'POST':\r\n username = request.POST.get('username')\r\n password = request.POST.get('password')\r\n\r\n user = authenticate(username=username, password=password)\r\n if user is not None:\r\n type_obj = User.objects.get(username=user)\r\n auth_login(request, user)\r\n if user.is_authenticated and type_obj.is_admin:\r\n\r\n return redirect('course')\r\n elif user.is_authenticated and type_obj.is_franchise:\r\n\r\n return redirect('homepage1')\r\n\r\n elif user.is_authenticated and type_obj.is_institute:\r\n\r\n return redirect('homepage1')\r\n elif user.is_authenticated and type_obj.is_trainer:\r\n return redirect('homepage1')\r\n\r\n elif user.is_authenticated and type_obj.is_student:\r\n\r\n return redirect('homepage1')\r\n\r\n\r\n\r\n else:\r\n messages1=True\r\n return render(request, \"admin_login.html\", {\"messages1\":messages1})\r\n\r\n\r\n return render(request,'admin_login.html')\r\n\r\n\r\n\r\n'''Logout Page '''\r\ndef logoutPage(request):\r\n logout (request)\r\n return redirect('homePage')\r\n\r\n\r\n\r\ndef user(request):\r\n '''institute'''\r\n '''count'''\r\n institute_count=InstituteProfile.objects.all().count()\r\n\r\n '''apply filter'''\r\n\r\n all_institute=InstituteProfile.objects.all()\r\n\r\n '''franchise'''\r\n '''count'''\r\n franchise_count=FranchiseProfile.objects.all().count()\r\n\r\n '''apply filter'''\r\n 
all_franchise=FranchiseProfile.objects.all()\r\n\r\n\r\n '''student'''\r\n\r\n student_count=Student.objects.all().count()\r\n\r\n\r\n '''trainer'''\r\n\r\n trainer_count = TrainerProfile.objects.all().count()\r\n\r\n context={\r\n\r\n 'all_institute':all_institute,\r\n 'trainer_count':trainer_count,\r\n 'student_count':student_count,\r\n 'franchise_count':franchise_count,\r\n 'institute_count':institute_count,\r\n 'user_count':student_count+trainer_count+franchise_count+institute_count\r\n }\r\n\r\n return render(request,'user.html',context)\r\n\r\n\r\n\r\ndef user_franchise(request):\r\n '''institute'''\r\n '''count'''\r\n institute_count = InstituteProfile.objects.all().count()\r\n\r\n '''franchise'''\r\n '''count'''\r\n franchise_count = FranchiseProfile.objects.all().count()\r\n\r\n '''apply filter'''\r\n all_franchise = FranchiseProfile.objects.all()\r\n\r\n '''student'''\r\n\r\n student_count = Student.objects.all().count()\r\n\r\n '''trainer'''\r\n\r\n trainer_count = TrainerProfile.objects.all().count()\r\n\r\n context = {\r\n\r\n 'all_franchise': all_franchise,\r\n 'trainer_count': trainer_count,\r\n 'student_count': student_count,\r\n 'franchise_count': franchise_count,\r\n 'institute_count': institute_count,\r\n 'user_count': student_count + trainer_count + franchise_count + institute_count\r\n }\r\n\r\n\r\n return render(request,'user_franchise.html',context)\r\n\r\n\r\ndef user_student(request):\r\n all_student=Student.objects.all()\r\n student_count = Student.objects.all()\r\n\r\n '''institute'''\r\n '''count'''\r\n institute_count = InstituteProfile.objects.all().count()\r\n\r\n '''franchise'''\r\n '''count'''\r\n franchise_count = FranchiseProfile.objects.all().count()\r\n\r\n '''apply filter'''\r\n all_franchise = FranchiseProfile.objects.all()\r\n\r\n '''student'''\r\n\r\n student_count = Student.objects.all().count()\r\n\r\n '''trainer'''\r\n\r\n trainer_count = TrainerProfile.objects.all().count()\r\n\r\n context = {\r\n\r\n 'all_student': all_student,\r\n 'trainer_count': trainer_count,\r\n 'student_count': student_count,\r\n 'franchise_count': franchise_count,\r\n 'institute_count': institute_count,\r\n 'user_count': student_count + trainer_count + franchise_count + institute_count\r\n }\r\n\r\n return render(request, 'user_student.html', context)\r\n\r\n\r\n\r\n\r\ndef user_trainer(request):\r\n institute_count = InstituteProfile.objects.all().count()\r\n\r\n all_trainer = TrainerProfile.objects.all()\r\n\r\n '''franchise_count'''\r\n franchise_count = FranchiseProfile.objects.all().count()\r\n\r\n\r\n\r\n '''student'''\r\n\r\n student_count = Student.objects.all().count()\r\n\r\n '''trainer'''\r\n\r\n trainer_count = TrainerProfile.objects.all().count()\r\n\r\n context = {\r\n\r\n 'all_trainer': all_trainer,\r\n 'trainer_count': trainer_count,\r\n 'student_count': student_count,\r\n 'franchise_count': franchise_count,\r\n 'institute_count': institute_count,\r\n 'user_count': student_count + trainer_count + franchise_count + institute_count\r\n }\r\n\r\n return render(request,'user_trainer.html',context)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef trainerProfile(request , username):\r\n user = User.objects.get(username = username)\r\n trainer = TrainerProfile.objects.get(user = user)\r\n profile = 'trainer'\r\n context = {'trainer':trainer,'profile':profile}\r\n return render(request,'user_profile.html',context)\r\n\r\ndef instituteProfile(request , username):\r\n user = User.objects.get(username = username)\r\n institute = InstituteProfile.objects.get(user = user)\r\n 
profile = 'institute'\r\n context = {'institute':institute,'profile':profile}\r\n return render(request,'user_profile.html',context)\r\n\r\ndef franchiseProfile(request , username):\r\n user = User.objects.get( username = username )\r\n franchise = FranchiseProfile.objects.get(user = user)\r\n profile = 'franchise'\r\n context = {'franchise':franchise,'profile':profile}\r\n return render(request,'user_profile.html',context)\r\n\r\ndef studentProfile(request , username):\r\n user = User.objects.get(username = username)\r\n student = Student.objects.get(user = user)\r\n profile = 'student'\r\n context = {'student':student,'profile':profile }\r\n return render(request,'user_profile.html',context)\r\n\r\n\r\n\r\ndef coursedetailView(request,slugfield):\r\n course = CourseDetails.objects.get(slug = slugfield)\r\n context = {'course':course}\r\n return render(request,'multiuser_totalcourse.html',context)\r\n\r\n\r\n" }, { "alpha_fraction": 0.7167182564735413, "alphanum_fraction": 0.7213622331619263, "avg_line_length": 48.69230651855469, "blob_id": "63bf8db08607118413c3e766a7c18c51d01da797", "content_id": "9ff9dd984b4ca955fafee8a7faa33119d6a65005", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 646, "license_type": "no_license", "max_line_length": 74, "num_lines": 13, "path": "/crm/urls.py", "repo_name": "raja456881/Lms3", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom.import views\nurlpatterns = [\n path(\"\" ,views.BulckEmail, name='bulkemail'),\n path('singlemail', views.singlemail, name='singlemail'),\n path('singlemailt', views.studentsingeemail, name='studentmail'),\n path('singlemail1', views.Institutionsingeemail,name='institutemail'),\n path('single2', views.trainersingeemail,name='trainersinglemail'),\n path('location', views.location, name='location'),\n path('customer', views.customerdetails,name='customer_detail'),\n path('crm/invoice/<int:id>/', views.InvoicePDFView,name='invoice'),\n path('checkout', views.orederdetails, name='buynow1'),\n]\n" }, { "alpha_fraction": 0.4789029657840729, "alphanum_fraction": 0.5822784900665283, "avg_line_length": 22.947368621826172, "blob_id": "48a414d225cc4174283b917ff4e2cc2b369012eb", "content_id": "236ef182af117c2dab0a33ed1681196273ef4f00", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 474, "license_type": "no_license", "max_line_length": 110, "num_lines": 19, "path": "/crm/migrations/0007_auto_20200927_1704.py", "repo_name": "raja456881/Lms3", "src_encoding": "UTF-8", "text": "# Generated by Django 3.0.7 on 2020-09-27 11:34\r\n\r\nimport datetime\r\nfrom django.db import migrations, models\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('crm', '0006_auto_20200925_2134'),\r\n ]\r\n\r\n operations = [\r\n migrations.AlterField(\r\n model_name='order',\r\n name='date',\r\n field=models.DateTimeField(blank=True, default=datetime.datetime(2020, 9, 27, 17, 4, 15, 557344)),\r\n ),\r\n ]\r\n" }, { "alpha_fraction": 0.718120813369751, "alphanum_fraction": 0.718120813369751, "avg_line_length": 19.571428298950195, "blob_id": "73cf183e3e4a7bd3a3a38b47e72810674ce36b03", "content_id": "e5197bab720b78e3ae21143bdef54fca43e623bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 149, "license_type": "no_license", "max_line_length": 38, "num_lines": 7, "path": "/quiz/views.py", "repo_name": "raja456881/Lms3", "src_encoding": "UTF-8", 
"text": "from django.shortcuts import render\r\n\r\nfrom .models import *\r\n# Create your views here.\r\n\r\ndef quiz(request):\r\n return render(request,'quiz.html')" }, { "alpha_fraction": 0.5968422293663025, "alphanum_fraction": 0.5988839268684387, "avg_line_length": 58.23770523071289, "blob_id": "fb6c15815cb478bf2d3f8b11c608a17d32084d36", "content_id": "c22c70ae1f8a06ed1ed451811b4612db7a187039", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7347, "license_type": "no_license", "max_line_length": 102, "num_lines": 122, "path": "/customerManagement/forms.py", "repo_name": "raja456881/Lms3", "src_encoding": "UTF-8", "text": "from django.forms import ModelForm\r\nfrom django import forms\r\nfrom student.models import Student\r\nfrom multiuser.models import AdminProfile , TrainerProfile , InstituteProfile , FranchiseProfile,User\r\nfrom django.contrib.auth.forms import UserCreationForm\r\n\r\nclass UserForm(UserCreationForm):\r\n class Meta:\r\n model = User\r\n fields = ['first_name','last_name','username','password']\r\n exclude = ['user', 'time']\r\n widgets = {\r\n 'first_name': forms.TextInput(attrs={'class': 'form-control','required': True}),\r\n 'last_name': forms.TextInput(attrs={'class': 'form-control', 'required': True}),\r\n 'username': forms.TextInput(attrs={'class': 'form-control', 'required': True}),\r\n 'password': forms.TextInput(attrs={'class': 'form-control', 'required': True}),\r\n }\r\n\r\nclass AdminForm(forms.ModelForm):\r\n class Meta:\r\n model = AdminProfile\r\n fields = '__all__'\r\n exclude = ['user', 'time']\r\n\r\n widgets = {\r\n 'adminFullName': forms.TextInput(attrs={'class':'form-control','required': True}),\r\n 'adminEmail': forms.TextInput(attrs={'class':'form-control', 'required': True}),\r\n 'adminAbout': forms.Textarea(attrs={'class':'form-control','rows':2}),\r\n 'adminGender': forms.Select(attrs={'class':'custom-select', 'required': True}),\r\n 'adminPhoneNo1': forms.TextInput(attrs={'class':'form-control', 'required': True}),\r\n 'adminPhoneNo2': forms.TextInput(attrs={'class':'form-control'}),\r\n 'adminAddress': forms.TextInput(attrs={'class':'form-control','required': True}),\r\n 'adminState': forms.TextInput(attrs={'class':'form-control','required': True}),\r\n 'adminCity': forms.TextInput(attrs={'class':'form-control','required': True}),\r\n 'adminPostalCode': forms.NumberInput(attrs={'class':'form-control','required': True}),\r\n 'adminCountry': forms.TextInput(attrs={'class':'form-control', 'required': True}),\r\n 'adminStatus': forms.Select(attrs={'class':'custom-select', 'required': True}),\r\n }\r\n\r\nclass TrainerForm(forms.ModelForm):\r\n class Meta:\r\n model = TrainerProfile\r\n fields = '__all__'\r\n exclude = ['user', 'time']\r\n\r\n widgets = {\r\n 'trainerFullName': forms.TextInput(attrs={'class': 'form-control','required': True}),\r\n 'trainerEmail': forms.TextInput(attrs={'class': 'form-control', 'required': True}),\r\n 'trainerAbout': forms.Textarea(attrs={'class':'form-control','rows':2}),\r\n 'trainerGender': forms.Select(attrs={'class':'custom-select', 'required': True}),\r\n 'trainerPhoneNo1': forms.TextInput(attrs={'class': 'form-control', 'required': True}),\r\n 'trainerPhoneNo2': forms.TextInput(attrs={'class': 'form-control'}),\r\n 'trainerAddress': forms.TextInput(attrs={'class': 'form-control','required': True}),\r\n 'trainerCity': forms.TextInput(attrs={'class':'form-control','required': True}),\r\n 'trainerPostalCode': 
forms.NumberInput(attrs={'class':'form-control','required': True}),\r\n 'trainerState': forms.TextInput(attrs={'class': 'form-control','required': True}),\r\n 'trainerCountry': forms.TextInput(attrs={'class': 'form-control', 'required': True}),\r\n 'trainerStatus': forms.Select(attrs={'class': 'custom-select', 'required': True}),\r\n }\r\n\r\nclass InstituteForm(forms.ModelForm):\r\n class Meta:\r\n model = InstituteProfile \r\n fields = '__all__'\r\n exclude = ['user', 'time']\r\n\r\n widgets = {\r\n 'instituteName': forms.TextInput(attrs={'class': 'form-control','required': True}),\r\n 'instituteEmail': forms.TextInput(attrs={'class': 'form-control', 'required': True}),\r\n 'instituteAbout': forms.Textarea(attrs={'class':'form-control','rows':2}),\r\n 'institutePhoneNo1': forms.TextInput(attrs={'class': 'form-control', 'required': True}),\r\n 'institutePhoneNo2': forms.TextInput(attrs={'class': 'form-control'}),\r\n 'instituteAddress': forms.TextInput(attrs={'class': 'form-control','required': True}),\r\n 'instituteState': forms.TextInput(attrs={'class': 'form-control','required': True}),\r\n 'instituteCity': forms.TextInput(attrs={'class':'form-control','required': True}),\r\n 'institutePostalCode': forms.NumberInput(attrs={'class':'form-control','required': True}),\r\n 'instituteCountry': forms.TextInput(attrs={'class': 'form-control', 'required': True}),\r\n 'instituteStatus': forms.Select(attrs={'class': 'custom-select', 'required': True}),\r\n }\r\n\r\nclass FranchiseForm(forms.ModelForm):\r\n class Meta:\r\n model = FranchiseProfile \r\n fields = '__all__'\r\n exclude = ['user', 'time']\r\n\r\n widgets = {\r\n 'franchiseName': forms.TextInput(attrs={'class': 'form-control','required': True}),\r\n 'franchiseEmail': forms.TextInput(attrs={'class': 'form-control', 'required': True}),\r\n 'franchiseAbout': forms.Textarea(attrs={'class':'form-control','rows':2}),\r\n 'franchisePhoneNo1': forms.TextInput(attrs={'class': 'form-control', 'required': True}),\r\n 'franchisePhoneNo2': forms.TextInput(attrs={'class': 'form-control'}),\r\n 'franchiseAddress': forms.TextInput(attrs={'class': 'form-control','required': True}),\r\n 'franchiseState': forms.TextInput(attrs={'class': 'form-control','required': True}),\r\n 'franchiseCity': forms.TextInput(attrs={'class':'form-control','required': True}),\r\n 'franchisePostalCode': forms.NumberInput(attrs={'class':'form-control','required': True}),\r\n 'franchiseCountry': forms.TextInput(attrs={'class': 'form-control', 'required': True}),\r\n 'franchiseStatus': forms.Select(attrs={'class': 'custom-select', 'required': True}),\r\n }\r\n\r\n\r\n\r\nclass StudentForm(forms.ModelForm):\r\n class Meta:\r\n model = Student\r\n fields = '__all__'\r\n exclude = ['user', 'time']\r\n\r\n widgets = {\r\n 'student_name': forms.TextInput(attrs={'class': 'form-control','required': True}),\r\n 'stdent_Email': forms.TextInput(attrs={'class': 'form-control', 'required': True}),\r\n 'student_About': forms.Textarea(attrs={'class':'form-control','rows':2}),\r\n 'student_gender': forms.Select(attrs={'class':'custom-select', 'required': True}),\r\n 'student_PhoneNo1': forms.TextInput(attrs={'class': 'form-control', 'required': True}),\r\n 'student_PhoneNo2': forms.TextInput(attrs={'class': 'form-control'}),\r\n 'student_Address': forms.TextInput(attrs={'class': 'form-control','required': True}),\r\n 'student_City': forms.TextInput(attrs={'class':'form-control','required': True}),\r\n 'student_Zipcode': forms.NumberInput(attrs={'class':'form-control','required': True}),\r\n 
'student_State': forms.TextInput(attrs={'class': 'form-control','required': True}),\r\n            'student_Country': forms.TextInput(attrs={'class': 'form-control', 'required': True}),\r\n        }" }, { "alpha_fraction": 0.6168309450149536, "alphanum_fraction": 0.6226724982261658, "avg_line_length": 28.897727966308594, "blob_id": "94626ecbe3208ea3ed64da81e72fca6ff3e23620", "content_id": "17850d2ac910eaee9135b3f6175738c662448359", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5478, "license_type": "no_license", "max_line_length": 87, "num_lines": 176, "path": "/inventory/views.py", "repo_name": "raja456881/Lms3", "src_encoding": 
'''count'''\r\n course_count=CourseDetails.objects.all().count()\r\n\r\n all_course = CourseDetails.objects.all()\r\n '''filter'''\r\n\r\n \"\"'CAtegory_Course'\"\"\r\n Course_Category=Categories.objects.count()\r\n\r\n '''courseStatus_wise'''\r\n a,i=0,0\r\n for a_ctive in all_course:\r\n if a_ctive.courseStatus=='active':\r\n a+=1\r\n else:\r\n i+=1\r\n\r\n\r\n thank=True\r\n context={\r\n 'category_course_count':Course_Category,\r\n 'course_count':course_count,\r\n 'active_course_count':a,\r\n 'inactive_course_count':i,\r\n 'courseDetails': courseDetails,\r\n 'thank':thank\r\n\r\n }\r\n return render(request,'course.html',context)\r\n\r\n\r\n\r\ndef showCategory(request):\r\n\r\n ''' show category in tabel format '''\r\n\r\n categories = Categories.objects.all()\r\n context = {'categories':categories}\r\n return render(request,'view_category.html',context)\r\n\r\n\r\ndef addCategory(request):\r\n form = CategoriesForm()\r\n if request.method == 'POST':\r\n form = CategoriesForm(request.POST)\r\n if form.is_valid():\r\n form.save()\r\n messages.info(request,'New category is added successfully')\r\n else:\r\n messages.info(request,form.errors)\r\n return redirect('show_category')\r\n context = {'form':form}\r\n return render(request,'add-category.html',context)\r\n\r\n\r\ndef editCategory(request , slugfield):\r\n\r\n categories = get_object_or_404(Categories ,slug = slugfield)\r\n form = CategoriesForm(instance = categories)\r\n if request.method == 'POST':\r\n form = CategoriesForm(request.POST,instance =categories )\r\n if form.is_valid():\r\n form.save()\r\n messages.info(request,'Changes are made successfully')\r\n else:\r\n messages.info(request,form.errors)\r\n return redirect('show_category')\r\n\r\n context = {'form':form,'categories':categories}\r\n return render(request,'edit_categories.html',context)\r\n\r\n\r\n\r\ndef deleteCategory(request , pk):\r\n\r\n ''' delete course of id = pk '''\r\n\r\n category = get_object_or_404(Categories ,id=pk)\r\n if request.method == 'POST' or 'GET':\r\n category.delete()\r\n messages.info(request,'Category is deleted successfully')\r\n return redirect('show_category')\r\n return render(request,'view_category.html')\r\n\r\n\r\ndef detailView(request,slugfield):\r\n course = CourseDetails.objects.get(slug = slugfield)\r\n context = {'course':course}\r\n return render(request,'course_detail_view.html',context)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.6575875282287598, "alphanum_fraction": 0.6653696298599243, "avg_line_length": 34.71428680419922, "blob_id": "f7814cb69d70a04829e87d0eabc9fd9d0f63524d", "content_id": "a56a415d121e62a3d38fd3d822fe69a55709f255", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 257, "license_type": "no_license", "max_line_length": 74, "num_lines": 7, "path": "/homeapp/urls.py", "repo_name": "raja456881/Lms3", "src_encoding": "UTF-8", "text": "from django.urls import path\r\nfrom . 
import views\r\nurlpatterns = [\r\n    path('homelogin/', views.homepage1 , name='homepage1'),\r\n    path('', views.homePage , name='homePage'),\r\n    path('course/<slug:slugfield>/',views.coursePage , name='course_page')\r\n]\r\n" }, { "alpha_fraction": 0.6382665038108826, "alphanum_fraction": 0.6438679099082947, "avg_line_length": 34.494625091552734, "blob_id": "0be29c9eaf3ce4ca6ddc41078c3b557e0b6", "content_id": "8a8861b8d3166477bda20eb56120ad8a4a83f8d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3392, "license_type": "no_license", "max_line_length": 87, "num_lines": 93, "path": "/trainer/views.py", "repo_name": "raja456881/Lms3", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, redirect\r\nfrom .form import CourseDetailsForm, VideoForm, CategoriesForm\r\nfrom inventory.models import CourseVideos, CourseDetails\r\nfrom django.contrib import messages\r\nfrom multiuser.models import TrainerProfile\r\n# Create your views here.\r\n\r\ndef trainerProfile(request):\r\n    return render(request,'trainer/trainer-profile-look.html')\r\n\r\ndef addCourse(request):\r\n    return render(request,'trainer/T-addcourses.html')\r\n\r\ndef students(request):\r\n    return render(request,'trainer/T-student.html')\r\n\r\ndef trainerCourses(request):\r\n    return render(request,'trainer/T-coursesdetails.html')\r\n\r\n\r\ndef addCourse1(request):\r\n\r\n    ''' add course from user '''\r\n\r\n    form = CourseDetailsForm()\r\n    form1 = VideoForm()\r\n    template = 'offline.html'\r\n    context = {'form':form,'form1':form1}\r\n    if request.method == \"POST\":\r\n        form1 = VideoForm(request.POST, request.FILES)\r\n        form = CourseDetailsForm(request.POST, request.FILES)\r\n        email=request.user.email\r\n        trainer1=TrainerProfile.objects.get(trainerEmail=email)\r\n        # both forms must be valid before anything is saved\r\n        if form1.is_valid() and form.is_valid():\r\n            courseObj = form.save()\r\n            name=courseObj.courseName\r\n            videoObj=form1.save()\r\n            pk = videoObj.id\r\n            CourseDetails.objects.filter(courseName=name).update(trainer=trainer1)\r\n            CourseVideos.objects.filter(id=pk).update(course=courseObj)\r\n            messages.info(request,'Course is added successfully')\r\n            return redirect ('trainer_profile')\r\n        else:\r\n            messages.info(request,'Errors are {} {}'.format(form.errors , form1.errors))\r\n            return render(request, template,context)\r\n    return render(request, template,context)\r\n\r\n\r\n\r\ndef addCategory(request):\r\n    form = CategoriesForm()\r\n    context = {'form':form}\r\n    if request.method == 'POST':\r\n        form = CategoriesForm(request.POST)\r\n        if form.is_valid():\r\n            form.save()\r\n            messages.info(request,'New category is added successfully')\r\n            return redirect('add_course')\r\n        else:\r\n            messages.info(request,form.errors)\r\n            return render(request, 'add-category.html',context)\r\n\r\n    return render(request,'add-category.html',context)\r\n\r\n\r\n\r\ndef onlineaddcourse(request):\r\n\r\n    ''' add course from user '''\r\n\r\n    form = CourseDetailsForm()\r\n    form1 = VideoForm()\r\n    template = 'Addcourse.html'\r\n    context = {'form':form,'form1':form1}\r\n    if request.method == \"POST\":\r\n        email=request.user.email\r\n        traine1=TrainerProfile.objects.get(trainerEmail=email)\r\n        form1 = VideoForm(request.POST, request.FILES)\r\n        form = CourseDetailsForm(request.POST, request.FILES)\r\n        if form1.is_valid() and form.is_valid():\r\n            courseObj = form.save()\r\n            videoObj=form1.save()\r\n            pk = videoObj.id\r\n            name=courseObj.courseName\r\n            CourseVideos.objects.filter(id=pk).update(course=courseObj)\r\n            
CourseDetails.objects.filter(courseName=name).update(trainer=traine1)\r\n messages.info(request,'Course is added succesfully')\r\n return redirect ('trainer_profile')\r\n else:\r\n messages.info(request,'Error are {} {}'.format(form.errors , form1.errors))\r\n return render(request, template,context)\r\n return render(request, template,context)" }, { "alpha_fraction": 0.6513761281967163, "alphanum_fraction": 0.6513761281967163, "avg_line_length": 20.200000762939453, "blob_id": "a456ea57cabb3dd7e7243cea09735290c75a0a92", "content_id": "cfe338f9d74a6c1ab4e9d16b6a8af436695dec49", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 109, "license_type": "no_license", "max_line_length": 38, "num_lines": 5, "path": "/quiz/urls.py", "repo_name": "raja456881/Lms3", "src_encoding": "UTF-8", "text": "from django.urls import path\r\nfrom . import views\r\nurlpatterns = [\r\n path('',views.quiz, name = 'quiz')\r\n]" }, { "alpha_fraction": 0.5839243531227112, "alphanum_fraction": 0.611347496509552, "avg_line_length": 50.875, "blob_id": "9c86cddb677176360ea2f2bf2a2793d5647dd3ad", "content_id": "1fc08974bee4d561e3c50ea24e5fcdc19661372e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2115, "license_type": "no_license", "max_line_length": 145, "num_lines": 40, "path": "/student/migrations/0001_initial.py", "repo_name": "raja456881/Lms3", "src_encoding": "UTF-8", "text": "# Generated by Django 3.0.7 on 2020-09-27 11:34\r\n\r\nfrom django.conf import settings\r\nfrom django.db import migrations, models\r\nimport django.db.models.deletion\r\nimport phonenumber_field.modelfields\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n initial = True\r\n\r\n dependencies = [\r\n ('inventory', '0001_initial'),\r\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\r\n ]\r\n\r\n operations = [\r\n migrations.CreateModel(\r\n name='Student',\r\n fields=[\r\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\r\n ('student_FullName', models.CharField(blank=True, max_length=150)),\r\n ('student_About', models.CharField(blank=True, max_length=2500)),\r\n ('student_gender', models.CharField(choices=[('Male', 'Male'), ('Female', 'Female'), ('others', 'Transgender')], max_length=50)),\r\n ('student_Image', models.FileField(blank=True, null=True, upload_to='Student_image/')),\r\n ('stdent_Email', models.EmailField(max_length=111)),\r\n ('student_created_at', models.DateTimeField(auto_now_add=True)),\r\n ('student_PhoneNo1', phonenumber_field.modelfields.PhoneNumberField(max_length=128, region=None)),\r\n ('student_PhoneNo2', phonenumber_field.modelfields.PhoneNumberField(max_length=128, region=None)),\r\n ('student_Address', models.CharField(default='', max_length=250)),\r\n ('student_City', models.CharField(default='', max_length=100)),\r\n ('student_Zipcode', models.IntegerField(default=273003)),\r\n ('student_State', models.CharField(default='', max_length=100)),\r\n ('student_Country', models.CharField(default='', max_length=100)),\r\n ('course_id', models.ManyToManyField(default=1, to='inventory.CourseDetails')),\r\n ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='student', to=settings.AUTH_USER_MODEL)),\r\n ],\r\n ),\r\n ]\r\n" }, { "alpha_fraction": 0.718367338180542, "alphanum_fraction": 0.7278911471366882, "avg_line_length": 34.75, "blob_id": "2fcdaddff5a6c32321b880d4bdaf43121b1dea5e", "content_id": 
"246c505d42ab5fcdf1c66ed0977a29bd30d43099", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 735, "license_type": "no_license", "max_line_length": 129, "num_lines": 20, "path": "/homeapp/views.py", "repo_name": "raja456881/Lms3", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\r\nfrom inventory.models import *\r\nfrom django.shortcuts import get_object_or_404\r\n# Create your views here.\r\n\r\ndef homepage1(request):\r\n courseDetails = CourseDetails.objects.all()\r\n messages=True\r\n return render(request,'index_main.html',{'courseDetails':courseDetails, \"messages\":messages , \"name\":request.user.username })\r\n\r\n\r\ndef homePage(request):\r\n courseDetails = CourseDetails.objects.all()\r\n return render(request,'index_main.html',{'courseDetails':courseDetails })\r\n\r\n\r\ndef coursePage(request , slugfield ):\r\n course = get_object_or_404(CourseDetails , slug = slugfield)\r\n context = {'course':course}\r\n return render(request,'homepage_course_details.html',context)\r\n" }, { "alpha_fraction": 0.6793103218078613, "alphanum_fraction": 0.6793103218078613, "avg_line_length": 45.490909576416016, "blob_id": "b6cd41ec449734c0b8a93123497b59cd1a879347", "content_id": "abccebf0455527440591cc9a0491c67427c2bb4d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2610, "license_type": "no_license", "max_line_length": 97, "num_lines": 55, "path": "/customerManagement/urls.py", "repo_name": "raja456881/Lms3", "src_encoding": "UTF-8", "text": "from django.urls import path\r\nfrom . import views\r\nurlpatterns = [\r\n #for view customer\r\n path('',views.Customer, name = 'view_Customer'),\r\n\r\n #To add customer\r\n\r\n path('add',views.addCustomer, name = 'add_Customer'),\r\n path('add_admin',views.addAdmin, name = 'add_Admin'),\r\n path('add_trainer',views.addTrainer, name = 'add_Trainer'),\r\n path('add_institute',views.addInstitute, name = 'add_Institute'),\r\n path('add_franchise',views.addFranchise, name = 'add_Franchise'),\r\n\r\n #To edit customer\r\n path('update_admin/<str:username>',views.updateAdmin, name = 'update_Admin'),\r\n path('update_trainer/<str:username>',views.updateTrainer, name = 'update_Trainer'),\r\n path('update_institute/<str:username>',views.updateInstitute, name = 'update_Institute'),\r\n path('update_franchise/<str:username>',views.updateFranchise, name = 'update_Franchise'),\r\n\r\n #To delete Customer\r\n path('delete_admin/<int:uniqueId>',views.deleteAdmin, name = 'delete_Admin'),\r\n path('delete_trainer/<int:uniqueId>',views.deleteTrainer, name = 'delete_Trainer'),\r\n path('delete_institute/<int:uniqueId>',views.deleteInstitute, name = 'delete_Institute'),\r\n path('delete_franchise/<int:uniqueId>',views.deleteFranchise, name = 'delete_Franchise'),\r\n\r\n\r\n #for pending users view\r\n path('pending',views.pendingUser, name = 'pending_User'),\r\n\r\n #to verify user\r\n\r\n path('verify_admin/<int:uniqueId>',views.verifyAdmin, name = 'verify_Admin'),\r\n path('verify_trainer/<int:uniqueId>',views.verifyTrainer, name = 'verify_Trainer'),\r\n path('verify_institute/<int:uniqueId>',views.verifyInstitute, name = 'verify_Institute'),\r\n path('verify_franchise/<int:uniqueId>',views.verifyFranchise, name = 'verify_Franchise'),\r\n\r\n \r\n #to discard user\r\n\r\n path('discard_admin/<str:username>',views.discardAdmin, name = 'discard_Admin'),\r\n path('discard_trainer/<str:username>',views.discardTrainer, name = 
'discard_Trainer'),\r\n path('discard_institute/<str:username>',views.discardInstitute, name = 'discard_Institute'),\r\n path('discard_franchise/<str:username>',views.discardFranchise, name = 'discard_Franchise'),\r\n\r\n\r\n\r\n #to display profile\r\n path('profile_admin/<str:username>',views.adminProfile, name = 'admin_Profile'),\r\n path('profile_trainer/<str:username>',views.trainerProfile, name = 'trainer_Profile'),\r\n path('profile_institute/<str:username>',views.instituteProfile, name = 'institute_Profile'),\r\n path('profile_franchise/<str:username>',views.franchiseProfile, name = 'franchise_Profile'),\r\n\r\n \r\n]" }, { "alpha_fraction": 0.47789472341537476, "alphanum_fraction": 0.5831578969955444, "avg_line_length": 23, "blob_id": "755b5321133e273f4b7d9e4fa0d665b91c999665", "content_id": "b90c0138fe738a0f360936e833cf9f0b8bd325ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 475, "license_type": "no_license", "max_line_length": 111, "num_lines": 19, "path": "/crm/migrations/0005_auto_20200925_1829.py", "repo_name": "raja456881/Lms3", "src_encoding": "UTF-8", "text": "# Generated by Django 3.0.7 on 2020-09-25 12:59\r\n\r\nimport datetime\r\nfrom django.db import migrations, models\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('crm', '0004_auto_20200925_1827'),\r\n ]\r\n\r\n operations = [\r\n migrations.AlterField(\r\n model_name='order',\r\n name='date',\r\n field=models.DateTimeField(blank=True, default=datetime.datetime(2020, 9, 25, 18, 29, 39, 651722)),\r\n ),\r\n ]\r\n" }, { "alpha_fraction": 0.6910569071769714, "alphanum_fraction": 0.6910569071769714, "avg_line_length": 12.625, "blob_id": "ea9a73bf21bb3d885856c59e5fb7db1dac9c24d0", "content_id": "b353090fe3a637065796e1045b60c8dff6371ddf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 123, "license_type": "no_license", "max_line_length": 32, "num_lines": 8, "path": "/student/urls.py", "repo_name": "raja456881/Lms3", "src_encoding": "UTF-8", "text": "from django.contrib import admin\r\nfrom django.urls import path\r\nfrom student import views\r\n\r\nurlpatterns = [\r\n\r\n\r\n]\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.6250391006469727, "alphanum_fraction": 0.6250391006469727, "avg_line_length": 26.74774742126465, "blob_id": "03f715972ad2a37cb9e0835ca2797f836df93de9", "content_id": "9b1d524d15e8a24bbef1c8e72f89e0ee354f757c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3195, "license_type": "no_license", "max_line_length": 86, "num_lines": 111, "path": "/multiuser/form.py", "repo_name": "raja456881/Lms3", "src_encoding": "UTF-8", "text": "from django import forms\r\nfrom django.forms import ModelForm\r\nfrom django.contrib.auth.forms import UserCreationForm\r\nfrom .models import User,AdminProfile,TrainerProfile,InstituteProfile,FranchiseProfile\r\nfrom student.models import Student\r\nfrom django.db import transaction\r\n\r\nclass AdminSignUpForm(UserCreationForm):\r\n email = forms.CharField(required=True)\r\n class Meta(UserCreationForm.Meta):\r\n model = User\r\n\r\n @transaction.atomic\r\n def save(self,commit=True):\r\n user = super().save(commit=False)\r\n user.is_admin = True\r\n user.save()\r\n if commit:\r\n admin=AdminProfile.objects.create(user=user)\r\n admin.adminFullName=self.cleaned_data.get(\"username\")\r\n admin.adminEmail = self.cleaned_data.get(\"email\")\r\n admin.save()\r\n\r\n\r\n return 
user\r\n\r\n\r\nclass FranchiseSignUpForm(UserCreationForm):\r\n\r\n email = forms.CharField(required=True)\r\n\r\n class Meta(UserCreationForm.Meta):\r\n model = User\r\n\r\n @transaction.atomic\r\n def save(self,commit=True):\r\n user = super().save(commit=False)\r\n user.is_franchise = True\r\n user.save()\r\n if commit:\r\n admin=FranchiseProfile.objects.create(user=user)\r\n admin.franchiseName=self.cleaned_data.get(\"username\")\r\n admin.franchiseEmail = self.cleaned_data.get(\"email\")\r\n admin.save()\r\n\r\n\r\n return user\r\n\r\n\r\n\r\nclass InstituteSignUpForm(UserCreationForm):\r\n email = forms.CharField(required=True)\r\n class Meta(UserCreationForm.Meta):\r\n model = User\r\n\r\n @transaction.atomic\r\n def save(self,commit=True):\r\n user = super().save(commit=False)\r\n user.is_institute = True\r\n user.save()\r\n if commit:\r\n institute=InstituteProfile.objects.create(user=user)\r\n institute.instituteName=self.cleaned_data.get(\"username\")\r\n institute.instituteEmail = self.cleaned_data.get(\"email\")\r\n institute.save()\r\n\r\n\r\n return user\r\n\r\n\r\n\r\nclass TrainerSignUpForm(UserCreationForm):\r\n email = forms.CharField(required=True)\r\n class Meta(UserCreationForm.Meta):\r\n model = User\r\n\r\n @transaction.atomic\r\n def save(self,commit=True):\r\n user = super().save(commit=False)\r\n user.is_trainer = True\r\n user.save()\r\n if commit:\r\n trainer=TrainerProfile.objects.create(user=user)\r\n trainer.trainerFullName=self.cleaned_data.get(\"username\")\r\n trainer.trainerEmail = self.cleaned_data.get(\"email\")\r\n trainer.save()\r\n\r\n\r\n return user\r\n\r\n\r\n\r\n\r\nclass StudentSignUpForm(UserCreationForm):\r\n email = forms.CharField(required=True)\r\n class Meta(UserCreationForm.Meta):\r\n model = User\r\n\r\n @transaction.atomic\r\n def save(self,commit=True):\r\n user = super().save(commit=False)\r\n user.is_student = True\r\n user.save()\r\n if commit:\r\n student=Student.objects.create(user=user)\r\n student.student_FullName=self.cleaned_data.get(\"username\")\r\n student.stdent_Email = self.cleaned_data.get(\"email\")\r\n student.save()\r\n\r\n\r\n return user\r\n\r\n\r\n" }, { "alpha_fraction": 0.47789472341537476, "alphanum_fraction": 0.5831578969955444, "avg_line_length": 23, "blob_id": "e670a893383657a7c3726b1a76c6114903cc413b", "content_id": "63da84cfdcfb7c86d5db9c8896f3c83c70cb6453", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 475, "license_type": "no_license", "max_line_length": 111, "num_lines": 19, "path": "/crm/migrations/0003_auto_20200925_1814.py", "repo_name": "raja456881/Lms3", "src_encoding": "UTF-8", "text": "# Generated by Django 3.0.7 on 2020-09-25 12:44\r\n\r\nimport datetime\r\nfrom django.db import migrations, models\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('crm', '0002_auto_20200925_1803'),\r\n ]\r\n\r\n operations = [\r\n migrations.AlterField(\r\n model_name='order',\r\n name='date',\r\n field=models.DateTimeField(blank=True, default=datetime.datetime(2020, 9, 25, 18, 14, 15, 666781)),\r\n ),\r\n ]\r\n" }, { "alpha_fraction": 0.6624472737312317, "alphanum_fraction": 0.6645569801330566, "avg_line_length": 34.61538314819336, "blob_id": "a59f636992b0d69bc368fc57db6fce6892ecc3b9", "content_id": "1dd755943fdd0ebb1f76c0a63a1d04997ec0a323", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 474, "license_type": "no_license", "max_line_length": 69, "num_lines": 13, 
"path": "/trainer/urls.py", "repo_name": "raja456881/Lms3", "src_encoding": "UTF-8", "text": "from django.urls import path\r\nfrom . import views\r\nurlpatterns = [\r\n \r\n path('',views.trainerProfile, name = 'trainer_profile'),\r\n path('addCourse',views.addCourse1, name = 'add_course'),\r\n path('students',views.students, name = 'students'),\r\n path('courses',views.trainerCourses, name = 'trainer_courses'),\r\n path(\"addcategory\", views.addCategory, name=\"addcategory\"),\r\n path(\"onlinecourse\", views.onlineaddcourse, name=\"onlinecourse\")\r\n\r\n \r\n]" }, { "alpha_fraction": 0.6273477077484131, "alphanum_fraction": 0.6333158016204834, "avg_line_length": 58.94736862182617, "blob_id": "788548d3a9eaef590a33e368fd041a3b9136e839", "content_id": "f281415caa35387522b1eb144f2d664d02e7b86e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5697, "license_type": "no_license", "max_line_length": 139, "num_lines": 95, "path": "/trainer/form.py", "repo_name": "raja456881/Lms3", "src_encoding": "UTF-8", "text": "from django.forms import ModelForm\nfrom django import forms\nfrom inventory.models import CourseDetails , Categories , CourseVideos\n\n\n\nclass CourseDetailsForm(forms.ModelForm):\n weekdayChoice = (\n ('Monday','Monday'),\n ('Tuesday','Tuesday'),\n ('Wednesday','Wednesday'),\n ('Thursday','Thursday'),\n ('Friday','Friday'),\n ('Saturday','Saturday'),\n ('Sunday','Sunday')\n )\n\n class Meta:\n model = CourseDetails\n fields = ['courseName','courseCategory','courseDescription','courseOnline','courseLive','courseOffline','seo_keywords',\n 'courseStatus','coursePrice','courseCity','courseState','courseCountry','priceDiscount','courseImage',\n 'courseBelong','seo_title','seo_description','slug' ,'trainer','institute','franchise','totalPriceDiscount','introVideo',\n 'offlineTiming','onlineTiming','offlineAddress','offlineClassStrength','offlineWeekday','onlineWeekday','promocode']\n\n\n widgets ={\n 'courseName':forms.TextInput(attrs={'class':'form-control','placeholder':'eg:-React','required':True}),\n 'courseDescription':forms.Textarea(attrs={'class':'form-control','placeholder':'Full Python Course','rows':3,'required':True}),\n 'courseCategory':forms.Select(attrs={'class':'custom-select','required':True}),\n 'courseStatus':forms.Select(attrs={'class':'custom-select','required':True}),\n 'courseCity':forms.TextInput(attrs={'class':'form-control','placeholder':'eg:-Mumbai'}),\n 'courseState':forms.TextInput(attrs={'class':'form-control','placeholder':'eg:-Maharashtra'}),\n 'courseCountry':forms.TextInput(attrs={'class':'form-control','placeholder':'eg:-India'}),\n 'coursePrice':forms.NumberInput(attrs={'class':'form-control','placeholder':'$100','required':True}),\n 'priceDiscount':forms.NumberInput(attrs={'class':'form-control','placeholder':'10',}),\n 'courseOnline':forms.NullBooleanSelect(attrs={'class':'custom-select','required':True}),\n 'courseLive':forms.NullBooleanSelect(attrs={'class':'custom-select','required':True}),\n 'courseOffline':forms.NullBooleanSelect(attrs={'class':'custom-select','required':True}),\n 'seo_title':forms.TextInput(attrs={'class':'form-control','required':True}),\n 'seo_description':forms.TextInput(attrs={'class':'form-control','required':True}),\n 'seo_keywords':forms.TextInput(attrs={'class':'form-control','required':True}),\n 'slug':forms.TextInput(attrs={'class':'form-control','required':True}),\n 'trainer':forms.Select(attrs={'class':'form-control'}),\n 
'institute':forms.Select(attrs={'class':'form-control'}),\n 'franchise':forms.Select(attrs={'class':'form-control'}),\n 'offlineTiming':forms.DateTimeInput(attrs={'class':'form-control','placeholder':'2020-09-15 14:30'}),\n 'onlineTiming':forms.DateTimeInput(attrs={'class':'form-control','placeholder':'2020-09-15 14:30'}),\n 'offlineAddress':forms.Textarea(attrs={'class':'form-control','rows':2}),\n 'offlineClassStrength':forms.NumberInput(attrs={'class':'form-control'}),\n #'offlineWeekday':forms.CheckboxSelectMultiple(attrs={'class':''}),\n #'onlineWeekday':forms.CheckboxSelectMultiple(),\n 'promocode':forms.TextInput(attrs={'class':'form-control','placeholder':'eg:-Offer20'}),\n 'totalPriceDiscount':forms.NumberInput(attrs={'class':'form-control'})\n }\n\n\nclass CategoriesForm(forms.ModelForm):\n class Meta:\n model = Categories\n fields = ['parent','name','seo_title','seo_description','categoryStatus','slug','seo_keywords']\n\n widgets = {\n 'parent':forms.Select(attrs={'class':'form-control'}),\n 'name':forms.TextInput(attrs={'class':'form-control','placeholder':'Web development','required':True}),\n 'categoryStatus':forms.Select(attrs={'class':'custom-select','required':True}),\n 'seo_title':forms.TextInput(attrs={'class':'form-control','required':True}),\n 'seo_description':forms.TextInput(attrs={'class':'form-control','required':True}),\n 'seo_keywords':forms.TextInput(attrs={'class':'form-control','required':True}),\n 'slug':forms.TextInput(attrs={'class':'form-control','required':True}),\n }\n\n\nclass TotalCourseForm(forms.ModelForm):\n class Meta:\n model = CourseDetails\n fields = ['courseName', 'courseCategory', 'courseStatus', 'courseState', 'courseCountry', 'courseBelong']\n\n widgets = {\n 'courseName': forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'eg:-React', 'required': True}),\n 'courseBelong': forms.Select(attrs={'class': 'custom-select', 'required': True}),\n 'courseCategory': forms.Select(attrs={'class': 'custom-select', 'required': True}),\n 'courseStatus': forms.Select(attrs={'class': 'custom-select', 'required': True}),\n 'courseState': forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'eg:-Maharashtra', 'required': True}),\n 'courseCountry': forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'eg:-India', 'required': True}),\n }\n\nclass VideoForm(forms.ModelForm):\n class Meta:\n model = CourseVideos\n fields = ['courseVideoName','courseVideoFile','courseVideoDescription']\n\n widgets = {\n 'courseVideoName': forms.TextInput(attrs={'class': 'form-control'}),\n 'courseVideoDescription': forms.Textarea(attrs={'class': 'form-control','rows':2}),\n }\n\n\n" }, { "alpha_fraction": 0.7077922224998474, "alphanum_fraction": 0.7532467246055603, "avg_line_length": 49.66666793823242, "blob_id": "95effa6dc63ffce7e8e5ed8bc5278b86aab59026", "content_id": "d7267b2d32a772d7ab6c6b8cf08d17b8340a25ff", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 154, "license_type": "permissive", "max_line_length": 128, "num_lines": 3, "path": "/static/assets/bootstrap-sidebar-2/README.markdown", "repo_name": "raja456881/Lms3", "src_encoding": "UTF-8", "text": "# Bootstrap Sidebar 2\n\nA Pen created on CodePen.io. 
Original URL: [https://codepen.io/sachin545/pen/bGErLdw](https://codepen.io/sachin545/pen/bGErLdw).\n\n\n" }, { "alpha_fraction": 0.6036300659179688, "alphanum_fraction": 0.617825984954834, "avg_line_length": 73.86154174804688, "blob_id": "d8cb5b9d1699ef4c5a7eec881496151d413181dd", "content_id": "f61a43487a07311d40d7b420951a547a18fc09e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9862, "license_type": "no_license", "max_line_length": 329, "num_lines": 130, "path": "/multiuser/migrations/0001_initial.py", "repo_name": "raja456881/Lms3", "src_encoding": "UTF-8", "text": "# Generated by Django 3.0.7 on 2020-09-27 11:34\r\n\r\nfrom django.conf import settings\r\nimport django.contrib.auth.models\r\nimport django.contrib.auth.validators\r\nfrom django.db import migrations, models\r\nimport django.db.models.deletion\r\nimport django.utils.timezone\r\nimport phonenumber_field.modelfields\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n initial = True\r\n\r\n dependencies = [\r\n ('auth', '0011_update_proxy_permissions'),\r\n ]\r\n\r\n operations = [\r\n migrations.CreateModel(\r\n name='User',\r\n fields=[\r\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\r\n ('password', models.CharField(max_length=128, verbose_name='password')),\r\n ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),\r\n ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),\r\n ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),\r\n ('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),\r\n ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),\r\n ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),\r\n ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),\r\n ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),\r\n ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),\r\n ('is_trainer', models.BooleanField(default=False)),\r\n ('is_admin', models.BooleanField(default=False)),\r\n ('is_student', models.BooleanField(default=False)),\r\n ('is_institute', models.BooleanField(default=False)),\r\n ('is_franchise', models.BooleanField(default=False)),\r\n ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. 
A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),\r\n ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),\r\n ],\r\n options={\r\n 'verbose_name': 'user',\r\n 'verbose_name_plural': 'users',\r\n 'abstract': False,\r\n },\r\n managers=[\r\n ('objects', django.contrib.auth.models.UserManager()),\r\n ],\r\n ),\r\n migrations.CreateModel(\r\n name='AdminProfile',\r\n fields=[\r\n ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, related_name='admin', serialize=False, to=settings.AUTH_USER_MODEL)),\r\n ('adminFullName', models.CharField(blank=True, max_length=150)),\r\n ('adminGender', models.CharField(choices=[('Male', 'Male'), ('Female', 'Female'), ('Other', 'Other')], default='', max_length=30)),\r\n ('adminAbout', models.TextField(blank=True, null=True)),\r\n ('adminEmail', models.EmailField(blank=True, max_length=100)),\r\n ('adminImage', models.FileField(blank=True, null=True, upload_to='Admin_image/')),\r\n ('adminPhoneNo1', phonenumber_field.modelfields.PhoneNumberField(max_length=128, region=None)),\r\n ('adminPhoneNo2', phonenumber_field.modelfields.PhoneNumberField(max_length=128, region=None)),\r\n ('adminAddress', models.CharField(blank=True, max_length=500)),\r\n ('adminCity', models.CharField(blank=True, max_length=50)),\r\n ('adminPostalCode', models.IntegerField(blank=True, null=True)),\r\n ('adminState', models.CharField(blank=True, max_length=50)),\r\n ('adminCountry', models.CharField(default='', max_length=20)),\r\n ('adminStatus', models.CharField(choices=[('Verified', 'Verified'), ('Pending', 'Pending'), ('Discarded', 'Discarded')], default='Pending', max_length=30)),\r\n ('adminAddedDate', models.DateTimeField(auto_now_add=True)),\r\n ],\r\n ),\r\n migrations.CreateModel(\r\n name='FranchiseProfile',\r\n fields=[\r\n ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),\r\n ('franchiseName', models.CharField(blank=True, max_length=200)),\r\n ('franchiseAbout', models.TextField(blank=True, null=True)),\r\n ('franchiseEmail', models.EmailField(blank=True, max_length=100)),\r\n ('franchiseImage', models.FileField(blank=True, null=True, upload_to='Franchise_image/')),\r\n ('franchisePhoneNo1', phonenumber_field.modelfields.PhoneNumberField(blank=True, max_length=128, null=True, region=None)),\r\n ('franchisePhoneNo2', phonenumber_field.modelfields.PhoneNumberField(blank=True, max_length=128, null=True, region=None)),\r\n ('franchiseAddress', models.CharField(blank=True, max_length=500)),\r\n ('franchiseCity', models.CharField(blank=True, max_length=50)),\r\n ('franchisePostalCode', models.IntegerField(blank=True, null=True)),\r\n ('franchiseState', models.CharField(blank=True, max_length=50)),\r\n ('franchiseCountry', models.CharField(default='', max_length=20)),\r\n ('franchiseStatus', models.CharField(choices=[('Verified', 'Verified'), ('Pending', 'Pending'), ('Discarded', 'Discarded')], default='Pending', max_length=30)),\r\n ('franchiseAddedDate', models.DateTimeField(auto_now_add=True)),\r\n ],\r\n ),\r\n migrations.CreateModel(\r\n name='InstituteProfile',\r\n fields=[\r\n ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, 
serialize=False, to=settings.AUTH_USER_MODEL)),\r\n ('instituteName', models.CharField(blank=True, max_length=200)),\r\n ('instituteAbout', models.TextField(blank=True, null=True)),\r\n ('instituteEmail', models.EmailField(blank=True, max_length=100)),\r\n ('instituteImage', models.FileField(blank=True, null=True, upload_to='Institute_image/')),\r\n ('institutePhoneNo1', phonenumber_field.modelfields.PhoneNumberField(blank=True, max_length=128, null=True, region=None)),\r\n ('institutePhoneNo2', phonenumber_field.modelfields.PhoneNumberField(blank=True, max_length=128, null=True, region=None)),\r\n ('instituteAddress', models.CharField(blank=True, max_length=500)),\r\n ('instituteCity', models.CharField(blank=True, max_length=50)),\r\n ('institutePostalCode', models.IntegerField(blank=True, null=True)),\r\n ('instituteState', models.CharField(blank=True, max_length=50)),\r\n ('instituteCountry', models.CharField(default='', max_length=20)),\r\n ('instituteStatus', models.CharField(choices=[('Verified', 'Verified'), ('Pending', 'Pending'), ('Discarded', 'Discarded')], default='Pending', max_length=30)),\r\n ('instituteAddedDate', models.DateTimeField(auto_now_add=True)),\r\n ],\r\n ),\r\n migrations.CreateModel(\r\n name='TrainerProfile',\r\n fields=[\r\n ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),\r\n ('trainerFullName', models.CharField(blank=True, max_length=150)),\r\n ('trainerGender', models.CharField(choices=[('Male', 'Male'), ('Female', 'Female'), ('Other', 'Other')], default='', max_length=30)),\r\n ('trainerAbout', models.TextField(blank=True, null=True)),\r\n ('trainerEmail', models.EmailField(blank=True, max_length=100)),\r\n ('trainerImage', models.FileField(blank=True, null=True, upload_to='Trainer_image/')),\r\n ('trainerPhoneNo1', phonenumber_field.modelfields.PhoneNumberField(max_length=128, region=None)),\r\n ('trainerPhoneNo2', phonenumber_field.modelfields.PhoneNumberField(max_length=128, region=None)),\r\n ('trainerAddress', models.CharField(blank=True, max_length=500)),\r\n ('trainerCity', models.CharField(blank=True, max_length=50)),\r\n ('trainerPostalCode', models.IntegerField(blank=True, null=True)),\r\n ('trainerState', models.CharField(blank=True, max_length=50)),\r\n ('trainerCountry', models.CharField(default='', max_length=20)),\r\n ('trainerStatus', models.CharField(choices=[('Verified', 'Verified'), ('Pending', 'Pending'), ('Discarded', 'Discarded')], default='Pending', max_length=30)),\r\n ('trainerAddedDate', models.DateTimeField(auto_now_add=True)),\r\n ],\r\n ),\r\n ]\r\n" }, { "alpha_fraction": 0.6440864205360413, "alphanum_fraction": 0.6506773829460144, "avg_line_length": 33.35219955444336, "blob_id": "e0d46c712b567720642a205b52cbb51e621bb990", "content_id": "0bf2de78f9a4d1af8145b6b7b32d8538bd3b494b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5462, "license_type": "no_license", "max_line_length": 146, "num_lines": 159, "path": "/crm/views.py", "repo_name": "raja456881/Lms3", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\n# Create your views here.\nfrom django.shortcuts import render\nfrom .models import *\nfrom django.shortcuts import render, redirect\nfrom multiuser.models import FranchiseProfile,InstituteProfile,TrainerProfile\nfrom student.models import Student\nfrom .models import location1, customer , Order\nfrom django.template.loader import get_template\nfrom io 
import BytesIO\nfrom xhtml2pdf import pisa\nfrom django.http import HttpResponse\nfrom inventory.models import CourseDetails\nfrom django.shortcuts import render, get_object_or_404\nimport json\ndef BulckEmail(request):\n    if request.method=='POST':\n        from django.core import mail\n        connection=mail.get_connection()\n        connection.open()\n        receivers_list=[]\n        name=request.POST['name']\n        subject=request.POST['subject']\n        message=request.POST['message']\n        if name=='Student':\n            print(name)\n            for user in Student.objects.all():\n                receivers_list.append(user.stdent_Email)\n        elif name=='Trainer':\n            print(name)\n            for user in TrainerProfile.objects.all():\n                receivers_list.append(user.trainerEmail)\n        elif name=='Institution':\n            print(name)\n            for user in InstituteProfile.objects.all():\n                receivers_list.append(user.instituteEmail)\n        elif name=='Franchise':\n            print(name)\n            for user in FranchiseProfile.objects.all():\n                receivers_list.append(user.franchiseEmail)\n        email1=mail.EmailMessage( subject, message , '[email protected]', receivers_list, connection=connection)\n        email1.send()\n        connection.close()\n    return render(request, \"bulk-mail.html\")\n\ndef singlemail(request):\n    institution=InstituteProfile.objects.all()\n    train=TrainerProfile.objects.all()\n    student=Student.objects.all()\n    print(student, train, institution)\n    params={'context': institution, 'context1': train, 'context2': student}\n    return render(request, \"individual-mail.html\", params)\n\n\n\n\ndef studentsingeemail(request):\n    if request.method=='POST':\n        from django.core import mail\n        connection=mail.get_connection()\n        connection.open()\n        receivers_list=[]\n        name=request.POST['name']\n        subject=request.POST['subject']\n        message=request.POST['message']\n        print(name)\n        receivers_list.append(name)\n        print(name)\n        email1=mail.EmailMessage(subject, message , '[email protected]', receivers_list, connection=connection)\n        email1.send()\n        connection.close()\n    return render(request , \"individual-mail.html\")\ndef trainersingeemail(request):\n    if request.method=='POST':\n        from django.core import mail\n        connection=mail.get_connection()\n        connection.open()\n        receivers_list=[]\n        name=request.POST['name']\n        subject=request.POST['subject']\n        message=request.POST['message']\n        receivers_list.append(name)\n        email1=mail.EmailMessage(subject, message , '[email protected]', receivers_list, connection=connection)\n        email1.send()\n        connection.close()\n    return render(request, \"individual-mail.html\")\n\n\ndef Institutionsingeemail(request):\n    if request.method=='POST':\n        from django.core import mail\n        connection=mail.get_connection()\n        connection.open()\n        receivers_list=[]\n        name=request.POST['name']\n        subject=request.POST['subject']\n        message=request.POST['message']\n        receivers_list.append(name)\n        email1=mail.EmailMessage(subject, message , '[email protected]', receivers_list, connection=connection)\n        email1.send()\n        connection.close()\n    return render(request, \"individual-mail.html\")\n\n\ndef location(request):\n    if request.method=='POST':\n        country1=request.POST['country']\n        state=request.POST['state']\n        city=request.POST['city']\n        locat=location1(country=country1, state=state, city=city)\n        locat.save()\n        return render(request, 'locat.html')\n\n    return render(request, 'locat.html')\n\n\ndef customerdetails(request):\n    customer1=customer.objects.all()\n    return render(request,'customer.html', {'context': customer1})\n\n\n\ndef orederdetails(request):\n    if request.method==\"POST\":\n        name=request.POST['name']\n        email=request.POST['email']\n        items_json=request.POST['itemsJson']\n        
amount=request.POST['amount']\n        phone=request.POST['phone']\n        data=json.loads(items_json)\n        prices=request.POST['prices1']\n        qty=request.POST['items1']\n\n        val1=\"\"\n        for val in data['pr'][1]:\n            val1=val1 + val\n        order=Order( items_json= items_json, name=name, email=email, amount=amount, phone=phone, coursename=val1, prices=prices, qty=qty)\n        order.save()\n        thank = True\n        id = order.order_id\n        return render(request, 'checkout.html', {'thank':thank, 'id': id})\n    return render(request, 'checkout.html')\n\n\n\n\n\ndef InvoicePDFView(request, id):\n    obj = get_object_or_404(Order, order_id=id)\n    obj={\"obj1\":obj}\n    template_name = get_template('invoice-template.html')\n    response=BytesIO()\n    data=template_name.render(obj)\n    pdfpage=pisa.pisaDocument(BytesIO(data.encode(\"ISO-8859-1\")), response)\n    if not pdfpage.err:\n        return HttpResponse(response.getvalue(), content_type='application/pdf')\n    else:\n        return HttpResponse('error generating pdf')\n" }, { "alpha_fraction": 0.7394366264343262, "alphanum_fraction": 0.7394366264343262, "avg_line_length": 45.66666793823242, "blob_id": "673f7db7dd6b19d53662aec8be0a227aece2e604", "content_id": "66ab70dbed963e7aa0563fc959bec1908606e964", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 142, "license_type": "permissive", "max_line_length": 118, "num_lines": 3, "path": "/static/assets/bootstrap-sidebar/README.markdown", "repo_name": "raja456881/Lms3", "src_encoding": "UTF-8", "text": "# bootstrap sidebar\n\nA Pen created on CodePen.io. Original URL: [https://codepen.io/rijdz/pen/zybbVK](https://codepen.io/rijdz/pen/zybbVK).\n\n\n" }, { "alpha_fraction": 0.6605603694915771, "alphanum_fraction": 0.6605603694915771, "avg_line_length": 35.79591751098633, "blob_id": "57fe27e422d6477d2253a4b8d720510f1ead452f", "content_id": "ab47860c4ba8e7d581ac0c4fb29407b7cdda3067", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1856, "license_type": "no_license", "max_line_length": 100, "num_lines": 49, "path": "/inventory/admin.py", "repo_name": "raja456881/Lms3", "src_encoding": "UTF-8", "text": "from django.contrib import admin\r\nfrom .models import CourseDetails , Categories,CourseVideos\r\nfrom mptt.admin import DraggableMPTTAdmin\r\n\r\n# Register your models here.\r\n\r\nclass CategoryAdmin(DraggableMPTTAdmin):\r\n    mptt_indent_field = \"name\"\r\n    list_display = ('tree_actions', 'indented_title',\r\n                    'related_products_count', 'related_products_cumulative_count')\r\n    list_display_links = ('indented_title',)\r\n\r\n    def get_queryset(self, request):\r\n        qs = super().get_queryset(request)\r\n\r\n        # Add cumulative product count\r\n        qs = Categories.objects.add_related_count(\r\n                qs,\r\n                CourseDetails,\r\n                'courseCategory',\r\n                'products_cumulative_count',\r\n                cumulative=True)\r\n\r\n        # Add non cumulative product count\r\n        qs = Categories.objects.add_related_count(qs,\r\n                 CourseDetails,\r\n                 'courseCategory',\r\n                 'products_count',\r\n                 cumulative=False)\r\n        return qs\r\n\r\n    def related_products_count(self, instance):\r\n        return instance.products_count\r\n    related_products_count.short_description = 'Related products (for this specific category)'\r\n\r\n    def related_products_cumulative_count(self, instance):\r\n        return instance.products_cumulative_count\r\n    related_products_cumulative_count.short_description = 'Related products (in tree)'\r\n\r\n\r\nclass CourseDetailsAdmin(admin.ModelAdmin):\r\n    list_display = 
('courseName','courseCategory','trainer','institute','franchise','courseCreated')\r\n\r\nclass CourseVideosAdmin(admin.ModelAdmin):\r\n list_display = ('courseVideoName','course','courseVideoCreated')\r\n\r\nadmin.site.register(CourseDetails,CourseDetailsAdmin)\r\nadmin.site.register(Categories , CategoryAdmin)\r\nadmin.site.register(CourseVideos,CourseVideosAdmin)\r\n\r\n\r\n" }, { "alpha_fraction": 0.6839120984077454, "alphanum_fraction": 0.6839120984077454, "avg_line_length": 50.185184478759766, "blob_id": "f60fad6af499b713609f432597d0f2b6a72a893e", "content_id": "8fb235cdae73f930fc49cef22076fe64e7aee753", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1411, "license_type": "no_license", "max_line_length": 102, "num_lines": 27, "path": "/multiuser/urls.py", "repo_name": "raja456881/Lms3", "src_encoding": "UTF-8", "text": "from django.urls import path\r\nfrom . import views\r\nurlpatterns = [\r\n\r\n path('regchoice/', views.Registerchoice, name='registerchoice'),\r\n path('stu_reg/', views.StudentSignUpView.as_view(), name='student_reg'),\r\n path('inst_reg/', views.InstituteSignUpView.as_view(), name='institute_reg'),\r\n path('fran_reg/', views.FranchiseSignUpView.as_view(), name='franchise_reg'),\r\n path('train_reg/', views.TrainerSignUpView.as_view(), name='trainer_reg'),\r\n path('admin_reg/', views.AdminView.as_view(), name='admin_reg'),\r\n path('login/', views.login, name='login'),\r\n path('logout',views.logoutPage,name='logout'),\r\n path('',views.user,name='user'),\r\n path('user_student', views.user_student, name='user_student'),\r\n path('user_trainer', views.user_trainer, name='user_trainer'),\r\n path('user_franchise', views.user_franchise, name='user_franchise'),\r\n\r\n\r\n # to display profile\r\n path('user_profile_student/<str:username>', views.studentProfile, name='u_student_Profile'),\r\n path('user_profile_trainer/<str:username>', views.trainerProfile, name='u_trainer_Profile'),\r\n path('user_profile_institute/<str:username>', views.instituteProfile, name='u_institute_Profile'),\r\n path('user_profile_franchise/<str:username>', views.franchiseProfile, name='u_franchise_Profile'),\r\n\r\n path('view/<slug:slugfield>', views.coursedetailView, name='multi_view_course'),\r\n\r\n]\r\n\r\n" }, { "alpha_fraction": 0.8181818127632141, "alphanum_fraction": 0.8181818127632141, "avg_line_length": 19.625, "blob_id": "af0e08041b48abec8c1cfb0f39a67d7c15a2df13", "content_id": "6a7e0136f8bf204e3e456824eea4c37dccd5e0eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 165, "license_type": "no_license", "max_line_length": 42, "num_lines": 8, "path": "/crm/admin.py", "repo_name": "raja456881/Lms3", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom.models import Invoice,customer, Order\n\n\n\nadmin.site.register(Invoice)\nadmin.site.register(customer)\nadmin.site.register(Order)\n" }, { "alpha_fraction": 0.5959596037864685, "alphanum_fraction": 0.612554132938385, "avg_line_length": 34.47368240356445, "blob_id": "8a736afff1b2e48cb28c31a94ae3f49e28eab582", "content_id": "8dfc0bfbb8435f6e5cee4c12b41419bf370a3f5e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1386, "license_type": "no_license", "max_line_length": 165, "num_lines": 38, "path": "/inventory/migrations/0002_auto_20200927_1704.py", "repo_name": "raja456881/Lms3", "src_encoding": "UTF-8", "text": "# Generated by Django 3.0.7 on 
2020-09-27 11:34\r\n\r\nfrom django.db import migrations, models\r\nimport django.db.models.deletion\r\nimport mptt.fields\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n initial = True\r\n\r\n dependencies = [\r\n ('inventory', '0001_initial'),\r\n ('multiuser', '0001_initial'),\r\n ]\r\n\r\n operations = [\r\n migrations.AddField(\r\n model_name='coursedetails',\r\n name='franchise',\r\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='multiuser.FranchiseProfile'),\r\n ),\r\n migrations.AddField(\r\n model_name='coursedetails',\r\n name='institute',\r\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='multiuser.InstituteProfile'),\r\n ),\r\n migrations.AddField(\r\n model_name='coursedetails',\r\n name='trainer',\r\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='multiuser.TrainerProfile'),\r\n ),\r\n migrations.AddField(\r\n model_name='categories',\r\n name='parent',\r\n field=mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='inventory.Categories'),\r\n ),\r\n ]\r\n" }, { "alpha_fraction": 0.6865478754043579, "alphanum_fraction": 0.7043635249137878, "avg_line_length": 34.83333206176758, "blob_id": "cde385f5e3316e18a2df02a3de1f293e7f42744b", "content_id": "832ee6bc49c0a9625ac12823d5606e53407b24dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3873, "license_type": "no_license", "max_line_length": 91, "num_lines": 108, "path": "/crm/models.py", "repo_name": "raja456881/Lms3", "src_encoding": "UTF-8", "text": "from django.db import models\n\n# Create your models here.\nfrom django.db import models\nfrom address.models import AddressField\nfrom django.utils.translation import ugettext as _\nfrom django_extensions.db.models import TimeStampedModel\nfrom datetime import date\nfrom decimal import Decimal\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom phonenumber_field.modelfields import PhoneNumberField\nfrom datetime import datetime\n\n# Create your models here.\nclass location1(models.Model):\n country=models.CharField(max_length=30)\n state=models.CharField(max_length=23)\n city=models.CharField(max_length=23)\n\nclass Currency(models.Model):\n code = models.CharField(unique=True, max_length=3)\n pre_symbol = models.CharField(blank=True, max_length=1)\n post_symbol = models.CharField(blank=True, max_length=1)\n\n def __unicode__(self):\n return self.code\n\nclass Address(models.Model):\n contact_name=models.CharField(max_length=23)\n address_one=AddressField()\n town=models.CharField(max_length=34)\n postcode=models.CharField(_(\"zip code\"), max_length=5, default=\"43701\")\n state = models.CharField(max_length=34)\n\nclass InvoiceManager(models.Manager):\n def get_invoiced(self):\n return self.filter(invoiced=True, draft=False)\n\n def get_due(self):\n return self.filter(invoice_date__lte=date.today(),\n invoiced=False,\n draft=False)\n\n\n\n\n\n\n\ngender=(('M','Male'),('F','Female'),('TS','Transgender'))\nclass customer(models.Model):\n id = models.AutoField(primary_key=True)\n Customer_name= models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)\n Customer_gender = models.CharField(choices=gender,max_length=50)\n Customer_profilepic = models.FileField()\n Customer_Email = models.EmailField(max_length=111)\n Customer_created_at = 
models.DateTimeField(auto_now_add=True)\n    Customer_PhoneNo1 = PhoneNumberField(null=False, blank=False, unique=False)\n    Customer_Street=models.CharField(max_length=250,default='')\n    Customer_Landmark = models.CharField(max_length=100, default='')\n    Customer_Zipcode = models.IntegerField(default=0)\n    Customer_State = models.CharField(max_length=100, default='')\n    Customer_Country = models.CharField(max_length=100, default='')\n    objects = models.Manager()\n\n\n\nclass Invoice(TimeStampedModel):\n    user = models.OneToOneField(customer, on_delete=models.CASCADE)\n    currency = models.ForeignKey(Currency, blank=True, null=True, on_delete=models.CASCADE)\n    address = models.CharField(max_length=34)\n    invoice_id = models.CharField(unique=True, max_length=6, null=True,\n                                  blank=True, editable=False)\n    invoice_date = models.DateField(default=date.today)\n    invoiced = models.BooleanField(default=False)\n    draft = models.BooleanField(default=False)\n    paid_date = models.DateField(blank=True, null=True)\n\n    objects = InvoiceManager()\n\n\n    def __unicode__(self):\n        return u'%s (%s)' % (self.invoice_id, self.total())\n\n    class Meta:\n        ordering = ('-invoice_date', 'id')\n\n    def total(self):\n        total = Decimal('0.00')\n        for item in self.items.all():\n            total = total + item.total()\n        return total\n\n    def file_name(self):\n        return u'Invoice %s.pdf' % self.invoice_id\n\nclass Order(models.Model):\n    order_id= models.AutoField(primary_key=True)\n    items_json= models.CharField(max_length=5000)\n    name=models.CharField(max_length=90)\n    email=models.CharField(max_length=111)\n    amount=models.IntegerField(default=0)\n    phone=models.CharField(max_length=111, default=\"\")\n    coursename=models.CharField(max_length=5000)\n    prices=models.CharField(max_length=500)\n    qty=models.CharField(max_length=400)\n    # pass the callable so the default is evaluated per save, not once at import time\n    date = models.DateTimeField(default=datetime.now, blank=True)\n\n\n\n" }, { "alpha_fraction": 0.6471306681632996, "alphanum_fraction": 0.6471306681632996, "avg_line_length": 37.095237731933594, "blob_id": "69fa1fface66c3261eb1220759124f62e87bfa2b", "content_id": "b56dd6315e67a3d74f58479525b81b6db6d67bcc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 819, "license_type": "no_license", "max_line_length": 87, "num_lines": 21, "path": "/inventory/urls.py", "repo_name": "raja456881/Lms3", "src_encoding": "UTF-8", "text": "from django.urls import path\r\nfrom . 
import views\r\nurlpatterns = [\r\n #for courses\r\n\r\n path('',views.inventory, name = 'inventory'),\r\n path('add/',views.addCourse, name = 'add_Course'),\r\n path('edit/<slug:slugfield>',views.editCourse, name = 'edit_Course'),\r\n path('delete/<int:pk>',views.deleteCourse, name = 'delete_Course'),\r\n path('course',views.Course,name='course'),\r\n\r\n #for categories \r\n\r\n path('add_category/',views.addCategory , name = 'add_category'),\r\n path('category/',views.showCategory , name = 'show_category'),\r\n path('edit_category/<slug:slugfield>',views.editCategory, name = 'edit_category'),\r\n path('delete_category/<int:pk>',views.deleteCategory, name = 'delete_category'),\r\n\r\n path('view/<slug:slugfield>', views.detailView, name='view_course'),\r\n\r\n]" }, { "alpha_fraction": 0.47789472341537476, "alphanum_fraction": 0.5831578969955444, "avg_line_length": 23, "blob_id": "d82c1b7993c7d6868400e55e7f6c8654cc2b0cef", "content_id": "efe5efa4c8ae29edca228bba08398a8ad65aab05", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 475, "license_type": "no_license", "max_line_length": 111, "num_lines": 19, "path": "/crm/migrations/0004_auto_20200925_1827.py", "repo_name": "raja456881/Lms3", "src_encoding": "UTF-8", "text": "# Generated by Django 3.0.7 on 2020-09-25 12:57\r\n\r\nimport datetime\r\nfrom django.db import migrations, models\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('crm', '0003_auto_20200925_1814'),\r\n ]\r\n\r\n operations = [\r\n migrations.AlterField(\r\n model_name='order',\r\n name='date',\r\n field=models.DateTimeField(blank=True, default=datetime.datetime(2020, 9, 25, 18, 27, 43, 646403)),\r\n ),\r\n ]\r\n" }, { "alpha_fraction": 0.5809627771377563, "alphanum_fraction": 0.6192560195922852, "avg_line_length": 28.46666717529297, "blob_id": "209064776b8f4ae7b7a4c43693e1a9a3620215e2", "content_id": "c24b025c65d56a5b12c5e60337cf68b013400002", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 914, "license_type": "no_license", "max_line_length": 113, "num_lines": 30, "path": "/crm/migrations/0002_auto_20200925_1803.py", "repo_name": "raja456881/Lms3", "src_encoding": "UTF-8", "text": "# Generated by Django 3.0.7 on 2020-09-25 12:33\r\n\r\nimport address.models\r\nfrom django.conf import settings\r\nfrom django.db import migrations, models\r\nimport django.db.models.deletion\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n initial = True\r\n\r\n dependencies = [\r\n ('address', '0002_auto_20160213_1726'),\r\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\r\n ('crm', '0001_initial'),\r\n ]\r\n\r\n operations = [\r\n migrations.AddField(\r\n model_name='customer',\r\n name='Customer_name',\r\n field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),\r\n ),\r\n migrations.AddField(\r\n model_name='address',\r\n name='address_one',\r\n field=address.models.AddressField(on_delete=django.db.models.deletion.CASCADE, to='address.Address'),\r\n ),\r\n ]\r\n" }, { "alpha_fraction": 0.5613576769828796, "alphanum_fraction": 0.5613576769828796, "avg_line_length": 25.5, "blob_id": "6af478833066c1fad518478d48b8d3bafda73a61", "content_id": "59ce841ca8cdfe719439c8cd5794379f856ad94a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 383, "license_type": "no_license", "max_line_length": 62, "num_lines": 14, "path": 
"/static/assets/js/dark.js", "repo_name": "raja456881/Lms3", "src_encoding": "UTF-8", "text": "$(document).ready(function(){\r\n $('#mode').click(function(){\r\n if($('link#styles').attr('href')==\"assets/css/style.css\"){\r\n $('#mode').attr('value','Switch To Day Mode')\r\n $('link#styles').attr('href','assets/css/darkmodestyle.css')\r\n }\r\n else\r\n {\r\n $('#mode').attr('value','Switch To Night Mode')\r\n $('link#styles').attr('href','assets/css/style.css')\r\n }\r\n })\r\n \r\n });" } ]
41
paulineAnnan/Data-curation
https://github.com/paulineAnnan/Data-curation
58d6fc3ec86f90ec4f884ffdd3c2160d7c0f3df7
a5d2c8947bf299eea382b8e0e793e1086cbfb055
339595ab09a21ee2f1c93a27ebd1b500a3323bca
refs/heads/master
2020-03-20T05:09:23.058328
2018-06-13T11:21:28
2018-06-13T11:21:28
137,205,233
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6615168452262878, "alphanum_fraction": 0.6615168452262878, "avg_line_length": 25.55769157409668, "blob_id": "2198f70bb4976c67ffc563a6d12727c28dfa9325", "content_id": "5f8746ef8e1ae348e2cc3224f0597838c40817e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1424, "license_type": "no_license", "max_line_length": 221, "num_lines": 52, "path": "/Development/flask/amazon.py", "repo_name": "paulineAnnan/Data-curation", "src_encoding": "UTF-8", "text": "import pymysql\nclass queries:\n\tdef __init__(self,clientId,query,domain,sessionId,createdOn,formatCreatedOn):\n\t\tself.clientId = clientId\n\t\tself.query = query\n\t\tself.domain = domain\n\t\tself.sessionId = sessionId\n\t\tself.createdOn = createdOn\n\t\tself.formatCreatedOn = formatCreatedOn\n\t\n\n\tdef save(self):\n\t\tqueries = {}\n\t\tclientId = self.clientId\n\t\tquery = self.query\n\t\tsessionId =self.sessionId\t\n\t\tdomain = self.domain\n\t\tcreatedOn= self.createdOn\n\t\tformatCreatedOn = self.formatCreatedOn\n\n\t\tcon = None\n\n\t\ttry :\n\t\t\tcon = pymysql.connect('localhost','root', 'rancard', 'data_curation')\n\t\t\tcur = con.cursor()\n\t\t\tstatement = \"INSERT INTO queries(clientId,query,domain,sessionId,createdOn,formatCreatedOn) VALUES('\"+ clientId +\"', '\"+ query +\"','\"+ domain +\"', '\"+ sessionId +\"','\"+ str(createdOn) +\"','\"+ str(formatCreatedOn) +\"')\"\n\t\t\tprint(\"Insert statement : \"+ statement)\n\t\t\tcur.execute(statement)\n\t\t\tcon.commit()\n\n\t\t\tqueries['query'] = self.query\n\t\t\tqueries['clientId'] = self.clientId\n\t\t\tqueries['domain'] = self.domain\n\t\t\tqueries['sessionId'] = self.sessionId\n\t\t\tqueries['createdOn'] = str(self.createdOn)\n\t\t\tqueries['formatCreatedOn'] = str(elf.formatCreatedOn)\n\n\n\n\t\texcept Exception as e:\n\t\t\tprint(str(e))\n\t\tfinally:\n\t\t\tif con != None:\n\t\t\t\ttry:\n\t\t\t\t\tcon.close()\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tprint(str(e))\n\t\treturn queries\n\n\t@classmethod\n\tdef get_id(self,clientId):\n\t\treturn None\n\t\t\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.6761695742607117, "alphanum_fraction": 0.6873781681060791, "avg_line_length": 25.65584373474121, "blob_id": "c0fa98e64838ae88f52fc33dcc89e8e3869da411", "content_id": "743e9621dde283b02ffb26535a085c0a3786e813", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4104, "license_type": "no_license", "max_line_length": 115, "num_lines": 154, "path": "/Development/flask/flaskdev.py", "repo_name": "paulineAnnan/Data-curation", "src_encoding": "UTF-8", "text": "import pymysql\nfrom flask import Flask\nfrom flask import Response\nfrom flask import request\nfrom datetime import datetime\nfrom logging.handlers import RotatingFileHandler\nimport requests as req\n\nimport json\nfrom Category import Category \nfrom Config import Config\nfrom amazon import queries\n\nimport logging\n\napp = Flask(__name__)\n\nformatter = logging.Formatter(\"[%(asctime)s] {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s\")\nhandler = RotatingFileHandler('application.log', maxBytes=10000000, backupCount=5)\nhandler.setLevel(logging.DEBUG)\nhandler.setFormatter(formatter)\napp.logger.addHandler(handler)\n\[email protected](\"/\", methods = ['GET'])\ndef index():\n\tstatus = 200\n\tdata = {}\n\tdata['message'] = \"Welcome to training data aquisition application.\"\n\tjsonData = json.dumps(data)\n\treturn 
Response(jsonData, status=status, mimetype='application/json')\n\n\[email protected]('/config', methods = ['GET', 'POST'])\ndef getAndCreatedConfig():\n\tstatus = 200\n\tdata = {}\n\tif request.method == 'GET':\n\t\tprint('Hello')\n\n\telif request.method == 'POST':\n\t\tconfig_resUrl = request.args.get('configResourceUrl')\n\t\tconfig_name = request.args.get('configName')\n\t\tconfig_category = request.args.get('categoryId')\n\t\tconfig = Config(config_resUrl, config_name, config_category, datetime.now())\n\t\tconfig_data = config.save()\n\t\tdata['message'] = \"\"\n\t\tdata['data'] = config_data\n\n\tjsonData = json.dumps(data)\n\treturn Response(jsonData, status=status, mimetype='application/json')\n\[email protected](\"/config/<id>\", methods = ['PUT', 'DELETE'])\ndef updateOrDeleteConfig(id):\n\tstatus = 200\n \n \n\n\treturn Response()\n\[email protected](\"/category\", methods = ['GET', 'POST'])\ndef getAndCreatedCategory():\n\tstatus = 200\n\tdata = None\n\tprint(\"Request parameters passed : \"+ str(request.args))\n\tif request.method == 'GET':\n\t\tprint(\"get triggered\")\n\telif request.method == 'POST':\n\t\tcategory_name = request.args.get('categoryName')\n\t\tcategory = Category(category_name, datetime.now())\n\t\tcategory.save()\n\t\tprint('Yay')\n\n\tjson_data = json.dumps(data)\n\t\n\treturn Response(json_data, status = 200, mimetype='application/json')\n\[email protected](\"/configuration\",methods = [\"GET\"])\ndef getQueries():\n\tstatus = 200\n\tdata = None\n\tindQueryList = []\n\tif request.method == 'GET':\n\t\tconfig_id = request.args.get(\"configId\")\n\t\tcon = None\n\n\t\ttry :\n\t\t\tcon = pymysql.connect('localhost','root', 'rancard', 'data_curation')\n\n\t\t\tcur =con.cursor()\n\t\t\tcur.execute(\" SELECT resUrl FROM Config WHERE id = \" + config_id + \"\")\n\n\t\t\tresUrlTuple= cur.fetchone()\n\t\t\tresUrlString = resUrlTuple[0]\n\n\t\t\tresult = req.get(resUrlString)\n\t\t\tresultDict = result.json()\n\t\t\tqueriesList = resultDict.get(\"queries\")\n\t\t\tfor elementIndex in range(len(queriesList)):\n\t\t\t\tindQuery = queriesList[elementIndex].get('query')\n\t\t\t\tindQueryList.append(indQuery)\n\t\t\tindQuerySet = set(indQueryList)\n\t\t\tprint(indQuerySet)\n\t\t\t\t#indQuery.replace(\"'\",\"\\'\")\n\t\t\t\t#indQuerySet.add(indQuery)\n\t\t\tfor element in indQuerySet:\t\n\t\t\t\tstatement = \"\"\"INSERT INTO DataStore(ConfigId,Query) VALUES (\"\"\" + config_id + \"\"\", \\\"\"\"\" + element + \"\"\"\\\")\"\"\"\n\t\t\t\tprint(statement)\n\t\t\t\tcur.execute(statement)\n\t\t\t\tcon.commit()\n\n\t\t\t#while row is not None:\n\t\t\t#\tprint(type(row))\n\t\t\t#\tprint(row)\n\t\t\t#\trow = cur.fetchone()\n\n\t\texcept Exception as e:\n\t\t\tprint(str(e))\n\t\tfinally:\n\t\t\tif con != None:\n\t\t\t\ttry:\n\t\t\t\t\tcon.close()\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tprint(str(e))\n\t\n\treturn \"done\"\n \n\[email protected](\"/category/<category_id>\", methods = ['PUT', 'DELETE'])\ndef updateAnddeleteCategory(category_id):\n\tif request.method =='PUT':\n\t\tstatus = 200\n\t\tprint(\"updated\")\n\n\telif request.method == 'DELETE':\n\t\tstatus = 200\n\t\tprint(\"\")\n\t\treturn Response()\n\nsandbox = \"http://sandbox.rancardmobility.com:9092/3rd-provider-integration/v1/request/queries?domain=amazon.com\"\n\[email protected](\"/queries\", methods=['GET'])\ndef getResource(resourceProvider):\n\tif request.method == 'GET':\n\t\tstatus = 200\n\t\tjsonData = req.get(resourceProvider)\n\t\tprint(type(jsonData))\n\t\tqueries = jsonData['queries']\n\tfor query in 
query_list:\n\t\t# build one record per query; assumes the provider payload uses the queries-table field names\n\t\trecord = queries(item.get('clientId'), item.get('query'), item.get('domain'), item.get('sessionId'), item.get('createdOn'), item.get('formatCreatedOn'))\n\t\trecord.save()\n\treturn Response(jsonData.text, status = 200, mimetype='application/json')\n\nif __name__ == '__main__':\n\tapp.run()" }, { "alpha_fraction": 0.6516746282577515, "alphanum_fraction": 0.6516746282577515, "avg_line_length": 22.659090042114258, "blob_id": "8fea34d8c9ba0ea8b6df27c876d5cf93719d81ed", "content_id": "28df90357b170398699b6143cdb23c9ce55b8850", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1045, "license_type": "no_license", "max_line_length": 153, "num_lines": 44, "path": "/Development/flask/Config.py", "repo_name": "paulineAnnan/Data-curation", "src_encoding": "UTF-8", "text": "import pymysql\nclass Config:\n\tdef __init__(self,resUrl,name,category,createdOn):\n\t\tself.resUrl = resUrl\n\t\tself.name = name\n\t\tself.category = category\n\t\tself.createdOn = createdOn\n\n\tdef save(self):\n\t\tconfig = {}\n\t\tname = self.name\n\t\tcreatedOn = self.createdOn\n\t\tresUrl = self.resUrl\n\t\tcategory = self.category\n\n\t\tcon = None\n\n\t\ttry :\n\t\t\tcon = pymysql.connect('localhost','root', 'rancard', 'data_curation')\n\t\t\tcur = con.cursor()\n\t\t\tstatement = \"INSERT INTO Config(resUrl,domainName,categoryId,createdOn) VALUES('\"+ resUrl +\"', '\"+ name +\"','\"+ category +\"', '\"+ str(createdOn) +\"')\"\n\t\t\tprint(\"Insert statement : \"+ statement)\n\t\t\tcur.execute(statement)\n\t\t\tcon.commit()\n\n\t\t\tconfig['domainName'] = self.name\n\t\t\tconfig['resourceUrl'] = self.resUrl\n\t\t\tconfig['categoryId'] = self.category\n\t\t\tconfig['createdOn'] = str(self.createdOn)\n\n\t\texcept Exception as e:\n\t\t\tprint(str(e))\n\t\tfinally:\n\t\t\tif con != None:\n\t\t\t\ttry:\n\t\t\t\t\tcon.close()\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tprint(str(e))\n\t\treturn config\n\n\t@classmethod\n\tdef get_id(cls,id):\n\t\treturn None\n" }, { "alpha_fraction": 0.611940324306488, "alphanum_fraction": 0.611940324306488, "avg_line_length": 18.370967864990234, "blob_id": "3e7d476e2c5206a2b4faddf8bb40561738948a53", "content_id": "d79529710d96641f15ec314f03f643560e81556d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2278, "license_type": "no_license", "max_line_length": 108, "num_lines": 124, "path": "/Development/flask/Category.py", "repo_name": "paulineAnnan/Data-curation", "src_encoding": "UTF-8", "text": "import pymysql\n\nclass Category:\n\tdef __init__(self,name,createdOn):\n\t\tself.name = name\n\t\tself.createdOn = createdOn\n\n\tdef save(self):\n\n\t\tcategory = None\n\t\tname = self.name\n\t\tcreatedOn = self.createdOn\n\n\t\tcon = None\n\n\t\ttry :\n\t\t\tcon = pymysql.connect('localhost','root', 'rancard', 'data_curation')\n\t\t\tcur = con.cursor()\n\t\t\tstatement = \"INSERT INTO Category(name,createdOn) VALUES('\"+ name +\"', '\"+ str(createdOn) +\"')\"\n\t\t\tprint(\"Insert statement : \"+ statement)\n\t\t\tcur.execute(statement)\n\t\t\tcon.commit()\n\t\texcept Exception as e:\n\t\t\tprint(str(e))\n\t\tfinally:\n\t\t\tif con != None:\n\t\t\t\ttry:\n\t\t\t\t\tcon.close()\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tprint(str(e))\n\t\treturn category\n\n\tdef delete(self,id):\n\t\tcategory = None\n\n\t\tcon = None\n\n\t\ttry :\n\t\t\tcon = pymysql.connect('localhost','root', 'rancard', 'data_curation')\n\n\t\t\tcur = con.cursor()\n\t\t\tcur.execute(\"DELETE FROM category WHERE id = \" + id)\n\t\t\tcon.commit()
\n\n\t\texcept Exception as e:\n\t\t\tprint(str(e))\n\t\tfinally:\n\t\t\tif con != None:\n\t\t\t\ttry:\n\t\t\t\t\tcon.close()\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tprint(str(e))\n\t\treturn category\n\n\n\tdef update(self,id,name):\n\t\tcategory = None\n\n\t\tcon = None\n\n\t\ttry :\n\t\t\tcon = pymysql.connect('localhost','root', 'rancard', 'data_curation')\n\n\t\t\tcur = con.cursor()\n\t\t\tcur.execute(\"UPDATE category SET name = '\" + name + \"' WHERE id = \" + id)\n\t\t\tcon.commit()\n\n\t\texcept Exception as e:\n\t\t\tprint(str(e))\n\t\tfinally:\n\t\t\tif con != None:\n\t\t\t\ttry:\n\t\t\t\t\tcon.close()\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tprint(str(e))\n\t\treturn category\n\n\tdef select(self,id,name,createdOn):\n\t\tcategory = None\n\t\tself.name = name\n\t\tself.createdOn = createdOn\n\n\t\tcon = None\n\n\t\ttry :\n\t\t\tcon = pymysql.connect('localhost','root', 'rancard', 'data_curation')\n\n\t\t\tcur = con.cursor()\n\t\t\tcur.execute(\"SELECT * FROM category WHERE id = \" + id + \" AND name = '\" + name + \"' AND createdOn = '\" + createdOn + \"'\")\n\n\t\t\trow = cur.fetchone()\n\n\t\t\twhile row is not None:\n\t\t\t\tprint(row)\n\t\t\t\trow = cur.fetchone()\n\n\t\texcept Exception as e:\n\t\t\tprint(str(e))\n\t\tfinally:\n\t\t\tif con != None:\n\t\t\t\ttry:\n\t\t\t\t\tcon.close()\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tprint(str(e))\n\t\treturn category\n\n\n\t@classmethod\n\tdef get_id(cls, id):\n\t\treturn None\n" } ]
4
lizapressman/ResumeToWebsite
https://github.com/lizapressman/ResumeToWebsite
ab3360e4a3b1b9134d5ece43a499c1f1d6bc12fe
1769fedd27e0a94c223cf68c9b4c044713720b7d
0b6448d5c30f2eb5101eaccf0fa53cc2aa59cc8e
refs/heads/master
2022-12-03T10:07:46.935977
2020-07-22T02:38:19
2020-07-22T02:38:19
277,421,459
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6485714316368103, "alphanum_fraction": 0.6485714316368103, "avg_line_length": 25.923076629638672, "blob_id": "35847ec869e33abddd10b1524411018018b6f81e", "content_id": "cda28ee4f15ef48416a787575fcf3d35fd34a74a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 700, "license_type": "no_license", "max_line_length": 60, "num_lines": 26, "path": "/resume_parser.py", "repo_name": "lizapressman/ResumeToWebsite", "src_encoding": "UTF-8", "text": "from pyresparser import ResumeParser\nfrom flask import Flask, request\nfrom flask_cors import CORS, cross_origin\nfrom werkzeug.utils import secure_filename\n\napp = Flask(__name__)\nCORS(app)\n\[email protected]('/upload', methods=['POST'])\n@cross_origin()\ndef parse():\n f = request.files['file']\n filename = secure_filename(f.filename)\n f.save(filename)\n data = ResumeParser(filename).get_extracted_data()\n return data\n\n\[email protected]('/create', methods=['POST'])\ndef create():\n content = request.json\n message = \"\"\"<html>\\n<head></head>\\n<body>\\n\"\"\"\n for key in content:\n message += f\"\"\"<p>{key}: {content.get(key)}</p>\\n\"\"\"\n message += \"\"\"</body>\\n</html>\"\"\"\n return message\n" }, { "alpha_fraction": 0.8620689511299133, "alphanum_fraction": 0.8620689511299133, "avg_line_length": 8.666666984558105, "blob_id": "cf7e7bebce46e95c7607011cf3192d06390d22fc", "content_id": "ff8f70da4c1c510fa5f0dbfeb011e2053a80e5c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 29, "license_type": "no_license", "max_line_length": 11, "num_lines": 3, "path": "/requirements.txt", "repo_name": "lizapressman/ResumeToWebsite", "src_encoding": "UTF-8", "text": "flask\nflask-cors\npyresparser\n" } ]
2
kasmi2004/cell-tower-geolocation
https://github.com/kasmi2004/cell-tower-geolocation
c40c2c8f9be820db8c2e701c7850e6a3c0229b34
d08c6424b66caf6bae107b2f7d9db4c978e0c23c
2e42d8bbe793986c7ca6aec7db1c9a6a02e8dc43
refs/heads/master
2022-04-07T15:51:29.038368
2020-02-09T22:43:07
2020-02-09T22:43:07
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.46078789234161377, "alphanum_fraction": 0.4710330665111542, "avg_line_length": 36.783409118652344, "blob_id": "d8d1a39c4bda2ba149ccac521d18db44f49f3d6d", "content_id": "3cca4e8938a5fe880fda2420747f518b69eeb3c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8199, "license_type": "no_license", "max_line_length": 126, "num_lines": 217, "path": "/cell-tower-geolocation.py", "repo_name": "kasmi2004/cell-tower-geolocation", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport serial, time, json, requests, sys, os\n\napi_key = \"<api_key>\"\n\nprint(\"\")\nprint(\" Cell Tower Geolocation \")\nprint(\" via SIM800 Raspberry Pi GSM/GPRS HAT \")\nprint(\" github.com/sion-evans/cell-tower-geolocation \")\nprint(\"\")\n\nif len(sys.argv) == 1 or \"-h\" in sys.argv:\n print(\" Usage: \" + sys.argv[0] + \" <options>\")\n print(\"\")\n print(\" Options:\")\n print(\" -s Survey Available Cell Towers.\")\n print(\" -c Locate Coordinates of Cell Towers. [Google Maps API Required]\")\n print(\" -m Plot Coordinates to Google Maps HTML Template. [Google Maps API Required]\")\n print(\"\")\n\ndef query(input):\n\n port = serial.Serial('/dev/ttyS0', baudrate=115200, timeout=1)\n\n command = input + '\\r'\n port.write(bytes(command, 'utf-8'))\n\n rcv = str(port.read(256), 'utf-8')\n\n while len(rcv) == 0 or rcv == command:\n time.sleep(1)\n rcv = str(port.read(256), 'utf-8')\n\n output = \"\"\n\n while len(rcv) > 0:\n output = output + rcv\n\n if output[:len(command)] == command:\n output = output[len(command):]\n\n if len(rcv) < 256:\n break\n else:\n rcv = str(port.read(256), 'utf-8')\n\n dataList = output.split('\\r\\n')\n while '' in dataList:\n dataList.remove('')\n\n return dataList\n\nif __name__ == '__main__':\n\n if \"-s\" in sys.argv:\n\n print(\"[*]\", \"Performing AT test..\")\n response = query('AT')\n\n if 'OK' in response:\n print(\"[+]\", \"AT test successful!\")\n else:\n print(\"[!]\", \"AT test failed.\")\n exit()\n\n print(\"[*]\", \"Querying survey format..\")\n response = query('AT+CNETSCAN?')\n\n if 'OK' in response:\n if '+CNETSCAN: 1' in response:\n print(\"[+]\", \"Currently displaying LAC and BSIC information!\")\n elif '+CNETSCAN: 0' in response:\n print(\"[-]\", \"Currently not displaying LAC and BSIC information.\")\n print(\"[*]\", \"Attempting to change survey format..\")\n response = query('AT+CNETSCAN=1')\n if 'OK' in response:\n print(\"[+]\", \"Successfully changed survey format!\")\n else:\n print(\"[!]\", \"Failed to change survey format.\")\n exit()\n else:\n print(\"[!]\", \"Unexpected response.\")\n else:\n print(\"[!]\", \"Query failed.\")\n exit()\n\n print(\"[*]\", \"Performing survey..\")\n cells = []\n response = query('AT+CNETSCAN')\n\n if 'OK' in response:\n print(\"[+]\", \"Survey successful!\")\n\n for i in response:\n if i != \"OK\":\n\n pairs = i.split(',')\n\n dictionary = {}\n\n for i in pairs:\n\n if i.split(':')[0] == \"Operator\": # Long format alphanumeric of network operator.\n dictionary[\"Operator\"] = str(i.split(':')[1]).strip('\\\"')\n elif i.split(':')[0] == \"MCC\": # Mobile country code.\n dictionary[\"MCC\"] = int(i.split(':')[1])\n elif i.split(':')[0] == \"MNC\": # Mobile network code.\n dictionary[\"MNC\"] = int(i.split(':')[1])\n elif i.split(':')[0] == \"Rxlev\": # Recieve level, in decimal format.\n dictionary[\"Rxlev\"] = int(i.split(':')[1])\n elif i.split(':')[0] == \"Cellid\": # Cell identifier, in hexadecimal format.\n 
dictionary[\"Cellid\"] = int(i.split(':')[1], 16)\n elif i.split(':')[0] == \"Arfcn\": # Absolute radio frequency channel number, in decimal format.\n dictionary[\"Arfcn\"] = int(i.split(':')[1])\n elif i.split(':')[0] == \"Lac\": # Location area code, in hexadecimal format.\n dictionary[\"Lac\"] = int(i.split(':')[1], 16)\n elif i.split(':')[0] == \"Bsic\": # Location area code, in hexadecimal format.\n dictionary[\"Bsic\"] = int(i.split(':')[1], 16) # Base station identity code, in hexadecimal format.\n\n cells.append(dictionary)\n\n cells = sorted(cells, reverse = True, key = lambda i: (i[\"Operator\"], i[\"Rxlev\"]))\n\n filename = \"survey/survey_\" + str(int(time.time())) + \".json\"\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n with open(filename, 'w') as outfile:\n json.dump(cells, outfile)\n print(\"[+]\", \"Survey saved to '\" + filename + \"'.\")\n print(\"\")\n else:\n print(\"[!]\", \"Survey failed.\")\n\n if \"-c\" in sys.argv:\n\n url = \"https://www.googleapis.com/geolocation/v1/geolocate?key=\" + api_key\n headers = {'content-type': 'application/json'}\n\n for filename in os.listdir('survey/'):\n if not filename.endswith(\".json\"):\n continue\n\n print(\"[*]\", \"Processing '\" + filename + \"'.\")\n\n with open('survey/' + filename) as json_file:\n json_data = json.load(json_file)\n\n dictionary = {}\n\n for i in json_data:\n if i[\"Operator\"] not in dictionary:\n dictionary[i[\"Operator\"]] = []\n dictionary[i[\"Operator\"]].append(i)\n\n for i in dictionary:\n output_filename = os.path.splitext(filename)[0] # e.g. survey_1581259393\n output_filename = output_filename.split(\"_\")[1] # e.g. 1581259393\n output_filename = \"coordinates/coordinates_\" + output_filename + \"_\" + i + \".json\"\n\n if os.path.isfile(output_filename):\n print(\"[*]\", \"Skipping '\" + i + \"', file exists: '\" + output_filename + \"'.\")\n continue\n\n print(\"[*]\", \"Processing API request(s) for operator '\" + i + \"'.\")\n\n result = []\n for x in dictionary[i]:\n body = {\n \"cellTowers\": [\n {\n \"cellId\": int(x[\"Cellid\"]),\n \"locationAreaCode\": int(x[\"Lac\"]),\n \"mobileCountryCode\": int(x[\"MCC\"]),\n \"mobileNetworkCode\": int(x[\"MNC\"]),\n \"signalStrength\": int(-110 + int(x[\"Rxlev\"]))\n }\n ]\n }\n\n print(\"[*]\", \"Submitting API request for CID: \" + str(x[\"Cellid\"]) + \".\")\n response = requests.post(url, data=json.dumps(body), headers=headers)\n result.append(json.loads(response.text))\n\n os.makedirs(os.path.dirname(output_filename), exist_ok=True)\n with open(output_filename, 'w') as outfile:\n outfile.write(str(result))\n print(\"[+]\", \"Coordinates saved to '\" + output_filename + \"'.\")\n print(\"\")\n\n if \"-m\" in sys.argv:\n\n with open(\"template.html\", 'r') as outfile:\n html = outfile.read()\n\n html = html.replace(\"<api_key>\", api_key)\n\n for filename in os.listdir('coordinates/'):\n if not filename.endswith(\".json\"):\n continue\n\n output_filename = 'coordinates/' + filename.replace(\".json\", \".html\")\n if os.path.isfile(output_filename):\n print(\"[*]\", \"Skipping '\" + filename + \"', file exists: '\" + output_filename + \"'.\")\n continue\n\n print(\"[*]\", \"Processing '\" + filename + \"'.\")\n\n with open('coordinates/' + filename) as json_file:\n data = json_file.read()\n\n output = html.replace(\"var cells = []\", \"var cells = \" + data)\n\n os.makedirs(os.path.dirname(output_filename), exist_ok=True)\n with open(output_filename, 'a') as outfile:\n outfile.write(output)\n print(\"[+]\", \"Coordinates plotted to 
'\" + output_filename + \"'.\")\n print(\"\")\n" } ]
1
hammad42/google_cloud_vision_web_entities
https://github.com/hammad42/google_cloud_vision_web_entities
d9b0b0788c21e288a773f6c17f7e34b4ef9784e4
3fb62b66dbd38ad88f1a86a6c82f23b28edc9c86
f610db7afe4f4cd6ce11414ef5febf011fc2323e
refs/heads/master
2023-04-05T05:15:45.265897
2021-04-21T15:35:21
2021-04-21T15:35:21
354,600,943
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6613844633102417, "alphanum_fraction": 0.6788991093635559, "avg_line_length": 34.787879943847656, "blob_id": "5e037d2051bf6e331533f740cee1d7bce82415cb", "content_id": "2f0ddd6767907d7049ee00b1df37ae95da7c127b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2398, "license_type": "no_license", "max_line_length": 144, "num_lines": 66, "path": "/main.py", "repo_name": "hammad42/google_cloud_vision_web_entities", "src_encoding": "UTF-8", "text": "\ndef hello_world(request):\n\n from google.cloud import vision\n from datetime import datetime\n import re\n import itertools\n import write2bq\n #from google.oauth2 import service_account\n import os\n os.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"]=\"C:\\gcp_credentials\\elaborate-howl-285701-105c2e8355a8.json\"\n #SCOPES = ['https://www.googleapis.com/auth/sqlservice.admin']\n #SERVICE_ACCOUNT_FILE = 'C:\\gcp_credentials\\elaborate-howl-285701-105c2e8355a8.json'\n #credentials = service_account.Credentials.from_service_account_file(\n # SERVICE_ACCOUNT_FILE, scopes=SCOPES)\n table_id='elaborate-howl-285701.context.image_web_entities'#destination table name\n\n now = str(datetime.now())# time\n\n print(\"now=\"+now)\n\n client = vision.ImageAnnotatorClient()\n request_json = request.get_json()\n image = vision.Image()\n if request_json:\n source_url = request_json['source_url']\n print(\"source_url=\"+source_url)\n\n source_url=re.match(r'gs://([^/]+)/(.+)', source_url) \n bucket_name=source_url.group(1) #credential bucket name\n print(bucket_name)\n prefix=source_url.group(2)# credential prefix name\n print(prefix)\n\n\n\n file_name=prefix\n exact_file_name_list = re.split(\"/\", file_name)\n exact_file_name=exact_file_name_list[-1]\n \n\n\n\n uri=\"gs://\"+bucket_name+\"/\"+file_name\n print(\"uri=\"+uri)\n\n image.source.image_uri = uri\n\n response = client.web_detection(image=image)\n matching_images_lst=[]\n matching_images=response.web_detection.full_matching_images# url string in it creates problem from json\n for matching_image in matching_images:\n matching_images_lst.append(matching_image.url)\n # list is made for matching images\n page_lst=[]\n for page in response.web_detection.pages_with_matching_images:\n page_lst.append(page.url)\n # list is made for pages\n best_match_lst=[]#list empty which stores best match result\n for best_match in response.web_detection.best_guess_labels:\n best_match_lst.append(best_match.label)\n\n for (a, b, c) in itertools.zip_longest(matching_images_lst, page_lst, best_match_lst): \n documentEntities={\"time_stamp\":now,\"file_name\":exact_file_name,\"matching_images\":a,\"pages_with_images\":b,\"best_guess\":c,\"input_uri\":uri}\n write2bq.BQ(documentEntities,table_id)\n \n return \"success\"\n \n\n\n \n \n \n\n\n\n\n " }, { "alpha_fraction": 0.784246563911438, "alphanum_fraction": 0.7945205569267273, "avg_line_length": 57.400001525878906, "blob_id": "d88d933486c2cf46b2b3258d936cc85712a767b3", "content_id": "3b56a48b35cc67e13e5e7285bd3ff4fd61c3bf13", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 292, "license_type": "no_license", "max_line_length": 100, "num_lines": 5, "path": "/README.md", "repo_name": "hammad42/google_cloud_vision_web_entities", "src_encoding": "UTF-8", "text": "# google_cloud_vision_web_entities\n1)it took link of image from cloud storage run web_entities on it and save result on bigquery \n2)web_entities means link of pages or images having 
the same picture as the one you provided in the image link.\n\n3) To run this code you must have the Cloud Functions framework.\n" }, { "alpha_fraction": 0.6206896305084229, "alphanum_fraction": 0.7413793206214905, "avg_line_length": 18.33333396911621, "blob_id": "a81a54c1d2e3328cabd79e648f6aee83f87d9d98", "content_id": "64b31f45b47e0e4457503f579d18336d9a6b17ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 58, "license_type": "no_license", "max_line_length": 29, "num_lines": 3, "path": "/requirements.txt", "repo_name": "hammad42/google_cloud_vision_web_entities", "src_encoding": "UTF-8", "text": "google-cloud-vision==2.1.0\n\ngoogle-cloud-bigquery==1.27.2\n" } ]
3
robuved/flow
https://github.com/robuved/flow
4b3a759e9fe912e414ac6a4f70057f77f63fd894
af5b2fd8195b295afbc24e66873163d90851c018
95b18d5c6d9962ab187af76673b3108346d1f870
refs/heads/master
2020-12-04T14:08:40.861956
2019-12-10T08:31:20
2019-12-10T08:31:20
231,794,060
0
0
MIT
2020-01-04T16:29:56
2019-12-10T08:31:34
2019-12-10T08:31:32
null
[ { "alpha_fraction": 0.5825768113136292, "alphanum_fraction": 0.5881784558296204, "avg_line_length": 36.244606018066406, "blob_id": "507459896f0cc8381a3b276352cc86320037886a", "content_id": "d5fed8a5c38c8e94fe30303a194443b983aa0f00", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10354, "license_type": "permissive", "max_line_length": 88, "num_lines": 278, "path": "/flow/envs/nemodrive_lab/env1_lab.py", "repo_name": "robuved/flow", "src_encoding": "UTF-8", "text": "\"\"\"Environments that can train both lane change and acceleration behaviors.\"\"\"\n\nfrom flow.envs.ring.accel import AccelEnv\nfrom flow.core import rewards\n\nfrom gym.spaces.box import Box\nfrom gym.spaces.tuple import Tuple\nfrom gym.spaces.discrete import Discrete\n\nimport numpy as np\n\nADDITIONAL_ENV1_PARAMS = {\n # maximum acceleration for autonomous vehicles, in m/s^2\n \"max_accel\": 3,\n # maximum deceleration for autonomous vehicles, in m/s^2\n \"max_decel\": 3,\n # lane change duration for autonomous vehicles, in s. Autonomous vehicles\n # reject new lane changing commands for this duration after successfully\n # changing lanes.\n \"lane_change_duration\": 0,\n # desired velocity for all vehicles in the network, in m/s\n \"target_velocity\": 10,\n # specifies whether vehicles are to be sorted by position during a\n # simulation step. If set to True, the environment parameter\n # self.sorted_ids will return a list of all vehicles sorted in accordance\n # with the environment\n 'sort_vehicles': False,\n # Amplifier factor for rewarding progress of agent (meters * gain)\n 'forward_progress_gain': 0.1,\n # Reward for collision\n 'collision_reward': -1,\n # Penalty for changing lane\n 'lane_change_reward': -0.1,\n # Safe distances to keep with car in front; and with lateral cars when changing lane\n 'frontal_collision_distance': 2.0, # in meters\n 'lateral_collision_distance': 3.0, # in meters\n # Return shape as box 2 - continuous values\n \"action_space_box\": False,\n}\n\n\nclass LaneChangeAccelEnv1(AccelEnv):\n \"\"\"\n Modified version of LaneChangeAccelEnv:\n\n * reward RL agent progress\n * penalty for frontal and lateral collision\n * lane change reward\n \"\"\"\n\n def __init__(self, env_params, sim_params, network, simulator='traci'):\n for p in ADDITIONAL_ENV1_PARAMS.keys():\n if p not in env_params.additional_params:\n raise KeyError(\n 'Environment parameter \"{}\" not supplied'.format(p))\n\n add_param = env_params.additional_params\n\n self.collision_reward = add_param[\"collision_reward\"]\n self.frontal_collision_distance = add_param[\"frontal_collision_distance\"]\n self.lateral_collision_distance = add_param[\"lateral_collision_distance\"]\n self.forward_progress_gain = add_param[\"forward_progress_gain\"]\n self.action_space_box = add_param[\"action_space_box\"]\n\n super().__init__(env_params, sim_params, network, simulator)\n\n self.num_lanes = max(self.k.network.num_lanes(edge)\n for edge in self.k.network.get_edge_list())\n self.prev_lane = self.crt_lane = None, None\n\n @property\n def action_space(self):\n \"\"\"See class definition.\"\"\"\n max_decel = self.env_params.additional_params[\"max_decel\"]\n max_accel = self.env_params.additional_params[\"max_accel\"]\n\n num_rl_agents = self.initial_vehicles.num_rl_vehicles\n\n # return (Box(np.array(lb), np.array(ub), dtype=np.float32),\n if self.action_space_box:\n # Models that cannot interpret Complex actions\n return Box(\n low=-abs(max_decel),\n high=max_accel,\n 
shape=(num_rl_agents * 2,),\n dtype=np.float32)\n else:\n return Tuple(\n (Box(\n low=-abs(max_decel),\n high=max_accel,\n shape=(num_rl_agents,),\n dtype=np.float32), ) +\n (Discrete(3),) * num_rl_agents\n )\n\n @property\n def observation_space(self):\n \"\"\"See class definition.\"\"\"\n return Box(\n low=0,\n high=1,\n shape=(3 * self.initial_vehicles.num_vehicles, ),\n dtype=np.float32)\n\n def compute_collision_reward(self, rl_actions):\n collision_distance = self.frontal_collision_distance\n collision_reward = self.collision_reward\n lateral_collision_distance = self.lateral_collision_distance\n vehicles = self.k.vehicle\n num_lanes = self.num_lanes\n\n reward = 0\n if rl_actions is None:\n return reward\n\n sorted_rl_ids = [\n veh_id for veh_id in self.sorted_ids\n if veh_id in vehicles.get_rl_ids()\n ]\n\n directions = rl_actions[1::2]\n\n for i, rel_id in enumerate(sorted_rl_ids):\n lane = vehicles.get_lane(rel_id)\n direction = directions[i]\n headways = vehicles.get_lane_headways(rel_id)\n\n # Check front collision\n if headways[lane] < collision_distance:\n reward += collision_reward\n\n # Check lateral collision\n if direction != 0:\n tailways = vehicles.get_lane_tailways(rel_id)\n\n # Action has already been applied in the env\n # Calculate lane change based on intent\n prev_lane = self.prev_lane[i]\n new_lane = int(prev_lane + direction)\n\n # Trying to get out of the road\n if new_lane < 0 or new_lane >= num_lanes:\n reward += collision_reward\n elif np.abs(headways[new_lane]) < lateral_collision_distance or \\\n np.abs(tailways[new_lane]) < lateral_collision_distance:\n reward += collision_reward\n\n return reward\n\n def compute_reward(self, rl_actions, **kwargs):\n \"\"\"See class definition.\"\"\"\n # compute the system-level performance of vehicles from a velocity\n # perspective\n reward = rewards.rl_forward_progress(self, gain=self.forward_progress_gain)\n\n # Calculate collision reward\n collision_r = self.compute_collision_reward(rl_actions)\n reward += collision_r\n\n # punish excessive lane changes by reducing the reward by a set value\n # every time an rl car changes lanes (10% of max reward)\n for veh_id in self.k.vehicle.get_rl_ids():\n if self.k.vehicle.get_last_lc(veh_id) == self.time_counter:\n reward -= 0.1\n\n return reward\n\n def get_state(self):\n \"\"\"See class definition.\"\"\"\n # normalizers\n state_size = len(self.initial_ids)\n no_cars = len(self.sorted_ids)\n empty_cars = state_size - no_cars\n\n max_speed = self.k.network.max_speed()\n length = self.k.network.length()\n max_lanes = max(\n self.k.network.num_lanes(edge)\n for edge in self.k.network.get_edge_list())\n\n speed = [self.k.vehicle.get_speed(veh_id) / max_speed\n for veh_id in self.sorted_ids] + [0.] * empty_cars\n pos = [self.k.vehicle.get_x_by_id(veh_id) / length\n for veh_id in self.sorted_ids] + [0.] * empty_cars\n lane = [self.k.vehicle.get_lane(veh_id) / max_lanes\n for veh_id in self.sorted_ids] + [0.] 
* empty_cars\n\n # Save to know previous lane of agents\n sorted_rl_ids = [\n i for i, veh_id in enumerate(self.sorted_ids)\n if veh_id in self.k.vehicle.get_rl_ids()\n ]\n\n self.prev_lane = self.crt_lane\n self.crt_lane = [lane[x] * max_lanes for x in sorted_rl_ids]\n\n return np.array(speed + pos + lane)\n\n def _apply_rl_actions(self, actions):\n \"\"\"See class definition.\"\"\"\n acceleration, direction = np.split(actions, 2)\n\n # re-arrange actions according to mapping in observation space\n if len(self.k.vehicle.get_rl_ids()) <= 0:\n raise ValueError(\"No RL agent id\")\n\n sorted_rl_ids = [\n veh_id for veh_id in self.sorted_ids\n if veh_id in self.k.vehicle.get_rl_ids()\n ]\n\n # represents vehicles that are allowed to change lanes\n lane_change_duration = self.env_params.additional_params[\"lane_change_duration\"]\n if lane_change_duration > 0:\n non_lane_changing_veh = \\\n [self.time_counter <=\n self.env_params.additional_params[\"lane_change_duration\"]\n + self.k.vehicle.get_last_lc(veh_id)\n for veh_id in sorted_rl_ids]\n\n # vehicle that are not allowed to change have their directions set to 0\n direction[non_lane_changing_veh] = \\\n np.array([0] * sum(non_lane_changing_veh))\n\n self.k.vehicle.apply_acceleration(sorted_rl_ids, acc=acceleration)\n self.k.vehicle.apply_lane_change(sorted_rl_ids, direction=direction)\n\n def additional_command(self):\n \"\"\"Define which vehicles are observed for visualization purposes.\"\"\"\n # specify observed vehicles\n if self.k.vehicle.num_rl_vehicles > 0:\n for veh_id in self.k.vehicle.get_human_ids():\n self.k.vehicle.set_observed(veh_id)\n\n def clip_actions(self, rl_actions=None):\n \"\"\"Clip & redo direction the actions passed from the RL agent.\n\n Parameters\n ----------\n rl_actions : array_like\n list of actions provided by the RL algorithm\n\n Returns\n -------\n array_like\n The rl_actions clipped according to the box or boxes\n \"\"\"\n # ignore if no actions are issued\n if rl_actions is None:\n return\n\n # clip according to the action space requirements\n if isinstance(self.action_space, Box):\n rl_actions = np.clip(\n rl_actions,\n a_min=self.action_space.low,\n a_max=self.action_space.high)\n elif isinstance(self.action_space, Tuple):\n for idx, action in enumerate(rl_actions):\n subspace = self.action_space[idx]\n if isinstance(subspace, Box):\n rl_actions[idx] = np.clip(\n action,\n a_min=subspace.low,\n a_max=subspace.high)\n\n if isinstance(rl_actions[0], np.ndarray):\n acceleration = rl_actions[0]\n direction = np.array(rl_actions[1:])\n else:\n acceleration, direction = np.split(rl_actions, 2)\n\n # discrete lane change\n direction = direction.clip(0., 2.)\n direction = (direction - 1).astype(np.int)\n\n return np.concatenate([acceleration, direction])\n" }, { "alpha_fraction": 0.5295352339744568, "alphanum_fraction": 0.569415271282196, "avg_line_length": 39.67073059082031, "blob_id": "a6f35a9a0f0c748719ad0dd9d34218bad775a114", "content_id": "6d70ca35e500f227deb60e1762cab2844704c729", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3335, "license_type": "permissive", "max_line_length": 94, "num_lines": 82, "path": "/flow/envs/nemodrive_lab/vehicles.py", "repo_name": "robuved/flow", "src_encoding": "UTF-8", "text": "from flow.core.params import VehicleParams\nfrom flow.controllers.car_following_models import IDMController, CFMController\nfrom flow.controllers.routing_controllers import ContinuousRouter, MinicityRouter\nimport numpy as np\nfrom 
flow.controllers.rlcontroller import RLController\n\nfrom flow.envs.nemodrive_lab.lane_change_controller import PeriodicLaneChangeController\n\n\ndef get_vehicles():\n num_vehicles = 22\n\n # Params: {param_name: (mean, std, min_value, max_value),...}\n params = dict({\n \"v0\": (30, 10, 0, 100), # float desirable velocity, in m/s (default: 30)\n \"T\": (1, 0.5, 0.1, 10), # float safe time headway, in s (default: 1)\n \"a\": (1, 0.2, 0.1, 3), # float max acceleration, in m/s2 (default: 1)\n \"b\": (1.5, 0.2, 0.1, 3), # float comfortable deceleration, in m/s2 (default: 1.5)\n \"s0\": (2, 0.5, 0.5, 10), # float linear jam distance, in m (default: 2)\n \"noise\": (2, 1, 0., 10),\n # float std dev of normal perturbation to the acceleration (default: 0)\n })\n\n vehicles = VehicleParams()\n\n veh_param = {p: np.clip(np.random.normal(x[0], x[1]), x[2], x[3]) for p, x in\n params.items()}\n\n vehicles.add(f\"rl\",\n acceleration_controller=(RLController, {}),\n routing_controller=(ContinuousRouter, {}),\n num_vehicles=1)\n\n for i in range(num_vehicles):\n veh_param = {p: np.clip(np.random.normal(x[0], x[1]), x[2], x[3]) for p, x in\n params.items()}\n vehicles.add(f\"human_{i}\",\n acceleration_controller=(IDMController, veh_param),\n routing_controller=(ContinuousRouter, {}),\n num_vehicles=1)\n\n return vehicles\n\n\ndef get_vehicles_with_lane_change():\n num_vehicles = 22\n\n # Params: {param_name: (mean, std, min_value, max_value),...}\n params = dict({\n \"v0\": (30, 10, 0, 100), # float desirable velocity, in m/s (default: 30)\n \"T\": (1, 0.5, 0.1, 10), # float safe time headway, in s (default: 1)\n \"a\": (1, 0.2, 0.1, 3), # float max acceleration, in m/s2 (default: 1)\n \"b\": (1.5, 0.2, 0.1, 3), # float comfortable deceleration, in m/s2 (default: 1.5)\n \"s0\": (2, 0.5, 0.5, 10), # float linear jam distance, in m (default: 2)\n \"noise\": (2, 1, 0., 10),\n # float std dev of normal perturbation to the acceleration (default: 0)\n })\n\n lane_change_param = dict({\n \"lane_change_params\": {\"lane_change_freqs\": [30, 150]}\n })\n\n vehicles = VehicleParams()\n\n veh_param = {p: np.clip(np.random.normal(x[0], x[1]), x[2], x[3]) for p, x in\n params.items()}\n\n vehicles.add(f\"rl\",\n acceleration_controller=(RLController, {}),\n routing_controller=(ContinuousRouter, {}),\n num_vehicles=1)\n\n for i in range(num_vehicles):\n veh_param = {p: np.clip(np.random.normal(x[0], x[1]), x[2], x[3]) for p, x in\n params.items()}\n vehicles.add(f\"human_{i}\",\n acceleration_controller=(IDMController, veh_param),\n lane_change_controller=(PeriodicLaneChangeController, lane_change_param),\n routing_controller=(ContinuousRouter, {}),\n num_vehicles=1)\n\n return vehicles\n" }, { "alpha_fraction": 0.7380073666572571, "alphanum_fraction": 0.7749077677726746, "avg_line_length": 37.71428680419922, "blob_id": "5b28acce5a1eb3907142878eaf99a8b95159c94f", "content_id": "12447f539c7a787bfaab28db5bdfc00f897a45a0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 271, "license_type": "permissive", "max_line_length": 60, "num_lines": 7, "path": "/flow/envs/nemodrive_lab/networks.py", "repo_name": "robuved/flow", "src_encoding": "UTF-8", "text": "from flow.networks.figure_eight import ADDITIONAL_NET_PARAMS\n\n# ENV1_NET PARAMS\nADDITIONAL_NET_PARAMS_ENV1 = ADDITIONAL_NET_PARAMS.copy()\nADDITIONAL_NET_PARAMS_ENV1[\"lanes\"] = 2\nADDITIONAL_NET_PARAMS_ENV1[\"resolution\"] = 40\nADDITIONAL_NET_PARAMS_ENV1[\"radius_ring\"] = 60\n" }, { "alpha_fraction": 
0.7030201554298401, "alphanum_fraction": 0.7290268540382385, "avg_line_length": 34.05882263183594, "blob_id": "43abfc1ae6781e33227c2d0d1e29f161f3470405", "content_id": "ce31ffc04bc7e761de662789281f64006979642c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1192, "license_type": "permissive", "max_line_length": 88, "num_lines": 34, "path": "/flow/envs/nemodrive_lab/__init__.py", "repo_name": "robuved/flow", "src_encoding": "UTF-8", "text": "\"\"\"Empty init file to ensure documentation for the ring env is created.\"\"\"\n\nfrom flow.networks.figure_eight import FigureEightNetwork\n\nfrom flow.envs.nemodrive_lab.networks import ADDITIONAL_NET_PARAMS_ENV1\nfrom flow.envs.nemodrive_lab.vehicles import get_vehicles, get_vehicles_with_lane_change\nfrom flow.envs.nemodrive_lab.env1_lab import LaneChangeAccelEnv1, ADDITIONAL_ENV1_PARAMS\nfrom flow.envs.nemodrive_lab.env2_lab import LaneChangeAccelEnv2, ADDITIONAL_ENV2_PARAMS\n\n\nENV1 = {\n \"NETWORK\": FigureEightNetwork,\n \"VEHICLES\": get_vehicles,\n \"ADDITIONAL_NET_PARAMS\": ADDITIONAL_NET_PARAMS_ENV1,\n \"INITIAL_CONFIG_PARAMS\": {\"spacing\": \"random\", \"perturbation\": 50},\n \"ENVIRONMENT\": LaneChangeAccelEnv1,\n \"ADDITIONAL_ENV_PARAMS\": ADDITIONAL_ENV1_PARAMS,\n \"HORIZON\": 3000,\n}\n\nENV2 = {\n \"NETWORK\": FigureEightNetwork,\n \"VEHICLES\": get_vehicles_with_lane_change,\n \"ADDITIONAL_NET_PARAMS\": ADDITIONAL_NET_PARAMS_ENV1,\n \"INITIAL_CONFIG_PARAMS\": {\"spacing\": \"random\", \"perturbation\": 50},\n \"ENVIRONMENT\": LaneChangeAccelEnv2,\n \"ADDITIONAL_ENV_PARAMS\": ADDITIONAL_ENV2_PARAMS,\n \"HORIZON\": 3000,\n}\n\nLAB_ENVS = {\n \"lab_env1\": ENV1,\n \"lab_env2\": ENV2,\n}\n" }, { "alpha_fraction": 0.6199702024459839, "alphanum_fraction": 0.6318926811218262, "avg_line_length": 36.879032135009766, "blob_id": "21f89b9f26e5a4cbaffe145ffc2132b0a074e4e3", "content_id": "13be2580ea7876942bd25ceb612ee074c044e9bb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4697, "license_type": "permissive", "max_line_length": 97, "num_lines": 124, "path": "/flow/envs/nemodrive_lab/env2_lab.py", "repo_name": "robuved/flow", "src_encoding": "UTF-8", "text": "\"\"\"Environments that can train both lane change and acceleration behaviors.\"\"\"\n\nfrom flow.envs.ring.accel import AccelEnv\nfrom flow.core import rewards\n\nfrom gym.spaces.box import Box\nfrom gym.spaces.tuple import Tuple\nfrom gym.spaces.discrete import Discrete\n\nfrom flow.envs.nemodrive_lab.env1_lab import LaneChangeAccelEnv1\n\nimport numpy as np\n\nADDITIONAL_ENV2_PARAMS = {\n # maximum acceleration for autonomous vehicles, in m/s^2\n \"max_accel\": 3,\n # maximum deceleration for autonomous vehicles, in m/s^2\n \"max_decel\": 3,\n # lane change duration for autonomous vehicles, in s. Autonomous vehicles\n # reject new lane changing commands for this duration after successfully\n # changing lanes.\n \"lane_change_duration\": 0,\n # desired velocity for all vehicles in the network, in m/s\n \"target_velocity\": 10,\n # specifies whether vehicles are to be sorted by position during a\n # simulation step. 
If set to True, the environment parameter\n # self.sorted_ids will return a list of all vehicles sorted in accordance\n # with the environment\n 'sort_vehicles': False,\n # Amplifier factor for rewarding progress of agent (meters * gain)\n 'forward_progress_gain': 0.1,\n # Reward for collision\n 'collision_reward': -1,\n # Penalty for changing lane\n 'lane_change_reward': -0.1,\n # Safe distances to keep with car in front; and with lateral cars when changing lane\n 'frontal_collision_distance': 2.0, # in meters\n 'lateral_collision_distance': 3.0, # in meters\n # Return shape as box 2 - continuous values\n \"action_space_box\": False,\n # =====================================================================================\n # ENV 2 Params\n \"pos_noise_std\": [0.5, 2], # in meters\n \"pos_noise_steps_reset\": 100,\n \"speed_noise_std\": [0.2, 0.8], # in m/s\n \"acc_noise_std\": [0.2, 0.4], # m/s^2\n}\n\n\nclass LaneChangeAccelEnv2(LaneChangeAccelEnv1):\n \"\"\"\n Modified version of LaneChangeAccelEnv1:\n\n ENV 1:\n * reward RL agent progress\n * penalty for frontal and lateral collision\n * lane change reward\n + ENV 2:\n * noisy RL agent pos: accumulating noise (gaussian) - reset error periodically\n * noisy RL agent speed: gaussian noise\n * noisy command for acceleration: gaussian noise\n * other agents change lane periodically (each with it's own freq initialized random on reset)\n \"\"\"\n\n def __init__(self, env_params, sim_params, network, simulator='traci'):\n for p in ADDITIONAL_ENV2_PARAMS.keys():\n if p not in env_params.additional_params:\n raise KeyError(\n 'Environment parameter \"{}\" not supplied'.format(p))\n\n add_param = env_params.additional_params\n\n self.pos_noise_std = add_param[\"pos_noise_std\"]\n self.pos_noise_steps_reset = add_param[\"pos_noise_steps_reset\"]\n self.speed_noise_std = add_param[\"speed_noise_std\"]\n self.acc_noise_std = add_param[\"acc_noise_std\"]\n\n super().__init__(env_params, sim_params, network, simulator)\n\n self._crt_pos_noise_accum = None\n self._crt_pos_noise_std = None\n self._crt_speed_noise_std = None\n self._crt_acc_noise_std = None\n self._reset_noise()\n self._pos_noise = []\n\n def _reset_noise(self):\n self._crt_pos_noise_accum = 0\n self._crt_pos_noise_std = np.random.uniform(*self.pos_noise_std)\n self._crt_speed_noise_std = np.random.uniform(*self.speed_noise_std)\n self._crt_acc_noise_std = np.random.uniform(*self.acc_noise_std)\n\n def get_state(self):\n state = super().get_state()\n no_cars = len(self.initial_ids)\n length = self.k.network.length()\n max_speed = self.k.network.max_speed()\n\n # Add noise to RL agent speed\n act_speed_f = state[0]\n speed_noise = np.random.normal(0, self._crt_speed_noise_std) / max_speed\n state[0] += speed_noise\n state[0] = np.clip(state[0], 0, 1.)\n\n # Add noise to RL agent pos\n if self.step_counter % self.pos_noise_steps_reset == 0:\n self._crt_pos_noise_accum = 0\n self._crt_pos_noise_accum += np.random.normal(0, self._crt_pos_noise_std) * act_speed_f\n self._pos_noise.append(self._crt_pos_noise_accum)\n state[no_cars] += (self._crt_pos_noise_accum / length)\n state[no_cars] = state[no_cars] % 1.\n\n return state\n\n def reset(self):\n self._reset_noise()\n\n return super().reset()\n\n def _apply_rl_actions(self, actions):\n acc_noise = np.random.normal(0, self._crt_acc_noise_std)\n actions[0] += acc_noise\n \"\"\"See class definition.\"\"\"\n super()._apply_rl_actions(actions)\n" }, { "alpha_fraction": 0.6681614518165588, "alphanum_fraction": 0.713004469871521, 
"avg_line_length": 48.66666793823242, "blob_id": "33876bdb2dc9dd671b213d3c2b1f5ee3cd564377", "content_id": "84baad6881f7dc220d436239809b9f0ecb6bbed9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 446, "license_type": "permissive", "max_line_length": 246, "num_lines": 9, "path": "/README_pytorch.md", "repo_name": "robuved/flow", "src_encoding": "UTF-8", "text": "### Train pytorch\nInstall repo\n`https://github.com/ikostrikov/pytorch-a2c-ppo-acktr-gail`\n\nDon't forget to install repo `pytorch-a2c-ppo-acktr-gail` with `python setup.py install`\n\n### Train pytorch\n\n```python tutorials/pytorch_train.py --env-name \"lab_env1\" --algo ppo --use-gae --lr 2.5e-4 --clip-param 0.1 --value-loss-coef 0.5 --num-processes 8 --num-steps 128 --num-mini-batch 4 --log-interval 10 --use-linear-lr-decay --entropy-coef 0.01```" }, { "alpha_fraction": 0.6819417476654053, "alphanum_fraction": 0.6834951639175415, "avg_line_length": 39.873016357421875, "blob_id": "23cf02c2f5cc5075d67c0ad9d838703212394c00", "content_id": "618aa2eab26b6d8f587a40f14603aa3f934366f8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2575, "license_type": "permissive", "max_line_length": 94, "num_lines": 63, "path": "/flow/envs/nemodrive_lab/build_envs.py", "repo_name": "robuved/flow", "src_encoding": "UTF-8", "text": "from flow.envs.nemodrive_lab import LAB_ENVS\nfrom flow.core.params import NetParams\nfrom flow.core.params import InitialConfig\nfrom flow.core.params import SumoParams\nfrom flow.core.params import EnvParams\nfrom flow.utils.registry import make_create_env\nfrom flow.utils.rllib import FlowParamsEncoder\nimport json\n\n\ndef make_lab_env(env_name, sim_step=0.1, render=False, emission_path='data',\n restart_instance=True, print_warnings=False):\n\n assert env_name in LAB_ENVS, f\"{env_name} not in LAB_ENVS\"\n\n ENV = LAB_ENVS[env_name]\n\n network_name = ENV[\"NETWORK\"]\n name = env_name\n vehicles = ENV[\"VEHICLES\"]()\n net_params = NetParams(additional_params=ENV[\"ADDITIONAL_NET_PARAMS\"])\n initial_config_param = ENV[\"INITIAL_CONFIG_PARAMS\"]\n\n initial_config = InitialConfig(**initial_config_param)\n\n env_name = ENV[\"ENVIRONMENT\"]\n add_env_params = ENV[\"ADDITIONAL_ENV_PARAMS\"]\n add_env_params[\"action_space_box\"] = True\n env_params = EnvParams(additional_params=add_env_params, horizon=ENV[\"HORIZON\"])\n\n sumo_params = SumoParams(sim_step=sim_step, render=render, emission_path=emission_path,\n restart_instance=restart_instance, print_warnings=print_warnings)\n\n flow_params = dict(\n # name of the experiment\n exp_tag=name,\n # name of the flow environment the experiment is running on\n env_name=env_name,\n # name of the network class the experiment uses\n network=network_name,\n # simulator that is used by the experiment\n simulator='traci',\n # sumo-related parameters (see flow.core.params.SumoParams)\n sim=sumo_params,\n # environment related parameters (see flow.core.params.EnvParams)\n env=env_params,\n # network-related parameters (see flow.core.params.NetParams and\n # the network's documentation or ADDITIONAL_NET_PARAMS component)\n net=net_params,\n # vehicles to be placed in the network at the start of a rollout\n # (see flow.core.vehicles.Vehicles)\n veh=vehicles,\n # (optional) parameters affecting the positioning of vehicles upon\n # initialization/reset (see flow.core.params.InitialConfig)\n initial=initial_config\n )\n # save the flow params for 
replay\n flow_json = json.dumps(flow_params, cls=FlowParamsEncoder, sort_keys=True,\n indent=4) # generating a string version of flow_params\n\n create_env, gym_name = make_create_env(params=flow_params, version=0)\n create_env()\n return gym_name, flow_json\n" }, { "alpha_fraction": 0.6447534561157227, "alphanum_fraction": 0.6523388028144836, "avg_line_length": 33.39130401611328, "blob_id": "28e6633e32bd3a985e8f101efda1205488cd899c", "content_id": "984842aff17db4ec5f942d2febe8fe2e1a40e5db", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 791, "license_type": "permissive", "max_line_length": 88, "num_lines": 23, "path": "/flow/envs/nemodrive_lab/lane_change_controller.py", "repo_name": "robuved/flow", "src_encoding": "UTF-8", "text": "from flow.controllers.base_lane_changing_controller import \\\n BaseLaneChangeController\n\nimport numpy as np\n\n\nclass PeriodicLaneChangeController(BaseLaneChangeController):\n def __init__(self, veh_id, lane_change_params=None):\n \"\"\"Instantiate the base class for lane-changing controllers.\"\"\"\n if lane_change_params is None:\n lane_change_params = {}\n\n self.veh_id = veh_id\n self.lane_change_params = lane_change_params\n self.freq_interval = np.random.randint(*lane_change_params[\"lane_change_freqs\"])\n\n def get_lane_change_action(self, env):\n change_lane = 0\n if env.step_counter % self.freq_interval == 0:\n change_lane = np.random.randint(0, 2) * 2 - 1\n \"\"\"See parent class.\"\"\"\n\n return change_lane\n" } ]
8
Jops-Garcia/Hello-Hacktoberfest2k19
https://github.com/Jops-Garcia/Hello-Hacktoberfest2k19
83ff202a5c41addb3472495db2b39d2569ece892
8e948060c754d52fe6aff4ff6e31d2952393eac8
13e95da32532cb72cb9cb363475e7e2893fa028b
refs/heads/master
2020-08-28T15:48:21.020740
2019-10-26T17:34:12
2019-10-26T17:34:12
217,745,374
0
0
MIT
2019-10-26T17:31:59
2019-10-26T05:55:29
2019-10-26T05:55:27
null
[ { "alpha_fraction": 0.6585366129875183, "alphanum_fraction": 0.6951219439506531, "avg_line_length": 15.333333015441895, "blob_id": "d99a1ad77f2fce7b40f8e0bd48e6c1309a5002a8", "content_id": "f6779098868023079cc98002577b18226af8238a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 246, "license_type": "permissive", "max_line_length": 47, "num_lines": 15, "path": "/fibonacci.py", "repo_name": "Jops-Garcia/Hello-Hacktoberfest2k19", "src_encoding": "UTF-8", "text": "#Fibonacci - joao pedro garcia pereira\n#Declaracao de variaveis\ncont=0\nfib=1\naux=1\naux2=0\nn=int(input(\"Insert a number for fibonacci: \"))\n#Processamento\nwhile cont<n:\n\tcont=cont+1\n\tfib=aux2+aux\n\taux=aux2\n\taux2=fib\n\t#saida de dados\n\tprint(\"Fib %d = %d\"%(cont,fib))\n\n" } ]
1
aishwaryajaini/web_app
https://github.com/aishwaryajaini/web_app
d28ab5e86a73a26f399adb6eec261e5b708affec
bcfa07f7ed02950fa95cc4d873d8a1fd13765cad
78712c55c6725c80b366fe2790af3a56c6f763b4
refs/heads/master
2022-11-05T19:21:09.929390
2020-06-17T17:09:10
2020-06-17T17:09:10
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6597510576248169, "alphanum_fraction": 0.6597510576248169, "avg_line_length": 25.88888931274414, "blob_id": "7cbc487fa5425db22e9c9e7c64412c95895beea4", "content_id": "cc2a09708ea68537c5dda267b52af452ea543c12", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 241, "license_type": "no_license", "max_line_length": 47, "num_lines": 9, "path": "/admapp/urls.py", "repo_name": "aishwaryajaini/web_app", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom . import views\n\nurlpatterns=[\n path('login/',views.login,name='login'),\n path('accept/',views.accept,name='accept'),\n path('adm/',views.adm,name='adm'),\n path('addcc/',views.addcc,name='addcc')\n]" } ]
1
ashu12166/crm
https://github.com/ashu12166/crm
2a4538b9016023517059fc8496f9e2d61c59e42a
81b67d27a6c41893917550fb4283ad578fdbfc96
856e67716b2fb2dd4b49c950c253934b5600654a
refs/heads/master
2020-04-03T09:53:30.142677
2016-07-31T07:00:56
2016-07-31T07:00:56
62,441,441
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.7223154306411743, "alphanum_fraction": 0.7298657894134521, "avg_line_length": 28.799999237060547, "blob_id": "619e466fd8a7296e437c266079200212d1b8098c", "content_id": "11c519e9a0a2320c477d63a9cb7077f7d64bc75b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1192, "license_type": "no_license", "max_line_length": 94, "num_lines": 40, "path": "/login/views.py", "repo_name": "ashu12166/crm", "src_encoding": "UTF-8", "text": "from django.shortcuts import render_to_response, render,redirect\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.contrib import auth\nfrom django.core.context_processors import csrf\nfrom django.template import RequestContext\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import get_user_model\n\nUser = get_user_model()\n\n\ndef test(request):\n return \"123\"\n\n\n@csrf_exempt\ndef login(request):\n # c = {}\n # c.update(csrf(request))\n return render(request, 'envato.rathemes.com/infinity/topbar/login.html')\n\n\n@csrf_exempt\ndef auth_view(request):\n username = request.POST.get('username', '')\n password = request.POST.get('password', '')\n user = auth.authenticate(username=username, password=password)\n if user is not None:\n auth.login(request, user)\n return redirect('message_board:list')\n else:\n return HttpResponse('notauthenticate....please login with correct detail', status=403)\n\n\ndef post_view(request):\n return HttpResponse('thisisthepost', status=403)\n\n# ('blog/post_list.html',\n# \t\t\t\t\t\t\t\t{'username': request.user.first_name})\n" }, { "alpha_fraction": 0.5536842346191406, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 22.75, "blob_id": "604acd392cf6706d07565a44cf59561604016dbc", "content_id": "bcb4cff5f5d933c12c9aec984ef578b70c8f42e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 475, "license_type": "no_license", "max_line_length": 86, "num_lines": 20, "path": "/login/migrations/0002_auto_20160730_2230.py", "repo_name": "ashu12166/crm", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.7 on 2016-07-30 17:00\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('login', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='freelancer',\n name='name',\n field=models.CharField(max_length=20, unique=False, verbose_name=b'name'),\n ),\n ]\n" }, { "alpha_fraction": 0.65625, "alphanum_fraction": 0.6736111044883728, "avg_line_length": 28.564102172851562, "blob_id": "3e91948df2e7080235981953b6676cc3d5bba080", "content_id": "7ffffd91d74504e3b145c34d0176a1dd6596f4f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1152, "license_type": "no_license", "max_line_length": 82, "num_lines": 39, "path": "/message_board/models.py", "repo_name": "ashu12166/crm", "src_encoding": "UTF-8", "text": "from __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.db import models\nfrom django.utils import timezone\n\n\nclass Post(models.Model):\n author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)\n title = models.CharField(max_length=200)\n text = models.TextField()\n created_date = models.DateTimeField(\n default=timezone.now)\n 
published_date = models.DateTimeField(\n blank=True, null=True)\n\n def publish(self):\n self.published_date = timezone.now()\n self.save()\n\n def __str__(self):\n return self.title\n\n\n# class Freelancer(models.Model):\n# firstname = models.CharField(max_length=20)\n# lastname = models.CharField(max_length=20)\n# interest = models.CharField(max_length=200)\n# skils = models.TextField()\n# experience = models.TextField()\n#\n# def __str__(self):\n# return self.firstname\n\n# class Clients(models.Model):\n# name = models.charfield(amx_length=20)\n# firm_name = models.charfield(max_length=20)\n# reference = models.charfield(max_length=200)\n# chu = models.charfield(max_length=200)" }, { "alpha_fraction": 0.65625, "alphanum_fraction": 0.65625, "avg_line_length": 20.44444465637207, "blob_id": "adf1d2f0a8b9b73f577f82cabe111afb54deb174", "content_id": "acb6614d2f729b7a5ba863bc56d59bce716a59b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 192, "license_type": "no_license", "max_line_length": 64, "num_lines": 9, "path": "/login/urls.py", "repo_name": "ashu12166/crm", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\n\nfrom . import views\n\nurlpatterns = [\n url(r'^login/$', views.login, name='login_form'),\n url(r'^accounts/auth/$', views.auth_view, name='auth_view'),\n\n]" }, { "alpha_fraction": 0.6657754182815552, "alphanum_fraction": 0.6657754182815552, "avg_line_length": 36.400001525878906, "blob_id": "d964834c0a0fe9c6074cbf197cb50674430ba0e8", "content_id": "6eef752ca66fd478eddf0a0518753f4de0d8cb11", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 374, "license_type": "no_license", "max_line_length": 92, "num_lines": 10, "path": "/message_board/urls.py", "repo_name": "ashu12166/crm", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\n\nfrom .import views\n\nurlpatterns = [\n url(r'^message_board/$', views.message, name='list'),\n url(r'^message_board/post_new$', views.post_new, name='post_new'),\n #url(r'^message_board/post_detail$', views.post_detail, name='post_detail')\n url(r'^message_board/post_detail/(?P<pk>\\d+)/$', views.post_detail, name='post_detail'),\n]\n" }, { "alpha_fraction": 0.5338283777236938, "alphanum_fraction": 0.5536303520202637, "avg_line_length": 34.64706039428711, "blob_id": "3a9af4e193838f407e43cb56599fb6805afe427a", "content_id": "bf994269b323827cdc852475f37c39f025292abf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1212, "license_type": "no_license", "max_line_length": 114, "num_lines": 34, "path": "/login/migrations/0001_initial.py", "repo_name": "ashu12166/crm", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.7 on 2016-07-30 13:54\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Freelancer',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('password', models.CharField(max_length=128, verbose_name='password')),\n ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),\n ('name', models.CharField(max_length=20, unique=False, verbose_name=b'name')),\n ('field_of_interest', models.CharField(max_length=200)),\n ('skills', models.TextField()),\n 
('experience', models.TextField()),\n ('is_active', models.BooleanField(default=True)),\n ('is_admin', models.BooleanField(default=False)),\n ],\n options={\n 'verbose_name': 'user',\n 'verbose_name_plural': 'users',\n },\n ),\n ]\n" } ]
6
kostaleonard/Resnet_Wu_2016
https://github.com/kostaleonard/Resnet_Wu_2016
0ec7820dec1a9efcc4ed1a8dd3f8675bb4d5f85c
86836b7a08b9b2c1f0fb761f71c43ef58dcb68cd
80f445003b4210acb5edba8eda1cb8d4405c48ea
refs/heads/master
2022-12-29T08:18:45.973506
2020-10-12T01:15:59
2020-10-12T01:15:59
301,512,332
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6685879230499268, "alphanum_fraction": 0.6974063515663147, "avg_line_length": 35.52631759643555, "blob_id": "23309a127d2c703129963e473b78878b603f587b", "content_id": "cd289060679c406fea998c0b62b8e2823d235743", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 694, "license_type": "no_license", "max_line_length": 193, "num_lines": 19, "path": "/Makefile", "repo_name": "kostaleonard/Resnet_Wu_2016", "src_encoding": "UTF-8", "text": "dataset_args='{\"dataset_fraction\": 0.01}'\nnetwork_args='{\"architecture\": \"lenet\", \"input_shape\": [128, 128, 3], \"num_classes\": 1000}'\ntrain_args='{\"batch_size\": 32, \"epochs\": 10, \"augment_val\": true, \"early_stopping\": false, \"overfit_single_batch\": false, \"shuffle_on_epoch_end\": true, \"use_wandb\": false, \"save_model\": false}'\n\nall: train\n\ntrain:\n\t@echo Training model.\n\tPYTHONPATH=$(PYTHONPATH):. python3 training/train_model.py --gpu 0 --dataset_args $(dataset_args) --network_args $(network_args) --train_args $(train_args)\n\npytest:\n\t@echo Running linting scripts.\n\t-pylint dataset\n\t-pylint models\n\t-pylint test\n\t-pylint training\n\t-pylint util\n\t@echo Running unit tests.\n\t-pytest test/*.py\n" }, { "alpha_fraction": 0.6325187683105469, "alphanum_fraction": 0.6390977501869202, "avg_line_length": 32.25, "blob_id": "e75145cff5af5d5e23bc80bc9959d9a4a5383953", "content_id": "cd358de17d98bf7f021543d83c926f61b517a5c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1064, "license_type": "no_license", "max_line_length": 70, "num_lines": 32, "path": "/models/networks/mlp.py", "repo_name": "kostaleonard/Resnet_Wu_2016", "src_encoding": "UTF-8", "text": "\"\"\"An MLP Keras Model.\"\"\"\n\nfrom typing import Dict, Any\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Flatten, Dense, Dropout\n\nfrom dataset.ilsvrc_dataset import EXPECTED_NUM_CLASSES\nfrom dataset.image_dataset_sequence import DEFAULT_TARGET_SIZE\n\nDEFAULT_MLP_ARGS = {\n 'input_shape': DEFAULT_TARGET_SIZE + (3,),\n 'num_classes': EXPECTED_NUM_CLASSES,\n 'layer_size': 128,\n 'dropout_rate': 0.2,\n 'num_layers': 3\n}\n\n\nclass MLP(Sequential):\n \"\"\"A Multi-layer Perceptron.\"\"\"\n\n def __init__(self, mlp_args: Dict[str, Any]) -> None:\n \"\"\"Creates the object.\n :param mlp_args: the MLP hyperparameters.\n \"\"\"\n super().__init__()\n mlp_args = {**DEFAULT_MLP_ARGS, **mlp_args}\n self.add(Flatten(input_shape=mlp_args['input_shape']))\n for _ in range(mlp_args['num_layers']):\n self.add(Dense(mlp_args['layer_size'], activation='relu'))\n self.add(Dropout(mlp_args['dropout_rate']))\n self.add(Dense(mlp_args['num_classes'], activation='softmax'))\n" }, { "alpha_fraction": 0.660240650177002, "alphanum_fraction": 0.6922340989112854, "avg_line_length": 45, "blob_id": "5ba519189ab85d4511f0aa653f8772fa06477ce0", "content_id": "f419f6a91935a889bcad82873f8f0ecca6dd5d8d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7314, "license_type": "no_license", "max_line_length": 79, "num_lines": 159, "path": "/test/test_ilsvrc_dataset.py", "repo_name": "kostaleonard/Resnet_Wu_2016", "src_encoding": "UTF-8", "text": "\"\"\"Tests the ilsvrc_dataset module.\"\"\"\n\nimport pytest\nimport os\n\nfrom dataset.ilsvrc_dataset import ILSVRCDataset, DEFAULT_DATASET_PATH, \\\n EXPECTED_NUM_CLASSES, TRAIN_KEY, VAL_KEY, TEST_KEY, 
NULL_LABEL\n\nDATASET_FRACTION = 0.1\nDELTA = 0.05\n\n\[email protected]\ndef dataset() -> ILSVRCDataset:\n \"\"\"Returns an ILSVRCDataset.\n :return: the dataset.\n \"\"\"\n return ILSVRCDataset(DEFAULT_DATASET_PATH)\n\n\ndef test_trim_dataset(dataset: ILSVRCDataset) -> None:\n \"\"\"Tests that the dataset is being trimmed properly. The trimmed\n dataset should be shuffled so that the classes retain the same\n approximate distribution.\n :param dataset: the dataset.\n \"\"\"\n train_size_before = dataset.partition[TRAIN_KEY].shape[0]\n val_size_before = dataset.partition[VAL_KEY].shape[0]\n test_size_before = dataset.partition[TEST_KEY].shape[0]\n train_subset_before = dataset.partition[TRAIN_KEY][:5]\n val_subset_before = dataset.partition[VAL_KEY][:5]\n test_subset_before = dataset.partition[TEST_KEY][:5]\n dataset.trim_dataset(DATASET_FRACTION, trim_val=True, trim_test=False)\n train_size_after = dataset.partition[TRAIN_KEY].shape[0]\n val_size_after = dataset.partition[VAL_KEY].shape[0]\n test_size_after = dataset.partition[TEST_KEY].shape[0]\n train_subset_after = dataset.partition[TRAIN_KEY][:5]\n val_subset_after = dataset.partition[VAL_KEY][:5]\n test_subset_after = dataset.partition[TEST_KEY][:5]\n # Check that trimming occurred (or didn't).\n assert (train_size_before * (DATASET_FRACTION - DELTA)) < \\\n train_size_after < \\\n (train_size_before * (DATASET_FRACTION + DELTA))\n assert (val_size_before * (DATASET_FRACTION - DELTA)) < \\\n val_size_after < \\\n (val_size_before * (DATASET_FRACTION + DELTA))\n assert test_size_before == test_size_after\n # Check that the datasets were shuffled (or weren't).\n # We're just going to use the first 5 filenames to check for shuffling;\n # it's extremely unlikely that all are the same after shuffling.\n assert (train_subset_before != train_subset_after).any()\n assert (val_subset_before != val_subset_after).any()\n assert (test_subset_before == test_subset_after).all()\n\n\ndef test_label_mapping(dataset: ILSVRCDataset) -> None:\n \"\"\"Tests that the ILSVRCDataset's label mapping is correct. In\n particular, there should be 1000 classes with descriptive class\n names; synset IDs should correspond to correct classnames; and\n classnames/labels should be correctly associated.\n :param dataset: the dataset.\n \"\"\"\n assert len(dataset.label_to_classname) == EXPECTED_NUM_CLASSES\n assert len(dataset.classname_to_label.keys()) == EXPECTED_NUM_CLASSES\n assert len(dataset.synid_to_classname.keys()) == EXPECTED_NUM_CLASSES\n assert dataset.synid_to_classname['n01484850'] == \\\n 'great white shark, white shark, man-eater, man-eating shark, ' \\\n 'Carcharodon carcharias'\n assert dataset.synid_to_classname['n02508021'] == 'raccoon, racoon'\n assert dataset.synid_to_classname['n06267145'] == 'newspaper, paper'\n assert dataset.label_to_classname[0] == \\\n 'french fries, french-fried potatoes, fries, chips'\n assert dataset.label_to_classname[10] == 'blackberry'\n assert dataset.label_to_classname[999] == 'washer, automatic washer, ' \\\n 'washing machine'\n for label_idx, classname in enumerate(dataset.label_to_classname):\n assert dataset.classname_to_label[classname] == label_idx\n\n\ndef test_partition(dataset: ILSVRCDataset) -> None:\n \"\"\"Tests that ILSVRCDataset's partition is filled correctly. 
In\n particular, the filepaths should point to the correct files and\n the train/val labels should be correct.\n :param dataset: the dataset.\n \"\"\"\n assert NULL_LABEL not in dataset.label_to_classname\n # Train.\n fname = os.path.join(dataset.path, 'train', 'n01807496',\n 'n01807496_8.JPEG')\n assert fname in dataset.partition[TRAIN_KEY]\n assert fname not in dataset.partition[VAL_KEY]\n assert fname not in dataset.partition[TEST_KEY]\n assert os.path.exists(fname)\n assert os.path.isfile(fname)\n assert fname in dataset._labels\n assert dataset.synid_to_classname['n01807496'] == 'partridge'\n assert dataset._labels[fname] == dataset.classname_to_label[\n dataset.synid_to_classname['n01807496']]\n assert dataset._labels[fname] == 390\n fname = os.path.join(dataset.path, 'train', 'n04118538',\n 'n04118538_570.JPEG')\n assert fname in dataset.partition[TRAIN_KEY]\n assert fname not in dataset.partition[VAL_KEY]\n assert fname not in dataset.partition[TEST_KEY]\n assert os.path.exists(fname)\n assert os.path.isfile(fname)\n assert fname in dataset._labels\n assert dataset.synid_to_classname['n04118538'] == 'rugby ball'\n assert dataset._labels[fname] == dataset.classname_to_label[\n dataset.synid_to_classname['n04118538']]\n assert dataset._labels[fname] == 749\n # Val.\n fname = os.path.join(dataset.path, 'val', 'ILSVRC2010_val_00000001.JPEG')\n assert fname not in dataset.partition[TRAIN_KEY]\n assert fname in dataset.partition[VAL_KEY]\n assert fname not in dataset.partition[TEST_KEY]\n assert os.path.exists(fname)\n assert os.path.isfile(fname)\n assert fname in dataset._labels\n assert dataset._labels[fname] == 77\n assert dataset.classname_to_label[\n dataset.synid_to_classname['n09428293']] == 77\n assert dataset.label_to_classname[77] == \\\n 'seashore, coast, seacoast, sea-coast'\n fname = os.path.join(dataset.path, 'val', 'ILSVRC2010_val_00008079.JPEG')\n assert fname not in dataset.partition[TRAIN_KEY]\n assert fname in dataset.partition[VAL_KEY]\n assert fname not in dataset.partition[TEST_KEY]\n assert os.path.exists(fname)\n assert os.path.isfile(fname)\n assert dataset._labels[fname] == 734\n assert dataset.classname_to_label[\n dataset.synid_to_classname['n03535780']] == 734\n assert dataset.label_to_classname[734] == 'horizontal bar, high bar'\n fname = os.path.join(dataset.path, 'val', 'ILSVRC2010_val_00050000.JPEG')\n assert fname not in dataset.partition[TRAIN_KEY]\n assert fname in dataset.partition[VAL_KEY]\n assert fname not in dataset.partition[TEST_KEY]\n assert os.path.exists(fname)\n assert os.path.isfile(fname)\n assert dataset._labels[fname] == 560\n assert dataset.classname_to_label[\n dataset.synid_to_classname['n04090263']] == 560\n assert dataset.label_to_classname[560] == 'rifle'\n # Test.\n fname = os.path.join(dataset.path, 'test', 'ILSVRC2010_test_00015978.JPEG')\n assert fname not in dataset.partition[TRAIN_KEY]\n assert fname not in dataset.partition[VAL_KEY]\n assert fname in dataset.partition[TEST_KEY]\n assert os.path.exists(fname)\n assert os.path.isfile(fname)\n assert fname not in dataset._labels\n fname = os.path.join(dataset.path, 'test', 'ILSVRC2010_test_00150000.JPEG')\n assert fname not in dataset.partition[TRAIN_KEY]\n assert fname not in dataset.partition[VAL_KEY]\n assert fname in dataset.partition[TEST_KEY]\n assert os.path.exists(fname)\n assert os.path.isfile(fname)\n assert fname not in dataset._labels\n" }, { "alpha_fraction": 0.5813623666763306, "alphanum_fraction": 0.5879848599433899, "avg_line_length": 41.563758850097656, 
"blob_id": "4c18c33b887e27e3e3c5d90b6f134e63ada23075", "content_id": "730447c2ef78921e7b0e16857818586c672ad8fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6342, "license_type": "no_license", "max_line_length": 79, "num_lines": 149, "path": "/dataset/ilsvrc_dataset.py", "repo_name": "kostaleonard/Resnet_Wu_2016", "src_encoding": "UTF-8", "text": "\"\"\"ILSVRCDataset class.\"\"\"\n\nimport os\nimport numpy as np\nfrom typing import Dict, List\n\nfrom dataset.dataset import Dataset, TRAIN_KEY, VAL_KEY, TEST_KEY\n\nDEFAULT_DATASET_PATH = os.path.join(\n '/',\n 'Users',\n 'leo',\n 'Documents',\n 'Datasets',\n 'ILSVRC2012'\n)\nCLASS_MAPPING_DIR = 'class_mapping'\nWORDS_FILENAME = os.path.join(CLASS_MAPPING_DIR, 'words.txt')\nWNIDS_FILENAME = os.path.join(CLASS_MAPPING_DIR, 'wnids.txt')\nVAL_LABELS_FILE = os.path.join(\n 'devkit-1.0',\n 'data',\n 'ILSVRC2010_validation_ground_truth.txt'\n)\nEXPECTED_NUM_CLASSES = 1000\nNULL_LABEL = 'NULL'\n\n\nclass ILSVRCDataset(Dataset):\n \"\"\"Represents the ILSVRC2012 dataset.\"\"\"\n\n def __init__(self, path: str) -> None:\n \"\"\"Instantiates the object.\n :param path: the path to the dataset if it exists, or the path\n to which the root directory should be saved.\n \"\"\"\n self.path_train: str = os.path.join(path, 'train')\n self.path_val: str = os.path.join(path, 'val')\n self.path_test: str = os.path.join(path, 'test')\n self.synid_to_classname: Dict[str, str] = {}\n super().__init__(path)\n\n def _get_train_dirs(self) -> List[str]:\n \"\"\"Returns a list of the training directories.\n :return: the training directories, unsorted.\n \"\"\"\n return [dirname for dirname in os.listdir(self.path_train)\n if os.path.isdir(os.path.join(self.path_train, dirname))]\n\n def _get_synids(self) -> List[str]:\n \"\"\"Returns the synset ID values by which classes are\n identified. For example, 'n01484850' is a great white shark.\n Because this is how the classes are determined for the training\n images, this is the same as _get_train_dirs().\n :return: the synset ID values of all classes, unsorted.\n \"\"\"\n return self._get_train_dirs()\n\n def _get_full_synid_mapping(self) -> Dict[str, str]:\n \"\"\"Returns the complete mapping from synset ID to class name.\n This is found in the WORDS_FILENAME file. Only the 1000 IDs\n used in the dataset are saved to synid_to_classname; this is\n just a helper method to populate that dict.\n :return: a dict where the keys are synset IDs and the values\n are class names.\n \"\"\"\n full_mapping = {}\n with open(os.path.join(self.path, WORDS_FILENAME)) as infile:\n for line in infile.readlines():\n pair = line.split('\\t')\n if len(pair) != 2:\n raise ValueError('Expected exactly one tab per line, but '\n 'found {0}.'.format(len(pair) - 1))\n synid, classname = pair[0].strip(), pair[1].strip()\n full_mapping[synid] = classname\n return full_mapping\n\n def _get_wnid_to_label(self) -> Dict[str, int]:\n \"\"\"Returns the dict mapping synset ID (also WNID) to label\n number. 
Fixed ImageNet's scheme to be zero-indexed.\n :return: the WNID/synset ID to label dict.\n \"\"\"\n wnid_to_label = {}\n with open(os.path.join(self.path, WNIDS_FILENAME)) as infile:\n lines = infile.readlines()\n for idx, line in enumerate(lines):\n wnid = line.split()[2].strip()\n wnid_to_label[wnid] = idx\n return wnid_to_label\n\n def _fill_label_mapping(self) -> None:\n \"\"\"Fills self.label_to_classname, self.classname_to_label, and\n self._synid_to_classname.\"\"\"\n full_mapping = self._get_full_synid_mapping()\n used_synids = self._get_synids()\n wnid_to_label = self._get_wnid_to_label()\n self.label_to_classname = [NULL_LABEL for _ in range(\n EXPECTED_NUM_CLASSES)]\n for i, synid in enumerate(used_synids):\n classname = full_mapping[synid]\n label = wnid_to_label[synid]\n self.synid_to_classname[synid] = classname\n self.label_to_classname[label] = classname\n if classname in self.classname_to_label:\n raise ValueError(\n 'Class {0} already assigned to label {1}'.format(\n classname, self.classname_to_label[classname]))\n self.classname_to_label[classname] = label\n\n def _fill_partition(self) -> None:\n \"\"\"Fills self.partition and self._labels.\"\"\"\n train_images = []\n train_dirs = self._get_train_dirs()\n for class_dir in train_dirs:\n class_path = os.path.join(self.path_train, class_dir)\n for imfile in os.listdir(class_path):\n imfile_path = os.path.join(class_path, imfile)\n train_images.append(str(imfile_path))\n synid = str(imfile).split('_')[0]\n label = self.classname_to_label[self.synid_to_classname[synid]]\n self._labels[imfile_path] = label\n self.partition[TRAIN_KEY] = np.array(train_images, dtype='str')\n val_images = []\n with open(os.path.join(self.path, VAL_LABELS_FILE)) as infile:\n val_labels = [int(line.strip()) - 1 for line in infile.readlines()]\n val_imfiles = sorted(os.listdir(self.path_val))\n if len(val_labels) != len(val_imfiles):\n raise ValueError(\n 'Expected the same number of labels and images, but found '\n '{0} labels, {1} images.'.format(\n len(val_labels), len(val_imfiles)))\n for i, imfile in enumerate(val_imfiles):\n imfile_path = os.path.join(self.path_val, imfile)\n val_images.append(imfile_path)\n self._labels[imfile_path] = val_labels[i]\n self.partition[VAL_KEY] = np.array(val_images, dtype='str')\n test_images = []\n for imfile in os.listdir(self.path_test):\n imfile_path = os.path.join(self.path_test, imfile)\n test_images.append(imfile_path)\n self.partition[TEST_KEY] = np.array(test_images, dtype='str')\n\n def _load_or_download_dataset(self, verbose: bool = False) -> None:\n \"\"\"Loads the dataset from a pre-existing path, or downloads it.\n :param verbose: whether to print info to stdout.\n \"\"\"\n super()._load_or_download_dataset(verbose=verbose)\n self._fill_label_mapping()\n self._fill_partition()\n" }, { "alpha_fraction": 0.640423059463501, "alphanum_fraction": 0.6427732110023499, "avg_line_length": 34.45833206176758, "blob_id": "08bf356556f29fef18344247209dda67075ce4c2", "content_id": "bb2000f1fc9da860553774743cf5bc574bd68782", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 851, "license_type": "no_license", "max_line_length": 71, "num_lines": 24, "path": "/models/image_model.py", "repo_name": "kostaleonard/Resnet_Wu_2016", "src_encoding": "UTF-8", "text": "\"\"\"ImageModel class.\"\"\"\n\nimport numpy as np\nfrom typing import Tuple\n\nfrom models.project_model import ProjectModel\nfrom util.util import normalize_images\n\n\nclass 
ImageModel(ProjectModel):\n \"\"\"Represents an ML model used on images.\"\"\"\n\n def predict_on_image(self, image: np.ndarray) -> Tuple[str, float]:\n \"\"\"Returns the prediction and confidence on a single image.\n :param image: a single raw (non-normalized) image.\n :return: the predicted class name and the confidence.\n \"\"\"\n norm_image = normalize_images(image)\n pred_raw = self.network.predict(np.expand_dims(norm_image, 0),\n batch_size=1).flatten()\n i = np.argmax(pred_raw)\n confidence = pred_raw[i]\n pred_class = self.dataset.label_to_classname[i]\n return pred_class, confidence\n" }, { "alpha_fraction": 0.6357445120811462, "alphanum_fraction": 0.645683765411377, "avg_line_length": 36.99300765991211, "blob_id": "9dc9eeb2eff6cca0928f158f11fd884f52a390b1", "content_id": "7ca3d6a752651b83a8b767fe8bfb553420488262", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5433, "license_type": "no_license", "max_line_length": 76, "num_lines": 143, "path": "/test/test_image_dataset_sequence.py", "repo_name": "kostaleonard/Resnet_Wu_2016", "src_encoding": "UTF-8", "text": "\"\"\"Tests the image_dataset_sequence module.\"\"\"\n\nimport pytest\nimport numpy as np\n\nfrom dataset.image_dataset_sequence import ImageDatasetSequence\nfrom dataset.ilsvrc_dataset import ILSVRCDataset, DEFAULT_DATASET_PATH, \\\n TRAIN_KEY\n\nDATASET_FRACTION = 0.001\nNUM_CLASSES = 1000\nIMAGE_TARGET_SIZE = (128, 128)\nBATCH_SIZE = 32\n\n\[email protected]\ndef dataset() -> ILSVRCDataset:\n \"\"\"Returns an ILSVRCDataset.\n :return: the dataset.\n \"\"\"\n dataset = ILSVRCDataset(DEFAULT_DATASET_PATH)\n return dataset\n\n\ndef test_images(dataset: ILSVRCDataset) -> None:\n \"\"\"Tests that the sequence output images meet expected standards.\n :param dataset: the dataset.\n \"\"\"\n dataset.trim_dataset(DATASET_FRACTION)\n x_train_filenames = dataset.partition[TRAIN_KEY]\n y_train = dataset.get_labels(x_train_filenames, True, NUM_CLASSES)\n train_sequence = ImageDatasetSequence(\n x_train_filenames, y=y_train, batch_size=BATCH_SIZE,\n image_target_size=IMAGE_TARGET_SIZE,\n batch_augment_fn=None,\n batch_format_fn=None,\n overfit_single_batch=False,\n shuffle_on_epoch_end=True\n )\n # Test that only the last batch is not of length BATCH_SIZE.\n # Also test that there are the correct number of batches.\n on_last_batch = False\n num_batches_seen = 0\n for batch in train_sequence:\n assert not on_last_batch\n x_batch, y_batch = batch\n # Take the first image/label pair and check that it meets standards.\n # Check that the image is of the right size.\n assert x_batch[0].shape == IMAGE_TARGET_SIZE + (3,)\n # Check that the image is of the right datatype.\n assert x_batch.dtype == np.float32\n # Check that the image is normalized.\n assert (0.0 <= x_batch.flatten()).all()\n assert (x_batch.flatten() <= 1.0).all()\n # Check that the label is categorical and of the right dimension.\n assert y_batch.shape[1] == NUM_CLASSES\n # Check that the label is of the right datatype.\n assert y_batch.dtype == np.float32\n # Check that the label is one-hot.\n for label in y_batch:\n assert sum(label) == 1\n on_last_batch = not (x_batch.shape[0] == BATCH_SIZE and\n y_batch.shape[0] == BATCH_SIZE)\n num_batches_seen += 1\n assert num_batches_seen == len(train_sequence)\n\n\ndef test_shuffle(dataset: ILSVRCDataset) -> None:\n \"\"\"Tests that the shuffling flag works as expected. 
Also tests that\n filenames and labels are still properly mapped.\n :param dataset: the dataset.\n \"\"\"\n dataset.trim_dataset(DATASET_FRACTION)\n x_train_filenames = dataset.partition[TRAIN_KEY]\n y_train = dataset.get_labels(x_train_filenames, True, NUM_CLASSES)\n train_sequence = ImageDatasetSequence(\n x_train_filenames, y=y_train, batch_size=BATCH_SIZE,\n image_target_size=IMAGE_TARGET_SIZE,\n batch_augment_fn=None,\n batch_format_fn=None,\n overfit_single_batch=False,\n shuffle_on_epoch_end=True\n )\n img_to_label_before = {}\n for batch in train_sequence:\n x_batch, y_batch = batch\n for i in range(x_batch.shape[0]):\n img_data = tuple(x_batch[i].flatten())\n label = tuple(y_batch[i])\n img_to_label_before[img_data] = label\n # Test shuffle.\n first_batch_before = train_sequence.__getitem__(0)\n train_sequence.on_epoch_end()\n first_batch_after = train_sequence.__getitem__(0)\n assert (first_batch_before[0] != first_batch_after[0]).any()\n # Test filename/label mappings.\n for batch in train_sequence:\n x_batch, y_batch = batch\n for i in range(x_batch.shape[0]):\n img_data = tuple(x_batch[i].flatten())\n label = tuple(y_batch[i])\n assert img_to_label_before[img_data] == label\n\n\ndef test_overfit_single_batch(dataset: ILSVRCDataset) -> None:\n \"\"\"Tests that the same batch of images is always presented to the\n model if overfitting on a single batch.\n :param dataset: the dataset.\n \"\"\"\n dataset.trim_dataset(DATASET_FRACTION)\n x_train_filenames = dataset.partition[TRAIN_KEY]\n y_train = dataset.get_labels(x_train_filenames, True, NUM_CLASSES)\n # Test that you can't set overfit and shuffle flags together.\n train_sequence = ImageDatasetSequence(\n x_train_filenames, y=y_train, batch_size=BATCH_SIZE,\n image_target_size=IMAGE_TARGET_SIZE,\n batch_augment_fn=None,\n batch_format_fn=None,\n overfit_single_batch=True,\n shuffle_on_epoch_end=True\n )\n with pytest.raises(ValueError):\n for _ in train_sequence:\n pass\n # Test that you always get the same batch, even after multiple epochs.\n train_sequence = ImageDatasetSequence(\n x_train_filenames, y=y_train, batch_size=BATCH_SIZE,\n image_target_size=IMAGE_TARGET_SIZE,\n batch_augment_fn=None,\n batch_format_fn=None,\n overfit_single_batch=True,\n shuffle_on_epoch_end=False\n )\n num_batches_epoch_1 = 0\n for batch in train_sequence:\n assert (batch[0] == train_sequence.__getitem__(0)[0]).all()\n num_batches_epoch_1 += 1\n train_sequence.on_epoch_end()\n num_batches_epoch_2 = 0\n for batch in train_sequence:\n assert (batch[0] == train_sequence.__getitem__(0)[0]).all()\n num_batches_epoch_2 += 1\n assert num_batches_epoch_1 == num_batches_epoch_2\n" }, { "alpha_fraction": 0.567157506942749, "alphanum_fraction": 0.5841142535209656, "avg_line_length": 34.015625, "blob_id": "c65fbe717a6f40e7ece1d5210436551e1ebb44e8", "content_id": "9ebb3548565fab70c01f6c62649185d587ac9c93", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2241, "license_type": "no_license", "max_line_length": 81, "num_lines": 64, "path": "/models/networks/lenet.py", "repo_name": "kostaleonard/Resnet_Wu_2016", "src_encoding": "UTF-8", "text": "\"\"\"A LeNet Convolutional Neural Network Keras Model.\"\"\"\n\nfrom typing import Dict, Any\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Flatten, Dense, Dropout, Conv2D, MaxPooling2D\nfrom tensorflow.keras.backend import image_data_format\n\nfrom dataset.ilsvrc_dataset import EXPECTED_NUM_CLASSES\nfrom 
dataset.image_dataset_sequence import DEFAULT_TARGET_SIZE\n\nDEFAULT_LENET_ARGS = {\n 'input_shape': DEFAULT_TARGET_SIZE + (3,),\n 'num_classes': EXPECTED_NUM_CLASSES,\n 'filters_1': 32,\n 'kernel_1': (3, 3),\n 'pool_1': (2, 2),\n 'filters_2': 64,\n 'kernel_2': (3, 3),\n 'pool_2': (2, 2),\n 'dropout': 0.2,\n 'dense': 128\n}\n\n\nclass LeNet(Sequential):\n \"\"\"A LeNet Convolutional Neural Network.\"\"\"\n\n def __init__(self, lenet_args: Dict[str, Any]) -> None:\n \"\"\"Creates the object.\n :param lenet_args: the LeNet hyperparameters.\n \"\"\"\n super().__init__()\n lenet_args = {**DEFAULT_LENET_ARGS, **lenet_args}\n if len(lenet_args['input_shape']) != 3:\n raise ValueError('Expected 3 dimensions (width, height, channels), '\n 'but got {0}.'.format(len(lenet_args['input_shape'])))\n # TODO there may be a need to support channels_first.\n if image_data_format() == 'channels_first':\n raise ValueError('Expected channels last in image tensors.')\n self.add(Conv2D(\n lenet_args['filters_1'],\n kernel_size=lenet_args['kernel_1'],\n activation='relu',\n input_shape=lenet_args['input_shape'],\n padding='valid'\n ))\n self.add(MaxPooling2D(\n pool_size=lenet_args['pool_1'],\n padding='valid'\n ))\n self.add(Conv2D(\n lenet_args['filters_2'],\n kernel_size=lenet_args['kernel_2'],\n activation='relu',\n padding='valid'\n ))\n self.add(MaxPooling2D(\n pool_size=lenet_args['pool_2'],\n padding='valid'\n ))\n self.add(Dropout(lenet_args['dropout']))\n self.add(Flatten())\n self.add(Dense(lenet_args['dense'], activation='relu'))\n self.add(Dense(lenet_args['num_classes'], activation='softmax'))\n" }, { "alpha_fraction": 0.6024360060691833, "alphanum_fraction": 0.6377264261245728, "avg_line_length": 39.531646728515625, "blob_id": "bc402421d07e3ae6c244ff7ccd9458979df9d8b2", "content_id": "8192e21d80e6408edcf5eaa3a66a2a883aa9153", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3202, "license_type": "no_license", "max_line_length": 78, "num_lines": 79, "path": "/test/test_train_model.py", "repo_name": "kostaleonard/Resnet_Wu_2016", "src_encoding": "UTF-8", "text": "\"\"\"Tests the train_model module.\"\"\"\n\nimport pytest\n\nfrom training import train_model\nfrom training.train_model import ARCHITECTURE_LENET\n\nNUM_CLASSES = 1000\nOVERFIT_SINGLE_BATCH_TARGET_LOSS = 0.001\n\n\ndef test_get_model() -> None:\n \"\"\"Tests that the model is receiving parameters and getting created\n correctly.\"\"\"\n dataset_args = {'dataset_fraction': 0.01}\n network_args = {'input_shape': (64, 64, 3),\n 'num_classes': 1000}\n model = train_model.get_model(dataset_args, network_args)\n assert model.network.layers[0].output_shape[1] == (64 * 64 * 3)\n assert model.network.layers[-1].output_shape[1] == 1000\n dataset_args = {'dataset_fraction': 0.001}\n network_args = {'input_shape': (32, 32, 3),\n 'num_classes': 999}\n model = train_model.get_model(dataset_args, network_args)\n assert model.network.layers[0].output_shape[1] == (32 * 32 * 3)\n assert model.network.layers[-1].output_shape[1] == 999\n\n\ndef test_train_model() -> None:\n \"\"\"Tests that the model is being trained correctly.\"\"\"\n dataset_args = {'dataset_fraction': 0.1}\n network_args = {'input_shape': (32, 32, 3),\n 'num_classes': NUM_CLASSES - 1}\n train_args = {}\n model = train_model.get_model(dataset_args, network_args)\n assert model.network.layers[0].output_shape[1] == (32 * 32 * 3)\n assert model.network.layers[-1].output_shape[1] == NUM_CLASSES - 1\n # The output shape is lower than the 
number of classes, so training fails.\n with pytest.raises(IndexError):\n train_model.train_model(model, train_args)\n dataset_args = {'dataset_fraction': 0.01}\n network_args = {'input_shape': (32, 32, 3),\n 'num_classes': NUM_CLASSES}\n train_args = {}\n model = train_model.get_model(dataset_args, network_args)\n assert model.network.layers[0].output_shape[1] == (32 * 32 * 3)\n assert model.network.layers[-1].output_shape[1] == NUM_CLASSES\n history = train_model.train_model(model, train_args)\n loss_by_epoch = history.history['loss']\n acc_by_epoch = history.history['accuracy']\n # Test that training improved loss and accuracy.\n assert loss_by_epoch[0] > loss_by_epoch[-1]\n assert acc_by_epoch[0] < acc_by_epoch[-1]\n\n\ndef test_overfit_single_batch() -> None:\n \"\"\"Tests that the model can overfit on a single batch.\"\"\"\n dataset_args = {'dataset_fraction': 0.01}\n network_args = {\n 'architecture': ARCHITECTURE_LENET,\n 'input_shape': (32, 32, 3),\n 'num_classes': NUM_CLASSES\n }\n train_args = {\n 'batch_size': 32,\n 'epochs': 30,\n 'early_stopping': False,\n 'overfit_single_batch': True,\n 'shuffle_on_epoch_end': False\n }\n model = train_model.get_model(dataset_args, network_args)\n history = train_model.train_model(model, train_args)\n loss_by_epoch = history.history['loss']\n acc_by_epoch = history.history['accuracy']\n # Test that training improved loss and accuracy.\n assert loss_by_epoch[0] > loss_by_epoch[-1]\n assert acc_by_epoch[0] < acc_by_epoch[-1]\n # Test that we can get training loss arbitrarily low.\n assert loss_by_epoch[-1] < OVERFIT_SINGLE_BATCH_TARGET_LOSS\n" }, { "alpha_fraction": 0.6109127998352051, "alphanum_fraction": 0.6147373914718628, "avg_line_length": 41.630435943603516, "blob_id": "f01e51c9700f44ad760ab8f0c433681a23260d51", "content_id": "25684a0bb3e0e2369b03954db727b030bd493c11", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3922, "license_type": "no_license", "max_line_length": 79, "num_lines": 92, "path": "/dataset/dataset.py", "repo_name": "kostaleonard/Resnet_Wu_2016", "src_encoding": "UTF-8", "text": "\"\"\"Dataset class.\"\"\"\n\nimport os\nfrom typing import Dict, List\nimport numpy as np\nfrom tensorflow.keras.utils import to_categorical\n\nVERBOSE: bool = True\nTRAIN_KEY: str = 'train'\nVAL_KEY: str = 'val'\nTEST_KEY: str = 'test'\nEMPTY_PARTITION: Dict[str, np.ndarray] = {\n TRAIN_KEY: np.arange(0, dtype='str'),\n VAL_KEY: np.arange(0, dtype='str'),\n TEST_KEY: np.arange(0, dtype='str')\n}\n\n\nclass Dataset:\n \"\"\"Represents a dataset.\"\"\"\n\n def __init__(self, path: str) -> None:\n \"\"\"Instantiates the object.\n :param path: the path to the dataset if it exists, or the path\n to which the root directory should be saved.\n \"\"\"\n self.path: str = path\n self.partition: Dict[str, np.ndarray] = EMPTY_PARTITION\n self._labels: Dict[str, int] = {}\n self.label_to_classname: List[str] = []\n self.classname_to_label: Dict[str, int] = {}\n self._load_or_download_dataset(verbose=VERBOSE)\n\n def get_labels(self, x_filenames: np.ndarray, categorical: bool,\n categorical_num_classes: int) -> np.ndarray:\n \"\"\"Returns an np.ndarray of ints representing the labels for\n the given filenames.\n :param x_filenames: the names of the training files.\n :param categorical: whether to return the labels as categorical values.\n :param categorical_num_classes: the number of classes in the dataset;\n only used if categorical is True.\n :return: the labels, in the same order as the 
filenames.\n \"\"\"\n arr = np.array([self._labels[fname] for fname in x_filenames])\n if categorical:\n return to_categorical(arr, num_classes=categorical_num_classes)\n return arr\n\n def _load_or_download_dataset(self, verbose: bool = False) -> None:\n \"\"\"Loads the dataset from a pre-existing path, or downloads it.\n Subclasses should override.\n :param verbose: whether to print info to stdout.\n \"\"\"\n if not os.path.exists(self.path):\n if verbose:\n print('Making dir {0}'.format(self.path))\n os.mkdir(self.path)\n if not os.path.isdir(self.path):\n raise NotADirectoryError('{0} is not a directory.'.format(\n self.path))\n\n def trim_dataset(self, target_dataset_fraction: float,\n trim_train: bool = True, trim_val: bool = True,\n trim_test: bool = False) -> None:\n \"\"\"Reduces the size of the dataset (train, val, and/or test) so\n that only target_dataset_fraction of them are used.\n :param target_dataset_fraction: the fraction of data to keep,\n in the interval [0.0, 1.0].\n :param trim_train: whether to trim the training set.\n :param trim_val: whether to trim the validation set.\n :param trim_test: whether to trim the test set.\n \"\"\"\n train_indices = np.arange(self.partition[TRAIN_KEY].shape[0])\n val_indices = np.arange(self.partition[VAL_KEY].shape[0])\n test_indices = np.arange(self.partition[TEST_KEY].shape[0])\n np.random.shuffle(train_indices)\n np.random.shuffle(val_indices)\n np.random.shuffle(test_indices)\n if trim_train:\n end_idx = int(train_indices.shape[0] * target_dataset_fraction)\n selected_indices = train_indices[:end_idx]\n self.partition[TRAIN_KEY] = self.partition[TRAIN_KEY][\n selected_indices]\n if trim_val:\n end_idx = int(val_indices.shape[0] * target_dataset_fraction)\n selected_indices = val_indices[:end_idx]\n self.partition[VAL_KEY] = self.partition[VAL_KEY][selected_indices]\n if trim_test:\n end_idx = int(test_indices.shape[0] * target_dataset_fraction)\n selected_indices = test_indices[:end_idx]\n self.partition[TEST_KEY] = self.partition[TEST_KEY][\n selected_indices]\n" }, { "alpha_fraction": 0.6149346232414246, "alphanum_fraction": 0.6176875233650208, "avg_line_length": 42.05185317993164, "blob_id": "73fb1d534565a05da3b856b8b4afcd55677b9065", "content_id": "8ef9aabad66f22c3ebb3ebc86f56650da1b61d3f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5812, "license_type": "no_license", "max_line_length": 79, "num_lines": 135, "path": "/models/project_model.py", "repo_name": "kostaleonard/Resnet_Wu_2016", "src_encoding": "UTF-8", "text": "\"\"\"Model class.\"\"\"\n\nimport os\nfrom typing import Callable, List, Optional, Dict, Any\nimport numpy as np\nfrom tensorflow.keras.callbacks import Callback\nfrom tensorflow.keras.models import Model as KerasModel\nfrom tensorflow.keras.callbacks import History\nfrom tensorflow.keras.optimizers import Optimizer, RMSprop\n\nfrom dataset.dataset import Dataset, TRAIN_KEY, VAL_KEY, TEST_KEY\nfrom dataset.image_dataset_sequence import ImageDatasetSequence, \\\n DEFAULT_BATCH_SIZE\n\nDEFAULT_TRAIN_ARGS = {\n 'batch_size': DEFAULT_BATCH_SIZE,\n 'epochs': 10,\n 'augment_val': True,\n 'early_stopping': False,\n 'overfit_single_batch': False,\n 'shuffle_on_epoch_end': True,\n 'use_wandb': False,\n 'save_model': False\n}\n\n\nclass ProjectModel:\n \"\"\"Represents an ML model that can be trained and make predictions.\"\"\"\n\n def __init__(self, dataset: Dataset, network: KerasModel) -> None:\n \"\"\"Instantiates the object.\n :param dataset: the dataset 
on which to train.\n :param network: the neural network to use.\n \"\"\"\n self.dataset: Dataset = dataset\n self.network: KerasModel = network\n self.loss: str = 'categorical_crossentropy'\n self.optimizer: Optimizer = RMSprop()\n self.metrics: List[str] = ['accuracy']\n self.weights_filename = os.path.join('saved', '{0}_{1}_{2}'.format(\n self.__class__.__name__, self.dataset.__class__.__name__,\n self.network.__class__.__name__\n ))\n self.batch_augment_fn: Optional[Callable] = None\n self.batch_format_fn: Optional[Callable] = None\n\n def fit(self, train_args: Dict[str, Any],\n callbacks: List[Callback] = None) -> History:\n \"\"\"Trains the model and returns the history.\n :param train_args: the training arguments.\n :param callbacks: a list of keras callbacks to use during\n training.\n :return: the training history.\n \"\"\"\n train_args = {**DEFAULT_TRAIN_ARGS, **train_args}\n callbacks = [] if callbacks is None else callbacks\n self.network.compile(loss=self.loss, optimizer=self.optimizer,\n metrics=self.metrics)\n x_train_filenames = self.dataset.partition[TRAIN_KEY]\n y_train = self.dataset.get_labels(x_train_filenames, True,\n self.network.output_shape[1])\n x_val_filenames = self.dataset.partition[VAL_KEY]\n y_val = self.dataset.get_labels(x_val_filenames, True,\n self.network.output_shape[1])\n train_sequence = ImageDatasetSequence(\n x_train_filenames, y=y_train, batch_size=train_args['batch_size'],\n image_target_size=self.network.input_shape[1:3],\n batch_augment_fn=self.batch_augment_fn,\n batch_format_fn=self.batch_format_fn,\n overfit_single_batch=train_args['overfit_single_batch'],\n shuffle_on_epoch_end=train_args['shuffle_on_epoch_end']\n )\n val_sequence = ImageDatasetSequence(\n x_val_filenames, y=y_val, batch_size=train_args['batch_size'],\n image_target_size=self.network.input_shape[1:3],\n batch_augment_fn=self.batch_augment_fn if train_args['augment_val']\n else None,\n batch_format_fn=self.batch_format_fn,\n shuffle_on_epoch_end=train_args['shuffle_on_epoch_end']\n )\n return self.network.fit(\n x=train_sequence,\n epochs=train_args['epochs'],\n callbacks=callbacks,\n validation_data=val_sequence,\n use_multiprocessing=False,\n workers=1\n )\n\n def evaluate(self, x_filenames: np.ndarray, y: np.ndarray,\n batch_size: int = DEFAULT_BATCH_SIZE) -> np.ndarray:\n \"\"\"Evaluates the model on the given dataset.\n :param x_filenames: an np.ndarray of strs where each str is an\n image filename.\n :param y: an np.ndarray of ints where the ith int is the label\n of the ith image in x_filenames.\n :param batch_size: the number of examples in each batch.\n :return: the fraction of correct predictions.\n \"\"\"\n # TODO y should be categorical.\n sequence = ImageDatasetSequence(\n x_filenames, y=y, batch_size=batch_size,\n image_target_size=self.network.input_shape[1:3])\n preds = self.network.predict(sequence)\n return np.mean(np.argmax(preds, axis=-1) == np.argmax(y, axis=-1))\n\n def predict(self, x_filenames: np.ndarray,\n batch_size: int = DEFAULT_BATCH_SIZE) -> np.ndarray:\n \"\"\"Makes a prediction on the given examples.\n :param x_filenames: an np.ndarray of strs where each str is an\n image filename.\n :param batch_size: the number of examples in each batch.\n :return: an np.ndarray of predicted classes.\n \"\"\"\n sequence = ImageDatasetSequence(\n x_filenames, y=None, batch_size=batch_size,\n image_target_size=self.network.input_shape[1:3])\n return self.network.predict(sequence)\n\n def predict_on_test(self,\n batch_size: int = DEFAULT_BATCH_SIZE) -> 
np.ndarray:\n \"\"\"Makes a prediction on the test dataset.\n :param batch_size: the number of examples in each batch.\n :return: an np.ndarray of predicted classes.\n \"\"\"\n return self.predict(self.dataset.partition[TEST_KEY],\n batch_size=batch_size)\n\n def save_weights(self) -> None:\n \"\"\"Saves the weights to the predefined file.\"\"\"\n self.network.save_weights(self.weights_filename)\n\n def load_weights(self) -> None:\n \"\"\"Loads the weights from the predefined file.\"\"\"\n self.network.load_weights(self.weights_filename)\n" }, { "alpha_fraction": 0.5650950074195862, "alphanum_fraction": 0.7297677397727966, "avg_line_length": 49.75, "blob_id": "9ce1667c4fa732b508edf7e1ae895310112daf30", "content_id": "06eaa33ab477bcc5edd49d659737fdebc1113d8b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1421, "license_type": "no_license", "max_line_length": 331, "num_lines": 28, "path": "/test/test_reproducible.py", "repo_name": "kostaleonard/Resnet_Wu_2016", "src_encoding": "UTF-8", "text": "\"\"\"Tests that training results are reproducible.\nDon't add any other tests, because that could mess with the random seeding.\nAlso, updating packages can mess up the results, but they should still be the\nsame every time.\"\"\"\n\nfrom dataset.ilsvrc_dataset import ILSVRCDataset, DEFAULT_DATASET_PATH\nfrom models.networks.mlp import MLP\nfrom models.project_model import ProjectModel\nfrom training import train_model\nfrom util.util import set_random_seed\n\nSEED = 52017\nSEED_HISTORY = \"{'loss': [6.760105133056641, 1.0928313732147217, 1.2394728660583496, 0.8879892230033875], 'accuracy': [0.7902321815490723, 0.7870296239852905, 0.8126501441001892, 0.8462769985198975], 'val_loss': [15.447237968444824, 47.05992126464844, 22.719436645507812, 30.461416244506836], 'val_accuracy': [0.0, 0.0, 0.0, 0.0]}\"\n\n\ndef test_training_reproducible() -> None:\n \"\"\"Tests that training results are reproducible.\"\"\"\n set_random_seed(SEED)\n dataset_args = {'dataset_fraction': 0.001}\n network_args = {'input_shape': (128, 128, 3),\n 'num_classes': 1000}\n train_args = {'epochs': 10, 'batch_size': 32, 'early_stopping': True}\n dataset = ILSVRCDataset(DEFAULT_DATASET_PATH)\n dataset.trim_dataset(dataset_args['dataset_fraction'])\n network = MLP(network_args)\n model = ProjectModel(dataset, network)\n history = train_model.train_model(model, train_args)\n assert str(history.history) == SEED_HISTORY\n" }, { "alpha_fraction": 0.762499988079071, "alphanum_fraction": 0.7875000238418579, "avg_line_length": 52, "blob_id": "641b388f00f9bf2f8eda35cd0d484b4755715f08", "content_id": "32590efdf7f19e8bded9740b9b1251dd76a76e5a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 160, "license_type": "no_license", "max_line_length": 148, "num_lines": 3, "path": "/README.md", "repo_name": "kostaleonard/Resnet_Wu_2016", "src_encoding": "UTF-8", "text": "# README\n\nThis repository is a reimplementation of the Wu et al. 
ResNet paper from 2016 (Wider or Deeper: Revisiting the ResNet Model for Visual Recognition).\n\n" }, { "alpha_fraction": 0.7571428418159485, "alphanum_fraction": 0.7571428418159485, "avg_line_length": 69, "blob_id": "8b52f9dfebe85c82948076573899fad296298d21", "content_id": "48450a4e59034f46a7c9962efb825579ec36faa1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 70, "license_type": "no_license", "max_line_length": 69, "num_lines": 1, "path": "/test/test_key_predictions.py", "repo_name": "kostaleonard/Resnet_Wu_2016", "src_encoding": "UTF-8", "text": "\"\"\"Tests that the model makes correct predictions on key examples.\"\"\"\n" }, { "alpha_fraction": 0.6127819418907166, "alphanum_fraction": 0.6127819418907166, "avg_line_length": 16.733333587646484, "blob_id": "49071112ab1a6444d36803b7a1fa3b089da2e2da", "content_id": "acee5d2fc4b02d1c7e7de6a97a0c1ea89d4ec224", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 266, "license_type": "no_license", "max_line_length": 60, "num_lines": 15, "path": "/training/run_sweep.py", "repo_name": "kostaleonard/Resnet_Wu_2016", "src_encoding": "UTF-8", "text": "\"\"\"Runs a hyperparameter sweep to find the best ML model.\"\"\"\n\nfrom util.util import set_random_seed\nUSE_RANDOM_SEED = True\nif USE_RANDOM_SEED:\n set_random_seed()\n\n\ndef main() -> None:\n \"\"\"Runs the program.\"\"\"\n # TODO\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.6184448599815369, "alphanum_fraction": 0.6224633455276489, "avg_line_length": 44.66054916381836, "blob_id": "82aec6c67187f0e2e4795679fd1eefe88561f193", "content_id": "663032bf32f6aa692897468e0888fdc46af3e9df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4977, "license_type": "no_license", "max_line_length": 79, "num_lines": 109, "path": "/dataset/image_dataset_sequence.py", "repo_name": "kostaleonard/Resnet_Wu_2016", "src_encoding": "UTF-8", "text": "\"\"\"DatasetSequence class.\"\"\"\n\nimport math\nfrom typing import Tuple, Optional, Callable\nimport numpy as np\nfrom tensorflow.keras.utils import Sequence\nfrom tensorflow.keras.preprocessing.image import load_img, img_to_array\n\nfrom util.util import get_max_batch_size, normalize_images\n\nDEFAULT_BATCH_SIZE = 32\nCHECK_MAX_BATCH_SIZE = True\nDEFAULT_TARGET_SIZE = (128, 128)\n\n\nclass ImageDatasetSequence(Sequence):\n \"\"\"Represents a single sequence in the dataset (i.e., the train,\n val, or test set).\"\"\"\n\n def __init__(self, x_filenames: np.ndarray, y: Optional[np.ndarray] = None,\n image_target_size: Tuple[int, int] = DEFAULT_TARGET_SIZE,\n batch_size: int = DEFAULT_BATCH_SIZE,\n batch_augment_fn: Optional[Callable] = None,\n batch_format_fn: Optional[Callable] = None,\n overfit_single_batch: bool = False,\n shuffle_on_epoch_end: bool = True) -> None:\n \"\"\"Instantiates the object.\n :param x_filenames: ndarray of strs containing the filenames of\n all examples in the dataset.\n :param y: ndarray of ints containing the labels of all examples\n in the dataset. 
If None, \"labels\" are set to all zeros as a\n placeholder (use this for predictions, where labels are\n unknown).\n :param image_target_size: the size at which images will be loaded.\n :param batch_size: the number of examples in each batch.\n :param batch_augment_fn: the function to augment a batch of\n data.\n :param batch_format_fn: the function to format a batch of data.\n :param overfit_single_batch: if True, the sequence will always return\n the first batch in the dataset. You can use this to validate the\n training pipeline--if the dataset, network, and training regime are\n set up correctly, then you should be able to achieve a training loss\n arbitrarily close to zero after many epochs. Generally used only on the\n train set.\n :param shuffle_on_epoch_end: whether to shuffle the sequence on epoch\n end. tensorflow's model.fit has a shuffle flag, but it does not work on\n generators, so we need it here.\n \"\"\"\n # pylint: disable=invalid-name\n if y is None:\n y = np.zeros(x_filenames.shape[0])\n if x_filenames.shape[0] != y.shape[0]:\n raise ValueError('Found {0} examples, but {1} labels'.format(\n x_filenames.shape[0], y.shape[0]))\n self.x_filenames: np.ndarray = x_filenames\n self.y: np.ndarray = y\n self.image_target_size: Tuple[int, int] = image_target_size\n self.batch_size: int = batch_size\n self.batch_augment_fn: Optional[Callable] = batch_augment_fn\n self.batch_format_fn: Optional[Callable] = batch_format_fn\n self.overfit_single_batch: bool = overfit_single_batch\n self.shuffle_on_batch_end = shuffle_on_epoch_end\n if CHECK_MAX_BATCH_SIZE:\n print('Estimated maximum batch size: {0}'.format(\n get_max_batch_size(x_filenames)))\n\n def __len__(self) -> int:\n \"\"\"Returns the number of batches in the dataset.\n :return: the length of the dataset.\n \"\"\"\n return math.ceil(len(self.x_filenames) / self.batch_size)\n\n def __getitem__(self, idx: int) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Returns a single batch of loaded data.\n :param idx: the batch index.\n :return: a tuple of two np.ndarray objects, the batch x values\n (features) and the batch labels, respectively.\n \"\"\"\n if self.overfit_single_batch:\n if self.shuffle_on_batch_end:\n raise ValueError('Cannot overfit on one batch if shuffling is '\n 'true.')\n idx = 0\n # TODO write a test to check for correct shuffling.\n batch_start = idx * self.batch_size\n batch_end = (idx + 1) * self.batch_size\n batch_x_filenames = self.x_filenames[batch_start:batch_end]\n batch_x = np.array([img_to_array(load_img(\n filename, target_size=self.image_target_size), dtype=np.uint8)\n for filename in batch_x_filenames])\n batch_x = normalize_images(batch_x)\n batch = batch_x, self.y[batch_start:batch_end]\n if self.batch_augment_fn:\n batch = self.batch_augment_fn(batch)\n if self.batch_format_fn:\n batch = self.batch_format_fn(batch)\n return batch\n\n def on_epoch_end(self) -> None:\n \"\"\"Performs actions that happen at the end of every epoch, e.g.\n shuffling.\"\"\"\n if self.shuffle_on_batch_end:\n self.shuffle()\n\n def shuffle(self) -> None:\n \"\"\"Shuffles the dataset.\"\"\"\n shuffled_indices = np.random.permutation(self.x_filenames.shape[0])\n self.x_filenames, self.y = self.x_filenames[shuffled_indices], \\\n self.y[shuffled_indices]\n" }, { "alpha_fraction": 0.6787749528884888, "alphanum_fraction": 0.688746452331543, "avg_line_length": 31.65116310119629, "blob_id": "082f58970f384a3533018e29caf010e60d5a574f", "content_id": "432f0b2ea8620388b3dce0734f5cfaed43ca7716", "detected_licenses": [], "is_generated": 
false, "is_vendor": false, "language": "Python", "length_bytes": 1404, "license_type": "no_license", "max_line_length": 77, "num_lines": 43, "path": "/util/util.py", "repo_name": "kostaleonard/Resnet_Wu_2016", "src_encoding": "UTF-8", "text": "\"\"\"Utility functions.\"\"\"\n\n# Random seeds need to be set up at program launch, before other\n# imports, because some libraries use random initialization.\nimport os\nfrom random import seed as base_random_seed\nfrom numpy.random import seed\nfrom tensorflow._api.v2.random import set_seed\nimport numpy as np\n\nRANDOM_SEED = 52017\n\n\ndef set_random_seed(random_seed: int = RANDOM_SEED) -> None:\n \"\"\"Sets up the random seed so that experiments are reproducible.\n :param random_seed: the random seed to use.\n \"\"\"\n os.environ['PYTHONHASHSEED'] = str(random_seed)\n base_random_seed(random_seed)\n seed(random_seed)\n set_seed(random_seed)\n print('Random seed set')\n\n\ndef get_max_batch_size(x_filenames: np.ndarray) -> int:\n \"\"\"Returns the maximum batch size attainable on the current\n CPU/GPU.\n :param x_filenames: ndarray of strs containing the filenames of all\n examples in the dataset.\n :return: maximum batch size.\n \"\"\"\n # TODO\n\n\ndef normalize_images(images: np.ndarray) -> np.ndarray:\n \"\"\"Returns a normalized data array for one or more images.\n :param images: an np.ndarray of one or more images.\n :return: a normalized array.\n \"\"\"\n if images.dtype != np.uint8:\n raise ValueError('Expected processed images to be of data '\n 'type np.uint8, but found {0}'.format(images.dtype))\n return (images / 255).astype(np.float32)\n" }, { "alpha_fraction": 0.6613898277282715, "alphanum_fraction": 0.6665295958518982, "avg_line_length": 33.74285888671875, "blob_id": "117d59642053b634a42ebad02c70d6e492ee5ed1", "content_id": "b014ce620daf2d9d35c1a077e6ca93eb3fffedeb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4864, "license_type": "no_license", "max_line_length": 76, "num_lines": 140, "path": "/training/train_model.py", "repo_name": "kostaleonard/Resnet_Wu_2016", "src_encoding": "UTF-8", "text": "\"\"\"Trains the model.\"\"\"\n\nfrom util.util import set_random_seed\nUSE_RANDOM_SEED = True\nif USE_RANDOM_SEED:\n set_random_seed()\n# pylint: disable=wrong-import-position\nfrom tensorflow.keras.callbacks import EarlyStopping, Callback, History\nfrom wandb.keras import WandbCallback\nfrom typing import List, Dict, Any\nfrom time import time\nimport argparse\nimport json\n\nfrom models.project_model import ProjectModel, DEFAULT_TRAIN_ARGS\nfrom models.image_model import ImageModel\nfrom dataset.ilsvrc_dataset import ILSVRCDataset, DEFAULT_DATASET_PATH, \\\n EXPECTED_NUM_CLASSES\nfrom dataset.image_dataset_sequence import DEFAULT_TARGET_SIZE\nfrom dataset.dataset import TRAIN_KEY, VAL_KEY, TEST_KEY\nfrom models.networks.mlp import MLP\nfrom models.networks.lenet import LeNet\n\nARCHITECTURE_MLP = 'mlp'\nARCHITECTURE_LENET = 'lenet'\nDEFAULT_DATASET_ARGS = {\n 'dataset_fraction': 0.01\n}\nDEFAULT_NETWORK_ARGS = {\n 'architecture': ARCHITECTURE_MLP,\n 'input_shape': DEFAULT_TARGET_SIZE + (3,),\n 'num_classes': EXPECTED_NUM_CLASSES\n}\n\n\ndef get_custom_wandb_callbacks() -> List[Callback]:\n \"\"\"Returns a list of custom wandb callbacks to use.\n :return: custom callbacks.\n \"\"\"\n # TODO custom callbacks.\n return []\n\n\ndef get_model(dataset_args: Dict[str, Any],\n network_args: Dict[str, Any]) -> ProjectModel:\n \"\"\"Returns the model.\n :param dataset_args: the 
dataset arguments; see DEFAULT_DATASET_ARGS for\n available arguments.\n :param network_args: the network arguments; see DEFAULT_NETWORK_ARGS for\n available arguments.\n :return: the model.\n \"\"\"\n dataset_args = {**DEFAULT_DATASET_ARGS, **dataset_args}\n network_args = {**DEFAULT_NETWORK_ARGS, **network_args}\n print('Dataset args: {0}'.format(dataset_args))\n print('Network args: {0}'.format(network_args))\n print('Loading dataset from {0}'.format(DEFAULT_DATASET_PATH))\n dataset = ILSVRCDataset(DEFAULT_DATASET_PATH)\n if dataset_args['dataset_fraction'] < 1.0:\n dataset.trim_dataset(dataset_args['dataset_fraction'])\n print('Num training examples: {0}'.format(\n dataset.partition[TRAIN_KEY].shape[0]))\n print('Num validation examples: {0}'.format(\n dataset.partition[VAL_KEY].shape[0]))\n print('Num test examples: {0}'.format(\n dataset.partition[TEST_KEY].shape[0]))\n if network_args['architecture'] == ARCHITECTURE_MLP:\n network = MLP(network_args)\n elif network_args['architecture'] == ARCHITECTURE_LENET:\n network = LeNet(network_args)\n else:\n raise ValueError('Unrecognized architecture: {0}'.format(\n network_args['architecture']))\n return ImageModel(dataset, network)\n\n\ndef train_model(model: ProjectModel, train_args: Dict[str, Any]) -> History:\n \"\"\"Trains the model.\n :param model: the model to train.\n :param train_args: training arguments; see DEFAULT_TRAIN_ARGS for\n available arguments.\n :return: the training history.\n \"\"\"\n train_args = {**DEFAULT_TRAIN_ARGS, **train_args}\n print('Training args: {0}'.format(train_args))\n callbacks = []\n if train_args['early_stopping']:\n early_stopping = EarlyStopping(monitor='val_loss', min_delta=0.01,\n patience=3, verbose=1, mode='auto')\n callbacks.append(early_stopping)\n if train_args['use_wandb']:\n callbacks.append(WandbCallback())\n callbacks.extend(get_custom_wandb_callbacks())\n model.network.summary()\n t_start = time()\n history = model.fit(train_args, callbacks=callbacks)\n t_end = time()\n print('Model training finished in {0:2f}s'.format(t_end - t_start))\n return history\n\n\ndef parse_args() -> argparse.Namespace:\n \"\"\"Returns the command line arguments.\n :return: the command line arguments.\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--gpu', type=int, default=0,\n help='The index of the GPU to use. If the system has no GPUs, this '\n 'argument is ignored.'\n )\n parser.add_argument(\n '--dataset_args', type=str, default='{}',\n help='The dataset arguments, as a JSON dictionary.'\n )\n parser.add_argument(\n '--network_args', type=str, default='{}',\n help='The network arguments, as a JSON dictionary.'\n )\n parser.add_argument(\n '--train_args', type=str, default='{}',\n help='The training arguments, as a JSON dictionary.'\n )\n return parser.parse_args()\n\n\ndef main() -> None:\n \"\"\"Runs the program.\"\"\"\n args = parse_args()\n # TODO GPU assignment.\n dataset_args = json.loads(args.dataset_args)\n network_args = json.loads(args.network_args)\n train_args = json.loads(args.train_args)\n model = get_model(dataset_args, network_args)\n history = train_model(model, train_args)\n print(history.history)\n\n\nif __name__ == '__main__':\n main()\n" } ]
17
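The ImageDatasetSequence docstrings and tests in the record above describe ceil-based batching: __len__ is math.ceil(n / batch_size), __getitem__ slices by batch index, and only the final batch may come up short. A minimal standalone sketch of that arithmetic (the name batch_slices is illustrative and not from the repo; no TensorFlow is needed):

import math

def batch_slices(n_examples, batch_size):
    # Mirrors a Keras Sequence: ceil(n / batch_size) batches, where batch
    # idx covers the half-open range [idx * batch_size, (idx + 1) * batch_size).
    n_batches = math.ceil(n_examples / batch_size)
    for idx in range(n_batches):
        yield idx * batch_size, min((idx + 1) * batch_size, n_examples)

# 100 examples at batch size 32 -> 4 batches; only the last one is short.
sizes = [end - start for start, end in batch_slices(100, 32)]
assert sizes == [32, 32, 32, 4]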
EdilsonTarcio/Heap_Sort
https://github.com/EdilsonTarcio/Heap_Sort
0e772e851793114c883c2073186e7cf1c6e1f823
fc84d24c3f7952eed9f3b5bfcdda3893aa71ec54
a5bcfba60e8f54b7debdd13d1b1be3d38b8d2a33
refs/heads/main
2023-05-08T12:41:05.043175
2021-06-01T00:20:06
2021-06-01T00:20:06
372,657,157
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8235294222831726, "alphanum_fraction": 0.8235294222831726, "avg_line_length": 33, "blob_id": "73c2c4a4f47dd2d9d35a19036b87a37183551c00", "content_id": "9810f1c85770ceb9ceaa0636a57e3032f551fdfc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 72, "license_type": "no_license", "max_line_length": 34, "num_lines": 2, "path": "/README.md", "repo_name": "EdilsonTarcio/Heap_Sort", "src_encoding": "UTF-8", "text": "# Heap Sort sorting algorithm\nHeap Sort sorting algorithm\n" }, { "alpha_fraction": 0.5757097601890564, "alphanum_fraction": 0.597003161907196, "avg_line_length": 29.95121955871582, "blob_id": "8176a007c741902ff0212d27d275a48e0a417b0c", "content_id": "c69d7f3f4f91539780d01e3be6de3c06e7ed6c74", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1278, "license_type": "no_license", "max_line_length": 88, "num_lines": 41, "path": "/HeapSort.py", "repo_name": "EdilsonTarcio/Heap_Sort", "src_encoding": "UTF-8", "text": "import random\n\ndef organizar(lista, n, m): \n maior = m # largest node of the max-heap, which will end up at the root\n esquerdo = 2 * m + 1 # left child index = 2*m + 1 \n direito = 2 * m + 2 # right child index = 2*m + 2 \n \n # check whether the root's left child exists and is greater than the root \n if esquerdo < n and lista[m] < lista[esquerdo]: \n maior = esquerdo \n \n # check whether the root's right child exists and is greater than the root \n if direito < n and lista[maior] < lista[direito]: \n maior = direito \n \n # update the root if necessary \n if maior != m: \n lista[m],lista[maior] = lista[maior],lista[m] \n \n organizar(lista, n, maior) \n \n \n# Heap Sort sorting routine\ndef HeapSort(lista): \n n = len(lista) \n \n for m in range(n // 2 - 1, -1, -1): \n organizar(lista, n, m) \n \n # extract elements one by one \n for m in range(n-1, 0, -1): \n lista[m], lista[0] = lista[0], lista[m] \n organizar(lista, m, 0) \n \nran=20 # number of random values to generate\nlista = random.sample(range(100), ran) # generate unique random values between 0 and 100\nHeapSort(lista) \nn = len(lista) \nprint (\"Values sorted by Heap Sort\") \nfor m in range(n): \n print (\"%d\" %lista[m])" } ]
2
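The comments in HeapSort.py above describe a 0-based max-heap sift-down in which the children of node m sit at indices 2*m + 1 and 2*m + 2. A minimal English-named restatement of the same logic, checked against Python's built-in sorted() (a sketch for illustration, not code from the repo):

import random

def sift_down(a, n, i):
    # The children of node i sit at 2*i + 1 and 2*i + 2 in a 0-based max-heap.
    largest = i
    left, right = 2 * i + 1, 2 * i + 2
    if left < n and a[left] > a[largest]:
        largest = left
    if right < n and a[right] > a[largest]:
        largest = right
    if largest != i:
        a[i], a[largest] = a[largest], a[i]
        sift_down(a, n, largest)

def heap_sort(a):
    n = len(a)
    for i in range(n // 2 - 1, -1, -1):  # heapify bottom-up from the last parent
        sift_down(a, n, i)
    for end in range(n - 1, 0, -1):      # move the max to the back, shrink the heap
        a[0], a[end] = a[end], a[0]
        sift_down(a, end, 0)

data = random.sample(range(100), 20)
expected = sorted(data)
heap_sort(data)
assert data == expected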
coderwyc/tianhongfund
https://github.com/coderwyc/tianhongfund
1b249999867a92bd72536f589534e44db97982de
be54952aae4fd5c85d35f4de393cc5fb5ff90698
9c7481db3558767a8167247a28c52d37d894af04
refs/heads/master
2021-01-10T13:04:54.067873
2016-03-08T07:11:25
2016-03-08T07:11:25
53,319,330
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6643295288085938, "alphanum_fraction": 0.6809815764427185, "avg_line_length": 31.140844345092773, "blob_id": "2975231653fdb2c81e4d6fc6441fb6b9aeac3a56", "content_id": "45d51e6f77bbb8517a8122fa448f5c53da294375", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2716, "license_type": "no_license", "max_line_length": 80, "num_lines": 71, "path": "/tianhong.py", "repo_name": "coderwyc/tianhongfund", "src_encoding": "UTF-8", "text": "#_*_coding:utf-8_*_\n# from crawl import get_net_val\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\nfrom allfunds import get_url_by_name\nfrom crawl import get_val\n\nclass Fund(object):\n\t\"\"\" CSI 300 fund investment.\n\t\tinvestment: amount invested\n\t\tfund_net: fund NAV (net asset value per share)\n\t\thanding_charge: purchase fee rate\n\t\"\"\"\n\tdef __init__(self, investment, fund_net, handing_charge=0.001):\n\t\tself.investment = float(investment)\n\t\tself.fund_net = fund_net\n\t\tself.net_investment = float(investment)/(1+handing_charge)\t# net purchase amount = purchase amount / (1 + purchase fee rate)\n\t\tself.share = self.net_investment / fund_net # purchased shares = net purchase amount / per-share NAV on day T\n\t\tself.in_charge = [float(investment) - self.net_investment] # purchase fee = purchase amount - net purchase amount\n\t\tself.out_charge = []\n\t\tself.in_ = [fund_net] # record buy prices\n\t\tself.out = [] # record sell prices\n\tdef get_share(self):\n\t\t\"\"\"Get the fund shares held.\"\"\"\n\t\treturn self.share\n\tdef get_profit(self, sell_charge=0.0005):\n\t\t\"\"\"Get the current profit.\"\"\"\n\t\treturn (self.share * self.fund_net) - self.investment\n\tdef update_net(self, new_net):\n\t\t\"\"\"Update the fund NAV.\"\"\"\n\t\tself.fund_net = new_net\n\tdef get_all_investment(self):\n\t\t\"\"\"Get the total amount invested.\"\"\"\n\t\treturn self.investment\n\tdef get_total(self):\n\t\t\"\"\"Get the current total assets held.\"\"\"\n\t\treturn self.share * self.fund_net\n\tdef buy_more(self, investment, fund_net, handing_charge=0.001):\n\t\t\"\"\"Add a follow-up investment.\"\"\"\n\t\tself.in_.append(fund_net)\n\t\tself.investment += investment\n\t\tself.update_net(fund_net) # update the fund NAV\n\t\tself.net_investment = float(investment)/(1+handing_charge)\t# net purchase amount = purchase amount / (1 + purchase fee rate)\n\t\tself.share += self.net_investment / fund_net # purchased shares = net purchase amount / per-share NAV on day T\n\t\tself.in_charge.append(float(investment) - self.net_investment) # purchase fee = purchase amount - net purchase amount\n\tdef unload(self, share, fund_net, sell_charge=0.0005):\n\t\t\"\"\"Redeem (sell down) fund shares.\"\"\"\n\t\tif share > self.share:\n\t\t\tprint 'You have no share'\n\t\t\treturn None\n\t\tself.out.append(fund_net)\n\t\tself.update_net(fund_net) # update the fund NAV\n\t\tself.share -= share\n\t\ttotal = share * fund_net # redemption total = shares redeemed * per-share NAV on day T\n\t\tself.investment -= total*(1-sell_charge)\n\t\tself.out_charge.append(total*sell_charge) # redemption fee = redemption total * redemption fee rate\n\t\treturn total*(1-sell_charge) \n\nif __name__ == '__main__':\n\ttz = Fund(1000, 0.7438)\n\turl = get_url_by_name(u'天弘创业板')\n\tname, date, val = get_val(url) \t\n\ttz.update_net(float(val))\n\tprint u'Amount invested:' + str(tz.get_all_investment())\n\tprint u'Fund name:' + name\n\tprint u'Date:'+ date\n\tprint u'Fund NAV:'+ val\n\tprint u'Shares held:'+str(tz.get_share())\n\tprint u'Total assets:'+str(float(val)*tz.get_share())\n\tprint u'Investment return:'+str(tz.get_profit())\n" }, { "alpha_fraction": 0.637982189655304, "alphanum_fraction": 0.6735904812812805, "avg_line_length": 
29.590909957885742, "blob_id": "1c57893bf8552dba14236f1c5922b19f6003978f", "content_id": "8b0acb1c7547c45354419fc98910c16f4e0ee074", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1404, "license_type": "no_license", "max_line_length": 126, "num_lines": 44, "path": "/crawl.py", "repo_name": "coderwyc/tianhongfund", "src_encoding": "UTF-8", "text": "#_*_coding:utf-8_*_\n'''get funds net val and date'''\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\nimport allfunds\nimport re\nfrom urllib2 import urlopen\nfrom lxml.html import parse\nimport requests\n\ndef get_net_val(url):\n\ttry:\n\t\t# urlๆ˜ฏๆฒชๆทฑ3000\n\t\t# url = \"http://www.thfund.com.cn/column.dohsmode=searchtopic&pageno=0&channelid=2&categoryid=2992&childcategoryid=2993.htm\"\n\t\thtml = urlopen(url).read()\n\t\tfind_value = re.compile('<td><span style=\"color:red;\">(.+?)</span></td>', re.DOTALL)\n\t\tfind_time = re.compile('<td>(20.+?)</td>', re.DOTALL)\n\t\tnet_val = find_value.findall(html)\n\t\tdate = find_time.findall(html)\n\t\treturn net_val[0], date[0]\n\texcept:\n\t\tprint 'Network Failed'\n\ndef get_val(url):\n\t# urlๆ˜ฏๅˆ›ไธšๆฟ\n\t# url = \"http://www.thfund.com.cn/column.dohsmode=searchtopic&pageno=0&channelid=2&categoryid=3785&childcategoryid=3788.htm\"\n\tparsed = parse(urlopen(url))\n\tdoc = parsed.getroot()\n\ttables = doc.findall('.//table')\n\t# print len(tables)\n\tdata = tables[1]\n\trows = data.findall('.//tr')\n\tthhs300 = [val.text_content() for val in rows[1].findall('td')]\n\treturn thhs300[0], thhs300[1], thhs300[2]\nif __name__ == '__main__':\n\t# val, date = get_net_val()\n\t# print r'ๆ—ฅๆœŸ:'+date\n\t# print r'ๅŸบ้‡‘ๅ‡€ๅ€ผ:'+val\n\turl = allfunds.get_url_by_name(u'ๅคฉๅผ˜ๅˆ›ไธšๆฟ')\n\tname, date, val = get_val(url)\n\tprint r'ๅŸบ้‡‘ๅ็งฐ:' + str(name)\n\tprint r'ๆ—ฅๆœŸ:'+ date\n\tprint r'ๅŸบ้‡‘ๅ‡€ๅ€ผ:'+ val\n\n\n" }, { "alpha_fraction": 0.6482577323913574, "alphanum_fraction": 0.6706114411354065, "avg_line_length": 15.911110877990723, "blob_id": "3aa2c7059121d335665f56e689a66d4d6921c888", "content_id": "9870a2dfa35bdd361e65137452ee492f5285093a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2213, "license_type": "no_license", "max_line_length": 124, "num_lines": 90, "path": "/allfunds.py", "repo_name": "coderwyc/tianhongfund", "src_encoding": "UTF-8", "text": "#_*_coding:utf-8_*_\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n# ็ˆฌๅ–ๅคฉๅผ˜ๆ‰€ๆœ‰ๅŸบ้‡‘็š„ๆ•ฐๆฎ\n\n\n'''module for get URL of funds by name'''\n\ndef get_url_by_name(name):\n\ttry:\n\t\tfrom urllib2 import urlopen\n\t\tfrom lxml.html import parse\n\t\turl = 'http://www.thfund.com.cn/column.dohsmode=searchtopic&pageno=0&channelid=2&categoryid=2435&childcategoryid=2436.htm'\n\t\tparsed = parse(urlopen(url))\n\t\tdoc = parsed.getroot()\n\t\tnavs = doc.find_class('left_nav_new_subnav') # ๆŸฅๆ‰พclass='left_nav_new_subnav'็š„้กน\n\t\t# print len(navs)\n\n\t\tfunds = {}\n\t\tfor nav in navs:\n\t\t\tfunds_link = nav.findall('.//a')\n\t\t\tfor item in funds_link:\n\t\t\t\tfunds[item.text] = item.get('href')\n\t\t# print len(funds)\n\t\t# print funds\n\t\tbase_url = 'http://www.thfund.com.cn'\n\t\tfunds_db = {}\n\t\tfor key in funds.keys():\n\t\t\tfunds[key] = base_url + funds[key]\n\t\t\tspt = key.split('\\n')\n\t\t\tfunds_db[spt[0]] = funds[key]\n\t\t# for k,v in funds_db.items():\n\t\t# \tprint k\n\t\treturn funds_db[name]\n\texcept Exception, e:\n\t\traise e\n\n\nif __name__ == '__main__':\n\tprint 
get_url_by_name(u'天弘沪深300')\n\n# 天弘鑫安宝保本\n# 行健宏扬中国基金\n# 天弘中证全指运输\n# 天弘稳利定期开放\n# 天弘中证食品饮料\n# 天弘新价值混合\n# 天弘永利债券\n# 天弘中证休闲娱乐\n# 天弘中证500\n# 天弘裕利灵活配置\n# 天弘中证移动互联网\n# 天弘中证高端装备制造\n# 天弘互联网灵活配置\n# 天弘精选混合\n# 天弘通利混合\n# 天弘安康养老\n# 天弘中证大宗商品\n# 天弘弘利债券\n# 天弘鑫动力混合\n# 天弘季加利理财债券\n# 天弘债券发起式\n# 天弘丰利债券LOF\n# 天弘现金管家货币\n# 天弘新活力混合\n# 天弘弘运宝\n# 天弘云商宝\n# 天弘增益宝\n# 天弘中证银行\n# 天弘瑞利分级债券\n# 天弘余额宝货币\n# 天弘创业板\n# 天弘添利债券LOF\n# 天弘中证电子\n# 天弘中证100\n# 天弘云端生活优选\n# 天弘周期策略\n# 天弘上证50\n# 天弘中证证券保险\n# 天弘中证医药100\n# 天弘普惠养老保本\n# 天弘惠利混合\n# 天弘中证800\n# 天弘同利分级债券\n# 天弘中证计算机\n# 天弘中证全指房地产\n# 天弘沪深300\n# 天弘中证环保产业\n# 天弘永定价值成长" }, { "alpha_fraction": 0.8086956739425659, "alphanum_fraction": 0.8086956739425659, "avg_line_length": 37.33333206176758, "blob_id": "d68dd149f2c77ebf7c6c6bb37f37769f425c70c0", "content_id": "e393fe29d266d6ba031ea7c0f593b4f78cea632e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 115, "license_type": "no_license", "max_line_length": 63, "num_lines": 3, "path": "/README.md", "repo_name": "coderwyc/tianhongfund", "src_encoding": "UTF-8", "text": "# tianhongfund\ncreate a class Fund for simulation;\nwe can use it to calculate profits by looking up a fund name\n" }, { "alpha_fraction": 0.6133942008018494, "alphanum_fraction": 0.6529680490493774, "avg_line_length": 31.899999618530273, "blob_id": "8cb468f39dafff4bb15862224ded575da036ce58", "content_id": "fbf0401cc0856a8519b6934e951891c725b63434", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 693, "license_type": "no_license", "max_line_length": 101, "num_lines": 20, "path": "/regex_match.py", "repo_name": "coderwyc/tianhongfund", "src_encoding": "UTF-8", "text": "#_*_coding:utf-8_*_\n# regex-match the fund code, fund NAV and update time\nfrom urllib2 import urlopen\nimport re\n\nurls=[\"http://fund.eastmoney.com/000051.html\",\n    \"http://fund.eastmoney.com/213008.html\",\n    \"http://fund.eastmoney.com/000173.html\",\n    \"http://fund.eastmoney.com/000477.html\"]\n\nfind_re = re.compile(r'<div id=\"statuspzgz\" class=\"fundpz\"><span class=\".+?\">(.+?)</span>',re.DOTALL)\nhtml_re = re.compile(r'http://fund.eastmoney.com/(.+?).html',re.DOTALL)\ntime_re = re.compile(r'<p class=\"time\">(.+?)</p>',re.DOTALL)\n\nfor url in urls: \n    html=urlopen(url).read()\n    print html_re.findall(url)\n    print find_re.findall(html)\n    print time_re.findall(html)\n    print ''" } ]
5
DewangPatil30/Check_Emotions
https://github.com/DewangPatil30/Check_Emotions
2aa2614ab426ab83bf5d8f0d066bcc7628dafc4c
4587a8c2532220c435a495453e56ac6516c17a36
665076410b61f9f91683e231652d8fa0294b48bb
refs/heads/main
2023-06-09T06:26:48.825432
2021-06-27T19:06:53
2021-06-27T19:06:53
380,801,472
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.43656766414642334, "alphanum_fraction": 0.44752049446105957, "avg_line_length": 33.28571319580078, "blob_id": "c408c883781093e013ab59f75ac4d2f90d399d4e", "content_id": "2b4b013f7855daf5349e49bd20d513ba9f1e21c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14882, "license_type": "no_license", "max_line_length": 205, "num_lines": 434, "path": "/appMain.py", "repo_name": "DewangPatil30/Check_Emotions", "src_encoding": "UTF-8", "text": "\n# Importing the libraries........\n\nimport pickle\nfrom dash_html_components.Button import Button\nfrom dash_html_components.Div import Div\nfrom dash_html_components.Section import Section\nfrom dash_html_components.Span import Span\nfrom dash_html_components.Strong import Strong\nimport pandas as pd\nimport webbrowser\nimport dash\nimport dash_html_components as html\nimport dash_core_components as dcc\nimport dash_bootstrap_components as dbc\nimport plotly.express as px\n\nfrom dash.dependencies import Input, Output, State\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\nimport nltk\nfrom nltk.corpus import stopwords\nimport header_footer as h\nnltk.download('stopwords')\n\n################################################################ GLOBAL VARIABLES ######################################################################################\n\nproject_name = None\nbs = 'https://cdn.jsdelivr.net/npm/[email protected]/dist/slate/bootstrap.min.css'\n\napp = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP], suppress_callback_exceptions=True)\nh.takeApp(app)\n\ndf = pd.read_csv('predictions.csv')\ndf2 = pd.read_csv('ScrapedReviews.csv')\nfeats = df2.iloc[:,-1].sample(2000)\n\nwords = None\n\nheader =h.create_header()\nfooter = h.create_footer()\n\n\n################################################################ GLOBAL VARIABLES ######################################################################################\n\n######################################################################################################################################################\n\ndef create_app_ui():\n\n main_layout = html.Div(\n [\n html.Div([\n header,\n\n html.Div([\n html.Div([\n html.H1(id='main_title', className='checkEmotions', children='Check Emotions'), \n ], className='firstDiv'),\n \n html.Div([\n html.H2(id='site_details', className='Site_details', children='Sentiment Analysis using Machine Learning and Deep Learning'), \n ], className='secondDiv'),\n \n ], className='front_parent_div'),\n\n ],className='bg_image'),\n\n html.Section(children=[\n html.Div([\n\n html.P(['.'], id='dot', className='dot'),\n html.Div([\n \n dcc.Dropdown(\n id='my_dropdown',\n options=[\n {'label': 'status', 'value': 'status'}\n ],\n value='status',\n multi=False,\n clearable=False,\n ),\n\n html.Div([\n\n html.H1([\n 'Pie chart showing the percent of +ve and -ve reviews' \n ], id='chart_title', className=\"chart_title\"),\n \n dcc.Graph(id='graph', className='graph')\n \n ], id='graph_div', className='graph_div'),\n\n html.H2(['Review Stats Pie'], className='stats')\n \n \n ], id='pie', className='pie'),\n\n html.Div([\n html.Div([\n html.H1(id='pie_title', className='pie_title', children=['What does these graph tells us ?']),\n ]),\n html.Span(id='span'),\n html.Div([\n html.P(id='pie_Details', className='pie_details', \n children=['By the data given by the pie-chart made by the predictions of the review we 
find that 92.1 percent of the reviews from the site, ', \n html.Strong('i.e 9,62,148 reviews are Positive '), 'rest remaining 20%, ' ,\n html.Strong('i.e 2,42,148 reviews are Negative')\n \n ]),\n ]),\n html.H2(['Your site', \n html.A( children=[\" Etsy.com \"], href='https://www.etsy.com/', className='etsy', target='_blank'), \n 'is doing great job satisfying customers !']\n \n , id='site_review', className='site_review'),\n\n\n ], id='pie_info', className='pie_info'),\n\n ], id='pie_main', className='pie_main'),\n ], id='pie_extreme_main', className='pie_extreme_main'),\n\n \n html.Section([\n\n html.Div([\n html.H1(['Review Check'], id='revCheck')\n ], id='revCheckDiv'),\n\n html.Section([\n\n html.Div([\n\n html.H1(['Check Review '], id='check_rev_heading1'),\n \n html.P([html.Strong(['This feature allows you to test our '+ \n 'model i.e Predictor if it is working fine or not. ' + \n 'Just enter any review and click on submit now than our model ' +\n 'will tell the review is Positive or Negative.'])], id='rev_para1'),\n \n ], id='text_info1'),\n\n html.Div([\n html.Div([\n html.H3(['Review Status Below !!!'], id='review_status'),\n dbc.Input(\n id='textarea_review',\n placeholder = 'Enter the review here....',\n type = \"text\",\n style = {'width':'100%', 'height':50}\n ),\n\n dbc.Button(['Check Review'], id='check_review', className='check_review', n_clicks=0)\n ]),\n html.H1(id='result', children=None), \n \n ], id='text_area1', className='text_area1'),\n\n ], id='review_section1', className='review_section1'),\n\n html.Section([\n\n html.Div([\n html.H3(['Choose Any Review !!!'], id='revDrop'),\n dcc.Dropdown(\n id='rev_drop',\n options=[{'label': i, 'value': i} for i in feats],\n value=None,\n multi=False,\n clearable=True,\n style={\"width\": \"100%\"},\n optionHeight = 150,\n placeholder= 'Select Any Review From Drop...'\n ),\n\n dbc.Button(['Check Review'], id='check_drop', className='check_drop', n_clicks=0),\n html.H1(id='result2', children=None)\n\n ], id='text_area2', className='text_area2'),\n \n html.Div([\n \n html.H1(['Review Dropdown'], id='check_rev_heading2', className='check_rev_heading2'),\n \n html.P([html.Strong(['This is another feature which allows you to test the Predictor model' + \n ' by choosing the review from the Dropdown menu.' +\n ' Choose review and than click on check now.' +\n ' You will see result of your Review.'])], id='rev_para2'),\n\n ], id='text_info2', className='text_info2' ),\n \n ], id='review_section2', className='review_section2'),\n\n ], id='review_sec', className='review_sec'),\n\n\n html.Div([\n html.H1(['Word Cloud'], id='wordCloud')\n ], id='wordCloudDiv'),\n\n html.Div([\n\n html.Div([\n html.H2(['20 Most Frequent Words '], className='cloud_head'),\n html.H2(['with Count'], className='cloud_head_part2'),\n\n html.Div([\n \n html.Div([\n html.H5([str(words[i][0])], className='word'), \n html.P([str(words[i][1])], className='word_count') \n ], className='word_wraper') for i in range(20)\n\n ], className='words_container'), \n\n\n ], className='left_sec'),\n html.Div([\n\n html.H2([\n 'Wondering how we got these words ?'\n ], className='cloud_info_heading'),\n\n html.P([\n 'To accomplish this task we took the help of the ',\n html.A(['Natural Language Toolkit aka NLTK .'],href='https://www.nltk.org/', className='nltk') , \n ' By the use of it, we removed all the unuseful words and kept useful words only. 
' , \n 'Later by the use of some simple functions like counter we extracted Top 20 words'\n ], className='cloud_info_para')\n\n ], className='cloud_info_div')\n\n ], className='cloud'),\n\n html.Div([\n\n html.Span(id='cloud_span1'),\n html.Div ([\n \n html.Div([\n html.H1(['452300+'], className='count'),\n html.P(['Words'], className='count_word'),\n ], className='count_wrap'),\n\n html.Div([\n html.H1(['50000+'], className='count'),\n html.P(['Usefull Words'], className='count_word'),\n ], className='count_wrap'),\n\n html.Div([\n html.H1(['20'], className='count'),\n html.P(['Words shown here'], className='count_word'),\n ], className='count_wrap'),\n ], className='count_external_wrapper'),\n html.Span(id='cloud_span2')\n ], className='counter'),\n\n\n footer\n \n ], className= 'main_div')\n \n \n return main_layout\n\n###################################################################### APP UI ENDS HERE ################################################################################\n\n\n################################################################### FUNCTIONS DEFINED HERE ##################################################################################################################\n\n# Defining My Functions\ndef load_model():\n global scrappedReviews\n scrappedReviews = pd.read_csv('balanced_reviews.csv')\n \n global pickle_model\n file = open(\"model_pickle.pkl\", 'rb') \n pickle_model = pickle.load(file)\n\n global vocab\n file = open(\"vocab_pickle.pkl\", 'rb') \n vocab = pickle.load(file)\n \n\n\ndef check_review(reviewText):\n\n transformer = TfidfTransformer()\n loaded_vec = CountVectorizer(decode_error=\"replace\",vocabulary=vocab)\n vectorised_review = transformer.fit_transform(loaded_vec.fit_transform([reviewText]))\n \n return pickle_model.predict(vectorised_review)\n\n\n\ndef open_browser():\n webbrowser.open_new('http://192.168.182.56:8050/ ')\n\n\n\[email protected](\n Output(component_id='graph', component_property='figure'),\n [Input(component_id='my_dropdown', component_property='value')]\n)\n\ndef pie_chart(my_dropdown):\n \n \n dff = df\n piechart = px.pie(\n data_frame=dff,\n names=my_dropdown,\n color_discrete_sequence=['darkorchid', 'black'],\n\n )\n \n '''\n piechart.update_traces(textposition='outside', \n textinfo='percent+label', \n marker=dict(line=dict(color='#00000', width=4)),\n pull=[0.2, 0],\n opacity = 0.7\n )\n \n '''\n return (piechart)\n \n \[email protected](\n Output('result2', 'children' ),\n [\n Input('check_drop', 'n_clicks')\n ],\n [State('rev_drop', 'value')]\n )\ndef update_app_ui2(dropN, rev_drop):\n print('Data Type of ', str(type(rev_drop)))\n print('Value = ', str(rev_drop) )\n\n response = check_review(rev_drop)\n print('response = ', response)\n\n if dropN > 0 :\n if (response[0] == 0):\n result1 = 'Sorry! The review is Negative'\n elif (response[0] == 1):\n result1 = 'Hurray! The review is Positive'\n else:\n result1 = 'Unknown'\n \n if rev_drop: \n return result1\n else:\n return None\n\n\[email protected](\n Output('result', 'children' ),\n [Input('check_review', 'n_clicks')], \n [State('textarea_review', 'value')]\n )\ndef update_app_ui(n, textarea_value):\n print('Data Type of ', str(type(textarea_value)))\n print('Value = ', str(textarea_value) )\n\n response = check_review (textarea_value)\n print('response = ', response)\n\n if n > 0:\n if (response[0] == 0):\n result1 = 'Sorry! The review is Negative'\n elif (response[0] == 1):\n result1 = 'Hurray! 
The review is Positive'\n else:\n result1 = 'Unknown'\n \n return result1\n\n\n# MOST UESD WORDS FUNCTIONS:-\n\ndef get_top_n_words(corpus):\n \n vec = CountVectorizer().fit(corpus)\n bag_of_words = vec.transform(corpus)\n sum_words = bag_of_words.sum(axis=0) \n words_freq = [(word, sum_words[0, idx]) for word, idx in vec.vocabulary_.items() if word not in stopwords.words('english')]\n words_freq =sorted(words_freq, key = lambda x: x[1], reverse=True) \n \n return words_freq[:21]\n\n################################################################### FUNCTIONS ENDS HERE ##################################################################################################################\n\n################################################################### MAIN FUNCTION HERE ##################################################################################################################\n\n\n# Main Function to control the Flow of your Project\ndef main():\n print(\"Start of my project\")\n \n load_model() \n global words\n global df2\n \n words = get_top_n_words(df2[\"Review\"])\n \n global project_name\n global scrappedReviews\n global app\n \n \n project_name = \"Sentiment Analysis with Insights\"\n \n app.title = project_name\n app.layout = create_app_ui()\n open_browser()\n app.run_server(host='0.0.0.0', port=8050)\n\n \n print(\"End of my project\")\n project_name = None\n scrappedReviews = None\n app = None\n\n\n################################################################### MAIN FUNCTIONS CALLING ##################################################################################################################\n\n\n# Calling the main function \nif __name__ == '__main__':\n main()\n \n \n################################################################### END OF APP ##################################################################################################################\n\n" }, { "alpha_fraction": 0.5216836929321289, "alphanum_fraction": 0.5331632494926453, "avg_line_length": 34.6363639831543, "blob_id": "49d91a3140030a4de4dd6d3c88283c093138cde9", "content_id": "15c4e33f2590749c173f165da20283634985c09e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2353, "license_type": "no_license", "max_line_length": 110, "num_lines": 66, "path": "/header_footer.py", "repo_name": "DewangPatil30/Check_Emotions", "src_encoding": "UTF-8", "text": "import dash\nimport dash_html_components as html\nimport dash_core_components as dbc\n\n\nappClone = None\n\ndef takeApp(clone):\n global appClone\n appClone = clone\n\ndef create_header():\n header_layout=html.Div(className='hfeed site', id='page',children=[\n html.Header(className='header',id='masthead', children=[\n html.Div(className='header-div', children=[ \n html.A(href='#dot',className='active',children=\"Let's Go\"),\n html.A(href='#revCheckDiv',children='Review Check'),\n html.A(href='#wordCloudDiv',children='Word Cloud'),\n html.A(href='#footSec2',children='Contact Me')\n ])\n ])\n ])\n \n return header_layout\n\ndef create_footer():\n\n footer_layout=html.Footer(className='site-footer', id='colophon', children=[\n\n html.Section([\n html.Div([\n # html.Img(src='data:image/png;base64,{}'.format(encoded_image), className='img'),\n html.P(['Lakshmi Narain College of Technology (LNCT), '\n , 'Kalchuri Nagar, Raisen Rd, Bhopal, Madhya Pradesh (MP)'\n ], className='clg')\n ], className='fDiv1'),\n\n html.Div([\n html.A(['[email protected]'], \n href='mailto: [email protected]', \n 
className='mail'),\n \n html.A(['HackerRank @dewangpatil30081'], \n href='https://www.hackerrank.com/dewangpatil30081?hr_r=1',\n className='hackerrank', target='_blank'),\n html.A(['GitHub'], href='https://github.com/dewangpatil30', className='git', target='_blank'),\n ], className='fDiv2'),\n\n html.Div([\n html.H3(['Navigation'], className='nav'),\n \n html.A(href='#masthead',className='home',children=\"Home\"),\n html.A(href='#revCheckDiv',children='Review Check'),\n html.A(href='#wordCloudDiv',children='Word Cloud'),\n html.A(href='#site-footer',children='Contact Me')\n ], className='fDiv3')\n ], className='footSec1'),\n\n html.Section([\n html.P(['Copyright ยฉ 2021 Check Emotions | Created by Dewang Patil'],\n className='copyRight')\n ], className='footSec2', id='footSec2')\n\n ])\n\n return footer_layout\n" } ]
2
SelahattinAksoy/Machine_ERP
https://github.com/SelahattinAksoy/Machine_ERP
13c6875503f1558497ab76b891311c05aa02d62b
c5ea2ea6abfb193b5b16b3b711e228aed7736a4f
d92951438aad41fb133c12e6f12928f070e32a32
refs/heads/master
2020-09-16T03:19:43.678731
2019-11-23T18:33:29
2019-11-23T18:33:29
222,733,837
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6985573172569275, "alphanum_fraction": 0.7046317458152771, "avg_line_length": 31.09756088256836, "blob_id": "41e6d07975d86dbf0f496ca48895dd1dea8e033c", "content_id": "f4cc58ce34b12883b4ae3f4ce952c14193f73e15", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1317, "license_type": "no_license", "max_line_length": 77, "num_lines": 41, "path": "/Machine_ERP/urls.py", "repo_name": "SelahattinAksoy/Machine_ERP", "src_encoding": "UTF-8", "text": "\"\"\"Machine_ERP URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom djgeojson.views import GeoJSONLayerView\nfrom ERP.views import index\nfrom ERP.views import mainpage\nfrom ERP.views import register\nfrom ERP.views import map\nfrom ERP.views import databases\nfrom ERP.views import logout\nfrom ERP.views import notes\nfrom ERP.views import chart\nfrom ERP.views import speech\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', index),\n path('mainpage/', mainpage),\n path('register/', register),\n path('map/', map),\n path('databases/', databases),\n path('logout/', logout),\n path('note/', notes),\n path('chart/', chart),\n path('speech/', speech),\n\n\n]\n\n" }, { "alpha_fraction": 0.5237226486206055, "alphanum_fraction": 0.5620437860488892, "avg_line_length": 23.909090042114258, "blob_id": "d1165b78ccbd4cf2e8b7ab3664b2a80385ab6d06", "content_id": "57ec2479871b751da062c2cb09baf5598279405c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 548, "license_type": "no_license", "max_line_length": 114, "num_lines": 22, "path": "/ERP/migrations/0001_initial.py", "repo_name": "SelahattinAksoy/Machine_ERP", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.2 on 2019-11-22 17:52\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='note',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('notehead', models.CharField(max_length=100)),\n ('note', models.CharField(max_length=300)),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.5590564608573914, "alphanum_fraction": 0.6472107172012329, "avg_line_length": 34.85802459716797, "blob_id": "7fa6dd7dd56fa0295178bb1c37bd72ef69d8ba57", "content_id": "47029225c24d618cd335d19ee13c931df940cc1c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 5809, "license_type": "no_license", "max_line_length": 119, "num_lines": 162, "path": "/pages/map.html", "repo_name": "SelahattinAksoy/Machine_ERP", "src_encoding": "UTF-8", "text": "{% if user.is_authenticated %}\n<html>\n\n<body>\n\n{% include 'bars.html' %}\n\n\n\n\n<div style=\"margin-left:100px; height:888px; max-height: 85vh;\" class=\"pre-scrollable\" >\n\n\n<svg 
width=\"200\" height=\"200\">\n <rect x=\"50\" y=\"20\" width=\"150\" height=\"150\" style=\"fill:black;stroke-width:5;fill-opacity:0.1;stroke-opacity:0.9\" />\n <text fill=\"#ffffff\" font-size=\"45\" font-family=\"Verdana\"\n x=\"77\" y=\"109\"><a href=\"#\"> SVG</a></text>\n Sorry, your browser does not support inline SVG.\n</svg>\n\n<svg width=\"200\" height=\"200\">\n <rect x=\"50\" y=\"20\" width=\"150\" height=\"150\" style=\"fill:black;stroke-width:5;fill-opacity:0.1;stroke-opacity:0.9\" />\n <text fill=\"#ffffff\" font-size=\"45\" font-family=\"Verdana\"\n x=\"77\" y=\"109\">SVG</text>\n Sorry, your browser does not support inline SVG.\n</svg>\n\n\n<svg width=\"200\" height=\"200\">\n <rect x=\"50\" y=\"20\" width=\"150\" height=\"150\" style=\"fill:black;stroke-width:5;fill-opacity:0.1;stroke-opacity:0.9\" />\n <text fill=\"#ffffff\" font-size=\"45\" font-family=\"Verdana\"\n x=\"77\" y=\"109\">SVG</text>\n Sorry, your browser does not support inline SVG.\n</svg>\n\n<svg width=\"200\" height=\"200\">\n <rect x=\"50\" y=\"20\" width=\"150\" height=\"150\" style=\"fill:black;stroke-width:5;fill-opacity:0.1;stroke-opacity:0.9\" />\n <text fill=\"#ffffff\" font-size=\"45\" font-family=\"Verdana\"\n x=\"77\" y=\"109\">SVG</text>\n Sorry, your browser does not support inline SVG.\n</svg>\n\n\n<svg width=\"200\" height=\"200\">\n <rect x=\"50\" y=\"20\" width=\"150\" height=\"150\" style=\"fill:black;stroke-width:5;fill-opacity:0.1;stroke-opacity:0.9\" />\n <text fill=\"#ffffff\" font-size=\"45\" font-family=\"Verdana\"\n x=\"77\" y=\"109\">SVG</text>\n Sorry, your browser does not support inline SVG.\n</svg>\n\n\n<svg width=\"200\" height=\"200\">\n <rect x=\"50\" y=\"20\" width=\"150\" height=\"150\" style=\"fill:black;stroke-width:5;fill-opacity:0.1;stroke-opacity:0.9\" />\n <text fill=\"#ffffff\" font-size=\"45\" font-family=\"Verdana\"\n x=\"77\" y=\"109\">SVG</text>\n Sorry, your browser does not support inline SVG.\n</svg>\n\n\n<svg width=\"200\" height=\"180\">\n <rect x=\"50\" y=\"20\" width=\"150\" height=\"150\" style=\"fill:black;stroke-width:5;fill-opacity:0.1;stroke-opacity:0.9\" />\n <text fill=\"#ffffff\" font-size=\"45\" font-family=\"Verdana\"\n x=\"77\" y=\"109\">SVG</text>\n Sorry, your browser does not support inline SVG.\n</svg>\n\n\n<svg width=\"200\" height=\"180\">\n <rect x=\"50\" y=\"20\" width=\"150\" height=\"150\" style=\"fill:black;stroke-width:5;fill-opacity:0.1;stroke-opacity:0.9\" />\n <text fill=\"#ffffff\" font-size=\"45\" font-family=\"Verdana\"\n x=\"77\" y=\"109\">SVG</text>\n Sorry, your browser does not support inline SVG.\n</svg>\n\n\n<svg width=\"200\" height=\"200\">\n <rect x=\"50\" y=\"20\" width=\"150\" height=\"150\" style=\"fill:black;stroke-width:5;fill-opacity:0.1;stroke-opacity:0.9\" />\n <text fill=\"#ffffff\" font-size=\"45\" font-family=\"Verdana\"\n x=\"77\" y=\"109\">SVG</text>\n Sorry, your browser does not support inline SVG.\n</svg>\n\n<svg width=\"200\" height=\"200\">\n <rect x=\"50\" y=\"20\" width=\"150\" height=\"150\" style=\"fill:black;stroke-width:5;fill-opacity:0.1;stroke-opacity:0.9\" />\n <text fill=\"#ffffff\" font-size=\"45\" font-family=\"Verdana\"\n x=\"77\" y=\"109\">SVG</text>\n Sorry, your browser does not support inline SVG.\n</svg>\n\n\n<svg width=\"200\" height=\"200\">\n <rect x=\"50\" y=\"20\" width=\"150\" height=\"150\" style=\"fill:black;stroke-width:5;fill-opacity:0.1;stroke-opacity:0.9\" />\n <text fill=\"#ffffff\" font-size=\"45\" font-family=\"Verdana\"\n x=\"77\" y=\"109\"><a href=\"#\"> 
SVG</a></text>\n  Sorry, your browser does not support inline SVG.\n</svg>\n\n<svg width=\"200\" height=\"200\">\n  <rect x=\"50\" y=\"20\" width=\"150\" height=\"150\" style=\"fill:black;stroke-width:5;fill-opacity:0.1;stroke-opacity:0.9\" />\n  <text fill=\"#ffffff\" font-size=\"45\" font-family=\"Verdana\"\n        x=\"77\" y=\"109\"><a href=\"#\"> SVG</a></text>\n  Sorry, your browser does not support inline SVG.\n</svg>\n\n<svg width=\"200\" height=\"200\">\n  <rect x=\"50\" y=\"20\" width=\"150\" height=\"150\" style=\"fill:black;stroke-width:5;fill-opacity:0.1;stroke-opacity:0.9\" />\n  <text fill=\"#ffffff\" font-size=\"45\" font-family=\"Verdana\"\n        x=\"77\" y=\"109\"><a href=\"#\"> SVG</a></text>\n  Sorry, your browser does not support inline SVG.\n</svg>\n\n<svg width=\"200\" height=\"200\">\n  <rect x=\"50\" y=\"20\" width=\"150\" height=\"150\" style=\"fill:black;stroke-width:5;fill-opacity:0.1;stroke-opacity:0.9\" />\n  <text fill=\"#ffffff\" font-size=\"45\" font-family=\"Verdana\"\n        x=\"77\" y=\"109\"><a href=\"#\"> SVG</a></text>\n  Sorry, your browser does not support inline SVG.\n</svg>\n\n<svg width=\"200\" height=\"200\">\n  <rect x=\"50\" y=\"20\" width=\"150\" height=\"150\" style=\"fill:black;stroke-width:5;fill-opacity:0.1;stroke-opacity:0.9\" />\n  <text fill=\"#ffffff\" font-size=\"45\" font-family=\"Verdana\"\n        x=\"77\" y=\"109\"><a href=\"#\"> SVG</a></text>\n  Sorry, your browser does not support inline SVG.\n</svg>\n\n<svg width=\"200\" height=\"200\">\n  <rect x=\"50\" y=\"20\" width=\"150\" height=\"150\" style=\"fill:black;stroke-width:5;fill-opacity:0.1;stroke-opacity:0.9\" />\n  <text fill=\"#ffffff\" font-size=\"45\" font-family=\"Verdana\"\n        x=\"77\" y=\"109\"><a href=\"#\"> SVG</a></text>\n  Sorry, your browser does not support inline SVG.\n</svg>\n</div>\n\n\n\n\n\n\n\n</body>\n</html>\n\n{% else %}\n<p>You are not authorized to view this page.</p>\n{% endif %}" }, { "alpha_fraction": 0.6809523701667786, "alphanum_fraction": 0.6952381134033203, "avg_line_length": 23.44444465637207, "blob_id": "dddbed33b793e111daa087bcaf51261a531f7d3", "content_id": "aea6ef519d4e81a699a97da85ed78e98f3119a96", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 210, "license_type": "no_license", "max_line_length": 45, "num_lines": 9, "path": "/ERP/models.py", "repo_name": "SelahattinAksoy/Machine_ERP", "src_encoding": "UTF-8", "text": "from django.db import models\n\n# Create your models here.\n\nclass note(models.Model):\n    notehead=models.CharField(max_length=100)\n    note=models.TextField()\n    def __str__(self):\n        return self.notehead" }, { "alpha_fraction": 0.6414276361465454, "alphanum_fraction": 0.6474316120147705, "avg_line_length": 27.29245376586914, "blob_id": "67f54a706c3a97ba3455354534939912ed7c8c86", "content_id": "24ff212e81665cc281cf90b2becf3fc2a523f5bb", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3005, "license_type": "no_license", "max_line_length": 102, "num_lines": 106, "path": "/ERP/views.py", "repo_name": "SelahattinAksoy/Machine_ERP", "src_encoding": "UTF-8", "text": "from django.shortcuts import render,redirect\nfrom django.contrib.auth.models import User,auth\nfrom django.contrib.auth.decorators import login_required\n\nfrom .models import note\nfrom django.contrib import messages\nimport speech_recognition as sr\nfrom gtts import gTTS\nimport os\n# Create your views here.\n\n\ndef index(request):\n\n return render(request,\"index.html\")\n\n\ndef mainpage(request):\n\n#login page we making autontikastion for login\n if request.method==\"POST\":\n val1 = request.POST[\"username\"]\n val2 = request.POST[\"password\"]\n\n user=auth.authenticate(username=val1,password=val2)\n\n if user is not None:\n auth.login(request,user)\n return render(request, \"mainpage.html\")\n else:\n return redirect(\"/\")\n else:\n return redirect(\"/\")\n\n\n#register page dรผzenlemeler yap\ndef register(request):\n if request.method == \"POST\":\n val1 = request.POST[\"username\"]\n val2 = request.POST[\"password\"]\n val3 = request.POST[\"repassword\"]\n\n if val2==val3 :\n if User.objects.filter(username=val1).exists():\n return render(request, \"register.html\")\n user=User.objects.create_user(username=val1,password=val2)\n user.save();\n return redirect(\"/\")\n else:\n return render(request, \"register.html\")\n\n else:\n return render(request, \"register.html\")\n\n@login_required(login_url=\"/mainpage/\") #buuuuuu รงohhhhh รถnemli important requirement resmen aq :) :)\ndef map(request):\n return render(request, \"map.html\",{\"numbers\":range(3)})\n\n@login_required(login_url=\"/mainpage/\")\ndef databases(request):\n return render(request, \"databases.html\")\n\n@login_required(login_url=\"/mainpage/\")\ndef logout(request):\n auth.logout(request)\n return redirect(\"/\")\n@login_required(login_url=\"/mainpage/\")\ndef notes(request):\n all_note=note.objects.all()\n if request.method == \"POST\":\n val1 = request.POST[\"notehead\"]\n val2 = request.POST[\"note\"]\n save_note=note(notehead=val1,note=val2)\n save_note.save()\n return render(request, \"note.html\", {\"notes\": all_note})\n else:\n return render(request, \"note.html\",{\"notes\": all_note})\n\n@login_required(login_url=\"/mainpage/\")\ndef chart(request):\n return render(request,\"chart.html\")\n\n@login_required(login_url=\"/mainpage/\")\ndef speech(request):\n text=\"lan biลŸeyler sรถyle\"\n language = \"tr\"\n if request.method == \"POST\":\n r=sr.Recognizer()\n with sr.Microphone() as source:\n audio=r.listen(source)\n\n try:\n text=r.recognize_google(audio,language=\"tr\")\n output = gTTS(text=text, lang=language, slow=False)\n output.save(\"C:/Users/selah/Desktop/TEZ/Machine-ERP/Machine_ERP/static/sound/out.mp3\")\n\n return render(request, \"speech.html\", {\"text\": text})\n except:\n text=\"biลŸeyler sรถyle\"\n\n\n return render(request,\"speech.html\",{\"text\":text})\n\n\n\n#os.system(\"start out.mp3\")" } ]
5
puri-datacafe/CheckyBot
https://github.com/puri-datacafe/CheckyBot
5138384ab7a5553d10aea0fffe619371c5bd252f
772d5bfa9bf0e568571bf2678bd26c6523f0ef3f
8ea49598c3390d16de9576e3d5c3d72f8f653ed3
refs/heads/master
2022-09-23T06:17:00.108558
2020-05-30T18:30:58
2020-05-30T18:30:58
268,134,812
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6428571343421936, "alphanum_fraction": 0.6428571343421936, "avg_line_length": 11, "blob_id": "93b53f7917575bd5621ce49b8120a3ac050ac076", "content_id": "a09e9d99b0bca6fd4d12fde5b1bdffd8578eba4a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 14, "license_type": "no_license", "max_line_length": 11, "num_lines": 1, "path": "/README.md", "repo_name": "puri-datacafe/CheckyBot", "src_encoding": "UTF-8", "text": "# CheckyBot\n \n" }, { "alpha_fraction": 0.584541380405426, "alphanum_fraction": 0.6014593243598938, "avg_line_length": 46.43338394165039, "blob_id": "8a0a71128f83c8714c8eca152827b24093340cae", "content_id": "735fdd4fe2ad909cbf7d071a6f1db2eaf6170ffc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 33519, "license_type": "no_license", "max_line_length": 301, "num_lines": 653, "path": "/main.py", "repo_name": "puri-datacafe/CheckyBot", "src_encoding": "UTF-8", "text": "import json\nimport os\nfrom flask import Flask\nfrom flask import request\nfrom flask import make_response\nfrom flask import redirect, url_for\nimport matplotlib\nmatplotlib.use('Agg')\nimport time\nimport pandas as pd\nimport numpy as np\nimport datetime\nimport matplotlib.pyplot as plt\nfrom linebot import (LineBotApi, WebhookHandler)\nfrom linebot.models import (\n ImageSendMessage,\n TextSendMessage\n)\n# from mlxtend.frequent_patterns import apriori, association_rules \n# from gcloudconnector import BigQueryConnector\n# config_file_path = 'config/config.json'\n# gbq_connector = BigQueryConnector()\n# gbq_connector.init_conn_with_config_file(config_file_path)\n\n# def supermarket_data(path):\n# sql_code = f\"SELECT * FROM {path}\"\n# query_data = gbq_connector.query(sql_code, output_type='df')\n# return query_data\n\n# path = '`superstore.supermarket_data`'\n# df = supermarket_data(path)\ndf = pd.read_csv('supermarket_data.csv')\ndf['SHOP_DATE'] = pd.to_datetime(df['SHOP_DATE'], format = '%Y%m%d')\n\napp = Flask(__name__)\nline_bot_api = LineBotApi('')\nhandler = WebhookHandler('')\nwith open('config/ngrok.json') as f:\n config = json.load(f)\n\[email protected]('/', methods=['POST'])\n# ----- Main Function -----\ndef MainFunction():\n #Getting intent from Dailogflow\n question_from_dailogflow_raw = request.get_json(silent=True, force=True)\n #Call generating_answer function to classify the question\n answer_from_bot = generating_answer(question_from_dailogflow_raw)\n #Make a respond back to Dailogflow\n r = make_response(answer_from_bot)\n #Setting Content Type\n r.headers['Content-Type'] = 'application/json' \n return r\n\n# ----- Generate Answer -----\ndef generating_answer(question_from_dailogflow_dict):\n #Print intent that recived from dialogflow.\n print(json.dumps(question_from_dailogflow_dict, indent=4 ,ensure_ascii=False))\n #Getting intent name form intent that recived from dialogflow.\n intent_group_question_str = question_from_dailogflow_dict[\"queryResult\"][\"intent\"][\"displayName\"] \n #Select function for answering question\n if intent_group_question_str == 'เธขเธญเธ”เธ‚เธฒเธขเธฃเธงเธกเธขเน‰เธญเธ™เธซเธฅเธฑเธ‡ x เธงเธฑเธ™':\n answer_str = sales_1(question_from_dailogflow_dict)\n elif intent_group_question_str == 'เธขเธญเธ”เธ‚เธฒเธขเธฃเธงเธกเธ‚เธญเธ‡เธงเธฑเธ™เธ—เธตเนˆ xxxx-xx-xx':\n answer_str = sales_2(question_from_dailogflow_dict)\n elif intent_group_question_str == 'เธขเธญเธ”เธ‚เธฒเธขเธฃเธงเธกเธฃเธฐเธซเธงเนˆเธฒเธ‡เธงเธฑเธ™เธ—เธตเนˆ xxxx-xx-xx เธ–เธถเธ‡ 
xxxx-xx-xx':\n answer_str = sales_3(question_from_dailogflow_dict)\n elif intent_group_question_str == 'เธขเธญเธ”เธ‚เธฒเธขเธฃเธงเธกเธขเน‰เธญเธ™เธซเธฅเธฑเธ‡ x เธงเธฑเธ™ เธ‚เธญเธ‡เธฃเน‰เธฒเธ™เธ—เธตเนˆ x x':\n answer_str = sales_4(question_from_dailogflow_dict)\n elif intent_group_question_str == 'เธขเธญเธ”เธ‚เธฒเธขเธฃเธงเธกเธ‚เธญเธ‡เธงเธฑเธ™เธ—เธตเนˆ xxxx-xx-xx เธ‚เธญเธ‡เธฃเน‰เธฒเธ™เธ—เธตเนˆ x':\n answer_str = sales_5(question_from_dailogflow_dict)\n elif intent_group_question_str == 'เธขเธญเธ”เธ‚เธฒเธขเธฃเธงเธกเธ‚เธญเธ‡เธงเธฑเธ™เธ—เธตเนˆ xxxx-xx-xx เธ‚เธญเธ‡เธฃเน‰เธฒเธ™เธ—เธตเนˆ x x':\n answer_str = sales_6(question_from_dailogflow_dict)\n elif intent_group_question_str == 'เธขเธญเธ”เธ‚เธฒเธขเธฃเธงเธกเธฃเธฐเธซเธงเนˆเธฒเธ‡เธงเธฑเธ™เธ—เธตเนˆ xxxx-xx-xx เธ–เธถเธ‡ xxxx-xx-xx เธ‚เธญเธ‡เธฃเน‰เธฒเธ™เธ—เธตเนˆ x':\n answer_str = sales_7(question_from_dailogflow_dict)\n elif intent_group_question_str == 'เธขเธญเธ”เธ‚เธฒเธขเธฃเธงเธกเธฃเธฐเธซเธงเนˆเธฒเธ‡เธงเธฑเธ™เธ—เธตเนˆ xxxx-xx-xx เธ–เธถเธ‡ xxxx-xx-xx เธ‚เธญเธ‡เธฃเน‰เธฒเธ™เธ—เธตเนˆ x x':\n answer_str = sales_8(question_from_dailogflow_dict)\n elif intent_group_question_str == 'เธขเธญเธ”เธ‚เธฒเธขเธฃเธงเธกเธขเน‰เธญเธ™เธซเธฅเธฑเธ‡ x เธงเธฑเธ™ เธ‚เธญเธ‡เธฃเน‰เธฒเธ™เธ—เธตเนˆ x':\n answer_str = sales_9(question_from_dailogflow_dict)\n elif intent_group_question_str == 'เธชเธดเธ™เธ„เน‰เธฒเน„เธซเธ™เธ–เธนเธเธ‹เธทเน‰เธญเธšเนˆเธญเธขเธชเธธเธ” x เธญเธฑเธ™เธ”เธฑเธšเนเธฃเธ':\n answer_str = product_1(question_from_dailogflow_dict)\n elif intent_group_question_str == 'เธชเธดเธ™เธ„เน‰เธฒเน„เธซเธ™เธ–เธนเธเธ‹เธทเน‰เธญเธšเนˆเธญเธขเธชเธธเธ” x เธญเธฑเธ™เธ”เธฑเธšเนเธฃเธ เธ‚เธญเธ‡เธฃเน‰เธฒเธ™เธ—เธตเนˆ x':\n answer_str = product_2(question_from_dailogflow_dict)\n elif intent_group_question_str == 'เธˆเธณเธ™เธงเธ™เธฅเธนเธเธ„เน‰เธฒเธ—เธฑเน‰เธ‡เธซเธกเธ”':\n answer_str = customer_1(question_from_dailogflow_dict)\n elif intent_group_question_str == 'เธˆเธณเธ™เธงเธ™เธฅเธนเธเธ„เน‰เธฒเธ—เธฑเน‰เธ‡เธซเธกเธ”เธ‚เธญเธ‡เธฃเน‰เธฒเธ™เธ—เธตเนˆ x':\n answer_str = customer_2(question_from_dailogflow_dict)\n elif intent_group_question_str == 'เธชเธดเธ™เธ„เน‰เธฒเธ—เธตเนˆเธ—เธณเน€เธ‡เธดเธ™ x% เธˆเธฒเธเธขเธญเธ”เธ‚เธฒเธขเธ—เธฑเน‰เธ‡เธซเธกเธ”':\n answer_str = item_1(question_from_dailogflow_dict)\n elif intent_group_question_str == 'เธชเธดเธ™เธ„เน‰เธฒเธ—เธตเนˆเธ—เธณเน€เธ‡เธดเธ™ x% เธˆเธฒเธเธขเธญเธ”เธ‚เธฒเธขเธ—เธฑเน‰เธ‡เธซเธกเธ” เธ‚เธญเธ‡เธฃเน‰เธฒเธ™เธ—เธตเนˆ x':\n answer_str = item_2(question_from_dailogflow_dict)\n elif intent_group_question_str == 'เธชเธดเธ™เธ„เน‰เธฒเน„เธซเธ™เธ–เธนเธเธ‹เธทเน‰เธญเธ„เธนเนˆเธเธฑเธ™เธšเนˆเธญเธขเธ—เธตเนˆเธชเธธเธ”':\n answer_str = product_3(question_from_dailogflow_dict)\n else: \n answer_str = \"เธœเธกเน„เธกเนˆเน€เธ‚เน‰เธฒเนƒเธˆ เธ„เธธเธ“เธ•เน‰เธญเธ‡เธเธฒเธฃเธญเธฐเน„เธฃ\"\n #Build answer dict \n answer_from_bot = {\"fulfillmentText\": answer_str}\n #Convert dict to JSON\n answer_from_bot = json.dumps(answer_from_bot) \n return answer_from_bot\n\n# ----- Answer String -----\ndef sales_1(respond_dict):\n days = float(respond_dict[\"queryResult\"][\"outputContexts\"][0][\"parameters\"][\"days.original\"])\n answer_str = f\"เธขเธญเธ”เธ‚เธฒเธขเธขเน‰เธญเธ™เธซเธฅเธฑเธ‡ {days:.0f} เธงเธฑเธ™ เธ„เธทเธญ {total_previous_sales(df, days):,.2f} เธšเธฒเธ—เธˆเน‰เธฒ\"\n user_id = respond_dict['originalDetectIntentRequest']['payload']['data']['source']['userId']\n # Generate Graph\n image_name = each_previous_sales(df, days)\n # Response Text\n line_bot_api.push_message(user_id, ImageSendMessage(\n original_content_url=f\"{config['url']}/static/{image_name}\",\n 
preview_image_url=f\"{config['url']}/static/{image_name}\"\n ))\n return answer_str\n\ndef sales_2(respond_dict):\n date = respond_dict[\"queryResult\"][\"outputContexts\"][0][\"parameters\"][\"date.original\"]\n answer_str = f\"เธขเธญเธ”เธ‚เธฒเธขเธงเธฑเธ™เธ—เธตเนˆ {date} เธ„เธทเธญ {total_sales_on_date(df, date):,.2f} เธšเธฒเธ—เธˆเน‰เธฒ\"\n return answer_str\n\ndef sales_3(respond_dict):\n date_start = respond_dict[\"queryResult\"][\"outputContexts\"][0][\"parameters\"][\"date_start.original\"]\n date_stop = respond_dict[\"queryResult\"][\"outputContexts\"][0][\"parameters\"][\"date_stop.original\"]\n answer_str = f\"เธขเธญเธ”เธ‚เธฒเธขเธฃเธฐเธซเธงเนˆเธฒเธ‡เธงเธฑเธ™เธ—เธตเนˆ {date_start} เธ–เธถเธ‡ เธงเธฑเธ™เธ—เธตเนˆ {date_stop} เธ„เธทเธญ {total_sales_between_date(df, start = date_start, stop = date_stop):,.2f} เธšเธฒเธ—เธˆเน‰เธฒ\"\n user_id = respond_dict['originalDetectIntentRequest']['payload']['data']['source']['userId']\n # Generate Graph\n image_name = each_total_sales_between_date(df, date_start, date_stop)\n # Response Text\n line_bot_api.push_message(user_id, ImageSendMessage(\n original_content_url=f\"{config['url']}/static/{image_name}\",\n preview_image_url=f\"{config['url']}/static/{image_name}\"\n ))\n return answer_str\n\ndef sales_4(respond_dict):\n days = float(respond_dict[\"queryResult\"][\"outputContexts\"][0][\"parameters\"][\"days.original\"])\n store1 = str(respond_dict[\"queryResult\"][\"outputContexts\"][0][\"parameters\"][\"store1.original\"])\n store2 = str(respond_dict[\"queryResult\"][\"outputContexts\"][0][\"parameters\"][\"store2.original\"])\n answer_str = f\"เธขเธญเธ”เธ‚เธฒเธขเธขเน‰เธญเธ™เธซเธฅเธฑเธ‡ {days:.0f} เธงเธฑเธ™ เธ‚เธญเธ‡เธฃเน‰เธฒเธ™เธ—เธตเนˆ {store1} เธ„เธทเธญ {total_previous_sales(df, days = days, store = store1):,.2f} เนเธฅเธฐ {store2} เธ„เธทเธญ {total_previous_sales(df, days = days, store = store2):,.2f} เธšเธฒเธ—เธˆเน‰เธฒ\"\n print(answer_str)\n user_id = respond_dict['originalDetectIntentRequest']['payload']['data']['source']['userId']\n # Generate Graph\n image_name = each_previous_sales(df, days, store = [store1,store2])\n # Response\n line_bot_api.push_message(user_id, ImageSendMessage(\n original_content_url=f\"{config['url']}/static/{image_name}\",\n preview_image_url=f\"{config['url']}/static/{image_name}\"\n ))\n return answer_str\n\ndef sales_5(respond_dict):\n date = respond_dict[\"queryResult\"][\"outputContexts\"][0][\"parameters\"][\"date.original\"]\n store = respond_dict[\"queryResult\"][\"outputContexts\"][0][\"parameters\"][\"store.original\"]\n answer_str = f\"เธขเธญเธ”เธ‚เธฒเธขเธงเธฑเธ™เธ—เธตเนˆ {date} เธ‚เธญเธ‡เธฃเน‰เธฒเธ™ {store} เธ„เธทเธญ {total_sales_on_date(df, date = date, store = [store]):,.2f} เธšเธฒเธ—เธˆเน‰เธฒ\"\n return answer_str\n\ndef sales_6(respond_dict):\n date = respond_dict[\"queryResult\"][\"outputContexts\"][0][\"parameters\"][\"date.original\"]\n store1 = respond_dict[\"queryResult\"][\"outputContexts\"][0][\"parameters\"][\"store1.original\"]\n store2= respond_dict[\"queryResult\"][\"outputContexts\"][0][\"parameters\"][\"store2.original\"]\n answer_str = f\"เธขเธญเธ”เธ‚เธฒเธขเธงเธฑเธ™เธ—เธตเนˆ {date} เธ‚เธญเธ‡เธฃเน‰เธฒเธ™ {store1} เธ„เธทเธญ {total_sales_on_date(df, date = date, store = [store1]):,.2f} เนเธฅเธฐ {store2} เธ„เธทเธญ {total_sales_on_date(df, date = date, store = [store2]):,.2f} เธšเธฒเธ—เธˆเน‰เธฒ\"\n return answer_str\n\ndef sales_7(respond_dict):\n date_start = respond_dict[\"queryResult\"][\"outputContexts\"][0][\"parameters\"][\"date_start.original\"]\n date_stop = 
respond_dict[\"queryResult\"][\"outputContexts\"][0][\"parameters\"][\"date_stop.original\"]\n store = respond_dict[\"queryResult\"][\"outputContexts\"][0][\"parameters\"][\"store.original\"]\n \n answer_str = f\"เธขเธญเธ”เธ‚เธฒเธขเธฃเธฐเธซเธงเนˆเธฒเธ‡เธงเธฑเธ™เธ—เธตเนˆ {date_start} เธ–เธถเธ‡ เธงเธฑเธ™เธ—เธตเนˆ {date_stop} เธ‚เธญเธ‡เธฃเน‰เธฒเธ™ {store} เธ„เธทเธญ {total_sales_between_date(df, start = date_start, stop = date_stop, store=store):,.2f} เธšเธฒเธ—เธˆเน‰เธฒ\"\n user_id = respond_dict['originalDetectIntentRequest']['payload']['data']['source']['userId']\n # Generate Graph\n image_name = each_total_sales_between_date(df, date_start, date_stop,[store])\n # Response Text\n line_bot_api.push_message(user_id, ImageSendMessage(\n original_content_url=f\"{config['url']}/static/{image_name}\",\n preview_image_url=f\"{config['url']}/static/{image_name}\"\n ))\n return answer_str\n\ndef sales_8(respond_dict):\n date_start = respond_dict[\"queryResult\"][\"outputContexts\"][0][\"parameters\"][\"date_start.original\"]\n date_stop = respond_dict[\"queryResult\"][\"outputContexts\"][0][\"parameters\"][\"date_stop.original\"]\n store1 = respond_dict[\"queryResult\"][\"outputContexts\"][0][\"parameters\"][\"store1.original\"]\n store2 = respond_dict[\"queryResult\"][\"outputContexts\"][0][\"parameters\"][\"store2.original\"]\n\n answer_str = f\"เธขเธญเธ”เธ‚เธฒเธขเธฃเธฐเธซเธงเนˆเธฒเธ‡เธงเธฑเธ™เธ—เธตเนˆ {date_start} เธ–เธถเธ‡ เธงเธฑเธ™เธ—เธตเนˆ {date_stop} เธ‚เธญเธ‡เธฃเน‰เธฒเธ™ {store1} เธ„เธทเธญ {total_sales_between_date(df, start = date_start, stop = date_stop, store=store1):,.2f} เนเธฅเธฐ เธฃเน‰เธฒเธ™ {store2} เธ„เธทเธญ {total_sales_between_date(df, start = date_start, stop = date_stop, store=store2):,.2f} เธšเธฒเธ—เธˆเน‰เธฒ\"\n user_id = respond_dict['originalDetectIntentRequest']['payload']['data']['source']['userId']\n # Generate Graph\n image_name = each_total_sales_between_date(df, date_start, date_stop,[store1,store2])\n # Response Text\n line_bot_api.push_message(user_id, ImageSendMessage(\n original_content_url=f\"{config['url']}/static/{image_name}\",\n preview_image_url=f\"{config['url']}/static/{image_name}\"\n ))\n return answer_str\n\ndef sales_9(respond_dict):\n days = float(respond_dict[\"queryResult\"][\"outputContexts\"][0][\"parameters\"][\"days.original\"])\n store = str(respond_dict[\"queryResult\"][\"outputContexts\"][0][\"parameters\"][\"store.original\"])\n answer_str = f\"เธขเธญเธ”เธ‚เธฒเธขเธขเน‰เธญเธ™เธซเธฅเธฑเธ‡ {days:.0f} เธงเธฑเธ™ เธ‚เธญเธ‡เธฃเน‰เธฒเธ™เธ—เธตเนˆ {store} เธ„เธทเธญ {total_previous_sales(df, days = days, store = store):,.2f} เธšเธฒเธ—เธˆเน‰เธฒ\"\n print(answer_str)\n user_id = respond_dict['originalDetectIntentRequest']['payload']['data']['source']['userId']\n # Generate Graph\n image_name = each_previous_sales(df, days, store = [store])\n # Response\n reply_token = respond_dict['originalDetectIntentRequest']['payload']['data']['replyToken']\n line_bot_api.reply_message(\n reply_token,\n TextSendMessage(text=answer_str)\n )\n line_bot_api.push_message(user_id, ImageSendMessage(\n original_content_url=f\"{config['url']}/static/{image_name}\",\n preview_image_url=f\"{config['url']}/static/{image_name}\"\n ))\n return answer_str\n\ndef product_1(respond_dict):\n numbers = int(respond_dict[\"queryResult\"][\"outputContexts\"][0][\"parameters\"][\"numbers.original\"])\n user_id = respond_dict['originalDetectIntentRequest']['payload']['data']['source']['userId']\n answer_str = \"\"\n image_name = popular_item(df, numbers)\n line_bot_api.push_message(user_id, 
ImageSendMessage(\n original_content_url=f\"{config['url']}/static/{image_name}\",\n preview_image_url=f\"{config['url']}/static/{image_name}\"\n ))\n return answer_str\n\ndef product_2(respond_dict):\n numbers = int(respond_dict[\"queryResult\"][\"outputContexts\"][0][\"parameters\"][\"numbers.original\"])\n store = int(respond_dict[\"queryResult\"][\"outputContexts\"][0][\"parameters\"][\"store.original\"])\n\n user_id = respond_dict['originalDetectIntentRequest']['payload']['data']['source']['userId']\n answer_str = \"\"\n image_name = popular_item(df, numbers,store=[store])\n line_bot_api.push_message(user_id, ImageSendMessage(\n original_content_url=f\"{config['url']}/static/{image_name}\",\n preview_image_url=f\"{config['url']}/static/{image_name}\"\n ))\n return answer_str\n\ndef product_3(respond_dict):\n # print('Start apriori')\n # check_prod = df['PROD_CODE'].value_counts().to_frame()\n # prod_list = check_prod[check_prod['PROD_CODE']>= 100].index\n # df_fillter = df[df['PROD_CODE'].isin(prod_list)]\n # df_pv = df_fillter.pivot_table(index='BASKET_ID' , columns = 'PROD_CODE' , values= 'QUANTITY' , aggfunc='sum').fillna(0)\n # for col in df_pv.columns:\n # df_pv[col] = df_pv[col].apply(lambda x:1 if x > 0 else 0 )\n # frq_items = apriori(df_pv, min_support = 0.01, use_colnames = True) \n # rules = association_rules(frq_items, metric =\"lift\", min_threshold = 1) \n # rules = rules.sort_values(['support', 'lift'], ascending =[False, False])\n # antecedents = [''.join(list(x)) for x in rules[\"antecedents\"]]\n # consequents = [''.join(list(x)) for x in rules[\"consequents\"]]\n answer_str = f'''เธชเธดเธ™เธ„เน‰เธฒเธ—เธตเนˆเธ‚เธฒเธขเธ„เธนเนˆเธเธฑเธ™เธšเนˆเธญเธข 5 เธญเธฑเธ™เธ”เธฑเธšเนเธฃเธ เธ„เธทเธญ\n1. PRD0903678 เธเธฑเธš PRD0903052\n2. PRD0903052 เธเธฑเธš PRD0903678\n3. PRD0903052 เธเธฑเธš PRD0904358\n4. PRD0904358 เธเธฑเธš PRD0903052\n5. 
PRD0903052 เธเธฑเธš PRD0901265\n '''\n reply_token = respond_dict['originalDetectIntentRequest']['payload']['data']['replyToken']\n line_bot_api.reply_message(\n reply_token,\n TextSendMessage(text=answer_str)\n )\n return answer_str\n\ndef customer_1(respond_dict):\n answer_str = f\"เธˆเธณเธ™เธงเธ™เธฅเธนเธเธ„เน‰เธฒเธ—เธฑเน‰เธ‡เธซเธกเธ” เธ„เธทเธญ {number_of_customer(df)}\"\n return answer_str\n\ndef customer_2(respond_dict):\n store = int(respond_dict[\"queryResult\"][\"outputContexts\"][0][\"parameters\"][\"store.original\"])\n answer_str = f\"เธˆเธณเธ™เธงเธ™เธฅเธนเธเธ„เน‰เธฒเธ‚เธญเธ‡เธฃเน‰เธฒเธ™เธ—เธตเนˆ {store} เธ„เธทเธญ {number_of_customer(df,[store])}\"\n return answer_str\n\ndef item_1(respond_dict):\n k = int(respond_dict[\"queryResult\"][\"outputContexts\"][0][\"parameters\"][\"k.original\"])\n prod_spend_k, n_prod_spend_k, value_spend_k, total_spend = item_generated_percentage_income(df, k = k)\n answer_str = f\"เธชเธดเธ™เธ„เน‰เธฒเธ—เธตเนˆเธ—เธณเน€เธ‡เธดเธ™ {k}% เธกเธตเธˆเธณเธ™เธงเธ™ {n_prod_spend_k:,.0f} เธ„เธดเธ”เน€เธ›เน‡เธ™เธฃเธฒเธขเน„เธ”เน‰ {value_spend_k:,.0f} เธˆเธฒเธเธฃเธฒเธขเน„เธ”เน‰เธ—เธฑเน‰เธ‡เธซเธกเธ” {total_spend:,.0f} เธ„เธทเธญ {' '.join(prod_spend_k[0:5])} ...\"\n return answer_str\n\ndef item_2(respond_dict):\n k = int(respond_dict[\"queryResult\"][\"outputContexts\"][0][\"parameters\"][\"k.original\"])\n store = int(respond_dict[\"queryResult\"][\"outputContexts\"][0][\"parameters\"][\"store.original\"])\n prod_spend_k, n_prod_spend_k, value_spend_k, total_spend = item_generated_percentage_income(df, k = k, store = [store])\n answer_str = f\"เธชเธดเธ™เธ„เน‰เธฒเธ—เธตเนˆเธ—เธณเน€เธ‡เธดเธ™ {k}% เธกเธตเธˆเธณเธ™เธงเธ™ {n_prod_spend_k:,.0f} เธ„เธดเธ”เน€เธ›เน‡เธ™เธฃเธฒเธขเน„เธ”เน‰ {value_spend_k:,.0f} เธˆเธฒเธเธฃเธฒเธขเน„เธ”เน‰เธ—เธฑเน‰เธ‡เธซเธกเธ” {total_spend:,.0f} เธ„เธทเธญ {' '.join(prod_spend_k[0:5])} ...\"\n return answer_str\n\ndef total_sales_on_date(df, date, store = None):\n\n ''' เธขเธญเธ”เธ‚เธฒเธขเธฃเธงเธกเธ‚เธญเธ‡เธงเธฑเธ™เธ—เธตเนˆ date (เธ‚เธญเธ‡เธฃเน‰เธฒเธ™เธ—เธตเนˆ store)'''\n\n if store is None:\n selected_df = df[df['SHOP_DATE'] == date]\n sales = sum(selected_df['SPEND'])\n elif len(store) == 1:\n selected_df = df[(df['SHOP_DATE'] == date) & (df['STORE_CODE'] == 'STORE0000'+str(store))]\n sales = sum(selected_df['SPEND'])\n elif len(store) == 2:\n selected_df_1 = df[(df['SHOP_DATE'] == date) \n & (df['STORE_CODE'] == 'STORE0000'+str(store[0]))]\n \n selected_df_2 = df[(df['SHOP_DATE'] == date) \n & (df['STORE_CODE'] == 'STORE0000'+str(store[1]))]\n \n sales_each_1 = selected_df_1.groupby('SHOP_DATE')[['SPEND']].sum().reset_index()\n sales_each_2 = selected_df_2.groupby('SHOP_DATE')[['SPEND']].sum().reset_index()\n sales = sales_each_1.merge(sales_each_2, \n left_on = 'SHOP_DATE', right_on = 'SHOP_DATE',\n suffixes = ('_store'+str(store[0]), '_store'+str(store[1])))\n \n \n #plot\n fig, ax = plt.subplots(figsize=(20, 10))\n labels = list(sales_each_1['SHOP_DATE'].dt.date) \n x = np.arange(len(labels)) # the label locations\n width = 0.4 # the width of the bars\n rects1 = ax.bar(x - (1*width/2), sales_each_1['SPEND'], width, label='Total Sales store'+str(store[0]))\n rects2 = ax.bar(x + (1*width/2), sales_each_2['SPEND'], width, label='Total Sales store'+str(store[1]))\n rects = (rects1, rects2)\n \n # Add some text for labels, title and custom x-axis tick labels, etc.\n ax.set_ylabel('Sales',fontsize=18)\n ax.set_xticks(x)\n ax.set_xticklabels(labels, fontsize=18)\n ax.legend(fontsize=18)\n ax.tick_params(axis=\"y\", labelsize=18)\n \n def 
autolabel(rects):\n \"\"\"Attach a text label above each bar in *rects*, displaying its height.\"\"\"\n for rect in rects:\n height = round(rect.get_height(), 1)\n ax.annotate('{}'.format(height),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom',\n size=20)\n if store is not None:\n if len(store) == 2:\n for i in rects:\n autolabel(i)\n else:\n autolabel(rects)\n else:\n autolabel(rects)\n fig.tight_layout()\n image_name = str(time.time_ns()) + '.png'\n plt.savefig(image_name) \n \n return sales\n\ndef total_sales_between_date(df, start, stop, store = None):\n\n ''' เธขเธญเธ”เธ‚เธฒเธขเธฃเธงเธกเธฃเธฐเธซเธงเนˆเธฒเธ‡เธงเธฑเธ™เธ—เธตเนˆ start เธ–เธถเธ‡ stop เธ‚เธญเธ‡เธฃเน‰เธฒเธ™เธ—เธตเนˆ store'''\n\n if store is None:\n selected_df = df[(df['SHOP_DATE'] >= start) & (df['SHOP_DATE'] <= stop)]\n sales = sum(selected_df['SPEND'])\n else:\n selected_df = df[((df['SHOP_DATE'] >= start) & (df['SHOP_DATE'] <= stop)) & (df['STORE_CODE'] == 'STORE0000'+str(store))]\n sales = sum(selected_df['SPEND'])\n\n return sales\n\ndef total_previous_sales(df, days = 1, store = None):\n\n ''' เธขเธญเธ”เธ‚เธฒเธขเธฃเธงเธกเธขเน‰เธญเธ™เธซเธฅเธฑเธ‡ days เธงเธฑเธ™ เธ‚เธญเธ‡เธฃเน‰เธฒเธ™เธ—เธตเนˆ store'''\n\n date = max(df['SHOP_DATE']) - datetime.timedelta(days = days)\n if store is None:\n selected_df = df[df['SHOP_DATE'] >= date]\n sales = sum(selected_df['SPEND'])\n else:\n selected_df = df[(df['SHOP_DATE'] >= date) & (df['STORE_CODE'] == 'STORE0000'+str(store))]\n sales = sum(selected_df['SPEND'])\n\n return sales\n\ndef each_total_sales_between_date(df, start, stop, store = None):\n\n ''' เธขเธญเธ”เธ‚เธฒเธขเนเธ•เนˆเธฅเธฐเธงเธฑเธ™เธฃเธฐเธซเธงเนˆเธฒเธ‡เธงเธฑเธ™เธ—เธตเนˆ start เธ–เธถเธ‡ stop (เธ‚เธญเธ‡เธฃเน‰เธฒเธ™เธ—เธตเนˆ ['store1','store2'])'''\n \n if store is None:\n selected_df = df[(df['SHOP_DATE'] >= start) & (df['SHOP_DATE'] <= stop)]\n sales_each = selected_df.groupby('SHOP_DATE')[['SPEND']].sum().reset_index()\n \n #plot\n fig, ax = plt.subplots(figsize=(20, 10))\n labels = list(sales_each['SHOP_DATE'].dt.date) \n x = np.arange(len(labels)) # the label locations\n width = 0.6 # the width of the bars\n rects = ax.bar(x - (0*width/2), sales_each['SPEND'], width, label='Total Sales')\n ax.set(ylim=(min(sales_each['SPEND']-200), max(sales_each['SPEND']+200)))\n\n elif len(store) == 1:\n selected_df = df[(df['SHOP_DATE'] >= start) \n & (df['SHOP_DATE'] <= stop) \n & (df['STORE_CODE'] == 'STORE0000'+str(store[0]))]\n sales_each = selected_df.groupby('SHOP_DATE')[['SPEND']].sum().reset_index()\n \n #plot\n fig, ax = plt.subplots(figsize=(20, 10))\n labels = list(sales_each['SHOP_DATE'].dt.date) \n x = np.arange(len(labels)) # the label locations\n width = 0.6 # the width of the bars\n rects = ax.bar(x - (0*width/2), sales_each['SPEND'], width, label='Total Sales')\n ax.set(ylim=(min(sales_each['SPEND']-200), max(sales_each['SPEND']+200)))\n \n elif len(store) == 2: \n selected_df_1 = df[(df['SHOP_DATE'] >= start) \n & (df['SHOP_DATE'] <= stop) \n & (df['STORE_CODE'] == 'STORE0000'+str(store[0]))]\n \n selected_df_2 = df[(df['SHOP_DATE'] >= start) \n & (df['SHOP_DATE'] <= stop)\n & (df['STORE_CODE'] == 'STORE0000'+str(store[1]))]\n \n sales_each_1 = selected_df_1.groupby('SHOP_DATE')[['SPEND']].sum().reset_index()\n sales_each_2 = selected_df_2.groupby('SHOP_DATE')[['SPEND']].sum().reset_index()\n sales_each = sales_each_1.merge(sales_each_2, \n left_on = 'SHOP_DATE', right_on = 'SHOP_DATE',\n suffixes = 
('_store'+str(store[0]), '_store'+str(store[1])))\n\n #plot\n fig, ax = plt.subplots(figsize=(20, 10))\n labels = list(sales_each_1['SHOP_DATE'].dt.date) \n x = np.arange(len(labels)) # the label locations\n width = 0.4 # the width of the bars\n rects1 = ax.bar(x - (1*width/2), sales_each_1['SPEND'], width, label='Total Sales store'+str(store[0]))\n rects2 = ax.bar(x + (1*width/2), sales_each_2['SPEND'], width, label='Total Sales store'+str(store[1]))\n rects = (rects1, rects2)\n \n # Add some text for labels, title and custom x-axis tick labels, etc.\n ax.set_ylabel('Sales',fontsize=18)\n ax.set_xticks(x)\n ax.set_xticklabels(labels, fontsize=18)\n ax.legend(fontsize=18)\n ax.tick_params(axis=\"y\", labelsize=18)\n\n def autolabel(rects):\n \"\"\"Attach a text label above each bar in *rects*, displaying its height.\"\"\"\n for rect in rects:\n height = round(rect.get_height(), 1)\n ax.annotate('{}'.format(height),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom',\n size=20)\n if store is not None:\n if len(store) == 2:\n for i in rects:\n autolabel(i)\n else:\n autolabel(rects)\n else:\n autolabel(rects)\n fig.tight_layout()\n # plt.show() \n image_name = str(time.time_ns()) + '.png'\n plt.savefig('static/'+image_name)\n return image_name\n\ndef each_previous_sales(df, days = 1, store = None):\n \n ''' Daily sales over the past `days` days (for stores ['store1','store2'])'''\n date = max(df['SHOP_DATE']) - datetime.timedelta(days = days)\n\n if store is None:\n selected_df = df[df['SHOP_DATE'] >= date]\n \n sales_each = selected_df.groupby('SHOP_DATE')[['SPEND']].sum().reset_index()\n \n #plot\n fig, ax = plt.subplots(figsize=(20, 10))\n labels = list(sales_each['SHOP_DATE'].dt.date) \n x = np.arange(len(labels)) # the label locations\n width = 0.6 # the width of the bars\n rects = ax.bar(x - (0*width/2), sales_each['SPEND'], width, label='Total Sales')\n ax.set(ylim=(min(sales_each['SPEND']-200), max(sales_each['SPEND']+200)))\n\n elif len(store) == 1:\n selected_df = df[(df['SHOP_DATE'] >= date) & (df['STORE_CODE'] == 'STORE0000'+str(store[0]))]\n sales_each = selected_df.groupby('SHOP_DATE')[['SPEND']].sum().reset_index()\n \n #plot\n fig, ax = plt.subplots(figsize=(20, 10))\n labels = list(sales_each['SHOP_DATE'].dt.date) \n x = np.arange(len(labels)) # the label locations\n width = 0.6 # the width of the bars\n rects = ax.bar(x - (0*width/2), sales_each['SPEND'], width, label='Total Sales')\n ax.set(ylim=(min(sales_each['SPEND']-200), max(sales_each['SPEND']+200)))\n \n elif len(store) == 2: \n selected_df_1 = df[(df['SHOP_DATE'] >= date) \n & (df['STORE_CODE'] == 'STORE0000'+str(store[0]))]\n \n selected_df_2 = df[(df['SHOP_DATE'] >= date) \n & (df['STORE_CODE'] == 'STORE0000'+str(store[1]))]\n \n sales_each_1 = selected_df_1.groupby('SHOP_DATE')[['SPEND']].sum().reset_index()\n sales_each_2 = selected_df_2.groupby('SHOP_DATE')[['SPEND']].sum().reset_index()\n sales_each = sales_each_1.merge(sales_each_2, \n left_on = 'SHOP_DATE', right_on = 'SHOP_DATE',\n suffixes = ('_store'+str(store[0]), '_store'+str(store[1])))\n \n #plot\n fig, ax = plt.subplots(figsize=(20, 10))\n labels = list(sales_each_1['SHOP_DATE'].dt.date) \n x = np.arange(len(labels)) # the label locations\n width = 0.4 # the width of the bars\n rects1 = ax.bar(x - (1*width/2), sales_each_1['SPEND'], width, label='Total Sales store'+str(store[0]))\n 
rects2 = ax.bar(x + (1*width/2), sales_each_2['SPEND'], width, label='Total Sales store'+str(store[1]))\n rects = (rects1, rects2)\n\n # Add some text for labels, title and custom x-axis tick labels, etc.\n ax.set_ylabel('Sales',fontsize=18)\n ax.set_xticks(x)\n ax.set_xticklabels(labels, fontsize=18)\n ax.legend(fontsize=18)\n ax.tick_params(axis=\"y\", labelsize=18)\n\n def autolabel(rects):\n \"\"\"Attach a text label above each bar in *rects*, displaying its height.\"\"\"\n for rect in rects:\n height = round(rect.get_height(), 1)\n ax.annotate('{}'.format(height),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom',\n size=20)\n if store is not None:\n if len(store) == 2:\n for i in rects:\n autolabel(i)\n else:\n autolabel(rects)\n else:\n autolabel(rects)\n fig.tight_layout()\n image_name = str(time.time_ns()) + '.png'\n plt.savefig('static/'+image_name) \n \n return image_name\n\ndef popular_item(df, n, store = None):\n\n ''' The n most frequently purchased products'''\n \n if store is None:\n #select data\n pivot_df = df[['BASKET_ID', 'PROD_CODE']].groupby(['PROD_CODE','BASKET_ID']).size().unstack(fill_value = 0)\n support = pivot_df.sum(axis = 1)/df['BASKET_ID'].nunique()\n n_rank_support = support.sort_values(ascending = False)[0:n]\n n_rank_support.loc['else'] = 1 - n_rank_support.sum()\n \n #plot\n labels = n_rank_support.reset_index()['PROD_CODE']\n sizes = list(n_rank_support)\n fig1, ax1 = plt.subplots(figsize = (10,6))\n ax1.pie(sizes, explode = n_rank_support.reset_index().groupby('PROD_CODE').size()/20, labels=labels, autopct='%1.1f%%',\n startangle=90)\n ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\n image_name = str(time.time_ns()) + '.png'\n plt.savefig('static/'+image_name) \n \n else:\n #select data\n selected_df = df[(df['STORE_CODE'] == 'STORE0000'+str(store[0]))]\n sales_each = selected_df.groupby('SHOP_DATE')[['SPEND']].sum().reset_index()\n \n pivot_df = selected_df[['BASKET_ID', 'PROD_CODE']].groupby(['PROD_CODE','BASKET_ID']).size().unstack(fill_value = 0)\n support = pivot_df.sum(axis = 1)/selected_df['BASKET_ID'].nunique()\n n_rank_support = support.sort_values(ascending = False)[0:n]\n n_rank_support.loc['else'] = 1 - n_rank_support.sum()\n \n #plot\n labels = n_rank_support.reset_index()['PROD_CODE']\n sizes = list(n_rank_support)\n fig1, ax1 = plt.subplots(figsize = (10,6))\n ax1.pie(sizes, explode = n_rank_support.reset_index().groupby('PROD_CODE').size()/20, labels=labels, autopct='%1.1f%%',\n startangle=90)\n ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\n image_name = str(time.time_ns()) + '.png'\n plt.savefig('static/'+image_name) \n \n return image_name\n\ndef item_generated_percentage_income(df, k = 0.3, store = None):\n \n ''' The top products that generate k*100% of total sales '''\n \n if store is None:\n prod_spend_ratio = df[['PROD_CODE', 'SPEND']].groupby('PROD_CODE').sum()/df['SPEND'].sum()\n index = []\n sum_v = 0\n\n for i,v in enumerate(prod_spend_ratio.reset_index().sort_values('SPEND', ascending = False)['SPEND']):\n sum_v += v\n if sum_v >= k:\n break\n else:\n index.append(i)\n prod_spend_k = prod_spend_ratio.reset_index().sort_values('SPEND', ascending = False).iloc[index,0]\n value_spend_k = 
(prod_spend_ratio.reset_index().sort_values('SPEND', ascending = False).iloc[index,1]*(df['SPEND'].sum())).sum()\n total_spend = df['SPEND'].sum()\n n_prod_spend_k = len(prod_spend_k)\n else:\n selected_df = df[(df['STORE_CODE'] == 'STORE0000'+str(store[0]))]\n prod_spend_ratio = selected_df[['PROD_CODE', 'SPEND']].groupby('PROD_CODE').sum()/selected_df['SPEND'].sum()\n index = []\n sum_v = 0\n\n for i,v in enumerate(prod_spend_ratio.reset_index().sort_values('SPEND', ascending = False)['SPEND']):\n sum_v += v\n if sum_v >= k:\n break\n else:\n index.append(i)\n prod_spend_k = list(prod_spend_ratio.reset_index().sort_values('SPEND', ascending = False).iloc[index,0])\n value_spend_k = (prod_spend_ratio.reset_index().sort_values('SPEND', ascending = False).iloc[index,1]*(selected_df['SPEND'].sum())).sum()\n total_spend = selected_df['SPEND'].sum()\n n_prod_spend_k = len(prod_spend_k)\n \n return prod_spend_k, n_prod_spend_k, value_spend_k, total_spend\n\ndef number_of_customer(df, store = None):\n \n ''' Number of customers '''\n \n if store is None:\n selected_df = df\n else:\n selected_df = df[(df['STORE_CODE'] == 'STORE0000'+str(store[0]))]\n n_customer = selected_df['CUST_CODE'].nunique()\n \n return n_customer\n\[email protected]('/img', methods=['GET'])\ndef view_image():\n # generate_img(\"test.jpg\"); #save inside static folder\n return '<img src=' + url_for('static',filename='test.png') + '>'\n\n# ----- Main Flask -----\nif __name__ == '__main__':\n #import and prep\n df = pd.read_csv('supermarket_data.csv')\n df['SHOP_DATE'] = pd.to_datetime(df['SHOP_DATE'], format = '%Y%m%d')\n port = int(os.getenv('PORT', 5000))\n print(\"Starting app on port %d\" % port)\n app.run(debug=True, port=port, host='0.0.0.0', threaded=True)" } ]
2
arturlfsouza/3LNN
https://github.com/arturlfsouza/3LNN
b954f9ad6303d32be70bcc4359ceb871f55856cd
e5c16eeb7cfc3cca28dfca05652012b409148a68
de7adb278e4080d18b369f4b2f6b67ad34ee0288
refs/heads/master
2020-03-17T07:34:42.775602
2018-05-18T21:01:09
2018-05-18T21:01:09
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5232974886894226, "alphanum_fraction": 0.5378584265708923, "avg_line_length": 26.55555534362793, "blob_id": "4dc23801fb80192bce218362ce581ed45c008d50", "content_id": "8e4df44518ed599154ad39719538f8d82f35d69f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4464, "license_type": "no_license", "max_line_length": 121, "num_lines": 162, "path": "/3LNN.py", "repo_name": "arturlfsouza/3LNN", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.optimizers import SGD\nfrom keras.utils import to_categorical\nfrom sklearn.model_selection import KFold\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nimport sys\n\nDATA_DIR = 'data/data_tp1'\n\n\n# Read data from file\ndef get_data(filename=DATA_DIR):\n data = np.genfromtxt(filename, delimiter=',')\n labels = data[:, 0]\n imgs = data[:, 1:]\n return imgs, labels\n\n\n# Split data into folds\ndef split_data(input_data, folds=5):\n print('Splitting data in', folds, 'folds')\n kf = KFold(n_splits=folds, shuffle=True)\n return(kf.split(input_data))\n\n\n# Split data naively into training and testing sets\ndef naive_split(input_data, labels, train_size=4000):\n x_train = input_data[:train_size]\n y_train = labels[:train_size]\n\n x_test = input_data[train_size:]\n y_test = labels[train_size:]\n\n return (x_train, y_train, x_test, y_test)\n\n\n# Convert data to one hot encoding format\ndef one_hot(data, num_classes=None):\n return to_categorical(data, num_classes=num_classes)\n\n\n# Define Neural Network model\ndef run_model(\n x_train,\n y_train,\n x_test,\n y_test,\n learning_rate=0.1,\n batch_size=50,\n input_dim=784,\n hidden_dim=50,\n activation='sigmoid',\n num_classes=10,\n epochs=50\n ):\n\n model = Sequential()\n model.add(Dense(\n hidden_dim,\n activation=activation,\n input_dim=input_dim,\n ))\n\n model.add(Dense(\n num_classes,\n activation=activation,\n input_dim=hidden_dim,\n ))\n\n sgd = SGD(lr=learning_rate)\n model.compile(\n loss='categorical_crossentropy',\n optimizer=sgd,\n metrics=['accuracy']\n )\n\n history = model.fit(\n x_train,\n y_train,\n epochs=epochs,\n verbose=2,\n batch_size=batch_size,\n validation_data=(x_test, y_test),\n shuffle=True\n )\n\n return history\n\n\ndef plot_results(history, out_file):\n train_acc = history.history['acc']\n val_acc = history.history['val_acc']\n\n plt.plot(range(len(train_acc)), train_acc, color='blue')\n plt.plot(range(len(train_acc)), val_acc, color='orange')\n\n # add grid lines\n plt.grid(linestyle=\"dashed\")\n\n # change limits to improve visibility\n plt.xlim((0, len(train_acc)-1))\n # plt.ylim((0, 105))\n\n # add labels\n plt.xlabel(\"Epochs\")\n plt.ylabel(\"Accuracy\")\n\n # add legends\n legend = mpatches.Patch(color='blue', label='Training')\n legend2 = mpatches.Patch(color='orange', label='Validation')\n plt.legend(handles=[legend, legend2])\n\n # save plot to file\n plt.savefig(out_file)\n plt.gcf().clear()\n\n\ndef main(argv):\n if len(argv) > 1:\n filename = argv[1]\n imgs, labels = get_data(filename)\n else:\n print(\"no input file, using default\")\n imgs, labels = get_data()\n\n labels = one_hot(labels, 10)\n x_train, y_train, x_test, y_test = naive_split(imgs, labels)\n\n # batch_sizes = [x_train.shape[0]]\n batch_sizes = [1, 10, 50, x_train.shape[0]]\n learning_rates = [0.5, 1, 10]\n hidden_dims = [25, 50, 100]\n\n # history = run_model(x_train, y_train, x_test, 
y_test, learning_rate=0.5, batch_size=4000, hidden_dim=50, epochs=50)\n # plot_results(history, 'test')\n\n done = 0\n total = len(batch_sizes)*len(learning_rates)*len(hidden_dims)\n for batch_size in batch_sizes:\n for learning_rate in learning_rates:\n for hidden_dim in hidden_dims:\n print(done, \"of\", total)\n history = run_model(\n x_train,\n y_train,\n x_test,\n y_test,\n learning_rate=learning_rate,\n batch_size=batch_size,\n hidden_dim=hidden_dim\n )\n plot_results(history, \"result\"+str(done))\n done += 1\n\n\nif __name__ == '__main__':\n main(sys.argv)\n" }, { "alpha_fraction": 0.682539701461792, "alphanum_fraction": 0.7460317611694336, "avg_line_length": 11.600000381469727, "blob_id": "d8ee8000ba968102812b3bb2f87c58fc20b98e1f", "content_id": "33e1eb0476fe89f6fc3d31d48d2519ab043697ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 63, "license_type": "no_license", "max_line_length": 17, "num_lines": 5, "path": "/requirements.txt", "repo_name": "arturlfsouza/3LNN", "src_encoding": "UTF-8", "text": "tensorflow >= 1.8\nkeras >= 2.1\nsklearn\nscikit-learn\nmatplotlib\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.7111111283302307, "avg_line_length": 14.333333015441895, "blob_id": "35003f4628b36ac4292130f4c9cd6638b0774a0d", "content_id": "c7232746788aab63ab2515cb327295e03cbadac0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 45, "license_type": "no_license", "max_line_length": 37, "num_lines": 3, "path": "/README.md", "repo_name": "arturlfsouza/3LNN", "src_encoding": "UTF-8", "text": "# 3LNN\n\n### 3-Layer Neural Network with Keras" } ]
3
whitespots/brokensocial
https://github.com/whitespots/brokensocial
b457ed5058e030c1a7b415992b41151abcd1746e
cd5dead0dc61cce56d4c53f75c63ed36349eb643
649e89992dbb48da4faf599db43881552b5c474d
refs/heads/main
2023-01-20T14:31:32.071453
2020-11-22T11:00:43
2020-11-22T11:00:43
314,635,204
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.7726218104362488, "alphanum_fraction": 0.7749419808387756, "avg_line_length": 52.75, "blob_id": "3f946225aa653f72d90bdd1f018a4ecb07de6910", "content_id": "7350a2d2f813ff3bddc8e493007eb96e9c59f3b3", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 431, "license_type": "permissive", "max_line_length": 116, "num_lines": 8, "path": "/README.md", "repo_name": "whitespots/brokensocial", "src_encoding": "UTF-8", "text": "# A small contribution to the community :)\nThis is a part of our toolset for [vulnerability monitoring service](https://whitespots.io/vulnerability-monitoring)\n\n### Check other [opensource tools](https://github.com/whitespots/fast-security-scanners)\n\n# Check your site for social network \"accounts takeover\" via broken social network links\n\n`docker run --rm -it --name scanner -e VULN_ID=1 -e DOMAIN=site.com whitespots/brokensocial`\n\n" }, { "alpha_fraction": 0.5829428434371948, "alphanum_fraction": 0.5918462872505188, "avg_line_length": 26.727272033691406, "blob_id": "7b20d84a062d1ea4a980692e35f4b6cffec46486", "content_id": "70b6a3b1e6dd040e33dfaf0eb06c44a83fb71c64", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2134, "license_type": "permissive", "max_line_length": 90, "num_lines": 77, "path": "/check.py", "repo_name": "whitespots/brokensocial", "src_encoding": "UTF-8", "text": "import requests\nimport os\nimport json\nimport re\nfrom urllib3.exceptions import InsecureRequestWarning\nrequests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)\n\nTIMEOUT=3\nsocial_list = {\n 'instagram.com': '',\n 'github.com': '',\n 'facebook.com': '',\n 'vk.com': '',\n 'twitter.com': ''\n}\n\n\nports = os.environ.get('PORTS')\nvuln_id = os.environ.get('VULN_ID')\nurls = ['http://{0}/'.format(os.environ.get('DOMAIN'))]\ntry:\n ports = ports.strip(' ').split(',')\n for port in ports:\n urls.append('http://{0}:{1}/'.format(os.environ.get('DOMAIN'), port))\nexcept Exception as ex:\n pass\n\n\ndef resp(url, state=False):\n if state:\n return json.dumps({\"vulnerable\": \"True\", \"vuln_id\": vuln_id, \"description\": url})\n else:\n return json.dumps({\"vulnerable\": \"False\", \"vuln_id\": vuln_id, \"description\": url})\n\n\ndef parse_social_networks(text):\n links_list = []\n # bail out early if none of the social domains appear in the page at all\n if not any(text.find(social) > -1 for social in social_list):\n return links_list\n for social in social_list:\n findings = re.findall(fr'(?:{social}/)(\\w+)', text)\n if findings:\n links_list.append({social: findings[0]})\n return links_list\n\n\ndef check_social_404(links):\n result_list = []\n if len(links) == 0:\n return result_list\n for link in links:\n for social, nickname in link.items():\n social_url = f'https://{social}/{nickname}'\n if 'twitter' in social:\n social_url = f'https://mobile.{social}/{nickname}'\n if requests.get(social_url).status_code == 404:\n result_list.append(social)\n return result_list\n\n\ndef check():\n if not urls:\n return resp(url='', state=False)\n for url in urls:\n try:\n page_content = requests.get(url, timeout=TIMEOUT, verify=False).text\n links = parse_social_networks(page_content)\n result = check_social_404(links)\n if len(result) > 0:\n return resp(url=url, state=True)\n except Exception as ex:\n pass\n return resp(url=url, state=False)\n\n\nif __name__ == '__main__':\n print(check())" } ]
2
NIKsaurabh/Machine-Learning-Algorithm
https://github.com/NIKsaurabh/Machine-Learning-Algorithm
664087514dc7ba7e051f9feeefee9d4c276d7bd1
a7bfbc917124659c5b4ab9996acd049b437490a2
e03cf254ab03ce4c374d3f7bef9ab8e82ffb55f7
refs/heads/master
2020-07-28T09:18:09.751376
2020-06-05T12:09:18
2020-06-05T12:09:18
209,377,728
0
0
null
2019-09-18T18:29:11
2019-09-18T18:43:22
2019-09-20T05:34:08
Python
[ { "alpha_fraction": 0.7085781693458557, "alphanum_fraction": 0.7426556944847107, "avg_line_length": 25.787878036499023, "blob_id": "67f0be79734af5f5d66fdf0795bea766bb378a68", "content_id": "bd7128e2d636394f19c5c5542baec54d79576e7b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 851, "license_type": "no_license", "max_line_length": 64, "num_lines": 33, "path": "/random forest regression.py", "repo_name": "NIKsaurabh/Machine-Learning-Algorithm", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Aug 11 11:51:00 2019\n\n@author: saurabh\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ndataset=pd.read_csv('Position_Salaries.csv')\nX=dataset.iloc[:,1:2].values\ny=dataset.iloc[:,-1].values\n\n#fitting random forest regression to the dataset\nfrom sklearn.ensemble import RandomForestRegressor\nregressor=RandomForestRegressor(n_estimators=500,random_state=0)\nregressor.fit(X,y)\n\n#predicting a new result\ny_pred=regressor.predict(np.array([6.5]).reshape(1,-1))\n\n#visualising the result\nx_grid=np.arange(min(X),max(X),0.01)\nx_grid=x_grid.reshape(len(x_grid),1)\nplt.scatter(X,y,color='red')\nplt.plot(x_grid,regressor.predict(x_grid),color='blue')\nplt.title(\"truth or bluff (random forest regression)\")\nplt.xlabel(\"position level\")\nplt.ylabel(\"salary\")\nplt.show()\n" }, { "alpha_fraction": 0.7149532437324524, "alphanum_fraction": 0.7453271150588989, "avg_line_length": 25.205883026123047, "blob_id": "781da137649e620698d6c09a8158e11677fb0e4a", "content_id": "9cec94aaf5502d0bb95f97386edce66cc4456737", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 856, "license_type": "no_license", "max_line_length": 55, "num_lines": 34, "path": "/decision_tree_regression_.py", "repo_name": "NIKsaurabh/Machine-Learning-Algorithm", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Aug 10 09:17:16 2019\n\n@author: saurabh\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ndataset=pd.read_csv('Position_Salaries.csv')\nX=dataset.iloc[:,1:2].values\ny=dataset.iloc[:,2].values\n\n#fitting decision tree regression to the dataset\nfrom sklearn.tree import DecisionTreeRegressor\nregressor=DecisionTreeRegressor(random_state=0)\nregressor.fit(X,y)\n\n\n#predicting the new result\ny_pred=regressor.predict(np.array([6.5]).reshape(1,-1))\n\n#visualising the decision tree regression result\nX_grid=np.arange(min(X),max(X),0.01)\nX_grid=X_grid.reshape(len(X_grid),1)\nplt.scatter(X,y,color='red')\nplt.plot(X_grid,regressor.predict(X_grid),color='blue')\nplt.title(\"truth or bluff (decision tree regression)\")\nplt.xlabel(\"position level\")\nplt.ylabel(\"salary\")\nplt.show()" }, { "alpha_fraction": 0.6513410210609436, "alphanum_fraction": 0.36878612637519836, "avg_line_length": 18.340425491333008, "blob_id": "b210a1d9438e51fc7181744ea114b8bb51f0c3e6", "content_id": "6437d8166ba96f547971f1e5f0ace4ebd579dc82", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 783, "license_type": "no_license", "max_line_length": 88, "num_lines": 31, "path": "/Apriori.py", "repo_name": "NIKsaurabh/Machine-Learning-Algorithm", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Aug 23 14:00:31 2019\n\n@author: saurabh\n\"\"\"\n\nimport numpy 
as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n#importing the dataset\ndataset=pd.read_csv('Market_Basket_Optimisation.csv',header=None)\ntransaction=[]\nfor i in range(0,7501):\n transaction.append([str(dataset.values[i,j]) for j in range(0,20)])\n'''for i in range(0,7501):\n l=[]\n for j in range(0,20):\n l.append(str(dataset.values[i,j]))\n transaction.append(l)'''\n#print(transaction)\n\n#training apriori on the dataset\nfrom apyori import apriori\nrules=apriori(transaction, min_support=0.003,min_confidence=0.2,min_lift=3,min_length=2)\n\nresult=list(rules)\nlistRules = [list(result[i][0]) for i in range(0,len(result))]\n#print(result)" }, { "alpha_fraction": 0.7283519506454468, "alphanum_fraction": 0.7479050159454346, "avg_line_length": 26.519229888916016, "blob_id": "06d695d2a97ab99968a3907f3db6159c5e60a3dd", "content_id": "67eced9a4da72c179f8ae73bbe275f547c1e70cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1432, "license_type": "no_license", "max_line_length": 94, "num_lines": 52, "path": "/Natural Language Processing.py", "repo_name": "NIKsaurabh/Machine-Learning-Algorithm", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 28 16:05:28 2019\n\n@author: saurabh\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n#importing the dataset\ndataset=pd.read_csv('Restaurant_Reviews.tsv', delimiter='\\t', quoting=3)\n\n#cleaning the texts\nimport re\n#import nltk\n#nltk.download('stopwords')\nfrom nltk.corpus import stopwords\nfrom nltk.stem.porter import PorterStemmer\ncorpus=[]\nfor i in range(0,1000):\n review=re.sub('[^a-zA-Z]',' ',dataset['Review'][i])\n review=review.lower()\n review=review.split()\n ps=PorterStemmer()\n review = [ps.stem(word) for word in review if not word in set(stopwords.words('english'))]\n review=' '.join(review)\n corpus.append(review)\n\n#creating the bag of words model\nfrom sklearn.feature_extraction.text import CountVectorizer\ncv=CountVectorizer(max_features=1500)\nX = cv.fit_transform(corpus).toarray()\ny=dataset.iloc[:,1].values\n\n#splitting dataset into training and test sets\nfrom sklearn.model_selection import train_test_split\nX_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2,random_state=0)\n\n#fitting Naive Bayes to the training set\nfrom sklearn.naive_bayes import GaussianNB\nclassifier=GaussianNB()\nclassifier.fit(X_train,y_train)\n\n#predicting the test set result\ny_pred=classifier.predict(X_test)\n\n#making the confusion matrix\nfrom sklearn.metrics import confusion_matrix\ncm=confusion_matrix(y_test,y_pred)\n\n" }, { "alpha_fraction": 0.741754412651062, "alphanum_fraction": 0.7621052861213684, "avg_line_length": 25.88679313659668, "blob_id": "6703a785c78e604defe8e4d688d5c38dc0e305a5", "content_id": "cc413aa878952c95beef704c04e4c47cd28b8c78", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1425, "license_type": "no_license", "max_line_length": 72, "num_lines": 53, "path": "/polynomial regression.py", "repo_name": "NIKsaurabh/Machine-Learning-Algorithm", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 17 16:00:29 2019\n\n@author: saurabh\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n#importing the 
data\ndata=pd.read_csv('Position_Salaries.csv')\n#plt.scatter(data['Position'].values,data['Salary'].values)\n#plt.xticks(rotation='vertical')\n\nX=data.iloc[:,1:2].values\ny=data.iloc[:,2].values\n\n#fitting data to linear regression model\nfrom sklearn.linear_model import LinearRegression\nLregressor=LinearRegression()\nLregressor.fit(X,y)\n\n#fitting polynomial regression to the dataset\nfrom sklearn.preprocessing import PolynomialFeatures\nPregressor=PolynomialFeatures(degree=4)\nX_poly=Pregressor.fit_transform(X)\n\nLregressor_2=LinearRegression()\nLregressor_2.fit(X_poly,y)\n\n#visualising the linear regression results\nplt.scatter(X,y,color='red')\nplt.plot(X, Lregressor.predict(X))\nplt.title(\"Truth or Bluff (Linear regression)\")\nplt.xlabel(\"Position level\")\nplt.ylabel(\"Salary\")\nplt.show()\n\n#visualising the polynomial regression results\nX_grid=np.arange(min(X),max(X),0.01)\nX_grid=X_grid.reshape(len(X_grid),1)\nplt.scatter(X,y,color='red')\nplt.plot(X_grid, Lregressor_2.predict(Pregressor.fit_transform(X_grid)))\nplt.title(\"Truth or Bluff (Polynomial regression)\")\nplt.xlabel(\"Position level\")\nplt.ylabel(\"Salary\")\nplt.show()\n\n#predicting new results using linear regression\nprint(Lregressor.predict(np.array([6.5]).reshape(1,-1)))\n" }, { "alpha_fraction": 0.8409090638160706, "alphanum_fraction": 0.8409090638160706, "avg_line_length": 65, "blob_id": "20429079b032edcc7c23ca26b409c08b376f862d", "content_id": "157e4c1a77517361528e7268bea44102cef1c61b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 132, "license_type": "no_license", "max_line_length": 102, "num_lines": 2, "path": "/README.md", "repo_name": "NIKsaurabh/Machine-Learning-Algorithm", "src_encoding": "UTF-8", "text": "# Machine-Learning-Algorithm\nIt contains machine learning algorithms for Regression, Classification, Clustering, Deep Learning etc.\n" }, { "alpha_fraction": 0.6511628031730652, "alphanum_fraction": 0.705198347568512, "avg_line_length": 31.511110305786133, "blob_id": "104c46474a69d91bcd2939bba3eb9b41c94afab4", "content_id": "933b3f450b7f809bd0a232382a3265133516afaf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1462, "license_type": "no_license", "max_line_length": 105, "num_lines": 45, "path": "/K Means Clustering.py", "repo_name": "NIKsaurabh/Machine-Learning-Algorithm", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 21 16:14:52 2019\n\n@author: saurabh\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n#importing the dataset\ndataset=pd.read_csv('Mall_Customers.csv')\nX=dataset.iloc[:,[3,4]].values\n\n#using the Elbow method to find the optimal number of clusters\nfrom sklearn.cluster import KMeans\nwcss=[]\nfor i in range(1,11):\n kmeans=KMeans(n_clusters=i, init='k-means++', max_iter=300, n_init=10,random_state=0)\n kmeans.fit(X)\n wcss.append(kmeans.inertia_)\nplt.plot(range(1,11),wcss)\nplt.title('The elbow method')\nplt.xlabel('number of clusters')\nplt.ylabel('wcss')\nplt.show()\n\n#applying kmeans to the dataset\nkmeans=KMeans(n_clusters=5,init='k-means++',n_init=10,max_iter=300,random_state=0)\ny_kmeans=kmeans.fit_predict(X)\n\n#visualising the 
clusters\nplt.scatter(X[y_kmeans==0,0],X[y_kmeans==0,1],s=100,c='red',label='careful')\nplt.scatter(X[y_kmeans==1,0],X[y_kmeans==1,1],s=100,c='blue',label='standard')\nplt.scatter(X[y_kmeans==2,0],X[y_kmeans==2,1],s=100,c='green',label='target')\nplt.scatter(X[y_kmeans==3,0],X[y_kmeans==3,1],s=100,c='cyan',label='careless')\nplt.scatter(X[y_kmeans==4,0],X[y_kmeans==4,1],s=100,c='magenta',label='sensible')\nplt.scatter(kmeans.cluster_centers_[:,0],kmeans.cluster_centers_[:,1],s=300,c='yellow',label='centroids')\nplt.title('cluster of clients')\nplt.xlabel('Annual Income')\nplt.ylabel('Spending Score (1-100)')\nplt.legend()\nplt.show()" }, { "alpha_fraction": 0.6735708117485046, "alphanum_fraction": 0.7199668884277344, "avg_line_length": 29.200000762939453, "blob_id": "f5ed5408be976c781e9152c39a94a45484de8bc3", "content_id": "8342d9a9f95b3f0e9bb26b369563755172819c36", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1207, "license_type": "no_license", "max_line_length": 76, "num_lines": 40, "path": "/Heirarchical Clustering.py", "repo_name": "NIKsaurabh/Machine-Learning-Algorithm", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Aug 23 10:17:48 2019\n\n@author: saurabh\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n#importing the dataset\ndataset=pd.read_csv('Mall_Customers.csv')\nX=dataset.iloc[:,[3,4]].values\n\n#using the dendrogram to find the optimal number of clusters\nimport scipy.cluster.hierarchy as sch\ndendrogram=sch.dendrogram(sch.linkage(X, method='ward'))\nplt.title('Dendrogram')\nplt.xlabel('Clusters')\nplt.ylabel('Euclidean distance')\nplt.show()\n\n#Fitting Hierarchical clustering to the dataset\nfrom sklearn.cluster import AgglomerativeClustering\nhc=AgglomerativeClustering(n_clusters=5,affinity='euclidean',linkage='ward')\ny_hc=hc.fit_predict(X)\n\n#visualising the clusters\nplt.scatter(X[y_hc==0,0],X[y_hc==0,1],s=100,c='red',label='careful')\nplt.scatter(X[y_hc==1,0],X[y_hc==1,1],s=100,c='blue',label='standard')\nplt.scatter(X[y_hc==2,0],X[y_hc==2,1],s=100,c='green',label='target')\nplt.scatter(X[y_hc==3,0],X[y_hc==3,1],s=100,c='cyan',label='careless')\nplt.scatter(X[y_hc==4,0],X[y_hc==4,1],s=100,c='magenta',label='sensible')\nplt.title('cluster of clients')\nplt.xlabel('Annual Income')\nplt.ylabel('Spending Score (1-100)')\nplt.legend()\nplt.show()" }, { "alpha_fraction": 0.7452452182769775, "alphanum_fraction": 0.7667667865753174, "avg_line_length": 26.88679313659668, "blob_id": "06d695d2a97ab99968a3907f3db6159c5e60a3dd", "content_id": "914a8df5a269bb82ea65de0704dc493b4982cb74", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1998, "license_type": "no_license", "max_line_length": 86, "num_lines": 72, "path": "/ANN.py", "repo_name": "NIKsaurabh/Machine-Learning-Algorithm", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Sep 1 09:13:11 2019\n\n@author: saurabh\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n#importing the dataset\ndataset=pd.read_csv('Churn_Modelling.csv')\nX=dataset.iloc[:,3:13]\ny=dataset.iloc[:,13]\n\n#Encoding categorical data\nfrom sklearn.preprocessing import LabelEncoder,OneHotEncoder\nlabelencoder_X_1=LabelEncoder()\nX.iloc[:,1] = labelencoder_X_1.fit_transform(X.iloc[:,1])\nlabelencoder_X_2=LabelEncoder()\nX.iloc[:,2] = 
labelencoder_X_2.fit_transform(X.iloc[:,2])\nonehotencoder=OneHotEncoder(categorical_features=[1])\nX=onehotencoder.fit_transform(X).toarray()\nX=X[:,1:]\n\n#feature scaling\nfrom sklearn.preprocessing import StandardScaler\nsc=StandardScaler()\nX=sc.fit_transform(X)\n#X_train=sc.fit_transform(X_train)\n#X_test=sc.transform(X_test)\n\n#splitting the dataset into training and test sets\nfrom sklearn.model_selection import train_test_split\nX_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.2, random_state=0)\n\n#making ANN\n\n#importing Keras libraries and packages\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense\n\n#initialising the ANN\nclassifier=Sequential()\n\n#adding the input layer and first hidden layer\nclassifier.add(Dense(output_dim=6, init='uniform', activation='relu', input_dim=11))\n\n#adding the second hidden layer\nclassifier.add(Dense(output_dim=6, init='uniform', activation='relu'))\n\n#adding the output layer\nclassifier.add(Dense(output_dim=1, init='uniform', activation='sigmoid'))\n\n#compiling the ANN\nclassifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n\n#fitting the ANN to the training set\nclassifier.fit(X_train,y_train,batch_size=10,nb_epoch=100)\n\n#making the prediction and evaluating the model\n\n#predicting the test set result\ny_pred=classifier.predict(X_test)\ny_pred=(y_pred>0.5)\n\n#making the confusion matrix\nfrom sklearn.metrics import confusion_matrix\ncm=confusion_matrix(y_test, y_pred)\n\n" }, { "alpha_fraction": 0.7401812672615051, "alphanum_fraction": 0.7643504738807678, "avg_line_length": 26.864864349365234, "blob_id": "dcb86e5521ba3d48cc267e9309d140e02469b4a5", "content_id": "8828f2281b1549202534be4a502a2f49fd86a9ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 993, "license_type": "no_license", "max_line_length": 65, "num_lines": 37, "path": "/multiple Linear Regression.py", "repo_name": "NIKsaurabh/Machine-Learning-Algorithm", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 16 22:57:22 2019\n\n@author: saurabh\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n#importing the dataset\ndata=pd.read_csv('50_Startups.csv')\nX=data.iloc[:,:-1].values\ny=data.iloc[:,-1].values \nplt.scatter(data['Profit'].values,data['State'].values)\n\nfrom sklearn.preprocessing import LabelEncoder,OneHotEncoder\nencoder=LabelEncoder()\nX[:,3]=encoder.fit_transform(X[:,3])\noneHotEncoder=OneHotEncoder(categorical_features=[3])\nX=oneHotEncoder.fit_transform(X).toarray()\n\n#avoiding the dummy variable trap\nX=X[:,1:]\n\n#splitting the dataset into training and test sets\nfrom sklearn.model_selection import train_test_split\nX_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2)\n\n#fitting multiple linear regression\nfrom sklearn.linear_model import LinearRegression\nregressor=LinearRegression()\nregressor.fit(X_train,y_train)\n\n#predicting the test set result\ny_pred=regressor.predict(X_test)" } ]
10
AlexJamesWright/MessWithDevs
https://github.com/AlexJamesWright/MessWithDevs
73c6e8621628a295226139164240dd31cf8353eb
c6d83ff5fc4c954d0d83834191d9a6ce18b1a2d8
f464a172370d159ed0b91f3200dc2a1913a63503
refs/heads/master
2021-03-24T09:50:27.330302
2018-10-20T07:50:58
2018-10-20T07:50:58
119,056,522
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.526674211025238, "alphanum_fraction": 0.5312145352363586, "avg_line_length": 35.70833206176758, "blob_id": "9d1f2a35d8d91a502bb9d032f01fd426d6dc5d90", "content_id": "6c04fa3367c97c395418a87c75f5ac5991e560f5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 881, "license_type": "permissive", "max_line_length": 127, "num_lines": 24, "path": "/setup.py", "repo_name": "AlexJamesWright/MessWithDevs", "src_encoding": "UTF-8", "text": "#============================================================================\n# Name : setup.py\n# Author : Alex James Wright\n# Version : 0.1\n# Copyright : MIT\n# Description : PyPi setup file\n#============================================================================\n\nfrom setuptools import setup\n\nsetup(name='MessWithDevs',\n version='0.2',\n description='Swaps characters in a text file with indistinguishable doppelgangers to break code (damage is reversible!)',\n url='https://github.com/AlexJamesWright/MessWithDevs',\n author='Alex James Wright',\n author_email='[email protected]',\n license='MIT',\n packages=['MessWithDevs'],\n entry_points={'console_scripts': ['mwd=MessWithDevs.commandLine:main']},\n include_package_data=True,\n keywords='MessWithDevs',\n zip_safe=False,\n test_suite='nose.collector',\n tests_require=['nose'])\n" }, { "alpha_fraction": 0.5103562474250793, "alphanum_fraction": 0.5120132565498352, "avg_line_length": 42.10714340209961, "blob_id": "971cac699d70aefefbf83c2303d6eb6f903b0f74", "content_id": "2e2c54f8f6ee536a9abfdcc24a9d0a893e41166d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1207, "license_type": "permissive", "max_line_length": 99, "num_lines": 28, "path": "/MessWithDevs/tests/test_mwd.py", "repo_name": "AlexJamesWright/MessWithDevs", "src_encoding": "UTF-8", "text": "#============================================================================\n# Name : test_mwd.py\n# Author : Alex James Wright\n# Version : 0.1\n# Copyright : MIT\n# Description : Tests for command line tool, ensures process reverses\n#============================================================================\n\nimport unittest\nimport subprocess\nimport os\n\ndirec = os.path.dirname(os.path.realpath(__file__))\n\nclass Test_MWD(unittest.TestCase):\n def test_r_reverses_perfectly(self):\n subprocess.call(['cp', direc + '/allRelevantChars.c', direc + '/test.c'])\n subprocess.call(['python', direc + '/../__init__.py', direc + '/allRelevantChars.c'])\n subprocess.call(['python', direc + '/../__init__.py', direc + '/allRelevantChars.c', '-r'])\n\n with open(direc + '/allRelevantChars.c', 'r') as converted:\n with open(direc + '/test.c', 'r') as original:\n orig = original.readlines()\n conv = converted.readlines()\n self.assertEqual(len(orig), len(conv))\n for charOrig, charConv in zip(orig, conv):\n self.assertEqual(charOrig, charConv)\n subprocess.call(['rm', direc + '/test.c'])\n" }, { "alpha_fraction": 0.4113081991672516, "alphanum_fraction": 0.41862526535987854, "avg_line_length": 38.21739196777344, "blob_id": "6ec9b282ad83114d8d501950dd67e72505901d44", "content_id": "11592f94dcba49b29036961a7ad51137097db6c6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4561, "license_type": "permissive", "max_line_length": 134, "num_lines": 115, "path": "/MessWithDevs/__init__.py", "repo_name": "AlexJamesWright/MessWithDevs", "src_encoding": 
"UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n#============================================================================\n# Name : __init__.py\n# Author : Alex James Wright\n# Version : 0.1\n# Copyright : MIT\n# Description : Script to swap characters, contains ruin()\n#============================================================================\n\n\nimport sys\nimport os\nimport subprocess\nfrom copy import deepcopy\n\ndef ruin():\n if int(sys.version[0]) is not 3:\n print(\"Get a newer version of python, dammit!\")\n sys.exit(-1)\n\n # Dictionary has the form normal : pain in the arse version\n\n opDictionary = {';':'อพ', ':':'แก', '.':'แސ', \"'\":\"แ‘Š\", '-':'โ€’', '\"':'โ€œ', \"'\":'โ€›', \\\n '|':'โŽน', '/':'โˆ•', ')':'โŸฏ', '(':'โŸฎ', '+':'โงพ'}\n\n # Objective is to get as many different errors as possible, so change available\n # letters on an alternate basis to get undefined errors and whatever else it does\n # thats bad\n charDictionary = {'A':'ฮ‘', 'B':'ฮ’', 'E':'ฮ•', 'Z':'ฮ–', 'H':'ฮ—', 'I':'ฮ™', 'K':'ฮš', \\\n 'M':'ฮœ', 'N':'ฮ', 'O':'ฮŸ', 'P':'ฮก', 'T':'ฮค', 'Y':'ฮฅ', 'X':'ฮง', \\\n 'F':'ฯœ', 'c':'ฯฒ', 'j':'ฯณ', 'C':'ฯน', 'S':'ะ…', 'J':'ะˆ', 'e':'ะต', \\\n 'o':'ะพ', 's':'ั•', 'i':'ั–', 'h':'าป', 'd':'ิ', 'q':'ิ›', 'w':'ิ'}\n\n EXE=[\".cc\", \".c\", \".cpp\", \".cxx\", \".cu\", \".c++\", \".js\", \".java\", \".ii\", \\\n \".ixx\", \".ipp\", \".i++\", \".inl\", \".idl\", \".ddl\", \".odl\", \".h\", \".hh\", \".hxx\", \".hpp\", \".h++\", \\\n \".cs\", \".d\", \".php\", \".php4\", \".php5\", \".phtml\", \".inc\", \".m\", \".md\", \".mm\", \".html\", \\\n \".dox\", \".py\", \".pyw\", \".f90\", \".f95\", \".f03\", \".f08\", \".f\", \".for\", \".tcl\", \".vhd\", \".vhdl\", \\\n \".ucf\", \".qsf\"]\n\n inputFile=0\n FileList = []\n useExe = None\n userExeLen=1\n # Get files\n if '-e' in sys.argv:\n useExe = sys.argv[sys.argv.index('-e') + 1]\n userExeLen = len(useExe)\n EXE.append(useExe)\n # Get flags\n if '-r' in sys.argv:\n mode='-r'\n else:\n mode='-f'\n\n for i, arg in enumerate(sys.argv):\n if (arg.find('.')>=0 and not arg[-11:]=='__init__.py'):\n if (arg[-2:] in EXE or arg[-3:] in EXE or arg[-4:] in EXE or arg[-5:] in EXE or arg[-userExeLen:] in EXE) and arg!=useExe:\n inputFile+=1\n FileList.append(arg)\n\n if not inputFile:\n print(\"No valid input file.\")\n sys.exit(-1)\n\n for File in FileList:\n subprocess.call(['cp', File, 'source.txt'])\n\n if mode == '-r':\n # Reverse mode, so swap keys and values\n useOpDict = dict([[v, k] for k, v in opDictionary.items()])\n useCharDict = dict([[v, k] for k, v in charDictionary.items()])\n else:\n useOpDict = opDictionary\n useCharDict = charDictionary\n\n # Keep track of which characters have been changed\n charUsed = deepcopy(useCharDict)\n for key in charUsed.keys():\n charUsed[key] = 0\n\n # OK, start messing with their file\n with open(\"source.txt\", 'r') as fin:\n with open(\"target.txt\", 'w') as fout:\n for line in fin.readlines():\n string = ''\n for char in line:\n if char in useOpDict.keys():\n # Character is one of the operators\n string += useOpDict[char]\n elif char in useCharDict.keys():\n # Character is one of the chars to change\n if mode == '-r':\n # Always swap if in reverse mode\n string += useCharDict[char]\n elif charUsed[char]==1:\n # Do not swap char this time\n string += char\n charUsed[char] = 0\n elif charUsed[char]==0:\n # Swap this time\n string += useCharDict[char]\n charUsed[char] = 1\n else:\n # Character is not in dictionaries so do not change\n string += char\n # Place 
this line in the target file\n fout.write(string)\n\n subprocess.call(['mv', 'target.txt', File])\n subprocess.call(['rm', os.getcwd() + '/source.txt'])\n # Fin.\nif __name__=='__main__':\n ruin()\n" }, { "alpha_fraction": 0.3012048304080963, "alphanum_fraction": 0.3072289228439331, "avg_line_length": 26.66666603088379, "blob_id": "e7966fdfc25bbf731ec641212b2b589e962b6457", "content_id": "c181a43779fc70bdbf037a66c494931aeab52456", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 332, "license_type": "permissive", "max_line_length": 77, "num_lines": 12, "path": "/MessWithDevs/commandLine.py", "repo_name": "AlexJamesWright/MessWithDevs", "src_encoding": "UTF-8", "text": "#============================================================================\n# Name : commandLine.py\n# Author : Alex James Wright\n# Version : 0.1\n# Copyright : MIT\n# Description : For command line tool\n#============================================================================\n\nfrom . import ruin\n\ndef main():\n ruin()\n" }, { "alpha_fraction": 0.7578328847885132, "alphanum_fraction": 0.7591384053230286, "avg_line_length": 29.039215087890625, "blob_id": "7fc9338f2aa920e9565532e3ca15e3d4e436bbe2", "content_id": "a5fa166cc3bdad159870e332d4ce02f432a60d83", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1533, "license_type": "permissive", "max_line_length": 139, "num_lines": 51, "path": "/README.md", "repo_name": "AlexJamesWright/MessWithDevs", "src_encoding": "UTF-8", "text": "# MessWithDevs\n\n[![Build Status](https://travis-ci.org/AlexJamesWright/MessWithDevs.svg?branch=master)](https://travis-ci.org/AlexJamesWright/MessWithDevs)\n\nWind up people who leave their computers unattended; they should probably learn\nif they haven't already!\n\n## What does it do?\n\nEver noticed how a Greek question mark (U+037E) looks a lot like a semicolon, `;`,\nand thought that this little coincidence could play havoc with compiling and\nrunning code?\n\nProbably not, but we had. We also took it further than necessary, by changing a\nhost of common operators and even more common characters to their indistinguishable\ndoppelgangers. The idea is that after running MessWithDevs on someone else's\ncode they will get a bunch, nay shittonne, of compiler errors and will have no\nidea why!\n\nCruel, right? Well we thought so too, so we have added a reversing method so they\ndon't lose their jobs. I know, boring, but probably for the best.\n\n## How?\n\nTo start messing with people, install on their machine with\n\n `pip install MessWithDevs`\n\nRun the following command to break their code:\n\n `mwd _filename_`\n\nand the following to reverse it:\n\n `mwd _filename_ -r`.\n\nIf the file extension you are after is not in the list, you can specify it with\nthe `-e` flag:\n\n `mwd _filename_ -e .exe`,\n\nthis flag must also be used when doing the reverse process.\n\nSimple.\n\nEnjoy!\n\n### Other stuff\n\nThis only works for python version 3.* unless you want to figure out a way to\nmake it work with older versions, but why would anyone use 2.* anyway?\n" } ]
5
tks007-github/book_review
https://github.com/tks007-github/book_review
d5230d4b4998c074e338e488db0fe3f4d82e5105
cf25c894bf877e4053bf557fd801e1683c1a714f
dab85cd0182b6faae856a6a791c1fd6640cb904e
refs/heads/master
2023-07-09T14:43:34.171518
2021-08-19T05:55:49
2021-08-19T05:55:49
382,786,680
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.605585515499115, "alphanum_fraction": 0.6124448776245117, "avg_line_length": 36.12727355957031, "blob_id": "e022dfad765232402ff27536b0a649e740fe3058", "content_id": "950bfbe1391a0f7a19903510aaf45ec95009c7cf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2265, "license_type": "permissive", "max_line_length": 108, "num_lines": 55, "path": "/review/forms.py", "repo_name": "tks007-github/book_review", "src_encoding": "UTF-8", "text": "from django import forms\nfrom django.core.mail import EmailMessage, message\nfrom django.forms import fields\nfrom .models import Review\n\nclass InquiryForm(forms.Form):\n name = forms.CharField(label='Name', max_length=30)\n email = forms.EmailField(label='Email address')\n title = forms.CharField(label='Title', max_length=30)\n message = forms.CharField(label='Message', widget=forms.Textarea)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.fields['name'].widget.attrs['class'] = 'form-control col-9'\n self.fields['name'].widget.attrs['placeholder'] = 'Enter your name here.'\n\n self.fields['email'].widget.attrs['class'] = 'form-control col-11'\n self.fields['email'].widget.attrs['placeholder'] = 'Enter your email address here.'\n\n self.fields['title'].widget.attrs['class'] = 'form-control col-11'\n self.fields['title'].widget.attrs['placeholder'] = 'Enter a title here.'\n\n self.fields['message'].widget.attrs['class'] = 'form-control col-12'\n self.fields['message'].widget.attrs['placeholder'] = 'Enter your message here.'\n\n def send_email(self):\n name = self.cleaned_data['name']\n email = self.cleaned_data['email']\n title = self.cleaned_data['title']\n message = self.cleaned_data['message']\n\n subject = 'Inquiry {}'.format(title)\n message = 'Sender name: {0}\\nEmail address: {1}\\nMessage:\\n{2}'.format(name, email, message)\n from_email = '[email protected]'\n to_list = [\n '[email protected]'\n ]\n cc_list = [\n email\n ] \n\n message = EmailMessage(subject=subject, body=message, from_email=from_email, to=to_list, cc=cc_list)\n message.send()\n\n\nclass ReviewCreateForm(forms.ModelForm):\n class Meta:\n model = Review\n fields = ('title', 'photo', 'content', 'review', 'evaluation')\n \n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n for field in self.fields.values():\n field.widget.attrs['class'] = 'form-control'" }, { "alpha_fraction": 0.5175619721412659, "alphanum_fraction": 0.5371900796890259, "avg_line_length": 20.595745086669922, "blob_id": "3acc0470535a45d1feaae04eeacc545cf2f87c49", "content_id": "3fc7ae63aa4bede5c266ee2b61fc9fc9be923b39", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1010, "license_type": "permissive", "max_line_length": 55, "num_lines": 24, "path": "/review/migrations/0008_alter_review_useful_review_record.py", "repo_name": "tks007-github/book_review", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2.3 on 2021-07-11 13:24\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('review', '0007_review_useful_review_record'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='review',\n name='useful_review_record',\n field=models.TextField(default='a'),\n ),\n 
]\n" }, { "alpha_fraction": 0.70138019323349, "alphanum_fraction": 0.70138019323349, "avg_line_length": 45.94117736816406, "blob_id": "8a699440fa2e593c396cedc3c75e005458631745", "content_id": "0a1041ddbdc981e160645268c10343280b42c776", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 797, "license_type": "permissive", "max_line_length": 92, "num_lines": 17, "path": "/review/urls.py", "repo_name": "tks007-github/book_review", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom django.urls.resolvers import URLPattern\n\nfrom . import views\nfrom .views import evaluationview\n\napp_name = 'review'\nurlpatterns = [\n path('', views.IndexView.as_view(), name='index'),\n path('inquiry/', views.InquiryView.as_view(), name='inquiry'),\n path('review-list/', views.ReviewListView.as_view(), name='review_list'),\n path('review-detail/<int:pk>/', views.ReviewDetailView.as_view(), name='review_detail'),\n path('review-create/', views.ReviewCreateView.as_view(), name='review_create'),\n path('review-update/<int:pk>/', views.ReviewUpdateView.as_view(), name='review_update'),\n path('review-delete/<int:pk>/', views.ReviewDeleteView.as_view(), name='review_delete'),\n path('evaluation/<int:pk>', evaluationview, name='evaluation'),\n]" }, { "alpha_fraction": 0.4812760055065155, "alphanum_fraction": 0.5270457863807678, "avg_line_length": 24.75, "blob_id": "60eebedc90efec76ec41af48a80b3ef81f042f6c", "content_id": "37ea28c995d675cd9389f69133aae3c19bdad560", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 741, "license_type": "permissive", "max_line_length": 107, "num_lines": 28, "path": "/review/migrations/0003_auto_20210710_2229.py", "repo_name": "tks007-github/book_review", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2.3 on 2021-07-10 13:29\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('review', '0002_auto_20210710_1730'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='reiview',\n old_name='image',\n new_name='photo',\n ),\n migrations.RenameField(\n model_name='reiview',\n old_name='author',\n new_name='user',\n ),\n migrations.AlterField(\n model_name='reiview',\n name='evaluation',\n field=models.CharField(choices=[('Good', 'Good'), ('Bad', 'Bad')], max_length=10, verbose_name='Evaluation'),\n ),\n ]\n" }, { "alpha_fraction": 0.667294979095459, "alphanum_fraction": 0.6720075607299805, "avg_line_length": 41.439998626708984, "blob_id": "1948b0de51287d391af2ba4bdb1f18305eef4f5e", "content_id": "37d27de642bd0669637fe8e46681f3374316efb6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1177, "license_type": "permissive", "max_line_length": 95, "num_lines": 25, "path": "/review/models.py", "repo_name": "tks007-github/book_review", "src_encoding": "UTF-8", "text": "from accounts.models import CustomUser\nfrom django.db import models\n\n# Create your models here.\n\nEVALUATION_CHOICES = [('☆☆☆', '☆☆☆: Good'), ('☆☆', '☆☆ : Average'), ('☆', '☆  : Bad')]\nclass Review(models.Model):\n \"\"\"Review model\"\"\"\n\n user = models.ForeignKey(CustomUser, verbose_name='User', on_delete=models.PROTECT)\n title = models.CharField(verbose_name='Title', max_length=40)\n content = models.TextField(verbose_name='Content')\n photo = 
models.ImageField(verbose_name='Photo', blank=True, null=True)\n # useful_review = models.IntegerField(verbose_name='Like', null=True, blank=True, default=0)\n # useful_review_record = models.TextField(default='a')\n review = models.TextField(verbose_name='Review')\n evaluation = models.CharField(verbose_name='Evaluation', max_length=10, choices=EVALUATION_CHOICES)\n created_at = models.DateTimeField(verbose_name='Created at', auto_now_add=True)\n updated_at = models.DateTimeField(verbose_name='Updated at', auto_now=True)\n\n class Meta:\n verbose_name_plural = 'Review'\n\n def __str__(self):\n return self.title\n" }, { "alpha_fraction": 0.4425887167453766, "alphanum_fraction": 0.4478079378604889, "avg_line_length": 24.91891860961914, "blob_id": "ee25fb1c9938afe98420c7a986b1bbd447aab621", "content_id": "52c1c22f96386acce7c2e3ebde3de0a65db1afd5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 974, "license_type": "permissive", "max_line_length": 90, "num_lines": 37, "path": "/review/templates/inquiry.html", "repo_name": "tks007-github/book_review", "src_encoding": "UTF-8", "text": "{% extends 'base2.html' %}\n\n{% block title %}Inquiry | Book Review{% endblock %}\n\n{% block active_inquiry %}active{% endblock %}\n\n{% block contents %}\n<div class=\"container\">\n <div class=\"row\">\n <div class=\"my-div-style\">\n <form method=\"post\">\n {% csrf_token %}\n\n {{ form.non_field_errors }}\n\n {% for field in form %}\n <div class=\"form-group row\">\n <label for=\"{{ field.id_for_label }}\" class=\"col-sm-2 col-form-label\">\n <strong>{{ field.label_tag }}</strong>\n </label>\n <div class=\"col-sm-8\">\n {{ field }}\n {{ field.errors }}\n <br>\n </div>\n </div>\n {% endfor %}\n <br>\n <div class=\"offset-sm-9 col-sm-8\">\n <button class=\"btn btn-primary\" type=\"submit\">Send</button>\n </div>\n <br>\n </form>\n</div>\n</div>\n</div>\n{% endblock %}" }, { "alpha_fraction": 0.5114504098892212, "alphanum_fraction": 0.5903307795524597, "avg_line_length": 21.83333396911621, "blob_id": "9c1f3779a34881abca75b6f8b4b60030177dbf85", "content_id": "12642bc35b21c66bc46976248bb738791b73631f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 393, "license_type": "permissive", "max_line_length": 47, "num_lines": 18, "path": "/review/migrations/0007_review_useful_review_record.py", "repo_name": "tks007-github/book_review", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2.3 on 2021-07-11 12:57\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('review', '0006_auto_20210711_1832'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='review',\n name='useful_review_record',\n field=models.TextField(null=True),\n ),\n ]\n" }, { "alpha_fraction": 0.5534883737564087, "alphanum_fraction": 0.6255813837051392, "avg_line_length": 21.63157844543457, "blob_id": "3041d93d2a19dc46031e6486aa8c4299064b3fae", "content_id": "43f965fbdfa4f8b3b98a9af4a05a2136920f09cd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 430, "license_type": "permissive", "max_line_length": 66, "num_lines": 19, "path": "/review/migrations/0004_rename_reiview_review.py", "repo_name": "tks007-github/book_review", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2.3 on 2021-07-11 02:15\n\nfrom django.conf import settings\nfrom django.db import 
migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('review', '0003_auto_20210710_2229'),\n ]\n\n operations = [\n migrations.RenameModel(\n old_name='Reiview',\n new_name='Review',\n ),\n ]\n" }, { "alpha_fraction": 0.5300429463386536, "alphanum_fraction": 0.5708154439926147, "avg_line_length": 21.190475463867188, "blob_id": "e7a92fbbe87463d0d9ad3ad0f8b87356023acdf7", "content_id": "3fe67fbe99578f1bfcfb2b9606977e1e8ea00f81", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 466, "license_type": "permissive", "max_line_length": 61, "num_lines": 21, "path": "/review/migrations/0009_auto_20210721_2232.py", "repo_name": "tks007-github/book_review", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2.5 on 2021-07-21 13:32\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('review', '0008_alter_review_useful_review_record'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='review',\n name='useful_review',\n ),\n migrations.RemoveField(\n model_name='review',\n name='useful_review_record',\n ),\n ]\n" }, { "alpha_fraction": 0.5301204919815063, "alphanum_fraction": 0.5555555820465088, "avg_line_length": 25.678571701049805, "blob_id": "2a214fe1f252d571d7f47fb3163bb14ae26128f7", "content_id": "a180960675548a471782637d0f1d954ba1ce232a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 759, "license_type": "permissive", "max_line_length": 92, "num_lines": 28, "path": "/review/migrations/0002_auto_20210710_1730.py", "repo_name": "tks007-github/book_review", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2.3 on 2021-07-10 08:30\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('review', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='reiview',\n name='content',\n field=models.TextField(verbose_name='ๅ†…ๅฎน'),\n ),\n migrations.AlterField(\n model_name='reiview',\n name='image',\n field=models.ImageField(blank=True, null=True, upload_to='', verbose_name='ๅ†™็œŸ'),\n ),\n migrations.AlterField(\n model_name='reiview',\n name='useful_review_record',\n field=models.TextField(verbose_name='่ฌ›่ฉ•'),\n ),\n ]\n" }, { "alpha_fraction": 0.6908348798751831, "alphanum_fraction": 0.6914626359939575, "avg_line_length": 31.520408630371094, "blob_id": "98c95edd6277635af54400ca397b9a3eb17510eb", "content_id": "8f30d494363ab5dd61bc997a20d2578411dc6c7a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3344, "license_type": "permissive", "max_line_length": 89, "num_lines": 98, "path": "/review/views.py", "repo_name": "tks007-github/book_review", "src_encoding": "UTF-8", "text": "from django import forms\nfrom django.shortcuts import redirect, render\nfrom django.views import generic\nfrom .forms import InquiryForm, ReviewCreateForm\nimport logging\nfrom django.urls import reverse_lazy\nfrom django.contrib import messages\nfrom django.contrib.auth.mixins import LoginRequiredMixin\n\nfrom .models import Review\nfrom review import models\n\nlogger = logging.getLogger(__name__)\n\n# Create your views here.\nclass IndexView(generic.TemplateView):\n template_name = 'index.html'\n\nclass InquiryView(generic.FormView):\n template_name = 'inquiry.html'\n form_class = 
InquiryForm\n success_url = reverse_lazy('review:inquiry')\n\n def form_valid(self, form):\n form.send_email()\n messages.success(self.request, 'ใƒกใƒƒใ‚ปใƒผใ‚ธใ‚’้€ไฟกใ—ใพใ—ใŸใ€‚')\n logger.info('Inquiry sent by {}'.format(form.cleaned_data['name']))\n return super().form_valid(form)\n\nclass ReviewListView(LoginRequiredMixin, generic.ListView):\n model = Review\n template_name = 'review_list.html'\n paginate_by = 5\n\n def get_queryset(self):\n # reviews = Review.objects.filter(user=self.request.user).order_by('-created_at')\n reviews = Review.objects.order_by('-created_at')\n return reviews\n\nclass ReviewDetailView(LoginRequiredMixin, generic.DetailView):\n model = Review\n template_name = 'review_detail.html'\n\nclass ReviewCreateView(LoginRequiredMixin, generic.CreateView):\n model = Review\n template_name = 'review_create.html'\n form_class = ReviewCreateForm\n success_url = reverse_lazy('review:review_list')\n\n def form_valid(self, form):\n review = form.save(commit=False)\n review.user = self.request.user\n review.save()\n messages.success(self.request, 'ใƒฌใƒ“ใƒฅใƒผใ‚’ไฝœๆˆใ—ใพใ—ใŸใ€‚')\n return super().form_valid(form)\n \n def form_invalid(self, form):\n messages.error(self.request, 'ใƒฌใƒ“ใƒฅใƒผใฎไฝœๆˆใซๅคฑๆ•—ใ—ใพใ—ใŸใ€‚')\n return super().form_invalid(form)\n\nclass ReviewUpdateView(LoginRequiredMixin, generic.UpdateView):\n model = Review\n template_name = 'review_update.html'\n form_class = ReviewCreateForm\n\n def get_success_url(self):\n return reverse_lazy('review:review_detail', kwargs={'pk': self.kwargs['pk']})\n \n def form_valid(self, form):\n messages.success(self.request, 'ใƒฌใƒ“ใƒฅใƒผใ‚’ๆ›ดๆ–ฐใ—ใพใ—ใŸใ€‚')\n return super().form_valid(form)\n \n def form_invalid(self, form):\n messages.error(self.request, 'ใƒฌใƒ“ใƒฅใƒผใฎๆ›ดๆ–ฐใซๅคฑๆ•—ใ—ใพใ—ใŸใ€‚')\n return super().form_invalid(form)\n\nclass ReviewDeleteView(LoginRequiredMixin, generic.DeleteView):\n model = Review\n template_name = 'review_delete.html'\n success_url = reverse_lazy('review:review_list')\n\n def delete(self, request, *args, **kwargs):\n messages.success(self.request, 'ใƒฌใƒ“ใƒฅใƒผใ‚’ๅ‰Š้™คใ—ใพใ—ใŸใ€‚')\n return super().delete(request, *args, **kwargs)\n\n\ndef evaluationview(request, pk):\n # NOTE: useful_review and useful_review_record were dropped from the Review\n # model in migration 0009, so this view raises AttributeError as written;\n # a per-user relation (sketched after this record) is a sturdier fix\n post = Review.objects.get(pk=pk)\n user = str(request.user)\n\n if user in post.useful_review_record:\n return redirect('review:review_list')\n else:\n post.useful_review = post.useful_review + 1\n post.useful_review_record = post.useful_review_record + user\n\n post.save()\n return redirect('review:review_list')" }, { "alpha_fraction": 0.4858934283256531, "alphanum_fraction": 0.537617564201355, "avg_line_length": 26.7391300201416, "blob_id": "f93c6614ffff120807e94c6c40e3131b01b36062", "content_id": "14a25b3ffbce702a2aadac977ee15726a29cca0e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 684, "license_type": "permissive", "max_line_length": 148, "num_lines": 23, "path": "/review/migrations/0006_auto_20210711_1832.py", "repo_name": "tks007-github/book_review", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2.3 on 2021-07-11 09:32\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('review', '0005_alter_review_user'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='review',\n old_name='useful_review_record',\n new_name='review',\n ),\n migrations.AlterField(\n model_name='review',\n name='evaluation',\n 
field=models.CharField(choices=[('โ˜†โ˜†โ˜†', 'โ˜†โ˜†โ˜†๏ผš่‰ฏใ„'), ('โ˜†โ˜†', 'โ˜†โ˜†\\u3000๏ผšๆ™ฎ้€š'), ('โ˜†', 'โ˜†\\u3000\\u3000๏ผšๆ‚ชใ„')], max_length=10, verbose_name='่ฉ•ไพก'),\n ),\n ]\n" } ]
12
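The `evaluationview` in the views.py above records "useful" votes by concatenating usernames into a text field. That is fragile twice over: migration 0009 drops the fields it relies on, and a substring test like `user in record` wrongly matches "ann" inside "joanna". Below is a minimal sketch of a per-user vote with a many-to-many relation; the `liked_by` field and `useful_count` property are illustrative names, not from the repo:

```python
from django.conf import settings
from django.db import models


class Review(models.Model):
    title = models.CharField(max_length=40)
    # One row per (user, review) pair; .add() is idempotent, so clicking
    # the button twice cannot double-count a vote.
    liked_by = models.ManyToManyField(settings.AUTH_USER_MODEL, blank=True,
                                      related_name='liked_reviews')

    @property
    def useful_count(self):
        return self.liked_by.count()
```

In the view, `review.liked_by.add(request.user)` then replaces the string bookkeeping, and the displayed count is just `review.useful_count`.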
afrankenthal/iDM-analysis-plotting
https://github.com/afrankenthal/iDM-analysis-plotting
4b7fc6214b2dc409fa3e43245955b6691fbe623d
64708b0d96c864229412d4f9f7236d08b34a6d14
5ea8f725ebd8ce3a5fa7547a9583bca304511030
refs/heads/master
2020-03-28T13:02:01.294247
2019-08-03T00:11:33
2019-08-03T00:11:33
148,359,606
1
1
null
2018-09-11T18:12:18
2020-03-09T16:48:35
2019-10-25T14:23:44
Jupyter Notebook
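The `HistogramCalculator.cutflows` method in the files below builds inclusive cut counts by folding per-event boolean flags together with `reduce(operator.and_, ...)`. A self-contained sketch of that pattern on made-up data (the `cutsVec[i]` column names mirror the ntuple branches; the random flags are purely illustrative):

```python
import operator
from functools import reduce

import numpy as np
import pandas as pd

rng = np.random.default_rng(1)
cuts = pd.DataFrame({f'cutsVec[{i}]': rng.integers(0, 2, size=1000) for i in range(4)})
crit = [cuts[col] == 1 for col in cuts]  # one boolean Series per cut

for cut in range(len(crit)):
    inclusive = reduce(operator.and_, crit[:cut + 1])  # cut 0 AND ... AND cut N
    exclusive = crit[cut]                              # cut N on its own
    print(cut, int(inclusive.sum()), int(exclusive.sum()))
```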
[ { "alpha_fraction": 0.5923188924789429, "alphanum_fraction": 0.5995004177093506, "avg_line_length": 48.020408630371094, "blob_id": "ac55bfc8a12078f9d75629290e4731dfd612ef8a", "content_id": "3a14994933b1419462de165f8764979e5a301e5c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9608, "license_type": "no_license", "max_line_length": 159, "num_lines": 196, "path": "/utils/PlotMaker.py", "repo_name": "afrankenthal/iDM-analysis-plotting", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"PlotMaker.py\n\nImplements class PlotMaker to make plots from HistogramContainers.\n\"\"\"\n\nimport numpy as np\nfrom skhep.visual import MplPlotter as skh_plt\nfrom collections import OrderedDict\n\nimport utils.HistogramContainer as HC\n\nclass Error(Exception):\n \"\"\"Base class for exceptions in this module.\"\"\"\n pass\n\nclass EmptyHistogramError(Error):\n \"\"\"Exception raised for empty histograms.\"\"\"\n def __init__(self, message):\n self.message = message\n\nclass PlotMaker:\n \"\"\"Class PlotMaker\n\n Given some HistogramContainer objects calculated by\n class HistogramCalculator, this class will generate\n the relevant physics plots that we are interested in.\n Two plotting functions are provided, plot_binned_data\n and plot_binned_data_error (preferred). The error version\n includes the sum of weights squared that was calculated\n together with the histograms, and uses the scikit-hep\n visual libraries for plotting errorbars.\n \"\"\"\n\n # Default plot options\n def_kwargs = {'density': True, 'log': True, 'histtype':'step'}\n \n def __init__(self, histos, bkgs, signals=[]):\n \"\"\"Parameters:\n\n histos (dict): a dict of dict of HistogramContainer objects\n \"\"\"\n\n self.kwargs = PlotMaker.def_kwargs.copy()\n self.histos = histos\n self.bkgs = bkgs\n self.mchis = signals\n \n def plot_binned_data(self, axis, bin_edges, data, *args, **kwargs):\n kwargs = kwargs + self.kwargs\n #The dataset values are the bin centres\n x = (bin_edges[1:] + bin_edges[:-1]) / 2.0\n #The weights are the y-values of the input binned data\n weights = data\n return axis.hist(x, bins=bin_edges, weights=weights, *args, **kwargs)\n\n def plot_binned_data_error(self, axis, bin_edges, data, wgt_sqrd, *args, **kwargs):\n binwidth = bin_edges[1] - bin_edges[0]\n errors = np.sqrt(wgt_sqrd)\n if 'density' in kwargs and kwargs['density'] == True:\n errors = errors/np.sum(data)/binwidth\n errors = errors.reindex(np.arange(1, len(bin_edges)), fill_value=0)\n #The dataset values are the bin centres\n x = (bin_edges[1:] + bin_edges[:-1]) / 2.0\n #The weights are the y-values of the input binned data\n weights = data\n return skh_plt.hist(x, ax=axis, bins=bin_edges, weights=weights, errorbars=errors, *args, **kwargs)\n\n def plot_stacked_binned_data_error(self, axis, bin_edges, data, wgt_sqrd, *args, **kwargs):\n errors = wgt_sqrd[0]\n for i in np.arange(1, len(wgt_sqrd)):\n errors = errors.add(wgt_sqrd[i], fill_value=0)\n errors = np.sqrt(errors)\n errors = np.array(errors.reindex(np.arange(1, len(bin_edges)), fill_value=0))\n #The dataset values are the bin centres\n x = (bin_edges[1:] + bin_edges[:-1]) / 2.0\n x = np.array([x]).repeat(len(data), axis=0)\n x = np.transpose(x)\n #The weights are the y-values of the input binned data\n weights = np.transpose(data)\n return skh_plt.hist(x, ax=axis, bins=bin_edges, weights=weights, errorbars=errors, stacked=True, *args, **kwargs)\n\n def make_group_stacked_plot(self, axis, plot_var, cut, *args, 
**kwargs):\n grp_histos = {}\n for bkg, properties in self.bkgs.items():\n grp = properties['group']\n if grp not in grp_histos:\n grp_histos[grp] = HC.HistogramContainer()\n # self.histos[plot_var][bkg].set_weight(properties['weight'])\n # FIXME placeholder while H.C. doesn't have set_weight\n grp_histos[grp].counts[cut] += self.histos[plot_var][bkg].counts[cut] * properties['weight']\n grp_histos[grp].edges = self.histos[plot_var][bkg].edges\n grp_histos[grp].wgt_sqrd[cut] = grp_histos[grp].wgt_sqrd[cut].add(self.histos[plot_var][bkg].wgt_sqrd[cut] * properties['weight']**2, fill_value=0)\n\n labels = []\n sorted_grp_histos = OrderedDict()\n sorted_keys = sorted(grp_histos, key=lambda obj: max(grp_histos[obj].counts[cut]))\n for key in sorted_keys:\n sorted_grp_histos[key] = grp_histos[key]\n labels.append(key)\n\n # Log doesn't seem to work with stacked option in scikit-hep\n # so we set it ourselves, nbd\n if 'log' in kwargs and kwargs['log'] == True:\n kwargs.pop('log')\n axis.set_yscale('log', nonposy='clip')\n\n if 'density' in kwargs and kwargs['density'] == True:\n binwidth = next(iter(sorted_grp_histos.values())).edges[1] - next(iter(sorted_grp_histos.values())).edges[0]\n max_vals = np.array([histo.get_max()[cut]/np.sum(histo.counts[cut])/binwidth for histo in sorted_grp_histos.values()])\n min_vals = np.array([histo.get_min()[cut]/np.sum(histo.counts[cut])/binwidth for histo in sorted_grp_histos.values()])\n max_val = 10*max(max_vals[~np.isnan(max_vals)])\n min_val = 0.1*min(min_vals[~np.isnan(min_vals)])\n else:\n max_val = 10*max([histo.get_max()[cut] for histo in sorted_grp_histos.values()])\n min_val = min([histo.get_min()[cut] for histo in sorted_grp_histos.values()])\n min_val = max(min_val, 1.0) #max(min(min_val, 1.0), 0.1)\n\n axis.set_ylim([min_val, max_val])\n\n self.plot_stacked_binned_data_error(\n axis, next(iter(self.histos[plot_var].values())).edges,\n np.array([sorted_grp_histos[grp].counts[cut] for grp in sorted_grp_histos]),\n [sorted_grp_histos[grp].wgt_sqrd[cut] for grp in sorted_grp_histos], label=labels, *args, **kwargs\n )\n \n def make_group_plot(self, axis, plot_var, cut, *args, **kwargs):\n \"\"\"Plots groups of backgrounds together\n \n Parameters: \n axis (Axis): axis to plot on\n plot_var (str): which physics variable to plot\n cut (int): which cut (0-5 usually) to plot\n \"\"\"\n\n new_kwargs = {**self.kwargs, **kwargs}\n grp_histos = {}\n for bkg, properties in self.bkgs.items():\n grp = properties['group']\n if grp not in grp_histos:\n grp_histos[grp] = HC.HistogramContainer()\n # self.histos[plot_var][bkg].set_weight(properties['weight'])\n # FIXME placeholder while H.C. 
doesn't have set_weight\n grp_histos[grp].counts[cut] += self.histos[plot_var][bkg].counts[cut] * properties['weight']\n grp_histos[grp].edges = self.histos[plot_var][bkg].edges\n grp_histos[grp].wgt_sqrd[cut] += self.histos[plot_var][bkg].wgt_sqrd[cut] * properties['weight']**2\n # grp_histos[grp] += self.histos[plot_var][bkg]\n\n for mchi in self.mchis:\n grp_histos[mchi] = HC.HistogramContainer()\n grp_histos[mchi] += self.histos[plot_var][mchi]\n \n if new_kwargs['density'] == False:\n max_val = 10*max([histo.get_max()[cut] for histo in grp_histos.values()])\n min_val = min([histo.get_min()[cut] for histo in grp_histos.values()])\n min_val = min(0.1*min_val, 1.0)\n else:\n binwidth = next(iter(grp_histos.values())).edges[1] - next(iter(grp_histos.values())).edges[0]\n max_vals = np.array([histo.get_max()[cut]/np.sum(histo.counts[cut])/binwidth for histo in grp_histos.values()])\n min_vals = np.array([histo.get_min()[cut]/np.sum(histo.counts[cut])/binwidth for histo in grp_histos.values()])\n max_val = 10*max(max_vals[~np.isnan(max_vals)])\n min_val = 0.1*min(min_vals[~np.isnan(min_vals)])\n\n for grp, histo in grp_histos.items():\n if not any(i > 0 for i in histo.counts[cut]): continue\n if grp in self.mchis:\n new_kwargs['ls'] = ':'\n #plot_binned_data(axis, edges[grp], counts[grp], label=grp, *args, **kwargs)\n self.plot_binned_data_error(axis, histo.edges, histo.counts[cut], histo.wgt_sqrd[cut], label=grp, *args, **new_kwargs)\n axis.set_ylim([min_val, max_val])\n \n def make_bkg_plot(self, axis, plot_var, cut, *args, **kwargs):\n \"\"\"Plots each background sample separately (e.g. QCD_HTXXtoYY)\n \n Parameters: \n axis (Axis): axis to plot on\n plot_var (str): which physics variable to plot\n cut (int): which cut (0-5 usually) to plot\n \"\"\"\n new_kwargs = {**self.kwargs, **kwargs}\n \n if new_kwargs['density'] == False:\n max_val = 10*max([histo.get_max()[cut] for bkg,histo in self.histos[plot_var].items()])\n min_val = min([histo.get_min()[cut] for histo in self.histos[plot_var].values()])\n min_val = min(0.1*min_val, 1.0)\n else:\n binwidth = next(iter(self.histos[plot_var].values())).edges[1] - next(iter(self.histos[plot_var].values())).edges[0]\n max_vals = np.array([histo.get_max()[cut]/np.sum(histo.counts[cut])/binwidth for histo in self.histos[plot_var].values()])\n min_vals = np.array([histo.get_min()[cut]/np.sum(histo.counts[cut])/binwidth for histo in self.histos[plot_var].values()])\n max_val = 10*max(max_vals[~np.isnan(max_vals)])\n min_val = 0.1*min(min_vals[~np.isnan(min_vals)])\n \n for bkg, histo in self.histos[plot_var].items():\n if bkg not in self.histos[plot_var] or not histo: continue\n # This is an instance method, so it needs the self. prefix (bare\n # plot_binned_data_error raised NameError when this path ran)\n self.plot_binned_data_error(axis, histo.edges, histo.counts[cut], histo.wgt_sqrd[cut], label=bkg, *args, **new_kwargs)\n axis.set_ylim([min_val, max_val])\n" }, { "alpha_fraction": 0.4794303774833679, "alphanum_fraction": 0.48813292384147644, "avg_line_length": 41.13333511352539, "blob_id": "0df85608ecba679249ec2a48ca4269aaf67a3ab2", "content_id": "a94477a1d45ad2615d2ba87982553882d676916e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1264, "license_type": "no_license", "max_line_length": 114, "num_lines": 30, "path": "/make_html_listing_tres.py", "repo_name": "afrankenthal/iDM-analysis-plotting", "src_encoding": "UTF-8", "text": "#!/bin/env python3\nimport sys\nfrom pathlib import Path\n\nif __name__ == '__main__':\n plot_dir = Path(sys.argv[1])\n html_dir = Path(f'{plot_dir}')\n #html_dir = 
Path(f'/publicweb/a/as2872/iDMPlots/{plot_dir}')\n if (not html_dir.exists()):\n html_dir.mkdir()\n\n html_file = Path(html_dir/'index.html')\n with html_file.open('wt') as index:\n index.write('<html><head></head><body><pre>')\n index.write('<a href=\"..\">.. (parent directory)</a><br>')\n for i, f in enumerate(sorted(html_dir.glob('*'))):\n if f.name != 'index.html':\n print(i, f.name)\n index.write(f'<a href=\"#{f.name}\">{i}</a>')\n index.write(f' ')\n index.write(f'<a href=\"{f.name}\">{f.name}</a><br>')\n\n for i, f in enumerate(sorted(html_dir.glob('*'))):\n if f.name != 'index.html':\n index.write(f'<h4 id=\"{f.name}\"><a href=\"#{f.name}\">{f.name}</a></h4><br>')\n if f.suffix == '.png':\n index.write(f'<a href=\"{f.name}\"><img src=\"{f.name}\" style=\"max-width: 600px\"></a><br><br>\\n')\n else:\n index.write(f'<a href=\"{f.name}\">{f.name}</a><br><br>\\n')\n index.write('</pre></body></html>\\n')\n" }, { "alpha_fraction": 0.580452561378479, "alphanum_fraction": 0.5861093401908875, "avg_line_length": 36.880950927734375, "blob_id": "50609484e3a3eeb351bc10596cfe84db753941b2", "content_id": "094eccc512287c7042b93f938d3c0a5743ca87a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3182, "license_type": "no_license", "max_line_length": 101, "num_lines": 84, "path": "/utils/HistogramContainer.py", "repo_name": "afrankenthal/iDM-analysis-plotting", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"HistogramContainer.py\n\nImplements class HistogramContainer to hold histogram results computed by\nclass HistogramCalculator and progressively add statistics.\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\nclass HistogramContainer:\n \"\"\"Class HistogramContainer\n\n This class keeps track of counts, edges, and weight squared\n for a given observable histogram. It has an add method that\n allows 2 such objects to easily sum together (useful when\n iterating over many files).\n \"\"\"\n\n def __init__(self, bins=None, numCuts=np.arange(0,16), weight=1):\n \"\"\"Parameters:\n\n bins (int): number of bins for this histogram\n If bins not given, default is 60 and set\n init to false (i.e. object not initialized yet)\n numCuts (numpy array): array with index of cuts\n weight: takes into account xsec, lumi, and relative gen. weight\n \"\"\"\n\n self.numCuts = numCuts\n if bins is None:\n self.init = False\n self.bins = 60\n else:\n self.init = True\n self.bins = bins\n self.counts = {}\n self.wgt_sqrd = {}\n self.edges = np.zeros(self.bins+1)\n for cut in self.numCuts:\n self.counts[cut] = np.zeros(self.bins)\n self.wgt_sqrd[cut] = pd.Series([])\n self.weight = weight\n \n def __add__(self, new_hists):\n # Can only add 2 HistogramContainer objects together,\n # or a list of counts, edges, and wgt_squared to a H. C.\n if not isinstance(new_hists, list) and not type(new_hists).__name__ == 'HistogramContainer':\n raise TypeError(f'Trying to add non-list and non-HistogramContainer object'\n f'to HistogramContainer! 
Type: {type(new_hists)}')\n \n if type(new_hists).__name__ == 'HistogramContainer':\n new_obj = HistogramContainer(new_hists.bins)\n for cut in self.numCuts:\n new_obj.counts[cut] = new_hists.counts[cut] + self.counts[cut]\n new_obj.wgt_sqrd[cut] = new_hists.wgt_sqrd[cut].add(self.wgt_sqrd[cut], fill_value=0)\n new_obj.edges = new_hists.edges\n elif isinstance(new_hists, list):\n new_obj = HistogramContainer(len(new_hists[0][0]))\n for cut, (counts, edges, wgt_sqrd) in enumerate(new_hists):\n new_obj.counts[cut] = self.counts[cut] + counts\n new_obj.wgt_sqrd[cut] = self.wgt_sqrd[cut].add(wgt_sqrd, fill_value=0)\n if cut == 0:\n new_obj.edges = edges\n new_obj.calc_max_min()\n return new_obj\n\n def set_weight(self, weight):\n self.weight = weight\n for cut in self.numCuts:\n self.counts[cut] *= self.weight\n self.wgt_sqrd[cut] *= self.weight**2\n \n def calc_max_min(self):\n self.max = {cut:max(self.counts[cut]) for cut in self.numCuts}\n self.min = {cut:min(self.counts[cut]) for cut in self.numCuts}\n \n def get_max(self):\n self.calc_max_min()\n return self.max\n \n def get_min(self):\n self.calc_max_min()\n return self.min\n" }, { "alpha_fraction": 0.6174957156181335, "alphanum_fraction": 0.6266957521438599, "avg_line_length": 41.753334045410156, "blob_id": "e784a24bc09faecc2b5049dc1f3992662d7df067", "content_id": "ec0a1d7cdc2f61f7bbaa12cbc4e77383bfb1b16b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6413, "license_type": "no_license", "max_line_length": 117, "num_lines": 150, "path": "/utils/HistogramCalculator.py", "repo_name": "afrankenthal/iDM-analysis-plotting", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"HistogramCalculator.py \n\nImplements HistogramCalculator class and some helper functions\nto compute more complex observables with pandas apply method.\n\"\"\"\n\nimport math\nimport numpy as np\nimport pandas as pd\nimport operator\nfrom functools import reduce\nimport multiprocessing\nimport concurrent.futures\n\nnum_cores = multiprocessing.cpu_count()\n\n# Helper functions to calculate average angles\n# This takes a few seconds to run, since we \n# are using the apply method\n\ndef parallelize(data, func):\n# data_split = np.array_split(data, partitions)\n pool = multiprocessing.Pool(int(num_cores/2))\n# data = pd.concat(pool.map(func, data_split))\n data = pd.concat(pool.map(func, [group for name, group in data]))\n pool.close()\n pool.join()\n return data\n\ndef calcAvgAngle(group):\n # FIXME need to ensure at least 2 muons (otherwise index -1 == 0)\n x = np.cos(group['reco_mu_phi'].iloc[0]) + np.cos(group['reco_mu_phi'].iloc[-1])\n y = np.sin(group['reco_mu_phi'].iloc[0]) + np.sin(group['reco_mu_phi'].iloc[-1])\n return math.atan2(y/2, x/2)\n\ndef func_group_apply(df):\n # Applies above function on event-by-event basis\n return df.groupby('entry').apply(calcAvgAngle)\n\ndef reducephi(row):\n # Helper function to normalize angle differences to [-Pi, Pi]\n # cond: if abs(phidiff) > Pi => phidiff = phidiff - 2*Pi*sign(phidiff)\n if abs(row) > math.pi:\n return row - 2*math.pi*(row/abs(row))\n return row\n\nclass HistogramCalculator:\n \"\"\"Class HistogramCalculator -- computes physics histograms\n\n Aggregates the data into histograms, computing the relevant\n observables. 
For each file in background or signal samples,\n the class HistogramCalculator is called to compute the histograms.\n \"\"\"\n\n def __init__(self, objects, sample_name, sample_type='', numCuts=np.arange(0,16)):\n \"\"\"Parameters:\n objects (dict): dict of pandas dataframes\n sample_name (str): which bkg or signal sample\n sample_type (str): 'bkg' or 'signal'\n numCuts (numpy array): array of cut indexes\n \"\"\"\n \n self.sample_name = sample_name\n self.sample_type = sample_type\n self.objects = objects\n self.numCuts = numCuts\n self.cuts = objects['cuts']\n self.cuts_crit = objects['cutsCrit']\n self.MET = objects['MET']\n self.muons = objects['muons']\n self.jet = objects['leadingJet']\n self.vertex = objects['vertex'].reset_index()\n try:\n self.genwgt = objects['gen_wgt']\n except KeyError:\n self.genwgt = pd.Series(np.ones(len(self.cuts)))\n self.genwgt = self.genwgt.rename('gen_wgt')\n self.genwgt.index.name = 'entry'\n \n def cutflows(self):\n incl = np.zeros(len(self.numCuts))\n excl = np.zeros(len(self.numCuts))\n for cut in self.numCuts:\n # For inclusive, apply boolean '&&' with all cuts from 0 to cut\n cuts_to_apply = reduce(operator.and_, self.cuts_crit[0:cut+1])\n incl[cut] = len(self.cuts[cuts_to_apply])\n # For exclusive, apply each cut separately\n cuts_to_apply = self.cuts_crit[cut]\n excl[cut] = len(self.cuts[cuts_to_apply])\n return (incl, excl)\n \n def compute_hist(self, variable_df, **kwargs):\n # Given a dataframe for some observable, adds the\n # gen weight and computes the histogram for it\n if 'bins' not in kwargs:\n kwargs['bins'] = 60\n temp_df = pd.concat([variable_df, self.genwgt], axis=1).dropna()\n temp_df['genwgt_sqrd'] = temp_df['gen_wgt']**2\n counts = {}; edges = {}; wgt_sqrd = {}\n for cut in self.numCuts:\n cuts_to_apply = slice(None) if self.cuts_crit is None else reduce(operator.and_, self.cuts_crit[0:cut+1])\n selected = temp_df[cuts_to_apply]\n kwargs['weights'] = selected['gen_wgt']\n counts[cut], edges[cut] = np.histogram(selected[variable_df.name], **kwargs)\n # Digitizes data to find out which bin of histogram each row falls in\n bin_idxs = np.digitize(selected[variable_df.name], edges[cut])\n # Sums the gen weights squared per bin (for errors); grouping the selected\n # rows directly keeps the bin indexes aligned with their weights, which\n # assigning a fresh pd.Series back onto temp_df did not guarantee\n wgt_sqrd[cut] = selected['genwgt_sqrd'].groupby(bin_idxs).sum()\n return list(zip(counts.values(), edges.values(), wgt_sqrd.values()))\n \n def metmuphi(self):\n # Divide data into 'chunks' to more efficiently use parallelization\n muons = self.muons.reset_index()\n muons['data_chunk'] = muons['entry'].mod(int(num_cores * 3 / 2)) # num_cores/2 * 3 chunks/core\n muons = muons.set_index(['entry'])\n # Here, group by data_chunk instead of entry, inside func_group_apply \n # we also have a groupby('entry')\n avg_muon_angle = parallelize(muons.groupby('data_chunk'), func_group_apply)\n angle_diff = (self.MET['reco_PF_MET_phi'].dropna() - avg_muon_angle).dropna()\n reduced_angle_diff = angle_diff.apply(reducephi).dropna()\n reduced_angle_diff.name = 'reducedAngleDiff'\n return self.compute_hist(reduced_angle_diff, range=(-math.pi, math.pi))\n \n def metjetphi(self):\n angle = (self.MET['reco_PF_MET_phi'] - self.jet['reco_PF_jet_phi']).dropna()\n reduced_angle = angle.apply(reducephi)\n reduced_angle.name = 'reducedAngle'\n return self.compute_hist(reduced_angle, range=(-math.pi, math.pi))\n \n def metpt(self):\n return self.compute_hist(self.MET['reco_PF_MET_pt'], 
range=(0,2500))\n \n def jetpt(self):\n return self.compute_hist(self.jet['reco_PF_jet_pt'], range=(0,2500))\n \n def leadingmupt(self):\n return self.compute_hist(self.muons['reco_mu_pt'].groupby('entry').nth(0), range=(0,700))\n\n def subleadingmupt(self):\n return self.compute_hist(self.muons['reco_mu_pt'].groupby('entry').nth(1), range=(0,700))\n\n def recodr(self):\n return self.compute_hist(self.vertex['reco_vertex_dR'], range=(0,2*math.pi))\n\n def recovertex(self):\n vertex = np.sqrt(self.vertex['reco_vertex_vxy']**2 + self.vertex['reco_vertex_vz']**2)\n vertex.name = 'vertex'\n return self.compute_hist(vertex, range=(0,300))\n" }, { "alpha_fraction": 0.6113000512123108, "alphanum_fraction": 0.614962100982666, "avg_line_length": 41.477779388427734, "blob_id": "1f8747bf8ee4b4d562a6ef93784410c91e0f175d", "content_id": "b6386f9017a11c5ffcb8db41adf1bf9f6e01b62d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3823, "license_type": "no_license", "max_line_length": 105, "num_lines": 90, "path": "/utils/ObjectExtractor.py", "repo_name": "afrankenthal/iDM-analysis-plotting", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"ObjectExtractor.py\n\nImplements class ObjectExtractor to extract the physics objects and \ntheir kinematic observables from an uproot tree, opened from some file.\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\nclass ObjectExtractor:\n \"\"\"Class ObjectExtractor\n\n Extracts all relevant physics properties from\n pandas dataframes, created from an uproot-loaded tree.\n \"\"\"\n\n def configure_dfs(self):\n ### Objects with same dimension in dataframe are imported together\n ### I.e. MET, cuts only have 1 entry, muons can have up to 2, jets more\n #self.dim1entries_bkg = ['recoPFMetPt', 'recoPFMetPhi', 'cutsVec*', 'genwgt']\n #self.dim1entries_sig = ['recoPFMetPt', 'recoPFMetPhi', 'cutsVec*']\n #self.dimVertex = ['recoDr', 'recoVxy', 'recoVz']\n #self.dimMu = ['recoPt', 'recoEta', 'recoPhi']\n #self.dimJet = ['recoPFJetPt', 'recoPFJetEta', 'recoPFJetPhi']\n self.dim1entries_bkg = ['reco_PF_MET_pt', 'reco_PF_MET_phi', 'cutsVec*', 'gen_wgt']\n self.dim1entries_sig = ['reco_PF_MET_pt', 'reco_PF_MET_phi', 'cutsVec*']\n self.dimVertex = ['reco_vertex_dR', 'reco_vertex_vxy', 'reco_vertex_vz']\n self.dimMu = ['reco_mu_pt', 'reco_mu_eta', 'reco_mu_phi']\n self.dimJet = ['reco_PF_jet_pt', 'reco_PF_jet_eta', 'reco_PF_jet_phi']\n \n def __init__(self, uproot_tree, sample_name='', executor=None):\n \"\"\"Parameters:\n\n uproot_tree (uproot object): tree loaded from a ROOT file\n sample_name (str): bkg or signal sample name\n executor (Executor): for concurrency (i.e. 
futures module)\n \"\"\"\n self.configure_dfs()\n self.sample_name = sample_name\n \n self.uproot_tree = uproot_tree\n self.vertex_df = self.uproot_tree.pandas.df(self.dimVertex, executor=executor)\n self.muons_df = self.uproot_tree.pandas.df(self.dimMu, executor=executor)\n self.jets_df = self.uproot_tree.pandas.df(self.dimJet, executor=executor)\n try:\n self.one_dim_entries_df = self.uproot_tree.pandas.df(self.dim1entries_bkg, executor=executor)\n except KeyError:\n self.one_dim_entries_df = self.uproot_tree.pandas.df(self.dim1entries_sig, executor=executor)\n \n def get_muons(self):\n return self.muons_df.reset_index(level=1)\n \n def get_MET(self):\n return self.one_dim_entries_df[['reco_PF_MET_pt', 'reco_PF_MET_phi']]\n \n def get_leading_jet(self):\n return self.jets_df.loc[(slice(None),0),slice(None)].reset_index(level=1)\n \n def get_cuts(self):\n return self.one_dim_entries_df.filter(regex='cutsVec')#[[f'cutsVec[{cut}]' for cut in numCuts]]\n \n def get_cuts_crit(self):\n cuts_df = self.get_cuts()\n return [ cuts_df[column] == 1 for column in cuts_df ]\n \n def get_vertex(self):\n return self.vertex_df.reset_index(level=1)\n \n def get_weights(self):\n try:\n pileup_df = self.one_dim_entries_df[['gen_wgt']] # will also include pileup when available\n genwgt_df = pileup_df['gen_wgt']\n except KeyError:\n print(f'Sample \"{self.sample_name}\" does not have either pileup or weight information')\n genwgt_df = pd.Series(np.ones((len(self.one_dim_entries_df))))\n genwgt_df = genwgt_df.rename('gen_wgt')\n genwgt_df.index.name = 'entry'\n return genwgt_df\n \n def get_all(self):\n objects = {}\n objects['MET'] = self.get_MET()\n objects['cuts'] = self.get_cuts()\n objects['cutsCrit'] = self.get_cuts_crit()\n objects['vertex'] = self.get_vertex()\n objects['muons'] = self.get_muons()\n objects['leadingJet'] = self.get_leading_jet()\n objects['gen_wgt'] = self.get_weights()\n return objects\n" }, { "alpha_fraction": 0.7355102300643921, "alphanum_fraction": 0.7571428418159485, "avg_line_length": 57.33333206176758, "blob_id": "133839a3c37666444aea4d650047691bb3327adf", "content_id": "0f0247e1047ec23cd79ad2110045ca044215a517", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2450, "license_type": "no_license", "max_line_length": 308, "num_lines": 42, "path": "/README.md", "repo_name": "afrankenthal/iDM-analysis-plotting", "src_encoding": "UTF-8", "text": "# iDM-analysis-plotting\niDM analysis code (plotting and cutflows)\n\n### Run jupyter on the LPC\n\nIt is possible to run available versions of jupyter on the LPC. This is in lieu of having to install jupyter on your local machine. The idea is to run the jupyter notebook server on the LPC and connect to it via your local computer browser. For that to work you need to SSH-tunnel the notebook server's port.\n\nTo set this up:\n\n1) SSH tunnel. SSH to LPC machines and add port tunneling:\n\n ```shell\n $ ssh -L 8888:localhost:8888 [email protected]\n ```\n\n 8888 is the default port for jupyter notebooks.\n2) Jupyter notebook server. Once inside an LPC machine, there are two options for enabling jupyter notebooks:\n\n - Python2: this is the easiest way and comes bundled with CMSSW, but it only offers Python2 support. After running `cmsenv` inside a CMSSW release, type: `jupyter notebook --no-browser`\n \n - Python3: Python3 has a lot of nifty features that are worth using, but it doesn't come with CMSSW except for the very latest releases (10.1.X I believe). 
To enable it in the LPC (note this is outside CMSSW):\n \n ```shell\n source /cvmfs/sft.cern.ch/lcg/views/LCG_92python3/x86_64-slc6-gcc62-opt/setup.sh\n export PYTHONPATH=/cvmfs/sft.cern.ch/lcg/views/LCG_92python3/x86_64-slc6-gcc62-opt/lib/python3.6/site-packages:$PYTHONPATH\n ```\n \n Release LCG_92 already comes with jupyter too, so after sourcing it you can just type `jupyter notebook --no-browser` to run the server.\n \n **NOTE:** If you choose Python3 and then afterwards set up a CMSSW environment, it will mess with your jupyter configuration. If you need to use both at the same time, make sure CMSSW is set up _before_ the LCG_92 release. \n \n \n \n3) Access the notebook server on your browser. After the notebook server is set up, it will give you a link to open (in the form `http://localhost:8888...`). Copy that link and paste it into your browser and you'll enter the jupyter notebook environment, and you're ready to go.\n\n\n### Scripts\n- BeamHalo: plots of track quality information and table of beam halo summary\n- GenKinematicsNew: studies of the Gen information\n- PlotSignalBkgs: plots the cutflow for the signal and background from data files containing the summary from SROptimizationAnalysisFull\n- QCDCorrelationStudies:\n- SROptimizationAnalysisFull: loads all the background scripts and dumps the information into data files to be read by PlotSignalBkgs\n" }, { "alpha_fraction": 0.7374100685119629, "alphanum_fraction": 0.7410072088241577, "avg_line_length": 45, "blob_id": "d8e25c450e587e298cc6c4e451f9658fa170dc54", "content_id": "5bb77d657a4ef57a0cd820fe070fa010c494628c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 278, "license_type": "no_license", "max_line_length": 93, "num_lines": 6, "path": "/publishplots.sh", "repo_name": "afrankenthal/iDM-analysis-plotting", "src_encoding": "UTF-8", "text": "#!/bin/bash\nrm -r /publicweb/m/mreid/iDM_AN_Plots/plots\ncp -r plots /publicweb/m/mreid/iDM_AN_Plots/\npython make_html_listing.py /publicweb/m/mreid/iDM_AN_Plots/plots/GenKinematics\n\nfind /publicweb/m/mreid/iDM_AN_Plots/plots -mindepth 0 -type d -exec cp plots/.htaccess {} \\;\n\n\n" } ]
7
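PlotMaker and HistogramCalculator above carry a sum-of-weights-squared alongside each bin count so that error bars can be drawn as sqrt(Σw²) per bin. A standalone NumPy sketch of that bookkeeping (the sample values and weights are made up for illustration):

```python
import numpy as np

rng = np.random.default_rng(0)
values = rng.exponential(scale=100.0, size=10_000)   # stand-in for e.g. MET pt
weights = rng.uniform(0.5, 1.5, size=values.shape)   # stand-in for gen weights

# Weighted bin contents, then the sum of squared weights with the same edges:
# the statistical error of a weighted count is sqrt(sum of w_i**2) per bin.
counts, edges = np.histogram(values, bins=60, range=(0, 2500), weights=weights)
wgt_sqrd, _ = np.histogram(values, bins=edges, weights=weights**2)
errors = np.sqrt(wgt_sqrd)

centers = 0.5 * (edges[:-1] + edges[1:])
for c, n, e in zip(centers[:3], counts[:3], errors[:3]):
    print(f"bin at {c:6.1f}: {n:8.1f} +/- {e:6.1f}")
```

Calling `np.histogram` a second time with `weights**2` and the shared `edges` is an alternative to the digitize-and-group bookkeeping that `compute_hist` does by hand.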
hukkelas/DCGAN-tensorflow
https://github.com/hukkelas/DCGAN-tensorflow
dd84e6dd08e911ff37a5a2cacd0f32adacf88358
b5b5e07ff4d2c388c179ba128ff0835860ee3b09
4fa4b2c00f577781af1b476dc6d49ac07447a6a3
refs/heads/master
2021-08-28T20:16:59.140946
2017-12-13T03:40:09
2017-12-13T03:40:09
113,635,976
0
0
null
2017-12-09T02:41:15
2017-12-08T17:39:47
2017-11-22T10:45:30
null
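The model.py in the record below derives the generator's intermediate resolutions with `conv_out_size_same`, i.e. ceiling division by the stride, so that four stride-2 deconvolutions double back up exactly to the target size. A quick standalone check mirroring the helper defined at the top of model.py (the 64x64 starting size matches the pokemon dataset it loads):

```python
import math

def conv_out_size_same(size, stride):
    return int(math.ceil(float(size) / float(stride)))

sizes = [64]                 # e.g. 64x64 input images
for _ in range(4):           # four stride-2 layers in the generator
    sizes.append(conv_out_size_same(sizes[-1], 2))
print(sizes)                 # [64, 32, 16, 8, 4]
```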
[ { "alpha_fraction": 0.5574535727500916, "alphanum_fraction": 0.5876302719116211, "avg_line_length": 34.65428161621094, "blob_id": "cfe371f1e6e43f0c39a4e59f325265e01d860431", "content_id": "042c2b3d6f3482a08d42edd41e68e47678ed44cf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 22070, "license_type": "permissive", "max_line_length": 137, "num_lines": 619, "path": "/model.py", "repo_name": "hukkelas/DCGAN-tensorflow", "src_encoding": "UTF-8", "text": "from __future__ import division\nimport os\nimport time\nimport math\nfrom glob import glob\nimport tensorflow as tf\nimport numpy as np\nfrom six.moves import xrange\nimport scipy.misc\nfrom ops import *\nfrom utils import *\nimport matplotlib.pyplot as plt \nimport csv\nfrom sklearn.preprocessing import OneHotEncoder\ndef conv_out_size_same(size, stride):\n return int(math.ceil(float(size) / float(stride)))\n\nclass DCGAN(object):\n def __init__(self, sess, crop=True,\n batch_size=64, sample_num = 64,\n y_dim=None, z_dim=100, gf_dim=64, df_dim=64,\n gfc_dim=2048, dfc_dim=1024, c_dim=3, dataset_name='default',\n input_fname_pattern='*.jpg', checkpoint_dir=None, sample_dir=None, imsize= 28,\n gen_activation_function=tf.nn.tanh, model=\"fc\", wgan=False):\n \"\"\"\n\n Args:\n sess: TensorFlow session\n batch_size: The size of batch. Should be specified before training.\n y_dim: (optional) Dimension of dim for y. [None]\n z_dim: (optional) Dimension of dim for Z. [100]\n gf_dim: (optional) Dimension of gen filters in first conv layer. [64]\n df_dim: (optional) Dimension of discrim filters in first conv layer. [64]\n gfc_dim: (optional) Dimension of gen units for for fully connected layer. [1024]\n dfc_dim: (optional) Dimension of discrim units for fully connected layer. [1024]\n c_dim: (optional) Dimension of image color. For grayscale input, set to 1. 
[3]\n model: (optional) Fully connected or convolutional [fc, cond]\n \"\"\"\n self.model = model\n self.wgan = wgan # stored so build_model can select the WGAN losses\n self.sess = sess\n self.gen_activation_function = gen_activation_function\n self.batch_size = batch_size\n \n self.imsize = imsize\n\n self.y_dim = y_dim\n self.z_dim = z_dim\n self.sample_num = 5\n\n self.gf_dim = gf_dim\n self.df_dim = df_dim\n\n self.gfc_dim = gfc_dim\n self.dfc_dim = dfc_dim\n\n # batch normalization : deals with poor initialization helps gradient flow\n self.d_bn1 = batch_norm(name='d_bn1')\n self.d_bn2 = batch_norm(name='d_bn2')\n\n\n self.d_bn3 = batch_norm(name='d_bn3')\n\n self.g_bn0 = batch_norm(name='g_bn0')\n self.g_bn1 = batch_norm(name='g_bn1')\n self.g_bn2 = batch_norm(name='g_bn2')\n\n self.keep_prob = tf.placeholder(tf.float32)\n self.g_bn3 = batch_norm(name='g_bn3')\n\n self.dataset_name = dataset_name\n self.input_fname_pattern = input_fname_pattern\n self.checkpoint_dir = checkpoint_dir\n\n if self.dataset_name == 'mnist':\n self.data_X, self.data_y = self.load_mnist()\n self.c_dim = self.data_X[0].shape[-1]\n\n elif self.dataset_name == 'pokemon/64x64x3':\n self.data_y = self.load_pokemon_y()\n self.data = glob(os.path.join(\"./data\", self.dataset_name, self.input_fname_pattern))\n selected = [199, 196, 210, 238, 240, 239, 237, 224, 378, 377, 370, 364, 390, 376, 438, 454, 450, 449,\n 291, 317, 335, 402, 423, 466, 479, 518, 529, 581, 609, 655, 646, 743, 754, 753, 735, 749]\n self.data_X = np.zeros((802,self.imsize, self.imsize, c_dim))\n #print self.data_y[0:6] * np.arange(1,19)\n self.data_y = self.data_y#[selected]\n \n for path in self.data:\n i = int(path.split(\"/\")[-1].split(\".\")[0]) -1 \n im = imread(path)\n self.data_X[i] = im / 255\n self.data_X = self.data_X#[selected]\n \n imreadImg = imread(self.data[0])\n if len(imreadImg.shape) >= 3: #check if image is a non-grayscale image by checking channel number\n self.c_dim = imread(self.data[0]).shape[-1]\n else:\n self.c_dim = 1\n else:\n self.data = glob(os.path.join(\"./data\", self.dataset_name, self.input_fname_pattern))\n\n imreadImg = imread(self.data[0])\n if len(imreadImg.shape) >= 3: #check if image is a non-grayscale image by checking channel number\n self.c_dim = imread(self.data[0]).shape[-1]\n else:\n self.c_dim = 1\n\n self.grayscale = (self.c_dim == 1)\n\n self.build_model()\n\n def build_model(self):\n if self.y_dim:\n self.y = tf.placeholder(tf.float32, [None, self.y_dim], name='y')\n else:\n self.y = None\n\n\n image_dims = [self.imsize, self.imsize, self.c_dim]\n\n self.inputs = tf.placeholder(\n tf.float32, [self.batch_size] + image_dims, name='real_images')\n\n inputs = self.inputs\n\n self.z = tf.placeholder(\n tf.float32, [None, self.z_dim], name='z')\n self.z_sum = histogram_summary(\"z\", self.z)\n\n self.G = self.generator(self.z, self.y)\n self.D, self.D_logits = self.discriminator(inputs, self.y, reuse=False)\n self.sampler = self.sampler(self.z, self.y)\n self.D_, self.D_logits_ = self.discriminator(self.G, self.y, reuse=True)\n \n self.d_sum = histogram_summary(\"d\", self.D)\n self.d__sum = histogram_summary(\"d_\", self.D_)\n self.G_sum = image_summary(\"G\", self.G)\n\n def sigmoid_cross_entropy_with_logits(x, y):\n try:\n return tf.nn.sigmoid_cross_entropy_with_logits(logits=x, labels=y)\n except:\n return tf.nn.sigmoid_cross_entropy_with_logits(logits=x, targets=y)\n\n self.d_loss_real = tf.reduce_mean(\n sigmoid_cross_entropy_with_logits(self.D_logits, tf.ones_like(self.D)))\n self.d_loss_fake = tf.reduce_mean(\n 
sigmoid_cross_entropy_with_logits(self.D_logits_, tf.zeros_like(self.D_)))\n\n \n # Compare with matching [batch, 1] shapes; squeezing only one side would\n # broadcast [batch] against [batch, 1] into a [batch, batch] comparison\n self.accuracy_real = tf.reduce_mean(tf.cast(tf.equal(tf.round(self.D), tf.ones_like(self.D)), tf.float32))\n self.accuracy_fake = tf.reduce_mean(tf.cast(tf.equal(tf.round(self.D_), tf.zeros_like(self.D_)), tf.float32))\n \n self.d_loss_real_sum = scalar_summary(\"d_loss_real\", self.d_loss_real)\n self.d_loss_fake_sum = scalar_summary(\"d_loss_fake\", self.d_loss_fake)\n \n \n if self.wgan:\n # NOTE: a full WGAN critic also needs weight clipping or a gradient penalty\n self.d_loss = tf.reduce_mean(self.D_logits) - tf.reduce_mean(self.D_logits_)\n self.g_loss = tf.reduce_mean(self.D_logits_)\n else:\n self.g_loss = tf.reduce_mean(\n sigmoid_cross_entropy_with_logits(self.D_logits_, tf.ones_like(self.D_))) \n self.d_loss = self.d_loss_real + self.d_loss_fake\n \n self.g_loss_sum = scalar_summary(\"g_loss\", self.g_loss)\n self.d_loss_sum = scalar_summary(\"d_loss\", self.d_loss)\n\n t_vars = tf.trainable_variables()\n\n self.d_vars = [var for var in t_vars if 'd_' in var.name]\n self.g_vars = [var for var in t_vars if 'g_' in var.name]\n\n self.saver = tf.train.Saver()\n\n def train(self, config):\n d_optim = tf.train.AdamOptimizer(config.learning_rate, beta1=config.beta1) \\\n .minimize(self.d_loss, var_list=self.d_vars)\n g_optim = tf.train.AdamOptimizer(config.learning_rate, beta1=config.beta1) \\\n .minimize(self.g_loss, var_list=self.g_vars)\n \n tf.global_variables_initializer().run()\n\n # \n self.g_sum = merge_summary([self.z_sum, self.d__sum,\n self.G_sum, self.d_loss_fake_sum, self.g_loss_sum])\n\n self.d_sum = merge_summary(\n [self.z_sum, self.d_sum, self.d_loss_real_sum, self.d_loss_sum])\n \n\n sample_z = np.random.uniform(-1, 1, size=(self.sample_num*self.y_dim , self.z_dim))\n \n samples = [[j] for j in range(self.y_dim) for i in range(self.sample_num)]\n oh = OneHotEncoder()\n oh.fit(samples)\n \n sample_labels = oh.transform(samples).toarray()\n\n # Load sample data\n '''\n if config.dataset == 'pokemon/64x64x3':\n sample_labels = self.data_y[0:self.sample_num]\n if config.dataset == 'mnist':\n sample_inputs = self.data_X[0:self.sample_num]\n sample_labels = self.data_y[0:self.sample_num]\n else:\n sample_files = self.data[0:self.sample_num]\n sample = [\n get_image(sample_file,\n input_height=self.imsize,\n input_width=self.imsize,\n resize_height=self.imsize,\n resize_width=self.imsize,\n crop=False,\n grayscale=self.grayscale) for sample_file in sample_files]\n if (self.grayscale):\n sample_inputs = np.array(sample).astype(np.float32)[:, :, :, None]\n else:\n sample_inputs = np.array(sample).astype(np.float32)\n '''\n counter = 1\n start_time = time.time()\n # Load checkpoint\n could_load, checkpoint_counter = self.load(self.checkpoint_dir)\n if could_load:\n counter = checkpoint_counter\n print(\" [*] Load SUCCESS\")\n else:\n print(\" [!] 
Load failed...\")\n\n # Start training\n for epoch in xrange(config.epoch):\n \n if config.dataset == 'mnist':\n batch_idxs = min(len(self.data_X), config.train_size) // self.batch_size\n else: \n self.data = glob(os.path.join(\n \"./data\", config.dataset, self.input_fname_pattern))\n batch_idxs = min(len(self.data), config.train_size) // self.batch_size\n\n for idx in xrange(0, batch_idxs):\n # Set batch X and Y\n if config.dataset == 'pokemon/64x64x3':\n random_idxs = np.random.randint(0,len(self.data_X), self.batch_size)\n batch_labels = self.data_y[random_idxs]\n batch_images = self.data_X[random_idxs]\n if config.dataset == 'mnist':\n batch_images = self.data_X[idx*config.batch_size:(idx+1)*config.batch_size]\n batch_labels = self.data_y[idx*config.batch_size:(idx+1)*config.batch_size]\n if False:\n random_idxs = np.random.randint(0,len(self.data_X), self.batch_size)\n batch = self.data_X[random_idxs]\n #self.data_X[idx*config.batch_size:(idx+1)*config.batch_size]\n if self.grayscale:\n batch_images = np.array(batch).astype(np.float32)[:, :, :, None]\n else:\n batch_images = np.array(batch).astype(np.float32)\n\n batch_z = np.random.uniform(-1, 1, [self.batch_size, self.z_dim]).astype(np.float32)\n \n \n\n # Update D network\n _, summary_str = self.sess.run([d_optim, self.d_sum],\n feed_dict={ self.inputs: batch_images, self.z: batch_z, self.y: batch_labels,self.keep_prob: 0.5 })\n\n # Update G network\n _, summary_str = self.sess.run([g_optim, self.g_sum],\n feed_dict={ self.z: batch_z, self.y:batch_labels, self.keep_prob: 0.5 })\n g_loss = 2\n# while g_loss > 0.9:\n # Run g_optim twice to make sure that d_loss does not go to zero (different from paper)\n _, summary_str, g_loss = self.sess.run([g_optim, self.g_sum, self.g_loss],\n feed_dict={ self.z: batch_z, self.y: batch_labels, self.keep_prob: 0.5 })\n\n counter += 1\n\n if epoch % 10 == 0:\n # Gather statistics \n errD_fake, errD_real, errG, acc_real, acc_fake, d_loss = self.sess.run(\n [self.d_loss_fake, self.d_loss_real , self.g_loss, self.accuracy_real, self.accuracy_fake, self.d_loss],\n feed_dict={self.inputs: batch_images, self.y: batch_labels, self.z: batch_z, self.keep_prob: 1.0}\n )\n print d_loss\n print \"Epoch:{:4d}, time:{:6.1f}, d_real_loss:{:1.4f}, d_fake_loss:{:1.4f}, g_loss:{:2.4f}, acc_real:{:0.3f}, acc_fake:{:0.3f}\" \\\n .format(epoch, time.time() - start_time, errD_real, errD_fake, errG, acc_real, acc_fake)\n \n\n # Save losses\n f = open('{}/curve.txt'.format(config.sample_dir), 'a')\n f.write(\"{},{},{},{},{},{}\\n\".format(errG, errD_fake, errD_real, acc_real, acc_fake, d_loss) ) \n f.close()\n\n if epoch % 100 == 0:\n self.save(config.checkpoint_dir, counter)\n\n if config.dataset == 'mnist' or True:\n samples, = self.sess.run(\n [self.sampler],\n feed_dict={\n self.z: sample_z,\n self.y: sample_labels,\n self.keep_prob: 1.0\n }\n ) \n save_images(samples, image_manifold_size(samples.shape[0]),\n './{}/train_{:02d}.png'.format(config.sample_dir, epoch), column_size=self.sample_num)\n print(\"Sample saved\") \n else:\n try:\n samples, d_loss, g_loss = self.sess.run(\n [self.sampler, self.d_loss, self.g_loss],\n feed_dict={\n self.z: sample_z,\n self.inputs: sample_inputs,\n },\n )\n \n print \"Max value:\" , samples.max()\n print \"Min value:\", samples.min()\n save_images(samples, image_manifold_size(samples.shape[0]),\n './{}/train_{:02d}.png'.format(config.sample_dir, epoch))\n print(\"[Sample] d_loss: %.8f, g_loss: %.8f\" % (d_loss, g_loss)) \n except:\n print(\"one pic error!...\")\n\n\n def 
discriminator(self, image, y=None, reuse=False):\n with tf.variable_scope(\"discriminator\") as scope:\n if reuse:\n scope.reuse_variables()\n\n if True:#not self.y_dim:\n h0 = lrelu(conv2d(image, self.df_dim, name='d_h0_conv'))\n h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim*2, name='d_h1_conv')))\n h2 = lrelu(self.d_bn2(conv2d(h1, self.df_dim*4, name='d_h2_conv')))\n h3 = lrelu(self.d_bn3(conv2d(h2, self.df_dim*8, name='d_h3_conv')))\n h4 = linear(tf.reshape(h3, [self.batch_size, -1]), 1, 'd_h4_lin')\n\n return tf.nn.sigmoid(h4), h4\n else:\n yb = tf.reshape(y, [self.batch_size, 1, 1, self.y_dim])\n x = conv_cond_concat(image, yb)\n\n h0 = lrelu(conv2d(x, self.c_dim + self.y_dim, name='d_h0_conv'))\n h0 = conv_cond_concat(h0, yb)\n\n h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim + self.y_dim, name='d_h1_conv')))\n h1 = tf.reshape(h1, [self.batch_size, -1]) \n h1 = concat([h1, y], 1)\n \n h2 = lrelu(self.d_bn2(linear(h1, self.dfc_dim, 'd_h2_lin')))\n h2 = concat([h2, y], 1)\n\n h3 = linear(h2, 1, 'd_h3_lin')\n \n return tf.nn.sigmoid(h3), h3\n \n def create_generator(self, z, size, y=None, reuse=False):\n with tf.variable_scope(\"generator\") as scope:\n if reuse:\n scope.reuse_variables()\n if self.model=='fc' and self.y_dim:\n return self.create_cond_fcgan_generator(z,size,y)\n elif self.model=='cond' and self.y_dim:\n return self.create_cond_dcgan_generator(z, size, y)\n else:\n return self.create_dcgan_generator(z, size, y)\n\n def create_cond_fcgan_generator(self, z, size, y):\n # Input sizes\n s_h, s_w = self.imsize, self.imsize\n s_h2, s_h4 = int(s_h/2), int(s_h/4)\n s_w2, s_w4 = int(s_w/2), int(s_w/4)\n\n\n yb = tf.reshape(y, [size, 1, 1, self.y_dim])\n # shape: [batch_size, y_dim + z_dim]\n z = concat([z, y], 1)\n\n # fc1 layer\n h0 = linear(\n input_=z,\n output_size=self.gfc_dim,\n scope=\"g_h0_lin\"\n )\n # Relu\n h0 = tf.nn.relu(self.g_bn0(h0))\n # Concatenate\n # From 1024 -> 1042\n h0 = concat([h0, y], 1)\n\n # FC 2 \n h1 = tf.nn.relu(self.g_bn1(\n linear(h0, self.gf_dim*2*s_h4*s_w4, 'g_h1_lin')))\n h1 = tf.reshape(h1, [size, s_h4, s_w4, self.gf_dim * 2])\n \n h1 = conv_cond_concat(h1, yb)\n # FC 3 \n h2 = tf.nn.relu(self.g_bn2(deconv2d(h1,\n [size, s_h2, s_w2, self.gf_dim * 2], name='g_h2')))\n h2 = conv_cond_concat(h2, yb)\n h3 = deconv2d(h2, [size, s_h, s_w, self.c_dim], name='g_h3')\n\n return self.gen_activation_function(h3)\n\n def create_cond_dcgan_generator(self, z, size, y=None):\n s_h, s_w = self.imsize, self.imsize\n\n # Define input sizes for convolutions\n s_h2, s_w2 = conv_out_size_same(s_h, 2), conv_out_size_same(s_w, 2)\n s_h4, s_w4 = conv_out_size_same(s_h2, 2), conv_out_size_same(s_w2, 2)\n s_h8, s_w8 = conv_out_size_same(s_h4, 2), conv_out_size_same(s_w4, 2)\n s_h16, s_w16 = conv_out_size_same(s_h8, 2), conv_out_size_same(s_w8, 2)\n\n yb = tf.reshape(y, [size, 1, 1, self.y_dim])\n z = concat([z, y], 1)\n\n # project `z` and reshape\n self.z_, self.h0_w, self.h0_b = linear(\n input_=z,\n output_size=self.gf_dim*8*s_h16*s_w16,\n scope='g_h0_lin', \n with_w=True)\n\n self.h0 = tf.reshape(\n self.z_, [size, s_h16, s_w16, self.gf_dim * 8])\n # Batch normalize and relu\n h0 = lrelu(self.g_bn0(self.h0))\n\n h0 = conv_cond_concat(h0, yb)\n # Deconvolution layer 1\n self.h1, self.h1_w, self.h1_b = deconv2d(\n input_=h0,\n output_shape= [size, s_h8, s_w8, self.gf_dim*4],\n name='g_h1', with_w=True)\n # Batch normalize and relu\n h1 = lrelu(self.g_bn1(self.h1))\n \n h1 = conv_cond_concat(h1, yb)\n\n # Deconvolution layer 2\n h2, self.h2_w, self.h2_b = deconv2d(\n 
input_=h1, \n output_shape=[size, s_h4, s_w4, self.gf_dim*2],\n name='g_h2',\n with_w=True)\n # Batch normalize and relu\n h2 = lrelu(self.g_bn2(h2))\n \n# h2 = tf.layers.dropout(h2, rate=self.keep_prob)\n h2 = conv_cond_concat(h2, yb)\n\n # Deconvolution layer 3 \n h3, self.h3_w, self.h3_b = deconv2d(\n h2, [size, s_h2, s_w2, self.gf_dim*1], name='g_h3', with_w=True)\n # Batch normalize and relu\n h3 = lrelu(self.g_bn3(h3))\n \n# h3 = tf.layers.dropout(h3, rate=self.keep_prob)\n h3 = conv_cond_concat(h3, yb)\n\n # Deconvolution layer 4 \n h4, self.h4_w, self.h4_b = deconv2d(\n input_=h3,\n output_shape=[size, s_h, s_w, self.c_dim],\n name='g_h4', with_w=True)\n \n # Return tanh, no batch normalization\n return self.gen_activation_function(h4) \n \n\n def create_dcgan_generator(self,z, size, y=None):\n s_h, s_w = self.imsize, self.imsize\n\n # Define input sizes for convolutions\n s_h2, s_w2 = conv_out_size_same(s_h, 2), conv_out_size_same(s_w, 2)\n s_h4, s_w4 = conv_out_size_same(s_h2, 2), conv_out_size_same(s_w2, 2)\n s_h8, s_w8 = conv_out_size_same(s_h4, 2), conv_out_size_same(s_w4, 2)\n s_h16, s_w16 = conv_out_size_same(s_h8, 2), conv_out_size_same(s_w8, 2)\n\n # project `z` and reshape\n self.z_, self.h0_w, self.h0_b = linear(\n input_=z,\n output_size=self.gf_dim*8*s_h16*s_w16,\n scope='g_h0_lin', \n with_w=True)\n\n self.h0 = tf.reshape(\n self.z_, [-1, s_h16, s_w16, self.gf_dim * 8])\n # Batch normalize and relu\n h0 = tf.nn.relu(self.g_bn0(self.h0))\n\n # Deconvolution layer 1\n self.h1, self.h1_w, self.h1_b = deconv2d(\n input_=h0,\n output_shape= [size, s_h8, s_w8, self.gf_dim*4],\n name='g_h1', with_w=True)\n # Batch normalize and relu\n h1 = tf.nn.relu(self.g_bn1(self.h1))\n\n # Deconvolution layer 2\n h2, self.h2_w, self.h2_b = deconv2d(\n input_=h1, \n output_shape=[size, s_h4, s_w4, self.gf_dim*2],\n name='g_h2',\n with_w=True)\n # Batch normalize and relu\n\n h2 = tf.nn.relu(self.g_bn2(h2))\n \n # Deconvolution layer 3 \n h3, self.h3_w, self.h3_b = deconv2d(\n h2, [size, s_h2, s_w2, self.gf_dim*1], name='g_h3', with_w=True)\n # Batch normalize and relu\n h3 = tf.nn.relu(self.g_bn3(h3))\n\n # Deconvolution layer 4 \n h4, self.h4_w, self.h4_b = deconv2d(\n input_=h3,\n output_shape=[size, s_h, s_w, self.c_dim],\n name='g_h4', with_w=True)\n \n # Return tanh, no batch normalization\n return self.gen_activation_function(h4)\n\n def generator(self, z, y=None):\n return self.create_generator(z,self.batch_size,y)\n\n def sampler(self, z, y=None):\n return self.create_generator(z, self.sample_num * self.y_dim, y, reuse=True)\n\n def load_mnist(self):\n data_dir = os.path.join(\"./data\", self.dataset_name)\n \n fd = open(os.path.join(data_dir,'train-images-idx3-ubyte'))\n loaded = np.fromfile(file=fd,dtype=np.uint8)\n trX = loaded[16:].reshape((60000,28,28,1)).astype(np.float)\n\n fd = open(os.path.join(data_dir,'train-labels-idx1-ubyte'))\n loaded = np.fromfile(file=fd,dtype=np.uint8)\n trY = loaded[8:].reshape((60000)).astype(np.float)\n\n fd = open(os.path.join(data_dir,'t10k-images-idx3-ubyte'))\n loaded = np.fromfile(file=fd,dtype=np.uint8)\n teX = loaded[16:].reshape((10000,28,28,1)).astype(np.float)\n\n fd = open(os.path.join(data_dir,'t10k-labels-idx1-ubyte'))\n loaded = np.fromfile(file=fd,dtype=np.uint8)\n teY = loaded[8:].reshape((10000)).astype(np.float)\n\n trY = np.asarray(trY)\n teY = np.asarray(teY)\n \n X = np.concatenate((trX, teX), axis=0)\n y = np.concatenate((trY, teY), axis=0).astype(np.int)\n \n seed = 547\n np.random.seed(seed)\n np.random.shuffle(X)\n 
np.random.seed(seed)\n np.random.shuffle(y)\n \n y_vec = np.zeros((len(y), self.y_dim), dtype=np.float)\n for i, label in enumerate(y):\n y_vec[i,y[i]] = 1.0\n \n return X/255.,y_vec\n\n def load_pokemon_y(self):\n y = [0]*802\n file_path = os.path.join('./data', self.dataset_name, \"types.csv\")\n f = open(file_path)\n reader = csv.reader(f,delimiter=\",\")\n for row in reader:\n # Skip first row\n if row[0] == \"id\":\n continue\n pid = int(row[0]) - 1\n typeid = row[3]\n y[pid] = int(typeid)\n onehot = np.zeros((len(y), self.y_dim), dtype=bool)\n for i in range(len(y)):\n onehot[i][y[i]] = 1\n return onehot\n \n\n @property\n def model_dir(self):\n return \"{}_{}_{}_{}\".format(\n self.dataset_name, self.batch_size,\n self.imsize, self.imsize)\n \n def save(self, checkpoint_dir, step):\n model_name = \"DCGAN.model\"\n checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir)\n\n if not os.path.exists(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n\n self.saver.save(self.sess,\n os.path.join(checkpoint_dir, model_name),\n global_step=step)\n\n def load(self, checkpoint_dir):\n import re\n print(\" [*] Reading checkpoints...\")\n checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir)\n\n ckpt = tf.train.get_checkpoint_state(checkpoint_dir)\n if ckpt and ckpt.model_checkpoint_path:\n ckpt_name = os.path.basename(ckpt.model_checkpoint_path)\n self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))\n counter = int(next(re.finditer(\"(\\d+)(?!.*\\d)\",ckpt_name)).group(0))\n print(\" [*] Success to read {}\".format(ckpt_name))\n return True, counter\n else:\n print(\" [*] Failed to find a checkpoint\")\n return False, 0\n" } ]
1
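The conditional branches of the model.py above inject the one-hot class label at every layer through `conv_cond_concat`, which is pulled in via `from ops import *` and whose definition is not included in this dump. A minimal TF1-style sketch of the trick under assumed shapes — broadcast the `[batch, 1, 1, y_dim]` label block across the spatial grid and append it as extra channels:

```python
import tensorflow as tf

def conv_cond_concat(x, y):
    """Concatenate feature maps x: [batch, h, w, c] with labels y: [batch, 1, 1, y_dim]."""
    shape = tf.shape(x)
    # Tile the label over height and width so every spatial position sees it.
    y_tiled = tf.tile(y, tf.stack([1, shape[1], shape[2], 1]))
    return tf.concat([x, y_tiled], axis=3)
```

Multiplying `y` by a ones tensor of the target shape is an equivalent way to do the broadcast; either way the label travels with the activations through every convolution.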
nettobranches/image_processing_py
https://github.com/nettobranches/image_processing_py
5949ec3775f51fcf6a92890b4b5f2b4b1fd5d5d8
1f2b4ac7af346e112eb62a9016bd868c49861e32
5b4f1d1e66efac98cdf428d6bffc3807c30ded46
refs/heads/master
2020-08-01T05:10:57.139302
2019-09-26T23:10:56
2019-09-26T23:10:56
210,875,432
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7285714149475098, "alphanum_fraction": 0.7285714149475098, "avg_line_length": 13.199999809265137, "blob_id": "7f5396864edd1973cbe70dd6f1493927ce324626", "content_id": "33bb088c454b529febc9ddd44ec77538625533bd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 70, "license_type": "no_license", "max_line_length": 31, "num_lines": 5, "path": "/readme.txt", "repo_name": "nettobranches/image_processing_py", "src_encoding": "UTF-8", "text": "pip install -r requirements.txt\n\n$env:FLASK_APP = \"main.py\"\n\nflask run" }, { "alpha_fraction": 0.5191376805305481, "alphanum_fraction": 0.5567532181739807, "avg_line_length": 26.725608825683594, "blob_id": "955616fa40ec9c90fc61da4e249d2c52f5d0b533", "content_id": "b89e83d9595a9a996a146aec71fbeb7b89507d9c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4546, "license_type": "no_license", "max_line_length": 70, "num_lines": 164, "path": "/main.py", "repo_name": "nettobranches/image_processing_py", "src_encoding": "UTF-8", "text": "import cv2, numpy as np, math as mth\n\nfrom flask import Flask, render_template, request, send_from_directory\nfrom werkzeug.utils import secure_filename\nfrom config import DevConfig\n\n\napp = Flask(__name__, static_url_path='')\napp.config.from_object(DevConfig)\n\nimage = 'images/1.tif'\n\[email protected]('/images/<path:path>')\ndef send_js(path):\n return send_from_directory('images', path)\n\[email protected]('/')\ndef home():\n return '<h1>Hello World!</h1>'\n\[email protected]('/upload')\ndef upload():\n return render_template('upload.html')\n\[email protected]('/uploader', methods = ['GET', 'POST'])\ndef uploader():\n if request.method == 'POST':\n f = request.files['file']\n # f.save(secure_filename(f.filename))\n f.save(image)\n return 'file uploaded successfully'\n\[email protected]('/read_img')\ndef read_img():\n img = cv2.imread(image)\n imgpng = 'images/1.png'\n cv2.imwrite(imgpng, img)\n\n print('original image shape:', img.shape)\n # print('img[0]', img[0])\n\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n print('Converted to grayscale')\n print('Shape:', gray.shape)\n print('Data type:', gray.dtype)\n\n return '<img src=\"/' + imgpng + '\"/>'\n\[email protected]('/reverse')\ndef reverse():\n img = cv2.imread(image)\n imgpng = 'images/1.png'\n\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n w, h = gray.shape[1], gray.shape[0]\n\n nu = np.full(img.shape, 255, np.uint8)\n nugray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n nupng = 'images/1_nu.png'\n\n for iw in range(w):\n for ih in range(h):\n nugray[ih-1][iw-1] = gray[h-ih-1][w-iw-1]\n # print(ih, iw, gray[ih][iw])\n\n cv2.imwrite(imgpng, img)\n cv2.imwrite(nupng, nugray)\n return '<img src=\"/' + imgpng + '\"/><img src=\"/' + nupng + '\"/>'\n\[email protected]('/intensity')\ndef intensity():\n img = cv2.imread(image)\n img = img.astype(np.float32) / 255\n imgpng = 'images/1.png'\n\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n w, h = gray.shape[1], gray.shape[0]\n\n nu = np.full(img.shape, 255, np.float32)\n nugray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n nupng = 'images/1_nu.png'\n\n for iw in range(w):\n for ih in range(h):\n if( gray[ih-1][iw-1] > 0 ):\n nugray[ih-1][iw-1] = mth.log( gray[ih-1][iw-1] )\n else:\n nugray[ih-1][iw-1] = 0\n # print(ih, iw, gray[ih][iw])\n\n img = (img * 255).astype(np.uint8)\n nugray = (nugray * 255).astype(np.uint8)\n cv2.imwrite(imgpng, img)\n cv2.imwrite(nupng, nugray)\n return '<img 
src=\"/' + imgpng + '\"/><img src=\"/' + nupng + '\"/>'\n\[email protected]('/avg')\ndef average():\n img = cv2.imread(image)\n imgpng = 'images/1.png'\n\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n w, h = gray.shape[1], gray.shape[0]\n\n nugray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n nupng = 'images/1_nu.png'\n\n for iw in range(2,w-2):\n for ih in range(2,h-2):\n nugray[ih-1][iw-1] = avg(gray, ih-1, iw-1)\n\n cv2.imwrite(imgpng, img)\n cv2.imwrite(nupng, nugray)\n return '<img src=\"/' + imgpng + '\"/><img src=\"/' + nupng + '\"/>'\n\[email protected]('/convolution')\ndef convolution():\n img = cv2.imread(image)\n imgpng = 'images/1.png'\n\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n w, h = gray.shape[1], gray.shape[0]\n\n nugray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n nupng = 'images/1_nu.png'\n\n for iw in range(2,w-2):\n for ih in range(2,h-2):\n nugray[ih-1][iw-1] = cnvl(gray, ih-1, iw-1)\n\n cv2.imwrite(imgpng, img)\n cv2.imwrite(nupng, nugray)\n return '<img src=\"/' + imgpng + '\"/><img src=\"/' + nupng + '\"/>'\n\n# @app.route('/read_img')\n# def read_img():\n# img = cv2.imread(image)\n# print('original image shape:', img.shape)\n# return '<h1>readimg</h1>'\n\ndef avg(mtrx, h, w):\n return np.mean([\n mtrx[h-1][w-1], mtrx[h-1][w], mtrx[h-1][w+1], \n mtrx[h][w-1], mtrx[h][w], mtrx[h][w+1],\n mtrx[h+1][w-1], mtrx[h+1][w], mtrx[h+1][w+1] ])\n\ndef cnvl(mtrx, h, w):\n kernel = np.array([[1,0,-1], \n [0,0,0],\n [-1,0,1]])\n H = np.array([[\n mtrx[h-1][w-1], mtrx[h-1][w], mtrx[h-1][w+1], \n mtrx[h][w-1], mtrx[h][w], mtrx[h][w+1],\n mtrx[h+1][w-1], mtrx[h+1][w], mtrx[h+1][w+1] ]])\n J = np.array([[kernel[0][0]], [kernel[0][1]], [kernel[0][2]],\n [kernel[1][0]], [kernel[1][1]], [kernel[0][2]],\n [kernel[2][0]], [kernel[2][1]], [kernel[2][2]]])\n # print('H, J', H, J)\n res = np.dot(H,J)\n # res = 0\n return res\n\nif __name__ == '__main__':\n app.run()" } ]
2
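The `/convolution` route in the record above evaluates a 3x3 kernel one pixel at a time by flattening the neighborhood and the kernel into vectors and taking their dot product (`cnvl`). A hedged, self-contained sketch of the same per-pixel sum computed directly on the 3x3 window, which sidesteps the error-prone manual flattening; the kernel matches the one in `cnvl`, while the test image is made up:

```python
import numpy as np

# Equivalent of the repo's cnvl(): multiply the 3x3 neighborhood around
# (h, w) elementwise by the kernel and sum the products.
def cnvl_window(mtrx, h, w, kernel):
    window = mtrx[h-1:h+2, w-1:w+2].astype(np.float64)
    return float(np.sum(window * kernel))

kernel = np.array([[1, 0, -1],
                   [0, 0, 0],
                   [-1, 0, 1]])
img = np.arange(25, dtype=np.uint8).reshape(5, 5)  # illustrative 5x5 image
print(cnvl_window(img, 2, 2, kernel))  # same value the flattened dot product yields
```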
Phil610351/marl-embedding
https://github.com/Phil610351/marl-embedding
c0aede77878c9d7803dd393be2510f264bebfa01
551278d6ab8ef96ef31e3806e8e8f584e7863516
c0ed3de506dab7a2508f1097f76811b410c3e04e
refs/heads/master
2023-03-17T21:19:45.667582
2020-04-03T00:20:46
2020-04-03T00:20:46
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7038916945457458, "alphanum_fraction": 0.7478849291801453, "avg_line_length": 34.787879943847656, "blob_id": "387d5b61cf6eaf028c396c2e5a2325a3e808c455", "content_id": "32d738003be7e6882d5adcadc5f8a3ae1ec6ccbd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1182, "license_type": "permissive", "max_line_length": 246, "num_lines": 33, "path": "/README.md", "repo_name": "Phil610351/marl-embedding", "src_encoding": "UTF-8", "text": "# Information State Embedding in Partially Observable MARL \n\nThis is the PyTorch implementation of the paper [Information State Embedding in Partially Observable Cooperative Multi-Agent Reinforcement Learning](https://arxiv.org/abs/2004.01098). Please consider citing our paper if you find this code useful:\n\n```\n@article{mao2020information,\n title={Information State Embedding in Partially Observable Cooperative Multi-Agent Reinforcement Learning},\n author={Mao, Weichao and Zhang, Kaiqing and Miehling, Erik and BaลŸar, Tamer},\n journal={arXiv preprint arXiv:2004.01098},\n year={2020}\n}\n```\n\n\n## Dependencies\n- Python 3.5\n- PyTorch 1.4\n- scikit-learn 0.22.2\n\n\n## Examples\n- Default parameter values: To test the three embedding instances in their default settings, simply run:\n```\npython FM-E.py\npython RNN-E.py\npython PCA-E.py\n```\n- Specifying parameters: Performance varies when you use different parameter values on different tasks. To test with your own parameter values, run:\n```\npython FM-E.py --sequence_size 20 --length 4 --lr 0.01 --task 'boxpushing'\npython RNN-E.py --sequence_size 10 --lr 0.01 --task 'grid3x3'\npython PCA-E.py --sequence_size 4 --pca_length 8 --lr 0.01 --task 'dectiger'\n```\n\n" }, { "alpha_fraction": 0.369546502828598, "alphanum_fraction": 0.3980744779109955, "avg_line_length": 44.897674560546875, "blob_id": "1fec63fd86dcfb50e647a7b6fccb9f3bcc786206", "content_id": "1d67af67c7259e0f2ccf1c43be1c06f8ed43c780", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": true, "language": "Python", "length_bytes": 19735, "license_type": "permissive", "max_line_length": 523, "num_lines": 430, "path": "/env/boxpushing.py", "repo_name": "Phil610351/marl-embedding", "src_encoding": "UTF-8", "text": "import os\nimport random\nimport numpy as np\n\nclass Environment:\n\n def read_matrix(self, n, m):\n line = self.f.readline()\n while not len(line) or line.startswith('#'):\n line = self.f.readline()\n line = line.strip()\n if line == 'identity':\n result = [[0.0 for i in range(n)] for j in range(m)]\n for i in range(n):\n result[i][i] = 1.0\n return result\n elif line == 'uniform':\n result = [[1.0 / m for i in range(n)] for j in range(m)]\n return result\n items = line.split(' ')\n result = [[0.0 for i in range(n)] for j in range(m)]\n for i in range(n):\n for j in range(m):\n result[i][j] = float(items[j])\n line = self.f.readline().strip()\n items = line.split(' ')\n return result\n\n def __init__(self, filename):\n\n self.agent_num = 2\n self.discount = 1\n self.action_size = []\n self.state_size = 0\n self.observation_size = []\n self.reward_flag = 1.0\n self.readline_count = 0\n\n self.state_names = []\n self.state_dict = {}\n self.action_names = []\n self.action_dict = []\n self.observation_names = []\n self.observation_dict = []\n\n self.has_initial = True\n self.initial = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]\n # We need to specify the initial belief of each problem manually\n self.T = [] # T[s][a_0][a_1][s']\n self.O = [] # O[s'][a_0][a_1][o_0][o_1]\n self.R = [] # R[s][a_0][a_1][s'][o_0][o_1]\n\n self.current_states = []\n self.current_discount = 1.0\n self.current_batch_size = 1\n\n\n\n self.f = open('env/' + filename, 'r')\n while True:\n line = self.f.readline()\n if not line:\n break\n line = line.strip()\n if not len(line) or line.startswith('#'):\n continue\n self.readline_count += 1\n if self.readline_count <= 7:\n items = line.split(' ')\n if items[0] == 'agents:':\n self.agent_num = int(items[1])\n self.action_names = [[] for _ in range(self.agent_num)]\n self.observation_names = [[] for _ in range(self.agent_num)]\n self.action_size = [0 for _ in range(self.agent_num)]\n self.observation_size = [0 for _ in range(self.agent_num)]\n elif items[0] == 'discount:':\n self.discount = float(items[1])\n elif items[0] == 'values:':\n if items[1] == 'cost':\n self.reward_flag = -1.0\n elif items[0] == 'states:':\n if len(items) == 2 and items[1].isdigit():\n self.state_size = int(items[1])\n for i in range(self.state_size):\n self.state_names.append(str(i))\n self.state_dict[str(i)] = i\n else:\n self.state_size = len(items) - 1\n for i in range(1, len(items)):\n self.state_names.append(items[i])\n self.state_dict[items[i]] = i - 1\n elif items[0] == 'actions:':\n for agent in range(self.agent_num):\n line = self.f.readline()\n while not len(line) or line.startswith('#'):\n line = self.f.readline()\n items = line.strip().split(' ')\n if len(items) == 1 and items[0].isdigit():\n self.action_size[agent] = int(items[0])\n else:\n self.action_size[agent] = len(items)\n tmp_dict = {}\n for i in range(0, len(items)):\n self.action_names[agent].append(items[i])\n tmp_dict[items[i]] = i\n self.action_dict.append(tmp_dict)\n elif items[0] == 'observations:':\n for agent in range(self.agent_num):\n line = self.f.readline()\n while not len(line) or line.startswith('#'):\n line = self.f.readline()\n items = line.strip().split(' ')\n if len(items) == 1 and items[0].isdigit():\n self.observation_size[agent] = int(items[0])\n else:\n self.observation_size[agent] = len(items)\n tmp_dict = {}\n for i in range(0, len(items)):\n self.observation_names[agent].append(items[i])\n tmp_dict[items[i]] = i\n self.observation_dict.append(tmp_dict)\n else:\n items = line.strip().split(':')\n if items[0] == 'start':\n if filename == 'dectiger.txt':\n self.has_initial = False\n line = self.f.readline()\n else:\n self.has_initial = True\n line = self.f.readline()\n pass\n else:\n print(\"input format not supported\")\n if self.readline_count == 7:\n self.T = [[[[0.0 for i in range(self.state_size)] for j in range(self.action_size[1])] for _ in range(self.action_size[0])] for k in range(self.state_size)]\n self.O = [[[[[0.0 for __ in range(self.observation_size[1])] for i in range(self.observation_size[0])] for k in range(self.action_size[1])] for _ in range(self.action_size[0])] for j in range(self.state_size)]\n self.R = [[[[[[0.0 for __ in range(self.observation_size[1])] for l in range(self.observation_size[0])] for i in range(self.state_size)] for j in 
range(self.action_size[1])] for _ in range(self.action_size[0])] for k in range(self.state_size)]\n else:\n items = line.strip().split(':')\n if items[0] == 'T': \n lower = []\n upper = []\n if len(items) <= 3:\n items[1] = items[1].strip()\n if items[1] == '*':\n lower = [0, 0]\n upper = [self.action_size[0], self.action_size[1]]\n else:\n lower = [self.action_dict[_][items[1].split(' ')[_]] for _ in range(2)]\n upper = [self.action_dict[_][items[1].split(' ')[_]] + 1 for _ in range(2)]\n\n tmp = self.read_matrix(self.state_size, self.state_size)\n for a1 in range(lower[0], upper[0]):\n for a2 in range(lower[1], upper[1]):\n for s1 in range(self.state_size):\n for s2 in range(self.state_size):\n self.T[s1][a1][a2][s2] = tmp[s1][s2]\n else:\n lower = []\n upper = []\n a1 = items[1].strip().split(' ')[0]\n a2 = items[1].strip().split(' ')[1]\n if a1 == '*':\n lower.append(0)\n upper.append(self.action_size[0])\n else:\n lower.append(int(a1))\n upper.append(int(a1) + 1)\n\n if a2 == '*':\n lower.append(0)\n upper.append(self.action_size[1])\n else:\n lower.append(int(a2))\n upper.append(int(a2) + 1)\n\n s1 = items[2].strip()\n s2 = items[3].strip()\n\n if s1 == '*':\n lower.append(0)\n upper.append(self.state_size)\n else:\n lower.append(int(s1))\n upper.append(int(s1) + 1)\n\n if s2 == '*':\n lower.append(0)\n upper.append(self.state_size)\n else:\n lower.append(int(s2))\n upper.append(int(s2) + 1)\n\n\n for a1 in range(lower[0], upper[0]):\n for a2 in range(lower[1], upper[1]):\n for s1 in range(lower[2], upper[2]):\n for s2 in range(lower[3], upper[3]):\n self.T[s1][a1][a2][s2] = float(items[4])\n\n\n elif items[0] == 'O':\n\n lower = []\n upper = []\n if len(items) <= 3:\n if filename == 'dectiger.txt':\n for a1 in range(3):\n for a2 in range(3):\n for s1 in range(self.state_size):\n for o1 in range(2):\n for o2 in range(2):\n self.O[s1][a1][a2][o1][o2] = 0.25\n line = self.f.readline()\n else:\n print('Observation format not supported')\n else:\n lower = []\n upper = []\n if items[1].strip() == '*':\n a1 = '*'\n a2 = '*'\n else:\n a1 = items[1].strip().split(' ')[0]\n a2 = items[1].strip().split(' ')[1]\n if a1 == '*':\n lower.append(0)\n upper.append(self.action_size[0])\n else:\n lower.append(int(a1))\n upper.append(int(a1) + 1)\n\n if a2 == '*':\n lower.append(0)\n upper.append(self.action_size[1])\n else:\n lower.append(int(a2))\n upper.append(int(a2) + 1)\n\n s1 = items[2].strip()\n\n if s1 == '*':\n lower.append(0)\n upper.append(self.state_size)\n else:\n lower.append(int(s1))\n upper.append(int(s1) + 1)\n\n o1 = items[3].strip().split(' ')[0]\n o2 = items[3].strip().split(' ')[1]\n if o1 == '*':\n lower.append(0)\n upper.append(self.observation_size[0])\n else:\n lower.append(int(o1))\n upper.append(int(o1) + 1)\n\n if o2 == '*':\n lower.append(0)\n upper.append(self.observation_size[1])\n else:\n lower.append(int(o2))\n upper.append(int(o2) + 1)\n\n\n for a1 in range(lower[0], upper[0]):\n for a2 in range(lower[1], upper[1]):\n for s1 in range(lower[2], upper[2]):\n for o1 in range(lower[3], upper[3]):\n for o2 in range(lower[4], upper[4]):\n self.O[s1][a1][a2][o1][o2] = float(items[4])\n\n elif items[0] == 'R':\n \n lower = []\n upper = []\n if len(items) <= 3:\n if filename == 'dectiger.txt':\n print('Observation format not supported')\n else:\n print('Observation format not supported')\n else:\n lower = []\n upper = []\n if items[1].strip() == '*':\n a1 = '*'\n a2 = '*'\n else:\n a1 = items[1].strip().split(' ')[0]\n a2 = items[1].strip().split(' ')[1]\n if a1 == '*':\n 
lower.append(0)\n upper.append(self.action_size[0])\n else:\n lower.append(int(a1))\n upper.append(int(a1) + 1)\n\n if a2 == '*':\n lower.append(0)\n upper.append(self.action_size[1])\n else:\n lower.append(int(a2))\n upper.append(int(a2) + 1)\n\n s1 = items[2].strip()\n s2 = items[3].strip()\n\n if s1 == '*':\n lower.append(0)\n upper.append(self.state_size)\n else:\n lower.append(int(s1))\n upper.append(int(s1) + 1)\n\n if s2 == '*':\n lower.append(0)\n upper.append(self.state_size)\n else:\n lower.append(int(s2))\n upper.append(int(s2) + 1)\n\n o1 = o2 = '*'\n\n if o1 == '*':\n lower.append(0)\n upper.append(self.observation_size[0])\n else:\n lower.append(int(o1))\n upper.append(int(o1) + 1)\n\n if o2 == '*':\n lower.append(0)\n upper.append(self.observation_size[1])\n else:\n lower.append(int(o2))\n upper.append(int(o2) + 1)\n\n\n for a1 in range(lower[0], upper[0]):\n for a2 in range(lower[1], upper[1]):\n for s1 in range(lower[2], upper[2]):\n for s2 in range(lower[3], upper[3]):\n for o1 in range(lower[4], upper[4]):\n for o2 in range(lower[5], upper[5]):\n self.R[s1][a1][a2][s2][o1][o2] = float(items[5].strip())\n\n\n else:\n print('Initial letter not recognized')\n print(line)\n self.f.close()\n\n\n def init_environment(self, batch_size):\n if not self.has_initial:\n self.current_states = np.random.choice(self.state_size, batch_size).tolist()\n else:\n self.current_states = np.random.choice(self.state_size, batch_size, p=self.initial).tolist()\n self.current_discount = 1.0\n self.current_batch_size = batch_size\n\n\n # Taken a batch of actions, returns a batch of observations and rewards in one time step\n def step(self, actions_0, actions_1):\n # input: actions_0[batch_size][1], actions_1[batch_size][1]\n # returns lists: observations_0[batch_size][1], observations_1[batch_size][1], rewards[batch_size][1]\n # the returned rewards have been discounted \n observations_0 = []\n observations_1 = []\n rewards = []\n if len(actions_0) != self.current_batch_size or len(actions_1) != self.current_batch_size:\n print('batch size does not match')\n for i in range(self.current_batch_size):\n action_0 = actions_0[i]\n action_1 = actions_1[i]\n flat_list = [item for sublist in self.O[self.current_states[i]][action_0][action_1] for item in sublist]\n observation_joint = np.random.choice(self.observation_size[0] * self.observation_size[1], 1, p=flat_list)[0]\n observations_0.append(observation_joint // self.observation_size[0])\n observations_1.append(observation_joint % self.observation_size[1])\n\n new_state = np.random.choice(self.state_size, 1, p=self.T[self.current_states[i]][action_0][action_1])[0]\n reward = self.R[self.current_states[i]][action_0][action_1][new_state][observation_joint // self.observation_size[0]][observation_joint % self.observation_size[1]]\n self.current_states[i] = new_state\n rewards.append(reward * self.current_discount)\n\n # self.current_discount *= self.discount\n return observations_0, observations_1, rewards\n\n\n # DEPRECATED!\n # generate a batch of sequences at one run\n def load_samples(self, sample_size, sequence_size):\n # returns lists: actions[sample_size][sequence_size], observations[sample_size][sequence_size], rewards[sample_size][sequence_size]\n\n samples = [] # each output sample is a [a, a, a, ..., o, o, o, ..., r, r, r, ...] 
sequence\n actions = []\n observations = []\n rewards = []\n # f = open('samples.txt', 'w')\n for _ in range(sample_size):\n state = random.randint(0, self.state_size - 1)\n sample = []\n action_seq = []\n observation_seq = []\n reward_seq = []\n current_discount = 1\n for __ in range(sequence_size):\n action = random.randint(0, self.action_size - 1)\n observation = np.random.choice(self.observation_size, 1, p=self.O[state][action])[0]\n action_seq.append(action)\n observation_seq.append(observation)\n new_state = np.random.choice(self.state_size, 1, p=self.T[state][action])[0]\n reward = self.R[state][action][new_state][observation]\n reward_seq.append(reward * current_discount) \n current_discount *= self.discount\n state = new_state\n sample = action_seq + observation_seq + reward_seq\n actions.append(action_seq)\n observations.append(observation_seq)\n rewards.append(reward_seq)\n samples.append(sample)\n \n # for item in sample:\n # f.write(str(item) + ' ')\n # f.write('\\n')\n # f.close()\n return actions, observations, rewards\n\nif __name__ == \"__main__\":\n pass" }, { "alpha_fraction": 0.579162061214447, "alphanum_fraction": 0.5948532819747925, "avg_line_length": 40.119354248046875, "blob_id": "54a44d7009ee7d71e32bb990194faf27c5314980", "content_id": "413f60491105f03538660c42d4ebde20322f3a26", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12746, "license_type": "permissive", "max_line_length": 182, "num_lines": 310, "path": "/PCA-E.py", "repo_name": "Phil610351/marl-embedding", "src_encoding": "UTF-8", "text": "import torch\nimport torch.nn as nn\nimport numpy as np\nimport random\nfrom collections import deque \nimport torch.optim as optim\nimport os\nfrom sklearn.preprocessing import OneHotEncoder\nimport torch.nn.functional as F\nfrom torch.distributions import Categorical\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torch.distributions.multivariate_normal import MultivariateNormal\nimport sklearn.decomposition\nimport argparse\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--sequence_size', help=\"length of the horizon\", type=int, default=4)\nparser.add_argument('--pca_length', help=\"length of the PCA feature vector\", type=int, default=8)\nparser.add_argument('--lr', help=\"learning rate\", type=float, default=1e-2)\nparser.add_argument('--task', help=\"task problem\", default='dectiger')\nargs = parser.parse_args()\n\nSEQUENCE_SIZE = args.sequence_size\nPCA_LENGTH = args.pca_length\nlr = args.lr\ntask = args.task\nBATCH_SIZE = 400\nMEMORY_SIZE = 4000\nHIDDEN_SIZE = 6\nTOTAL_EPISODES = 40000\nTEST_TOTAL_EPISODES = 2000\nTARGET_UPDATE_FREQ = 100 * SEQUENCE_SIZE\nBACK_PROP_FREQ = 1 * SEQUENCE_SIZE\nINITIAL_EPSILON = 0.9\nFINAL_EPSILON = 0.0\n\n\n\nif task == 'boxpushing':\n from env.boxpushing import Environment\nelif task == 'grid3x3':\n from env.grid3x3 import Environment\nelse:\n from env.dectiger import Environment\n\n\nclass Memory():\n def __init__(self, memory_size):\n self.memory_size = memory_size\n self.memory = deque(maxlen=self.memory_size)\n \n def add_episode(self, epsiode):\n self.memory.append(epsiode)\n \n def get_batch(self, batch_size):\n batch = random.sample(self.memory, batch_size)\n return batch\n\n\n\nclass Model(nn.Module):\n def __init__(self, input_size, hidden_size, output_size):\n super(Model, self).__init__()\n\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.output_size = output_size\n \n self.fc1 = nn.Linear(input_size, 
hidden_size)\n self.fc2 = nn.Linear(hidden_size, output_size)\n self.relu = nn.ReLU()\n\n\n def forward(self, input, batch_size):\n input = torch.FloatTensor(input).view(batch_size, -1)\n output = self.fc1(input)\n output = self.relu(output)\n output = self.fc2(output)\n \n return output\n\n\ndef one_hot_encoding(xs, n):\n # xs[batch_size]\n tmp = [[i] for i in range(n)]\n enc = OneHotEncoder(handle_unknown='ignore', categories=[[i for i in range(n)]])\n enc.fit(tmp)\n xs = np.expand_dims(np.array(xs), axis=1)\n result = enc.transform(xs).toarray()\n result = torch.tensor(result).float()\n # result[batch_size][action_size]\n return result\n\n\ndef fit_pca():\n X = []\n current_sample = [0 for _ in range((action_size + observation_size) * (SEQUENCE_SIZE))]\n X.append(current_sample)\n sample_size = 300000\n\n for _ in range(sample_size):\n current_sample = [0 for _ in range((action_size + observation_size) * (SEQUENCE_SIZE))]\n length = random.randint(1, SEQUENCE_SIZE)\n for i in range(length):\n a = random.randint(0, action_size - 1)\n o = random.randint(0, observation_size - 1)\n current_sample[(SEQUENCE_SIZE - length + i) * (action_size + observation_size) + a] = 1\n current_sample[(SEQUENCE_SIZE - length + i) * (action_size + observation_size) + action_size + o] = 1\n X.append(current_sample)\n\n pca = sklearn.decomposition.PCA(n_components=PCA_LENGTH)\n pca.fit(np.array(X))\n return pca\n\n\ndef train():\n # Initialize experience memory\n for episode in range(0, MEMORY_SIZE // SEQUENCE_SIZE // 2 + 1):\n env.init_environment(batch_size=1)\n embedding = [deque([0.0 for i in range((SEQUENCE_SIZE) * (action_size + observation_size))], maxlen=(SEQUENCE_SIZE) * (action_size + observation_size)) for i in range(2)]\n \n for _ in range(SEQUENCE_SIZE):\n actions = [[random.randint(0, action_size - 1)] for i in range(2)]\n observations[0], observations[1], rewards = env.step(actions[0], actions[1])\n\n for agent_i in range(agent_num): \n one_hot_actions[agent_i] = one_hot_encoding(actions[agent_i], action_size)\n one_hot_observations[agent_i] = one_hot_encoding(observations[agent_i], observation_size)\n # TODO here\n\n current_state = torch.FloatTensor(pca.transform(np.array([embedding[agent_i]]))[0])\n current_state = torch.cat((torch.FloatTensor([agent_i]), current_state), 0)\n\n embedding[agent_i].extend(torch.cat((one_hot_actions[agent_i][0], one_hot_observations[agent_i][0]), 0).tolist())\n new_state = torch.FloatTensor(pca.transform(np.array([embedding[agent_i]]))[0])\n new_state = torch.cat((torch.FloatTensor([agent_i]), new_state), 0)\n memory.add_episode((current_state, actions[agent_i][0], rewards[0], new_state))\n\n\n epsilon = INITIAL_EPSILON\n reward_stat = []\n total_steps = 0\n total_reward = 0\n total_loss = 0\n\n\n # Start training\n for episode in range(TOTAL_EPISODES):\n env.init_environment(batch_size=1)\n embedding = [deque([0.0 for i in range((SEQUENCE_SIZE) * (action_size + observation_size))], maxlen=(SEQUENCE_SIZE) * (action_size + observation_size)) for i in range(2)]\n\n for _ in range(SEQUENCE_SIZE):\n total_steps += 1\n current_state = [None for i in range(2)]\n for agent_i in range(2):\n current_state[agent_i] = torch.FloatTensor(pca.transform(np.array([embedding[agent_i]]))[0])\n current_state[agent_i] = torch.cat((torch.FloatTensor([agent_i]), current_state[agent_i]), 0)\n if np.random.rand(1) < epsilon:\n actions = [[random.randint(0, action_size - 1)] for i in range(2)]\n else:\n for agent_i in range(2):\n q_values = main_model(current_state[agent_i], 
batch_size=1)[0]\n actions[agent_i] = [int(torch.argmax(q_values))]\n\n observations[0], observations[1], rewards = env.step(actions[0], actions[1])\n total_reward += rewards[0]\n \n for agent_i in range(agent_num): \n one_hot_actions[agent_i] = one_hot_encoding(actions[agent_i], action_size)\n one_hot_observations[agent_i] = one_hot_encoding(observations[agent_i], observation_size)\n\n current_state = torch.FloatTensor(pca.transform(np.array([embedding[agent_i]]))[0])\n current_state = torch.cat((torch.FloatTensor([agent_i]), current_state), 0)\n\n embedding[agent_i].extend(torch.cat((one_hot_actions[agent_i][0], one_hot_observations[agent_i][0]), 0).tolist())\n new_state = torch.FloatTensor(pca.transform(np.array([embedding[agent_i]]))[0])\n new_state = torch.cat((torch.FloatTensor([agent_i]), new_state), 0)\n memory.add_episode((current_state, actions[agent_i][0], rewards[0], new_state))\n\n\n if (total_steps % TARGET_UPDATE_FREQ) == 0:\n target_model.load_state_dict(main_model.state_dict())\n \n if (total_steps % BACK_PROP_FREQ) == 0: \n\n batch = memory.get_batch(batch_size=BATCH_SIZE)\n \n current_states = []\n local_actions = []\n local_rewards = []\n next_states = []\n\n for sample in batch:\n current_states.append(sample[0])\n local_actions.append(sample[1])\n local_rewards.append(sample[2])\n next_states.append(sample[3])\n \n current_states = torch.cat(current_states, dim=0) # [batch_size][embedding_size]\n local_actions = torch.LongTensor(local_actions)\n local_rewards = torch.FloatTensor(local_rewards) # [batch_size]\n next_states = torch.cat(next_states, dim=0)\n\n next_q_values = target_model(next_states, batch_size=BATCH_SIZE) # [batch_size][action_size]\n next_q_max_value, _ = next_q_values.detach().max(dim=1) # [batch_size]\n target_values = local_rewards + 0.75 * next_q_max_value # There should be a gamma factor\n \n q_values = main_model(current_states, batch_size=BATCH_SIZE) # [batch_size][action_size]\n current_values = torch.gather(q_values, dim=1, index=local_actions.unsqueeze(dim=1)).squeeze(dim=1)\n \n loss = criterion(current_values, target_values)\n total_loss += loss\n \n if episode <= TOTAL_EPISODES - 2000:\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n\n reward_stat.append(total_reward)\n if episode % 100 == 99:\n print(episode, total_reward / 100, total_loss.item() / 100)\n writer.add_scalar(task + '/reward', total_reward / 100, episode)\n writer.add_scalar(task + '/loss', total_loss.item() / 100, episode)\n total_reward = 0\n total_loss = 0\n\n if epsilon > FINAL_EPSILON:\n epsilon -= (INITIAL_EPSILON - FINAL_EPSILON) / (TOTAL_EPISODES - 4000)\n \n \ndef test():\n report = 0.0\n result = 0\n total_reward = 0\n for length in [SEQUENCE_SIZE]:\n TEST_SEQUENCE_SIZE = length\n\n for episode in range(TEST_TOTAL_EPISODES):\n env.init_environment(batch_size=1)\n embedding = [deque([0.0 for i in range((SEQUENCE_SIZE) * (action_size + observation_size))], maxlen=(SEQUENCE_SIZE) * (action_size + observation_size)) for i in range(2)]\n current_discount = 1.0\n\n for _ in range(TEST_SEQUENCE_SIZE):\n current_state = [None for i in range(2)]\n actions = [None for i in range(2)]\n for agent_i in range(2):\n current_state[agent_i] = torch.FloatTensor(pca.transform(np.array([embedding[agent_i]]))[0])\n current_state[agent_i] = torch.cat((torch.FloatTensor([agent_i]), current_state[agent_i]), 0)\n for agent_i in range(2):\n q_values = main_model(current_state[agent_i], batch_size=1)[0]\n actions[agent_i] = [int(torch.argmax(q_values))]\n\n observations[0], 
observations[1], rewards = env.step(actions[0], actions[1])\n total_reward += rewards[0] * current_discount\n result += rewards[0] * current_discount\n for agent_i in range(agent_num): \n one_hot_actions[agent_i] = one_hot_encoding(actions[agent_i], action_size)\n one_hot_observations[agent_i] = one_hot_encoding(observations[agent_i], observation_size)\n\n current_state = torch.FloatTensor(pca.transform(np.array([embedding[agent_i]]))[0])\n current_state = torch.cat((torch.FloatTensor([agent_i]), current_state), 0)\n\n embedding[agent_i].extend(torch.cat((one_hot_actions[agent_i][0], one_hot_observations[agent_i][0]), 0).tolist())\n new_state = torch.FloatTensor(pca.transform(np.array([embedding[agent_i]]))[0])\n new_state = torch.cat((torch.FloatTensor([agent_i]), new_state), 0)\n\n\n if episode % 100 == 99:\n print(episode, total_reward / 100)\n total_reward = 0\n\n print(result / TEST_TOTAL_EPISODES)\n file = open('PCA-E_' + task + '.txt', 'a')\n file.write(str(TEST_SEQUENCE_SIZE) + '\\t' + str(result / TEST_TOTAL_EPISODES) + '\\n')\n file.close()\n report += result / TEST_TOTAL_EPISODES\n result = 0\n\n\nif __name__ == \"__main__\":\n\n env = Environment(task + '.txt')\n\n agent_num = env.agent_num\n state_size = env.state_size\n action_size = env.action_size[0]\n observation_size = env.observation_size[0]\n input_size = observation_size + action_size\n\n writer = SummaryWriter()\n\n memory = Memory(memory_size=MEMORY_SIZE)\n main_model = Model(input_size=PCA_LENGTH + 1, hidden_size=HIDDEN_SIZE, output_size=action_size).float()\n target_model = Model(input_size=PCA_LENGTH + 1, hidden_size=HIDDEN_SIZE, output_size=action_size).float()\n\n target_model.load_state_dict(main_model.state_dict())\n criterion = nn.MSELoss()\n optimizer = torch.optim.Adam(main_model.parameters(), lr=lr)\n\n one_hot_actions = [None for i in range(2)]\n observations = [None for i in range(2)]\n one_hot_observations = [None for i in range(2)]\n\n pca = fit_pca()\n\n train()\n\n test()" } ]
3
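`PCA-E.py` in the record above builds its information-state embedding by keeping each agent's recent action-observation pairs as a fixed-length one-hot vector (`fit_pca` samples such vectors) and compressing them with scikit-learn's PCA before the Q-network sees them. A self-contained sketch of that embedding step; the sizes below are illustrative stand-ins, not the paper's settings:

```python
import numpy as np
from sklearn.decomposition import PCA

ACTION_SIZE, OBS_SIZE, SEQUENCE_SIZE, PCA_LENGTH = 3, 2, 4, 8
slot = ACTION_SIZE + OBS_SIZE  # one (action, observation) pair per time step

# Sample random one-hot histories, mirroring what fit_pca() does above.
rng = np.random.default_rng(0)
histories = np.zeros((1000, SEQUENCE_SIZE * slot))
for row in histories:
    for t in range(SEQUENCE_SIZE):
        row[t * slot + rng.integers(ACTION_SIZE)] = 1             # one-hot action
        row[t * slot + ACTION_SIZE + rng.integers(OBS_SIZE)] = 1  # one-hot observation

pca = PCA(n_components=PCA_LENGTH).fit(histories)
embedding = pca.transform(histories[:1])  # fixed-size state fed to the Q-network
print(embedding.shape)  # (1, 8)
```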
a-marinin/SeleniumGettingStarted
https://github.com/a-marinin/SeleniumGettingStarted
8f7bde04d7b832f9e2c08e950cb4c903dc915d18
c8346dfcef3bb992d4a40ba4f933dc2047aea0de
cb9d86ad94631075f71ec5fc76a551b188b93b83
refs/heads/master
2020-02-16T20:42:01.343540
2018-03-18T23:42:28
2018-03-18T23:42:28
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7316017150878906, "alphanum_fraction": 0.7359307408332825, "avg_line_length": 40.181819915771484, "blob_id": "d25168738908a33330cbe5251f6061548c3e2de1", "content_id": "25b102d7b65c0616178cd0c86fdf9dd6a384a555", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 462, "license_type": "no_license", "max_line_length": 100, "num_lines": 11, "path": "/First_test.py", "repo_name": "a-marinin/SeleniumGettingStarted", "src_encoding": "UTF-8", "text": "from selenium import webdriver\r\n\r\ndriver = webdriver.Chrome('C:\\\\Users\\\\amarinin\\\\AppData\\\\Local\\\\Programs\\\\Python\\\\chromedriver.exe')\r\ndriver.get(\"http://www.facebook.com\")\r\ndriver.maximize_window()\r\ndriver.implicitly_wait(20)\r\ndriver.find_element_by_id(\"email\").send_keys(\"Selenium Webdriver\")\r\ndriver.find_element_by_name(\"pass\").send_keys(\"Python\")\r\ndriver.find_element_by_id(\"loginbutton\").click()\r\ndriver.get_screenshot_as_file(\"Facebook.png\")\r\n# driver.quit()" }, { "alpha_fraction": 0.7224880456924438, "alphanum_fraction": 0.7336522936820984, "avg_line_length": 38.1875, "blob_id": "533a0c7bea064f9a4f9b7a6270fa9b985d03aefb", "content_id": "56c720132108679973fe7324b1dc3563fdc7bb46", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 627, "license_type": "no_license", "max_line_length": 124, "num_lines": 16, "path": "/Workflowy.py", "repo_name": "a-marinin/SeleniumGettingStarted", "src_encoding": "UTF-8", "text": "from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\n\ndriver = webdriver.Chrome('C:\\\\Python\\\\chromedriver.exe')\ndriver.get(\"https://workflowy.com\")\n\ndriver.maximize_window()\ndriver.implicitly_wait(5)\ndriver.find_element_by_class_name(\"header-bar\").find_element_by_link_text(\"Login\").click()\ndriver.find_element_by_id(\"id_username\").send_keys(\"[email protected]\")\ndriver.find_element_by_id(\"id_password\").send_keys(\"Abcd1234\" + Keys.ENTER)\ndriver.implicitly_wait(25)\n\ndriver.find_element_by_xpath(\"//*[@id='pageContainer']/div/div[2]/div[3]/div[1]/div[1]/a\").click() // Go to the first bullet\n\n# driver.quit()\n" } ]
2
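Both scripts in the record above use the `find_element_by_*` helpers, which were deprecated and later removed in Selenium 4. A hedged sketch of how the first script's lookups map onto the current `By`-based API — the locator values are copied from the script, the rest is illustrative and assumes a compatible chromedriver is available:

```python
from selenium import webdriver
from selenium.webdriver.common.by import By

driver = webdriver.Chrome()  # assumes chromedriver is reachable on PATH
driver.get("http://www.facebook.com")
driver.find_element(By.ID, "email").send_keys("Selenium Webdriver")
driver.find_element(By.NAME, "pass").send_keys("Python")
driver.find_element(By.ID, "loginbutton").click()
driver.quit()
```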
ThanasisTs/dpop
https://github.com/ThanasisTs/dpop
3949fb54389edb0b339a1d6b52b34e8915057eaa
40908909eed4abc60c61e9470baf1c81ea0a771b
980203c27ea4858fd8ac0d77a131c16ed4921fda
refs/heads/main
2023-03-06T06:38:16.003563
2021-02-17T15:05:19
2021-02-17T15:05:19
326,838,482
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7983651161193848, "alphanum_fraction": 0.7983651161193848, "avg_line_length": 32.45454406738281, "blob_id": "e24172d3dca00239fc93366bb91653698142de4e", "content_id": "8f2a631d7182035951eb8998dcba3911fcf17a8f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 367, "license_type": "no_license", "max_line_length": 56, "num_lines": 11, "path": "/pipeline.py", "repo_name": "ThanasisTs/dpop", "src_encoding": "UTF-8", "text": "from dimes_generator import dimes_generator\nfrom graph_generator import graph_generator\nfrom pseudotree_generator import pseudotree_generator\nfrom yaml_generator import yaml_generator\n\nconsistent = False\nwhile(not consistent):\n dimes_generator()\n graph = graph_generator()\n consistent, pseudotree = pseudotree_generator(graph)\n yaml_generator(pseudotree)" }, { "alpha_fraction": 0.5772241353988647, "alphanum_fraction": 0.581615149974823, "avg_line_length": 33.688743591308594, "blob_id": "73f13e2a2e46782a0e83b378c1d5bee5614e32f2", "content_id": "c332d83914eecaefdff1231a9d926253d9a3d072", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10476, "license_type": "no_license", "max_line_length": 181, "num_lines": 302, "path": "/dpop.py", "repo_name": "ThanasisTs/dpop", "src_encoding": "UTF-8", "text": "import sys\nimport yaml\nimport numpy as np\nimport pandas as pd\nimport itertools\nimport copy\nimport random\nimport time\nimport csv\n\nnum_agents, num_meetings, max_msg_size, start_time = 0, 0, 0, 0\n\n# Node class\nclass Node():\n def __init__(self, name, potential_values):\n self.name = name\n self.parent = None\n self.children = []\n self.p_parents = []\n self.sep = []\n self.relations = {}\n self.util = pd.DataFrame({})\n self.potential_values = potential_values\n self.values_based_on_parent_values = None\n self.value = None\n self.children_utils = None\n\n def set_parent(self, other):\n self.parent = other\n\n def set_children(self, other):\n self.children.append(other)\n\n def set_p_parents(self, other):\n self.p_parents.append(other)\n\n def set_sep(self, other):\n self.sep.append(other)\n\n def set_relations(self, other, relation):\n self.relations.update({other: relation})\n\n\ndef project_out(node, join_matrix):\n global max_msg_size\n start_time = time.time()\n headers = list(join_matrix.columns)\n headers.remove(node.name)\n headers.remove('Util')\n tmp = join_matrix.copy()\n tmp2 = tmp.sort_values(\n 'Util', ascending=False).drop_duplicates(headers)\n project_out_df = tmp2.copy()\n del project_out_df[node.name]\n del tmp2['Util']\n values_based_on_ancestor_values = tmp2\n values_based_on_ancestor_values = values_based_on_ancestor_values.rename(columns={\n node.name: 'Value'})\n\n if len(values_based_on_ancestor_values) > max_msg_size:\n max_msg_size = len(values_based_on_ancestor_values)\n\n # print(\"Project out --- %s seconds ---\" % (time.time() - start_time))\n return project_out_df.reset_index(drop=True), values_based_on_ancestor_values.reset_index(drop=True)\n\n\ndef join(node, relations, flag):\n if node.name == '15,5':\n print(relations)\n input()\n start_time = time.time()\n join_dict = {}\n headers = []\n for i in relations:\n headers.extend(list(i.columns))\n headers = list(set(headers))\n headers.remove('Util')\n headers.append('Util')\n value_combs_join = list(itertools.product(node.potential_values, repeat=len(headers)-1))\n for i in range(len(headers)-1):\n join_dict.update({headers[i]: 
list(zip(*value_combs_join))[i]})\n join_df = pd.DataFrame(join_dict)\n\n join_df['Util'] = 0\n for relation in relations:\n tmp = list(relation.columns)\n tmp.remove('Util')\n join_df = pd.merge(join_df, relation, how='left', on=tmp)\n join_df['Util_x'] = join_df['Util_x'] + join_df['Util_y']\n join_df.drop(['Util_y'], axis=1, inplace=True)\n join_df = join_df.rename(columns={'Util_x': 'Util'})\n\n if flag:\n # print(\"Join --- %s seconds ---\" % (time.time() - start_time))\n return project_out(node, join_df)\n else:\n # print(\"Join --- %s seconds ---\" % (time.time() - start_time))\n return join_df.reset_index(drop=True)\n\n\ndef dpop(tree):\n global max_msg_size, num_agents, num_messages, start_time\n\n # visited: set to keep track of all visited nodes\n visited = set()\n\n # open_set: list to keep track of nodes to be processed\n open_set = []\n for node in tree:\n if not node.children:\n open_set.append(node)\n\n # Util propagation\n print('--- Util propagation ---')\n while open_set:\n current_node = open_set[0]\n\n # If the current node is the root, exit util propagation phase\n if current_node.parent is None:\n if len(visited) == len(tree) - 1:\n if len(current_node.children) == 1:\n current_node.util = current_node.children[0].util\n break\n tmp = [child.util for child in current_node.children]\n current_node.util = join(current_node, tmp, False)\n break\n else:\n del open_set[0]\n open_set.append(current_node)\n continue\n\n # If the node is a leaf, compute its utility\n if not current_node.children:\n current_node.util, current_node.values_based_on_parent_values = join(\n current_node, current_node.relations.values(), True)\n \n # If the node is not a leaf, make sure that the utilities of its children are\n # available and then compute its utility\n else:\n children_update = True\n for child in current_node.children:\n if child.util.empty:\n open_set.remove(current_node)\n open_set.append(current_node)\n children_update = False\n break\n if not children_update:\n continue\n else:\n # If the children utilities are available,\n # join their util messages and compute the node's final utility\n children_utils = [\n child.util for child in current_node.children]\n current_node.children_utils = join(\n current_node, children_utils, False)\n tmp = list(current_node.relations.values())\n tmp.append(current_node.children_utils)\n current_node.util, current_node.values_based_on_parent_values = join(\n current_node, tmp, True)\n\n\n # Remove current node from open_set and add its parent\n if current_node.parent not in open_set:\n open_set.append(current_node.parent)\n open_set.remove(current_node)\n visited.add(current_node.name)\n\n num_util_msgs = len(tree)-1\n num_value_msgs = len(tree)-1\n num_variables = len(tree)\n num_cycles, num_constraints = 0, 0\n for node in tree:\n num_value_msgs += len(node.p_parents)\n num_cycles += len(node.p_parents)\n num_constraints = num_cycles + len(tree) -1\n num_messages = num_value_msgs + num_util_msgs\n\n\n print(\"Number of constraints: {}\".format(num_constraints))\n\n print(\"Number of cycles: {}\".format(num_cycles))\n\n print(\"Number of messages: {}\".format(num_messages))\n\n print(\"Maximum message size: {}\".format(max_msg_size))\n # Value propagation\n print('--- Value propagation ---')\n\n value_prop_start_time = time.time()\n open_set = [node for node in tree]\n\n # Node values\n node_values = {}\n\n sorted_root_util = current_node.util.sort_values('Util', ascending=False)\n node_values.update(\n {current_node.name: 
sorted_root_util[current_node.name][0]})\n current_node.value = list(node_values.values())[0]\n open_set.remove(current_node)\n\n next_node = True\n\n # For each node, get the value of its parent (and pseudoparents) and compute its own value\n while open_set:\n if not current_node.parent:\n current_node = random.choice(current_node.children)\n for ancestor in current_node.sep:\n if ancestor.name not in node_values.keys():\n open_set.remove(current_node)\n open_set.append(current_node)\n current_node = open_set[0]\n next_node = False\n break\n if not next_node:\n next_node = True\n if open_set == []:\n break\n else:\n continue\n headers = list(current_node.values_based_on_parent_values.columns)\n headers.remove('Value')\n df = current_node.values_based_on_parent_values\n for h in headers:\n df = df[(df[h] == node_values.get(h))]\n df = df.reset_index(drop=True)\n node_values.update({current_node.name: df['Value'][0]})\n current_node.value = df['Value'][0]\n open_set.remove(current_node)\n try:\n current_node = open_set[0]\n except:\n break\n print(\"Value propagation --- %s seconds ---\" % (time.time() - value_prop_start_time))\n\n # Print the nodes with their final values\n for node in tree:\n print(node.name, node.value)\n\n print(\"Total Time --- %s seconds ---\" % (time.time() - start_time))\n csvFile = open('results.csv', 'a')\n wr = csv.writer(csvFile)\n wr.writerow([sys.argv[1].split('/')[1][:-9], num_agents, num_meetings, num_variables, num_constraints, num_messages, max_msg_size, num_cycles, round(time.time()-start_time, 2)])\n\ndef main():\n global num_agents, num_meetings, start_time\n start_time = time.time()\n\n num_agents = int(sys.argv[1].split('/')[1].split('_')[0])\n num_meetings = int(sys.argv[1].split('/')[1].split('_')[1])\n\n # Parse the yaml and create the tree\n tree_file = yaml.load(open(sys.argv[1], 'r'), Loader=yaml.FullLoader)\n\n tree = []\n tree_dict = {}\n for node in tree_file['nodes']:\n tree_node = Node(node, tree_file['potential_values'])\n tree.append(tree_node)\n tree_dict.update({node: tree_node})\n\n for node in tree:\n if tree_file['parents'][node.name] is not None:\n node.set_parent(tree_dict.get(tree_file['parents'][node.name][0]))\n node.set_relations(\n node.parent, tree_file['parent_relations'][node.name])\n\n if tree_file['children'][node.name] is not None:\n for child in tree_file['children'][node.name]:\n node.set_children(tree_dict.get(child))\n\n if tree_file['p_parents'][node.name] is not None:\n for p_parent in tree_file['p_parents'][node.name]:\n node.set_p_parents(tree_dict.get(p_parent))\n node.set_relations(tree_dict.get(\n p_parent), tree_file['pseudo_parent_relations'][node.name][p_parent])\n\n if tree_file['sep'][node.name] is not None:\n for sep in tree_file['sep'][node.name]:\n node.set_sep(tree_dict.get(sep))\n\n for node in tree:\n value_combs_ancestors = list(itertools.product(node.potential_values, repeat=2))\n break\n\n for node in tree:\n t = {}\n for k, v in node.relations.items():\n tmp = {node.name: list(zip(*value_combs_ancestors))[0]}\n tmp.update({k.name: list(zip(*value_combs_ancestors))[1]})\n a = []\n for ja in v:\n for i in ja:\n a.append(i)\n tmp.update({'Util': a})\n t.update({k: pd.DataFrame(tmp)})\n node.relations = t\n\n # call DPOP algorithm\n dpop(tree)\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.7008547186851501, "alphanum_fraction": 0.8205128312110901, "avg_line_length": 52.272727966308594, "blob_id": "a75f91d274e1eb9d4b55c76014f4bb30171e8c20", "content_id": 
"f5c80758639e163db6e19c628b438077332891d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 585, "license_type": "no_license", "max_line_length": 57, "num_lines": 11, "path": "/results", "repo_name": "ThanasisTs/dpop", "src_encoding": "UTF-8", "text": "#!/usr/bin/bash\npython dpop.py generated_dimes_problems/100_50_4ts.yaml\npython dpop.py generated_dimes_problems/200_100_4ts.yaml\npython dpop.py generated_dimes_problems/300_150_4ts.yaml\npython dpop.py generated_dimes_problems/400_200_4ts.yaml\npython dpop.py generated_dimes_problems/500_250_4ts.yaml\npython dpop.py generated_dimes_problems/600_300_4ts.yaml\npython dpop.py generated_dimes_problems/700_350_4ts.yaml\npython dpop.py generated_dimes_problems/800_400_4ts.yaml\npython dpop.py generated_dimes_problems/900_450_4ts.yaml\npython dpop.py generated_dimes_problems/1000_500_4ts.yaml" }, { "alpha_fraction": 0.5793335437774658, "alphanum_fraction": 0.5918679237365723, "avg_line_length": 27.6842098236084, "blob_id": "b3d07c18d4e6e9bff86d06ffe2800dd74407d1d4", "content_id": "1389e03cf589a4112e5cccd21343f943fad0feab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3271, "license_type": "no_license", "max_line_length": 133, "num_lines": 114, "path": "/graph_generator.py", "repo_name": "ThanasisTs/dpop", "src_encoding": "UTF-8", "text": "import sys\nimport numpy as np\n\n\n# Node class\nclass Node():\n\tdef __init__(self, name, util):\n\t\tself.name = name\n\t\tself.util = util\n\t\tself.neighbors = []\n\t\tself.agent_relation = {}\n\t\tself.meeting_relation = {}\n\t\tself.parent = []\n\t\tself.p_parents = []\n\t\tself.children = []\n\t\tself.p_children = []\n\n\tdef set_neighbor(self, other):\n\t\tself.neighbors.append(other)\n\n\tdef set_agent_relation(self, other):\n\t\ttemp = []\n\t\tfor i in range(len(self.util)):\n\t\t\trow = []\n\t\t\tfor j in range(len(other.util)):\n\t\t\t\tif i == j:\n\t\t\t\t\trow.append(-10000000)\n\t\t\t\telse:\n\t\t\t\t\trow.append(int(self.util[i] + other.util[j]))\n\t\t\ttemp.append(row)\n\t\tself.agent_relation.update({other : temp})\n\n\tdef set_meeting_relation(self, other):\n\t\ttemp = []\n\t\tfor i in range(len(self.util)):\n\t\t\trow = []\n\t\t\tfor j in range(len(other.util)):\n\t\t\t\tif i == j:\n\t\t\t\t\trow.append(10000000)\n\t\t\t\telse:\n\t\t\t\t\trow.append(int(self.util[i] + other.util[j]))\n\t\t\ttemp.append(row)\n\t\tself.meeting_relation.update({other : temp})\n\n\ndef graph_generator():\n\tfile = open(sys.argv[3], 'r')\n\tfl = file.readlines()\n\tagent_meetings = {}\n\tagent = 1\n\tmeetings = []\n\tfirst_time = True\n\tagent_flag = True\n\t# For each node in the future tree, construct its util vector and \n\t# store the result in a dictionary of the form {(agent,meeting) : util} \n\tfor i in range(len(fl)):\n\t\tif i==0:\n\t\t\tnum_agents = int(fl[i].split(',')[0])\n\t\telif first_time and (agent_flag or int(fl[i].split(',')[0]) != 1):\n\t\t\tif int(fl[i].split(',')[0]) != 1:\n\t\t\t\tagent_flag = False\n\t\t\tagent_meetings.update({\"{},{}\".format(int(fl[i].split(',')[0]), int(fl[i].split(',')[1])) : np.array([int(fl[i].split(',')[2])])})\n\t\telse:\n\t\t\tif first_time:\n\t\t\t\tagent = 1\n\t\t\t\tfirst_time = False\n\t\t\t\ttimes = np.array([int(fl[i].split(',')[2])])\n\t\t\telse:\n\t\t\t\tif int(fl[i].split(',')[0]) == agent:\n\t\t\t\t\ttimes = np.append(times, int(fl[i].split(',')[2]))\n\t\t\t\telse:\n\t\t\t\t\tfor k,v in agent_meetings.items():\n\t\t\t\t\t\tif 
int(k.split(',')[0]) == agent:\n\t\t\t\t\t\t\tagent_meetings.update({k : v*times})\n\t\t\t\t\ttimes = np.array([int(fl[i].split(',')[2])])\n\t\t\t\t\tagent = int(fl[i].split(',')[0])\n\tfor k,v in agent_meetings.items():\n\t\tif int(k.split(',')[0]) == agent:\n\t\t\tagent_meetings.update({k : v*times})\n\n\t# Based on the dictionary, construct the Node objects containing\n\t# the neighbors and the util matrices with each neighbor\n\tgraph = []\n\tname_map = {}\n\tfor k in agent_meetings.keys():\n\t\tnode = Node(k, agent_meetings.get(k))\n\t\tname_map.update({k : node})\n\t\tgraph.append(node)\n\n\tfor node in graph:\n\t\tfor k in name_map.keys():\n\t\t\tif node != name_map.get(k):\n\t\t\t\tif node.name.split(',')[0] == k.split(',')[0] or node.name.split(',')[1] == k.split(',')[1]:\n\t\t\t\t\tnode.set_neighbor(name_map.get(k))\n\n\tfor node in graph:\n\t\tfor neighbor in node.neighbors:\n\t\t\tif node.name.split(',')[0] == neighbor.name.split(',')[0]:\n\t\t\t\tnode.set_agent_relation(neighbor)\n\t\t\telse:\n\t\t\t\tnode.set_meeting_relation(neighbor)\n\n\t# if input(\"Print graph? (y,n): \") == 'y':\n\t# \tfor node in graph:\n\t# \t\tprint(\"Node: \", node.name)\n\t# \t\tfor neighbor in node.neighbors:\n\t# \t\t\tprint(\"Neighbor: \", neighbor.name)\n\t# \t\t\ttry:\n\t# \t\t\t\tprint(\"Agent_relation: \", node.agent_relation[neighbor])\n\t# \t\t\texcept:\n\t# \t\t\t\tprint(\"Meeting_relation: \", node.meeting_relation[neighbor])\n\t# \t\tprint(\"==============================================\")\n\n\treturn graph\n\n" }, { "alpha_fraction": 0.6320840716362, "alphanum_fraction": 0.6403107643127441, "avg_line_length": 22.537633895874023, "blob_id": "a1415515d813dc6de10915ef689ebfdd4d7bdd72", "content_id": "2de1356cb5aa2611dd28f4c07b4541174c925b23", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2188, "license_type": "no_license", "max_line_length": 72, "num_lines": 93, "path": "/yaml_generator.py", "repo_name": "ThanasisTs/dpop", "src_encoding": "UTF-8", "text": "import sys\nimport yaml\n\ndef yaml_generator(graph):\n\tfile = open(sys.argv[4]+'.yaml', 'w')\n\t\n\t# add nodes\n\tnode_names = [i.name for i in graph]\n\tyaml_dict = {'nodes' : node_names}\n\tyaml.dump(yaml_dict, file)\n\t\n\t# add parents\n\ttmp = {}\n\tfor node in graph:\n\t\ttry:\n\t\t\ttmp.update({node.name : [node.parent[0].name]})\n\t\texcept:\n\t\t\ttmp.update({node.name : None})\n\tyaml_dict = {'parents' : tmp}\n\tyaml.dump(yaml_dict, file)\n\n\t# add children\n\ttmp = {}\n\tfor node in graph:\n\t\tif len(node.children) == 0:\n\t\t\ttmp.update({node.name : None})\n\t\t\tcontinue\n\t\tchildren_names = [i.name for i in node.children]\n\t\ttmp.update({node.name : children_names})\n\n\tyaml_dict = {'children' : tmp}\n\tyaml.dump(yaml_dict, file)\n\n\t# add pseudo parents\n\ttmp = {}\n\tfor node in graph:\n\t\tif len(node.p_parents) == 0:\n\t\t\ttmp.update({node.name : None})\n\t\t\tcontinue\n\t\tp_parents_names = [i.name for i in node.p_parents]\n\t\ttmp.update({node.name : p_parents_names})\n\n\tyaml_dict = {'p_parents' : tmp}\n\tyaml.dump(yaml_dict, file)\n\n\t# add potential values\n\tyaml_dict = {'potential_values' : list(range(1, len(graph[0].util)+1))}\n\tyaml.dump(yaml_dict, file)\n\n\t# add parent relations\n\ttmp = {}\n\tfor node in graph:\n\t\ttry:\n\t\t\tif node.parent[0] in node.agent_relation.keys():\n\t\t\t\ttmp.update({node.name : node.agent_relation.get(node.parent[0])})\n\t\t\telse:\n\t\t\t\ttmp.update({node.name : 
node.meeting_relation.get(node.parent[0])})\n\t\texcept:\n\t\t\ttmp.update({node.name : None})\n\n\tyaml_dict = {'parent_relations' : tmp}\n\tyaml.dump(yaml_dict, file)\n\t\n\t# add pseudoparent relations\n\ttmp = {}\n\tfor node in graph:\n\t\tif len(node.p_parents) == 0:\n\t\t\ttmp.update({node.name : None})\n\t\t\tcontinue\n\t\ttmp2 = {}\n\t\tfor n in node.p_parents:\n\t\t\tif n in node.agent_relation.keys():\n\t\t\t\ttmp2.update({n.name : node.agent_relation.get(n)})\n\t\t\telse:\n\t\t\t\ttmp2.update({n.name : node.meeting_relation.get(n)})\n\t\ttmp.update({node.name : tmp2})\n\tyaml_dict = {'pseudo_parent_relations' : tmp}\n\tyaml.dump(yaml_dict, file)\n\t\n\t# add sep\n\ttmp = {}\n\tfor node in graph:\n\t\tn = [i.name for i in node.p_parents]\n\t\ttry:\n\t\t\tn.insert(0, node.parent[0].name)\n\t\texcept:\n\t\t\tn.insert(0, None)\n\t\ttmp.update({node.name : n})\n\tyaml_dict = {'sep' : tmp}\n\tyaml.dump(yaml_dict, file)\n\n\n\tfile.close()" }, { "alpha_fraction": 0.5259671211242676, "alphanum_fraction": 0.5324736833572388, "avg_line_length": 34.06224060058594, "blob_id": "4e278c3828c56c370eb1fb1b580aa320bb2ddc3f", "content_id": "c4db548504a4f1ba23331f2eea2d4ad0b380ebc4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8453, "license_type": "no_license", "max_line_length": 89, "num_lines": 241, "path": "/dimes_generator.py", "repo_name": "ThanasisTs/dpop", "src_encoding": "UTF-8", "text": "import sys\nimport random\nimport numpy as np\n\nclass Node():\n def __init__(self,name,level):\n self.name = name\n self.level = level\n self.parent = ''\n self.children = []\n self.siblings = []\n self.meetings = []\n self.meetings_utils = []\n self.time_slots_utils = []\n\ndef nodes_per_level(tree,level):\n nodes = 0 \n for node in tree:\n if(node.level == level):\n nodes += 1\n return nodes\n\ndef construct_tree(agents):\n tree = []\n tmp_name_index = 2\n level = 1\n\n root_node = Node('1',0)\n tree.append(root_node)\n agents -= 1\n\n while agents > 0:\n previous_level_nodes = nodes_per_level(tree,level-1)\n nodes_level = previous_level_nodes * (level+1)\n for i in range(tmp_name_index,tmp_name_index+nodes_level):\n if agents > 0:\n tmp_node = Node(f'{i}',level)\n tree.append(tmp_node)\n agents -= 1\n tmp_name_index += 1\n else:\n break\n level += 1\n \n child_idx = 1\n for node in tree:\n try:\n for j in range(child_idx,child_idx+node.level+2):\n child = tree[j]\n node.children.append(child)\n child_idx = j + 1\n except:\n pass\n for node in tree:\n for child in node.children:\n child.parent = node\n for node_x in tree:\n for node_y in tree:\n if((node_x.parent == node_y.parent) and (node_y.name!= node_x.name)):\n node_x.siblings.append(node_y)\n return tree\n\ndef generate_grp(tree,grp_names,max_meetings):\n grp_meetings = []\n nodes_chosen = []\n potential_nodes = []\n \n for node in tree:\n if(len(node.children)):\n potential_nodes.append(node)\n for grp_name in grp_names:\n fl = False\n while(fl==False):\n rnd_node = random.choice(potential_nodes)\n priority_nodes = []\n for node in potential_nodes:\n if(len(node.meetings)==0):\n priority_nodes.append(node)\n if(priority_nodes):\n rnd_node = random.choice(priority_nodes)\n if(rnd_node in nodes_chosen):\n continue\n tmp_meeting = []\n tmp_meeting.append(rnd_node.name)\n for child in rnd_node.children:\n if(len(child.meetings) > max_meetings):\n continue\n for child in rnd_node.children:\n child.meetings.append(grp_name)\n tmp_meeting.append(child.name)\n nodes_chosen.append(rnd_node)\n 
rnd_node.meetings.append(grp_name)\n grp_meetings.append(tmp_meeting)\n fl = True\n return zip(grp_names,grp_meetings)\n\ndef generate_ptc(tree,ptc_names,max_meetings):\n ptc_meetings = []\n nodes_chosen = []\n potential_nodes = []\n \n for node in tree:\n if(len(node.children)):\n potential_nodes.append(node)\n for ptc_name in ptc_names:\n fl = False\n while(fl==False):\n rnd_node = random.choice(potential_nodes)\n priority_nodes = []\n for node in potential_nodes:\n if(len(node.meetings)==0):\n priority_nodes.append(node)\n if(priority_nodes):\n rnd_node = random.choice(priority_nodes)\n if(rnd_node in nodes_chosen):\n continue\n tmp_meeting = []\n tmp_meeting.append(rnd_node.name)\n rnd_child = random.choice(rnd_node.children)\n if(len(rnd_child.meetings) > max_meetings):\n continue\n rnd_node.meetings.append(ptc_name)\n rnd_child.meetings.append(ptc_name)\n nodes_chosen.append(rnd_node)\n tmp_meeting.append(rnd_child.name)\n ptc_meetings.append(tmp_meeting)\n fl = True\n return zip(ptc_names,ptc_meetings)\n\ndef generate_sib(tree,sib_names,max_meetings):\n sib_meetings = []\n nodes_chosen = []\n potential_nodes = []\n \n for node in tree:\n if(len(node.siblings)):\n potential_nodes.append(node)\n for sib_name in sib_names:\n fl = False\n while(fl==False):\n rnd_node = random.choice(potential_nodes)\n priority_nodes = []\n for node in potential_nodes:\n if(len(node.meetings)==0):\n priority_nodes.append(node)\n if(priority_nodes):\n rnd_node = random.choice(priority_nodes)\n if(rnd_node in nodes_chosen):\n continue\n tmp_meeting = []\n tmp_meeting.append(rnd_node.name)\n for sibling in rnd_node.siblings:\n if(len(sibling.meetings) > max_meetings):\n continue\n for sibling in rnd_node.siblings:\n sibling.meetings.append(sib_name)\n tmp_meeting.append(sibling.name)\n nodes_chosen.append(rnd_node)\n rnd_node.meetings.append(sib_name)\n sib_meetings.append(tmp_meeting)\n fl = True\n return zip(sib_names,sib_meetings)\n\ndef generate_utils(tree,time_slots_utils,meetings_utils):\n for node in tree:\n for i in range(len(node.meetings)):\n rnd_ts_util = random.choice(time_slots_utils) \n node.time_slots_utils = rnd_ts_util\n rnd_meet_util = random.choice(meetings_utils) \n node.meetings_utils.append(rnd_meet_util)\n\ndef print_details(tree,agents,meetings):\n print(f'{\"-\"*25} INFO: {\"-\"*25}')\n for x in tree:\n print(f'Info for node {x.name}:')\n if(x.parent):\n print(f'Parent of {x.name}: {x.parent.name}', end = '\\n')\n else:\n print(f'Parent of {x.name}: Root node - no parents', end = '\\n')\n print(f'Children of {x.name}:',end = ' ')\n if(x.children):\n for idx,child in enumerate(x.children):\n if(idx == len(x.children)-1):\n print(child.name,end = '\\n')\n break\n print(child.name,end = ', ')\n else:\n print(f'Leaf node - no children', end = '\\n')\n print(f'Siblings of {x.name}:',end = ' ')\n if(x.siblings):\n for idx,sibling in enumerate(x.siblings):\n if(idx == len(x.siblings)-1):\n print(sibling.name,end = '\\n')\n break\n print(sibling.name,end = ', ')\n else:\n print(f'No siblings', end = '\\n')\n print(f'Meetings of {x.name} :',end = ' ')\n if(x.meetings):\n for idx,meeting in enumerate(x.meetings):\n if(idx == len(x.meetings)-1):\n print(f'{meeting} - Util: {x.meetings_utils[idx]}', end = '\\n')\n break\n print(f'{meeting} - Util: {x.meetings_utils[idx]},', end = ' ')\n else:\n print(f'{x.name} participates in no meetings', end = '\\n')\n print(f'Time slots preferences of {x.name} - {x.time_slots_utils}',end = ' ')\n print('\\n')\n\ndef export_to_file(tree,agents,meetings):\n 
with open(\"output.txt\", \"w\") as text_file:\n print(f'{agents},{meetings},{meetings}',file=text_file)\n for node in tree:\n for idx,meeting in enumerate(node.meetings):\n print(f'{node.name},{meeting},{node.meetings_utils[idx]}',file=text_file)\n for node in tree:\n for idx,ts_util in enumerate(node.time_slots_utils):\n print(f'{node.name},{idx+1},{ts_util}',file=text_file)\n\ndef dimes_generator():\n agents = int(sys.argv[1])\n meetings = int(sys.argv[2])\n tree = construct_tree(agents)\n max_meetings = 8\n time_slots_utils = [np.arange(10,40,10),\n np.arange(30,0,-10),\n np.full(3,10)\n ]\n meetings_utils = np.arange(10,110,10)\n meeting_names = np.arange(1,meetings+1)\n split_meeting_names = np.array_split(meeting_names,3)\n grp_names, ptc_names, sib_names = split_meeting_names\n grp = generate_grp(tree,grp_names,max_meetings)\n sib = generate_sib(tree,sib_names,max_meetings)\n ptc = generate_ptc(tree,ptc_names,max_meetings)\n generate_utils(tree,time_slots_utils,meetings_utils)\n\n #sanity check\n # print_details(tree,agents,meetings)\n\n export_to_file(tree,agents,meetings)\n\n\n\n" } ]
6
gsathya/torperf2
https://github.com/gsathya/torperf2
1250f7009f86c52c62a8ab2863f9378d0f1df820
0183be5b1106a42a0967a7008d2784ce562cedef
4a932dc949cb59aa29a97a6d8487ac4d1c14de3b
refs/heads/master
2021-01-01T18:23:00.502570
2013-09-27T01:46:33
2013-09-27T01:46:33
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7255347967147827, "alphanum_fraction": 0.7373691201210022, "avg_line_length": 25.792682647705078, "blob_id": "72a590c5d6e4852bda296e38c7a6379d801816b2", "content_id": "90357794fb62c3e333f8ac8d29fc020d666b2baa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2197, "license_type": "no_license", "max_line_length": 70, "num_lines": 82, "path": "/torperf/torperf.py", "repo_name": "gsathya/torperf2", "src_encoding": "UTF-8", "text": "# Copyright (c) 2013, Sathyanarayanan Gunasekaran, The Tor Project, Inc.\n# See LICENSE for licensing information\n#!/usr/bin/env python\n\nimport txtorcon\nimport perfconf\n\nfrom fileserver import *\n\nfrom pprint import pformat\n\nfrom twisted.internet import reactor\nfrom twisted.internet.defer import Deferred\nfrom twisted.internet.protocol import Protocol\nfrom twisted.internet.endpoints import TCP4ClientEndpoint\nfrom twisted.web.client import Agent\nfrom twisted.web.server import Site\nfrom twisted.web.static import File\nfrom twisted.web.http_headers import Headers\n\nfrom txsocksx.http import SOCKS5Agent\n\n\nclass BeginningPrinter(Protocol):\n def __init__(self, finished):\n self.finished = finished\n self.data = []\n\n def dataReceived(self, data):\n self.data.append(data)\n\n def connectionLost(self, reason):\n print 'Finished receiving body:', reason.getErrorMessage()\n\n self.finished.callback(None)\n\ndef cbRequest(response):\n print 'Response received'\n print 'Response headers:'\n print pformat(list(response.headers.getAllRawHeaders()))\n\n finished = Deferred()\n response.deliverBody(BeginningPrinter(finished))\n return finished\n\ndef cbShutdown(ignored):\n reactor.stop()\n\ndef do_request(state):\n torServerEndpoint = TCP4ClientEndpoint(reactor, '127.0.0.1', 9050)\n agent = SOCKS5Agent(reactor, proxyEndpoint=torServerEndpoint)\n d = agent.request('GET', perfconf.server_config[\"ip\"])\n d.addCallback(cbRequest)\n # d.addBoth(cbShutdown)\n\ndef setup_complete(proto):\n print \"setup complete:\", proto\n state = txtorcon.TorState(proto.tor_protocol)\n\n state.post_bootstrap.addCallback(do_request)\n state.post_bootstrap.addErrback(setup_failed)\n\ndef setup_failed(arg):\n print \"Setup Failed\", arg\n reactor.stop()\n\ndef updates(prog, tag, summary):\n print \"%d%%: %s\" % (prog, summary)\n\nconfig = txtorcon.TorConfig()\nconfig.OrPort = 1234\nconfig.SocksPort = perfconf.tor_config['socks_port']\n\nresource = StaticFile('static')\nfactory = Site(resource)\nreactor.listenTCP(8888, factory)\n\n# Launch tor.\nd = txtorcon.launch_tor(config, reactor, progress_updates=updates)\nd.addCallback(setup_complete)\nd.addErrback(setup_failed)\nreactor.run()\n" }, { "alpha_fraction": 0.728723406791687, "alphanum_fraction": 0.7304964661598206, "avg_line_length": 22.5, "blob_id": "14f2d78adfac11b221fa12758604b8c7c713be9b", "content_id": "91dbc4c2da9e8305395242d1356884c09b3c550d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 564, "license_type": "no_license", "max_line_length": 80, "num_lines": 24, "path": "/README.md", "repo_name": "gsathya/torperf2", "src_encoding": "UTF-8", "text": "Read docs/torperf2.pdf for more details\n\n# Developing with Vagrant:\n\nTo set up the development environment:\n\n $ vagrant up\n\nIf all goes well, then you can ssh into the box to continue development:\n\n $ vagrant ssh\n $ cd /torperf\n $ python torperf/torperf.py\n\n# Non-Vagrant development:\nIf you would rather not use Vagrant, 
please install the following packages: \n- python\n- python-dev\n- python-pip\n- tor\n\n(You can try `apt-get -y install python python-dev python-pip tor`)\n\nAnd then run `pip install -r requirements.txt` to install python packages.\n" }, { "alpha_fraction": 0.7101167440414429, "alphanum_fraction": 0.7237353920936584, "avg_line_length": 31.808509826660156, "blob_id": "2869c0c76b7230fba8176f6581a68a93cd64f958", "content_id": "de6f8fd06c6c85282c97d05a953c630e13bcc679", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Ruby", "length_bytes": 1542, "license_type": "no_license", "max_line_length": 125, "num_lines": 47, "path": "/Vagrantfile", "repo_name": "gsathya/torperf2", "src_encoding": "UTF-8", "text": "# Copyright (c) 2013, Sathyanarayanan Gunasekaran, The Tor Project, Inc.\n# See LICENSE for licensing information\n\n# -*- mode: ruby -*-\n# vi: set ft=ruby :\n\n$script = <<SCRIPT\ncd /torperf/\n\n./contrib/setup-dependencies.sh -y\n\n# Kill tor if it's running\nservice tor stop\n\nif [ \"$?\" = \"0\" ]; then\n echo \"Starting TorPerf\"\n python torperf/torperf.py\nelse\n echo \"Looks like we had some setup errors. Please log a bug here: https://github.com/gsathya/torperf2/issues Thanks!\" 1>&2;\nfi\n\nSCRIPT\n\n# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!\nVAGRANTFILE_API_VERSION = \"2\"\n\nVagrant.configure(VAGRANTFILE_API_VERSION) do |config|\n # All Vagrant configuration is done here. The most common configuration\n # options are documented and commented below. For a complete reference,\n # please see the online documentation at vagrantup.com.\n\n # Every Vagrant virtual environment requires a box to build off of.\n config.vm.box = \"precise32\"\n config.vm.network :forwarded_port, guest: 8888, host: 8888\n\n # The url from where the 'config.vm.box' box will be fetched if it\n # doesn't already exist on the user's system.\n config.vm.box_url = \"http://files.vagrantup.com/precise32.box\"\n\n # Share an additional folder to the guest VM. The first argument is\n # the path on the host to the actual folder. The second argument is\n # the path on the guest to mount the folder. 
And the optional third\n # argument is a set of non-required options.\n config.vm.synced_folder \".\", \"/torperf\"\n\n config.vm.provision :shell, :inline => $script\nend\n" }, { "alpha_fraction": 0.584269642829895, "alphanum_fraction": 0.6292135119438171, "avg_line_length": 16.799999237060547, "blob_id": "45b65bfaecabdc53096d82d7190e2230517831b4", "content_id": "1ad81c1592fba6b0eba7899c54d4d56563972a1f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 178, "license_type": "no_license", "max_line_length": 51, "num_lines": 10, "path": "/torperf/perfconf.py", "repo_name": "gsathya/torperf2", "src_encoding": "UTF-8", "text": "# Copyright (c) 2013, Sathyanarayanan Gunasekaran, The Tor Project, Inc.\n# See LICENSE for licensing information\n\nserver_config = {\n 'ip' : \"localhost\"\n}\n\ntor_config = {\n 'socks_port' : 9050\n}\n" }, { "alpha_fraction": 0.6551724076271057, "alphanum_fraction": 0.6593521237373352, "avg_line_length": 29.870967864990234, "blob_id": "54ca9c50128c92f00d885d039489097feb9dead8", "content_id": "1b59b99b26713ff8e33b862aecd0ed00503d123f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 957, "license_type": "no_license", "max_line_length": 89, "num_lines": 31, "path": "/torperf/fileserver.py", "repo_name": "gsathya/torperf2", "src_encoding": "UTF-8", "text": "# Copyright (c) 2013, Sathyanarayanan Gunasekaran, The Tor Project, Inc.\n# See LICENSE for licensing information\n\nimport os\n\nfrom datetime import datetime\n\nfrom twisted.web.server import Site\nfrom twisted.web.static import File\nfrom twisted.web.resource import Resource\n\nclass StaticFile(File):\n def render_GET(self, request):\n timestamp = datetime.now()\n uniqueRequestId = timestamp.isoformat()\n\n # Log first byte time\n self.log(\"Starting request for %s\" % (uniqueRequestId))\n\n # Give the client a unique identifier\n request.setHeader('X-Torperf-request-id', uniqueRequestId)\n\n # Log last byte time when the request finishes\n request.notifyFinish().addCallback(self.log,\n \"Finished request for %s\" % (uniqueRequestId))\n request.notifyFinish().addErrback(self.log)\n\n return File.render_GET(self, request)\n\n def log(self, message):\n print \"%s\" % message\n" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.7051281929016113, "avg_line_length": 14.800000190734863, "blob_id": "1512c0f4a595f523cbf8bd49b1efc8266691b792", "content_id": "cdf095e7855c8464ccc2924efd2f7c7ab2dd15ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 78, "license_type": "no_license", "max_line_length": 17, "num_lines": 5, "path": "/requirements.txt", "repo_name": "gsathya/torperf2", "src_encoding": "UTF-8", "text": "Twisted>=11.1.0\ntxtorcon>=0.7\ntxsocksx>=0.0.2\npyopenssl>=0.13.1\npygeoip>=0.2.7" } ]
6
minda1099/python-practice
https://github.com/minda1099/python-practice
e2fcd2931110dc5c1d26fcc4015f384a1e71c26b
bd9825ba26e1e63fd2d844444ec541cfa175e8b2
c0f5a018aacef7ed2257f837d22eedd323cce95a
refs/heads/master
2020-03-29T13:52:29.544552
2018-09-28T09:40:36
2018-09-28T09:40:36
149,986,405
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6746031641960144, "avg_line_length": 17.071428298950195, "blob_id": "96adf9653e87b410b5ab8f7dc67363645de597b0", "content_id": "69ace07babb2b2d47a9496ac2bcaf398d605fef6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 252, "license_type": "no_license", "max_line_length": 36, "num_lines": 14, "path": "/logfilter/filter-generators.py", "repo_name": "minda1099/python-practice", "src_encoding": "UTF-8", "text": "import sys\n\ninname, outname = sys.argv[1:3]\n\ndef errors_filter(log):\n\tfor l in log:\n\t\tif 'ERROR' in l:\n\t\t\tyield l\n\nwith open(inname) as infile:\n\twith open(outname, 'w') as outfile:\n\t\tfilter = errors_filter(infile)\n\t\tfor l in filter:\n\t\t\toutfile.write(l)" }, { "alpha_fraction": 0.6819788217544556, "alphanum_fraction": 0.6925795078277588, "avg_line_length": 24.727272033691406, "blob_id": "6aad67acde9b17f8900fde164f1fc5552bad2c0a", "content_id": "27e99208d710071d13806968a11a8b9b83b85ee5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 283, "license_type": "no_license", "max_line_length": 52, "num_lines": 11, "path": "/image-scale/scale.py", "repo_name": "minda1099/python-practice", "src_encoding": "UTF-8", "text": "import sys\nfrom PIL import Image\nfrom pathlib import Path\n\ndef process_files( dir, x, y):\n\tfor filename in Path(dir).iterdir():\n\t\tim = Image.open(str(filename))\n\t\tscaled = im.resize((int(x), int(y)))\n\t\tscaled.save(str(filename))\n\nprocess_files(sys.argv[1], sys.argv[2], sys.argv[3])\n" }, { "alpha_fraction": 0.6155303120613098, "alphanum_fraction": 0.623106062412262, "avg_line_length": 20.1200008392334, "blob_id": "3f5ae5092d67b7d641b51d2018bb9c612220352e", "content_id": "7921d50ca2ffc01d771650f5b171616361a33fe8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 528, "license_type": "no_license", "max_line_length": 45, "num_lines": 25, "path": "/xml-parser/coroutines.py", "repo_name": "minda1099/python-practice", "src_encoding": "UTF-8", "text": "import re\n\n\ndef match_regex(filename, regex):\n\twith open(filename) as file:\n\t\tlines = file.readlines()\n\tfor line in lines:\n\t\tmatch = re.match(regex, line.strip())\n\t\tif(match):\n\t\t\tregex = yield match.groups()\n\ndef get_contents(filename):\n\tstart_tag = '^<([^/]*)>([\\s\\w]*)?(</\\w*>)?$'\n\tmatcher = match_regex(filename, start_tag)\n\ttag = next(matcher)\n\twhile True:\n\t\tyield tag\n\t\ttag = matcher.send(start_tag)\n\t\t# print(tag)\n\nfor tag in get_contents('simple.xml'):\n\tif(tag[1]):\n\t\tprint(tag[0] + ': ' + tag[1])\n\telse:\n\t\tprint(tag[0])\n" }, { "alpha_fraction": 0.6908315420150757, "alphanum_fraction": 0.6993603706359863, "avg_line_length": 26.52941131591797, "blob_id": "2149f45926a5f02612da8f72e87c5f842ae94e7d", "content_id": "46bd632260ea8d4a43a0e04e7c5d229f34a7344d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 469, "license_type": "no_license", "max_line_length": 101, "num_lines": 17, "path": "/smtplib/send_email.py", "repo_name": "minda1099/python-practice", "src_encoding": "UTF-8", "text": "import smtplib\nfrom email.mime.text import MIMEText\n\n\ndef send_email(self, subject, message, from_addr, *to_addrs, host='localhost', port=1025, **headers):\n\temail = MIMEText(message)\n\temail['Subject'] = subject\n\temail['From'] = from_addr\n\tfor header, value 
in headers.items():\n\t\temail[header] = value\n\n\tsender = smtplib.SMTP(host, port)\n\tfor addr in to_addrs:\n\t\tdel email['To']\n\t\temail['To'] = addr\n\t\tsender.sendmail(from_addr, addr, email.as_string())\n\tsender.quit()\n\n" }, { "alpha_fraction": 0.6813187003135681, "alphanum_fraction": 0.6886447072029114, "avg_line_length": 16.125, "blob_id": "a928f492a267a9b00b897d2d8b20f95d4ac42c3a", "content_id": "b68b1285c86e45906ff7927210b0c7a2dcb8eb60", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 273, "license_type": "no_license", "max_line_length": 36, "num_lines": 16, "path": "/logfilter/filter-yield-other-iterator.py", "repo_name": "minda1099/python-practice", "src_encoding": "UTF-8", "text": "import sys\n\ninname, outname = sys.argv[1:3]\n\ndef errors_filter(infilename):\n\twith open(infilename) as infile:\n\t\tyield from (\n\t\t\tl for l in infile if 'ERROR' in l\n\t\t\t)\n\n\nfilter = errors_filter(inname)\n\nwith open(outname, 'w') as outfile:\n\tfor l in filter:\n\t\toutfile.write(l)" }, { "alpha_fraction": 0.6643002033233643, "alphanum_fraction": 0.6663286089897156, "avg_line_length": 18.739999771118164, "blob_id": "00690c5d37a2cca36c72553aa6b5b620e02aca75", "content_id": "029c4e952cf3539ec31d0e05df86d07159a06f68", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 986, "license_type": "no_license", "max_line_length": 63, "num_lines": 50, "path": "/todolist/todo.py", "repo_name": "minda1099/python-practice", "src_encoding": "UTF-8", "text": "''' TodoItem Class that can be added and search '''\n\nimport datetime\n\nlast_id = 0\n\nclass TodoItem:\n\n\tdef __init__ (self, content, tags=''):\n\t\tself.content = content\n\t\tself.tags = tags\n\t\tself.creation_date = datetime.date.today()\n\t\tglobal last_id\n\t\tlast_id += 1\n\t\tself.id = last_id\n\n\tdef search(self, filter):\n\t\treturn filter in self.content or filter in self.tags\n\n\nclass TodoList:\n\n\tdef __init__ (self):\n\t\tself.items = []\n\n\tdef new_todo(self, content, tags=''):\n\t\tself.items.append(TodoItem(content, tags))\n\n\tdef _find_todo(self, todo_id):\n\t\tfor item in self.items:\n\t\t\tif(todo_id == item.id):\n\t\t\t\treturn item\n\t\treturn None\n\n\tdef modify_content(self, todo_id, content):\n\t\titem = _find_todo(todo_id)\n\t\tif item:\n\t\t\titem.content = content\n\t\t\treturn True\n\t\treturn False\n\n\tdef modify_tag(self, todo_id, tags):\n\t\titem = _find_todo(todo_id)\n\t\tif item:\n\t\t\titem.tags = tags\n\t\t\treturn True\n\t\treturn False\n\n\tdef search(self, filter):\n\t\treturn [ item for item in self.items if item.search(filter) ]" } ]
6
kazcangi/lbws
https://github.com/kazcangi/lbws
c7c2099db636354e8a7d87dce68825a6915c3762
65da9717d60113d03512512f2d3c6f5f4231445f
a54c49609154d123413fd2bf75de5d164746f4bf
refs/heads/master
2020-07-05T23:19:38.191567
2019-08-17T00:09:25
2019-08-17T00:09:25
202,814,290
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5087887048721313, "alphanum_fraction": 0.5182350873947144, "avg_line_length": 27.39728355407715, "blob_id": "c4bf66d676a7aeeb4d07e5ff0186411122c78bf8", "content_id": "6a529369b7639a1a484dd1c53d39eaee44de7534", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16745, "license_type": "permissive", "max_line_length": 114, "num_lines": 589, "path": "/src/lbws/__init__.py", "repo_name": "kazcangi/lbws", "src_encoding": "UTF-8", "text": "\"\"\"Package lbws to interact with livebox.\"\"\"\n\nimport sys\nimport functools\nimport json\nimport datetime\nfrom collections import namedtuple\nfrom importlib import reload\n\nfrom lbws.exceptions import LbwsException, LbwsNotConnectedError\n\n##\n# @brief python 3 est requis\nif sys.version_info.major < 3:\n raise \"Must be using Python 3\"\n\n##\n# @brief rรจgle un problรจme de sortie vers un fichier\nif sys.stdout.encoding is None:\n reload(sys)\n sys.setdefaultencoding('utf-8') # pylint: disable=E1101\n\n##\n# @brief fonction lambda pour afficher sur stderr\nerror = functools.partial(print, file=sys.stderr) # pylint: disable=C0103\n\n##\n# @brief requests n'est pas dans la distrib standard de Python3, d'oรน le traitement spรฉcifique\n# pour l'import de cette librairie\ntry:\n import requests\n import requests.utils\nexcept ImportError as exc:\n error(\"erreur:\", exc)\n error(\"Installez http://www.python-requests.org/ :\")\n print(\" pip install requests\")\n sys.exit(2)\n\n##\n# @brief niveau de dรฉtail, -v pour l'augmenter\nVERBOSITY = 0\n\ndef debug(level, *args):\n \"\"\"Affiche un message de debug\n @param level niveau de dรฉtail\n @param args\n \"\"\"\n if VERBOSITY >= level:\n\n red = '\\033[91m'\n #green = '\\033[92m'\n yellow = '\\033[93m'\n #light_purple = '\\033[94m'\n purple = '\\033[95m'\n end = '\\033[0m'\n\n #print(*args, file=sys.stderr)\n\n if level <= 1:\n sys.stderr.write(yellow)\n elif level == 2:\n sys.stderr.write(purple)\n else:\n sys.stderr.write(red)\n\n sys.stderr.write(' '.join(args))\n sys.stderr.write(end)\n sys.stderr.write('\\n')\n\n#####################\n\n\ndef auth_required(func):\n \"\"\"Decorator to check if instance is authenticated.\"\"\"\n @functools.wraps(func)\n def inner(self, *args, **kwargs):\n \"\"\"Authenticate if not authenticated.\"\"\"\n if self.token is None and self.session is None:\n self.auth()\n\n return func(self, *args, **kwargs)\n return inner\n\ndef _json_object_hook(dct):\n return namedtuple('X', dct.keys())(*dct.values())\n\ndef json2obj(data):\n \"\"\"Create object from json\"\"\"\n return json.loads(data, object_hook=_json_object_hook)\n\n\nclass Lbws:\n \"\"\"Class to deal with Livebox.\"\"\"\n # pylint: disable=R0902\n\n def __init__(self, host=None, user=None, password=None, livebox_version='lb4'):\n \"\"\"Init Livebox with host, user and password if defined.\"\"\"\n self.host = host\n self.user = user\n self.password = password\n self.livebox_version = livebox_version\n\n self.headers = {'Content-Type': 'application/json'}\n self.sah_headers = {\n 'X-Prototype-Version':'1.7',\n 'Content-Type':'application/x-sah-ws-1-call+json; charset=UTF-8',\n 'Accept':'text/javascript'\n }\n self.session = None\n self.token = None\n self.cookies = None\n self._dsl_mib = None\n self._wan_status = None\n self._ppp_mib = None\n self._dsl_stats = None\n self._voip_sip = None\n self._wifi_status = None\n self._tv_status = None\n self._users = None\n\n @staticmethod\n def _check_req(req):\n if req['status'] is None:\n 
raise LbwsException('Error when retrieving informations.')\n\n def _post(self, path, args=None, raw=False, silent=False, **kwargs):\n # nettoie le chemin de la requรชte\n lpath = str.replace(path or \"sysbus\", \".\", \"/\")\n if lpath[0] == \"/\":\n lpath = lpath[1:]\n\n if lpath[0:7] != \"sysbus/\":\n lpath = \"sysbus/\" + lpath\n\n parameters = {}\n if not args is None:\n for i in args:\n parameters[i] = args[i]\n\n data = {}\n data['parameters'] = parameters\n\n # l'ihm des livebox 4 utilise une autre API, qui fonctionne aussi sur les lb2 et lb3\n sep = lpath.rfind(':')\n data['service'] = lpath[0:sep].replace('/', '.')\n if data['service'][0:7] == \"sysbus.\":\n data['service'] = data['service'][7:]\n data['method'] = lpath[sep+1:]\n lpath = 'ws'\n\n # envoie la requรชte avec les entรชtes qui vont bien\n debug(1, \"requรชte: %s with %s\" % (lpath, str(data)))\n tstamp = datetime.datetime.now()\n\n tmp = self.session.post(\n 'http://{0}/ws'.format(self.host),\n headers=self.sah_headers,\n data=json.dumps(data),\n **kwargs\n )\n debug(2, \"durรฉe requรชte: %s\" % (datetime.datetime.now() - tstamp))\n tmp = tmp.content\n\n # il y a un truc bien moisi dans le nom netbios de la Time Capsule\n # probable reliquat d'un bug dans le firmware de la TC ou de la Livebox\n tmp = tmp.replace(b'\\xf0\\x44\\x6e\\x22', b'aaaa')\n\n if raw is True:\n return tmp\n\n tmp = tmp.decode('utf-8', errors='replace')\n\n try:\n req = json.loads(tmp)\n except json.JSONDecodeError:\n if not silent:\n error(\"erreur:\", sys.exc_info()[0])\n error(\"mauvais json:\", tmp)\n return\n\n apercu = str(req)\n if len(apercu) > 50:\n apercu = apercu[:50] + \"...\"\n debug(1, \"rรฉponse:\", apercu)\n\n if not 'errors' in req['result']:\n debug(1, \"-------------------------\")\n return req['result']\n else:\n if not silent:\n error(\"erreur:\", req)\n return None\n\n\n\n def _get(self, path, args=None, raw=False, silent=False, **kwargs):\n data = '{\"parameters\":{}}'\n # nettoie le chemin de la requรชte\n lpath = str.replace(path or \"sysbus\", \".\", \"/\")\n if lpath[0] == \"/\":\n lpath = lpath[1:]\n\n if lpath[0:7] != \"sysbus/\":\n lpath = \"sysbus/\" + lpath\n\n if args is None:\n params = {'_restDepth': '-1'}\n\n debug(1, \"requรชte: %s\" % (lpath))\n tstamp = datetime.datetime.now()\n tmp = self.session.get(\n 'http://{0}/{1}'.format(self.host, lpath),\n headers=self.headers,\n data=data,\n params=params,\n **kwargs\n )\n debug(2, \"durรฉe requรชte: %s\" % (datetime.datetime.now() - tstamp))\n tmp = tmp.content\n\n # il y a un truc bien moisi dans le nom netbios de la Time Capsule\n # probable reliquat d'un bug dans le firmware de la TC ou de la Livebox\n tmp = tmp.replace(b'\\xf0\\x44\\x6e\\x22', b'aaaa')\n\n if raw is True:\n return tmp\n\n tmp = tmp.decode('utf-8', errors='replace')\n if tmp.find(\"}{\"):\n debug(2, \"listes json multiples\")\n tmp = \"[\" + tmp.replace(\"}{\", \"},{\") + \"]\"\n\n try:\n req = json.loads(tmp)\n except json.JSONDecodeError:\n if not silent:\n error(\"erreur:\", sys.exc_info()[0])\n error(\"mauvais json:\", tmp)\n return\n\n apercu = str(req)\n if len(apercu) > 50:\n apercu = apercu[:50] + \"...\"\n debug(1, \"rรฉponse:\", apercu)\n debug(1, \"-------------------------\")\n\n return req\n\n def auth(self):\n \"\"\"Call authenticate on Livebox.\n\n user, host and password must be set.\n \"\"\"\n if not self.user or not self.password or not self.host:\n raise LbwsException('User and/or password not set')\n\n self.session = requests.Session()\n\n if self.livebox_version != 'lb4':\n 
auth = {'username':self.user, 'password':self.password}\n debug(2, \"auth with\", str(auth))\n req = self.session.post(\n 'http://{0}/authenticate'.format(self.host),\n params=auth,\n headers=self.headers,\n )\n debug(2, \"auth return\", req.text)\n else:\n sah_headers = {\n 'Content-Type':'application/x-sah-ws-1-call+json',\n 'Authorization':'X-Sah-Login'\n }\n auth = ('{\"service\":\"sah.Device.Information\",'\n '\"method\":\"createContext\",\"parameters\":'\n '{\"applicationName\":\"so_sdkut\",\"username\":\"%s\",'\n '\"password\":\"%s\"}}') % (self.user, self.password)\n req = self.session.post(\n 'http://{0}/ws'.format(self.host),\n data=auth,\n headers=sah_headers,\n )\n\n if req.status_code != requests.codes.ok and not 'contextID' in req.json()['data']: # pylint: disable=E1101\n raise LbwsException('Authentication error : %s' % req.text)\n\n self.token = req.json()['data']['contextID']\n self.headers['X-Context'] = self.token\n self.sah_headers = {\n 'X-Context':self.token,\n 'Authorization':'X-Sah %s' % (self.token),\n 'X-Prototype-Version':'1.7',\n 'Content-Type':'application/x-sah-ws-1-call+json; charset=UTF-8',\n 'Accept':'text/javascript'\n }\n self.cookies = req.cookies\n\n # vรฉrification de l'authentification\n req = self.session.post(\n 'http://{0}/'.format(self.host) + 'sysbus/Time:getTime',\n headers=self.sah_headers,\n data='{\"parameters\":{}}'\n )\n if req.json()['result']['status'] is True:\n return True\n else:\n raise LbwsException('Authentication error : %s' % req.text)\n\n @auth_required\n def logout(self):\n \"\"\"Logout from livebox.\n\n POST on http://{0}/logout\n \"\"\"\n sah_headers = {\n 'Content-Type':'application/x-sah-ws-1-call+json',\n 'Authorization':'X-Sah-Logout %s' % (self.token)\n }\n auth = ('{\"service\":\"sah.Device.Information\",\"method\":\"releaseContext\",'\n '\"parameters\":{\"applicationName\":\"so_sdkut\"}}')\n req = self.session.post(\n 'http://{0}/ws'.format(self.host),\n data=auth,\n headers=sah_headers,\n )\n if req.status_code == requests.codes.ok and req.json()['status'] == 0: # pylint: disable=E1101\n self.token = None\n return True\n\n return False\n\n @property\n @auth_required\n def dsl_mib(self):\n \"\"\"Get DSL Infos from Livebox.\n\n POST parameters on http://{0}/sysbus/NeMo/Intf/data:getMIBsCall\n :return: an object with attributes :\n CurrentProfile\n DataPath\n DownstreamAttenuation\n DownstreamCurrRate\n DownstreamMaxRate\n DownstreamNoiseMargin\n DownstreamPower\n FirmwareVersion\n InterleaveDepth\n LastChange\n LastChangeTime\n LinkStatus\n ModulationHint\n ModulationType\n StandardUsed\n StandardsSupported\n UPBOKLE\n UpstreamAttenuation\n UpstreamCurrRate\n UpstreamMaxRate\n UpstreamNoiseMargin\n UpstreamPower\n :rtype: object\n\n \"\"\"\n if self._dsl_mib is None:\n args = {\"mibs\":\"dsl\", \"flag\":\"\", \"traverse\":\"down\"}\n\n req = self._post(\n 'NeMo.Intf.data:getMIBs',\n args=args,\n )\n\n self._check_req(req)\n\n if 'dsl0' in req['status']['dsl']:\n self._dsl_mib = json2obj(json.dumps(req['status']['dsl']['dsl0']))\n else:\n self._dsl_mib = None\n\n return self._dsl_mib\n\n @property\n @auth_required\n def wan_status(self):\n \"\"\"Get WAN status\n\n POST parameters on http://{0}/sysbus/NMC:getWANStatus\n\n :result: a JSON structure with keys :\n ConnectionState\n RemoteGateway\n LinkState\n DNSServers\n Protocol\n LastConnectionError\n IPAddress\n LinkType\n MACAddress\n IPv6Address\n :rtype: json\n \"\"\"\n\n if self._wan_status is None:\n req = self._post(\n 'NMC:getWANStatus',\n )\n\n 
self._check_req(req)\n self._wan_status = json2obj(json.dumps(req['data']))\n\n return self._wan_status\n\n @property\n @auth_required\n def ppp_mib(self):\n \"\"\"Get ppp Infos from Livebox.\n\n POST parameters on http://{0}/sysbus/NeMo/Intf/data:getMIBsCall\n :return: a JSON structure with keys :\n PPPoESessionID\n TransportType\n RemoteIPAddress\n IPv6CPEnable\n ConnectionTrigger\n IPCPEnable\n LastConnectionError\n PPPoEACName\n DNSServers\n IdleDisconnectTime\n LCPEchoRetry\n LCPEcho\n MaxMRUSize\n ConnectionStatus\n LastChangeTime\n IPv6CPLocalInterfaceIdentifier\n IPv6CPRemoteInterfaceIdentifier\n PPPoEServiceName\n LastChange\n LocalIPAddress\n Username\n :rtype: json\n \"\"\"\n\n if self._ppp_mib is None:\n args = {\"mibs\":\"ppp\"}\n\n req = self._post(\n 'NeMo.Intf.data:getMIBs',\n args=args,\n )\n\n self._check_req(req)\n\n if 'ppp' in req['status']:\n self._ppp_mib = json2obj(json.dumps(req['status']['ppp']['ppp_data']))\n else:\n self._ppp_mib = None\n\n return self._ppp_mib\n\n @property\n @auth_required\n def dsl_stats(self):\n \"\"\"Get DSL stats from Livebox.\n\n POST on http://{0}/sysbus/NeMo/Intf/dsl0:getDSLStats\n\n :result: a JSON structure with keys :\n LossOfFraming\n TransmitBlocks\n HECErrors\n ATUCCRCErrors\n CellDelin\n ErroredSecs\n ReceiveBlocks\n CRCErrors\n InitErrors\n LinkRetrain\n ATUCFECErrors\n SeverelyErroredSecs\n FECErrors\n InitTimeouts\n ATUCHECErrors\n :rtype: json\n \"\"\"\n\n if self._dsl_stats is None:\n req = self._post(\n 'NeMo.Intf.dsl0:getDSLStats',\n )\n self._check_req(req)\n self._dsl_stats = json2obj(json.dumps(req['status']))\n\n return self._dsl_stats\n\n @property\n @auth_required\n def voip_sip(self):\n \"\"\"Get VoIP informations fron livebox.\"\"\"\n if self._voip_sip is None:\n req = self._post(\n 'VoiceService.VoiceApplication:listTrunks',\n )\n\n self._check_req(req)\n\n item = next((item for item in req['status'] if item['name'] == 'SIP-Trunk'), None)\n #if not item:\n #raise LbwsException('Error when retrieving SIP-Trunk informations.')\n if item:\n self._voip_sip = json2obj(json.dumps(req['status'][0]['trunk_lines']))\n else:\n self._voip_sip = None\n\n return self._voip_sip\n\n @property\n @auth_required\n def wifi_status(self):\n \"\"\"Get WiFi status from livebox.\"\"\"\n if self._wifi_status is None:\n req = self._post(\n 'NMC.Wifi:get',\n )\n\n self._check_req(req)\n\n self._wifi_status = json2obj(json.dumps(req['status']))\n\n return self._wifi_status\n\n @property\n @auth_required\n def tv_status(self):\n \"\"\"Get TV status\n\n \"\"\"\n\n if self._tv_status is None:\n req = self._post(\n 'NMC.OrangeTV:getIPTVStatus',\n )\n\n self._tv_status = json2obj(json.dumps(req['data']))\n\n return self._tv_status\n\n\n @auth_required\n def mibs(self):\n \"\"\"Get all MIBS from Livebox.\n\n POST parameters on http://{0}/sysbus/NeMo/Intf/data:getMIBs\n \"\"\"\n\n req = self._post(\n 'NeMo.Intf.data:getMIBs',\n )\n\n self._check_req(req)\n\n return req['status']\n\n @property\n @auth_required\n def users(self):\n \"\"\"Get users defined on Livebox.\"\"\"\n\n if self._users is None:\n req = self._post(\n 'UserManagement:getUsers'\n )\n\n self._check_req(req)\n\n self._users = json2obj(json.dumps(req['status']))\n\n return self._users\n\n def reboot(self):\n \"\"\"Reboot the livebox.\"\"\"\n\n req = self._post(\n 'NMC:reboot',\n )\n\n if req.status_code == requests.codes.ok: # pylint: disable=E1101\n self.token = None\n return True\n\n return False\n" }, { "alpha_fraction": 0.5653082728385925, "alphanum_fraction": 
0.5872518420219421, "avg_line_length": 39.72340393066406, "blob_id": "e021adefb8d3b19657e6f96aee16327eb329fa72", "content_id": "e42e22b0428cbf3f187527607d3359b905aabc28", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1917, "license_type": "permissive", "max_line_length": 124, "num_lines": 47, "path": "/README.md", "repo_name": "kazcangi/lbws", "src_encoding": "UTF-8", "text": "# lbws\nQuick and dirty python library to interact with Orange Livebox\n\nExample :\n\n```\n\n\nfrom lbws import Lbws\nfrom datetime import date, timedelta, datetime\n\nif __name__ == '__main__':\n version = \"1.0.0\"\n\n lb = Lbws(\"192.168.1.1\", \"admin\", \"your password\")\n\n print(\"Date et heure locale : {0}\".format(datetime.now()))\n print(\"Statut du lien DSL : {0}\".format(lb.wan_status.LinkState))\n print(\"Type de protocol : {0}\".format(lb.wan_status.Protocol))\n print(\"Etat synchronisation : {0}\".format(lb.dsl_mib.LinkStatus))\n print(\"Type de connexion : {0} ({1})\".format(lb.dsl_mib.ModulationHint, lb.dsl_mib.ModulationType))\n\n print(\"Dรฉbit descendant : {0} Kb/s (marge de bruit : {1} dB)\".format(\n lb.dsl_mib.DownstreamCurrRate,\n lb.dsl_mib.DownstreamNoiseMargin / 10\n ))\n print(\"Dรฉbit montant : {0} Kb/s (marge de bruit : {1} dB)\".format(\n lb.dsl_mib.UpstreamCurrRate,\n lb.dsl_mib.UpstreamNoiseMargin / 10\n ))\n print(\"Synchronisรฉ depuis : {0} ({1})\".format(\n str(timedelta(seconds=lb.dsl_mib.LastChange)),\n (datetime.now() - timedelta(seconds=lb.dsl_mib.LastChange)).strftime('%d/%m/%Y %H:%M:%S')\n ))\n\n print(\"Etat WiFi : {0}\".format(lb.wifi_status.Status))\n print(\"Etat TV : {0}\".format(lb.tv_status.IPTVStatus))\n for i in lb.voip_sip:\n print(\"Etat TOIP : {0} {1} ({2})\".format(i.name, i.status, i.directoryNumber))\n print(\"IPV4 Publique : {0}\".format(lb.wan_status.IPAddress))\n print(\"IPV6 Publique : {0}\".format(lb.wan_status.IPv6Address))\n print(\"CRC Errors : {0} - ATUC CRC Errors : {1}\".format(lb.dsl_stats.CRCErrors, lb.dsl_stats.ATUCCRCErrors,))\n for i in lb.users:\n print(\"User : {0} (type {1}) - Groupes : {2}\".format(i.name, i.type, i.groups))\n print()\n\n lb.logout()\n" }, { "alpha_fraction": 0.6493150591850281, "alphanum_fraction": 0.6493150591850281, "avg_line_length": 23.33333396911621, "blob_id": "4d3c0745b880468af8e273c5fbff8b503833a6bb", "content_id": "01e74ed166bcc36baedbf4318bf1eb9ce8f9d661", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 365, "license_type": "permissive", "max_line_length": 74, "num_lines": 15, "path": "/src/lbws/exceptions.py", "repo_name": "kazcangi/lbws", "src_encoding": "UTF-8", "text": "\"\"\"Exceptions used by Lbws.\"\"\"\n\n\nclass LbwsException(Exception):\n \"\"\"General Lbws exception occurred.\"\"\"\n\n pass\n\n\nclass LbwsNotConnectedError(LbwsException):\n \"\"\"Exception raised when method needs to be connected and it's not.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize the error.\"\"\"\n super().__init__('Must call method auth() before using')\n" } ]
3
allenqz98/project5-crawler
https://github.com/allenqz98/project5-crawler
60198f3360872b4a3135a4c8854de0c636fd7a25
074916564123df3381c003d6e2bae1034cea04b9
3bffa66f7247bf29b6ef506a95d4419a11d12369
refs/heads/master
2023-01-15T04:56:37.288231
2020-11-19T07:10:13
2020-11-19T07:10:13
314,162,826
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5698826313018799, "alphanum_fraction": 0.5885913372039795, "avg_line_length": 27.103092193603516, "blob_id": "f22e5e6e4a2aeb2cdda2ab99a60a80d20bce914c", "content_id": "a8e890f6bc224c4f146edda760ee6c6dbdc2e31a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5452, "license_type": "no_license", "max_line_length": 167, "num_lines": 194, "path": "/crawler.py", "repo_name": "allenqz98/project5-crawler", "src_encoding": "UTF-8", "text": "import socket\nfrom html.parser import HTMLParser\n\ntarget_host = \"www.3700.network\"\ntarget_port = 80\nurls_to_be_scraped = []\nurls_visited = {}\ncookies = {}\nclient = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nclient.connect((target_host, target_port))\n\n# Create a new socket\n\n\ndef new_socket():\n global client\n client.close()\n new_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n new_socket.connect((target_host, target_port))\n client = new_socket\n\n# Receive response from socket\n\n\ndef recv(cli):\n result = bytearray()\n while True:\n response = cli.recv(4096)\n result.extend(response)\n if r'0\\r\\n\\r\\n' in str(result) or r'Transfer-Encoding: chunked' not in str(result):\n break\n if r'Connection: keep-alive' not in str(result):\n new_socket()\n return result.decode('utf-8')\n\n# handle http based on status code\n\n\ndef handle_http(http, url):\n global cookie, urls_visited\n seperated = read_http(http)\n status, headers, body = seperated['status'], seperated['headers'], seperated['body']\n\n # update cookie if needed\n for header in headers:\n key, value = header.split(\": \", 1)\n if key == 'Set-Cookie':\n type = value.split(\"; \")[0].split('=')[0]\n cookies[type] = value.split(\"; \")[0]\n cookie = '; '.join(cookies.values())\n\n # 200\n if status == \"200\":\n return body\n\n # 302\n elif status == \"302\":\n for header in headers:\n key, value = header.split(\": \", 1)\n if key == \"Location\":\n if value in urls_visited:\n print('Redirect to a site visited')\n return False\n http = get_request(value)\n return handle_http(http, value)\n # 403/404\n elif status == \"403\" or status == \"404\":\n print(\"[ABANDON]: 403/404 Found\")\n return False\n\n # 500\n elif status == '500':\n http = get_request(url)\n return handle_http(http, url)\n\n# Send get request to url\n\n\ndef get_request(url):\n global cookie, client\n\n headers = \"Host:{}\\r\\nCookie:{}\".format(\n target_host, cookie)\n request = \"GET {} HTTP/1.1\\r\\n{}\\r\\n\\r\\n\".format(url, headers)\n client.send(request.encode())\n response = recv(client)\n\n while response == \"\":\n new_socket()\n client.send(request.encode())\n response = recv(client)\n\n return response\n\n# Seperate one http response into status, header and body\n\n\ndef read_http(http):\n http = http.split('\\r\\n\\r\\n')\n headers = http[0].split('\\r\\n')[1:]\n status = http[0].split('\\r\\n')[0].split(' ')[1]\n body = http[1]\n return {'status': status, 'headers': headers, 'body': body}\n\n\ndef scrape(http, url):\n global urls_to_be_scraped, urls_visited\n parse_html(http, url)\n\n while len(urls_to_be_scraped) > 0:\n next_url = urls_to_be_scraped[0]\n next_http = get_request(next_url)\n urls_to_be_scraped = urls_to_be_scraped[1:]\n parse_html(next_http, next_url)\n return\n\n\ndef parse_html(http, url):\n global urls_to_be_scraped, urls_visited\n body = handle_http(http, url)\n if body:\n parser.feed(body)\n\n return\n\n# HTML parser\n\n\nclass MyHTMLParser(HTMLParser):\n def __init__(self):\n 
HTMLParser.__init__(self)\n self.recording = False\n\n def handle_starttag(self, tag, attrs):\n if tag == 'a':\n for attr in attrs:\n if attr[0] == 'href':\n url = attr[1]\n # only add url from the same host\n if attr[1][0] == '/':\n url = \"http://\" + target_host + url\n if url not in urls_visited:\n urls_to_be_scraped.append(url)\n urls_visited[url] = True\n elif tag == 'h2':\n for attr in attrs:\n if 'secret_flag' in attr:\n self.recording = True\n\n def handle_endtag(self, tag):\n if tag == 'h2' and self.recording:\n self.recording = False\n\n def handle_data(self, data):\n if self.recording:\n print(data)\n return\n\n\nparser = MyHTMLParser()\n\n# login page\nrequest = \"GET http://www.3700.network/accounts/login/?next=/fakebook/ HTTP/1.1\\r\\nHost:%s\\r\\n\\r\\n\" % target_host\nclient.send(request.encode())\nresponse = recv(client)\n\nurls_visited['http://www.3700.network/accounts/login/?next=/fakebook/'] = True\nseperated = read_http(response)\nstatus, headers = seperated['status'], seperated['headers']\n\n# Cookie initiation\nfor header in headers:\n key, value = header.split(\": \", 1)\n if key == 'Set-Cookie':\n type = value.split(\"; \")[0].split('=')[0]\n cookies[type] = value.split(\"; \")[0]\ncookie = '; '.join(cookies.values())\n\nrequest_body = 'username=1862143&password=1KG4UQ1N&csrfmiddlewaretoken={}&next=/fakebook/'.format(\n cookies['csrftoken'].split('=')[1])\nrequest_header = \"Host:{}\\r\\nCookie:{}\\r\\nAccept-Encoding:gzip\\r\\nConnection:Keep-Alive\\r\\nContent-Length:{}\\r\\nContent-Type:application/x-www-form-urlencoded\".format(\n target_host, cookie, str(len(request_body.encode())))\n\n# login request\nrequest = \"POST http://www.3700.network/accounts/login/ HTTP/1.1\\r\\n{}\\r\\n\\r\\n{}\".format(\n request_header, request_body)\n\nclient.send(request.encode())\n\nhome_page = recv(client)\n\n# start scraping\nscrape(home_page, 'http://www.3700.network/fakebook/')\nclient.close()\n" } ]
1
Barniit/Barniit
https://github.com/Barniit/Barniit
884c56a6eaaf6445aaf4e33f39e1a4d9c14f6d02
b33e6dd84e2153029e2c21c419730b9a69a71a48
da63537a3a9b437a6ac47a6c3b00d03050bd333b
refs/heads/master
2020-06-19T18:47:09.419870
2019-07-14T11:44:59
2019-07-14T11:44:59
196,829,327
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6780045628547668, "alphanum_fraction": 0.7006802558898926, "avg_line_length": 23.941177368164062, "blob_id": "98b7e4c4820ab9f7829911cb932c03c7ed37618f", "content_id": "cbba077fadc116158c2200d8624761eb30b2ae61", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 441, "license_type": "no_license", "max_line_length": 78, "num_lines": 17, "path": "/using kde.py", "repo_name": "Barniit/Barniit", "src_encoding": "UTF-8", "text": "import pandas as pd\r\nimport numpy as np\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\ndf=pd.read_csv(\"C:\\\\Users\\\\Barnit\\\\Documents\\\\heatmap.csv\")\r\n\r\ndf2=df.dropna(axis='columns',how='all')\r\nlat=df2['longitude']\r\nlon=df2['latitude']\r\nwt=df2['weight']\r\ndf3=df2.drop('yrows',axis=1)\r\ndf3=df3.drop('xcols',axis=1)\r\n\r\nax=sns.kdeplot(lon,wt,kernel=\"gau\",cmap=\"Blues\",shade=True,shade_lowest=False)\r\nax.set_frame_on(True)\r\n\r\nplt.show()\r\n" } ]
1
shijq23/py-basicinterp
https://github.com/shijq23/py-basicinterp
00c35c341469a26aaf19e03c6fba19742a7ce39d
7f0a1f9f0eb543a9a54ae74d5cb7c82723c6f672
98552e8e8022161f5c12f806d97982924c96c028
refs/heads/master
2022-12-19T06:24:10.601949
2020-09-23T06:30:53
2020-09-23T06:30:53
295,625,910
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7285714149475098, "alphanum_fraction": 0.7857142686843872, "avg_line_length": 34, "blob_id": "1323249b52cd53963665cf4d9f4b59779b55b988", "content_id": "f1fe15b6c157b60bbb4a9d307cd1cc93ff946bc6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 70, "license_type": "no_license", "max_line_length": 52, "num_lines": 2, "path": "/README.md", "repo_name": "shijq23/py-basicinterp", "src_encoding": "UTF-8", "text": "# py-basicinterp\nbased on https://www.youtube.com/watch?v=Eythq9848Fg\n" }, { "alpha_fraction": 0.5883533954620361, "alphanum_fraction": 0.5896921157836914, "avg_line_length": 27.188678741455078, "blob_id": "a90a452e6a88df06eb3c17b7b6c41330729aeccf", "content_id": "15f210fa30d70fb6a7e2a280e814b3cc1606d687", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1494, "license_type": "no_license", "max_line_length": 93, "num_lines": 53, "path": "/basiclang/node.py", "repo_name": "shijq23/py-basicinterp", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom __future__ import annotations\nfrom .token import Token\n\n\nclass NumberNode:\n def __init__(self, tok: Token) -> None:\n self.tok = tok\n self.pos_start = tok.pos_start\n self.pos_end = tok.pos_end\n\n def __repr__(self) -> str:\n return f'{self.tok}'\n\n\nclass VarAccessNode:\n def __init__(self, var_name_tok: Token) -> None:\n self.var_name_tok = var_name_tok\n self.pos_start = var_name_tok.pos_start\n self.pos_end = var_name_tok.pos_end\n\n\nclass VarAssignNode:\n def __init__(self, var_name_tok: Token, value_node) -> None:\n self.var_name_tok = var_name_tok\n self.value_node = value_node\n self.pos_start = var_name_tok.pos_start\n self.pos_end = value_node.pos_end\n\n\nclass BinOpNode:\n def __init__(self, left_node: NumberNode, op_tok: Token, right_node: NumberNode) -> None:\n self.left_node = left_node\n self.op_tok = op_tok\n self.right_node = right_node\n self.pos_start = left_node.pos_start\n self.pos_end = right_node.pos_end\n\n def __repr__(self) -> str:\n return f'({self.left_node}, {self.op_tok}, {self.right_node})'\n\n\nclass UnaryOpNode:\n def __init__(self, op_tok: Token, node: NumberNode) -> None:\n self.op_tok = op_tok\n self.node = node\n self.pos_start = op_tok.pos_start\n self.pos_end = node.pos_end\n\n def __repr__(self) -> str:\n return f'{self.op_tok}, {self.node}'\n" }, { "alpha_fraction": 0.5674110651016235, "alphanum_fraction": 0.5773366689682007, "avg_line_length": 20.589284896850586, "blob_id": "8cb4727546ee4f99d706299bd45b97def8e7573c", "content_id": "4b2f04eb64d8563f08862aa8adf27baf99691451", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1209, "license_type": "no_license", "max_line_length": 110, "num_lines": 56, "path": "/basiclang/token.py", "repo_name": "shijq23/py-basicinterp", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom __future__ import annotations\nfrom basiclang.position import Position\nimport string\n\nDIGITS = '0123456789'\nLETTERS = string.ascii_letters\nLETTERS_DIGITS = LETTERS + DIGITS\n\nTT_INT = 'TT_INT'\nTT_FLOAT = 'FLOAT'\nTT_IDENTIFIER = 'IDENTIFIER'\nTT_KEYWORD = 'KEYWORD'\nTT_PLUS = 'PLUS'\nTT_MINUS = 'MINUS'\nTT_MUL = 'MUL'\nTT_DIV = 'DIV'\nTT_POW = 'TT_POW'\nTT_EQ = 'EQ'\nTT_EE = 'EE'\nTT_NE = 'NE'\nTT_LT = 'LT'\nTT_GT = 'GT'\nTT_LTE = 'LTE'\nTT_GTE = 'GTE'\nTT_LPAREN = 'LPAREN'\nTT_RPAREN = 
'RPAREN'\nTT_EOF = 'EOF'\n\nKEYWORDS = [\n 'VAR',\n 'AND',\n 'OR',\n 'NOT'\n]\n\n\nclass Token:\n def __init__(self, type_: str, value_=None, pos_start: Position = None, pos_end: Position = None) -> None:\n self.type = type_\n self.value = value_\n if pos_start:\n self.pos_start = pos_start.copy()\n self.pos_end = pos_start.copy().advance()\n if pos_end:\n self.pos_end = pos_end.copy()\n\n def matches(self, type_, value) -> bool:\n return self.type == type_ and self.value == value\n\n def __repr__(self) -> str:\n if self.value:\n return f'{self.type}:{self.value}'\n return f'{self.type}'\n" }, { "alpha_fraction": 0.6043058633804321, "alphanum_fraction": 0.6050482392311096, "avg_line_length": 39.0098991394043, "blob_id": "72764e36320d2e435149d9fabf425cb094e5852f", "content_id": "9b82a70e77d5a56c993d272f2575fb38556da033", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4041, "license_type": "no_license", "max_line_length": 135, "num_lines": 101, "path": "/basiclang/interpreter.py", "repo_name": "shijq23/py-basicinterp", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nfrom __future__ import annotations\nfrom basiclang.rtresult import Number, RTResult\nimport math\nfrom basiclang.context import Context\nfrom basiclang.error import Error, RTError\nfrom basiclang.token import KEYWORDS, TT_DIV, TT_EE, TT_GT, TT_GTE, TT_KEYWORD, TT_LT, TT_LTE, TT_MINUS, TT_MUL, TT_NE, TT_PLUS, TT_POW\n\nfrom basiclang.position import Position\nfrom basiclang.node import BinOpNode, NumberNode, VarAccessNode, VarAssignNode\n\n\nclass Interpreter:\n def visit(self, node, context: Context) -> RTResult:\n method_name = f'visit_{type(node).__name__}'\n method = getattr(self, method_name, self.no_visit_method)\n return method(node, context)\n\n def no_visit_method(self, node, context: Context) -> RTResult:\n raise Exception(f'No visit_{type(node).__name__} method defined')\n\n def visit_NumberNode(self, node: NumberNode, context: Context) -> RTResult:\n return RTResult().success(Number(node.tok.value).set_context(context).set_pos(node.tok.pos_start, node.tok.pos_end))\n\n def visit_VarAccessNode(self, node: VarAccessNode, context: Context) -> RTResult:\n res = RTResult()\n var_name = node.var_name_tok.value\n value = context.symbol_table.get(var_name)\n if not value:\n return res.failure(RTError(node.pos_start, node.pos_end, f\"'{var_name}' is not defined\", context))\n value = value.copy().set_pos(node.pos_start, node.pos_end)\n return res.success(value)\n\n def visit_VarAssignNode(self, node: VarAssignNode, context: Context) -> RTResult:\n res = RTResult()\n var_name = node.var_name_tok.value\n value = res.register(self.visit(node.value_node, context))\n if res.error:\n return res\n\n context.symbol_table.set(var_name, value)\n return res.success(value)\n\n def visit_BinOpNode(self, node: BinOpNode, context: Context) -> RTResult:\n res = RTResult()\n left = res.register(self.visit(node.left_node, context))\n if res.error:\n return res\n right = res.register(self.visit(node.right_node, context))\n if res.error:\n return res\n\n if node.op_tok.type == TT_PLUS:\n result, error = left.add(right)\n elif node.op_tok.type == TT_MINUS:\n result, error = left.sub(right)\n elif node.op_tok.type == TT_MUL:\n result, error = left.mul(right)\n elif node.op_tok.type == TT_DIV:\n result, error = left.div(right)\n elif node.op_tok.type == TT_POW:\n result, error = left.pow(right)\n elif node.op_tok.type == TT_EE:\n result, error = 
left.comp_eq(right)\n elif node.op_tok.type == TT_NE:\n result, error = left.comp_ne(right)\n elif node.op_tok.type == TT_LT:\n result, error = left.comp_lt(right)\n elif node.op_tok.type == TT_GT:\n result, error = left.comp_gt(right)\n elif node.op_tok.type == TT_LTE:\n result, error = left.comp_lte(right)\n elif node.op_tok.type == TT_GTE:\n result, error = left.comp_gte(right)\n elif node.op_tok.matches(TT_KEYWORD, 'AND'):\n result, error = left.and_(right)\n elif node.op_tok.matches(TT_KEYWORD, 'OR'):\n result, error = left.or_(right)\n else:\n pass\n if error:\n return res.failure(error)\n else:\n return res.success(result.set_pos(node.pos_start, node.pos_end))\n\n def visit_UnaryOpNode(self, node, context: Context) -> RTResult:\n res = RTResult()\n op = res.register(self.visit(node.node, context))\n if res.error:\n return res\n err = None\n if node.op_tok.type == TT_MINUS:\n op, err = op.mul(Number(-1))\n elif node.op_tok.matches(TT_KEYWORD, 'NOT'):\n op, err = op.not_()\n\n if err:\n return res.failure(err)\n else:\n return RTResult().success(op.set_pos(node.pos_start, node.pos_end))\n" }, { "alpha_fraction": 0.5581939816474915, "alphanum_fraction": 0.5593645572662354, "avg_line_length": 33.17142868041992, "blob_id": "0b434ec3689de185d1bc3f908ea332f388c462db", "content_id": "549eb679678051f7012b4e7c6820e71b05bcfdf9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5980, "license_type": "no_license", "max_line_length": 193, "num_lines": 175, "path": "/basiclang/parser.py", "repo_name": "shijq23/py-basicinterp", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom __future__ import annotations\nfrom basiclang.error import Error, InvalidSyntaxError\nfrom .node import BinOpNode, NumberNode, UnaryOpNode, VarAccessNode, VarAssignNode\nfrom typing import List, Tuple\nfrom .token import TT_DIV, TT_EE, TT_EOF, TT_EQ, TT_FLOAT, TT_GT, TT_GTE, TT_IDENTIFIER, TT_INT, TT_KEYWORD, TT_LPAREN, TT_LT, TT_LTE, TT_MINUS, TT_MUL, TT_NE, TT_PLUS, TT_POW, TT_RPAREN, Token\n\n\nclass ParserResult:\n def __init__(self) -> None:\n self.error = None\n self.node = None\n self.advance_count = 0\n\n def register_advancement(self):\n self.advance_count += 1\n\n def register(self, res: ParserResult):\n self.advance_count += res.advance_count\n if res.error:\n self.error = res.error\n return res.node\n\n def success(self, node) -> ParserResult:\n self.node = node\n return self\n\n def failure(self, error: Error) -> ParserResult:\n if not self.error or self.advance_count == 0:\n self.error = error\n return self\n\n\nclass Parser:\n def __init__(self, tokens: List[Token]) -> None:\n self.tokens = tokens\n self.tok_idx = -1\n self.cur_tok = None\n self.advance()\n\n def advance(self) -> Token:\n self.tok_idx += 1\n if self.tok_idx < len(self.tokens):\n self.cur_tok = self.tokens[self.tok_idx]\n return self.cur_tok\n\n def parse(self):\n res = self.expr()\n if not res.error and self.cur_tok.type != TT_EOF:\n return res.failure(InvalidSyntaxError(self.cur_tok.pos_start, self.cur_tok.pos_end,\n \"Expected '+', '-', '*', '/', '^', '==', '!=', '<', '>', '<=', '>=', 'AND', 'OR'\"))\n return res\n\n def atom(self) -> NumberNode:\n res = ParserResult()\n tok = self.cur_tok\n\n if tok.type in (TT_INT, TT_FLOAT):\n res.register_advancement()\n self.advance()\n\n return res.success(NumberNode(tok))\n elif tok.type == TT_IDENTIFIER:\n res.register_advancement()\n self.advance()\n\n return res.success(VarAccessNode(tok))\n elif 
tok.type == TT_LPAREN:\n            res.register_advancement()\n            self.advance()\n\n            expr = res.register(self.expr())\n            if res.error:\n                return res\n            if self.cur_tok.type == TT_RPAREN:\n                res.register_advancement()\n                self.advance()\n\n                return res.success(expr)\n            else:\n                return res.failure(InvalidSyntaxError(self.cur_tok.pos_start, self.cur_tok.pos_end, \"Expected ')'\"))\n\n        return res.failure(InvalidSyntaxError(tok.pos_start, tok.pos_end, \"Expected int, float, identifier, +, - or (\"))\n\n    def factor(self) -> NumberNode:\n        res = ParserResult()\n        tok = self.cur_tok\n\n        if tok.type in (TT_PLUS, TT_MINUS):\n            res.register_advancement()\n            self.advance()\n\n            factor = res.register(self.factor())\n            if res.error:\n                return res\n            return res.success(UnaryOpNode(tok, factor))\n        return self.power()\n\n    def term(self) -> BinOpNode:\n        return self.bin_op(self.factor, (TT_DIV, TT_MUL))\n\n    def comp_expr(self):\n        res = ParserResult()\n\n        if self.cur_tok.matches(TT_KEYWORD, 'NOT'):\n            op_tok = self.cur_tok\n            res.register_advancement()\n            self.advance()\n\n            node = res.register(self.comp_expr())\n            if res.error:\n                return res\n            return res.success(UnaryOpNode(op_tok, node))\n        node = res.register(self.bin_op(\n            self.arith_expr, (TT_EE, TT_NE, TT_LT, TT_GT, TT_LTE, TT_GTE)))\n        if res.error:\n            return res.failure(InvalidSyntaxError(self.cur_tok.pos_start, self.cur_tok.pos_end, \"Expected int, float, identifier, +, -, (, NOT\"))\n\n        return res.success(node)\n\n    def arith_expr(self):\n        return self.bin_op(self.term, (TT_PLUS, TT_MINUS))\n\n    def expr(self) -> BinOpNode:\n        res = ParserResult()\n        if self.cur_tok.matches(TT_KEYWORD, 'VAR'):\n            res.register_advancement()\n            self.advance()\n\n            if self.cur_tok.type != TT_IDENTIFIER:\n                return res.failure(InvalidSyntaxError(self.cur_tok.pos_start, self.cur_tok.pos_end, \"Expected identifier\"))\n            var_name = self.cur_tok\n\n            res.register_advancement()\n            self.advance()\n\n            if self.cur_tok.type != TT_EQ:\n                return res.failure(InvalidSyntaxError(self.cur_tok.pos_start, self.cur_tok.pos_end, \"Expected '='\"))\n\n            res.register_advancement()\n            self.advance()\n\n            expr = res.register(self.expr())\n            if res.error:\n                return res\n            return res.success(VarAssignNode(var_name, expr))\n\n        node = res.register(self.bin_op(\n            self.comp_expr, ((TT_KEYWORD, 'AND'), (TT_KEYWORD, 'OR'))))\n        if res.error:\n            return res.failure(InvalidSyntaxError(self.cur_tok.pos_start, self.cur_tok.pos_end, \"Expected 'VAR', int, float, identifier, +, - or (\"))\n        return res.success(node)\n\n    def power(self) -> BinOpNode:\n        return self.bin_op(self.atom, (TT_POW,), self.factor)\n\n    def bin_op(self, func_left: function, ops: Tuple, func_right=None) -> BinOpNode:\n        if func_right == None:\n            func_right = func_left\n        res = ParserResult()\n        left = res.register(func_left())\n        if res.error:\n            return res\n        while self.cur_tok.type in ops or (self.cur_tok.type, self.cur_tok.value) in ops:\n            op_tok = self.cur_tok\n            res.register_advancement()\n            self.advance()\n\n            right = res.register(func_right())\n            if res.error:\n                return res\n            left = BinOpNode(left, op_tok, right)\n        return res.success(left)\n" }, { "alpha_fraction": 0.5074626803398132, "alphanum_fraction": 0.5174129605293274, "avg_line_length": 23.1200008392334, "blob_id": "26f119b150f7ca1949b949ca102f8bee3f9c18ef", "content_id": "5acd9e0183ad14c30b0519d60c78a36003b3ee98", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 603, "license_type": "no_license", "max_line_length": 80, "num_lines": 25, "path": "/basiclang/position.py", "repo_name": 
"shijq23/py-basicinterp", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom __future__ import annotations\n\n\nclass Position:\n def __init__(self, idx: int, ln: int, col: int, fn: str, ftxt: str) -> None:\n self.idx = idx\n self.ln = ln\n self.col = col\n self.fn = fn\n self.ftxt = ftxt\n\n def advance(self, cur_char: str = None) -> Position:\n self.idx += 1\n self.col += 1\n\n if cur_char == '\\n':\n self.ln += 1\n self.col = 0\n return self\n\n def copy(self) -> Position:\n return Position(self.idx, self.ln, self.col, self.fn, self.ftxt)\n" }, { "alpha_fraction": 0.59316086769104, "alphanum_fraction": 0.5953528881072998, "avg_line_length": 35.790321350097656, "blob_id": "0e089e1e889da6ba49777519b773a02412ba7649", "content_id": "1288f8a0dafb9afb5b72a42c3281b8e54d9861c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2281, "license_type": "no_license", "max_line_length": 103, "num_lines": 62, "path": "/basiclang/error.py", "repo_name": "shijq23/py-basicinterp", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom basiclang.context import Context\nfrom basiclang.strings_with_arrows import string_with_arrows\nfrom basiclang.position import Position\n\n\nclass Error:\n def __init__(self, pos_start: Position, pos_end: Position, error_name: str, details: str) -> None:\n self.pos_start = pos_start\n self.pos_end = pos_end\n self.error_name = error_name\n self.details = details\n\n def as_str(self) -> str:\n result = f'{self.error_name}: {self.details}'\n result += f' file {self.pos_start.fn}, line {self.pos_start.ln + 1}'\n result += f' col {self.pos_start.col + 1}'\n result += '\\n\\n' + \\\n string_with_arrows(self.pos_start.ftxt,\n self.pos_start, self.pos_end)\n return result\n\n\nclass IllegalCharError(Error):\n def __init__(self, pos_start: Position, pos_end: Position, details: str) -> None:\n super().__init__(pos_start, pos_end, 'Illegal Character', details)\n\n\nclass ExpectedCharError(Error):\n def __init__(self, pos_start: Position, pos_end: Position, details: str) -> None:\n super().__init__(pos_start, pos_end, 'Expected Character', details)\n\n\nclass InvalidSyntaxError(Error):\n def __init__(self, pos_start: Position, pos_end: Position, details: str) -> None:\n super().__init__(pos_start, pos_end, 'Invalid Syntax', details)\n\n\nclass RTError(Error):\n def __init__(self, pos_start: Position, pos_end: Position, details: str, context: Context) -> None:\n super().__init__(pos_start, pos_end, 'Runtime Error', details)\n self.context = context\n\n def as_string(self) -> str:\n result = self.generate_traceback()\n result += f'{self.error_name}: {self.details}\\n'\n result += '\\n\\n' + \\\n string_with_arrows(self.pos_start.ftxt,\n self.pos_start, self.pos_end)\n return result\n\n def generate_traceback(self):\n result = ''\n pos = self.pos_start\n ctx = self.context\n while ctx:\n result = f' File {pos.fn}, line {str(pos.ln +1)}, in {ctx.display_name}\\n' + result\n pos = ctx.parant_entry_pos\n ctx = ctx.parent\n return 'Tracebak (most recent call last):\\n' + result\n" }, { "alpha_fraction": 0.527262270450592, "alphanum_fraction": 0.5289841294288635, "avg_line_length": 34.080535888671875, "blob_id": "d89e8bcecfaabef755f5444a9128fb54b9a401fa", "content_id": "0d3c4978c27353706957fbe9e5180cdb18786d85", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5227, "license_type": "no_license", 
"max_line_length": 154, "num_lines": 149, "path": "/basiclang/lexer.py", "repo_name": "shijq23/py-basicinterp", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom __future__ import annotations\nfrom typing import List, Tuple\n\nfrom .token import DIGITS, KEYWORDS, LETTERS, LETTERS_DIGITS, TT_EE, TT_EOF, TT_EQ, TT_GT, TT_GTE, TT_IDENTIFIER, TT_KEYWORD, TT_LT, TT_LTE, TT_NE, TT_POW\nfrom .token import TT_PLUS\nfrom .token import TT_MINUS\nfrom .token import TT_MUL\nfrom .token import TT_DIV\nfrom .token import TT_LPAREN\nfrom .token import TT_RPAREN\nfrom .token import TT_INT\nfrom .token import TT_FLOAT\nfrom .token import Token\nfrom .position import Position\nfrom .error import Error, IllegalCharError, ExpectedCharError\n\n\nclass Lexer:\n def __init__(self, fn: str, text: str) -> None:\n self.fn = fn\n self.text = text\n self.pos = Position(-1, 0, -1, fn, text)\n self.cur_char = None\n self.advance()\n\n def advance(self) -> Lexer:\n self.pos.advance(self.cur_char)\n self.cur_char = self.text[self.pos.idx] if self.pos.idx < len(\n self.text) else None\n return self\n\n def get_tokens(self) -> Tuple[List[Token], Error]:\n tokens = []\n while self.cur_char != None:\n if self.cur_char in ' \\t':\n self.advance()\n elif self.cur_char in DIGITS + '.':\n tokens.append(self.make_number())\n elif self.cur_char in LETTERS:\n tokens.append(self.make_identifier())\n elif self.cur_char == '+':\n tokens.append(Token(TT_PLUS, pos_start=self.pos))\n self.advance()\n elif self.cur_char == '-':\n tokens.append(Token(TT_MINUS, pos_start=self.pos))\n self.advance()\n elif self.cur_char == '*':\n tokens.append(Token(TT_MUL, pos_start=self.pos))\n self.advance()\n elif self.cur_char == '/':\n tokens.append(Token(TT_DIV, pos_start=self.pos))\n self.advance()\n elif self.cur_char == '(':\n tokens.append(Token(TT_LPAREN, pos_start=self.pos))\n self.advance()\n elif self.cur_char == ')':\n tokens.append(Token(TT_RPAREN, pos_start=self.pos))\n self.advance()\n elif self.cur_char == '^':\n tokens.append(Token(TT_POW, pos_start=self.pos))\n self.advance()\n elif self.cur_char == '=':\n tokens.append(self.make_equals())\n elif self.cur_char == '!':\n tok, err = self.make_not_equals()\n if err:\n return [], err\n tokens.append(tok)\n elif self.cur_char == '<':\n tokens.append(self.make_less_than())\n elif self.cur_char == '>':\n tokens.append(self.make_greater_than())\n else:\n pos_start = self.pos.copy()\n char = self.cur_char\n self.advance()\n return [], IllegalCharError(pos_start, self.pos, \"'\" + char + \"'\")\n tokens.append(Token(TT_EOF, pos_start=self.pos))\n return tokens, None\n\n def make_number(self) -> Token:\n num_str = ''\n dot_count = 0\n pos_start = self.pos.copy()\n\n while self.cur_char != None and self.cur_char in DIGITS + '.':\n if self.cur_char == '.':\n if dot_count > 0:\n break\n dot_count += 1\n num_str += self.cur_char\n self.advance()\n\n if dot_count == 0:\n return Token(TT_INT, int(num_str), pos_start, self.pos)\n else:\n return Token(TT_FLOAT, float(num_str), pos_start, self.pos)\n\n def make_identifier(self) -> Token:\n id_str = ''\n pos_start = self.pos.copy()\n\n while self.cur_char != None and self.cur_char in LETTERS_DIGITS:\n id_str += self.cur_char\n self.advance()\n\n tok_type = TT_KEYWORD if id_str in KEYWORDS else TT_IDENTIFIER\n return Token(tok_type, id_str, pos_start, self.pos)\n\n def make_not_equals(self) -> Token:\n pos_start = self.pos.copy()\n self.advance()\n\n if self.cur_char == '=':\n self.advance()\n return Token(TT_NE, 
pos_start=pos_start, pos_end=self.pos), None\n\n        self.advance()\n        return None, ExpectedCharError(pos_start, self.pos, \"'=' (after '!')\")\n\n    def make_equals(self) -> Token:\n        tok_type = TT_EQ\n        pos_start = self.pos.copy()\n        self.advance()\n        if self.cur_char == '=':\n            self.advance()\n            tok_type = TT_EE\n        return Token(tok_type, pos_start=pos_start, pos_end=self.pos)\n\n    def make_greater_than(self) -> Token:\n        tok_type = TT_GT\n        pos_start = self.pos.copy()\n        self.advance()\n        if self.cur_char == '=':\n            self.advance()\n            tok_type = TT_GTE\n        return Token(tok_type, pos_start=pos_start, pos_end=self.pos)\n\n    def make_less_than(self) -> Token:\n        tok_type = TT_LT\n        pos_start = self.pos.copy()\n        self.advance()\n        if self.cur_char == '=':\n            self.advance()\n            tok_type = TT_LTE\n        return Token(tok_type, pos_start=pos_start, pos_end=self.pos)\n" }, { "alpha_fraction": 0.6073479056358337, "alphanum_fraction": 0.6096441149711609, "avg_line_length": 28.03333282470703, "blob_id": "96dc191154f338021e203110e7cf0460f0e1feb2", "content_id": "2ce236f8ac8eae13093480ad197c2ca279b783f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 871, "license_type": "no_license", "max_line_length": 109, "num_lines": 30, "path": "/basiclang/context.py", "repo_name": "shijq23/py-basicinterp", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nfrom __future__ import annotations\nfrom basiclang.position import Position\n\n\nclass Context:\n    def __init__(self, display_name: str, parent: Context = None, parent_entry_pos: Position = None) -> None:\n        self.display_name = display_name\n        self.parent = parent\n        self.parent_entry_pos = parent_entry_pos\n        self.symbol_table = SymbolTable()\n\n\nclass SymbolTable:\n    def __init__(self) -> None:\n        self.symbols: dict = {}\n        self.parent: SymbolTable = None\n\n    def get(self, name: str):\n        val = self.symbols.get(name, None)\n        if val == None and self.parent != None:\n            return self.parent.get(name)\n        return val\n\n    def set(self, name: str, value) -> None:\n        self.symbols[name] = value\n\n    def remove(self, name: str) -> None:\n        del self.symbols[name]\n" }, { "alpha_fraction": 0.6032316088676453, "alphanum_fraction": 0.6184919476509094, "avg_line_length": 20.648147583007812, "blob_id": "c180c081daeaeb17de9405d7d541a2bb574f3007", "content_id": "3f45481d4ffb06aea61d7803390b53be32ebc8bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 1114, "license_type": "no_license", "max_line_length": 79, "num_lines": 54, "path": "/Makefile", "repo_name": "shijq23/py-basicinterp", "src_encoding": "UTF-8", "text": "tests: clean-pyc\n\tpytest tests\n\ntests-cov: clean-pyc\n\tpytest --cov=dashboard tests\n\nclean:\n\t@rm -f .coverage\n\t@find . -maxdepth 2 -name .pytest_cache -type d -print0 | xargs -0 /bin/rm -rf\n\t@find . -maxdepth 2 -name __pycache__ -type d -print0 | xargs -0 /bin/rm -rf\n\nclean-pyc:\n\tfind . -name '*.pyc' -exec rm --force {} +\n\tfind . -name '*.pyo' -exec rm --force {} +\n\tfind . -name '*~' -exec rm --force {} +\n\nclean-build:\n\trm --force --recursive build/\n\trm --force --recursive dist/\n\trm --force --recursive *.egg-info\n\nenv:\n\tvirtualenv env --python=python3\n\t#source env/bin/activate\n\t#pip3 install jira pytest pytest-cov\n\t#deactivate\n\nisort:\n\tsh -c \"isort --skip-glob=.tox --recursive . \"\n\nlint:\n\t#@find . 
-type f -name \"*.py\" | xargs autopep8 -i\n\tflake8 --exclude=.tox\n\nhelp:\n\tpython3 shell.py --help\n\nrun:\n python3 shell.py\n\npackage:\n\tpython3 setup.py sdist\n\ndocker-run:\n docker build \\\n --file=./Dockerfile \\\n --tag=my_project ./\n docker run \\\n --detach=false \\\n --name=my_project \\\n --publish=$(HOST):8080 \\\n my_project\n\n.PHONY: tests tests-cov clean help env" }, { "alpha_fraction": 0.6122239232063293, "alphanum_fraction": 0.613764762878418, "avg_line_length": 33.157894134521484, "blob_id": "da2ce03bd8758330c92a2113e98f65d0f3c438be", "content_id": "50d5036d07b022b9aeca7492d4b9155571961a46", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3894, "license_type": "no_license", "max_line_length": 102, "num_lines": 114, "path": "/basiclang/rtresult.py", "repo_name": "shijq23/py-basicinterp", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nfrom __future__ import annotations\nimport math\n\nfrom basiclang.error import Error, RTError\n\nfrom basiclang.context import Context\nfrom basiclang.position import Position\n\n\nclass RTResult:\n def __init__(self) -> None:\n self.value = None\n self.error = None\n\n def register(self, res: RTResult):\n if res.error:\n self.error = res.error\n return res.value\n\n def success(self, value: Number) -> RTResult:\n self.value = value\n return self\n\n def failure(self, error: Error) -> RTResult:\n self.error = error\n return self\n\n\nclass Number:\n def __init__(self, value) -> None:\n self.value = value\n self.set_pos()\n self.set_context()\n\n def set_pos(self, pos_start: Position = None, pos_end: Position = None) -> Number:\n self.pos_start = pos_start\n self.pos_end = pos_end\n return self\n\n def set_context(self, context: Context = None) -> Number:\n self.context = context\n return self\n\n def add(self, other) -> Number:\n if isinstance(other, Number):\n return Number(self.value + other.value).set_context(self.context), None\n\n def sub(self, other) -> Number:\n if isinstance(other, Number):\n return Number(self.value - other.value).set_context(self.context), None\n\n def mul(self, other) -> Number:\n if isinstance(other, Number):\n return Number(self.value * other.value).set_context(self.context), None\n\n def pow(self, other) -> Number:\n if isinstance(other, Number):\n if isinstance(self.value, int) and isinstance(other.value, int):\n return Number(self.value ** other.value).set_context(self.context), None\n else:\n return Number(math.pow(self.value, other.value)).set_context(self.context), None\n\n def div(self, other) -> Number:\n if isinstance(other, Number):\n if other.value == 0:\n return None, RTError(other.pos_start, other.pos_end, 'Division by zero', self.context)\n else:\n return Number(self.value / other.value).set_context(self.context), None\n\n def comp_eq(self, other) -> Number:\n if isinstance(other, Number):\n return Number(int(self.value == other.value)).set_context(self.context), None\n\n def comp_ne(self, other) -> Number:\n if isinstance(other, Number):\n return Number(int(self.value != other.value)).set_context(self.context), None\n\n def comp_lt(self, other) -> Number:\n if isinstance(other, Number):\n return Number(int(self.value < other.value)).set_context(self.context), None\n\n def comp_gt(self, other) -> Number:\n if isinstance(other, Number):\n return Number(int(self.value > other.value)).set_context(self.context), None\n\n def comp_lte(self, other) -> Number:\n if isinstance(other, Number):\n return 
Number(int(self.value <= other.value)).set_context(self.context), None\n\n    def comp_gte(self, other) -> Number:\n        if isinstance(other, Number):\n            return Number(int(self.value >= other.value)).set_context(self.context), None\n\n    def and_(self, other) -> Number:\n        if isinstance(other, Number):\n            return Number(int(self.value and other.value)).set_context(self.context), None\n\n    def or_(self, other) -> Number:\n        if isinstance(other, Number):\n            return Number(int(self.value or other.value)).set_context(self.context), None\n\n    def not_(self):\n        return Number(1 if self.value == 0 else 0).set_context(self.context), None\n\n    def copy(self) -> Number:\n        copy = Number(self.value)\n        copy.set_pos(self.pos_start, self.pos_end)\n        copy.set_context(self.context)\n        return copy\n\n    def __repr__(self) -> str:\n        return str(self.value)\n" }, { "alpha_fraction": 0.7043189406394958, "alphanum_fraction": 0.7076411843299866, "avg_line_length": 26.363636016845703, "blob_id": "3206709b3519561296cf406c5dc8bcc03c803b02", "content_id": "2e212023af911dd0ddc138917b48c220b12b60fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 903, "license_type": "no_license", "max_line_length": 62, "num_lines": 33, "path": "/basiclang/basic.py", "repo_name": "shijq23/py-basicinterp", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom __future__ import annotations\nfrom basiclang.context import SymbolTable\nfrom typing import List, Tuple\n\nfrom basiclang.error import Error\nfrom basiclang.token import Token\nfrom basiclang.lexer import Lexer\nfrom basiclang.parser import Parser\nfrom basiclang.interpreter import Context, Interpreter, Number\n\nglobal_symbol_table = SymbolTable()\nglobal_symbol_table.set(\"null\", Number(0))\n\n\ndef run(fn: str, text: str) -> Tuple[List[Token], Error]:\n    lexer = Lexer(fn, text)\n    tokens, error = lexer.get_tokens()\n    if error:\n        return None, error\n\n    parser = Parser(tokens)\n    ast = parser.parse()\n    if ast.error:\n        return None, ast.error\n\n    interpreter = Interpreter()\n    context = Context('<program>')\n    context.symbol_table = global_symbol_table\n    res = interpreter.visit(ast.node, context)\n    return res.value, res.error\n" }, { "alpha_fraction": 0.49152541160583496, "alphanum_fraction": 0.498305082321167, "avg_line_length": 20.14285659790039, "blob_id": "3f45e6d1da9c68ffdb9c9cc2101000343c5597a4", "content_id": "c8280d94bddd1cba1da1b81b6accdcbe9b8c8a4b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 295, "license_type": "no_license", "max_line_length": 57, "num_lines": 14, "path": "/shell.py", "repo_name": "shijq23/py-basicinterp", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport basiclang.basic\n\nif __name__ == '__main__':\n    while True:\n        text = input(\"basic > \")\n        ast, error = basiclang.basic.run('<stdin>', text)\n\n        if error:\n            print(error.as_str())\n        else:\n            print(ast)" } ]
13
sushantdangol/image-steganography-python
https://github.com/sushantdangol/image-steganography-python
8e3bbe42b8bd0ecec5f2d386f4cce2859b898494
b3f9605298d8f03aa52e9a468215ad5445bf2afb
f392ed739fb5dff90b4c0c9cdfe0032566ced02f
refs/heads/master
2020-06-15T21:37:21.427118
2019-07-05T12:41:39
2019-07-05T12:41:39
195,398,446
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6218256950378418, "alphanum_fraction": 0.6266300678253174, "avg_line_length": 25.01785659790039, "blob_id": "bfd575a9823551da07bacaca431c83fdec049143", "content_id": "72ebe2d5ffbba3597696ba025c5531233a3d5e5d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2914, "license_type": "no_license", "max_line_length": 110, "num_lines": 112, "path": "/Stegano.py", "repo_name": "sushantdangol/image-steganography-python", "src_encoding": "UTF-8", "text": "from PIL import Image\nimport numpy as np\n\n# Split the text into characters and store them in a list\ndef split(text):\n return [char for char in text]\n\n#convert the characters into their respective ASCII Value\ndef str_to_asc(text):\n list = []\n\n for asc in text:\n list.append(ord(asc))\n\n return list\n\n#convert ascii to string\ndef asc_to_str(text):\n list = []\n\n for st in text:\n list.append(chr(st))\n\n return list\n\n#Convert the ASCII Values into binary\ndef asc_to_bin(asc):\n list = []\n\n for bin in asc:\n list.append(\"{0:08b}\".format(bin))\n\n return list\n\ndef img_to_rgb(img):\n return np.array( img)\n\n#Store the ASCII/String and its respective binary number in pairs\ndef asc_bin():\n # dict_a = {}\n list_a = []\n\n for bin_code, y in zip([char for char in bin_list], split_text):\n list_a.append( (y, [char for char in bin_code]) )\n # dict_a.update( {y: [char for char in bin_code]} )\n\n return list_a\n # return dict_a\n\n#convert rgb values into binary\ndef rgb_to_bin(img):\n\n list_def = []\n\n for x in np.nditer(img):\n list_def.append(\"{0:08b}\".format(x))\n\n return np.array(list_def).reshape(img_rgb.shape)\n\n#convert binary into rgb values\ndef bin_to_rgb(img):\n list_def = []\n\n for x in np.nditer(img):\n list_def.append(int(str(x), 2))\n\n # new_array = np.array(list_def).astype(int).reshape(img_rgb.shape)\n # return new_array\n return np.array(list_def, dtype=np.uint8).reshape(img_rgb.shape) #add dtype to convert fron int32 to uint8\n\n\n'''\nIntit Code\n'''\n\n#Enter the text to be inserted.\ntext_message = input('Enter the text:')\n\n#import image\nnew_img = Image.open('D:\\original.jpeg')\n\n'''Assigning variable to the converted lists'''\nsplit_text = split(text_message) #variable to store the characters that were splitted\nasc_list = str_to_asc(split_text) #variable to store ascii converted values\nbin_list = asc_to_bin(asc_list) #variable to store binary converted values\n\nimg_rgb = img_to_rgb(new_img) #variable to store the list on rgb values from the image\nimg_bin = rgb_to_bin(img_rgb) #variable to store the list of binary value from rgb\nrgb_bin = bin_to_rgb(img_bin) #varialble to store the list of rgb values from binary\n\n\n# print(asc_bin())\n# print(asc_to_bin(asc_list))s\n\n\n'''Check if the original values and converted values match or not'''\n# print(img_bin)\n# print(rgb_bin)\n# print('----------------------------------------------------------------')\n# print(img_rgb)\n\n'''To find the data type of the arrays'''\n# print(rgb_bin.dtype)#data type of converted array\n# print('----------------------------------------------------------------')\n# print(img_rgb.dtype)#data type of original array\n#############################################################\n\n\n'''Save the encrypted Image'''\n# modi_img = Image.fromarray(rgb_bin)\n# modi_img.save('D:\\modified222.jpeg')\n# print('Image has been Encrypted')\n" } ]
1
thiagohpr/projetosCamada
https://github.com/thiagohpr/projetosCamada
b86e62ea5242547eda8e69962842082be79999bf
cb7881a40f1c4c57a0c7b528007c0955fb988860
ea648b49c0cc941e926787aeecd7c142d17ad0d1
refs/heads/master
2023-08-25T23:33:18.547076
2021-11-11T21:28:26
2021-11-11T21:28:26
402,885,571
0
0
null
2021-09-03T20:03:58
2021-10-01T23:12:01
2021-10-01T23:13:53
Python
[ { "alpha_fraction": 0.5037721991539001, "alphanum_fraction": 0.5235362648963928, "avg_line_length": 36.64400100708008, "blob_id": "072fd9657a8104677053729c7a18affc06f702b8", "content_id": "23ad55d448eb78c394b7eaaa7828fb3747c80d59", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9466, "license_type": "no_license", "max_line_length": 128, "num_lines": 250, "path": "/p4Protocolo/client/client.py", "repo_name": "thiagohpr/projetosCamada", "src_encoding": "UTF-8", "text": "#####################################################\n# Camada Fรญsica da Computaรงรฃo\n#Carareto\n#11/08/2020\n#Aplicaรงรฃo\n####################################################\n\n\n#esta รฉ a camada superior, de aplicaรงรฃo do seu software de comunicaรงรฃo serial UART.\n#para acompanhar a execuรงรฃo e identificar erros, construa prints ao longo do cรณdigo! \n\n\nfrom threading import Timer\nfrom enlace import *\nimport time\nimport numpy as np\nimport random\n# voce deverรก descomentar e configurar a porta com atravรฉs da qual ira fazer comunicaรงao\n# para saber a sua porta, execute no terminal :\n# python -m serial.tools.list_ports\n# se estiver usando windows, o gerenciador de dispositivos informa a porta\n\n#use uma das 3 opcoes para atribuir ร  variรกvel a porta usada\n#serialName = \"/dev/ttyACM0\" # Ubuntu (variacao de)\n#serialName = \"/dev/tty.usbmodem1411\" # Mac (variacao de)\nserialName = \"COM3\" # Windows(variacao de)\n\neop1=b'\\xaa'\neop2=b'\\xff'\neop=[eop2,eop1,eop2,eop1]\n\ndef calcula_quant(tamanho):\n if tamanho%114==0:\n return tamanho//114\n else:\n return tamanho//114 + 1\n\ndef cria_head(tipo,n_atual,n_total,n_bytespay):\n id_servidor=0\n head=[0]*10\n head[0]=(tipo).to_bytes(1, byteorder='big')\n head[1]=(0).to_bytes(1, byteorder='big')#id sensor\n head[2]=(id_servidor).to_bytes(1, byteorder='big')#id servidor\n head[3]=(n_total).to_bytes(1, byteorder='big')\n head[4]=(n_atual).to_bytes(1, byteorder='big')\n if tipo==1:\n #id arquivo em handshake\n head[5]=(0).to_bytes(1, byteorder='big')\n else:\n #tamanho do payload\n head[5]=(n_bytespay).to_bytes(1, byteorder='big')\n head[6]=(0).to_bytes(1, byteorder='big')\n head[7]=(0).to_bytes(1, byteorder='big')\n head[8]=(170).to_bytes(1, byteorder='big')\n head[9]=(170).to_bytes(1, byteorder='big')\n return head\n\ndef atualiza_head (pacote, cont):\n pacote[7]=(cont).to_bytes(1, byteorder='big')\n return pacote\ndef cria_lista(binario):\n lista=[]\n for bit in binario:\n lista.append((bit).to_bytes(1, byteorder='big'))\n return lista\n\ndef cria_datagrama(arquivo):\n #Retorna uma lista de datagramas (cada elemento รฉ uma lista de bygtes com head, payload e eop) a partir de um arquivo\n bytes=cria_lista(arquivo)\n payload=114\n \n quant_pay=calcula_quant(len(arquivo))\n lista_datagramas=[0]*(quant_pay+1)\n i=0\n for i in range(quant_pay+1):\n\n if i==0:\n head=cria_head(1,0,quant_pay,0)\n bytes_pay=[]\n elif i!=quant_pay:\n #Se nรฃo รฉ o รบltimo pacote\n head=cria_head(3,i,quant_pay,payload)\n bytes_pay=[0]*payload\n for quant in range(payload):\n bytes_pay[quant]=bytes.pop(0)\n \n else:\n #Se รฉ o รบltimo pacote\n head=cria_head(3,i,quant_pay,len(arquivo)%payload)\n bytes_resto=(len(arquivo))%payload\n bytes_pay=[0]*bytes_resto\n for quant in range(bytes_resto):\n bytes_pay[quant]=bytes.pop(0)\n\n \n \n data=head+bytes_pay+eop\n lista_datagramas[i]=data\n i+=1\n return lista_datagramas\n\n\ndef cria_log(formato,head):\n \n tipo=str(int.from_bytes(head[0], byteorder='big'))\n 
tamanho_total=str(int.from_bytes(head[5], byteorder='big')+14)\n linha=time.ctime()+\" / \"+formato+\" / \"+tipo+\" / \"+tamanho_total\n if tipo == '3':\n crc=(hex(int.from_bytes(head[8], byteorder='big'))[2:]+hex(int.from_bytes(head[9], byteorder='big'))[2:]).upper()\n linha+=\" / \"+str(int.from_bytes(head[4], byteorder='big'))+\" / \"+str(int.from_bytes(head[3], byteorder='big'))+\" / \"+crc\n return linha\n\ndef log_envio(head):\n with open(\"C:/Users/thpro/Desktop/Camada Fรญsica/projetosCamada/p4Protocolo/client/log.txt\", \"a\") as file:\n file.write(cria_log(\"envio\",head))\n file.write('\\n')\ndef log_recebeu(head):\n with open(\"C:/Users/thpro/Desktop/Camada Fรญsica/projetosCamada/p4Protocolo/client/log.txt\", \"a\") as file:\n file.write(cria_log(\"receb\",head))\n file.write('\\n')\ndef cria_lista(binario):\n lista=[]\n for bit in binario:\n lista.append((bit).to_bytes(1, byteorder='big'))\n return lista\ndef main():\n try:\n #declaramos um objeto do tipo enlace com o nome \"com\". Essa รฉ a camada inferior ร  aplicaรงรฃo. Observe que um parametro\n #para declarar esse objeto รฉ o nome da porta.\n com1 = enlace('COM4')\n\n \n # Ativa comunicacao. Inicia os threads e a comunicaรงรฃo seiral \n com1.enable()\n with open(\"C:/Users/thpro/Desktop/Camada Fรญsica/projetosCamada/p4Protocolo/client/log.txt\", \"w\") as file:\n file.write(\"Comunicaรงรฃo aberta\")\n file.write('\\n')\n print ('Comunicaรงรฃo aberta')\n\n txBuffer=(255).to_bytes(114*9+10, byteorder='big')\n datagramas=cria_datagrama(txBuffer)\n numPck=int.from_bytes(datagramas[0][3], byteorder='big')\n inicia=False\n encerrar=False\n startTimer2 = int(time.time())\n while not inicia:\n #envia mensagem t1\n print(\"Iniciando Handshake\")\n txBuffer=datagramas[0]\n\n com1.sendData(np.asarray(txBuffer))\n log_envio(txBuffer)\n print(txBuffer)\n\n\n time.sleep(5)\n rxBuffer,nRx=com1.getData(14)\n if rxBuffer[0]==2:\n #se recebeu ok:\n log_recebeu(cria_lista(rxBuffer))\n print(\"Cliente recebeu mensagem de confirmaรงรฃo.\")\n inicia=True\n cont=1\n else:\n print(\"Cliente nรฃo recebeu a confirmaรงรฃo.\")\n time_now = int(time.time())\n if time_now >= startTimer2 + 20:\n msgTipo5=cria_head(5,0,numPck,0)\n msgTipo5=msgTipo5+eop\n com1.sendData(np.asarray(msgTipo5))\n log_envio(msgTipo5)\n encerrar=True\n if encerrar==True:\n break\n if encerrar==False:\n while cont<=numPck:\n #envia pckg cont (mensagem t3)\n print(\"Enviando pacote {}.\".format(cont))\n txBuffer=datagramas[cont]\n print(txBuffer)\n\n com1.sendData(np.asarray(txBuffer))\n log_envio(txBuffer)\n\n startTimer2 = int(time.time())\n\n recebeuConf=False\n while not recebeuConf:\n if not encerrar:\n rxBuffer,nRx=com1.getData(14)\n if rxBuffer[0]==4:\n log_recebeu(cria_lista(rxBuffer))\n #se recebeu mensagem t4 (confirmaรงรฃo do server):\n print(\"Recebeu confirmaรงรฃo do servidor.\")\n if cont < numPck:\n datagramas[cont+1]=atualiza_head(datagramas[cont+1],cont)\n cont+=1\n recebeuConf=True\n else:\n #estourou timer 1\n if rxBuffer==(255).to_bytes(1, byteorder='big'):\n #reenviar pckg cont (mensagem t3)\n #reset Timer1\n print(\"Timer 1 estourado. Reenviando pacote {}.\".format(cont))\n txBuffer=datagramas[cont]\n com1.sendData(np.asarray(txBuffer))\n log_envio(txBuffer)\n\n time_now = int(time.time())\n if time_now >= startTimer2 + 20:\n print(\"Timer 2 encerrado. 
Timeout de envio do arquivo, finalizando a comunicaรงรฃo.\")\n msgTipo5=cria_head(5,cont,numPck,0)\n msgTipo5=msgTipo5+eop\n com1.sendData(np.asarray(msgTipo5))\n log_envio(msgTipo5)\n encerrar=True\n else:\n rxBuffer,nRx=com1.getData(14)\n if rxBuffer[0]==6:\n log_recebeu(cria_lista(rxBuffer))\n print(\"Erro no nรบmero do pacote.\")\n #corrigir cont\n cont=rxBuffer[6]\n txBuffer=datagramas[cont]\n com1.sendData(np.asarray(txBuffer))\n log_envio(txBuffer)\n startTimer2 = int(time.time())\n else:\n break\n if encerrar==True:\n break\n\n\n\n print(\"-------------------------\")\n print(\"Comunicaรงรฃo encerrada\")\n print(\"-------------------------\")\n com1.disable()\n with open(\"C:/Users/thpro/Desktop/Camada Fรญsica/projetosCamada/p4Protocolo/client/log.txt\", \"a\") as file:\n file.write(\"Comunicaรงรฃo encerrada\")\n\n except Exception as erro:\n print(\"ops! :-\\\\\")\n print(erro)\n com1.disable()\n \n\n # so roda o main quando for executado do terminal ... se for chamado dentro de outro modulo nao roda\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5618374347686768, "alphanum_fraction": 0.5995288491249084, "avg_line_length": 14.722222328186035, "blob_id": "02886f4a5bc07cd8636b3d4e20d25e146f8dff9e", "content_id": "1de83e5ece73c7984bde4e2040c008a2075a8a89", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 849, "license_type": "no_license", "max_line_length": 34, "num_lines": 54, "path": "/p6UART/tx_uart.ino", "repo_name": "thiagohpr/projetosCamada", "src_encoding": "UTF-8", "text": "// MCK 21MHz\nvoid _sw_uart_wait_half_T() {\n for(int i = 0; i < 1093; i++)\n asm(\"NOP\");\n}\n\nvoid _sw_uart_wait_T() {\n _sw_uart_wait_half_T();\n _sw_uart_wait_half_T();\n}\n\nvoid setup() {\n pinMode(4,OUTPUT);\n}\n\n\nvoid loop() {\n digitalWrite(4,HIGH);\n _sw_uart_wait_T();\n\n\n //startbit\n digitalWrite(4,LOW);\n _sw_uart_wait_T();\n\n //envio do caracter t (01110100)\n digitalWrite(4,LOW);\n _sw_uart_wait_T();\n digitalWrite(4,LOW);\n _sw_uart_wait_T();\n digitalWrite(4,HIGH);\n _sw_uart_wait_T();\n digitalWrite(4,LOW);\n _sw_uart_wait_T();\n digitalWrite(4,HIGH);\n _sw_uart_wait_T();\n digitalWrite(4,HIGH);\n _sw_uart_wait_T();\n digitalWrite(4,HIGH);\n _sw_uart_wait_T();\n digitalWrite(4,LOW);\n _sw_uart_wait_T();\n\n //paridade \n digitalWrite(4,LOW);\n _sw_uart_wait_T();\n\n //stopbit\n digitalWrite(4,HIGH);\n _sw_uart_wait_T();\n\n delay(3000);\n\n}\n" }, { "alpha_fraction": 0.4883174002170563, "alphanum_fraction": 0.5143277049064636, "avg_line_length": 33.36868667602539, "blob_id": "9e7918a7948d83a8b3b08d62cfe5319aea64179d", "content_id": "1e1eb4e668e86518b3f402f08c9c36bafee70997", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6842, "license_type": "no_license", "max_line_length": 134, "num_lines": 198, "path": "/p3Datagrama/server/aplicacao.py", "repo_name": "thiagohpr/projetosCamada", "src_encoding": "UTF-8", "text": "#####################################################\n# Camada Fรญsica da Computaรงรฃo\n#Carareto\n#11/08/2020\n#Aplicaรงรฃo\n####################################################\n\n\n#esta รฉ a camada superior, de aplicaรงรฃo do seu software de comunicaรงรฃo serial UART.\n#para acompanhar a execuรงรฃo e identificar erros, construa prints ao longo do cรณdigo! 
\n\n\nfrom enlace import *\nimport time\nimport numpy as np\n\n# voce deverรก descomentar e configurar a porta com atravรฉs da qual ira fazer comunicaรงao\n# para saber a sua porta, execute no terminal :\n# python -m serial.tools.list_ports\n# se estiver usando windows, o gerenciador de dispositivos informa a porta\n\n#use uma das 3 opcoes para atribuir ร  variรกvel a porta usada\n#serialName = \"/dev/ttyACM0\" # Ubuntu (variacao de)\n#serialName = \"/dev/tty.usbmodem1411\" # Mac (variacao de)\nserialName = \"COM3\" # Windows(variacao de)\n\nimageR=\"/imagem.png\"\n\ndef calcula_quant(tamanho):\n if tamanho%114==0:\n return tamanho//114\n else:\n return tamanho//114 + 1\n\ndef cria_head(n_atual,n_total,n_bytespay):\n head=[0]*10\n head[0]=(170).to_bytes(1, byteorder='big')\n head[1]=(n_atual).to_bytes(1, byteorder='big')\n head[2]=(n_total).to_bytes(1, byteorder='big')\n head[3]=(n_bytespay).to_bytes(1, byteorder='big')\n head[4]=(0).to_bytes(1, byteorder='big')\n head[5]=(0).to_bytes(1, byteorder='big')\n head[6]=(0).to_bytes(1, byteorder='big')\n head[7]=(0).to_bytes(1, byteorder='big')\n head[8]=(0).to_bytes(1, byteorder='big')\n head[9]=(0).to_bytes(1, byteorder='big')\n \n return head\n\ndef cria_lista(binario):\n lista=[]\n for bit in binario:\n lista.append((bit).to_bytes(1, byteorder='big'))\n return lista\n\ndef cria_datagrama(arquivo):\n #Retorna uma lista de datagramas (cada elemento รฉ uma lista de bygtes com head, payload e eop) a partir de um arquivo\n bytes=cria_lista(arquivo)\n payload=114\n \n quant_pay=calcula_quant(len(arquivo))\n lista_datagramas=[0]*quant_pay\n i=0\n for datagrama in range(quant_pay):\n if i!=quant_pay-1:\n #Se nรฃo รฉ o รบltimo pacote\n head=cria_head(i+1,quant_pay,payload)\n bytes_pay=[0]*payload\n for quant in range(payload):\n bytes_pay[quant]=bytes.pop(0)\n \n else:\n #Se รฉ o รบltimo pacote\n head=cria_head(i+1,quant_pay,len(arquivo)%payload)\n bytes_resto=(len(arquivo))%payload\n bytes_pay=[0]*bytes_resto\n for quant in range(bytes_resto):\n bytes_pay[quant]=bytes.pop(0)\n\n eop=[(170).to_bytes(1, byteorder='big')]*4\n data=head+bytes_pay+eop\n lista_datagramas[i]=data\n i+=1\n return lista_datagramas\n\ndef agrupa_pacotes(datagrama):\n lista = []\n for pacote in datagrama:\n payload = int.from_bytes(pacote[3], byteorder='big')\n for byte in pacote[10:payload+10]:\n lista.append(byte)\n print(len(lista))\n return (b''.join(lista))\n \n\n\n\ndef main():\n try:\n #declaramos um objeto do tipo enlace com o nome \"com\". Essa รฉ a camada inferior ร  aplicaรงรฃo. Observe que um parametro\n #para declarar esse objeto รฉ o nome da porta.\n com1 = enlace('COM3')\n \n \n # Ativa comunicacao. 
Inicia os threads e a comunicaรงรฃo seiral \n com1.enable()\n print(\"comunicacao aberta\")\n rxBuffer = 0\n lista = []\n handshake = True\n while handshake:\n while rxBuffer != (204).to_bytes(1, byteorder='big'):\n rxBuffer, nRx = com1.getData(1)\n print(rxBuffer)\n lista.append(rxBuffer)\n time.sleep(0.05)\n \n com1.sendData((100).to_bytes(1, byteorder='big'))\n rxBuffer, nRx = com1.getData(1)\n if rxBuffer == (100).to_bytes(1, byteorder='big'):\n handshake = False\n elif rxBuffer == (101).to_bytes(1, byteorder='big'):\n lista = []\n \n\n print(lista)\n print('inicando leitura dos pacotes')\n\n if int.from_bytes(lista[0], byteorder='big') == 170:\n pacotes = int.from_bytes(lista[2], byteorder='big')\n else:\n pacotes = int.from_bytes(lista[1], byteorder='big')\n\n\n datagrama = []\n for i in range(pacotes):\n datagrama.append([])\n \n \n i=0\n \n while i != pacotes:\n rxBuffer, nRx = com1.getData(1)\n datagrama[i].append(rxBuffer)\n print(rxBuffer)\n if rxBuffer == (204).to_bytes(1, byteorder='big'):\n if datagrama[i][len(datagrama[i])-2] == (170).to_bytes(1, byteorder='big'):\n if i>=1:\n if int.from_bytes(datagrama[i][1], byteorder='big') == int.from_bytes(datagrama[i-1][1], byteorder='big') + 1:\n print('nรบmero de pacote correto')\n if len(datagrama[i]) == int.from_bytes(datagrama[i][3], byteorder='big') + 14:\n print('nรบmero de bytes correto')\n com1.sendData((100).to_bytes(1, byteorder='big'))\n i+=1\n time.sleep(0.1)\n else: \n print('nรบmero de bytes incorreto')\n com1.sendData((101).to_bytes(1, byteorder='big'))\n datagrama[i]=[]\n else:\n print('nรบmero de pacote incorreto')\n com1.sendData((101).to_bytes(1, byteorder='big'))\n datagrama[i]=[]\n \n else:\n if len(datagrama[i]) == int.from_bytes(datagrama[i][3], byteorder='big') + 14:\n print('nรบmero de bytes correto')\n com1.sendData((100).to_bytes(1, byteorder='big'))\n i+=1\n time.sleep(0.1)\n else: \n print('nรบmero de bytes incorreto')\n com1.sendData((101).to_bytes(1, byteorder='big'))\n datagrama[i]=[] \n\n \n print(agrupa_pacotes(datagrama))\n com1.sendData((100).to_bytes(1, byteorder='big'))\n \n \n com1.disable()\n \n \n\n \n \n\n \n \n except Exception as erro:\n print(\"ops! :-\\\\\")\n print(erro)\n com1.disable()\n \n\n #so roda o main quando for executado do terminal ... 
se for chamado dentro de outro modulo nao roda\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.7240506410598755, "alphanum_fraction": 0.7493671178817749, "avg_line_length": 21.352941513061523, "blob_id": "a1622a2bedda732eefc10f73feb0dddc1adac7f0", "content_id": "4488213f05348219cdbe53fde9799b10e67767b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 400, "license_type": "no_license", "max_line_length": 48, "num_lines": 17, "path": "/p8/gravar_audio.py", "repo_name": "thiagohpr/projetosCamada", "src_encoding": "UTF-8", "text": "import numpy as np\r\nimport sounddevice as sd\r\nimport soundfile as sf\r\nimport matplotlib.pyplot as plt\r\nfrom suaBibSignal import signalMeu\r\nimport time\r\n\r\nprint('Comeรงarรก em 3 segundos')\r\ntime.sleep(3)\r\nprint('Inรญcio da gravaรงรฃo')\r\nfilename='my-file2.wav'\r\nfs=44100\r\nduration = 5\r\nnumAmostras=duration*fs\r\naudio = sd.rec(int(numAmostras), fs, channels=1)\r\nsd.wait()\r\nsf.write(filename, audio, fs)" }, { "alpha_fraction": 0.5967671275138855, "alphanum_fraction": 0.6509392857551575, "avg_line_length": 27.587499618530273, "blob_id": "b45877cf715882043270e4e4954408110180be85", "content_id": "0f929d714a471658ed3dd177ddcce2490e68af00", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2293, "license_type": "no_license", "max_line_length": 143, "num_lines": 80, "path": "/p7/encode_versaoAlunos.py", "repo_name": "thiagohpr/projetosCamada", "src_encoding": "UTF-8", "text": "\n\n#importe as bibliotecas\nimport numpy as np\nimport sounddevice as sd\nimport matplotlib.pyplot as plt\nfrom scipy.fftpack import fft\nfrom scipy import signal as window\nfrom suaBibSignal import signalMeu\n\n\nfrequencias={0:[941,1336],1:[697,1209],2:[697,1336],3:[697,1477],4:[770,1209],5:[770,1336],6:[770,1477],7:[852,1209],8:[852,1336],9:[852,1477]}\n\n\n# def signal_handler(signal, frame):\n# print('You pressed Ctrl+C!')\n# sys.exit(0)\n\n#converte intensidade em Db, caso queiram ...\ndef todB(s):\n sdB = 10*np.log10(s)\n return(sdB)\n\ndef main():\n print(\"Inicializando encoder\")\n \n #declare um objeto da classe da sua biblioteca de apoio (cedida) \n sinal=signalMeu()\n\n #declare uma variavel com a frequencia de amostragem, sendo 44100\n fs=44100\n #voce importou a bilioteca sounddevice como, por exemplo, sd. entao\n # os seguintes parametros devem ser setados:\n \n #tempo em segundos que ira emitir o sinal acustico \n duration = 4\n \n#relativo ao volume. Um ganho alto pode saturar sua placa... comece com .3 \n gainX = 0.3\n gainY = 0.3\n\n\n print(\"Gerando Tons base\")\n \n #gere duas senoides para cada frequencia da tabela DTMF ! Canal x e canal y \n #use para isso sua biblioteca (cedida)\n #obtenha o vetor tempo tb.\n #deixe tudo como array\n\n #printe a mensagem para o usuario teclar um numero de 0 a 9. \n perguntando=True\n while perguntando:\n NUM=input(\"Digite uma tecla de 0 a 9: \")\n if NUM in str(list(frequencias.keys())):\n perguntando=False\n NUM=int(NUM)\n #nao aceite outro valor de entrada.\n print(\"Gerando Tom referente ao sรญmbolo : {}\".format(NUM))\n \n \n #construa o sinal a ser reproduzido. 
nao se esqueca de que รฉ a soma das senoides\n f1=frequencias[NUM][0]\n f2=frequencias[NUM][1]\n \n x1,s1=sinal.generateSin(f1,gainX,duration,fs)\n x2,s2=sinal.generateSin(f2,gainY,duration,fs)\n \n #printe o grafico no tempo do sinal a ser reproduzido\n plt.plot(x1,s1+s2)\n plt.title(f'Senรณide {f1}Hz e {f2}Hz pelo tempo do emissor')\n \n sinal.plotFFT(s1+s2,fs)\n \n # reproduz o som\n sd.play(s1+s2, fs)\n # Exibe grรกficos\n plt.show()\n # aguarda fim do audio\n sd.wait()\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5776777267456055, "alphanum_fraction": 0.6008362770080566, "avg_line_length": 34.735633850097656, "blob_id": "54c75c3ba79b536c109376ea8792795ac1d2cf17", "content_id": "1ca2d6f5b1f64556509d45a2b86a4f505a243632", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3145, "license_type": "no_license", "max_line_length": 223, "num_lines": 87, "path": "/p2ClientServer/cliente/client.py", "repo_name": "thiagohpr/projetosCamada", "src_encoding": "UTF-8", "text": "#####################################################\n# Camada Fรญsica da Computaรงรฃo\n#Carareto\n#11/08/2020\n#Aplicaรงรฃo\n####################################################\n\n\n#esta รฉ a camada superior, de aplicaรงรฃo do seu software de comunicaรงรฃo serial UART.\n#para acompanhar a execuรงรฃo e identificar erros, construa prints ao longo do cรณdigo! \n\n\nfrom enlace import *\nimport time\nimport numpy as np\nimport random\n# voce deverรก descomentar e configurar a porta com atravรฉs da qual ira fazer comunicaรงao\n# para saber a sua porta, execute no terminal :\n# python -m serial.tools.list_ports\n# se estiver usando windows, o gerenciador de dispositivos informa a porta\n\n#use uma das 3 opcoes para atribuir ร  variรกvel a porta usada\n#serialName = \"/dev/ttyACM0\" # Ubuntu (variacao de)\n#serialName = \"/dev/tty.usbmodem1411\" # Mac (variacao de)\nserialName = \"COM3\" # Windows(variacao de)\n\nimageR=\"img/imagem.png\"\nimageW=\"img/imagemCopia.png\"\n\n\n\n\ndef cria_sequencia():\n tamanho=random.randint(10,30)\n comandos=[(255).to_bytes(2, byteorder='big'),(0).to_bytes(1, byteorder='big'),(15).to_bytes(1, byteorder='big'),(240).to_bytes(1, byteorder='big'),(65280).to_bytes(2, byteorder='big'),(255).to_bytes(1, byteorder='big')]\n lista=[0]*(tamanho+1)\n for i in range (tamanho):\n com=random.choice(comandos)\n if len(com)==2:\n com=(int.from_bytes(com, byteorder='big')+131072).to_bytes(3, byteorder='big')\n lista[i+1]=com\n lista[0]=(170).to_bytes(1, byteorder='big')\n lista.append((1).to_bytes(1, byteorder='big'))\n return lista,tamanho\n\ndef main():\n try:\n #declaramos um objeto do tipo enlace com o nome \"com\". Essa รฉ a camada inferior ร  aplicaรงรฃo. Observe que um parametro\n #para declarar esse objeto รฉ o nome da porta.\n com1 = enlace('COM3')\n \n \n # Ativa comunicacao. Inicia os threads e a comunicaรงรฃo seiral \n com1.enable()\n print ('Comunicaรงรฃo aberta')\n #Se chegamos atรฉ aqui, a comunicaรงรฃo foi aberta com sucesso. 
Faรงa um print para informar.\n\n #txBuffer: sequรชncia de comandos a ser enviado para o outro computador.\n \n seq,tam=cria_sequencia()\n i=1\n for comand in seq:\n \n print ('Transmissรฃo do comando {} comeรงando'.format(i))\n txBuffer=comand\n print ('Enviando {}'.format(txBuffer))\n com1.sendData(np.asarray(txBuffer))\n i+=1\n time.sleep(0.2)\n print (\"Comandos enviados pelo cliente: {}\".format(tam))\n rxBuffer,nRx=com1.getData(1)\n print (\"Comandos recebidos pelo servidor: {}\".format(int.from_bytes(rxBuffer, byteorder='big')))\n \n print(\"-------------------------\")\n print(\"Comunicaรงรฃo encerrada\")\n print(\"-------------------------\")\n com1.disable()\n \n except Exception as erro:\n print(\"ops! :-\\\\\")\n print(erro)\n com1.disable()\n \n\n #so roda o main quando for executado do terminal ... se for chamado dentro de outro modulo nao roda\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.6809492707252502, "alphanum_fraction": 0.7046802639961243, "avg_line_length": 29.959182739257812, "blob_id": "57bceee3dc64cdfe085ffc24c0d722327e48c5e8", "content_id": "fa87c97030a8b67eec25dfa22678406484ab6003", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1530, "license_type": "no_license", "max_line_length": 93, "num_lines": 49, "path": "/p8/decode.py", "repo_name": "thiagohpr/projetosCamada", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\"\"\"Show a text-mode spectrogram using live microphone data.\"\"\"\n\n#Importe todas as bibliotecas\nimport numpy as np\nimport sounddevice as sd\nimport soundfile as sf\nimport matplotlib.pyplot as plt\nfrom scipy.fftpack import fft\nfrom scipy import signal as window\nfrom suaBibSignal import signalMeu\nfrom peakutils.plot import plot as pplot\nfrom funcoes_LPF import LPF\n\n\ndef main():\n figure,axs = plt.subplots(1,2)\n print(\"Inicializando decoder\")\n\n # 8. Verifique que o sinal recebido tem a banda dentro de 10kHz e 18kHz (faรงa o Fourier).\n sinal=signalMeu()\n modulado, fs = sf.read('modulado.wav')\n\n # 9. Demodule o รกudio enviado pelo seu colega.\n print('Demodulando o sinal')\n x2,portadora=sinal.generateSin(14000,1,5,fs)\n demodulado=modulado*portadora\n\n xf, yf = sinal.calcFFT(demodulado, fs)\n axs[0].set_title('Grรกfico 6: Sinal demodulado no domรญnio da frequรชncia')\n axs[0].plot(xf,yf)\n \n # 10. Filtre as frequรชncias superiores a 4kHz.\n print('Filtrando o sinal com Low Pass Filter')\n low_pass_freq=4000\n demodulado_lpf=LPF(demodulado,low_pass_freq,fs)\n\n xf2, yf2 = sinal.calcFFT(demodulado_lpf, fs)\n axs[1].set_title('Grรกfico 7: Sinal demodulado e filtrado no domรญnio da frequรชncia')\n axs[1].plot(xf2,yf2)\n \n plt.show()\n # 11. 
Execute o รกudio do sinal demodulado e verifique que novamente รฉ audรญvel.\n print('Executando o รกudio demodulado')\n sd.play(demodulado_lpf, fs)\n sd.wait()\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.6436126828193665, "alphanum_fraction": 0.6708706021308899, "avg_line_length": 32.671234130859375, "blob_id": "f09b5cbc71a3a74df5ce5b3ab32eb01153302ea5", "content_id": "9cd3ac81ad4ebefacd31912f03b66bbf182e6e98", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2484, "license_type": "no_license", "max_line_length": 116, "num_lines": 73, "path": "/p8/encode.py", "repo_name": "thiagohpr/projetosCamada", "src_encoding": "UTF-8", "text": "#importe as bibliotecas\nimport numpy as np\nimport sounddevice as sd\nimport soundfile as sf\nimport matplotlib.pyplot as plt\nfrom scipy.fftpack import fft\nfrom scipy import signal as window\nfrom suaBibSignal import signalMeu\nfrom funcoes_LPF import LPF\n\ndef main():\n sinal=signalMeu()\n print(\"Inicializando encoder\")\n figure,axs = plt.subplots(2, 3)\n # 1. Faรงa a leitura de um arquivo de รกudio .wav de poucos segundos (entre 2 e 5) previamente gravado com uma\n # taxa de amostragem de 44100Hz.\n data, fs = sf.read('my-file2.wav')\n\n # 2. Normalize esse sinal: multiplicar o sinal por uma constante (a maior possรญvel), de modo que todos os pontos\n # do sinal permaneรงam dentro do intervalo[-1,1].\n time=5\n n = time*fs\n x = np.linspace(0.0, time, n)\n k=1/max(abs(data))\n print(f'Plotando o sinal com k={k}')\n axs[0,0].set_title(f'Grรกfico 1: Sinal k*data, com k={k}')\n axs[0,0].plot(x,k*data)\n\n k_data=k*data\n\n # 3. Filtre e elimine as frequรชncias acima de 4kHz.\n print('Filtrando o sinal com Low Pass Filter')\n low_pass_freq=4000\n k_data_lpf=LPF(k_data,low_pass_freq,fs)\n\n print(f'Plotando o sinal com k={k}')\n axs[0,1].set_title(f'Grรกfico 2: Sinal filtrado no tempo')\n axs[0,1].plot(x,k_data_lpf)\n\n xf, yf = sinal.calcFFT(k_data_lpf, fs)\n axs[0,2].set_title('Grรกfico 3: Sinal filtrado na frequรชncia')\n axs[0,2].plot(xf,yf)\n\n # 4. Reproduza o sinal e verifique que continua audรญvel (com menos qualidade).\n sd.play(k_data_lpf, fs)\n sd.wait()\n\n # 5. Module esse sinal de รกudio em AM com portadora de 14 kHz. (Essa portadora deve ser uma senoide\n # comeรงando em zero)\n x2,portadora=sinal.generateSin(14000,1,5,fs)\n\n sinal_modulado=(k_data_lpf)*portadora\n axs[1,0].set_title(f'Grรกfico 4: Sinal modulado no tempo')\n axs[1,0].plot(x,sinal_modulado)\n \n xf2, yf2 = sinal.calcFFT(sinal_modulado, fs)\n axs[1,1].set_title('Grรกfico 5: Sinal modulado na frequรชncia')\n axs[1,1].plot(xf2,yf2)\n \n # 6. Execute e verifique que nรฃo รฉ perfeitamente audรญvel.\n\n #Estรก comentado pois o รกudio dรณi o ouvido.\n # sd.play(sinal_modulado, fs)\n # sd.wait()\n\n # 7. 
Envie um arquivo com รกudio modulado para sua dupla ou, mais divertido, execute o รกudio e peรงa para que\n # seu colega grave o รกudio modulado.\n sf.write('modulado.wav', sinal_modulado, fs)\n print('Arquivo de รกudio modulado salvo!')\n plt.show()\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.6372625827789307, "alphanum_fraction": 0.6744730472564697, "avg_line_length": 30.760330200195312, "blob_id": "7bce9012839c6b96ddc92c88df4c58350dc6cd55", "content_id": "925472240ade4d1b1f92a246255e2daf521539c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3859, "license_type": "no_license", "max_line_length": 143, "num_lines": 121, "path": "/p7/decode_versaoAlunos.py", "repo_name": "thiagohpr/projetosCamada", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\"\"\"Show a text-mode spectrogram using live microphone data.\"\"\"\n\n#Importe todas as bibliotecas\nimport time\nimport numpy as np\nimport sounddevice as sd\nimport matplotlib.pyplot as plt\nfrom scipy.fftpack import fft\nfrom scipy import signal as window\nfrom suaBibSignal import signalMeu\nimport peakutils\nfrom peakutils.plot import plot as pplot\n\n\n\nfrequencias={(941,1336):0,(697,1209):1,(697,1336):2,(697,1477):3,(770,1209):4,(770,1336):5,(770,1477):6,(852,1209):7,(852,1336):8,(852,1477):9}\n\n\ndef identificaPicos(indexes):\n frequencias2=[941,1336,697,1209,1477,770,852]\n erro=1\n lista=[]\n for pico in indexes:\n for freq in frequencias2:\n if pico>=freq-erro and pico<=freq+erro:\n lista.append(freq)\n if len(lista)!=2:\n print(len(lista))\n return \"Erro\"\n else:\n return lista[0],lista[1]\n\ndef identificaTecla(f):\n return frequencias[f]\n\n#funcao para transformas intensidade acustica em dB\ndef todB(s):\n sdB = 10*np.log10(s)\n return(sdB)\n\n\ndef main():\n \n #declare um objeto da classe da sua biblioteca de apoio (cedida)\n sinal=signalMeu()\n #declare uma variavel com a frequencia de amostragem, sendo 44100\n fs=44100\n #voce importou a bilioteca sounddevice como, por exemplo, sd. entao\n # os seguintes parametros devem ser setados:\n \n sd.default.samplerate = fs #taxa de amostragem\n sd.default.channels = 2 #voce pode ter que alterar isso dependendo da sua placa\n \n\n\n # faca um print na tela dizendo que a captacao comecarรก em n segundos. e entao \n #use um time.sleep para a espera\n espera=5\n print(f'A captaรงรฃo comeรงarรก em {espera} segundos.')\n time.sleep(espera)\n \n #faca um print informando que a gravacao foi inicializada\n print('Inรญcio da gravaรงรฃo')\n \n #declare uma variavel \"duracao\" com a duracao em segundos da gravacao. poucos segundos ... \n duration = 5\n numAmostras=duration*fs\n #calcule o numero de amostras \"numAmostras\" que serao feitas (numero de aquisicoes)\n \n audio = sd.rec(int(numAmostras), fs, channels=1)\n sd.wait()\n print(\"... FIM\")\n \n \n #analise sua variavel \"audio\". pode ser um vetor com 1 ou 2 colunas, lista ...\n #grave uma variavel com apenas a parte que interessa (dados)\n y=[]\n for v in audio:\n y.append(v[0])\n\n # use a funcao linspace e crie o vetor tempo. Um instante correspondente a cada amostra!\n t = np.linspace(0,duration,numAmostras)\n\n # plot do grafico รกudio vs tempo!\n\n plt.plot(t, y)\n plt.title('รudio recebido no domรญnio do Tempo')\n \n \n ## Calcula e exibe o Fourier do sinal audio. 
como saida tem-se a amplitude e as frequencias\n xf, yf = sinal.calcFFT(y, fs)\n plt.figure(\"F(y)\")\n plt.plot(xf,yf)\n plt.grid()\n plt.title('Fourier audio')\n \n\n #esta funcao analisa o fourier e encontra os picos\n #voce deve aprender a usa-la. ha como ajustar a sensibilidade, ou seja, o que รฉ um pico?\n #voce deve tambem evitar que dois picos proximos sejam identificados, pois pequenas variacoes na\n #frequencia do sinal podem gerar mais de um pico, e na verdade tempos apenas 1.\n \n index = peakutils.indexes(yf,thres=0.3,min_dist=100)\n pplot(xf, yf, index)\n #printe os picos encontrados! \n #https://peakutils.readthedocs.io/en/latest/reference.html\n #https://peakutils.readthedocs.io/en/latest/tutorial_a.html\n #encontre na tabela duas frequencias proximas ร s frequencias de pico encontradas e descubra qual foi a tecla\n #print a tecla.\n # print(f'Frequรชncia 1:{xf[index[0]]}')\n # print(f'Frequรชncia 2:{xf[index[1]]}')\n freq=[]\n for i in index:\n freq.append(xf[i])\n print(\"Apertou a tecla: {}\".format(identificaTecla(identificaPicos(freq))))\n ## Exibe grรกficos\n plt.show()\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.522457480430603, "alphanum_fraction": 0.5473211407661438, "avg_line_length": 33.25274658203125, "blob_id": "5cc5fd2f1a5157afa003a68ca161310108cee2b2", "content_id": "bc56095f91fb49acf5a78fcfd2f1c197c82be708", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6279, "license_type": "no_license", "max_line_length": 125, "num_lines": 182, "path": "/p3Datagrama/client/client.py", "repo_name": "thiagohpr/projetosCamada", "src_encoding": "UTF-8", "text": "#####################################################\n# Camada Fรญsica da Computaรงรฃo\n#Carareto\n#11/08/2020\n#Aplicaรงรฃo\n####################################################\n\n\n#esta รฉ a camada superior, de aplicaรงรฃo do seu software de comunicaรงรฃo serial UART.\n#para acompanhar a execuรงรฃo e identificar erros, construa prints ao longo do cรณdigo! 
\n\n\nfrom threading import Timer\nfrom enlace import *\nimport time\nimport numpy as np\nimport random\n# voce deverรก descomentar e configurar a porta com atravรฉs da qual ira fazer comunicaรงao\n# para saber a sua porta, execute no terminal :\n# python -m serial.tools.list_ports\n# se estiver usando windows, o gerenciador de dispositivos informa a porta\n\n#use uma das 3 opcoes para atribuir ร  variรกvel a porta usada\n#serialName = \"/dev/ttyACM0\" # Ubuntu (variacao de)\n#serialName = \"/dev/tty.usbmodem1411\" # Mac (variacao de)\nserialName = \"COM3\" # Windows(variacao de)\n\n\n\ndef calcula_quant(tamanho):\n if tamanho%114==0:\n return tamanho//114\n else:\n return tamanho//114 + 1\n\ndef cria_head(n_atual,n_total,n_bytespay):\n head=[0]*10\n head[0]=(170).to_bytes(1, byteorder='big')\n head[1]=(n_atual).to_bytes(1, byteorder='big')\n head[2]=(n_total).to_bytes(1, byteorder='big')\n head[3]=(n_bytespay).to_bytes(1, byteorder='big')\n head[4]=(170).to_bytes(1, byteorder='big')\n head[5]=(170).to_bytes(1, byteorder='big')\n head[6]=(170).to_bytes(1, byteorder='big')\n head[7]=(170).to_bytes(1, byteorder='big')\n head[8]=(170).to_bytes(1, byteorder='big')\n head[9]=(170).to_bytes(1, byteorder='big')\n \n return head\n\ndef cria_lista(binario):\n lista=[]\n for bit in binario:\n lista.append((bit).to_bytes(1, byteorder='big'))\n return lista\n\ndef cria_datagrama(arquivo):\n #Retorna uma lista de datagramas (cada elemento รฉ uma lista de bygtes com head, payload e eop) a partir de um arquivo\n bytes=cria_lista(arquivo)\n payload=114\n \n quant_pay=calcula_quant(len(arquivo))\n lista_datagramas=[0]*(quant_pay+1)\n i=0\n for datagrama in range(quant_pay+1):\n\n if i==0:\n head=cria_head(0,quant_pay,0)\n bytes_pay=[]\n elif i!=quant_pay:\n #Se nรฃo รฉ o รบltimo pacote\n head=cria_head(i,quant_pay,payload)\n bytes_pay=[0]*payload\n for quant in range(payload):\n bytes_pay[quant]=bytes.pop(0)\n \n else:\n #Se รฉ o รบltimo pacote\n head=cria_head(i,quant_pay,len(arquivo)%payload)\n bytes_resto=(len(arquivo))%payload\n bytes_pay=[0]*bytes_resto\n for quant in range(bytes_resto):\n bytes_pay[quant]=bytes.pop(0)\n\n eop=[(170).to_bytes(1, byteorder='big')]*3\n eop.append((204).to_bytes(1, byteorder='big'))\n data=head+bytes_pay+eop\n lista_datagramas[i]=data\n i+=1\n return lista_datagramas\n\ndef main():\n try:\n #declaramos um objeto do tipo enlace com o nome \"com\". Essa รฉ a camada inferior ร  aplicaรงรฃo. Observe que um parametro\n #para declarar esse objeto รฉ o nome da porta.\n com1 = enlace('COM3')\n \n \n # Ativa comunicacao. Inicia os threads e a comunicaรงรฃo seiral \n com1.enable()\n print ('Comunicaรงรฃo aberta')\n #Se chegamos atรฉ aqui, a comunicaรงรฃo foi aberta com sucesso. 
Faรงa um print para informar.\n \n txBuffer=(255).to_bytes(114*2+10, byteorder='big')\n datagramas=cria_datagrama(txBuffer)\n hand=True\n print (\"Iniciando o Handshake\")\n for byte in datagramas[0]:\n txBuffer=byte\n com1.sendData(np.asarray(txBuffer))\n time.sleep(0.2)\n \n\n\n while hand:\n print(\"Esperando handshake do server.\")\n rxBuffer,nRx=com1.getData(1)\n print(rxBuffer)\n if rxBuffer==(255).to_bytes(1, byteorder='big'):\n res=input(\"Reenviar: S/N\")\n if res==\"S\":\n com1.sendData((101).to_bytes(1, byteorder='big'))\n print (\"Reenviando o Handshake\")\n com1.sendData(np.asarray(datagramas[0]))\n \n else:\n hand=False\n programa=False\n \n else:\n hand=False\n programa=True\n com1.sendData((100).to_bytes(1, byteorder='big'))\n print(\"Recebeu confirmaรงรฃo\")\n \n i=1 \n print (\"Iniciando envio do datagrama\")\n while programa:\n #enviar o datagrama[i] byte a byte\n \n if i==int.from_bytes(datagramas[0][2], byteorder='big')+1:\n print(\"Envio de todos os pacotes confirmados!\")\n programa=False\n receber=True\n else:\n print ('Transmissรฃo do pacote {} comeรงando'.format(i))\n for byte in datagramas[i]:\n txBuffer=byte\n com1.sendData(np.asarray(txBuffer))\n time.sleep(0.2)\n\n rxBuffer,nRx=com1.getData(1)\n print(rxBuffer)\n if rxBuffer==(100).to_bytes(1, byteorder='big'):\n print(\"Servidor confirmou o pacote.\")\n i+=1\n\n elif rxBuffer==(101).to_bytes(1, byteorder='big'):\n print(\"Servidor recebeu o pacote errado. Reenviando.\")\n elif rxBuffer==(255).to_bytes(1, byteorder='big'):\n print(\"Timeout no recebimento da confirmaรงรฃo. Finalizando o programa.\")\n programa=False\n receber=False\n\n\n if receber:\n rxBuffer,nRx=com1.getData(1)\n print(\"Cliente recebeu {}.\".format(rxBuffer))\n print(\"-------------------------\")\n print(\"Comunicaรงรฃo encerrada\")\n print(\"-------------------------\")\n com1.disable()\n \n except Exception as erro:\n print(\"ops! :-\\\\\")\n print(erro)\n com1.disable()\n \n\n # so roda o main quando for executado do terminal ... se for chamado dentro de outro modulo nao roda\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.44533780217170715, "alphanum_fraction": 0.46677833795547485, "avg_line_length": 40.845794677734375, "blob_id": "bd968000e85fa594404cad498f363f8c8953aac2", "content_id": "3b78f2fc3646062bbef7f5deb31570900d4158ba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8995, "license_type": "no_license", "max_line_length": 128, "num_lines": 214, "path": "/p4Protocolo/server/aplicacao.py", "repo_name": "thiagohpr/projetosCamada", "src_encoding": "UTF-8", "text": "#####################################################\n# Camada Fรญsica da Computaรงรฃo\n#Carareto\n#11/08/2020\n#Aplicaรงรฃo\n####################################################\n\n\n#esta รฉ a camada superior, de aplicaรงรฃo do seu software de comunicaรงรฃo serial UART.\n#para acompanhar a execuรงรฃo e identificar erros, construa prints ao longo do cรณdigo! 
\n\n\nfrom enlace import *\nimport time\nimport numpy as np\n\n# voce deverรก descomentar e configurar a porta com atravรฉs da qual ira fazer comunicaรงao\n# para saber a sua porta, execute no terminal :\n# python -m serial.tools.list_ports\n# se estiver usando windows, o gerenciador de dispositivos informa a porta\n\n#use uma das 3 opcoes para atribuir ร  variรกvel a porta usada\n#serialName = \"/dev/ttyACM0\" # Ubuntu (variacao de)\n#serialName = \"/dev/tty.usbmodem1411\" # Mac (variacao de)\nserialName = \"COM3\" # Windows(variacao de)\n\nmeu_id=0\ndef cria_head(tipo,n_atual,n_total,n_bytespay):\n id_servidor=0\n head=[0]*10\n head[0]=(tipo).to_bytes(1, byteorder='big')\n head[1]=(0).to_bytes(1, byteorder='big')#id sensor\n head[2]=(id_servidor).to_bytes(1, byteorder='big')#id servidor\n head[3]=(n_total).to_bytes(1, byteorder='big')\n head[4]=(n_atual).to_bytes(1, byteorder='big')\n if tipo==1:\n #id arquivo em handshake\n head[5]=(0).to_bytes(1, byteorder='big')\n\n else:\n #tamanho do payload\n head[5]=(n_bytespay).to_bytes(1, byteorder='big')\n head[6]=(n_atual).to_bytes(1, byteorder='big')\n head[7]=(170).to_bytes(1, byteorder='big')\n head[8]=(170).to_bytes(1, byteorder='big')\n head[9]=(170).to_bytes(1, byteorder='big')\n return head\n\ndef cria_log(formato,head):\n \n tipo=str(int.from_bytes(head[0], byteorder='big'))\n tamanho_total=str(int.from_bytes(head[5], byteorder='big')+14)\n linha=time.ctime()+\" / \"+formato+\" / \"+tipo+\" / \"+tamanho_total\n if tipo == '3':\n crc=(hex(int.from_bytes(head[8], byteorder='big'))[2:]+hex(int.from_bytes(head[9], byteorder='big'))[2:]).upper()\n linha+=\" / \"+str(int.from_bytes(head[4], byteorder='big'))+\" / \"+str(int.from_bytes(head[3], byteorder='big'))+\" / \"+crc\n return linha\n\ndef log_envio(head):\n with open(\"C:/Users/thpro/Desktop/Camada Fรญsica/projetosCamada/p4Protocolo/server/log.txt\", \"a\") as file:\n file.write(cria_log(\"envio\",head))\n file.write('\\n')\ndef log_recebeu(head):\n with open(\"C:/Users/thpro/Desktop/Camada Fรญsica/projetosCamada/p4Protocolo/server/log.txt\", \"a\") as file:\n file.write(cria_log(\"receb\",head))\n file.write('\\n')\ndef cria_lista(binario):\n lista=[]\n for bit in binario:\n lista.append((bit).to_bytes(1, byteorder='big'))\n return lista\neop1=b'\\xaa'\neop2=b'\\xff'\neop_fixo=[eop2,eop1,eop2,eop1]\n\ndef main():\n try:\n #declaramos um objeto do tipo enlace com o nome \"com\". Essa รฉ a camada inferior ร  aplicaรงรฃo. Observe que um parametro\n #para declarar esse objeto รฉ o nome da porta.\n com1 = enlace('COM3')\n \n # Ativa comunicacao. 
Inicia os threads e a comunicaรงรฃo seiral \n com1.enable()\n with open(\"C:/Users/thpro/Desktop/Camada Fรญsica/projetosCamada/p4Protocolo/server/log.txt\", \"w\") as file:\n file.write(\"Comunicaรงรฃo aberta\")\n file.write('\\n')\n print(\"Comunicacao aberta\")\n encerrar=False\n ocioso = True\n startTimer2 = int(time.time())\n while ocioso:\n rxBuffer, nRx = com1.getData(14)\n if rxBuffer[0] == 1:\n print(\"mensagem certa\")\n if rxBuffer[2]==meu_id:\n log_recebeu(cria_lista(rxBuffer))\n print(\"รฉ pra mim\")\n ocioso = False\n numPckg = rxBuffer[3]\n print(\"numero certo\")\n cont = 1\n cont_anterior=0\n msgTipo2=cria_head(2,cont,numPckg,0)\n \n msgTipo2=msgTipo2+eop_fixo\n print(msgTipo2)\n com1.sendData(np.asarray(msgTipo2))\n log_envio(msgTipo2)\n else:\n time_now = int(time.time())\n if time_now >= startTimer2 + 20:\n ocioso = True\n msgTipo5=cria_head(5,0,0,0)\n msgTipo5=msgTipo5+eop_fixo \n com1.sendData(np.asarray(msgTipo5))\n log_envio(msgTipo5)\n encerrar=True\n if encerrar==True:\n break\n time.sleep(1)\n \n\n if encerrar==False:\n arquivo=[0]*numPckg\n while cont <= numPckg:\n recebeu = False\n seconds_to_go_for = 20\n timer2 = int(time.time())\n\n while not recebeu:\n if encerrar==False:\n head, headn = com1.getData(10)\n print(head)\n if len (head)>1:\n payload, payloadn = com1.getData(head[5])\n print(payload)\n eop, eopn = com1.getData(4)\n print(eop)\n if head[0] == 3:\n recebeu = True\n log_recebeu(cria_lista(head))\n print(\"mensagem certa\")\n print (head[7])\n if head[4] == head[7] + 1:\n if head[4]==cont_anterior+1:\n if len(payload) == head[5]:\n arquivo[cont-1] = payload\n cont_anterior=cont\n msgTipo4=cria_head(4,cont,numPckg,0)\n msgTipo4=msgTipo4+eop_fixo\n\n com1.sendData(np.asarray(msgTipo4))\n log_envio(msgTipo4)\n cont+=1\n print(\"enviando confirmaรงรฃo\")\n print(cont)\n print(numPckg)\n else:\n msgTipo6=cria_head(6,cont,numPckg,0)\n msgTipo6=msgTipo6+eop_fixo \n com1.sendData(np.asarray(msgTipo6))\n log_envio(msgTipo6)\n print(\"erro na quantidade de bytes\")\n else:\n msgTipo6=cria_head(6,cont,numPckg,0)\n msgTipo6=msgTipo6+eop_fixo \n com1.sendData(np.asarray(msgTipo6))\n log_envio(msgTipo6)\n print(\"erro nรบmero do pacote\")\n else:\n msgTipo6=cria_head(6,cont,numPckg,0)\n msgTipo6=msgTipo6+eop_fixo \n com1.sendData(np.asarray(msgTipo6))\n log_envio(msgTipo6)\n print(\"erro nรบmero do pacote\")\n else:\n time.sleep(1)\n\n time_now = int (time.time())\n if time_now >= timer2 + seconds_to_go_for:\n print(\"Timer 2 encerrado. Timeout de envio do arquivo, finalizando a comunicaรงรฃo.\")\n ocioso = True\n\n msgTipo5=cria_head(5,cont,numPckg,0)\n msgTipo5=msgTipo5+eop_fixo \n\n com1.sendData(np.asarray(msgTipo5))\n log_envio(msgTipo5)\n encerrar=True\n\n else:\n if head == (255).to_bytes(1, byteorder='big'):\n msgTipo4=cria_head(4,cont,numPckg,0)\n msgTipo4=msgTipo4+eop_fixo \n com1.sendData(np.asarray(msgTipo4))\n log_envio(msgTipo4)\n else:\n break\n if encerrar==True:\n break\n\n com1.disable()\n with open(\"C:/Users/thpro/Desktop/Camada Fรญsica/projetosCamada/p4Protocolo/server/log.txt\", \"a\") as file:\n file.write(\"Comunicaรงรฃo encerrada\")\n\n except Exception as erro:\n print(\"ops! :-\\\\\")\n print(erro)\n com1.disable()\n \n\n #so roda o main quando for executado do terminal ... se for chamado dentro de outro modulo nao roda\nif __name__ == \"__main__\":\n main()\n" } ]
11
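The client and server aplicacao.py files above exchange datagrams framed as a 10-byte head (cria_head), a payload, and the fixed 4-byte EOP b"\xff\xaa\xff\xaa" (eop_fixo). Below is a minimal sketch of that framing as a self-contained round trip, runnable without the serial link. The head layout is taken from the server's cria_head; the frame/unframe helper names are hypothetical, not functions from the repository:

```python
# Sketch only: datagram framing in the spirit of cria_head()/eop_fixo above.
# Assumed head layout (from the server's cria_head): [tipo, id_sensor,
# id_servidor, n_total, n_atual, payload_len, n_atual, 170, 170, 170].
# frame()/unframe() are hypothetical helpers, not repository code.
EOP = b"\xff\xaa\xff\xaa"

def frame(tipo: int, n_atual: int, n_total: int, payload: bytes) -> bytes:
    head = bytes([tipo, 0, 0, n_total, n_atual, len(payload), n_atual, 170, 170, 170])
    return head + payload + EOP

def unframe(packet: bytes):
    head, rest = packet[:10], packet[10:]
    if not rest.endswith(EOP):
        raise ValueError("missing EOP")
    payload = rest[:-len(EOP)]
    if len(payload) != head[5]:
        raise ValueError("payload length disagrees with head[5]")
    return head, payload

if __name__ == "__main__":
    head, payload = unframe(frame(3, 1, 5, b"hello"))
    assert head[0] == 3 and payload == b"hello"
```

Carrying the declared payload length in head[5] is what lets the server above read a fixed-size head first and then call com1.getData(head[5]) before checking for the EOP.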
DanielSank/smashdb
https://github.com/DanielSank/smashdb
7020d916accf906eb562527826e0f33645b3bb5c
6ddf45aed0a95eb3e51306e4416751ea822757a5
1be545007be7a94278bb0ace273b8265f07eb058
refs/heads/master
2021-01-13T04:44:35.453541
2017-01-18T10:52:00
2017-01-18T10:52:00
79,099,559
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.7836257219314575, "alphanum_fraction": 0.7836257219314575, "avg_line_length": 41.75, "blob_id": "6a58b4d35466a7266d9edec2278d5f93564f6785", "content_id": "7dd5e6b7e990c8c7688764fc675d889ec5a0bb6d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 171, "license_type": "no_license", "max_line_length": 128, "num_lines": 4, "path": "/README.md", "repo_name": "DanielSank/smashdb", "src_encoding": "UTF-8", "text": "# smashdb\nDatabase of smash games played\n\nWe're in the earliest stages of development. The [issue tracker](https://github.com/DanielSank/smashdb/issues) is our todo list.\n" }, { "alpha_fraction": 0.6661322712898254, "alphanum_fraction": 0.6681362986564636, "avg_line_length": 26.711111068725586, "blob_id": "348786cf03f352fc9a32a80e1a60e4806fdd9e3f", "content_id": "3656e06df9de84bb2095ce9e2fcdea8cbef96367", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2495, "license_type": "no_license", "max_line_length": 80, "num_lines": 90, "path": "/models.py", "repo_name": "DanielSank/smashdb", "src_encoding": "UTF-8", "text": "from sqlalchemy import Column, Integer, String, Date, ForeignKey\nfrom sqlalchemy import UniqueConstraint\nfrom sqlalchemy.orm import relationship\nfrom sqlalchemy.ext.declarative import declarative_base\n\n\nPLAYER_NAME_LENGTH = 64\nTOURNAMENT_NAME_LENGTH = 255\n\n\nBase = declarative_base()\n\n\nclass Player(Base):\n __tablename__ = 'players'\n\n # local\n id = Column(Integer, primary_key=True)\n name = Column(String(PLAYER_NAME_LENGTH), unique=True)\n\n # one -> many\n placements = relationship('Placement', back_populates='player')\n\n\nclass Game(Base):\n __tablename__ = 'games'\n\n # local\n id = Column(Integer, primary_key=True)\n stocks_remaining = Column(Integer, nullable=True)\n order_in_set = Column(Integer, nullable=True)\n\n # many -> one\n winner_id = Column(Integer, ForeignKey('players.id'), nullable=False)\n winner = relationship('Player', foreign_keys=[winner_id])\n loser_id = Column(Integer, ForeignKey('players.id'), nullable=False)\n loser = relationship('Player', foreign_keys=[loser_id])\n\n set_id = Column(Integer, ForeignKey('sets.id'), nullable=True)\n set = relationship('Set', back_populates='games')\n\n __table_args__ = (UniqueConstraint(\n 'set_id',\n 'order_in_set',\n name='_set_order_in_set_uc'),)\n\n\nclass Set(Base):\n __tablename__ = 'sets'\n\n id = Column(Integer, primary_key=True)\n\n # many -> one\n tournament_id = Column(Integer, ForeignKey('tournaments.id'), nullable=True)\n tournament = relationship('Tournament', back_populates='sets')\n\n # one -> many\n games = relationship('Game', back_populates='set')\n\n\nclass Tournament(Base):\n __tablename__ = 'tournaments'\n\n # local\n id = Column(Integer, primary_key=True)\n name = Column(String(TOURNAMENT_NAME_LENGTH), unique=True)\n start_date = Column(Date)\n end_date = Column(Date)\n\n # one -> many\n placements = relationship('Placement', back_populates='tournament')\n sets = relationship('Set', back_populates='tournament')\n\n\nclass Placement(Base):\n __tablename__ = 'placements'\n\n # local\n id = Column(Integer, primary_key=True)\n place = Column(Integer, nullable=False)\n\n # many -> one\n player_id = Column(Integer, ForeignKey('players.id'), nullable=False)\n player = relationship('Player', back_populates='placements')\n\n tournament_id = Column(\n Integer,\n ForeignKey('tournaments.id'),\n nullable=False)\n tournament = 
relationship('Tournament', back_populates='placements')\n\n" }, { "alpha_fraction": 0.6362135410308838, "alphanum_fraction": 0.6362135410308838, "avg_line_length": 26.938461303710938, "blob_id": "a86f1a5092695bc04c5fd14a6e71394f07ca1185", "content_id": "f1dd22cb4f2014165814b923a37807b624951be5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1817, "license_type": "no_license", "max_line_length": 78, "num_lines": 65, "path": "/client.py", "repo_name": "DanielSank/smashdb", "src_encoding": "UTF-8", "text": "from __future__ import print_function\n\n\nimport os\n\n\nimport sqlalchemy as sa\nimport sqlalchemy.orm as orm\nfrom sqlalchemy.orm.exc import NoResultFound\nimport yaml\n\n\nimport smashdb.models as models\n\n\n# UTILITY\n\ndef get_url(role=None):\n if role is None:\n role = os.environ['SMASHDB_ROLE']\n home = os.path.expanduser('~')\n config_file_name = os.path.join(home, \".smashdb\", \"config.yml\")\n with open(config_file_name, 'r') as stream:\n parameters = yaml.load(stream)[role]\n url = r'mysql+mysqldb://{}:{}@{}'.format(\n parameters['USERNAME'],\n parameters['PASSWORD'],\n parameters['HOST'])\n return url\n\n\ndef make_session(role, echo=False):\n url = get_url(role)\n engine = sa.create_engine(url, echo=echo)\n return orm.sessionmaker(bind=engine)()\n\n\ndef get_or_create(session, model, get_params, create_params=None):\n \"\"\"Get a or create an instance in the database.\n Args:\n session:\n model: Class of the object to create.\n get_params (dict): parameters needed to uniquely identify an already\n existing entity.\n create_params (dict): Additional parameters needed if the entity does\n not already exist and has to be created\n\n Returns:\n an ORM object representing the instance.\n (bool): True if the instance was created, False if it already existed.\n \"\"\"\n if create_params is None:\n create_params = {}\n try:\n instance = session.query(model).filter_by(**get_params).one()\n except NoResultFound:\n instance = None\n if instance:\n created = False\n else:\n all_params = dict(get_params.items() + create_params.items())\n instance = model(**all_params)\n session.add(instance)\n created = True\n return instance, created\n\n" }, { "alpha_fraction": 0.8500000238418579, "alphanum_fraction": 0.8500000238418579, "avg_line_length": 8.75, "blob_id": "a6864223c5f9a54c1df2dca225ec66041aee1649", "content_id": "25c9f82c71149b05749e3dfef3590b32d060d1ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 40, "license_type": "no_license", "max_line_length": 12, "num_lines": 4, "path": "/requirements.txt", "repo_name": "DanielSank/smashdb", "src_encoding": "UTF-8", "text": "alembic\nmysql-python\nsqlalchemy\npyyaml\n\n" } ]
4
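A usage sketch for smashdb's get_or_create helper above, pointed at an in-memory SQLite engine instead of the MySQL URL read from ~/.smashdb/config.yml. One caveat worth flagging: the dict(get_params.items() + create_params.items()) line inside get_or_create concatenates items() results, which only works on Python 2 (consistent with the repo's print_function import); on Python 3 it raises TypeError.

```python
# Sketch only, assuming Python 2 (see the items() + items() caveat above)
# and the Player model / get_or_create helper defined in this repository.
import sqlalchemy as sa
import sqlalchemy.orm as orm

from smashdb.client import get_or_create
from smashdb.models import Base, Player

engine = sa.create_engine("sqlite://")    # throwaway in-memory database
Base.metadata.create_all(engine)
session = orm.sessionmaker(bind=engine)()

player, created = get_or_create(session, Player, {"name": "Mango"})
assert created                            # first call inserts
again, created = get_or_create(session, Player, {"name": "Mango"})
assert not created and again is player    # second call finds the same row
session.commit()
```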
MagicSword/gae-flask-html5
https://github.com/MagicSword/gae-flask-html5
d18c3d9f664c9f2b1fa810f8db21bef61431d0bf
5a064e1c87d18b3eeaa6f0ff95f5c11aec185be1
e4db011dc8be1fb4b2420771e16ccb8fbb860196
refs/heads/master
2021-01-16T17:41:57.958383
2011-04-05T01:18:30
2011-04-05T01:18:30
1,609,044
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.7260869741439819, "alphanum_fraction": 0.7260869741439819, "avg_line_length": 22, "blob_id": "ca9a1e91ab696058733b7c6d1e4ca9fb90db96a2", "content_id": "59e963c9bc44bdfed94c25e67ac8ef637de51786", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 230, "license_type": "no_license", "max_line_length": 77, "num_lines": 10, "path": "/app/models.py", "repo_name": "MagicSword/gae-flask-html5", "src_encoding": "UTF-8", "text": "\"\"\"\nPython Datastore API: http://code.google.com/appengine/docs/python/datastore/\n\"\"\"\n\nfrom google.appengine.ext import db\n\n\nclass Todo(db.Model):\n text = db.StringProperty()\n created_at = db.DateTimeProperty(auto_now=True)\n" }, { "alpha_fraction": 0.5708634257316589, "alphanum_fraction": 0.5770553946495056, "avg_line_length": 34.02409744262695, "blob_id": "716e112018017fbcff1fa1ab2639f569aa87ab17", "content_id": "e3afd402f83568c179b5034e8ed5851abbb410bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5814, "license_type": "no_license", "max_line_length": 82, "num_lines": 166, "path": "/README.md", "repo_name": "MagicSword/gae-flask-html5", "src_encoding": "UTF-8", "text": "gae-flask-html5\n===============\n\n<pre><code>\n\n E\n DtD\n .itGD ffffffffffffffffff \n :tttG E fffffffffjjjjjjjjf \n ;ttjLD .. ## fffffffffjjjjjjjjf \n ijjjfGEED;.:::D WE,KE fffLLLLLLfjjjjjjff \n jfffLLt:.::::::t D### # # fff...... jff \n LLLLf::.::ffjt::D ,i#G.#DG fff...... jff \n DLL:::..:;:itii::E G##W t fff...fffjjjjjjjff \n EL::::.:;,.,jiii:j #W# fff,..fffjjjjjjjff \n ,E:::::.:i,;:i;ii:: #Ki, Lfff..... jjf; \n D::::::::ffL;,,ii,:, L# W . jfff..... jjf. \n j::::::..LLL:.iii;:j ,#G ,fff..... jjf \n i:::::::.LLi;;;ii,:i ##Gt. D ffffffffjjj jjf \n LDtjfLLf:::GGG.:jii:: WW # . fff..fffjjj jjf \n EDtitj;GGG:,:t:,;;iii::;DL K#E## fff..fffjjj jff \n DLLfjLGGGGDG::::i::,ii;:GGLD D# iK : fff..... jff \n DitifLDD;;;::::::iiiii:,GDD L#GK# j, ffff.... .jjff \n DEEE E;,::::::::,:::t t###K f# # ffffffffjjjjjjff \n D:::::::::::,E E############ ffffffffjjjjjfff \n EDG,:::jEE ;EKL ,fffffffjjffffL, \n iffffffi \n .. \n\n\n github.com/zachwill/gae-flask-html5\n\n</code></pre>\n\n\nChangelog\n---------\n\n###V1.0.2\n\n* Minor fixes to `views.py` file.\n* Ajax functionality for adding todos.\n\n###V1.0.1\n\n* Fixed bug with `werkzeug_debugger_appengine` submodule.\n* Minor changes to files (fixed grammar issues, deleted comments, etc).\n\n### V1.0\n\n* Added send email functionality.\n * *NOTE*: You'll have to update the `from_address` variable in `views.py`\n for it to work properly.\n * Don't forget to run the development server with the\n `dev_appserver.py --enable_sendmail .` command.\n* Added `test.py` file for running unittest. 
Deleted `gaeunit.py`.\n * Tests kept in `tests` directory.\n * Now uses GAE v1.4.3's `testbed` functionality.\n\n\nWhat is this?\n-------------\n\nIt's just a simple [Flask](http://flask.pocoo.org/) skeleton for\n[Google App Engine](http://appengine.google.com/),\nbut made with all the baked-in-goodness of\n[html5 boilerplate](https://github.com/paulirish/html5-boilerplate).\n\nI'm planning on using this for my GAE projects going forward (I really\nlike the speed of [Flask](http://flask.pocoo.org/) compared to\n[Django-Nonrel](http://code.google.com/appengine/articles/django-nonrel.html)\non GAE), so I thought someone else might find it useful, too.\n\nJust about everything is ready to go right out of the box -- including\n`QUnit` for JavaScript tests and a `tests` directory for Python's `unittest`.\nAlso, I included a `style.less` file since I primarily only use the\n[Less.app](http://incident57.com/less/) when writing stylesheets nowadays.\n\n\nWhy should I use it?\n---------------------\n\nI stumbled a bit figuring out how to add tests and use the `unittest` module,\nand also setup an `appengine_console.py` file to connect to GAE's remote API,\nso this skeleton might come in handy for you.\n\nI looked at two other Flask GAE skeletons on Github\n([flask-gae-skeleton](https://github.com/blossom/flask-gae-skeleton)\nand [flask-gae-template](https://github.com/jugyo/flask-gae-template)\n-- both of which were awesome for learning), and I adapted what\nI felt were some of their best parts. Recently, I've updated this project's\nstructure after browsing the source code of\n[another GAE project skeleton](https://github.com/franciscosouza/labs).\n\nLastly, as an added bonus, the scripts are PEP8 compliant.\n\n\nHow do I use it?\n----------------\n\nMake sure you have the [Google App Engine SDK](http://appengine.google.com/)\ninstalled on your computer, and you've created an application for your\nGoogle account. The SDK will also install symlinks to its packages on your\ncomputer.\n\n### Steps to Get Up and Running\n\n1. Create a new application on [Google App Engine](http://appengine.google.com/).\n\n2. `git clone` this repo into your personal project folder.\n\n3. Amend the `app.yaml` file with your application's name.\n\n4. Run the project from your development server (use the `dev_appserver.py .`\ncommand or use the Google App Engine Launcher GUI).\n\n5. Browse the sample application in your web browser: `http://localhost:8080`\n\n6. Once you're ready to read the source code, check out the `app` directory.\n\n7. All static media is stored in the `static` directory.\n * This includes your favicon, `robots.txt`, and `humans.txt`\n\n8. Before you deploy, don't forget to:\n * Update the `humans.txt` file.\n * Uncomment the `default_expiration` in `app.yaml`\n\n\nUseful Commands\n---------------\n\n### Setup\n\n git clone https://github.com/zachwill/gae-flask-html5.git <your_app_name_here>\n\n### Run\n\n dev_appserver.py .\n\nIf you're planning on using email functionality with the development server,\nuse the following command:\n\n dev_appserver.py --enable_sendmail .\n\n### Deploy\n\n appcfg.py update .\n\n### Test\n\nTests are written using GAE v1.4.3's `testbed` functionality. Currently, [there\nis a known bug](http://goo.gl/tDQTz) for users without `PIL` installed.\n\n python test.py\n\n### Remote Console\n\n python appengine_console.py .\n\n\nTodo\n----\n\n* add more tests\n* add pep8.py to libs\n * create PEP8 TestCase, too\n" } ]
2
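The skeleton above pairs Flask with the Todo model from app/models.py. Here is a sketch of the kind of view it encourages; the /todos route, form field, fetch limit, and plain-text response are illustrative assumptions, not code from the repository:

```python
# Sketch only: a minimal Flask view over models.Todo. Route name, form
# field, fetch limit, and response format are assumptions for illustration.
from flask import Flask, redirect, request

from app.models import Todo

app = Flask(__name__)

@app.route("/todos", methods=["GET", "POST"])
def todos():
    if request.method == "POST":
        Todo(text=request.form["text"]).put()            # datastore write
        return redirect("/todos")
    items = Todo.all().order("-created_at").fetch(20)    # newest first
    return "\n".join(item.text for item in items)
```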
italotabatinga/whos-that-pokemon
https://github.com/italotabatinga/whos-that-pokemon
b350a5b70ad8d10c70fe59855b79d3f72bebb497
5424110e7d85478e940ce852404c98c5ed7cc37c
7a680567f47496b8089b20b438e0114d5d32acee
refs/heads/master
2020-03-06T23:15:48.448081
2018-04-13T00:12:17
2018-04-13T00:12:17
127,128,182
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.6736842393875122, "alphanum_fraction": 0.7052631378173828, "avg_line_length": 14.833333015441895, "blob_id": "f18c141c0da0e3284e6c069d47300424d1a17229", "content_id": "a0e977e5bbb65c61ccdbe96002ebe5e21da96c8a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 190, "license_type": "permissive", "max_line_length": 58, "num_lines": 12, "path": "/randomChooser.py", "repo_name": "italotabatinga/whos-that-pokemon", "src_encoding": "UTF-8", "text": "import random\n\ndef generateList():\n\tnumLow = 0\n\tnumHigh = 145\n\n\tlistOfNumbers = []\n\n\tfor x in range (0, 5):\n\t listOfNumbers.append(random.randint(numLow, numHigh))\n\n\treturn listOfNumbers\n" }, { "alpha_fraction": 0.5923781991004944, "alphanum_fraction": 0.6111915111541748, "avg_line_length": 38.11320877075195, "blob_id": "9d3428fa55231bb91b6e47d4f16fb14fb3b07573", "content_id": "0b59b9dcdbb1abbc5f11c08992b439eafe15f584", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2073, "license_type": "permissive", "max_line_length": 99, "num_lines": 53, "path": "/Particle.py", "repo_name": "italotabatinga/whos-that-pokemon", "src_encoding": "UTF-8", "text": "from PyQt5.QtGui import QVector2D, QPainter, QColor\nfrom PyQt5.QtWidgets import QGraphicsItem, QWidget, QStyleOptionGraphicsItem\nfrom PyQt5.QtCore import QRectF, QPointF, QRandomGenerator, Qt\nfrom random import randint\nclass Particle(QGraphicsItem):\n \"\"\"Particle class to simulate physics\"\"\"\n def __init__(self, pos: QPointF, **kwargs):\n \"\"\"Constructor to create a particle, receives a QtVector2D and can \\\n receive 'r' that is radius (float) and 'speed' that is speed (QVector2d)\"\"\"\n super().__init__()\n self.setPos(pos.x(), pos.y())\n\n self.rad = kwargs.get('r', 2)\n self.speed = kwargs.get('speed', QPointF(QRandomGenerator(randint(0,500)).bounded(-10,10),\\\n QRandomGenerator(randint(0,500)).bounded(-10,10)))\n\n def hasCollidedX(self):\n \"\"\"Return true if the particle has collided with the vertical \\\n borders\"\"\"\n newpos = self.pos()\n # print(newpos.x())\n if(newpos.x() - self.rad < 0 or newpos.x()+self.rad > self.scene().width()):\n return True\n return False\n\n def hasCollidedY(self):\n \"\"\"Return true if the particle has collided with the horizontal \\\n borders\"\"\"\n newpos = self.pos()#self.mapToScene(self.pos())\n if(newpos.y() - self.rad < 0 or newpos.y()+self.rad > self.scene().height()):\n return True\n return False\n\n def advance(self, step):\n \"\"\"Update the motion of the particle\"\"\"\n if not step:\n return\n self.setPos(self.pos() + self.speed)\n self.scene().upAlpha(self)\n if self.hasCollidedX():\n self.speed.setX(self.speed.x()*-1)\n if self.hasCollidedY():\n self.speed.setY(self.speed.y()*-1)\n\n def boundingRect(self):\n # pass\n return QRectF(-self.rad, -self.rad, self.rad*2, self.rad*2)\n\n def paint(self, painter: QPainter, a, b):\n \"\"\"Show particle on the screen\"\"\"\n painter.setPen(Qt.NoPen)\n painter.setBrush(QColor(255, 0, 255, 255))\n painter.drawEllipse(self.boundingRect())\n" }, { "alpha_fraction": 0.7692307829856873, "alphanum_fraction": 0.7820512652397156, "avg_line_length": 38, "blob_id": "8f129d1164ca6c65cc42cce02340bf2c54d76705", "content_id": "075a9b1ec439399035c58e17e0c3a115a527e12c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 78, "license_type": "permissive", "max_line_length": 57, 
"num_lines": 2, "path": "/README.md", "repo_name": "italotabatinga/whos-that-pokemon", "src_encoding": "UTF-8", "text": "# whos-that-pokemon\nA app developed with PyQt5 to make you guess the Pokemon.\n" }, { "alpha_fraction": 0.6011526584625244, "alphanum_fraction": 0.616000771522522, "avg_line_length": 37.484962463378906, "blob_id": "a79e33c153aac1e41879a11be7cc8f486b3ac414", "content_id": "9f802a6dcccc0a6141505df4e2dd1911807e9a8b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10240, "license_type": "permissive", "max_line_length": 169, "num_lines": 266, "path": "/Main.py", "repo_name": "italotabatinga/whos-that-pokemon", "src_encoding": "UTF-8", "text": "๏ปฟfrom PyQt5.QtWidgets import QApplication, \\\n QWidget, QPushButton, QFrame, QSlider, QCheckBox, QFileDialog,\\\n QGraphicsScene, QGraphicsView, QVBoxLayout, QGroupBox,\\\n QHBoxLayout, QGraphicsPixmapItem, QGraphicsItem\nfrom PyQt5.QtGui import QPainter, QImage, QColor, QPainterPath, \\\n QPen, QMouseEvent, QPolygon, QPalette, QColor, QPixmap, QImage\nfrom PyQt5.QtCore import Qt, QRect, QPointF, QRandomGenerator, QTimer,\\\n QObject, QRectF, QUrl\nfrom PyQt5.QtMultimedia import QSound, QMediaPlayer, QMediaContent\n\nimport sys\nimport random\nimport time\n\nfrom Particle import Particle\nimport randomChooser\n\n\nBUTTON_MIN_WIDTH = 100\nBUTTON_x = 120\nBUTTON_Y = 430\n\npokemons = ['Goldeen', 'Kadabra', 'Vaporeon', 'Grimer', 'Machamp', 'Oddish',\\\n'Poliwhirl', 'Squirtle', 'Doduo', 'Charmander', 'Golem', 'Horsea', 'Magmar',\\\n'Dragonite', 'Charizard', 'Drowzee', 'Electrode', 'Ponyta', 'Rhydon', 'Caterpie',\\\n'Zapdos', 'Pidgey', 'Voltorb', 'Shellder', 'Bulbasaur', 'Clefable', 'Omanyte',\\\n'Hitmonchan', 'Mankey', 'Nidoking', 'Magnemite', 'Geodude', 'Zubat', 'Cubone',\\\n'Nidorino', 'Gastly', 'Seaking', 'Magneton', 'Ditto', 'Articuno', 'Alakazam',\\\n'Pikachu', 'Koffing', 'Golbat', 'Pidgeotto', 'Eevee', 'Muk', 'Starmie', 'Rattata',\\\n'Slowpoke', 'Cloyster', 'Nidoran', 'Nidorina', 'Hitmonlee', 'Aerodactyl', 'Ekans',\\\n'Weepinbell', 'Gengar', 'Nidoqueen', 'Magikarp', 'Metapod', 'Machoke', 'Tentacruel',\\\n'Tauros', 'Venomoth', 'Exeggutor', 'Onix', 'Spearow', 'Mr.Mime', 'Kingler',\\\n'Gloom', 'Sandslash', 'Raichu', 'Moltres', 'Staryu', 'Lickitung', 'Abra',\\\n'Arbok', 'Psyduck', 'Diglett', 'Wartortle', 'Slowbro', 'Dodrio', 'Raticate',\\\n'Dratini', 'Porygon', 'Beedrill', 'Tentacool', 'Omastar', 'Poliwag', 'Kakuna',\\\n'Gyarados', 'Machop', 'Dragonair', 'Venusaur', 'Victreebel', 'Arcanine', 'Flareon',\\\n'Rapidash', 'Clefairy', 'Growlithe', 'Vulpix', 'Scyther', 'Jynx', 'Seadra',\\\n'Paras', 'Weezing', 'Dugtrio', 'Golduck', 'Charmeleon', 'Primeape', 'Blastoise',\\\n'Seel', 'Farfetch', 'Mewtwo', 'Marowak', 'Ivysaur', 'Tangela', 'Ninetales',\\\n'Pidgeot', 'Bellsprout', 'Krabby', 'Electabuzz', 'Chansey', 'Pinsir', 'Persian',\\\n'Lapras', 'Fearow', 'Exeggcute', 'Hypno', 'Parasect', 'Kangaskhan', 'Haunter',\\\n'Kabutops', 'Dewgong', 'Venonat', 'Sandshrew', 'Weedle', 'Wigglytuff', 'Jolteon',\\\n'Graveler', 'Vileplume', 'Jigglypuff', 'Butterfree', 'Poliwrath', 'Rhyhorn', 'Kabuto'] \n\nclass states:\n normal=0\n\nclass GrafScene(QGraphicsScene):\n def __init__(self, parent):\n super().__init__(parent)\n self.view = QGraphicsView(self)\n self.image = QImage()\n\n def initUI(self):\n self.setItemIndexMethod(QGraphicsScene.BspTreeIndex)\n self.setBackgroundBrush(QColor(255, 0, 0))\n # pokemon = QGraphicsPixmapItem(QPixmap(\"assets/gengar.png\"))\n bgimg = 
QPixmap(\"assets/whosthatpokemon.jpg\")\n # print(bgimg.width(), bgimg.height())\n self.addPixmap(QPixmap(\"assets/whosthatpokemon.jpg\"))\n\n # img = image.load(\"assets/gengar.png\")\n self.pokemon = QGraphicsPixmapItem()\n self.setNewPokemon(\"Gengar\")\n\n self.answer = QGraphicsPixmapItem()\n\n # pokemon.pixmap(\n self.addItem(self.pokemon)\n self.addParticles(150)\n self.view.setFixedSize(761,431)\n self.view.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\n self.view.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\n # self.scene.setSceneRect(5, 5, self.width()-10, 300)\n self.view.setBackgroundBrush(QColor(0, 255, 0))#QImage(\"assets/whosthatpokemon.png\"))\n # self.view.setCacheMode(QGraphicsView.CacheBackground)\n self.view.setRenderHint(QPainter.Antialiasing)\n # self.scene.views\n\n def addParticles(self, count: int):\n gen = QRandomGenerator()\n for i in range(count):\n # print(random(self.scene.width()))\n part = Particle(QPointF(gen.bounded(self.width()),\\\n gen.bounded(self.height())))\n part.setVisible(False)\n self.addItem(part)\n\n def setNewPokemon(self, name):\n self.image.load(\"assets/\" + name.lower() + \".png\")\n for i in range(self.image.width()):\n for j in range(self.image.height()):\n color = self.image.pixelColor(i,j)\n if color.alpha() != 0:\n color.setAlpha(5)\n self.image.setPixel(i,j,color.rgba())\n self.pokemon.setPixmap(QPixmap().fromImage(self.image))\n pokepix = self.pokemon.pixmap()\n self.pokemon.setPos(220-self.image.width()/2,220-self.image.height()/2)\n\n def setAnswer(self, name):\n self.image.load(\"assets/\" + name.lower() + \".png\")\n self.pokemon.setPixmap(QPixmap().fromImage(self.image))\n pokepix = self.pokemon.pixmap()\n self.pokemon.setPos(220-self.image.width()/2,220-self.image.height()/2)\n\n def upAlpha(self, particle: QGraphicsItem):\n if self.pokemon.collidesWithItem(particle):\n # particle.setVisible(False)\n pos = QPointF(particle.pos().x()-220+self.image.width()/2,particle.pos().y()-220+self.image.height()/2)# self.pokemon.mapToItem(self.pokemon, particle.pos())\n border = 2\n if pos.x() > border and pos.x() < self.image.width()-border and\\\n pos.y() > border and pos.y() < self.image.height()-border:\n for i in range(-border,border):\n for j in range(-border,border):\n color = self.image.pixelColor(pos.x()+i, pos.y()+j)\n if color.alpha() > 0 and color.alpha() < 245:\n color.setAlpha(color.alpha()+10)\n self.image.setPixel(pos.x()+i, pos.y()+j, color.rgba())\n self.pokemon.setPixmap(QPixmap().fromImage(self.image))\n self.pokemon.show()\n\n\nclass GrafWin(QFrame):\n def __init__(self):\n super().__init__()\n\n self.listOfPokemons = []\n self.answerPokemon = ''\n self.time = 0\n\n self.createButtons()\n self.initUI()\n\n def initUI(self):\n self.setFixedSize(785,500)\n self.setWindowTitle('Who\\'s that Pokรฉmon?')\n # self.gf = GrafWidget(self)\n # self.gf.setGeometry(5, 5, 590, 400)\n self.setLayout(QVBoxLayout())\n\n self.buttonsGroup = QGroupBox()\n self.createBtnsLayout()\n self.buttonsGroup.hide()\n\n self.scene = GrafScene(self)\n self.scene.initUI()\n\n self.layout().addWidget(self.scene.view)\n self.layout().addWidget(self.buttonsGroup)\n self.layout().addWidget(self.b1)\n # self.layout().\n self.scene.setSceneRect(QRectF(self.scene.view.rect()))\n self.timer = QTimer()\n self.timer.timeout.connect(self.scene.advance)\n self.timer.start(1000/66)\n\n self.show()\n\n def createBtnsLayout(self):\n layout = QHBoxLayout()\n layout.addWidget(self.b2)\n layout.addWidget(self.b3)\n layout.addWidget(self.b4)\n 
layout.addWidget(self.b5)\n layout.addWidget(self.b6)\n self.buttonsGroup.setLayout(layout)\n\n def updatePokemons(self):\n self.choosePokemon()\n self.answerPokemon = self.listOfPokemons[random.randint(0, 4)]\n\n def createButtons(self):\n self.b1 = QPushButton(\"Generate\", self)\n self.b1.setMinimumWidth(BUTTON_MIN_WIDTH)\n self.b1.move(5 + 2 * BUTTON_x, 30 + BUTTON_Y)\n self.b1.clicked.connect(self.on_click_start)\n\n self.b2 = QPushButton()\n self.b2.setMinimumWidth(BUTTON_MIN_WIDTH)\n self.b2.move(5, BUTTON_Y)\n self.b2.clicked.connect(self.on_click_choose)\n\n self.b3 = QPushButton()\n self.b3.setMinimumWidth(BUTTON_MIN_WIDTH)\n self.b3.move(5 + 1 * BUTTON_x, BUTTON_Y)\n self.b3.clicked.connect(self.on_click_choose)\n\n self.b4 = QPushButton()\n self.b4.setMinimumWidth(BUTTON_MIN_WIDTH)\n self.b4.move(5 + 2 * BUTTON_x, BUTTON_Y)\n self.b4.clicked.connect(self.on_click_choose)\n\n self.b5 = QPushButton()\n self.b5.setMinimumWidth(BUTTON_MIN_WIDTH)\n self.b5.move(5 + 3 * BUTTON_x, BUTTON_Y)\n self.b5.clicked.connect(self.on_click_choose)\n\n self.b6 = QPushButton()\n self.b6.setMinimumWidth(BUTTON_MIN_WIDTH)\n self.b6.move(5 + 4 * BUTTON_x, BUTTON_Y)\n self.b6.clicked.connect(self.on_click_choose)\n\n def updateButtons(self):\n self.b2.setText(self.listOfPokemons[0])\n self.b3.setText(self.listOfPokemons[1])\n self.b4.setText(self.listOfPokemons[2])\n self.b5.setText(self.listOfPokemons[3])\n self.b6.setText(self.listOfPokemons[4])\n\n self.buttonsGroup.show()\n self.b1.hide()\n\n\n def choosePokemon(self):\n\n randomNumbers = randomChooser.generateList()\n self.listOfPokemons.clear()\n\n for i in range(5):\n self.listOfPokemons.append(pokemons[randomNumbers[i]])\n\n def on_click_start(self):\n self.updatePokemons()\n self.updateButtons()\n self.scene.setNewPokemon(self.answerPokemon)\n self.show()\n self.time = time.time()\n\n\n for i in self.scene.items():\n if isinstance(i,Particle):\n i.setVisible(True)\n\n QSound.play(\"audio/whoisthatpokemon.wav\")\n\n\n def on_click_nop(self):\n pass\n\n def on_click_choose(self):\n for i in self.scene.items():\n if isinstance(i,Particle):\n i.setVisible(False)\n self.scene.setAnswer(self.answerPokemon)\n\n self.time = time.time() - self.time\n print(\"Tempo transcorrido: \" + str(self.time) + \" segundos!\")\n guessedPokemon = self.sender().text()\n answerPokemon = self.answerPokemon\n\n if guessedPokemon == answerPokemon:\n print(\"Acertou, Mizeravi!\\n\")\n QSound.play(\"audio/success.wav\")\n else:\n print(\"Achou que era o \" + guessedPokemon + \"? Achou errado, otario!\\nO pokemon correto era: \" + answerPokemon + \"\\n\")\n QSound.play(\"audio/fail.wav\")\n\n self.b1.show()\n self.buttonsGroup.hide()\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = GrafWin()\n ex.show()\n sys.exit(app.exec_())\n" } ]
4
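One detail of the game above: randomChooser.generateList() draws the five answer options with random.randint in a loop, so two buttons can show the same Pokemon. A duplicate-free variant can be sketched with random.sample; generate_unique_list is a hypothetical name, not a drop-in from the repository:

```python
# Sketch only: a duplicate-free take on randomChooser.generateList().
# random.sample draws k distinct indices from the range in one call.
import random

def generate_unique_list(low=0, high=145, k=5):
    return random.sample(range(low, high + 1), k)

if __name__ == "__main__":
    options = generate_unique_list()
    assert len(set(options)) == 5   # no repeated answer buttons
    print(options)
```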
omarKady/Parser
https://github.com/omarKady/Parser
20e0fd796878960dd6685f1c5cfd3ad9ade9301c
990b813cc0af69577a22055a08e31f36f090acee
f1b5b35be6f1d4e58ca7d543c94e9be34453fce9
refs/heads/main
2023-06-04T20:52:11.687022
2021-06-27T17:52:31
2021-06-27T17:52:31
380,804,157
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6289535164833069, "alphanum_fraction": 0.6306516528129578, "avg_line_length": 37.93388366699219, "blob_id": "180b110d34cb26bf19b62619869d81af1869a143", "content_id": "8c04daa82f85641113cd82f1abde31742ddac90d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4711, "license_type": "no_license", "max_line_length": 99, "num_lines": 121, "path": "/parser.py", "repo_name": "omarKady/Parser", "src_encoding": "UTF-8", "text": "import argparse, xmltodict, os, time, json, csv\nfrom pymongo import MongoClient \n\nclass BaseParser:\n def __init__(self, customers_file, vehicles_file = None):\n self.customers_file = customers_file\n self.vehicles_file = vehicles_file\n\n def parse_to_json(self):\n raise NotImplementedError\n\n def output_file_naming(self, customers_file):\n file_name = os.path.basename(self.customers_file)\n file_name_without_extension = os.path.splitext(file_name)[0]\n output_file_with_extension = file_name_without_extension + '.json'\n ts = time.time()\n final_output_file_name = str(ts) + '_' + output_file_with_extension\n return final_output_file_name\n\n\n def output_file_path(self, customers_file):\n dirnam = os.path.dirname(self.customers_file)\n file_name = self.output_file_naming(customers_file)\n json_file_path = dirnam + '/' + file_name\n return json_file_path\n\n def send_json_to_mongodb(self, converted_json, colloction_type):\n myclient = MongoClient(\"mongodb://localhost:27017/\")\n db = myclient[\"trufla\"]\n Collection = db[colloction_type]\n with open(converted_json) as file:\n file_data = json.load(file)\n \n if isinstance(file_data, list):\n Collection.insert_many(file_data) \n else:\n Collection.insert_one(file_data)\n \n\nclass XMLParser(BaseParser):\n def __init__(self, customers_file, vehicles_file = None):\n super().__init__(customers_file, vehicles_file)\n self.parse_to_json(customers_file)\n\n # TODO : read input file (Customers) : transform xml to dict\n def read_input_file(self, customers_file):\n xml_file = open(self.customers_file, \"r\")\n my_obj = xmltodict.parse(xml_file.read())\n py_dict = json.dumps(my_obj, indent=4)\n return py_dict\n\n # TODO : save : transform dict to json and write\n def parse_to_json(self, customers_file):\n file_path = self.output_file_path(customers_file)\n python_dict = self.read_input_file(customers_file)\n with open(file_path, \"w\") as my_file:\n my_file.write(python_dict)\n self.send_json_to_mongodb(file_path, 'xml')\n\nclass CSVParser(BaseParser):\n def __init__(self, customers_file, vehicles_file = None):\n super().__init__(customers_file, vehicles_file)\n self.parse_to_json(customers_file, vehicles_file)\n\n # TODO : read input file (Customers) : transform csv to dict\n def read_input_file(self, customers_file, vehicles_file):\n customers_list = []\n with open(customers_file, \"r\") as csv_file:\n c_reader = csv.DictReader(csv_file)\n for row in c_reader:\n customers_list.append(dict(row))\n\n vehicles_list = []\n with open(vehicles_file, \"r\") as csv_file:\n v_reader = csv.DictReader(csv_file)\n for row in v_reader:\n vehicles_list.append(dict(row))\n\n return customers_list, vehicles_list\n\n # merge two dict in one that has all customer data plus vehicles and transform in one dict\n def merge_customer_dict_with_vehicles(self, customers, vehicles):\n for customer in customers:\n customer_id = customer[\"id\"]\n customer_vehicles = []\n for vehicle in vehicles:\n if vehicle[\"owner_id\"] == customer_id:\n 
customer_vehicles.append(vehicle)\n customer.update({\"vehicle\": customer_vehicles})\n return customers\n\n \n\n # TODO : save : transform dict to json and write\n def parse_to_json(self, customers_file, vehicles_file):\n file_path = self.output_file_path(customers_file)\n customers, vehicles = self.read_input_file(customers_file, vehicles_file)\n final_customers_with_vehicles = self.merge_customer_dict_with_vehicles(customers, vehicles)\n converted_json = json.dumps(final_customers_with_vehicles, indent=4)\n with open(file_path, \"w\") as my_file:\n my_file.write(converted_json)\n self.send_json_to_mongodb(file_path, 'csv')\n\n\nif __name__ == \"__main__\":\n # TODO: parsing args\n parse = argparse.ArgumentParser()\n parse.add_argument(\"-format\")\n parse.add_argument(\"-customers_file\")\n parse.add_argument(\"-vehicles_file\")\n args = parse.parse_args()\n format = args.format.lower()\n customers_file = args.customers_file\n vehicles_file = args.vehicles_file\n\n if format == 'xml':\n xml_parser = XMLParser(customers_file)\n elif format == 'csv':\n csv_parser = CSVParser(customers_file, vehicles_file)\n else:\n print(\"Not Supported ... This program supports only xml and csv\")\n" }, { "alpha_fraction": 0.7255411148071289, "alphanum_fraction": 0.732467532157898, "avg_line_length": 43.42307662963867, "blob_id": "676cd1bfe8e2f05c7ac7097d12adec8970d61c0b", "content_id": "7858f64d866b49763bf4231e1415d050e4729ede", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1155, "license_type": "no_license", "max_line_length": 141, "num_lines": 26, "path": "/README.md", "repo_name": "omarKady/Parser", "src_encoding": "UTF-8", "text": "# Parser-by-Python\nA Python parser program\n\nThis is a parsing program that parses XML and CSV files to JSON files and saves the results to MongoDB.\n\n# Installation\n1 - install pip and xmltodict ($ pip install xmltodict)\n2 - install mongoDB (https://docs.mongodb.com/manual/tutorial/) Ubuntu: (https://docs.mongodb.com/manual/tutorial/install-mongodb-on-ubuntu/)\n3 - install pymongo library (allows interaction with the MongoDB database through Python)\n ($ pip install pymongo)\n4 - Create DB : Trufla >> start mongo and open shell then : \n a - use trufla\n b - create user admin for trufla db :\n db.createUser({	user: \"trufla_admin\", pwd: \"P@ssw0rd\",roles:[{role: \"userAdmin\" , db:\"trufla\"}]})\n c - create two collections (xml , csv)\n\n# Run Script \nOpen terminal\ncase 1 : (xml)\n$ python3 parser.py -format xml -customers_file 'path-to-customers-file.xml'\ncase 2 : (csv)\n$ python3 parser.py -format csv -customers_file 'path-to-customers-file.csv' -vehicles_file 'path-to-vehicles_file.csv'\n\n# Finally\nYou will receive a JSON output file in the same directory as the input file,\nand can then check the trufla db collections (xml and csv) to make sure the files were stored correctly.\n" } ]
2
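The XML branch of the parser above boils down to xmltodict.parse followed by json.dumps. Here is the same round trip on an inline document, with no files, CLI flags, or MongoDB involved:

```python
# Sketch only: the XML -> dict -> JSON conversion XMLParser performs,
# run on an inline string instead of a customers file.
import json

import xmltodict

xml = "<customers><customer><id>1</id><name>Ada</name></customer></customers>"
doc = xmltodict.parse(xml)
print(json.dumps(doc, indent=4))   # note: xmltodict leaves all values as strings
```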
yash12khandelwal/Lanes_Mahindra
https://github.com/yash12khandelwal/Lanes_Mahindra
b88c8513f65063bd6ba31b84f2bcfa6de599f9dd
cb72ef6b70110c5803df79aee2b280c24e3da47f
2a921f1c663f77fe364f8c0bc3a0c5f2d5c03849
refs/heads/master
2023-08-22T00:12:30.274853
2021-10-23T06:14:25
2021-10-23T06:14:25
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5883256793022156, "alphanum_fraction": 0.6754992604255676, "avg_line_length": 52.14285659790039, "blob_id": "742c28ceea13664eb67e3ec08d060266fc4ebecb", "content_id": "a5d19b91ea93afe2aa1d847daabad09c954ab23e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2604, "license_type": "no_license", "max_line_length": 113, "num_lines": 49, "path": "/cfg/Tutorials.cfg", "repo_name": "yash12khandelwal/Lanes_Mahindra", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nPACKAGE = \"node\"\n\nfrom dynamic_reconfigure.parameter_generator_catkin import *\n\ngen = ParameterGenerator()\ngen.add(\"is_debug\", bool_t, 0, \"d\", False)\n\ngen.add(\"iteration\", int_t, 0, \"An integer parameter\", 100,1,1500)\ngen.add(\"maxDist\", int_t, 0, \"An integer parameter\", 300,0,1000)\ngen.add(\"minLaneInlier\", int_t, 0, \"An integer parameter\", 1500,0,2500)\ngen.add(\"minPointsForRANSAC\", int_t, 0, \"An integer parameter\", 500,0,1500)\n# gen.add(\"grid_size\", int_t, 0, \"grid size for removing too many ransac points\", 3, 1, 100)\n\ngen.add(\"pixelPerMeter\", double_t, 0, \"A double parameter\",134,0,400)\n\ngen.add(\"horizon\", int_t, 0, \"Position of horizon in the image\",500,0,1280)\ngen.add(\"horizon_offset\", int_t, 0, \"Region in which we can consider intersection of line\",200,0,400) \n\ngen.add(\"transformedPoints0_lowerbound\", int_t, 0, \"A double parameter\",30,0,300)\ngen.add(\"transformedPoints0_upperbound\", int_t, 0, \"A double parameter\",800,300,1100)\n\ngen.add(\"point1_y\", int_t, 0, \"A double parameter\",30,0,300)\ngen.add(\"point2_x\", int_t, 0, \"A double parameter\",100,0,300)\n\n# gen.add(\"h\", int_t, 0, \"Height of gassian template\",30,0,100)\n# gen.add(\"w\", int_t, 0, \"Width of gaussian template\",10,0,100)\n# gen.add(\"variance\", double_t, 0, \"Variance or spread of Gaussian\",2.1,0,3.5)\n\ngen.add(\"yshift\", double_t, 0, \"Distance between the first point of image and lidar\",0.6,0,2)\n\n# gen.add(\"hysterisThreshold_min\", double_t, 0, \"Minimum value of hysteris threshold\", 0.39,0,1)\n# gen.add(\"hysterisThreshold_max\", double_t, 0, \"Maximum values of hysteris threshold\", 0.45,0,1)\n\n# gen.add(\"y\", int_t, 0, \"Center of the region of interest\", 400, 0, 1000)\n# gen.add(\"lane_width\", int_t, 0, \"Widht of lanes\", 320, 0, 800)\n# gen.add(\"k1\", int_t, 0, \"Center of the region of interest\", 50, 0, 200)\n# gen.add(\"k2\", int_t, 0, \"Center of the region of interest\", 50, 0, 200)\n\ngen.add(\"medianBlurkernel\", int_t, 0, \"median blur kernel size for cleaning intersectionImages\", 3, 3, 21)\ngen.add(\"neighbourhoodSize\", int_t, 0, \"neighbourhood size or block size for adaptive thresholding\", 25, 1, 100);\ngen.add(\"constantSubtracted\", int_t, 0, \"constant subtracted during adaptive thresholding\", -30, -100, 100)\n\ngen.add(\"region\", int_t, 0, \"region of interest for viewing only one lane\", 600, 400, 1000)\n\ngen.add(\"baseDistance1\", int_t, 0, \"minimum value of c2 - c1\", 40, 20, 500)\ngen.add(\"centroidDistance\", int_t, 0, \"minimum distance between the centroid of parabola\", 20, 20, 500)\n\nexit(gen.generate(PACKAGE, \"node\", \"Tutorials\"))\n" }, { "alpha_fraction": 0.7494553327560425, "alphanum_fraction": 0.7614378929138184, "avg_line_length": 23.1842098236084, "blob_id": "cefc2617270fb57f5ac043156c77ee68878e9725", "content_id": "f1752cc223a62c624d1092976ad62e777bb0e721", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", 
"length_bytes": 918, "license_type": "no_license", "max_line_length": 102, "num_lines": 38, "path": "/include/laneDetector_utils.hpp", "repo_name": "yash12khandelwal/Lanes_Mahindra", "src_encoding": "UTF-8", "text": "#ifndef _LANEDETECTOR_UTILS_HPP_\n#define _LANEDETECTOR_UTILS_HPP_\n\n#include \"opencv/cv.h\"\n#include <opencv2/highgui/highgui.hpp>\n#include <bits/stdc++.h>\n\nusing namespace std;\nusing namespace cv;\n\nextern int lowThreshold;\nextern int highThreshold;\n\nstring int_to_string(int n);\nint findIntersection(Vec4i l1, Vec4i l2);\nvoid transformPoints(Point2f* inputPoints, Point2f* transformedPoints, Mat transform2, int npoints);\nvoid transformLines(vector<Vec4i>& inputLines, vector<Vec4i>& transformedLines, Mat transform);\nMat getTemplateX(float sigma, int h, int w);\nMat getTemplateX2(float sigma, int h, int w, float theta);\nvoid hysterisThreshold(Mat img, Mat& des, float lowThres, float highThres);\n#endif\n\n\n/*\nSign Conventions:\n\nfindIntersections: gives the distance of the intersection of the two lines from the top of the segment\n\ncv::Line takes points as\n--------->\tfirst co-ordinate\n|\n|\n|\n|\n|\nsecond co-ordinate\n\n*/" }, { "alpha_fraction": 0.45181867480278015, "alphanum_fraction": 0.48804596066474915, "avg_line_length": 24.93055534362793, "blob_id": "de9ee7c537843be4d7f5542dbbbb84262404d20f", "content_id": "5df940ae9a70a134aa329ae51f41f25b95833b50", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 20537, "license_type": "no_license", "max_line_length": 157, "num_lines": 792, "path": "/include/ransac_new_2.hpp", "repo_name": "yash12khandelwal/Lanes_Mahindra", "src_encoding": "UTF-8", "text": "#ifndef RANSAC_NEW_2\n#define RANSAC_NEW_2\n\n\n#include <opencv2/core/core.hpp>\n#include <opencv2/highgui/highgui.hpp>\n#include <opencv2/imgproc/imgproc.hpp>\n#include <bits/stdc++.h>\n\n/*\n parabolas are fit assuming top left as origin - x towards right and y downwards\n */\n\nusing namespace std;\nusing namespace cv;\n\nint wTh=108;\nint stepsize = 225;\nbool return_prev_params=0;\n//structure to define the Parabola parameters\ntypedef struct Parabola\n{\n int numModel = 0;\n float a1 = 0.0;\n float c1 = 0.0;\n float a2 = 0.0;\n // float b2 = 0.0;\n float c2 = 0.0;\n} Parabola;\n\nvector<Parabola> v;\n// Parabola operator+(const Parabola &x)\n// {\n// Parabola ans;\n// ans.numModel = numModel+x.numModel;\n// ans.a1= a1- x.a1;\n// ans.c1= c1- x.c1;\n// ans.a2= a2- x.a2;\n// ans.c2= c2- x.c2;\n\n// }\n\nPoint centroid(float a,float c,Mat img);\n\nfloat dist(Point A,Point B)\n{\n return (sqrt(pow(A.x-B.x,2)+pow(A.y-B.y,2)));\n}\n\nParabola swap(Parabola param) {\n\n float temp1, temp3;\n temp1=param.a1;\n // temp2=param.b1;\n temp3=param.c1;\n\n param.a1=param.a2;\n // param.b1=param.b2;\n param.c1=param.c2;\n\n param.a2=temp1;\n // param.b2=temp2;\n param.c2=temp3;\n\n return param;\n}\n\nParabola classify_lanes(Mat img,Parabola present,Parabola previous)\n{\n float a1=present.a1;\n float a2=present.a2;\n float c1=present.c1;\n float c2=present.c2;\n int number_of_lanes=present.numModel;\n\n if(number_of_lanes==2)\n {\n if(c2<c1)\n {\n present=swap(present);\n return present;\n }\n else \n return present;\n }\n\n else if(number_of_lanes==1)\n {\n //if intersection on left or right lane possible\n if(a1*c1<0 && a1*(img.cols-c1)>0)\n {\n float y1=sqrt(-1.0*a1*c1);\n float y2=sqrt(a1*(img.cols-c1));\n\n if(y1>(2*img.rows)/5 && y1<(3*img.rows)/5 && y2>(2*img.rows)/5 && y2<(3*img.rows)/5)\n {\n return 
previous;\n }\n\n }\n\n if(a2*c2<0 && a2*(img.cols-c2)>0)\n {\n float y1=sqrt(-1.0*a2*c2);\n float y2=sqrt(a2*(img.cols-c2));\n\n if(y1>(2*img.rows)/5 && y1<(3*img.rows)/5 && y2>(2*img.rows)/5 && y2<(3*img.rows)/5)\n {\n return previous;\n }\n }\n\n if(a1!=0 && c1>(img.cols/2))\n {\n present=swap(present);\n return present;\n }\n\n if(a2!=0 && c2<(img.cols/2))\n {\n present=swap(present);\n return present;\n }\n\n }\n\n return present;\n\n}\n\n//calculation of Parabola parameters based on 3 randonmly selected points\nfloat get_a(Point p1, Point p2)\n{\n int x1 = p1.x;\n int x2 = p2.x;\n // int x3 = p3.x;\n int y1 = p1.y;\n int y2 = p2.y;\n // int y3 = p3.y;\n\n float del = (y1 - y2)*(y1 + y2);\n float del_a = (x1 - x2);\n float a;\n a = del/(del_a);\n\n if(fabs(a)>500)\n return FLT_MAX;\n else\n return a;\n}\n\nfloat get_c(Point p1, Point p2)\n{\n int x1 = p1.x;\n int y1 = p1.y;\n \n int x2 = p2.x;\n int y2 = p2.y;\n \n float del = (x1 - x2)*y2*y2;\n float del_a = (y1 - y2)*(y1 + y2);\n\n return (x2 - (del/(del_a)));\n}\n\nfloat get_c_2(Point p1, float a)\n{\n float c = p1.x - ((p1.y)*(p1.y))/a;\n\n return c;\n}\n\nfloat min(float a, float b){\n if(a<=b)\n return a;\n return b;\n}\n\n//calculate distance of passed point from curve\nfloat get_del(Point p, float a, float c)\n{\n float predictedX = ((p.y*p.y)/(a) + c);\n float errorx = fabs(p.x - predictedX);\n\n //#TODO add fabs\n float predictedY = sqrt(fabs(a*(p.x-c)));\n float errory = fabs(p.y - predictedY);\n\n return min(errorx, errory);\n}\n\n\n\n//removes both the lanes if they intersect within the image frame\nbool isIntersectingLanes(Mat img, Parabola param) {\n float a1 = param.a1;\n float c1 = param.c1;\n\n float a2 = param.a2;\n float c2 = param.c2;\n\n if(a1==a2)\n return false;\n float x = (a1*c1 - a2*c2)/(a1-a2);\n \n //checks if intersection is within\n\n float y_2 = a1*(x-c1);\n\n if (y_2 > 0 && sqrt(y_2) < (img.rows) && x > 0 && x < img.cols) return true;\n return false;\n}\n\nbool isIntersectingLanes_2(Mat img, Parabola param){\n float a1 = param.a1;\n float c1 = param.c1;\n\n float a2 = param.a2;\n float c2 = param.c2;\n int x1,x2,y;\n\n for(int i=0; i<img.rows-stepsize; i++){\n y = img.rows - i;\n x1 = ((y*y)/a1) + c1;\n x2 = ((y*y)/a2) + c2;\n if(fabs(x1 - x2) < 5)\n {\n //cout << fabs(x1-x2);\n return true;\n }\n }\n return false;\n}\n\nParabola no_sudden_change(Parabola bestTempParam, Mat img, Parabola previous)\n{\n if(bestTempParam.numModel==2 && previous.numModel == 2)\n {\n bestTempParam = classify_lanes(img, bestTempParam, previous);\n if(fabs(bestTempParam.a1 - previous.a1) > 100 || fabs(bestTempParam.c1 - previous.c1) > 100)\n {\n bestTempParam.a1 = 0;\n bestTempParam.c1 = 0;\n bestTempParam.numModel--;\n }\n if(fabs(bestTempParam.a2 - previous.a2) > 100 || fabs(bestTempParam.c2 - previous.c2) > 100)\n {\n bestTempParam.a2 = 0;\n bestTempParam.c2 = 0;\n bestTempParam.numModel--;\n }\n }\n\n else if(bestTempParam.numModel == 2 && previous.numModel == 1)\n {\n bestTempParam = classify_lanes(img, bestTempParam, previous);\n if((previous.a1 ==0 && previous.c1==0))\n {\n if(fabs(bestTempParam.a2 - previous.a2) > 100 || fabs(bestTempParam.c2 - previous.c2) > 100)\n {\n bestTempParam.a2 = 0;\n bestTempParam.c2 = 0;\n bestTempParam.numModel--;\n }\n }\n else\n {\n if(fabs(bestTempParam.a1 - previous.a1) > 100 || fabs(bestTempParam.c1 - previous.c1) > 100)\n {\n bestTempParam.a1 = 0;\n bestTempParam.c1 = 0;\n bestTempParam.numModel--;\n }\n }\n }\n\n else if(bestTempParam.numModel == 1 && previous.numModel == 2)\n {\n 
bestTempParam = classify_lanes(img, bestTempParam, previous);\n if(bestTempParam.a1 == 0 && bestTempParam.c1 == 0)\n {\n if(fabs(bestTempParam.a2 - previous.a2) > 100 || fabs(bestTempParam.c2 - previous.c2) > 100)\n {\n bestTempParam.a2 = 0;\n bestTempParam.c2 = 0;\n bestTempParam.numModel--;\n }\n }\n else\n {\n if(fabs(bestTempParam.a1 - previous.a1) > 100 || fabs(bestTempParam.c1 - previous.c1) > 100)\n {\n bestTempParam.a1 = 0;\n bestTempParam.c1 = 0;\n bestTempParam.numModel--;\n }\n }\n }\n\n else if(bestTempParam.numModel == 1 && previous.numModel == 1)\n {\n bestTempParam = classify_lanes(img, bestTempParam, previous);\n if((bestTempParam.a1 == 0 && bestTempParam.c1 == 0) && (previous.a1 == 0 && previous.c1 == 0))\n {\n if(fabs(bestTempParam.a2 - previous.a2) > 100 || fabs(bestTempParam.c2 - previous.c2) > 100)\n {\n bestTempParam.a2 = 0;\n bestTempParam.c2 = 0;\n bestTempParam.numModel--;\n }\n }\n else if((bestTempParam.a2 == 0 && bestTempParam.c2 == 0) && (previous.a2 == 0 && previous.c2 == 0))\n {\n if(fabs(bestTempParam.a1 - previous.a1) > 100 || fabs(bestTempParam.c1 - previous.c1) > 100)\n {\n bestTempParam.a1 = 0;\n bestTempParam.c1 = 0;\n bestTempParam.numModel--;\n }\n }\n }\n return bestTempParam;\n}\n\n//choose Parabola parameters of best fit curve basis on randomly selected 3 points\nParabola ransac(vector<Point> ptArray, Parabola param, Mat img, Parabola previous)\n{\n int numDataPts = ptArray.size();\n \n Parabola bestTempParam;\n\n //initialising no. of lanes\n bestTempParam.numModel=2;\n \n\n int score_gl = 0;\n int score_l_gl = 0, score_r_gl = 0;\n\n //check for no lane case here\n\n // loop of iterations\n for(int i = 0; i < iteration; i++)\n {\n int p1 = random()%ptArray.size(), p2 = random()%ptArray.size(), p3 = random()%ptArray.size(), p4 = random()%ptArray.size();\n \n\n if(p1==p2 || p1==p3 || p1==p4 || p3==p2 || p4==p2 || p3==p4){\n i--;\n continue;\n }\n \n Point ran_points[4];\n ran_points[0] = ptArray[p1];\n ran_points[1] = ptArray[p2];\n ran_points[2] = ptArray[p3];\n ran_points[3] = ptArray[p4];\n\n int flag = 0;\n Point temp;\n\n for(int m = 0; m < 3; m++)\n { \n for(int n = 0; n < 3 - m; n++)\n {\n if(ran_points[n].x > ran_points[n+1].x)\n {\n temp = ran_points[n];\n ran_points[n] = ran_points[n+1];\n ran_points[n+1] = temp;\n }\n } \n }\n\n\n if(ran_points[0].x == ran_points[1].x || ran_points[2].x==ran_points[3].x || ran_points[0].y == ran_points[1].y || ran_points[2].y==ran_points[3].y){\n i--;\n continue;\n }\n\n Parabola tempParam;\n tempParam.numModel = 2;\n tempParam.a1 = get_a(ran_points[0], ran_points[1]);\n tempParam.c1 = get_c(ran_points[0], ran_points[1]);\n if(true){\n tempParam.a2 = get_a(ran_points[2], ran_points[3]);\n tempParam.c2 = get_c(ran_points[2], ran_points[3]);\n }\n else{\n tempParam.a2 = tempParam.a1;\n tempParam.c2 = get_c_2(ran_points[2], tempParam.a2);\n }\n \n \n int score_common = 0;/*, comm_count = 0;*/\n int score_l_loc = 0, score_r_loc = 0;\n\n //looping over image\n for(int p = 0; p < ptArray.size(); p++)\n {\n\n int flag_l = 0; //for points on 1st curve\n int flag_r = 0; //for points on 2nd curve\n\n float dist_l = get_del(ptArray[p], tempParam.a1, tempParam.c1);\n\n if(dist_l < maxDist)\n {\n flag_l = 1;\n }\n\n float dist_r = get_del(ptArray[p], tempParam.a2, tempParam.c2);\n\n if(dist_r < maxDist)\n {\n flag_r = 1;\n }\n\n if(flag_l == 1 && flag_r == 1) {\n score_common++;\n }\n else {\n if (flag_l == 1) {\n score_l_loc++;\n }\n if (flag_r == 1) {\n score_r_loc++;\n }\n }\n } //end of loop over image\n\n\n if 
(dist(centroid(tempParam.a1,tempParam.c1,img),centroid(tempParam.a2,tempParam.c2,img)) < 20.0){\n //cout<<\"centroid issue.\"<<endl;\n if(score_r_loc > score_l_loc)\n {\n tempParam.a1 = 0;\n tempParam.c1 = 0;\n score_l_loc = 0;\n tempParam.numModel--;\n }\n else\n {\n tempParam.a2 = 0;\n tempParam.c2 = 0;\n score_r_loc = 0;\n tempParam.numModel--;\n }\n }\n\n else if(fabs(tempParam.c1 - tempParam.c2) < 40.0){\n //cout<<\"c1-c2 issue. \"<<fabs(tempParam.c1 - tempParam.c2)<<endl;\n if(score_r_loc > score_l_loc)\n {\n tempParam.a1 = 0;\n tempParam.c1 = 0;\n score_l_loc = 0;\n tempParam.numModel--;\n }\n\n else\n {\n tempParam.a2 = 0;\n tempParam.c2 = 0;\n score_r_loc = 0;\n tempParam.numModel--;\n }\n }\n\n // intersection in image taken\n \n else if( isIntersectingLanes_2(img, tempParam)) {\n //cout<<\" isIntersectingLanes issue.\"<<endl;\n if(score_r_loc > score_l_loc)\n {\n tempParam.a1 = 0;\n tempParam.c1 = 0;\n score_l_loc = 0;\n tempParam.numModel--;\n }\n else\n {\n tempParam.a2 = 0;\n tempParam.c2 = 0;\n score_r_loc = 0;\n tempParam.numModel--;\n }\n }\n\n\n else if ((score_common/(score_common + score_l_loc + score_r_loc+1))*100 > common_inliers_thresh) {\n //cout<<\"common points issue.\"<<endl;\n if(score_r_loc > score_l_loc)\n {\n tempParam.a1 = 0;\n tempParam.c1 = 0;\n score_l_loc = 0;\n tempParam.numModel--;\n }\n else\n {\n tempParam.a2 = 0;\n tempParam.c2 = 0;\n score_r_loc = 0;\n tempParam.numModel--;\n }\n }\n\n // else if(fabs(tempParam.a1 - tempParam.a2) < 200)\n // {\n // //cout<<\"a1 - a2 issue.\"<<endl;\n // if(score_r_loc > score_l_loc)\n // {\n // tempParam.a1 = 0;\n // tempParam.c1 = 0;\n // score_l_loc = 0;\n // tempParam.numModel--;\n // }\n // else\n // {\n // tempParam.a2 = 0;\n // tempParam.c2 = 0;\n // score_r_loc = 0;\n // tempParam.numModel--;\n // }\n // }\n\n\n if(fabs(tempParam.a1) <120 && fabs(tempParam.c1) > 150 && score_l_loc!=0)\n {\n //cout<<\"horizontal issue.\"<<endl;\n tempParam.a1 = 0;\n tempParam.c1 = 0;\n score_l_loc = 0;\n tempParam.numModel--;\n }\n\n if(fabs(tempParam.a2) <120 && fabs(tempParam.c2) > 150 && score_r_loc!=0)\n {\n //cout<<\"horizontal issue.\"<<endl;\n tempParam.a2 = 0;\n tempParam.c2 = 0;\n score_r_loc = 0;\n tempParam.numModel--;\n }\n\n\n\n if (tempParam.numModel==2 && (score_l_loc + score_r_loc ) > 2*score_gl) {\n\n score_l_gl=score_l_loc;\n score_r_gl=score_r_loc;\n score_gl = (score_r_gl + score_l_gl)/2;\n\n bestTempParam.a1=tempParam.a1;\n bestTempParam.c1=tempParam.c1;\n bestTempParam.a2=tempParam.a2;\n bestTempParam.c2=tempParam.c2;\n bestTempParam.numModel = tempParam.numModel;\n }\n\n if (tempParam.numModel==1 && (score_l_loc + score_r_loc) > score_gl) {\n\n score_l_gl=score_l_loc;\n score_r_gl=score_r_loc;\n score_gl = score_r_gl + score_l_gl;\n\n bestTempParam.a1=tempParam.a1;\n bestTempParam.c1=tempParam.c1;\n bestTempParam.a2=tempParam.a2;\n bestTempParam.c2=tempParam.c2;\n bestTempParam.numModel = tempParam.numModel;\n }\n } //end of iteration loop\n cout <<\"score_l_gl : \"<<score_l_gl<<\" score_r_gl : \"<<score_r_gl<<endl;\n\n if(score_l_gl!=0 && (score_l_gl) < minLaneInlier){\n cout<<\"left lane removed\"<< endl;\n bestTempParam.a1=0;\n bestTempParam.c1=0;\n bestTempParam.numModel--;\n }\n if(score_r_gl!=0 && (score_r_gl) < minLaneInlier){\n cout<<\"right lane removed\"<<endl;\n bestTempParam.a2=0;\n bestTempParam.c2=0;\n bestTempParam.numModel--;\n }\n bestTempParam = no_sudden_change(bestTempParam, img, previous);\n \n cout<<\"bestTempParam.numModel : \"<<bestTempParam.numModel<<endl;\n 
cout<<\"bestTempParam.a1 : \"<<bestTempParam.a1<<\" bestTempParam.c1 : \"<<bestTempParam.c1<<endl;\n cout<<\"bestTempParam.a2 : \"<<bestTempParam.a2<<\" bestTempParam.c2 : \"<<bestTempParam.c2<<endl;\n\n return bestTempParam;\n}\n\nPoint centroid(float a,float c,Mat img)\n{\n Point A;\n int i,j,x,y;\n int sum_x = 0,sum_y = 0,count=1;\n\n for(j=0;j<img.rows;j++)\n {\n y = img.rows-j;\n x = ((y*y)/(a) + c);\n\n if(x>=0 && x<img.cols)\n {\n sum_y+=y;\n sum_x+=x;\n count++;\n }\n }\n\n A.x=sum_x/count;\n A.y=sum_y/count;\n\n return A;\n}\n\n\n\nParabola getRansacModel(Mat img,Parabola previous)\n{\n //apply ransac for first time it will converge for one lane\n vector<Point> ptArray1;\n\n cout<<\"NEW RANSAC\\n\";\n \n // if (grid_white_thresh >= grid_size*grid_size) {\n // grid_white_thresh = grid_size*grid_size -1;\n // }\n\n Mat plot_grid(img.rows,img.cols,CV_8UC1,Scalar(0));\n\n\n int count = 0;\n for(int i=((grid_size-1)/2);i<img.rows-(grid_size-1)/2;i+=grid_size)\n {\n for(int j=((grid_size-1)/2);j<img.cols-(grid_size-1)/2;j+=grid_size)\n {\n count=0;\n for(int x=(j-(grid_size-1)/2);x<=(j+(grid_size-1)/2);x++)\n {\n\n for(int y=(i-(grid_size-1)/2);y<=(i+(grid_size-1)/2);y++)\n {\n if(img.at<uchar>(y,x)>wTh){\n\n count++;\n // plot_grid.at<uchar>(i,j)=255;\n }\n }\n }\n\n if(count>grid_white_thresh) {\n ptArray1.push_back(Point(j , img.rows - i));\n plot_grid.at<uchar>(i, j) = 255;\n \n }\n }\n }\n\n\n namedWindow(\"grid\",WINDOW_NORMAL);\n \n imshow(\"grid\",plot_grid);\n\n\n\n //declare a Parabola vaiable to store the Parabola\n Parabola param;\n //get parameters of first Parabola form ransac function\n\n if(ptArray1.size() > minPointsForRANSAC )\n {\n return_prev_params = 1;\n param = ransac(ptArray1, param, img, previous);\n\n }\n\n else {\n return previous;\n }\n //Lane classification based on previous frames\n //if two lanes\n\n param=classify_lanes(img,param,previous);\n\n // if(v.size() < 6)\n // {\n // v.push_back(param);\n // }\n // else\n // {\n // vector<Parabola>::iterator it; \n // it = v.begin(); \n // v.erase(it); \n // }\n // Parabola final;\n // float div=0;\n // for(int i=0; i<v.size(); i++) \n // {\n // final.a1+= v[i].a1/(pow(2,i));\n // final.c1+= v[i].c1/(pow(2,i));\n // final.a2+= v[i].a2/(pow(2,i));\n // final.c2+= v[i].c2/(pow(2,i));\n // div+= (1/pow(2,i));\n // }\n // final.a1=final.a1/div;\n // final.a2=final.a2/div;\n // final.c1=final.c1/div;\n // final.c2=final.c2/div;\n // final.numModel= final.numModel/div;\n\n // if(final.numModel >= 1.5) final.numModel = 2;\n // else final.numModel = 1;\n\n return param;\n}\n\nMat drawLanes(Mat fitLanes, Parabola lanes) {\n\n Mat output=fitLanes.clone();\n\n vector<Point2f> left_lane, right_lane;\n float a1 = lanes.a1, a2 = lanes.a2, c1 = lanes.c1, c2 = lanes.c2;\n\n for (int j = 0; j < fitLanes.rows; j++){\n\n float x, y;\n if (a1 != 0 && c1 != 0) {\n y = fitLanes.rows - j;\n x = (y*y)/(a1) + c1;\n left_lane.push_back(Point2f(x, j));\n }\n\n if (a2 != 0 && c2 != 0) {\n y = fitLanes.rows - j;\n x = (y*y)/(a2) + c2;\n right_lane.push_back(Point2f(x, j));\n }\n\n }\n\n Mat left_curve(left_lane, true);\n left_curve.convertTo(left_curve, CV_32S); //adapt type for polylines\n polylines(output, left_curve, false, Scalar(255, 0, 0), 3, CV_AA);\n\n Mat right_curve(right_lane, true);\n right_curve.convertTo(right_curve, CV_32S); //adapt type for polylines\n polylines(output, right_curve, false, Scalar(0, 0, 255), 3, CV_AA);\n\n return output;\n}\n\nMat drawLanes_white(Mat img, Parabola lanes) {\n\n vector<Point2f> left_lane, 
right_lane;\n float a1 = lanes.a1, a2 = lanes.a2, c1 = lanes.c1, c2 = lanes.c2;\n\n for (int j = 0; j < img.rows; j++){\n\n float x, y;\n if (a1 != 0 && c1 != 0) {\n y = img.rows - j;\n x = (y*y)/(a1) + c1;\n left_lane.push_back(Point2f(x, j));\n }\n\n if (a2 != 0 && c2 != 0) {\n y = img.rows - j;\n x = (y*y)/(a2) + c2;\n right_lane.push_back(Point2f(x, j));\n }\n\n }\n\n Mat left_curve(left_lane, true);\n left_curve.convertTo(left_curve, CV_32S); //adapt type for polylines\n polylines(img, left_curve, false, Scalar(255), 3, CV_AA);\n\n Mat right_curve(right_lane, true);\n right_curve.convertTo(right_curve, CV_32S); //adapt type for polylines\n polylines(img, right_curve, false, Scalar(255), 3, CV_AA);\n\n return img;\n}\n\n\n#endif\n" }, { "alpha_fraction": 0.5550881028175354, "alphanum_fraction": 0.6045227646827698, "avg_line_length": 26.366907119750977, "blob_id": "ba34819af618674eeb2fbe055ce26f035012d63c", "content_id": "645a043f6c0fbe291dcf1faf29e08516b4e57a3c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3803, "license_type": "no_license", "max_line_length": 275, "num_lines": 139, "path": "/src/laneDetector_utils.cpp", "repo_name": "yash12khandelwal/Lanes_Mahindra", "src_encoding": "UTF-8", "text": "#include <opencv2/opencv.hpp>\n#include <bits/stdc++.h>\n#include <iostream>\n\nusing namespace std;\nusing namespace cv;\n\nextern int lowThreshold;\nextern int highThreshold;\n\nstring int_to_string(int n)\n{\n std::ostringstream stm;\n stm << n;\n return stm.str();\n}\n\nint findIntersection(Vec4i l1, Vec4i l2)\n{\n\tdouble m1, m2, c1, c2;\n\n\tm1=((double)l1[3]-l1[1])/((double)l1[2]-l1[0]);\n\tc1=(double)l1[3]-m1*l1[2];\n\n\tm2=((double)l2[3]-l2[1])/((double)l2[2]-l2[0]);\n\tc2=(double)l2[3]-m2*l2[2];\n\n\tdouble yi, xi;\n\n\txi=(c1-c2)/(m2-m1);\n\tyi=m2*xi+c2;\n\n\treturn (int)yi;\n}\n\nvoid transformPoints(Point2f* inputPoints, Point2f* transformedPoints, Mat transform, int npoints)\n{\n for(int i=0; i<npoints; i++)\n {\n transformedPoints[i].x = (inputPoints[i].x*transform.at<double>(0, 0)+inputPoints[i].y*transform.at<double>(0, 1)+1.0*transform.at<double>(0, 2))/(inputPoints[i].x*transform.at<double>(2, 0)+inputPoints[i].y*transform.at<double>(2, 1)+1.0*transform.at<double>(2, 2));\n transformedPoints[i].y = (inputPoints[i].x*transform.at<double>(1, 0)+inputPoints[i].y*transform.at<double>(1, 1)+1.0*transform.at<double>(1, 2))/(inputPoints[i].x*transform.at<double>(2, 0)+inputPoints[i].y*transform.at<double>(2, 1)+1.0*transform.at<double>(2, 2));\n }\n return;\n}\n\nvoid transformLines(vector<Vec4i>& inputLines, vector<Vec4i>& transformedLines, Mat transform)\n{\n\tfor( size_t i = 0; i < inputLines.size(); i++ )\n {\n Vec4i l = inputLines[i];\n Point2f inputPoints[2], transformedPoints[2];\n inputPoints[0] = Point2f( l[0], l[1] );\n inputPoints[1] = Point2f( l[2], l[3] ); \n transformPoints(inputPoints, transformedPoints, transform, 2);\n transformedLines.push_back(Vec4i(transformedPoints[0].x, transformedPoints[0].y, transformedPoints[1].x, transformedPoints[1].y));\n }\n}\n\nMat getTemplateX(float sigma, int h, int w)\n{\n\tfloat m=0;\n\tMat templatex=Mat(h, w, CV_8UC1);\n\tfor(int i=0;i<h;i++)\n\t\tfor(int j=0;j<w;j++)\n\t\t{\n\t\t\tfloat x=j-w/2;\n\t\t\ttemplatex.at<uchar>(i, j)=275*(1/(sigma*sigma))*exp(-x*x/(2*sigma*sigma))*(1-x*x/(sigma*sigma))+128;\n\t\t\tm=max(m, 275*(1/(sigma*sigma))*exp(-x*x/(2*sigma*sigma))*(1-x*x/(sigma*sigma))+128);\n\t\t}\n\t//cout<<\"max=\"<<m<<endl;\n\treturn templatex;\n}\n\nMat 
getTemplateX2(float sigma, int h, int w, float theta)\n{\n\t//sigma = 2, 275; sigma = 1, 120; 1.6->270\n\tfloat m=0;\n\tMat templatex=Mat(h, w, CV_8UC1);\n\tfor(int i=0;i<h;i++)\n\t\tfor(int j=0;j<w;j++)\n\t\t{\n\t\t\tfloat x=j-w/2 + tan(theta*3.14159/180)*(i-h/2);\n\t\t\ttemplatex.at<uchar>(i, j)=275*(1/(sigma*sigma))*exp(-x*x/(2*sigma*sigma))*(1-x*x/(sigma*sigma))+128;\n\t\t\tm=max(m, 275*(1/(sigma*sigma))*exp(-x*x/(2*sigma*sigma))*(1-x*x/(sigma*sigma))+128);\n\t\t}\n\t//cout<<theta<<endl;\n\t//cout<<\"max: \"<<m<<endl;\n\t// namedWindow(\"template\", WINDOW_NORMAL);\n\t// imshow(\"template\", templatex);\n\treturn templatex;\n}\n\nvoid hysterisThreshold(Mat img, Mat& des, float lowThres, float highThres)\n{\n\tMat out=img-img;\n\tint i, j, k, l;\n\n\tint vis[img.rows][img.cols];\n\tfor(i=0;i<img.rows;i++)\n\t\tfor(j=0;j<img.cols;j++)\n\t\t\tvis[i][j]=-1;\n\n\tfor(i=0;i<img.rows;i++)\n\t\tfor(j=0;j<img.cols;j++)\n\t\t{\n\t\t\tif(img.at<float>(i,j)>=highThres && vis[i][j]!=1)\n\t\t\t{\n\t\t\t\t//init bfs to mark all nearby points\n\t\t\t\tqueue<Point> q;\n\t\t\t\tq.push(Point(i,j));\n\n\t\t\t\twhile(!q.empty())\n\t\t\t\t{\n\t\t\t\t\tPoint current=q.front();\n\t\t\t\t\tq.pop();\n\n\t\t\t\t\tvis[i][j]=1;\n\n\t\t\t\t\tif(img.at<float>(current.x, current.y)>lowThres)\n\t\t\t\t\t\tout.at<float>(current.x, current.y)=img.at<float>(current.x, current.y);\n\n\t\t\t\t\tfor(k=current.x-1;k<=current.x+1;k++)\n\t\t\t\t\t\tfor(l=current.y-1;l<=current.y+1;l++)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tif(k<0 || k>=img.rows || l<0 || l>=img.cols)\n\t\t\t\t\t\t\t\tcontinue;\n\n\t\t\t\t\t\t\tif(img.at<float>(k, l)>lowThres && vis[k][l]!=1)\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tq.push(Point(k, l));\n\t\t\t\t\t\t\t\tvis[k][l]=1;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\tdes=out;\n\treturn;\n}" }, { "alpha_fraction": 0.4733508825302124, "alphanum_fraction": 0.5070092082023621, "avg_line_length": 24.22980308532715, "blob_id": "daadb24570ed54ad218ea8f3d262069adad95c59", "content_id": "7a654d7ab802e7d3c97e261e4691e8835fdda2b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 14053, "license_type": "no_license", "max_line_length": 159, "num_lines": 557, "path": "/include/ransac_new_2_yash.hpp", "repo_name": "yash12khandelwal/Lanes_Mahindra", "src_encoding": "UTF-8", "text": "#ifndef RANSAC_NEW_2\n#define RANSAC_NEW_2\n\n\n#include <opencv2/core/core.hpp>\n#include <opencv2/highgui/highgui.hpp>\n#include <opencv2/imgproc/imgproc.hpp>\n#include <bits/stdc++.h>\n\n/*\n parabolas are fit assuming top left as origin - x towards right and y downwards\n */\n\nusing namespace std;\nusing namespace cv;\n\n//structure to define the Parabola parameters\ntypedef struct Parabola\n{\n int numModel = 0;\n float a1 = 0.0;\n float c1 = 0.0;\n float a2 = 0.0;\n float c2 = 0.0;\n} Parabola;\n\nPoint centroid(float a,float c,Mat img);\n\nfloat dist(Point A,Point B)\n{\n return (sqrt(pow(A.x-B.x,2)+pow(A.y-B.y,2)));\n}\n\nParabola swap(Parabola param) {\n\n float temp1, temp3;\n temp1=param.a1;\n temp3=param.c1;\n\n param.a1=param.a2;\n param.c1=param.c2;\n\n param.a2=temp1;\n param.c2=temp3;\n\n return param;\n}\n\nParabola classify_lanes(Mat img,Parabola present,Parabola previous)\n{\n float a1=present.a1;\n float a2=present.a2;\n float c1=present.c1;\n float c2=present.c2;\n int number_of_lanes=present.numModel;\n\n if(number_of_lanes==2)\n {\n if(c2<c1)\n {\n present=swap(present);\n return present;\n }\n else \n return present;\n }\n\n else if(number_of_lanes==1)\n {\n 
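getTemplateX2 above builds a tilted Laplacian-of-Gaussian-style ridge profile, and hysterisThreshold keeps weak responses only where they connect to strong ones. The commented detector later in this dump matches the template at several angles and fuses scores by per-pixel maximum; a compact usage sketch under those assumptions (sigma, template size, angle set and thresholds are the values the source uses):

```cpp
// Sketch: score a grayscale top view against rotated lane templates,
// fuse by per-pixel maximum, then hysteresis-threshold the result.
Mat scoreLaneTemplates(const Mat &topviewGray) {
    const float angles[] = {-30, -20, -10, 0, 10, 20, 30};
    Mat best;
    for (float th : angles) {
        Mat score;
        matchTemplate(topviewGray, getTemplateX2(2, 51, 35, th), score, CV_TM_CCOEFF_NORMED);
        if (best.empty()) best = score.clone();
        else best = cv::max(best, score);      // keep the strongest angle per pixel
    }
    Mat out;
    hysterisThreshold(best, out, 0.2, 0.4);    // thresholds taken from the source
    return out;
}
```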
//if intersection on left or right lane possible\n        if(a1*c1<0 && a1*(img.cols-c1)>0)\n        {\n            float y1=sqrt(-1.0*a1*c1);\n            float y2=sqrt(a1*(img.cols-c1));\n\n            if(y1>(2*img.rows)/5 && y1<(3*img.rows)/5 && y2>(2*img.rows)/5 && y2<(3*img.rows)/5)\n            {\n                return previous;\n            }\n\n        }\n\n        if(a2*c2<0 && a2*(img.cols-c2)>0)\n        {\n            float y1=sqrt(-1.0*a2*c2);\n            float y2=sqrt(a2*(img.cols-c2));\n\n            if(y1>(2*img.rows)/5 && y1<(3*img.rows)/5 && y2>(2*img.rows)/5 && y2<(3*img.rows)/5)\n            {\n                return previous;\n            }\n        }\n\n        if(a1!=0 && c1>(img.cols/2))\n        {\n            present=swap(present);\n            return present;\n        }\n\n        if(a2!=0 && c2<(img.cols/2))\n        {\n            present=swap(present);\n            return present;\n        }\n\n    }\n\n    return present;\n\n}\n\n//calculation of Parabola parameters based on two randomly selected points\nfloat get_a(Point p1, Point p2)\n{\n    int x1 = p1.x;\n    int x2 = p2.x;\n    // int x3 = p3.x;\n    int y1 = p1.y;\n    int y2 = p2.y;\n    // int y3 = p3.y;\n\n    float del = (y1 - y2)*(y1 + y2);\n    float del_a = (x1 - x2);\n    float a;\n    a = del/(del_a);\n\n    if(fabs(a)>500)\n        return FLT_MAX;\n    else\n        return a;\n}\n\nfloat get_c(Point p1, Point p2)\n{\n    int x1 = p1.x;\n    int y1 = p1.y;\n    \n    int x2 = p2.x;\n    int y2 = p2.y;\n    \n    float del = (x1 - x2)*y2*y2;\n    float del_a = (y1 - y2)*(y1 + y2);\n\n    return (x2 - (del/(del_a)));\n}\n\nfloat get_c_2(Point p1, float a)\n{\n    float c = p1.x - ((p1.y)*(p1.y))/a;\n\n    return c;\n}\n\nfloat min(float a, float b){\n    if(a<=b)\n        return a;\n    return b;\n}\n\n//calculate the distance of the passed point from the curve x = y*y/a + c\nfloat get_del(Point p, float a, float c)\n{\n    float predictedX = ((p.y*p.y)/(a) + c);\n    float errorx = fabs(p.x - predictedX);\n\n    // fabs guards against a negative argument under the sqrt\n    float predictedY = sqrt(fabs(a*(p.x-c)));\n    float errory = fabs(p.y - predictedY);\n\n    return min(errorx, errory);\n}\n\n\n\n//removes both the lanes if they intersect within the image frame\nbool isIntersectingLanes(Mat img, Parabola param) {\n    float a1 = param.a1;\n    float c1 = param.c1;\n\n    float a2 = param.a2;\n    float c2 = param.c2;\n\n    if(a1==a2)\n        return false;\n    float x = (a1*c1 - a2*c2)/(a1-a2);\n    \n    //checks if the intersection lies within the image frame\n\n    float y_2 = a1*(x-c1);\n\n    if (y_2 > 0 && sqrt(y_2) < (img.rows) && x > 0 && x < img.cols) return true;\n    return false;\n}\n\nbool isIntersectingLanes_2(Mat img, Parabola param){\n    float a1 = param.a1;\n    float c1 = param.c1;\n\n    float a2 = param.a2;\n    float c2 = param.c2;\n    int x1,x2,y;\n\n    for(int i=0; i<img.rows; i++){\n        y = img.rows - i;\n        x1 = ((y*y)/a1) + c1;\n        x2 = ((y*y)/a2) + c2;\n        if((x1 - x2) < 15)\n            return true;\n    }\n    return false;\n}\n\n//choose the Parabola parameters of the best-fit curves on the basis of the randomly selected points\nParabola ransac(vector<Point> ptArray, Parabola param, Mat img)\n{\n    int numDataPts = ptArray.size();\n    \n    Parabola bestTempParam;\n\n    //initialising no. 
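get_a and get_c above solve the two-parameter model x = y*y/a + c through two sample points: a = (y1^2 - y2^2)/(x1 - x2) and c = x2 - y2^2/a. A quick numeric self-check of that algebra (the sample points are arbitrary):

```cpp
#include <cassert>
#include <cmath>

// Self-check for the two-point fit x = y*y/a + c used by get_a()/get_c().
int main() {
    float x1 = 120, y1 = 40, x2 = 180, y2 = 100;       // arbitrary sample points
    float a = (y1*y1 - y2*y2) / (x1 - x2);             // get_a(p1, p2): a = 140
    float c = x2 - (y2*y2) / a;                        // get_c(p1, p2), simplified
    assert(std::fabs((y1*y1)/a + c - x1) < 1e-3f);     // curve passes through p1
    assert(std::fabs((y2*y2)/a + c - x2) < 1e-3f);     // curve passes through p2
    return 0;
}
```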
of lanes\n bestTempParam.numModel=2;\n \n int score_gl = 0;\n int score_l_gl = 0, score_r_gl = 0;\n\n // loop of iterations\n for(int i = 0; i < iteration; i++)\n {\n int p1 = random()%ptArray.size(), p2 = random()%ptArray.size(), p3 = random()%ptArray.size(), p4 = random()%ptArray.size();\n \n\n if(p1==p2 || p1==p3 || p1==p4 || p3==p2 || p4==p2 || p3==p4){\n i--;\n continue;\n }\n \n Point ran_points[4];\n ran_points[0] = ptArray[p1];\n ran_points[1] = ptArray[p2];\n ran_points[2] = ptArray[p3];\n ran_points[3] = ptArray[p4];\n\n int flag = 0;\n Point temp;\n\n // sorting values on the basis of increasing x\n for(int m = 0; m < 3; m++)\n { \n for(int n = 0; n < 3 - m; n++)\n {\n if(ran_points[n].x > ran_points[n+1].x)\n {\n temp = ran_points[n];\n ran_points[n] = ran_points[n+1];\n ran_points[n+1] = temp;\n }\n } \n }\n\n // removed a condition\n if(ran_points[0].x == ran_points[1].x || ran_points[2].x == ran_points[3].x || ran_points[0].y == ran_points[1].y || ran_points[2].y==ran_points[3].y){\n i--;\n continue;\n }\n\n Parabola tempParam;\n tempParam.numModel = 2;\n tempParam.a1 = get_a(ran_points[0], ran_points[1]);\n tempParam.c1 = get_c(ran_points[0], ran_points[1]);\n\n if(true){\n tempParam.a2 = get_a(ran_points[2], ran_points[3]);\n tempParam.c2 = get_c(ran_points[2], ran_points[3]);\n }\n else{\n tempParam.a2 = tempParam.a1;\n tempParam.c2 = get_c_2(ran_points[2], tempParam.a2);\n }\n \n \n int score_common = 0;/*, comm_count = 0;*/\n int score_l_loc = 0, score_r_loc = 0;\n\n //looping over image\n for(int p = 0; p < ptArray.size(); p++)\n {\n\n int flag_l = 0; //for points on 1st curve\n int flag_r = 0; //for points on 2nd curve\n\n float dist_l = get_del(ptArray[p], tempParam.a1, tempParam.c1);\n\n if(dist_l < maxDist)\n {\n flag_l = 1;\n }\n\n float dist_r = get_del(ptArray[p], tempParam.a2, tempParam.c2);\n\n if(dist_r < maxDist)\n {\n flag_r = 1;\n }\n\n if(flag_l == 1 && flag_r == 1) {\n score_common++;\n }\n else {\n if (flag_l == 1) {\n score_l_loc++;\n }\n if (flag_r == 1) {\n score_r_loc++;\n }\n }\n } //end of loop over image\n\n\n // if centroid of lanes are closer than 20 than then drop the lane with less no of points\n if (dist(centroid(tempParam.a1,tempParam.c1,img),centroid(tempParam.a2,tempParam.c2,img)) < centroidDistance){\n cout<<\"centroid issue.\"<<endl;\n if(score_r_loc > score_l_loc)\n {\n tempParam.a1 = 0;\n tempParam.c1 = 0;\n score_l_loc = 0;\n tempParam.numModel--;\n }\n else\n {\n tempParam.a2 = 0;\n tempParam.c2 = 0;\n score_r_loc = 0;\n tempParam.numModel--;\n }\n }\n\n // if c2 - c1 is less than 40 drop one lane with less no of inliers\n else if(fabs(tempParam.c1 - tempParam.c2) < baseDistance1){\n cout<<\"c1-c2 issue.\"<<endl;\n if(score_r_loc > score_l_loc)\n {\n tempParam.a1 = 0;\n tempParam.c1 = 0;\n score_l_loc = 0;\n tempParam.numModel--;\n }\n else\n {\n tempParam.a2 = 0;\n tempParam.c2 = 0;\n score_r_loc = 0;\n tempParam.numModel--;\n }\n }\n\n // if common points are too much then drop the lane with less no of inliers\n else if ((score_common/(score_common + score_l_loc + score_r_loc+1))*100 > common_inliers_thresh) {\n cout<<\"common points issue.\"<<endl;\n if(score_r_loc > score_l_loc)\n {\n tempParam.a1 = 0;\n tempParam.c1 = 0;\n score_l_loc = 0;\n tempParam.numModel--;\n }\n else\n {\n tempParam.a2 = 0;\n tempParam.c2 = 0;\n score_r_loc = 0;\n tempParam.numModel--;\n }\n }\n\n\n // updating the best params for two lane case\n // condition is that the sum of left and right inliers should be greater than 2 times the global inliers\n if 
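get_del scores a point against x = y*y/a + c by the smaller of its horizontal residual and the vertical residual |y - sqrt(a(x - c))|, a cheap stand-in for true point-to-curve distance. Counting inliers for one candidate parabola then reduces to the loop below, a standalone restatement of what the scoring pass above does, not new behaviour:

```cpp
#include <opencv2/core/core.hpp>
#include <cmath>
#include <vector>

// Standalone restatement of the per-curve inlier count inside ransac(),
// using the same residual rule as get_del().
int countInliers(const std::vector<cv::Point> &pts, float a, float c, float maxDist) {
    int inliers = 0;
    for (const cv::Point &p : pts) {
        float ex = std::fabs(p.x - ((p.y * p.y) / a + c));                // horizontal
        float ey = std::fabs(p.y - std::sqrt(std::fabs(a * (p.x - c)))); // vertical
        if (std::min(ex, ey) < maxDist) inliers++;
    }
    return inliers;
}
```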
(tempParam.numModel==2 && (score_l_loc + score_r_loc ) > 2*score_gl) {\n\n            score_l_gl=score_l_loc;\n            score_r_gl=score_r_loc;\n            score_gl = (score_r_gl + score_l_gl)/2;\n\n            bestTempParam.a1=tempParam.a1;\n            bestTempParam.c1=tempParam.c1;\n            bestTempParam.a2=tempParam.a2;\n            bestTempParam.c2=tempParam.c2;\n            bestTempParam.numModel = tempParam.numModel;\n        }\n\n        // updating the best params for one lane case\n        // condition is that the sum of left and right inliers should be greater than the global inlier count\n        if (tempParam.numModel==1 && (score_l_loc + score_r_loc) > score_gl) {\n\n            score_l_gl=score_l_loc;\n            score_r_gl=score_r_loc;\n            score_gl = score_r_gl + score_l_gl;\n\n            bestTempParam.a1=tempParam.a1;\n            bestTempParam.c1=tempParam.c1;\n            bestTempParam.a2=tempParam.a2;\n            bestTempParam.c2=tempParam.c2;\n            bestTempParam.numModel = tempParam.numModel;\n        }\n    } //end of iteration loop\n\n    cout <<\"score_l_gl : \"<<score_l_gl<<\" score_r_gl : \"<<score_r_gl<<endl;\n\n    if(score_l_gl!=0 && (score_l_gl) < minLaneInlier){\n        cout<<\"left lane removed\"<< endl;\n        bestTempParam.a1=0;\n        bestTempParam.c1=0;\n        bestTempParam.numModel--;\n    }\n    if(score_r_gl!=0 && (score_r_gl) < minLaneInlier){\n        cout<<\"right lane removed\"<<endl;\n        bestTempParam.a2=0;\n        bestTempParam.c2=0;\n        bestTempParam.numModel--;\n    }\n    cout<<\"bestTempParam.numModel : \"<<bestTempParam.numModel<<endl;\n    cout<<\"bestTempParam.a1 : \"<<bestTempParam.a1<<\" bestTempParam.c1 : \"<<bestTempParam.c1<<endl;\n    cout<<\"bestTempParam.a2 : \"<<bestTempParam.a2<<\" bestTempParam.c2 : \"<<bestTempParam.c2<<endl;\n\n    return bestTempParam;\n}\n\nPoint centroid(float a,float c,Mat img)\n{\n    Point A;\n    int i,j,x,y;\n    int sum_x = 0,sum_y = 0,count=1;\n\n    for(j=0;j<img.rows;j++)\n    {\n        y = img.rows-j;\n        x = ((y*y)/(a) + c);\n\n        if(x>=0 && x<img.cols)\n        {\n            sum_y+=y;\n            sum_x+=x;\n            count++;\n        }\n    }\n\n    A.x=sum_x/count;\n    A.y=sum_y/count;\n\n    return A;\n}\n\n\n\nParabola getRansacModel(Mat img,Parabola previous)\n{\n    vector<Point> ptArray1;\n\n    for(int i = 0; i < img.rows; i++)\n    {\n        for(int j = 0; j < img.cols; j++)\n        {\n            if(img.at<uchar>(i,j)>128)\n                ptArray1.push_back(Point(j, img.rows - i));\n        }\n    }\n\n    //declare a Parabola variable to store the fitted model\n    Parabola param;\n    //get the parameters of the first Parabola from the ransac function\n\n    if(ptArray1.size() > minPointsForRANSAC)\n    {\n        param = ransac(ptArray1, param, img);\n\n    }\n\n    else {\n        return previous;\n    }\n\n    //Lane classification based on previous frames\n    param=classify_lanes(img,param,previous);\n\n    return param;\n}\n\nMat drawLanes(Mat fitLanes, Parabola lanes) {\n\n\n    vector<Point2f> left_lane, right_lane;\n    float a1 = lanes.a1, a2 = lanes.a2, c1 = lanes.c1, c2 = lanes.c2;\n\n    for (int j = 0; j < fitLanes.rows; j++){\n\n        float x, y;\n        if (a1 != 0 && c1 != 0) {\n            y = fitLanes.rows - j;\n            x = (y*y)/(a1) + c1;\n            left_lane.push_back(Point2f(x, j));\n        }\n\n        if (a2 != 0 && c2 != 0) {\n            y = fitLanes.rows - j;\n            x = (y*y)/(a2) + c2;\n            right_lane.push_back(Point2f(x, j));\n        }\n\n    }\n\n    Mat left_curve(left_lane, true);\n    left_curve.convertTo(left_curve, CV_32S); //adapt type for polylines\n    polylines(fitLanes, left_curve, false, Scalar(255, 255, 255), 3, CV_AA);\n\n    Mat right_curve(right_lane, true);\n    right_curve.convertTo(right_curve, CV_32S); //adapt type for polylines\n    polylines(fitLanes, right_curve, false, Scalar(255, 255, 255), 3, CV_AA);\n\n    return fitLanes;\n}\n\nMat drawLanes_white(Mat img, Parabola lanes) {\n\n    vector<Point2f> left_lane, right_lane;\n    float a1 = lanes.a1, a2 = lanes.a2, c1 = lanes.c1, c2 = lanes.c2;\n\n    for (int j = 0; j 
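Putting this header together: a caller hands getRansacModel a binary top-view feature image plus the previous frame's fit, remembers the result, and overlays it. A hypothetical per-frame driver, a sketch rather than the repo's actual call site (function and window names here are ours):

```cpp
// Hypothetical per-frame driver for this header: fit, remember, draw.
#include <opencv2/opencv.hpp>

int processFrame(const cv::Mat &binaryTopView) {
    static Parabola previous;                      // zero-initialized on first call
    Parabola lanes = getRansacModel(binaryTopView, previous);
    previous = lanes;                              // feedback for classify_lanes()
    cv::Mat vis = drawLanes(binaryTopView.clone(), lanes);  // clone: drawLanes draws in place
    cv::imshow("fitted lanes", vis);
    return lanes.numModel;                         // 0, 1 or 2 lanes kept
}
```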
< img.rows; j++){\n\n float x, y;\n if (a1 != 0 && c1 != 0) {\n y = img.rows - j;\n x = (y*y)/(a1) + c1;\n left_lane.push_back(Point2f(x, j));\n }\n\n if (a2 != 0 && c2 != 0) {\n y = img.rows - j;\n x = (y*y)/(a2) + c2;\n right_lane.push_back(Point2f(x, j));\n }\n\n }\n\n Mat left_curve(left_lane, true);\n left_curve.convertTo(left_curve, CV_32S); //adapt type for polylines\n polylines(img, left_curve, false, Scalar(255, 0, 0), 3, CV_AA);\n\n Mat right_curve(right_lane, true);\n right_curve.convertTo(right_curve, CV_32S); //adapt type for polylines\n polylines(img, right_curve, false, Scalar(0, 0, 255), 3, CV_AA);\n\n return img;\n}\n\n\n#endif\n" }, { "alpha_fraction": 0.5802494883537292, "alphanum_fraction": 0.6091954112052917, "avg_line_length": 27.882591247558594, "blob_id": "80b5e67d12965cc1aad2eb742b80242ef20a8a96", "content_id": "d071c0c95e852334e1f3f176051d1591e6220f7f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 14268, "license_type": "no_license", "max_line_length": 270, "num_lines": 494, "path": "/src/laneDetector_ransac.cpp", "repo_name": "yash12khandelwal/Lanes_Mahindra", "src_encoding": "UTF-8", "text": "#include <opencv2/opencv.hpp>\n#include <bits/stdc++.h>\n#include <iostream>\n#include <ros/ros.h>\n#include \"geometry_msgs/Point.h\"\n#include \"geometry_msgs/Vector3.h\"\n#include <image_transport/image_transport.h>\n#include <cv_bridge/cv_bridge.h>\n#include <fstream>\n#include \"geometry_msgs/Polygon.h\"\n// #include \"armadillo\"\n#include <nav_msgs/Odometry.h>\n#include \"tf/tf.h\"\n#include <sensor_msgs/LaserScan.h>\n#include <dynamic_reconfigure/server.h>\n#include <lanes/TutorialsConfig.h>\n\n#include <params.hpp>\n#include \"lsd.cpp\"\n#include \"laneDetector_utils.cpp\"\n#include \"houghP.hpp\"\n#include \"laneDetector_utils.hpp\"\n#include \"lsd.h\"\n#include <ransac_new_2_yash.hpp>\n\n// #include <ransac.hpp>\n// #include \"mlesac.hpp\"\n#include <matrixTransformation.hpp>\n\nParabola lanes, previous;\n\nvector<Point> cost_map_lanes;\nsensor_msgs::LaserScan scan_global;\n\nvoid callback(node::TutorialsConfig &config, uint32_t level);\nMat findIntensityMaxima(Mat img);\nMat findEdgeFeatures(Mat img, bool top_edges);\nMat find_all_features(Mat edgeFeature, Mat b);\nMat fit_ransac(Mat img, Mat all_features);\nvoid publish_lanes(Mat lanes_by_ransac);\nvoid detect_lanes(Mat img);\nvoid imageCb(const sensor_msgs::ImageConstPtr& msg);\nsensor_msgs::LaserScan imageConvert(Mat image);\n\nint flag=1;\n\nusing namespace std;\nusing namespace cv;\nusing namespace ros;\n\nMat transform = (Mat_<double>(3, 3) << -0.2845660084796459, -0.6990548252793777, 691.2703423570697, -0.03794262877137361, -2.020741261264247, 1473.107653024983, -3.138403683957707e-05, -0.001727021397398348, 1);\nint size_X = 800;\nint size_Y = 1000;\n\nvoid callback(node::TutorialsConfig &config, uint32_t level)\n{\n is_debug = config.is_debug;\n\n // ransac parameters\n\titeration = config.iteration;\n\tmaxDist = config.maxDist;\n\tminLaneInlier = config.minLaneInlier;\n\tminPointsForRANSAC = config.minPointsForRANSAC;\n\t// grid_size = config.grid_size;\n\n // publish parameters\n\tpixelPerMeter = config.pixelPerMeter;\n\n\n // edgeFeatures parameters\n\thorizon = config.horizon;\n\thorizon_offset = config.horizon_offset;\n\n\ttransformedPoints0_lowerbound = config.transformedPoints0_lowerbound;\n\ttransformedPoints0_upperbound = config.transformedPoints0_upperbound;\n\tpoint1_y = config.point1_y;\n\tpoint2_x = config.point2_x;\n\n\t// intensity 
maxima parameters\n\t// h = config.h;\n\t// w = config.w;\n\t// variance = config.variance;\n\t// hysterisThreshold_min = config.hysterisThreshold_min;\n\t// hysterisThreshold_max = config.hysterisThreshold_max;\n\n\t// distance between first point of image and lidar\n\tyshift = config.yshift;\n\n\t// region of interest in all_features\n\t// y = config.y;\n\t// lane_width = config.lane_width;\n\t// k1 = config.k1;\n\t// k2 = config.k2;\n\n\t// blue channel image parameters\n\tmedianBlurkernel = config.medianBlurkernel;\n neighbourhoodSize = config.neighbourhoodSize;\n\n constantSubtracted = config.constantSubtracted;\n\n region = config.region;\n\n baseDistance1 = config.baseDistance1;\n centroidDistance = config.centroidDistance;\n}\n\n// Mat findIntensityMaxima(Mat img)\n// {\n// Mat topview = top_view(img, ::transform, size_X, size_Y);\n\n// GaussianBlur(topview, topview, Size(5, 15), 0, 0);\n// blur(topview, topview, Size(25,25));\n\n// if(is_debug == true){\n// \tnamedWindow(\"topview\", WINDOW_NORMAL);\n// \timshow(\"topview\", topview);\n// }\n\n// cvtColor(topview, topview, CV_BGR2GRAY);\n// medianBlur(topview, topview, 3);\n\n// // template matching\n// Mat t0, t1, t2, t3, t4, t5, t6;\n// matchTemplate(topview, getTemplateX2(2.1, h, w, -10), t0, CV_TM_CCOEFF_NORMED);\n// matchTemplate(topview, getTemplateX2(2.1, h, w, 0), t1, CV_TM_CCOEFF_NORMED);\n// matchTemplate(topview, getTemplateX2(2.1, h, w, 10), t2, CV_TM_CCOEFF_NORMED);\n// matchTemplate(topview, getTemplateX2(2.1, h, w, -20), t3, CV_TM_CCOEFF_NORMED);\n// matchTemplate(topview, getTemplateX2(2.1, h, w, +20), t4, CV_TM_CCOEFF_NORMED);\n// matchTemplate(topview, getTemplateX2(2.1, h, w, +30), t5, CV_TM_CCOEFF_NORMED);\n// matchTemplate(topview, getTemplateX2(2.1, h, w, -30), t6, CV_TM_CCOEFF_NORMED);\n\n// Mat t = t0-t0;\n// for(int i=0;i<t.rows;i++)\n// for(int j=0;j<t.cols;j++)\n// {\n// t.at<float>(i, j) = max( t4.at<float>(i, j), max(t3.at<float>(i, j), max(t0.at<float>(i, j), max(t1.at<float>(i, j), t2.at<float>(i, j)))));\n// t.at<float>(i, j) = max( t.at<float>(i, j), max(t5.at<float>(i, j), t6.at<float>(i, j)) );\n// }\n\n// // ########threshold###########\n// hysterisThreshold(t, topview, hysterisThreshold_min, hysterisThreshold_max);\n\n// Mat result=Mat(topview.rows+h-1, topview.cols+w-1,CV_8UC1, Scalar(0));\n// for(int i=0;i<topview.rows;i++)\n// for(int j=0;j<topview.cols;j++)\n// result.at<uchar>(i+(h-1)/2,j+(w-1)/2)=255*topview.at<float>(i,j);\n\n\n// if(is_debug==true)\n// {\n// namedWindow(\"intensityMaxima\", WINDOW_NORMAL);\n// imshow(\"intensityMaxima\", result);\n// }\n\n// return result;\n// }\n\nMat findEdgeFeatures(Mat img, bool top_edges)\n{\n vector<Vec4i> lines, lines_top;\n vector<int> line_lens;\n Mat edges;\n\n // this will create lines using lsd \n if(top_edges == false)\n {\n \n Mat src = img;\n Mat tmp, src_gray;\n cvtColor(src, tmp, CV_RGB2GRAY);\n tmp.convertTo(src_gray, CV_64FC1);\n\n int cols = src_gray.cols;\n int rows = src_gray.rows;\n image_double image = new_image_double(cols, rows);\n image->data = src_gray.ptr<double>(0);\n ntuple_list ntl = lsd(image);\n Mat lsd = Mat::zeros(rows, cols, CV_8UC1);\n Point pt1, pt2;\n for (int j = 0; j != ntl->size ; ++j)\n {\n Vec4i t;\n\n pt1.x = int(ntl->values[0 + j * ntl->dim]);\n pt1.y = int(ntl->values[1 + j * ntl->dim]);\n pt2.x = int(ntl->values[2 + j * ntl->dim]);\n pt2.y = int(ntl->values[3 + j * ntl->dim]);\n t[0]=pt1.x;\n t[1]=pt1.y;\n t[2]=pt2.x;\n t[3]=pt2.y;\n lines.push_back(t);\n int width = int(ntl->values[4 + j * 
ntl->dim]);\n\n line(lsd, pt1, pt2, Scalar(255), width + 1, CV_AA);\n }\n free_ntuple_list(ntl);\n edges=lsd;\n }\n\n // this will create lines using canny\n else\n {\n Mat topview = top_view(img, ::transform, size_X, size_Y);\n Canny(topview, edges, 200, 300);\n HoughLinesP(edges, lines_top, 1, CV_PI/180, 60, 60, 50);\n transformLines(lines_top, lines, ::transform.inv());\n }\n\n for( size_t i = 0; i < lines.size(); i++ )\n {\n Vec4i l = lines[i];\n Point2f inputPoints[2], transformedPoints[2];\n\n inputPoints[0] = Point2f( l[0], l[1] );\n inputPoints[1] = Point2f( l[2], l[3] ); \n transformPoints(inputPoints, transformedPoints, ::transform, 2);\n\n if(transformedPoints[0].x<transformedPoints0_lowerbound || transformedPoints[0].x>transformedPoints0_upperbound || transformedPoints[1].x<transformedPoints0_lowerbound || transformedPoints[1].x>transformedPoints0_upperbound || l[1] < point1_y || l[2] < point2_x)\n {\n lines.erase(lines.begin() + i);\n i--;\n }\n }\n\n int is_lane[lines.size()];\n for(int i=0;i<lines.size();i++)\n is_lane[i]=0;\n\n for(size_t i=0;i<lines.size();i++)\n for(size_t j=0;j<lines.size();j++)\n if(abs(findIntersection(lines[i],lines[j])-horizon)<horizon_offset)\n {\n is_lane[i]+=1;\n is_lane[j]+=1;\n }\n\n vector<Vec4i> lane_lines, lane_lines_top;\n\n for(int i=0;i<lines.size();i++)\n if(is_lane[i]>10)\n lane_lines.push_back(lines[i]);\n\n transformLines(lane_lines, lane_lines_top, ::transform);\n Mat edgeFeatures;\n Mat filtered_lines = Mat(img.size(),CV_8UC1, Scalar(0));\n for( size_t i = 0; i < lane_lines.size(); i++ )\n {\n Vec4i l = lane_lines[i];\n line(filtered_lines, Point(l[0], l[1]), Point(l[2], l[3]), Scalar(255), 5, CV_AA);\n }\n edgeFeatures = top_view(filtered_lines, ::transform, size_X, size_Y);\n\n if(is_debug==true)\n {\n namedWindow(\"edges\", WINDOW_NORMAL);\n namedWindow(\"edgeFeatures\", WINDOW_NORMAL);\n namedWindow(\"filtered_lines\", WINDOW_NORMAL);\n\n imshow(\"edges\", edges);\n imshow(\"edgeFeatures\", edgeFeatures);\n imshow(\"filtered_lines\", filtered_lines);\n }\n\n return edgeFeatures;\n}\n\nMat find_all_features(Mat edgeFeature, Mat b)\n{\n\tMat all_features = Mat(size_Y, size_X,CV_8UC1, Scalar(0));\n\n bitwise_and(edgeFeature, b, all_features);\n\n namedWindow(\"all_features\", WINDOW_NORMAL);\n imshow(\"all_features\", all_features);\n\n return all_features;\n}\n\nMat fit_ransac(Mat img, Mat all_features_frontview)\n{\n\t// Mat all_features_frontview = front_view(all_features, ::transform);\n\n Mat fitRansac = Mat(size_Y, size_X,CV_8UC1, Scalar(0));\n\n lanes = getRansacModel(all_features_frontview, previous);\n previous=lanes;\n\n Mat fitLanes = drawLanes(fitRansac, lanes);\n Mat originalLanes = drawLanes_white(top_view(img, ::transform, size_X, size_Y), lanes);\n // originalLanes = front_view(originalLanes, ::transform);\n\n namedWindow(\"lanes_by_ransac\", WINDOW_NORMAL);\n imshow(\"lanes_by_ransac\", fitLanes);\n\n namedWindow(\"Orignal_lanes_by_ransac\", WINDOW_NORMAL);\n imshow(\"Orignal_lanes_by_ransac\", originalLanes);\n\n return fitLanes;\n}\n\nvoid publish_lanes(Mat lanes_by_ransac)\n{\n scan_global = imageConvert(lanes_by_ransac); \n}\n\nMat blueChannelProcessing(Mat img)\n{\n Mat channels[3];\n split(img, channels);\n Mat b = channels[0];\n\n GaussianBlur(b , b, Size( 9, 9), 0, 0);\n adaptiveThreshold(b,b,255,ADAPTIVE_THRESH_MEAN_C,THRESH_BINARY,neighbourhoodSize, constantSubtracted);\n medianBlur(b,b,medianBlurkernel);\n\n if(is_debug){\n \tnamedWindow(\"blue channel image\", WINDOW_NORMAL);\n \timshow(\"blue channel 
image\", b);\n }\n\n return b;\n}\n\nMat roi(Mat all_features)\n{\n for(int i = 0; i < all_features.rows; i++)\n {\n for(int j = 0; j < all_features.cols; j++)\n {\n if(j > region)\n all_features.at<uchar>(i,j) = 0;\n }\n }\n\n return all_features;\n}\n\nvoid detect_lanes(Mat img)\n{\n if(is_debug==true)\n {\n namedWindow(\"original\", WINDOW_NORMAL);\n imshow(\"original\", img);\n }\n\n // initialize boundary with a matrix of (800*400)\n Mat boundary = Mat(size_Y, size_X, CV_8UC1, Scalar(0));\n\n // intenity maximum image made\n // Mat intensityMaxima = findIntensityMaxima(img);\n\n // image with edge features made\n Mat edgeFeature = findEdgeFeatures(img, false);\n\n // blue channel image\n Mat b = blueChannelProcessing(img);\n Mat b_topview = top_view(b, ::transform, size_X, size_Y);\n\n if(is_debug) {\n namedWindow(\"b_topview\", WINDOW_NORMAL);\n imshow(\"b_topview\", b_topview);\n }\n\n // return all features in top view\n Mat all_features = find_all_features(edgeFeature, b_topview);\n\n all_features = roi(all_features);\n\n // Mat all_features_frontview = front_view(all_features, ::transform);\n\n namedWindow(\"all_features\", WINDOW_NORMAL);\n imshow(\"all_features\", all_features);\n\n // namedWindow(\"all_features_frontview\", WINDOW_NORMAL);\n // imshow(\"all_features_frontview\", all_features_frontview);\n\n Mat lanes_by_ransac = fit_ransac(img, all_features);\n // Mat lanes_by_ransac = fit_ransac(all_features_frontview);\n\n publish_lanes(lanes_by_ransac);\n}\n\nvoid imageCb(const sensor_msgs::ImageConstPtr& msg)\n{\n flag=1;\n Mat img;\n cv_bridge::CvImagePtr cv_ptr;\n\n cout<<\"in callback\"<<endl;\n\n try\n {\n cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8);\n img = cv_ptr->image;\n }\n catch (cv_bridge::Exception& e)\n {\n ROS_ERROR(\"cv_bridge exception: %s\", e.what());\n return;\n }\n if( !img.data ) { printf(\"Error loading A \\n\"); return ; }\n\n detect_lanes(img);\n}\n\nsensor_msgs::LaserScan imageConvert(Mat img) /// Input binary image for conversion to laserscan\n{\n int bins = 1080;\n int row = img.rows;\n int col = img.cols;\n sensor_msgs::LaserScan scan;\n scan.angle_min = -CV_PI/2;\n scan.angle_max = CV_PI/2;\n scan.angle_increment = CV_PI/bins;\n double inf = std::numeric_limits<double>::infinity();\n scan.range_max = inf; \n\n scan.header.frame_id = \"laser\";\n scan.header.stamp = ros::Time::now();\n scan.scan_time = 0.025;\n scan.time_increment = (float)(scan.scan_time)/bins;\n\n namedWindow(\"check\", WINDOW_NORMAL);\n imshow(\"check\", img);\n\n for (int i=0;i<bins;i++)\n {\n scan.ranges.push_back(scan.range_max);\n }\n\n scan.range_max = 80;\n for(int i = 0; i < row; ++i)\n {\n for(int j = 0; j < col; ++j)\n {\n if(img.at<uchar>(i,j)>0)\n {\n float a = (j - col/2)/pixelPerMeter;\n float b = (row - i)/pixelPerMeter + yshift;\n\n double angle = atan(a/b);\n\n double r = sqrt(a*a + b*b);\n\n int k = (angle - scan.angle_min)/(scan.angle_increment);\n if (r < scan.ranges[bins-k-1]) {\n scan.ranges[bins-k-1] = r ;\n }\n\n }\n }\n }\n\n return scan; /// returns Laserscan data\n}\n\nint main(int argc, char **argv)\n{\n init(argc, argv, \"lanes\");\n NodeHandle nh_;\n image_transport::ImageTransport it_(nh_);\n\n dynamic_reconfigure::Server<node::TutorialsConfig> server;\n dynamic_reconfigure::Server<node::TutorialsConfig>::CallbackType f;\n f = boost::bind(&callback, _1, _2);\n server.setCallback(f);\n\n image_transport::Subscriber image_sub_ = it_.subscribe(\"/camera/image_color\", 1,&imageCb);\n\n // Publisher lanepub = 
nh_.advertise<geometry_msgs::Polygon>(\"lane_points\", 1);\n Publisher lanes_pub = nh_.advertise<sensor_msgs::LaserScan>(\"/lanes\", 10);\n \n Rate r(1);\n while(ok())\n {\n if(flag==0)\n {\n cout<< \"Waiting for Image\" << endl;\n }\n else\n { \n lanes_pub.publish(scan_global);\n }\n\n\n\t\tflag=0;\n\t\twaitKey(500);\n\t\tspinOnce();\n\t\tr.sleep();\n }\n\n destroyAllWindows();\n}\n" }, { "alpha_fraction": 0.4640451967716217, "alphanum_fraction": 0.5199416279792786, "avg_line_length": 32.87705993652344, "blob_id": "37448f29355c41c6728709fc39d43dc1c2c6d030", "content_id": "c9ab99beb687d8e2b98c0b56ea77d88177c7e103", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 26728, "license_type": "no_license", "max_line_length": 224, "num_lines": 789, "path": "/src/laneDetector_commented.cpp", "repo_name": "yash12khandelwal/Lanes_Mahindra", "src_encoding": "UTF-8", "text": "#include <opencv2/opencv.hpp>\n#include <bits/stdc++.h>\n#include <iostream>\n\n#include <ros/ros.h>\n\n#include \"geometry_msgs/Point.h\"\n#include \"geometry_msgs/Vector3.h\"\n#include <image_transport/image_transport.h>\n#include <cv_bridge/cv_bridge.h>\n#include <fstream>\n#include \"geometry_msgs/Polygon.h\"\n\n#include \"lsd.cpp\"\n#include \"laneDetector_utils.cpp\"\n#include \"../include/houghP.hpp\"\n#include \"../include/laneDetector_utils.hpp\"\n#include \"../include/lsd.h\"\n\n// #include \"armadillo\"\n\nusing namespace std;\nusing namespace cv;\n\nstatic const std::string OPENCV_WINDOW = \"Image window\";\nint flag=1;\nbool FIRST_FRAME = true;\narma::vec curve; //curve is a armadillo vector\n\nstring image_name;\n\nbool tracking_status = false;\nint frames_tracked = 0;\n\nbool last_frame_low_features = false, current_frame_low_features = false;\nbool one_frame_low_features = false;\n\nVideoWriter out;\n\nMat findEdgeFeatures(Mat img, bool top_edges, bool debug)\n{\n //medianBlur(img, img, 3);\n Mat transform = (Mat_<double>(3, 3) << 3.57576055e-01, 2.07240514e+00, -5.91721615e+02, 1.67087151e-01, 9.43860355e+00, -2.02168615e+03, 1.81143049e-04, 1.03884056e-02, -1.97849048e+00);\n int horizon = 180; // Hardcode for kitti dataset\n vector<Vec4i> lines, lines_top;\n vector<int> line_lens;\n Mat edges;\n \n //detects line segments in 2 ways 1)by using lsd and 2)by using hough lines. 
it plots all detected lines in an image edges\n if(top_edges == false)\n {\n \n Mat src = img;\n Mat tmp, src_gray;\n cvtColor(src, tmp, CV_RGB2GRAY);\n tmp.convertTo(src_gray, CV_64FC1);\n\n\n int cols = src_gray.cols;\n int rows = src_gray.rows;\n image_double image = new_image_double(cols, rows);\n image->data = src_gray.ptr<double>(0);\n ntuple_list ntl = lsd(image);\n Mat lsd = Mat::zeros(rows, cols, CV_8UC1);\n Point pt1, pt2;\n for (int j = 0; j != ntl->size ; ++j)\n {\n Vec4i t;\n\n pt1.x = int(ntl->values[0 + j * ntl->dim]);\n pt1.y = int(ntl->values[1 + j * ntl->dim]);\n pt2.x = int(ntl->values[2 + j * ntl->dim]);\n pt2.y = int(ntl->values[3 + j * ntl->dim]);\n t[0]=pt1.x;\n t[1]=pt1.y;\n t[2]=pt2.x;\n t[3]=pt2.y;\n lines.push_back(t);\n int width = int(ntl->values[4 + j * ntl->dim]);\n \n // done by me\n if (width == 0) width++;\n\n line(lsd, pt1, pt2, Scalar(255), width, CV_AA);\n }\n free_ntuple_list(ntl);\n edges=lsd;\n }\n else\n {\n Mat topview;\n warpPerspective(img, topview, transform, Size(400, 800), INTER_NEAREST, BORDER_CONSTANT); //convert to top view\n Canny(topview, edges, 200, 300);\n HoughLinesP(edges, lines_top, 1, CV_PI/180, 60, 60, 50);\n transformLines(lines_top, lines, transform.inv());\n }\n for( size_t i = 0; i < lines.size(); i++ )\n {\n Vec4i l = lines[i]; //lines is a vector which stores all the line segments detected, each element contains 2 points(4 coordinates)\n Point2f inputPoints[2], transformedPoints[2];\n inputPoints[0] = Point2f( l[0], l[1] );\n inputPoints[1] = Point2f( l[2], l[3] ); \n transformPoints(inputPoints, transformedPoints, transform, 2);\n\n if(transformedPoints[0].x<75 || transformedPoints[0].x>325 || transformedPoints[1].x<75 || transformedPoints[1].x>325 || l[1] < 200 || l[2] < 200)\n {\n lines.erase(lines.begin() + i);\n i--;\n }\n }\n\n int is_lane[lines.size()]; //polling for a line segment to be part of a lane.\n for(int i=0;i<lines.size();i++)\n is_lane[i]=0;\n\n for(size_t i=0;i<lines.size();i++)\n for(size_t j=0;j<lines.size();j++)\n if(abs(findIntersection(lines[i],lines[j])-horizon)<10)\n {\n is_lane[i]+=1;\n is_lane[j]+=1;\n }\n\n vector<Vec4i> lane_lines, lane_lines_top;\n\n for(int i=0;i<lines.size();i++)\n if(is_lane[i]>10) //threshold for line segment to be part of lane\n lane_lines.push_back(lines[i]); //store the candidates of lanes in a lane_lines vector\n\n transformLines(lane_lines, lane_lines_top, transform);\n\n Mat edgeFeatures(800, 400 ,CV_8UC1, Scalar(0));\n for( size_t i = 0; i < lane_lines_top.size(); i++ )\n {\n Vec4i l = lane_lines_top[i];\n line(edgeFeatures, Point(l[0], l[1]), Point(l[2], l[3]), Scalar(255), 1, CV_AA);\n }\n\n if(debug==true)\n {\n Mat filtered_lines = img.clone();\n for( size_t i = 0; i < lane_lines.size(); i++ )\n {\n Vec4i l = lane_lines[i];\n line(filtered_lines, Point(l[0], l[1]), Point(l[2], l[3]), Scalar(255,0, 0), 5, CV_AA);\n }\n imshow(\"edges\", edges);\n imshow(\"edgeFeatures\", edgeFeatures);\n imshow(\"filtered_lines\", filtered_lines);\n }\n\n // cvtColor(edgeFeatures, edgeFeatures, CV_BGR2GRAY);\n return edgeFeatures;\n}\n\n// Mat findRoadBoundaries(Mat img, bool debug)\n// {\n// Mat road = imread(\"/home/tejus/Documents/ml/KittiSeg/results/\"+image_name);\n// Mat road_top, road_boundary;\n\n// Mat transform = (Mat_<double>(3, 3) << 3.57576055e-01, 2.07240514e+00, -5.91721615e+02, 1.67087151e-01, 9.43860355e+00, -2.02168615e+03, 1.81143049e-04, 1.03884056e-02, -1.97849048e+00);\n// warpPerspective(road, road_top, transform, Size(400, 800), INTER_NEAREST, 
BORDER_CONSTANT);\n\n// Canny(road_top, road_boundary, 200, 300);\n\n// if(debug)\n// {\n// imshow(\"road_top\", road_top);\n// imshow(\"road_boundary\", road_boundary);\n// }\n\n// return road_boundary;\n// }\n\n\n\n\nMat findIntensityMaxima(Mat img, bool debug)\n{\n Mat topview;\n Mat transform = (Mat_<double>(3, 3) << 3.57576055e-01, 2.07240514e+00, -5.91721615e+02, 1.67087151e-01, 9.43860355e+00, -2.02168615e+03, 1.81143049e-04, 1.03884056e-02, -1.97849048e+00);\n warpPerspective(img, topview, transform, Size(400, 800), INTER_NEAREST, BORDER_CONSTANT);//find top view using homography matrix\n\n if (debug==true)\n imshow(\"topview\", topview);\n\n int h = 51, w = 35; // height and width of the Gaussian template to match with the lanes\n cvtColor(topview, topview, CV_BGR2GRAY);\n GaussianBlur(topview, topview, Size(1, 11), 0.01, 4);\n Mat t0, t1, t2, t3, t4, t5, t6;\n /* create gaussian templates of sigma 2, h and w dimensions and angle in degrees[getTemplateX2 in laneDetector_utils.cpp]. \n Then match with topview using template matching and store the Mat containing the scores of template matching*/\n \n matchTemplate(topview, getTemplateX2(2, h, w, -10), t0, CV_TM_CCOEFF_NORMED);\n matchTemplate(topview, getTemplateX2(2, h, w, 0), t1, CV_TM_CCOEFF_NORMED);\n matchTemplate(topview, getTemplateX2(2, h, w, 10), t2, CV_TM_CCOEFF_NORMED);\n matchTemplate(topview, getTemplateX2(2, h, w, -20), t3, CV_TM_CCOEFF_NORMED);\n matchTemplate(topview, getTemplateX2(2, h, w, +20), t4, CV_TM_CCOEFF_NORMED);\n matchTemplate(topview, getTemplateX2(2, h, w, +30), t5, CV_TM_CCOEFF_NORMED);\n matchTemplate(topview, getTemplateX2(2, h, w, -30), t6, CV_TM_CCOEFF_NORMED);\n\n Mat t = t0-t0;\n for(int i=0;i<t.rows;i++)\n for(int j=0;j<t.cols;j++)\n {\n t.at<float>(i, j) = max( t4.at<float>(i, j), max(t3.at<float>(i, j), max(t0.at<float>(i, j), max(t1.at<float>(i, j), t2.at<float>(i, j)))));\n t.at<float>(i, j) = max( t.at<float>(i, j), max(t5.at<float>(i, j), t6.at<float>(i, j)) ); /*comparing with the Mat containing scores for gaussians\n at different angles we take the max score for each pixel of the images */ \n }\n\n\n hysterisThreshold(t, topview, 0.2, 0.4); //if a pixel has intensity above the first threshold and is connected to a point having intensity above the second threshold then make white\n\n Mat result=Mat(topview.rows+h-1, topview.cols+w-1, CV_8UC3, Scalar(0,0,0)); //padding for the template\n for(int i=0;i<topview.rows;i++)\n for(int j=0;j<topview.cols;j++)\n result.at<Vec3b>(i+(h-1)/2,j+(w-1)/2)={255*topview.at<float>(i,j), 255*topview.at<float>(i,j), 255*topview.at<float>(i,j)}; //topview of lanes is stored in result and is roughly converted to BGR image\n\n\n // if(debug==true)\n imshow(\"intensityMaxima\", result);\n\n return result;\n}\n\nvoid initializeCurve()\n{\n cout<<\"Reinitializing !!\"<<endl<<endl;\n curve << 0 << 0 << 200 << 74;\n}\n\n\nbool checkLaneChange(Mat img){\n Mat transform = (Mat_<double>(3, 3) << 3.57576055e-01, 2.07240514e+00, -5.91721615e+02, 1.67087151e-01, 9.43860355e+00, -2.02168615e+03, 1.81143049e-04, 1.03884056e-02, -1.97849048e+00);\n transform=transform.inv();\n int x = 0 ;\n int y_left = curve[0]*pow(x, 2) + curve[1]*pow(x, 1) + curve[2]*pow(x, 0) - curve[3]/2;\n int y_right = curve[0]*pow(x, 2) + curve[1]*pow(x, 1) + curve[2]*pow(x, 0) + curve[3]/2;\n\n x = 800;\n int transformed_yleft = (y_left*transform.at<double>(0, 0)+x*transform.at<double>(0, 1)+1.0*transform.at<double>(0, 2))/(y_left*transform.at<double>(2, 0)+x*transform.at<double>(2, 
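The tracker state is the armadillo vector curve = (a, b, c, w): in top-view coordinates the lane centre follows y = a*x^2 + b*x + c with x measured up from the bottom row, and w is the lane width in pixels (initializeCurve resets it to 0, 0, 200, 74). checkLaneChange, continued below, projects both lane borders at the bottom row back through the inverse homography and shifts c by one width when the tracked pair has drifted onto a neighbouring lane; a condensed restatement of that decision, assuming the projected border abscissas are already computed:

```cpp
#include <armadillo>

// Condensed restatement of checkLaneChange()'s decision rule.
bool laneShifted(double yLeftImg, double yRightImg, int imgCols, arma::vec &curve) {
    if (yRightImg < imgCols / 2.0) { curve[2] += curve[3]; return true; } // tracked the left lane: shift right
    if (yLeftImg  > imgCols / 2.0) { curve[2] -= curve[3]; return true; } // tracked the right lane: shift left
    return false;
}
```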
1)+1.0*transform.at<double>(2, 2)); \n int transformed_yright = (y_right*transform.at<double>(0, 0)+x*transform.at<double>(0, 1)+1.0*transform.at<double>(0, 2))/(y_right*transform.at<double>(2, 0)+x*transform.at<double>(2, 1)+1.0*transform.at<double>(2, 2));\n\n //the y coordinates in original image of the left and right lanes the the bottom. if the left lane coordinate is greater than cols/2 then the lane tracked \n //is right and we shift it left, or vice versa\n if(transformed_yright<(img.cols/2))\n {\n cout<<\"Lane shift right\"<<endl;\n curve[2] += curve[3];\n return true;\n \n }\n else if(transformed_yleft>(img.cols/2))\n {\n cout<<\"Lane shift left\"<<endl;\n curve[2] -= curve[3];\n return true;\n }\n cout<< transformed_yleft<< \" \" << transformed_yright<< endl;\n cout<< transformed_yleft<< \" \" << transformed_yright<< endl;\n return false;\n}\n\nbool checkPrimaryLane(Mat boundary, Mat edges, Mat intensityMaxima)\n{\n cout<<\"c0\"<<endl;\n\n // makiing a feature image similar to size of edges and initilalizing it with 0\n Mat features = edges - edges;\n \n // editing features on the basis of intesityMaxima, boundary, edges image\n // making particular pixel value of feature image 1 on basis of condition\n for(int i=0;i<features.rows;i++)\n for(int j=0;j<features.cols;j++)\n {\n if(intensityMaxima.at<Vec3b>(i, j)[0]>0 || boundary.at<uchar>(i, j)>0 || edges.at<uchar>(i, j)>0)\n features.at<uchar>(i, j) = 255;\n }\n\n float error = 0;\n int n_points = 0;\n int nl_points=0, nr_points=0;\n\n cout<<\"check\"<<endl;\n //give a guess of curve\n //x= f(i) from bottom.Calculate y for an x and a given curve. now iterate between [(i,w/2-k1),(i,w/2+k2)][offset -ve] and [(i,w/2-k1),(i,w/2+k2)][offset +ve]\n //calculate number of left and right inliners in nl and nr variables\n //calculate the min error(proportional to offset of an inliner) and store in min_row_error\n //sum of all min_row_errors is the error term which has to be minimised\n for(int i=350;i<edges.rows;i++)\n {\n float w = curve[3];\n int k1 = 25;\n int k2 = 15;\n int x = edges.rows - i;\n int y = curve[0]*pow(x, 2) + curve[1]*pow(x, 1) + curve[2]*pow(x, 0);\n\n float min_row_error = 99999;\n\n for(int offset = w/2 - k1; offset < w/2 + k2; offset++)\n {\n for(int l = 0; l < 2; l++ )\n {\n if( l > 0 ) offset = -offset;\n\n int j = (int)(y+offset);\n\n if(j>0 && j<400)\n {\n if(features.at<uchar>(i, j)>0)\n {\n min_row_error = min(min_row_error, (float)pow(j-y, 2));\n n_points++;\n\n if(l==0) nr_points++;\n else nl_points++;\n }\n }\n if( l > 0 ) offset = -offset;\n }\n }\n error += min_row_error;\n }\n cout<<error<<\", \"<<error/(n_points+1)<<\", \"<<n_points<<endl;\n cout<<nl_points<<\",\"<<nr_points<<endl;\n cout<<max(nl_points, nr_points)<<endl;\n\n last_frame_low_features = current_frame_low_features;\n //assume nl>nr\n //low feature is true if (nl<2500)&(nr<700) OR nr<500\n //if min<500 low features are true\n //if min belongs 500 to 700 then check if max is less than 2500 for low features to be true\n //if min> 700 then always false\n current_frame_low_features = (max(nl_points, nr_points) < 2500 && min(nl_points, nr_points) < 700) || min(nl_points, nr_points) < 500;\n\n one_frame_low_features = min(nl_points, nr_points) < 450;\n\n //previous consideration of low features included\n if(tracking_status==true)\n {\n if(current_frame_low_features && last_frame_low_features)\n {\n frames_tracked = 0;\n tracking_status = false;\n return false;\n }\n return true;\n }\n else\n {\n if(current_frame_low_features || 
last_frame_low_features)\n {\n frames_tracked = 0;\n return false;\n }\n else\n {\n frames_tracked++;\n if(frames_tracked>=5)\n {\n tracking_status = true;\n return true;\n }\n return false;\n }\n }\n}\n\nvector<Point> fitCurve(Mat img, Mat boundary, Mat edges, Mat intensityMaxima, bool debug=false)\n{\n //takes input from intensity maxima and edge features both passed in top view\n //boundry is a blank image\n\n\n //if the frame is first then we hard code the predicted curve\n if(FIRST_FRAME==true)\n {\n //initializeCurve();\n curve << 0 << 0 << 200 << 60;\n FIRST_FRAME = false;\n }\n\n tracking_status = checkPrimaryLane(boundary, edges, intensityMaxima); //for updating current_lane_low features and tracking\n \n if(!tracking_status)\n {\n initializeCurve();\n }\n\n checkLaneChange(img); //shifts the lane to central lane if our guess tracked the left or right lanes\n\n float w = curve[3];\n int k1 = 15;\n int k2 = 10;\n\n int n_points = 0;\n for(int i=350;i<edges.rows;i++)\n {\n int x = edges.rows - i;\n int y = curve[0]*pow(x, 2) + curve[1]*pow(x, 1) + curve[2]*pow(x, 0);\n\n bool found_closest_left = false;\n bool found_closest_right = false;\n for(int offset = w/2 - k1; offset < w/2 + k2; offset++)\n {\n for(int l = 0; l < 2; l++ )\n {\n if( l > 0 )\n offset = -offset;\n\n int j = (int)(y+offset);\n\n if(j>0 && j<400)\n {\n if(intensityMaxima.at<Vec3b>(i, j)[0]>5 && ((l==0 && !found_closest_right) || (l==1 && !found_closest_left)))\n {\n n_points++;\n if(l==0)\n found_closest_right = true;\n else\n found_closest_left = true;\n }\n else if(edges.at<uchar>(i, j)>5 && ((l==0 && !found_closest_right) || (l==1 && !found_closest_left)))\n {\n n_points++;\n if(l==0)\n found_closest_right = true;\n else\n found_closest_left = true;\n }\n else if(boundary.at<uchar>(i, j)>5 && ((l==0 && !found_closest_right) || (l==1 && !found_closest_left)))\n {\n n_points++;\n if(l==0)\n found_closest_right = true;\n else\n found_closest_left = true;\n }\n }\n if( l > 0 )\n offset = -offset;\n }\n }\n }\n\n // if(n_points==0)\n // {\n // cout<<\"Tracking lost: No points on curve!\"<<endl;\n // return;\n // }\n // else\n // {\n // cout<<\"n_points: \"<<n_points<<endl;\n // }\n\n arma::mat B(n_points, 4);\n arma::vec X(n_points);\n n_points = 0;\n\n //Mat all_features = edges - edges;\n Mat all_features=Mat(800, 400,CV_8UC3, Scalar(0,0,0));\n for(int i=0;i<edges.rows;i++)\n for(int j=0;j<edges.cols;j++)\n {\n if(intensityMaxima.at<Vec3b>(i, j)[0]>5)\n {\n all_features.at<Vec3b>(i, j) = {255, 100, 100};\n }\n else if(edges.at<uchar>(i, j)>5)\n {\n all_features.at<Vec3b>(i, j) = {100, 255, 100};\n }\n else if(boundary.at<uchar>(i, j)>5)\n {\n all_features.at<Vec3b>(i, j) = {100, 100, 255};\n }\n }\n //Mat features = intensityMaxima - intensityMaxima;\n Mat features=Mat(800, 400,CV_8UC3, Scalar(0,0,0));\n\n for(int i=350;i<edges.rows;i++)\n {\n int x = edges.rows - i;\n int y = curve[0]*pow(x, 2) + curve[1]*pow(x, 1) + curve[2]*pow(x, 0);\n\n bool found_closest_left = false;\n bool found_closest_right = false;\n for(int offset = w/2 - k1; offset < w/2 + k2; offset++)\n {\n for(int l = 0; l < 2; l++ )\n {\n if( l > 0 )\n offset = -offset;\n\n int j = (int)(y+offset);\n\n if(j>0 && j<400)\n {\n if(intensityMaxima.at<Vec3b>(i, j)[0]>5 && ((l==0 && !found_closest_right) || (l==1 && !found_closest_left)))\n {\n features.at<Vec3b>(i, j) = {255, 100, 100};\n arma::rowvec temp;\n temp << x*x << x << 1 << (-l + 0.5);\n B.row(n_points) = temp;\n X[n_points] = j;\n n_points++;\n if(l==0)\n found_closest_right = true;\n else\n 
found_closest_left = true;\n }\n else if(edges.at<uchar>(i, j)>5 && ((l==0 && !found_closest_right) || (l==1 && !found_closest_left)))\n {\n features.at<Vec3b>(i, j) = {100, 255, 100};\n arma::rowvec temp;\n temp << x*x << x << 1 << (-l + 0.5);\n B.row(n_points) = temp;\n X[n_points] = j;\n n_points++;\n if(l==0)\n found_closest_right = true;\n else\n found_closest_left = true;\n }\n else if(boundary.at<uchar>(i, j)>5 && ((l==0 && !found_closest_right) || (l==1 && !found_closest_left)))\n {\n features.at<Vec3b>(i, j) = {100, 100, 255};\n arma::rowvec temp;\n temp << x*x << x << 1 << (-l + 0.5);\n B.row(n_points) = temp;\n X[n_points] = j;\n n_points++;\n if(l==0)\n found_closest_right = true;\n else\n found_closest_left = true;\n }\n else\n features.at<Vec3b>(i, j) = {100, 100, 100};\n }\n if( l > 0 )\n offset = -offset;\n }\n }\n }\n\n double lambda = 0.1; \n arma::vec offset;\n offset << 0 << 0 << 200 << 60;\n arma::vec new_curve = inv(B.t()*B + lambda*arma::eye(4, 4))*(B.t()*X + lambda*offset);\n\n cout<<\"new curve: \"<<new_curve<<endl;\n\n for(int i=0;i<n_points;i++)\n {\n int x = B.at(i, 1);\n int y = X[i];\n features.at<Vec3b>(features.rows-x, y) = {255, 255, 255};\n }\n\n\n bool large_width_change = abs((curve[3]-new_curve[3])/curve[3]) > 0.5 || new_curve[3] < 40 || new_curve[3] > 150;\n if(!large_width_change && !one_frame_low_features)\n {\n curve[0] = (curve[0]+new_curve[0])/2;\n curve[1] = (curve[1]+new_curve[1])/2;\n curve[2] = (curve[2]+new_curve[2])/2;\n curve[3] = (curve[3]+new_curve[3])/2;\n }\n else\n {\n curve[0] = (curve[0]+new_curve[0])/2;\n curve[1] = (curve[1]+new_curve[1])/2;\n curve[2] = new_curve[2] - new_curve[3]/2 + curve[3]/2;\n curve[3] = curve[3];\n cout<<\"width not updated!\"<<endl;\n }\n if(debug==true)\n {\n imshow(\"features\", features);\n imshow(\"all_features\", all_features);\n }\n Mat topview, lanes;\n Mat transform = (Mat_<double>(3, 3) << 3.57576055e-01, 2.07240514e+00, -5.91721615e+02, 1.67087151e-01, 9.43860355e+00, -2.02168615e+03, 1.81143049e-04, 1.03884056e-02, -1.97849048e+00);\n warpPerspective(img, topview, transform, Size(400, 800), INTER_NEAREST, BORDER_CONSTANT);\n\n int y1, y2, y3, y4;\n for(int i=160;i<topview.rows;i++)\n {\n int x = topview.rows - i;\n int y = curve[0]*pow(x, 2) + curve[1]*pow(x, 1) + curve[2]*pow(x, 0);\n\n\n if(y<0 || y>400) continue;\n\n topview.at<Vec3b>(i, y) = {0, 0, 255};\n topview.at<Vec3b>(i, y - w/2) = {255, 0, 0};\n topview.at<Vec3b>(i, y + w/2) = {255, 0, 0};\n\n topview.at<Vec3b>(i, y+1) = {0, 0, 255};\n topview.at<Vec3b>(i, y - w/2+1) = {255, 0, 0};\n topview.at<Vec3b>(i, y + w/2+1) = {255, 0, 0};\n\n topview.at<Vec3b>(i, y-1) = {0, 0, 255};\n topview.at<Vec3b>(i, y - w/2-1) = {255, 0, 0};\n topview.at<Vec3b>(i, y + w/2-1) = {255, 0, 0};\n if(i==400)\n {\n y1=y - w/2;\n y4=y + w/2;\n }\n if(i==799)\n {\n y2=y - w/2;\n y3=y + w/2;\n }\n }\n\n // float y1, y2;\n\n // y1 = curve[0]*pow(topview.rows-160, 2) + curve[1]*pow(topview.rows-160, 1) + curve[2]*pow(topview.rows-160, 0);\n // y2 = curve[0]*pow(1, 2) + curve[1]*pow(1, 1) + curve[2]*pow(1, 0);\n\n // temp.push_back(Point(y1 - w/2, 160));\n // temp.push_back(Point(y1 + w/2, 160));\n // temp.push_back(Point(y2 - w/2, topview.rows));\n // temp.push_back(Point(y2 + w/2, topview.rows));\n\n // cout<<\"(x1,y1): \"<< y1 - w/2 << 160 << endl;\n // cout<<\"(x2,y2): \"<< y1 + w/2 << 160 << endl;\n // cout<<\"(x3,y3): \"<< y2 - w/2 << topview.rows << endl;\n // cout<<\"(x4,y4): \"<< y2 + w/2 << topview.rows << endl;\n \n imshow(\"trial\",topview);\n 
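The update just above is ridge regression toward a prior: every matched feature contributes a row (x^2, x, 1, -l + 1/2) of B with its column j as the target, and curve = (B'B + lambda*I)^-1 (B'X + lambda*curve0) shrinks the solution toward the default curve0 = (0, 0, 200, 60) when evidence is thin. The same closed-form solve, isolated:

```cpp
// Isolated ridge solve used by fitCurve(): requires armadillo.
#include <armadillo>

arma::vec ridgeFit(const arma::mat &B, const arma::vec &X,
                   double lambda, const arma::vec &prior) {
    // (B'B + lambda*I)^-1 (B'X + lambda*prior): least squares shrunk toward prior
    return arma::inv(B.t() * B + lambda * arma::eye(B.n_cols, B.n_cols))
           * (B.t() * X + lambda * prior);
}
```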
warpPerspective(topview, lanes, transform.inv(), Size(1242, 375), INTER_NEAREST, BORDER_CONSTANT);\n    \n    // those pixels in which the lane was not tracked were made 0, so now making those pixel values the same as the original image pixel values\n    for(int i=0;i<lanes.rows;i++)\n        for(int j=0;j<lanes.cols;j++)\n            if(lanes.at<Vec3b>(i,j)[0]==0 && lanes.at<Vec3b>(i,j)[1]==0 && lanes.at<Vec3b>(i,j)[2]==0)\n            {\n                lanes.at<Vec3b>(i,j)[0] = img.at<Vec3b>(i,j)[0];\n                lanes.at<Vec3b>(i,j)[1] = img.at<Vec3b>(i,j)[1];\n                lanes.at<Vec3b>(i,j)[2] = img.at<Vec3b>(i,j)[2];\n            }\n\n    if(!tracking_status)\n        lanes = img;\n\n    if(tracking_status)\n        putText(lanes, \"Tracking Lanes\", Point(50,100), FONT_HERSHEY_SIMPLEX, 1, Scalar(0,255,0), 2);\n    else\n        putText(lanes, \"Initializing Tracking.\", Point(50,100), FONT_HERSHEY_SIMPLEX, 1, Scalar(0,0,255), 2);\n\n\n    imshow(\"lanes\", lanes);\n\n    vector<Point> temp;\n    temp.push_back(Point(400,y1));\n    temp.push_back(Point(800,y2));\n    temp.push_back(Point(800,y3));\n    temp.push_back(Point(400,y4));\n    return temp;\n    \n}\n\nvector<Point> detect_lanes(Mat img, bool debug = true)\n{\n    // display the original image\n    if(debug==true)\n        imshow(\"original\", img);\n\n    //Mat boundary = findRoadBoundaries(img, true);\n    \n    // initialize boundary with a matrix of (800*400)\n    // Mat boundary = Mat(800, 400, CV_8UC1, Scalar(0));\n\n    // intensity maxima image made\n    Mat intensityMaxima = findIntensityMaxima(img, true);\n    \n    // image with edge features made\n    Mat edgeFeature = findEdgeFeatures(img, false, true);\n\n    // curve fit on the basis of the original image, the intensity maxima image and the edgeFeature image\n    \n    vector<Point> v;\n    // v = fitCurve(img, boundary, edgeFeature, intensityMaxima, true);\n    // imshow(\"boundary\",boundary);\n    \n    // printing curve dimensions\n    cout<<curve<<endl;\n    return v;\n}\n\n\nvector<Point> a;\n\nvoid imageCb(const sensor_msgs::ImageConstPtr& msg)\n{\n    flag=1;\n    Mat img;\n    cv_bridge::CvImagePtr cv_ptr;\n\n    cout<<\"in callback\"<<endl;\n\n    try\n    {\n        cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8);\n        img = cv_ptr->image;\n    }\n    catch (cv_bridge::Exception& e)\n    {\n        ROS_ERROR(\"cv_bridge exception: %s\", e.what());\n        return;\n    }\n    if( !img.data ) { printf(\"Error loading A \\n\"); return ; }\n\n    a = detect_lanes(img,false);\n    waitKey(1);\n}\n\nint main(int argc, char **argv)\n{\n    // ros declaration\n    ros::init(argc, argv, \"lanes\");\n\n    ros::NodeHandle nh_;\n    image_transport::ImageTransport it_(nh_);\n    image_transport::Subscriber image_sub_;\n    \n    ros::Publisher lanepub;\n    cv_bridge::CvImagePtr cv_ptr;\n    \n    geometry_msgs::Point32 p1;\n    geometry_msgs::Point32 p2;\n    geometry_msgs::Point32 p3;\n    geometry_msgs::Point32 p4;\n\n    image_sub_ = it_.subscribe(\"Image\", 1,&imageCb);\n\n    lanepub = nh_.advertise<geometry_msgs::Polygon>(\"lane_points\", 1);\n    \n    ros::Rate r(1);\n    while(ros::ok())\n    {\n        if(a.empty()||flag==0)\n        {\n            cout<< \"Waiting for Image\" << endl;\n        }\n        else\n        { \n            geometry_msgs::Polygon lanes;\n            p1.x = a[0].x;\n            p1.y = a[0].y;\n\n            p2.x = a[1].x;\n            p2.y = a[1].y;\n\n            p3.x = a[2].x;\n            p3.y = a[2].y;\n\n            p4.x = a[3].x;\n            p4.y = a[3].y;\n            cout<< \"publishing lanes\"<<endl;\n            lanes.points.push_back(p1);\n            lanes.points.push_back(p2);\n            lanes.points.push_back(p3);\n            lanes.points.push_back(p4);\n\n            lanepub.publish(lanes);\n        }\n\n\n        flag=0;\n\n        ros::spinOnce();\n        r.sleep();\n    }\n    waitKey(1);\n    destroyAllWindows();\n    return 0;\n}\n\n// while(ros::ok())\n// {\n//     ImageConverter ic;\n//     r.sleep();\n//     ros::spinOnce();\n// }" }, { "alpha_fraction": 0.45895442366600037, 
"alphanum_fraction": 0.4897840619087219, "avg_line_length": 24.23486328125, "blob_id": "fd753b9bb6b4d9fc66dd56233c9f10c186fac6fa", "content_id": "45891ea8b5f14b0b250e92439f75ead1dec7f314", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 13753, "license_type": "no_license", "max_line_length": 157, "num_lines": 545, "path": "/include/ransac.hpp", "repo_name": "yash12khandelwal/Lanes_Mahindra", "src_encoding": "UTF-8", "text": "#ifndef RANSAC\n#define RANSAC\n\n\n#include <opencv2/core/core.hpp>\n#include <opencv2/highgui/highgui.hpp>\n#include <opencv2/imgproc/imgproc.hpp>\n#include <bits/stdc++.h>\n\n/*\n parabolas are fit assuming top left as origin - x towards right and y downwards\n */\n\nusing namespace std;\nusing namespace cv;\n\nPoint centroid(float a,float c,Mat img);\n\nfloat dist(Point A,Point B)\n{\n return (sqrt(pow(A.x-B.x,2)+pow(A.y-B.y,2)));\n}\n\n//structure to define the Parabola parameters\ntypedef struct Parabola\n{\n int numModel = 0;\n float a1 = 0.0;\n float c1 = 0.0;\n float a2 = 0.0;\n // float b2 = 0.0;\n float c2 = 0.0;\n} Parabola;\n\nParabola swap(Parabola param) {\n\n float temp1, temp2, temp3;\n temp1=param.a1;\n // temp2=param.b1;\n temp3=param.c1;\n\n param.a1=param.a2;\n // param.b1=param.b2;\n param.c1=param.c2;\n\n param.a2=temp1;\n // param.b2=temp2;\n param.c2=temp3;\n\n return param;\n}\n\n//calculation of Parabola parameters based on 3 randonmly selected points\nfloat get_a(Point p1, Point p2)\n{\n int x1 = p1.x;\n int x2 = p2.x;\n // int x3 = p3.x;\n int y1 = p1.y;\n int y2 = p2.y;\n // int y3 = p3.y;\n\n float del = (y1 - y2)*(y1 + y2);\n float del_a = (x1 - x2);\n float a;\n a = del/(del_a);\n\n if(fabs(a)>500)\n return FLT_MAX;\n else\n return a;\n}\n\n// float get_b(Point p1, Point p2, Point p3)\n// {\n// int x1 = p1.x;\n// int x2 = p2.x;\n// int x3 = p3.x;\n// int y1 = p1.y;\n// int y2 = p2.y;\n// int y3 = p3.y;\n// float del = (y2 - y1)*(y3 - y2)*(y1 - y3);\n// float del_b = (x3 - x2)*((y2*y2) - (y1*y1)) - (x2 - x1)*((y3*y3) - (y2*y2));\n// return(del_b / del);\n// }\nfloat get_c(Point p1, Point p2)\n{\n int x1 = p1.x;\n int y1 = p1.y;\n \n int x2 = p2.x;\n int y2 = p2.y;\n \n float del = (x1 - x2)*y2*y2;\n float del_a = (y1 - y2)*(y1 + y2);\n\n return (x2 - (del/(del_a)));\n}\n\nfloat min(float a, float b)\n{\n\tif(a<=b)\n\t\treturn a;\n\treturn b;\n}\n\n//calculate distance of passed point from curve\nfloat get_del(Point p, float a, float c)\n{\n float predictedX = ((p.y*p.y)/(a) + c);\n float errorx = fabs(p.x - predictedX);\n\n //#TODO add fabs \n float predictedY = sqrt(fabs(a*(p.x-c)));\n float errory = fabs(p.y - predictedY);\n\n return min(errorx, errory);\n}\n\n\n\n//removes both the lanes if they intersect within the image frame\nbool isIntersectingLanes(Mat img, Parabola param) {\n float a1 = param.a1;\n float c1 = param.c1;\n\n float a2 = param.a2;\n float c2 = param.c2;\n\n if(a1==a2)\n return false;\n float x = (a1*c1 - a2*c2)/(a1-a2);\n \n //checks if intersection is within \n\n float y_2 = a1*(x-c1);\n\n if (y_2 > 0 && sqrt(y_2) < (img.rows) && x > 0 && x < img.cols) return true;\n return false;\n\n}\n\n\n\n//choose Parabola parameters of best fit curve basis on randomly selected 3 points\n\n\n\nParabola ransac(vector<Point> ptArray, Parabola param, Mat img)\n{\n int numDataPts = ptArray.size();\n \n Parabola bestTempParam;\n\n //initialising no. 
of lanes\n bestTempParam.numModel=2;\n \n\n int score_gl = 0;/* comm_count_gl = 0;*/\n // int metric_l_gl = 0, metric_r_gl = 0; \n int score_l_gl = 0, score_r_gl = 0; \n\n //check for no lane case here\n\n cout<<\"1\"<<endl;\n // loop of iterations\n for(int i = 0; i < iteration; i++)\n {\n int p1 = random()%ptArray.size(), p2 = random()%ptArray.size(), p3 = random()%ptArray.size(), p4 = random()%ptArray.size();\n \n\n if(p1==p2 || p1==p3 || p1==p4 || p3==p2 || p4==p2 || p3==p4){\n i--;\n continue;\n }\n cout<<\"2\"<<endl;\n //#TODO points with same x or y should not be passed in (p[0],p[1])&(p[2]&p[3]) \n\n\n if(p2 == p1) p2 = random()%ptArray.size();\n \n if(p3 == p1 || p3 == p2) p3 = random()%ptArray.size();\n // TODO : p4 condition\n \n\n Point ran_points[4];\n ran_points[0] = ptArray[p1];\n ran_points[1] = ptArray[p2];\n ran_points[2] = ptArray[p3];\n ran_points[3] = ptArray[p4];\n\n int flag = 0;\n Point temp;\n\n for(int m = 0; m < 3; m++)\n { \n for(int n = 0; n < 3 - m; n++)\n {\n if(ran_points[n].x > ran_points[n+1].x) \n {\n temp = ran_points[n];\n ran_points[n] = ran_points[n+1];\n ran_points[n+1] = temp;\n }\n } \n }\n\n\n if(ran_points[0].x == ran_points[1].x || ran_points[2].x==ran_points[3].x || ran_points[0].y == ran_points[1].y || ran_points[2].y==ran_points[3].y){\n i--;\n continue;\n }\n cout<<\"3\"<<endl;\n\n Parabola tempParam; \n tempParam.a1 = get_a(ran_points[0], ran_points[1]);\n tempParam.c1 = get_c(ran_points[0], ran_points[1]);\n\n\n tempParam.a2 = get_a(ran_points[2], ran_points[3]); \n tempParam.c2 = get_c(ran_points[2], ran_points[3]);\n\n cout<<\"a1:\"<<tempParam.a1<<\" c1:\"<<tempParam.c1<<endl;\n\n // cout << \"Centroid Dif : \" << dist(centroid(tempParam.a1,tempParam.c1,img),centroid(tempParam.a2,tempParam.c2,img)) << endl;\n\n if (dist(centroid(tempParam.a1,tempParam.c1,img),centroid(tempParam.a2,tempParam.c2,img)) < 80.0)\n continue;\n cout<<\"4\"<<endl;\n\n if(fabs(tempParam.c1 - tempParam.c2) < 40.0)\n continue;\n cout<<\"5\"<<endl;\n\n // intersection only in top 3/8 part of the image taken\n if( isIntersectingLanes(img, tempParam)) {\n i--;\n continue;\n }\n cout<<\"6\"<<endl;\n\n //similar concavity of lanes\n\n \n // # rejected because many cases exist in which a better curve will get fit in opposite concavity\n // # and this will lead to bad lane fitting \n // //similar concavity of lanes\n\n // if (tempParam.a1 * tempParam.a2 < 0) {\n // continue;\n // }\n\n int score_common = 0;/*, comm_count = 0;*/\n int score_l_loc = 0, score_r_loc = 0;\n\n //looping over image\n for(int p = 0; p < ptArray.size(); p++)\n {\n\n int flag_l = 0; //for points on 1st curve\n int flag_r = 0; //for points on 2nd curve\n\n float dist_l = get_del(ptArray[p], tempParam.a1, tempParam.c1);\n\n if(dist_l < maxDist)\n {\n flag_l = 1;\n }\n\n float dist_r = get_del(ptArray[p], tempParam.a2, tempParam.c2);\n\n if(dist_r < maxDist)\n {\n flag_r = 1;\n }\n\n if(flag_l == 1 && flag_r == 1) {\n score_common++;\n }\n else {\n if (flag_l == 1) {\n score_l_loc++;\n }\n if (flag_r == 1) {\n score_r_loc++;\n }\n }\n } //end of loop over image\n\n // float lane_length_l = 1;\n // float lane_length_r = 1;\n // float metric_l = score_l_loc/lane_length_l;\n // float metric_r = score_r_loc/lane_length_r;\n cout << \"score_l_loc: \" << score_l_loc << endl;\n cout << \"score_r_loc: \" << score_r_loc << endl;\n\n if(score_r_loc==0 || score_l_loc==0)\n continue;\n cout << \"Common : \" << score_common << endl;\n\n if ((score_common/(score_common + score_l_loc + score_r_loc))*100 > 
common_inliers_thresh) {\n i--;\n continue;\n }\n\n // if(metric_l < metric_thresh && metric_r < metric_thresh){\n // continue;\n // }\n\n if (score_l_loc + score_r_loc > score_gl) {\n // metric_l_gl=metric_l;\n // metric_r_gl=metric_r;\n\n score_l_gl=score_l_loc;\n score_r_gl=score_r_loc;\n score_gl = score_r_gl + score_l_gl;\n\n bestTempParam.a1=tempParam.a1;\n bestTempParam.c1=tempParam.c1;\n bestTempParam.a2=tempParam.a2;\n bestTempParam.c2=tempParam.c2;\n }\n\n\n /*\n if(score_loc > score_gl)\n {\n if(w < 100 && w > -100) continue;\n score_gl = score_loc;\n score_l_gl = score_l_loc;\n score_r_gl = score_r_loc;\n a_gl = a;\n lam_gl = lam;\n lam2_gl = lam2;\n w_gl = w;\n p1_g = p1;\n p2_g = p2;\n p3_g = p3;\n p4_g = p4;\n cout<<score_gl<<'\\t';\n // comm_count_gl = comm_count;\n */\n\n\n } //end of iteration loop\n\n if(score_l_gl < minLaneInlier){\n bestTempParam.a1=0;\n bestTempParam.c1=0;\n bestTempParam.numModel--;\n }\n if(score_r_gl < minLaneInlier){\n bestTempParam.a2=0;\n bestTempParam.c2=0;\n bestTempParam.numModel--;\n }\n cout << \"score_l_gl: \" << score_l_gl << endl;\n cout << \"score_r_gl: \" << score_r_gl << endl;\n cout << \"bestTempParam.numModel : \"<<bestTempParam.numModel<<endl;\n return bestTempParam;\n}\n\nPoint centroid(float a,float c,Mat img)\n{\n Point A;\n int i,j,x,y;\n int sum_x = 0,sum_y = 0,count=1;\n\n for(j=0;j<img.rows;j++)\n {\n y = img.rows-j;\n x = ((y*y)/(a) + c);\n\n if(x>=0 && x<img.cols)\n {\n sum_y+=y;\n sum_x+=x;\n count++;\n }\n }\n\n A.x=sum_x/count;\n A.y=sum_y/count;\n\n return A;\n}\n\n\n\nParabola getRansacModel(Mat img,Parabola previous)\n{\n //apply ransac for first time it will converge for one lane\n vector<Point> ptArray1;\n \n if (grid_white_thresh >= grid_size*grid_size) {\n grid_white_thresh = grid_size*grid_size -1;\n }\n\n Mat plot_grid(img.rows,img.cols,CV_8UC1,Scalar(0));\n // cout << \"grid_size: \" << grid_size << endl;\n // cout << \"grid_white_thresh: \" << grid_white_thresh << endl;\n for(int i=((grid_size-1)/2);i<img.rows-(grid_size-1)/2;i+=grid_size)\n {\n for(int j=((grid_size-1)/2);j<img.cols-(grid_size-1)/2;j+=grid_size)\n {\n int count=0;\n for(int x=(j-(grid_size-1)/2);x<=(j+(grid_size-1)/2);x++)\n {\n for(int y=(i-(grid_size-1)/2);y<=(i+(grid_size-1)/2);y++)\n {\n if(img.at<uchar>(y,x)>128){\n count++;\n plot_grid.at<uchar>(i,j)=255;\n }\n }\n }\n if(count>grid_white_thresh)\n ptArray1.push_back(Point(j , img.rows - i));\n }\n }\n cout << \"ptArray1: \" << ptArray1.size() << endl;\n\n namedWindow(\"grid\",0);\n imshow(\"grid\",plot_grid);\n\n //declare a Parabola vaiable to store the Parabola\n Parabola param;\n cout<<\"ransac th\"<<minPointsForRANSAC<<endl;\n //get parameters of first Parabola form ransac function\n if(ptArray1.size() > minPointsForRANSAC)\n {\n cout<<\"No of pts \"<<ptArray1.size()<<endl;\n param = ransac(ptArray1, param, img);\n }\n\n else {\n //param is already initialised to zero\n }\n\n\n //Lane classification based on previous frames\n\n\n //if two lanes\n if(param.numModel==2) {\n\n if(param.c2<param.c1)\n {\n param=swap(param);\n }\n }\n\n //if one lane, assign same as previous frame if it had one lane\n if(param.numModel==1)\n {\n if(previous.numModel==1)\n {\n //if prev frame had right lane\n if(previous.a1==0 && previous.c1==0)\n {\n //if current frame has left lane\n if(param.a2==0 && param.c2==0)\n {\n param = swap(param);\n }\n }\n //if prev frame had left lane\n else if(previous.a2==0 && previous.c2==0)\n {\n //if current frame has right lane\n if(param.a1==0 && param.c1==0)\n {\n 
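// previous frame held only the left lane, so relabel the current single lane as left\n                    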
param = swap(param);\n                }\n            }\n        }\n\n        if(previous.numModel==2)\n        {\n            Point A=centroid(previous.a1,previous.c1,img);\n            Point B=centroid(previous.a2,previous.c2,img);\n            Point C;\n\n            //if current frame has right lane\n            if(param.a1==0&&param.c1==0)\n            {\n                C=centroid(param.a2,param.c2,img);\n                if(dist(A,C)<dist(B,C))\n                {\n                    param = swap(param);\n                }\n            }\n            //if current frame has left lane\n            else\n            {\n                C=centroid(param.a1,param.c1,img);\n                if(dist(A,C)>dist(B,C))\n                {\n                    param = swap(param);\n                }\n            }\n        }\n    }\n\n\n    return param;\n}\n\nMat drawLanes(Mat img, Parabola lanes) {\n\n    Mat fitLanes(img.rows,img.cols,CV_8UC3,Scalar(0,0,0));\n\n    vector<Point2f> left_lane, right_lane;\n    float a1 = lanes.a1, a2 = lanes.a2, c1 = lanes.c1, c2 = lanes.c2;\n\n    for (int j = 0; j < fitLanes.rows; j++){\n\n        float x, y;\n        if (a1 != 0 && c1 != 0) {\n            y = fitLanes.rows - j;\n            x = (y*y)/(a1) + c1;\n            left_lane.push_back(Point2f(x, j));\n        }\n\n        if (a2 != 0 && c2 != 0) {\n            y = fitLanes.rows - j;\n            x = (y*y)/(a2) + c2;\n            right_lane.push_back(Point2f(x, j));\n        }\n\n    }\n\n    Mat left_curve(left_lane, true);\n    left_curve.convertTo(left_curve, CV_32S); //adapt type for polylines\n    polylines(fitLanes, left_curve, false, Scalar(255, 0, 0), 3, CV_AA);\n\n    Mat right_curve(right_lane, true);\n    right_curve.convertTo(right_curve, CV_32S); //adapt type for polylines\n    polylines(fitLanes, right_curve, false, Scalar(0, 0, 255), 3, CV_AA);\n\n    return fitLanes;\n}\n\n\n#endif\n" }, { "alpha_fraction": 0.7859154939651489, "alphanum_fraction": 0.794366180896759, "avg_line_length": 151, "blob_id": "7b724975d56163a06cd9d218f9fbb465581d7dda", "content_id": "ed30d620ab88356be874f7ce2101f82e3d281e4a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1065, "license_type": "no_license", "max_line_length": 473, "num_lines": 7, "path": "/README.md", "repo_name": "yash12khandelwal/Lanes_Mahindra", "src_encoding": "UTF-8", "text": "## Lane and Stopline Detection for Urban Roads <br>\nThe repository contains code for detecting lanes and stoplines on urban roads. It uses image processing based techniques to extract lane features, suppress noise and fit a parabolic lane model to detect urban lane markings. For stopline detection, clustering and principal component analysis based techniques are used to identify horizontal markings on roads. The code has been tested on the KITTI dataset, and has been deployed on the Mahindra E20 autonomous vehicle. <br>\n<br>\nThe module is integrated on the ROS platform, and uses the [Dynamic Reconfigure](http://wiki.ros.org/dynamic_reconfigure) package to tune parameters. Descriptions of the various tuning parameters and their working values are provided in the code. The module uses input from an RGB camera published on `/camera/image_color`, and outputs the lane markings on `/lane_points`, a polygon geometry message. 
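\n\nFor reference, below is a minimal sketch of a node that consumes the published lane points; the node and callback names (`lane_listener`, `laneCb`) are illustrative choices, not part of this package:\n\n```cpp\n#include <ros/ros.h>\n#include <geometry_msgs/Polygon.h>\n\n// Print each corner of the lane polygon published on /lane_points.\nvoid laneCb(const geometry_msgs::Polygon::ConstPtr& msg)\n{\n    for (const auto& p : msg->points)\n        ROS_INFO(\"lane corner: (%.1f, %.1f)\", p.x, p.y);\n}\n\nint main(int argc, char** argv)\n{\n    ros::init(argc, argv, \"lane_listener\");\n    ros::NodeHandle nh;\n    ros::Subscriber sub = nh.subscribe(\"lane_points\", 1, laneCb);\n    ros::spin();\n    return 0;\n}\n```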
\n\n### Video Link: \n[![IMAGE ALT TEXT HERE](https://img.youtube.com/vi/0gNgN58NdnY/0.jpg)](https://www.youtube.com/watch?v=0gNgN58NdnY)\n\n" }, { "alpha_fraction": 0.4819536507129669, "alphanum_fraction": 0.515720546245575, "avg_line_length": 22.219999313354492, "blob_id": "58aeb75805f717035153f6862bd34ecd13d0bf8b", "content_id": "c57914dfd89baba4d27c2a0b483d5c585efe7bf6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 11609, "license_type": "no_license", "max_line_length": 134, "num_lines": 500, "path": "/include/ransac_new.hpp", "repo_name": "yash12khandelwal/Lanes_Mahindra", "src_encoding": "UTF-8", "text": "#ifndef RANSAC_NEW\n#define RANSAC_NEW\n\n\n#include <opencv2/core/core.hpp>\n#include <opencv2/highgui/highgui.hpp>\n#include <opencv2/imgproc/imgproc.hpp>\n#include <bits/stdc++.h>\n\n/*\n parabolas are fit assuming top left as origin - x towards right and y downwards\n */\n\nusing namespace std;\nusing namespace cv;\n\nPoint centroid(float a,float c,Mat img);\n\nfloat dist(Point A,Point B)\n{\n return (sqrt(pow(A.x-B.x,2)+pow(A.y-B.y,2)));\n}\n\n//structure to define the Parabola parameters\ntypedef struct Parabola\n{\n int numModel = 0;\n float a1 = 0.0;\n float c1 = 0.0;\n float a2 = 0.0;\n // float b2 = 0.0;\n float c2 = 0.0;\n} Parabola;\n\nParabola swap(Parabola param) {\n\n float temp1, temp2, temp3;\n temp1=param.a1;\n // temp2=param.b1;\n temp3=param.c1;\n\n param.a1=param.a2;\n // param.b1=param.b2;\n param.c1=param.c2;\n\n param.a2=temp1;\n // param.b2=temp2;\n param.c2=temp3;\n\n return param;\n}\n\nParabola classify_lanes(Mat img,Parabola present,Parabola previous)\n{\n float a1=present.a1;\n float a2=present.a2;\n float c1=present.c1;\n float c2=present.c2;\n int number_of_lanes=present.numModel;\n\n if(number_of_lanes==2)\n {\n if(c2<c1)\n {\n present=swap(present);\n return present;\n }\n else \n return present;\n }\n\n else if(number_of_lanes==1)\n {\n //if intersection on left or right lane possible\n if(a1*c1<0 && a1*(img.cols-c1)>0)\n {\n float y1=sqrt(-1.0*a1*c1);\n float y2=sqrt(a1*(img.cols-c1));\n\n if(y1>0 && y1<img.rows && y2>0 && y2<img.rows)\n {\n return previous;\n }\n\n }\n\n if(a2*c2<0 && a2*(img.cols-c2)>0)\n {\n float y1=sqrt(-1.0*a2*c2);\n float y2=sqrt(a2*(img.cols-c2));\n\n if(y1>0 && y1<img.rows && y2>0 && y2<img.rows)\n {\n return previous;\n }\n }\n\n if((c1!=0 && c1>(2*img.cols/5) && c1<(3*img.cols/5))|| (c2!=0 && c2>(2*img.cols/5) && c2<(3*img.cols/5)))\n {\n return previous;\n }\n\n if(c1!=0 && c1>(3*img.cols/5))\n {\n present=swap(present);\n return present;\n }\n\n else if(c2!=0 && c2<(2*img.cols/5))\n {\n present=swap(present);\n return present;\n }\n\n\n }\n\n}\n\n//calculation of Parabola parameters based on 3 randonmly selected points\nfloat get_a(Point p1, Point p2)\n{\n int x1 = p1.x;\n int x2 = p2.x;\n // int x3 = p3.x;\n int y1 = p1.y;\n int y2 = p2.y;\n // int y3 = p3.y;\n\n float del = (y1 - y2)*(y1 + y2);\n float del_a = (x1 - x2);\n float a;\n a = del/(del_a);\n\n if(fabs(a)>500)\n return FLT_MAX;\n else\n return a;\n}\n\nfloat get_c(Point p1, Point p2)\n{\n int x1 = p1.x;\n int y1 = p1.y;\n \n int x2 = p2.x;\n int y2 = p2.y;\n \n float del = (x1 - x2)*y2*y2;\n float del_a = (y1 - y2)*(y1 + y2);\n\n return (x2 - (del/(del_a)));\n}\n\nfloat min(float a, float b)\n{\n if(a<=b)\n return a;\n return b;\n}\n\n//calculate distance of passed point from curve\nfloat get_del(Point p, float a, float c)\n{\n float predictedX = ((p.y*p.y)/(a) + c);\n float errorx = fabs(p.x - 
predictedX);\n\n //#TODO add fabs\n float predictedY = sqrt(fabs(a*(p.x-c)));\n float errory = fabs(p.y - predictedY);\n\n return min(errorx, errory);\n}\n\n\n\n//removes both the lanes if they intersect within the image frame\nbool isIntersectingLanes(Mat img, Parabola param) {\n float a1 = param.a1;\n float c1 = param.c1;\n\n float a2 = param.a2;\n float c2 = param.c2;\n\n if(a1==a2)\n return false;\n float x = (a1*c1 - a2*c2)/(a1-a2 );\n \n //checks if intersection is within\n\n float y_2 = a1*(x-c1);\n cout<<\"y_2 : \"<<y_2<<\" x : \"<<x<<endl;\n\n if (y_2 >= 0 && sqrt(y_2) <= (img.rows) && x > 0 && x < img.cols) {\n cout<<\"intersectttttttttttttttttt hooooooooooooooo gyaaaaaaaaaaaaa\"<<endl;\n return true;\n }\n\n return false;\n}\n\n\n\n//choose Parabola parameters of best fit curve basis on randomly selected 3 points\n\n\n\nParabola ransac(vector<Point> ptArray, Parabola bestTempParam, Mat img)\n{\n int numDataPts = ptArray.size();\n\n int score_gl = 0;\n\n // loop of iterations\n for(int i = 0; i < iteration; i++)\n {\n //cout<<\"i : \"<<i<<endl;\n int score_loc=0;\n int p1 = random()%ptArray.size(), p2 = random()%ptArray.size();\n \n\n if(p1==p2 ){\n // i--;\n continue;\n }\n\n Point ran_points[2];\n ran_points[0] = ptArray[p1];\n ran_points[1] = ptArray[p2];\n\n\n int flag = 0;\n Point temp;\n\n if(ran_points[0].x == ran_points[1].x || ran_points[0].y == ran_points[1].y){\n // i--;\n continue;\n }\n\n //cout<<\"mario\"<<endl;\n\n Parabola tempParam;\n tempParam.a1 = get_a(ran_points[0], ran_points[1]);\n tempParam.c1 = get_c(ran_points[0], ran_points[1]);\n\n //a for second curve be same as first\n if(bestTempParam.numModel==1)\n {\n tempParam.a1=bestTempParam.a1;\n }\n\n //cout<<\"doraemon\"<<endl;\n\n //cout<<\"a1: \"<<tempParam.a1<<\" c1: \"<<tempParam.c1<<endl;\n\n // cout << \"Centroid Dif : \" << dist(centroid(tempParam.a1,tempParam.c1,img),centroid(tempParam.a2,tempParam.c2,img)) << endl;\n\n //looping over image\n for(int p = 0; p < ptArray.size(); p++)\n {\n\n float dist_l = get_del(ptArray[p], tempParam.a1, tempParam.c1);\n\n if(dist_l < maxDist)\n {\n score_loc++;\n }\n\n } //end of loop over image\n\n\n //cout<<\"numModel : \"<<bestTempParam.numModel<<\" score_loc : \"<<score_loc<<endl;\n\n if (score_loc > score_gl) {\n\n score_gl = score_loc;\n //cout<<\"score_gl XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXxx : \"<<score_gl<<endl;\n if(bestTempParam.numModel==0)\n {\n bestTempParam.a1=tempParam.a1;\n bestTempParam.c1=tempParam.c1;\n }\n else\n {\n bestTempParam.a2=tempParam.a1;\n bestTempParam.c2=tempParam.c1;\n }\n }\n\n } //end of iteration loop\n\n bestTempParam.numModel++;\n\n if(score_gl < minLaneInlier){\n if(bestTempParam.numModel==1)\n {\n bestTempParam.a1=0;\n bestTempParam.c1=0;\n }\n else if(bestTempParam.numModel==2)\n {\n bestTempParam.a2=0;\n bestTempParam.c2=0;\n }\n bestTempParam.numModel--;\n }\n\n return bestTempParam;\n}\n\nPoint centroid(float a,float c,Mat img)\n{\n Point A;\n int i,j,x,y;\n int sum_x = 0,sum_y = 0,count=1;\n\n for(j=0;j<img.rows;j++)\n {\n y = img.rows-j;\n x = ((y*y)/(a) + c);\n\n if(x>=0 && x<img.cols)\n {\n sum_y+=y;\n sum_x+=x;\n count++;\n }\n }\n\n A.x=sum_x/count;\n A.y=sum_y/count;\n\n return A;\n}\n\ndouble wTh = 128;\n\nParabola getRansacModel(Mat img,Parabola previous)\n{\n //apply ransac for first time it will converge for one lane\n vector<Point> ptArray1,ptArray2;\n \n if (grid_white_thresh >= grid_size*grid_size) {\n grid_white_thresh = grid_size*grid_size -1;\n }\n\n Mat 
plot_grid(img.rows,img.cols,CV_8UC1,Scalar(0));\n // cout << \"grid_size: \" << grid_size << endl;\n // cout << \"grid_white_thresh: \" << grid_white_thresh << endl;\n for(int i=((grid_size-1)/2);i<img.rows-(grid_size-1)/2;i+=grid_size)\n {\n for(int j=((grid_size-1)/2);j<img.cols-(grid_size-1)/2;j+=grid_size)\n {\n int count=0;\n for(int x=(j-(grid_size-1)/2);x<=(j+(grid_size-1)/2);x++)\n {\n for(int y=(i-(grid_size-1)/2);y<=(i+(grid_size-1)/2);y++)\n {\n if(img.at<uchar>(y,x)>wTh){\n count++;\n plot_grid.at<uchar>(i,j)=255;\n }\n }\n }\n if(count>grid_white_thresh)\n ptArray1.push_back(Point(j , img.rows - i));\n }\n }\n //cout << \"**********************ptArray1: ***************\" << ptArray1.size() << endl;\n\n namedWindow(\"grid\",0);\n imshow(\"grid\",plot_grid);\n\n //declare a Parabola vaiable to store the Parabola\n Parabola param;\n\n //cout<<\"ransac th :\"<<minPointsForRANSAC<<endl;\n //get parameters of first Parabola form ransac function\n if(ptArray1.size() > minPointsForRANSAC)\n {\n //cout<<\"No of pts : \"<<ptArray1.size()<<endl;\n param = ransac(ptArray1, param, img);\n }\n\n if(param.numModel==1)\n {\n for(int i=0;i<ptArray1.size();i++)\n {\n if(get_del(ptArray1[i],param.a1,param.c1)>maxDist)\n {\n ptArray2.push_back(ptArray1[i]);\n }\n }\n if(ptArray2.size()>minLaneInlier)\n {\n //cout<<\"ptArray2 size : \"<<ptArray2.size()<<endl;\n param=ransac(ptArray2,param,img);\n }\n }\n\n //cout<<\"Finally Number of lines detected : \"<<param.numModel<<endl;\n //Lane classification based on previous frames\n\n //common_inliers is useless here\n\n //if two lanes\n\n if(param.numModel==2)\n {\n if(fabs(param.c2-param.c1)<60)\n {\n param.a2=0;\n param.c2=0;\n param.numModel--;\n return param;\n }\n\n if(isIntersectingLanes(img,param))\n {\n return previous;\n }\n\n }\n\n param=classify_lanes(img,param,previous);\n\n \n\n return param;\n}\n\nMat drawLanes(Mat img, Parabola lanes) {\n\n Mat fitLanes(img.rows,img.cols,CV_8UC3,Scalar(0,0,0));\n\n vector<Point2f> left_lane, right_lane;\n float a1 = lanes.a1, a2 = lanes.a2, c1 = lanes.c1, c2 = lanes.c2;\n\n for (int j = 0; j < fitLanes.rows; j++){\n\n float x, y;\n if (a1 != 0 && c1 != 0) {\n y = fitLanes.rows - j;\n x = (y*y)/(a1) + c1;\n left_lane.push_back(Point2f(x, j));\n }\n\n if (a2 != 0 && c2 != 0) {\n y = fitLanes.rows - j;\n x = (y*y)/(a2) + c2;\n right_lane.push_back(Point2f(x, j));\n }\n\n }\n\n Mat left_curve(left_lane, true);\n left_curve.convertTo(left_curve, CV_32S); //adapt type for polylines\n polylines(fitLanes, left_curve, false, Scalar(255, 0, 0), 3, CV_AA);\n\n Mat right_curve(right_lane, true);\n right_curve.convertTo(right_curve, CV_32S); //adapt type for polylines\n polylines(fitLanes, right_curve, false, Scalar(0, 0, 255), 3, CV_AA);\n\n return fitLanes;\n}\n\nMat drawLanes_white(Mat img, Parabola lanes) {\n\n vector<Point2f> left_lane, right_lane;\n float a1 = lanes.a1, a2 = lanes.a2, c1 = lanes.c1, c2 = lanes.c2;\n\n for (int j = 0; j < img.rows; j++){\n\n float x, y;\n if (a1 != 0 && c1 != 0) {\n y = img.rows - j;\n x = (y*y)/(a1) + c1;\n left_lane.push_back(Point2f(x, j));\n }\n\n if (a2 != 0 && c2 != 0) {\n y = img.rows - j;\n x = (y*y)/(a2) + c2;\n right_lane.push_back(Point2f(x, j));\n }\n\n }\n\n Mat left_curve(left_lane, true);\n left_curve.convertTo(left_curve, CV_32S); //adapt type for polylines\n polylines(img, left_curve, false, Scalar(255, 0, 0), 3, CV_AA);\n\n Mat right_curve(right_lane, true);\n right_curve.convertTo(right_curve, CV_32S); //adapt type for polylines\n polylines(img, right_curve, 
false, Scalar(0, 0, 255), 3, CV_AA);\n\n return img;\n}\n\n#endif" }, { "alpha_fraction": 0.6550552248954773, "alphanum_fraction": 0.7272727489471436, "avg_line_length": 24.565217971801758, "blob_id": "70efcacd7df64139525782faf4bbdb8a141bcd27", "content_id": "70c295e5dc68d202d0a2df63ff1d1bf101af395f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1177, "license_type": "no_license", "max_line_length": 97, "num_lines": 46, "path": "/include/params.hpp", "repo_name": "yash12khandelwal/Lanes_Mahindra", "src_encoding": "UTF-8", "text": "#ifndef PARAMS\n#define PARAMS\n\nbool is_debug = false;\n\nint iteration = 100; //define no of iteration, max dist squre of pt from our estimated Parabola2\nint maxDist = 300; //define threshold distance to remove white pixel near lane1\nint minLaneInlier = 1500; // 2000 for night\nint common_inliers_thresh = 10;\nint minPointsForRANSAC = 500;\n// int grid_size = 3;\n\nfloat pixelPerMeter = 134;\n\nint horizon = 500;\nint horizon_offset = 200;\n\nint transformedPoints0_lowerbound = 30;\nint transformedPoints0_upperbound = 800;\n\nint point1_y = 30;\nint point2_x = 100;\n\n// int h = 30;\n// int w = 10;\n// float variance = 2.1;\n\nfloat yshift = 0.60; // distance from first view point to lidar in metres\n\n// float hysterisThreshold_min = 0.39;\n// float hysterisThreshold_max = 0.45;\n\n// int y = 400;\n// int lane_width = 320;\n// int k1 = 50;\n// int k2 = 50;\n\nint medianBlurkernel = 3; //kernel size of medianBlur for cleaning intersectionImages\nint neighbourhoodSize = 25; //neighbourhood size or block size for adaptive thresholding\nint constantSubtracted = -30; //constant subtracted during adaptive thresholding\n\nint region = 600;\n\nint baseDistance1 = 40;\nint centroidDistance = 20;\n#endif\n\n" }, { "alpha_fraction": 0.739279568195343, "alphanum_fraction": 0.7452830076217651, "avg_line_length": 19.10344886779785, "blob_id": "a3a22c58872c9ab43a3338a007e4cc5eb02ac734", "content_id": "cba847c4174d1c951339348a41cb790213cafd29", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 1166, "license_type": "no_license", "max_line_length": 71, "num_lines": 58, "path": "/CMakeLists.txt", "repo_name": "yash12khandelwal/Lanes_Mahindra", "src_encoding": "UTF-8", "text": "cmake_minimum_required(VERSION 2.8.3)\n\n# package name \nproject(lanes)\n\nset(CMAKE_CXX_FLAGS \"-w\")\nset(CMAKE_CFLAGE \"-w\")\n\n# Compile as C++14, supported in ROS Kinetic and newer\nadd_compile_options(-std=c++14)\n\n## Find catkin macros and libraries\n## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)\n## is used, also find other catkin packages\nfind_package(catkin REQUIRED COMPONENTS\n\tcv_bridge\n\timage_transport \n\troscpp\n\tstd_msgs\n\trospy\n\tdynamic_reconfigure\n)\n\ngenerate_dynamic_reconfigure_options(\n cfg/Tutorials.cfg\n #...\n)\n\nfind_package(OpenCV REQUIRED)\nfind_package(CUDA REQUIRED)\n#find_package(Armadillo REQUIRED)\n\ncatkin_package()\n\ninclude_directories(\n\tinclude\n\t${catkin_INCLUDE_DIRS}\n \t${OpenCV_INCLUDE_DIRS}\n \t${CUDA_INCLUDE_DIRS}\n# \t${ARMADILLO_INCLUDE_DIRS}\n)\n\nadd_executable(lanes_ransac src/laneDetector_ransac.cpp)\n#add_executable(lanes src/laneDetector.cpp)\n\n#target_link_libraries(lanes \n# ${catkin_LIBRARIES}\n# ${OpenCV_LIBRARIES}\n# ${ARMADILLO_LIBRARIES}\n#)\n\ntarget_link_libraries(lanes_ransac \n ${catkin_LIBRARIES}\n ${OpenCV_LIBRARIES}\n# ${ARMADILLO_LIBRARIES}\n)\n\nadd_dependencies(lanes_ransac 
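# ensure the dynamic_reconfigure headers (generated from Tutorials.cfg) exist before this target builds\n  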
${PROJECT_NAME}_gencfg)\n" }, { "alpha_fraction": 0.6297297477722168, "alphanum_fraction": 0.637837827205658, "avg_line_length": 23.733333587646484, "blob_id": "0b56f8fb7d28f3bdebf5e87cfaee985b206109bd", "content_id": "4188d5e602712da0a3e196296f6013e2794ac083", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 370, "license_type": "no_license", "max_line_length": 70, "num_lines": 15, "path": "/include/houghP.hpp", "repo_name": "yash12khandelwal/Lanes_Mahindra", "src_encoding": "UTF-8", "text": "#ifndef _HOUGHP_HPP_\n#define _HOUGHP_HPP_\n\n#include \"opencv/cv.h\"\n#include <opencv2/highgui/highgui.hpp>\n#include <bits/stdc++.h>\n\nusing namespace std;\nusing namespace cv;\n\nvoid HoughLinesP2( Mat& image, vector<Vec4i>& lines, vector<int>& len,\n float rho, float theta, int threshold,\n int lineLength, int lineGap);\n\n#endif" }, { "alpha_fraction": 0.5383435487747192, "alphanum_fraction": 0.5490797758102417, "avg_line_length": 20.733333587646484, "blob_id": "a51f0e8a1e321d6449db6358b950329f93fdea65", "content_id": "b3d9d01c2e8e1ff48b9ffeac0432b24199fc3a3a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1304, "license_type": "no_license", "max_line_length": 72, "num_lines": 60, "path": "/include/lane_laser_scan.hpp", "repo_name": "yash12khandelwal/Lanes_Mahindra", "src_encoding": "UTF-8", "text": "#ifndef LANE_LASER_SCAN\n#define LANE_LASER_SCAN\n\n#include \"opencv2/highgui/highgui.hpp\"\n#include \"opencv2/imgproc/imgproc.hpp\"\n#include \"opencv2/core/core.hpp\"\n#include <cmath>\n#include <limits>\n#include <iostream>\n\nusing namespace ros;\nusing namespace std;\nusing namespace cv;\n\n\nsensor_msgs::LaserScan imageConvert(Mat img)\n{\n cvtColor(img,img,CV_BGR2GRAY);\n\n int row = img.rows;\n int col = img.cols;\n sensor_msgs::LaserScan scan;\n scan.angle_min = -CV_PI/2;\n scan.angle_max = CV_PI/2;\n scan.angle_increment = CV_PI/bins;\n double inf = std::numeric_limits<double>::infinity();\n scan.range_max = inf; \n \n scan.header.frame_id = \"laser\";\n\n for (int i=0;i<bins;i++)\n {\n scan.ranges.push_back(scan.range_max);\n }\n\n scan.range_max = 80;\n for(int i = 0; i < row; ++i)\n {\n for(int j = 0; j < col; ++j)\n {\n if(img.at<uchar>(i, j) > 0)\n {\n float a = (col/2 - j)/pixelPerMeter;\n float b = (row - i)/pixelPerMeter + yshift;\n\n double angle = atan(a/b);\n\n double r = sqrt(a*a + b*b);\n\n int k = (angle - scan.angle_min)/(scan.angle_increment);\n scan.ranges[bins-k-1] = r ;\n }\n }\n }\n\n return scan;\n}\n\n\n#endif\n" }, { "alpha_fraction": 0.6772908568382263, "alphanum_fraction": 0.6992031931877136, "avg_line_length": 24.3157901763916, "blob_id": "0884b780519e6a73858db3b0752922df07c82ffb", "content_id": "33758bcafb9ce1f6752996a7e0458a74355ecf29", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 502, "license_type": "no_license", "max_line_length": 100, "num_lines": 19, "path": "/include/matrixTransformation.hpp", "repo_name": "yash12khandelwal/Lanes_Mahindra", "src_encoding": "UTF-8", "text": "#include <opencv2/imgproc/imgproc.hpp>\r\n#include <opencv2/highgui/highgui.hpp>\r\n#include <opencv2/core/core.hpp>\r\n#include <iostream>\r\n\r\nMat top_view(Mat img, Mat transform, int size_X, int size_Y) {\r\n\r\n\tMat topview;\r\n warpPerspective(img, topview, transform, Size(size_X,size_Y));\r\n\r\n return topview;\r\n}\r\n\r\nMat front_view(Mat img, Mat transform) {\r\n\tMat 
frontview;\r\n\twarpPerspective(img, frontview, transform.inv(), Size(1920, 1200), INTER_NEAREST, BORDER_CONSTANT);\r\n\r\n\treturn frontview;\r\n}\r\n\r\n" }, { "alpha_fraction": 0.4506620764732361, "alphanum_fraction": 0.5196858644485474, "avg_line_length": 32.52538299560547, "blob_id": "17d218442470e06cccc3f0b6e0037349f7b056bb", "content_id": "48a6bf68d472911bce81e3cc93c337383e0ce7df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 28396, "license_type": "no_license", "max_line_length": 266, "num_lines": 847, "path": "/src/laneDetector.cpp", "repo_name": "yash12khandelwal/Lanes_Mahindra", "src_encoding": "UTF-8", "text": "#include <opencv2/opencv.hpp>\n#include <bits/stdc++.h>\n#include <iostream>\n#include <ros/ros.h>\n#include \"geometry_msgs/Point.h\"\n#include \"geometry_msgs/Vector3.h\"\n#include <image_transport/image_transport.h>\n#include <cv_bridge/cv_bridge.h>\n#include <fstream>\n#include \"geometry_msgs/Polygon.h\"\n#include \"lsd.cpp\"\n#include \"laneDetector_utils.cpp\"\n#include \"../include/houghP.hpp\"\n#include \"../include/laneDetector_utils.hpp\"\n#include \"../include/lsd.h\"\n#include <string>\n//#include \"armadillo\"\n\nusing namespace std;\nusing namespace cv;\n\nstatic const std::string OPENCV_WINDOW = \"Image window\";\nint flag=1;\nbool FIRST_FRAME = true;\narma::vec curve;\n\nstring image_name;\n\nbool tracking_status = false;\nint frames_tracked = 0;\n\nbool last_frame_low_features = false, current_frame_low_features = false;\nbool one_frame_low_features = false;\n\nVideoWriter out;\n\nint h, w, horizon, tranformedPoints0_lowerbound, tranformedPoints0_upperbound, point1_y, point2_x, horizon_offset;\nfloat hysterisThreshold_min, hysterisThreshold_max, variance;\n\n// Mat transform = (Mat_<double>(3, 3) << 4.85067873e+01, 2.62964706e+02, -4.48763439e+04, 3.87871256e-15, 4.00615385e+02, -4.84907692e+04, 6.25698360e-18, 3.45701357e-01, 1.00000000e+00);\n\n// Mat h = (Mat_<double>(3, 3) << 1, 0, 1, 0, 0, 1, 1, 1, 0);\n\n// Mat transform = (Mat_<double>(3, 3) << -0.2845660084796459, -0.6990548252793777, 691.2703423570697, -0.03794262877137361, -2.020741261264247, 1473.107653024983, -3.138403683957707e-05, -0.001727021397398348, 1;\n\nMat findEdgeFeatures(Mat img, bool top_edges, bool debug)\n{\n Mat transform = (Mat_<double>(3, 3) << -0.2845660084796459, -0.6990548252793777, 691.2703423570697, -0.03794262877137361, -2.020741261264247, 1473.107653024983, -3.138403683957707e-05, -0.001727021397398348, 1);\n \n //medianBlur(img, img, 3);\n vector<Vec4i> lines, lines_top;\n vector<int> line_lens;\n Mat edges;\n\n // this will create lines using lsd \n if(top_edges == false)\n {\n \n Mat src = img;\n Mat tmp, src_gray;\n cvtColor(src, tmp, CV_RGB2GRAY);\n tmp.convertTo(src_gray, CV_64FC1);\n\n int cols = src_gray.cols;\n int rows = src_gray.rows;\n image_double image = new_image_double(cols, rows);\n image->data = src_gray.ptr<double>(0);\n ntuple_list ntl = lsd(image);\n Mat lsd = Mat::zeros(rows, cols, CV_8UC1);\n Point pt1, pt2;\n for (int j = 0; j != ntl->size ; ++j)\n {\n Vec4i t;\n\n pt1.x = int(ntl->values[0 + j * ntl->dim]);\n pt1.y = int(ntl->values[1 + j * ntl->dim]);\n pt2.x = int(ntl->values[2 + j * ntl->dim]);\n pt2.y = int(ntl->values[3 + j * ntl->dim]);\n t[0]=pt1.x;\n t[1]=pt1.y;\n t[2]=pt2.x;\n t[3]=pt2.y;\n lines.push_back(t);\n int width = int(ntl->values[4 + j * ntl->dim]);\n\n line(lsd, pt1, pt2, Scalar(255), width + 1, CV_AA);\n }\n free_ntuple_list(ntl);\n edges=lsd;\n }\n\n // this will 
create lines using canny\n else\n {\n Mat topview;\n warpPerspective(img, topview, transform, Size(800, 1000), INTER_NEAREST, BORDER_CONSTANT);\n Canny(topview, edges, 200, 300);\n HoughLinesP(edges, lines_top, 1, CV_PI/180, 60, 60, 50);\n transformLines(lines_top, lines, transform.inv());\n }\n\n for( size_t i = 0; i < lines.size(); i++ )\n {\n Vec4i l = lines[i];\n Point2f inputPoints[2], transformedPoints[2];\n\n inputPoints[0] = Point2f( l[0], l[1] );\n inputPoints[1] = Point2f( l[2], l[3] ); \n transformPoints(inputPoints, transformedPoints, transform, 2);\n\n if(transformedPoints[0].x<tranformedPoints0_lowerbound || transformedPoints[0].x>tranformedPoints0_upperbound || transformedPoints[1].x<tranformedPoints0_lowerbound || transformedPoints[1].x>tranformedPoints0_upperbound || l[1] < point1_y || l[2] < point2_x)\n {\n lines.erase(lines.begin() + i);\n i--;\n }\n }\n\n int is_lane[lines.size()];\n for(int i=0;i<lines.size();i++)\n is_lane[i]=0;\n\n for(size_t i=0;i<lines.size();i++)\n for(size_t j=0;j<lines.size();j++)\n if(abs(findIntersection(lines[i],lines[j])-horizon)<horizon_offset)\n {\n is_lane[i]+=1;\n is_lane[j]+=1;\n }\n\n vector<Vec4i> lane_lines, lane_lines_top;\n\n for(int i=0;i<lines.size();i++)\n if(is_lane[i]>10)\n lane_lines.push_back(lines[i]);\n\n transformLines(lane_lines, lane_lines_top, transform);\n\n Mat edgeFeatures(1000, 800 ,CV_8UC1, Scalar(0));\n for( size_t i = 0; i < lane_lines_top.size(); i++ )\n {\n Vec4i l = lane_lines_top[i];\n line(edgeFeatures, Point(l[0], l[1]), Point(l[2], l[3]), Scalar(255,255,255), 1, CV_AA);\n }\n\n if(debug==true)\n {\n Mat filtered_lines = img.clone();\n for( size_t i = 0; i < lane_lines.size(); i++ )\n {\n Vec4i l = lane_lines[i];\n line(filtered_lines, Point(l[0], l[1]), Point(l[2], l[3]), Scalar(255,0, 0), 5, CV_AA);\n }\n\n namedWindow(\"edges\", WINDOW_NORMAL);\n namedWindow(\"edgeFeatures\", WINDOW_NORMAL);\n namedWindow(\"filtered_lines\", WINDOW_NORMAL);\n\n imshow(\"edges\", edges);\n imshow(\"edgeFeatures\", edgeFeatures);\n imshow(\"filtered_lines\", filtered_lines);\n }\n\n // cvtColor(edgeFeatures, edgeFeatures, CV_BGR2GRAY);\n return edgeFeatures;\n}\n\n// Mat findRoadBoundaries(Mat img, bool debug)\n// {\n// Mat road = imread(\"/home/tejus/Documents/ml/KittiSeg/results/\"+image_name);\n// Mat road_top, road_boundary;\n// Mat transform = (Mat_<double>(3, 3) << 4.85067873e+01, 2.62964706e+02, -4.48763439e+04, 3.87871256e-15, 4.00615385e+02, -4.84907692e+04, 6.25698360e-18, 3.45701357e-01, 1.00000000e+00);\n\n// warpPerspective(road, road_top, transform, Size(800, 1000), INTER_NEAREST, BORDER_CONSTANT);\n\n// Canny(road_top, road_boundary, 200, 300);\n\n// if(debug)\n// {\n// imshow(\"road_top\", road_top);\n// imshow(\"road_boundary\", road_boundary);\n// }\n\n// return road_boundary;\n// }\n\n\nMat findIntensityMaxima(Mat img, bool debug)\n{\n Mat topview;\n Mat transform = (Mat_<double>(3, 3) << -0.2845660084796459, -0.6990548252793777, 691.2703423570697, -0.03794262877137361, -2.020741261264247, 1473.107653024983, -3.138403683957707e-05, -0.001727021397398348, 1);\n \n warpPerspective(img, topview, transform, Size(800, 1000), INTER_NEAREST, BORDER_CONSTANT);\n\n // topview = topview(Rect(100,0,500,900));\n GaussianBlur(topview, topview, Size(5, 15), 0, 0);\n blur(topview, topview, Size(25,25));\n\n namedWindow(\"topview\", WINDOW_NORMAL);\n imshow(\"topview\", topview);\n \n // if (debug==true)\n // {\n // namedWindow(\"topview\", WINDOW_NORMAL);\n // imshow(\"topview\", topview);\n // }\n\n cvtColor(topview, 
topview, CV_BGR2GRAY);\n\n \n medianBlur(topview, topview, 3);\n Mat t0, t1, t2, t3, t4, t5, t6;\n matchTemplate(topview, getTemplateX2(2.1, h, w, -10), t0, CV_TM_CCOEFF_NORMED);\n matchTemplate(topview, getTemplateX2(2.1, h, w, 0), t1, CV_TM_CCOEFF_NORMED);\n matchTemplate(topview, getTemplateX2(2.1, h, w, 10), t2, CV_TM_CCOEFF_NORMED);\n matchTemplate(topview, getTemplateX2(2.1, h, w, -20), t3, CV_TM_CCOEFF_NORMED);\n matchTemplate(topview, getTemplateX2(2.1, h, w, +20), t4, CV_TM_CCOEFF_NORMED);\n matchTemplate(topview, getTemplateX2(2.1, h, w, +30), t5, CV_TM_CCOEFF_NORMED);\n matchTemplate(topview, getTemplateX2(2.1, h, w, -30), t6, CV_TM_CCOEFF_NORMED);\n // imshow(\"template1\", getTemplateX2(2.1, h, w, -10));\n // imshow(\"template2\", getTemplateX2(2.1, h, w, 0));\n // imshow(\"template3\", getTemplateX2(2.1, h, w, 10));\n // imshow(\"template4\", getTemplateX2(2.1, h, w, 20));\n // imshow(\"template5\", getTemplateX2(2.1, h, w, 30));\n // imshow(\"template6\", getTemplateX2(2.1, h, w, -20));\n // imshow(\"template7\", getTemplateX2(2.1, h, w, -30));\n\n Mat t = t0-t0;\n for(int i=0;i<t.rows;i++)\n for(int j=0;j<t.cols;j++)\n {\n t.at<float>(i, j) = max( t4.at<float>(i, j), max(t3.at<float>(i, j), max(t0.at<float>(i, j), max(t1.at<float>(i, j), t2.at<float>(i, j)))));\n t.at<float>(i, j) = max( t.at<float>(i, j), max(t5.at<float>(i, j), t6.at<float>(i, j)) );\n }\n\n namedWindow(\"t\", WINDOW_NORMAL);\n imshow(\"t\", t);\n hysterisThreshold(t, topview, hysterisThreshold_min, hysterisThreshold_max);\n\n Mat result=Mat(topview.rows+h-1, topview.cols+w-1,CV_8UC3, Scalar(0,0,0));\n for(int i=0;i<topview.rows;i++)\n for(int j=0;j<topview.cols;j++)\n result.at<Vec3b>(i+(h-1)/2,j+(w-1)/2)={255*topview.at<float>(i,j), 255*topview.at<float>(i,j), 255*topview.at<float>(i,j)};\n\n\n if(debug==true)\n {\n namedWindow(\"intensityMaxima\", WINDOW_NORMAL);\n imshow(\"intensityMaxima\", result);\n }\n\n // waitKey(0);\n\n return result;\n}\n\nvoid initializeCurve()\n{\n cout<<\"Reinitializing !!\"<<endl<<endl;\n curve << 0 << 0 << 380 << 300;\n}\n\n\nbool checkLaneChange(Mat img)\n{\n Mat transform = (Mat_<double>(3, 3) << -0.2845660084796459, -0.6990548252793777, 691.2703423570697, -0.03794262877137361, -2.020741261264247, 1473.107653024983, -3.138403683957707e-05, -0.001727021397398348, 1);\n \n\n transform=transform.inv();\n int x = 0 ;\n int y_left = curve[0]*pow(x, 2) + curve[1]*pow(x, 1) + curve[2]*pow(x, 0) - curve[3]/2;\n int y_right = curve[0]*pow(x, 2) + curve[1]*pow(x, 1) + curve[2]*pow(x, 0) + curve[3]/2;\n\n x = 800;\n int transformed_yleft = (y_left*transform.at<double>(0, 0)+x*transform.at<double>(0, 1)+1.0*transform.at<double>(0, 2))/(y_left*transform.at<double>(2, 0)+x*transform.at<double>(2, 1)+1.0*transform.at<double>(2, 2)); \n int transformed_yright = (y_right*transform.at<double>(0, 0)+x*transform.at<double>(0, 1)+1.0*transform.at<double>(0, 2))/(y_right*transform.at<double>(2, 0)+x*transform.at<double>(2, 1)+1.0*transform.at<double>(2, 2));\n\n if(transformed_yright<(img.cols/2))\n {\n cout<<\"Lane shift right\"<<endl;\n curve[2] += curve[3];\n return true; \n }\n\n else if(transformed_yleft>(img.cols/2))\n {\n cout<<\"Lane shift left\"<<endl;\n curve[2] -= curve[3];\n return true;\n }\n\n return false;\n}\n\nbool checkPrimaryLane(Mat boundary, Mat edges, Mat intensityMaxima)\n{\n // making a feature image similar to size of edges and initilalizing it with 0\n Mat features = edges - edges;\n \n // editing features on the basis of intesityMaxima, boundary, edges image\n // 
making particular pixel value of feature image 1 on basis of condition\n \n cout << boundary.rows << \" \" << boundary.cols << endl;\n cout << intensityMaxima.rows << \" \" << intensityMaxima.cols << endl;\n cout << edges.rows << \" \" << edges.cols << endl;\n\n for(int i=0;i<features.rows;i++)\n for(int j=0;j<features.cols;j++)\n {\n if(intensityMaxima.at<Vec3b>(i, j)[0]>0 || boundary.at<uchar>(i, j)>0 || edges.at<uchar>(i, j)>0)\n features.at<uchar>(i, j) = 255;\n }\n\n // namedWindow(\"features1\", WINDOW_NORMAL);\n // imshow(\"features1\", features);\n\n float error = 0;\n int n_points = 0;\n int nl_points=0, nr_points=0;\n\n cout<<\"check\"<<endl;\n\n for(int i=150;i<edges.rows;i++)\n {\n float w = curve[3];\n int k1 = 25;\n int k2 = 15;\n int x = edges.rows - i;\n int y = curve[0]*pow(x, 2) + curve[1]*pow(x, 1) + curve[2]*pow(x, 0);\n\n float min_row_error = 99999;\n\n for(int offset = w/2 - k1; offset < w/2 + k2; offset++)\n {\n for(int l = 0; l < 2; l++ )\n {\n if( l > 0 )\n offset = -offset;\n\n int j = (int)(y+offset);\n\n if(j>70 && j<600)\n {\n if(features.at<uchar>(i, j)>0)\n {\n min_row_error = min(min_row_error, (float)pow(j-y, 2));\n n_points++;\n\n if(l==0)\n nr_points++;\n else\n nl_points++;\n }\n }\n if( l > 0 )\n offset = -offset;\n }\n }\n error += min_row_error;\n }\n\n cout<<\"error: \" << error<<\" , \"<< \"error/(n_points+1): \" << error/(n_points+1) <<\" , \"<< \"n_points: \" << n_points << endl;\n cout<< \"nl_points: \" << nl_points <<\",\"<< \"nr_points: \" << nr_points << endl;\n cout<<\"max: \" << max(nl_points, nr_points) << endl;\n cout<<\"min: \" << min(nl_points, nr_points) << endl;\n\n last_frame_low_features = current_frame_low_features;\n //if the minimum is very low or max\n cout<< \"first: \"<< (max(nl_points, nr_points) < 1000 && min(nl_points, nr_points) < 200) << endl;\n cout<< \"second: \"<< (min(nl_points, nr_points) < 100) << endl;\n\n\n current_frame_low_features = (max(nl_points, nr_points) < 1000 && min(nl_points, nr_points) < 200) || min(nl_points, nr_points) < 100;\n\n one_frame_low_features = min(nl_points, nr_points) < 250;\n \n cout<< \"tracking status: \" << tracking_status << endl;\n cout<< \"current_frame_low_features: \" << current_frame_low_features << endl;\n cout<< \"last_frame_low_features: \" << last_frame_low_features << endl;\n\n if(tracking_status==true)\n {\n if(current_frame_low_features && last_frame_low_features)\n {\n frames_tracked = 0;\n tracking_status = false;\n return false;\n }\n return true;\n }\n else\n {\n if(current_frame_low_features || last_frame_low_features)\n {\n frames_tracked = 0;\n return false;\n }\n else\n {\n frames_tracked++;\n\n if(frames_tracked>=5)\n {\n tracking_status = true;\n return true;\n }\n return false;\n }\n }\n}\n\nvector<Point> fitCurve(Mat img, Mat boundary, Mat edges, Mat intensityMaxima, bool debug=false)\n{\n if(FIRST_FRAME==true)\n {\n //initializeCurve();\n curve << 0 << 0 << 380 << 320;\n FIRST_FRAME = false;\n }\n\n tracking_status = checkPrimaryLane(boundary, edges, intensityMaxima);\n \n if(!tracking_status)\n {\n initializeCurve();\n }\n\n checkLaneChange(img);\n\n float w = curve[3];\n int k1 = 15;\n int k2 = 10;\n\n int n_points = 0;\n for(int i=150;i<edges.rows;i++)\n {\n int x = edges.rows - i;\n int y = curve[0]*pow(x, 2) + curve[1]*pow(x, 1) + curve[2]*pow(x, 0);\n\n bool found_closest_left = false;\n bool found_closest_right = false;\n for(int offset = w/2 - k1; offset < w/2 + k2; offset++)\n {\n for(int l = 0; l < 2; l++ )\n {\n if( l > 0 )\n offset = -offset;\n\n 
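// l==0 probes to the right of the fitted lane centre; l==1 mirrors the offset to probe the left side\n                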
int j = (int)(y+offset);\n\n if(j>70 && j<600)\n {\n if(intensityMaxima.at<Vec3b>(i, j)[0]>5 && ((l==0 && !found_closest_right) || (l==1 && !found_closest_left) ))\n {\n n_points++;\n if(l==0)\n found_closest_right = true;\n else\n found_closest_left = true;\n }\n else if(edges.at<uchar>(i, j)>5 && ((l==0 && !found_closest_right) || (l==1 && !found_closest_left) ))\n {\n n_points++;\n if(l==0)\n found_closest_right = true;\n else\n found_closest_left = true;\n }\n else if(boundary.at<uchar>(i, j)>5 && ((l==0 && !found_closest_right) || (l==1 && !found_closest_left) ))\n {\n n_points++;\n if(l==0)\n found_closest_right = true;\n else\n found_closest_left = true;\n }\n }\n if( l > 0 )\n offset = -offset;\n }\n }\n }\n\n cout<< \"--------------------------\" <<n_points<<endl;\n // if(n_points==0)\n // {\n // cout<<\"Tracking lost: No points on curve!\"<<endl;\n // return;\n // }\n // else\n // {\n // cout<<\"n_points: \"<<n_points<<endl;\n // }\n\n // n_points = 10000;\n arma::mat B(n_points, 4);\n arma::vec X(n_points);\n n_points = 0;\n\n //Mat all_features = edges - edges;\n Mat all_features=Mat(800, 1000,CV_8UC3, Scalar(0,0,0));\n for(int i=0;i<edges.rows;i++)\n for(int j=0;j<edges.cols;j++)\n {\n if(intensityMaxima.at<Vec3b>(i, j)[0]>5)\n {\n all_features.at<Vec3b>(i, j) = {255, 100, 100};\n }\n else if(edges.at<uchar>(i, j)>5)\n {\n all_features.at<Vec3b>(i, j) = {100, 255, 100};\n }\n else if(boundary.at<uchar>(i, j)>5)\n {\n all_features.at<Vec3b>(i, j) = {100, 100, 255};\n }\n }\n //Mat features = intensityMaxima - intensityMaxima;\n Mat features=Mat(800, 1000,CV_8UC3, Scalar(0,0,0));\n\n for(int i=150;i<edges.rows;i++)\n {\n int x = edges.rows - i;\n int y = curve[0]*pow(x, 2) + curve[1]*pow(x, 1) + curve[2]*pow(x, 0);\n\n bool found_closest_left = false;\n bool found_closest_right = false;\n for(int offset = w/2 - k1; offset < w/2 + k2; offset++)\n {\n for(int l = 0; l < 2; l++ )\n {\n if( l > 0 )\n offset = -offset;\n\n int j = (int)(y+offset);\n\n if(j>70 && j<600)\n {\n if(intensityMaxima.at<Vec3b>(i, j)[0]>5 && ((l==0 && !found_closest_right) || (l==1 && !found_closest_left) ))\n {\n features.at<Vec3b>(i, j) = {255, 100, 100};\n arma::rowvec temp;\n temp << x*x << x << 1 << (-l + 0.5);\n B.row(n_points) = temp;\n X[n_points] = j;\n n_points++;\n if(l==0)\n found_closest_right = true;\n else\n found_closest_left = true;\n }\n else if(edges.at<uchar>(i, j)>5 && ((l==0 && !found_closest_right) || (l==1 && !found_closest_left) ))\n {\n features.at<Vec3b>(i, j) = {100, 255, 100};\n arma::rowvec temp;\n temp << x*x << x << 1 << (-l + 0.5);\n B.row(n_points) = temp;\n X[n_points] = j;\n n_points++;\n if(l==0)\n found_closest_right = true;\n else\n found_closest_left = true;\n }\n else if(boundary.at<uchar>(i, j)>5 && ((l==0 && !found_closest_right) || (l==1 && !found_closest_left) ))\n {\n features.at<Vec3b>(i, j) = {100, 100, 255};\n arma::rowvec temp;\n temp << x*x << x << 1 << (-l + 0.5);\n B.row(n_points) = temp;\n X[n_points] = j;\n n_points++;\n if(l==0)\n found_closest_right = true;\n else\n found_closest_left = true;\n }\n else\n features.at<Vec3b>(i, j) = {100, 100, 100};\n }\n if( l > 0 )\n offset = -offset;\n }\n }\n }\n\n double lambda = 0.1; \n arma::vec offset;\n offset << 0 << 0 << 380 << 300;\n arma::vec new_curve = inv(B.t()*B + lambda*arma::eye(4, 4))*(B.t()*X + lambda*offset);\n\n cout<<\"new curve: \"<<new_curve<<endl;\n\n for(int i=0;i<n_points;i++)\n {\n int x = B.at(i, 1);\n int y = X[i];\n features.at<Vec3b>(features.rows-x, y) = {255, 255, 255};\n }\n\n bool 
large_width_change = abs((curve[3]-new_curve[3])/curve[3]) > 0.5 || new_curve[3] < 250 || new_curve[3] > 400;\n if(!large_width_change && !one_frame_low_features)\n {\n curve[0] = (curve[0]+new_curve[0])/2;\n curve[1] = (curve[1]+new_curve[1])/2;\n curve[2] = (curve[2]+new_curve[2])/2;\n curve[3] = (curve[3]+new_curve[3])/2;\n cout << \"width updated |||||||||||||||||||||||||||||||\"<< endl;\n }\n \n else\n {\n curve[0] = (curve[0]+new_curve[0])/2;\n curve[1] = (curve[1]+new_curve[1])/2;\n curve[2] = new_curve[2] - new_curve[3]/2 + curve[3]/2;\n curve[3] = curve[3];\n cout<<\"width not updated!...................................\"<<endl;\n }\n \n if(debug==true)\n {\n namedWindow(\"features\", WINDOW_NORMAL);\n namedWindow(\"all_features\", WINDOW_NORMAL);\n\n imshow(\"features\", features);\n imshow(\"all_features\", all_features);\n }\n\n Mat topview, lanes;\n Mat transform = (Mat_<double>(3, 3) << -0.2845660084796459, -0.6990548252793777, 691.2703423570697, -0.03794262877137361, -2.020741261264247, 1473.107653024983, -3.138403683957707e-05, -0.001727021397398348, 1);\n \n\n warpPerspective(img, topview, transform, Size(800, 1000), INTER_NEAREST, BORDER_CONSTANT);\n\n int y1, y2, y3, y4;\n for(int i=150;i<topview.rows;i++)\n {\n int x = topview.rows - i;\n int y = curve[0]*pow(x, 2) + curve[1]*pow(x, 1) + curve[2]*pow(x, 0);\n\n\n if(y<70 || y>600) continue;\n\n topview.at<Vec3b>(i, y) = {0, 0, 255};\n topview.at<Vec3b>(i, y - w/2) = {255, 0, 0};\n topview.at<Vec3b>(i, y + w/2) = {255, 0, 0};\n\n topview.at<Vec3b>(i, y+1) = {0, 0, 255};\n topview.at<Vec3b>(i, y - w/2+1) = {255, 0, 0};\n topview.at<Vec3b>(i, y + w/2+1) = {255, 0, 0};\n\n topview.at<Vec3b>(i, y-1) = {0, 0, 255};\n topview.at<Vec3b>(i, y - w/2-1) = {255, 0, 0};\n topview.at<Vec3b>(i, y + w/2-1) = {255, 0, 0};\n if(i==300)\n {\n y1=y - w/2;\n y4=y + w/2;\n }\n if(i==999)\n {\n y2=y - w/2;\n y3=y + w/2;\n }\n }\n\n // float y1, y2;\n\n // y1 = curve[0]*pow(topview.rows-160, 2) + curve[1]*pow(topview.rows-160, 1) + curve[2]*pow(topview.rows-160, 0);\n // y2 = curve[0]*pow(1, 2) + curve[1]*pow(1, 1) + curve[2]*pow(1, 0);\n\n // temp.push_back(Point(y1 - w/2, 160));\n // temp.push_back(Point(y1 + w/2, 160));\n // temp.push_back(Point(y2 - w/2, topview.rows));\n // temp.push_back(Point(y2 + w/2, topview.rows));\n\n // cout<<\"(x1,y1): \"<< y1 - w/2 << 160 << endl;\n // cout<<\"(x2,y2): \"<< y1 + w/2 << 160 << endl;\n // cout<<\"(x3,y3): \"<< y2 - w/2 << topview.rows << endl;\n // cout<<\"(x4,y4): \"<< y2 + w/2 << topview.rows << endl;\n \n // imshow(\"trial\",topview);\n warpPerspective(topview, lanes, transform.inv(), Size(1920, 1200), INTER_NEAREST, BORDER_CONSTANT);\n \n // those pixels in which lane was not tracked were made 0, so now making those pixel values same as orignal image pixel value\n for(int i=0;i<lanes.rows;i++)\n for(int j=0;j<lanes.cols;j++)\n if(lanes.at<Vec3b>(i,j)[0]==0 && lanes.at<Vec3b>(i,j)[1]==0 && lanes.at<Vec3b>(i,j)[2]==0)\n {\n lanes.at<Vec3b>(i,j)[0] = img.at<Vec3b>(i,j)[0];\n lanes.at<Vec3b>(i,j)[1] = img.at<Vec3b>(i,j)[1];\n lanes.at<Vec3b>(i,j)[2] = img.at<Vec3b>(i,j)[2];\n }\n\n if(!tracking_status)\n lanes = img;\n\n if(tracking_status)\n putText(lanes, \"Tracking Lanes\", Point(50,100), FONT_HERSHEY_SIMPLEX, 1, Scalar(0,255,0), 2);\n else\n putText(lanes, \"Initializing Tracking.\", Point(50,100), FONT_HERSHEY_SIMPLEX, 1, Scalar(0,0,255), 2);\n\n\n namedWindow(\"lanes\", WINDOW_NORMAL);\n imshow(\"lanes\", lanes);\n\n vector<Point> temp;\n temp.push_back(Point(800,y1));\n 
temp.push_back(Point(1000,y2));\n temp.push_back(Point(1000,y3));\n temp.push_back(Point(800,y4));\n return temp;\n \n}\n\nvector<Point> detect_lanes(Mat img, bool debug = true)\n{\n // display orignal image\n if(debug==true)\n {\n namedWindow(\"original\", WINDOW_NORMAL);\n imshow(\"original\", img);\n }\n\n // Mat boundary = findRoadBoundaries(img, true);\n \n // initialize boundary with a matrix of (800*400)\n Mat boundary = Mat(1000, 800, CV_8UC1, Scalar(0));\n\n // intenity maximum image made\n Mat intensityMaxima = findIntensityMaxima(img, true);\n \n // image with edge features made\n Mat edgeFeature = findEdgeFeatures(img, false, true);\n\n // curve fit on the basis of orignal image, maxima intensity image and edgeFeature image\n \n vector<Point> v;\n v = fitCurve(img, boundary, edgeFeature, intensityMaxima, true);\n \n // printing curve dimensions\n // cout<<curve<<endl;\n return v;\n\n}\n\nvector<Point> a;\n\nvoid imageCb(const sensor_msgs::ImageConstPtr& msg)\n{\n flag=1;\n Mat img;\n cv_bridge::CvImagePtr cv_ptr;\n\n cout<<\"in callback\"<<endl;\n\n try\n {\n \n cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8);\n img = cv_ptr->image;\n cout << img.rows << img.cols << endl;\n }\n catch (cv_bridge::Exception& e)\n {\n ROS_ERROR(\"cv_bridge exception: %s\", e.what());\n return;\n }\n if( !img.data ) { printf(\"Error loading A \\n\"); return ; }\n\n a = detect_lanes(img,true);\n waitKey(1);\n}\n\nint main(int argc, char **argv)\n{\n // ros declaration\n ros::init(argc, argv, \"lanes\");\n\n ros::NodeHandle nh_;\n image_transport::ImageTransport it_(nh_);\n image_transport::Subscriber image_sub_;\n\n string h_s, w_s, hysterisThreshold_min_s, hysterisThreshold_max_s, horizon_s, tranformedPoints0_lowerbound_s, tranformedPoints0_upperbound_s, point1_y_s, point2_x_s, horizon_offset_s, variance_s;\n\n h = stod(nh_.param<std::string>(\"/h\",h_s)); \n w = stod(nh_.param<std::string>(\"/w\",w_s));\n hysterisThreshold_min = stod(nh_.param<std::string>(\"/hysterisThreshold_min\",hysterisThreshold_min_s));\n hysterisThreshold_max = stod(nh_.param<std::string>(\"/hysterisThreshold_max\",hysterisThreshold_max_s));\n horizon = stod(nh_.param<std::string>(\"/horizon\",horizon_s));\n tranformedPoints0_lowerbound = stod(nh_.param<std::string>(\"/tranformedPoints0_lowerbound\",tranformedPoints0_lowerbound_s));\n tranformedPoints0_upperbound = stod(nh_.param<std::string>(\"/tranformedPoints0_upperbound\",tranformedPoints0_upperbound_s));\n point1_y = stod(nh_.param<std::string>(\"/point1_y\",point1_y_s));\n point2_x = stod(nh_.param<std::string>(\"/point2_x\",point2_x_s));\n horizon_offset = stod(nh_.param<std::string>(\"/horizon_offset\",horizon_offset_s));\n variance = stod(nh_.param<std::string>(\"/variance\", variance_s));\n\n ros::Publisher lanepub;\n cv_bridge::CvImagePtr cv_ptr;\n \n geometry_msgs::Point32 p1;\n geometry_msgs::Point32 p2;\n geometry_msgs::Point32 p3;\n geometry_msgs::Point32 p4;\n\n image_sub_ = it_.subscribe(\"/camera/image_color\", 1,&imageCb);\n\n lanepub = nh_.advertise<geometry_msgs::Polygon>(\"lane_points\", 1);\n \n ros::Rate r(1);\n while(ros::ok())\n {\n if(a.empty()||flag==0)\n {\n cout<< \"Waiting for Image\" << endl;\n }\n else\n { \n geometry_msgs::Polygon lanes;\n p1.x = a[0].x;\n p1.y = a[0].y;\n\n p2.x = a[1].x;\n p2.y = a[1].y;\n\n p3.x = a[2].x;\n p3.y = a[2].y;\n\n p4.x = a[3].x;\n p4.y = a[3].y;\n cout<< \"publishing lanes\"<<endl;\n lanes.points.push_back(p1);\n lanes.points.push_back(p2);\n lanes.points.push_back(p3);\n 
lanes.points.push_back(p4);\n\n lanepub.publish(lanes);\n }\n\n\n flag=0;\n\n ros::spinOnce();\n r.sleep();\n }\n waitKey(1);\n destroyAllWindows();\n}\n" }, { "alpha_fraction": 0.5566277503967285, "alphanum_fraction": 0.5913323163986206, "avg_line_length": 22.440475463867188, "blob_id": "b415eb12851dd39d4f0abec3ae0b7da835b94ec7", "content_id": "5810dfdd4281cf1fe3e9df98f4214ecd1c412254", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5907, "license_type": "no_license", "max_line_length": 110, "num_lines": 252, "path": "/include/mlesac.hpp", "repo_name": "yash12khandelwal/Lanes_Mahindra", "src_encoding": "UTF-8", "text": "//input: gray-scale Mat and return a model(struct data type)\n//main function: getMlesacModel\n\n#ifndef MLESAC\n#define MLESAC\n\n\n#include <opencv2/core/core.hpp>\n#include <opencv2/highgui/highgui.hpp>\n#include <opencv2/imgproc/imgproc.hpp>\n#include <iostream>\n\nusing namespace std;\nusing namespace cv;\n\n//structure to define the model parameters\ntypedef struct model\n{\n int numModel = 0;\n float a1 = 0.0;\n float b1 = 0.0;\n float c1 = 0.0;\n float a2 = 0.0;\n float b2 = 0.0;\n float c2 = 0.0;\n}model;\n\n\n//set threshold for white color\n#define wTh 50\n//define no of iteration, max dist squre of pt from our estimated model\n#define iteration 50\n//iteration for EM algo for choosing best gamma\n#define emIter 3\n#define maxDist 5\n//define sigma for gaussian distribution of inliers\n#define sigmaVal 3.0\n//define threshold distance to remove white pixel near lane1\n#define removeDist 100\n//define minimum number of points to be lie on a lane\n#define minLaneInlier 750\n\n//calculation of model parameters based on 3 randonmly selected points\nfloat get_a(Point p1, Point p2, Point p3)\n{\n int x1 = p1.x;\n int x2 = p2.x;\n int x3 = p3.x;\n int y1 = p1.y;\n int y2 = p2.y;\n int y3 = p3.y;\n float del = (y2 - y1)*(y3 - y2)*(y1 - y3);\n float del_a = (x2 - x1)*(y3 - y2) - (x3 - x2)*(y2 - y1);\n return(del_a / del);\n}\nfloat get_b(Point p1, Point p2, Point p3)\n{\n int x1 = p1.x;\n int x2 = p2.x;\n int x3 = p3.x;\n int y1 = p1.y;\n int y2 = p2.y;\n int y3 = p3.y;\n float del = (y2 - y1)*(y3 - y2)*(y1 - y3);\n float del_b = (x3 - x2)*((y2*y2) - (y1*y1)) - (x2 - x1)*((y3*y3) - (y2*y2));\n return(del_b / del);\n}\nfloat get_c(Point p, float a, float b)\n{\n int x = p.x;\n int y = p.y;\n return(x - (a*y*y) - (b*y));\n}\n\n//calculation of error b/w actual and estimated y\nfloat get_delX(Point p, float a, float b, float c)\n{\n float predictedX = (a*(p.y*p.y) + b*p.y + c);\n return abs(p.x - predictedX);\n}\n\n//calculation of inlier prob based on gaussian distribution\nfloat inlierProb(float error, float sigma, float gamma)\n{\n float inlierP = gamma*(exp(-(error*error)/(2*sigma*sigma))/(sigma*2.50662827463));\n return inlierP;\n}\n\n//calculation of outlier prob based on unform distribution\nfloat outlierProb(float gamma, float globMaxResid)\n{\n float outlierP = (1-gamma)/globMaxResid;\n return outlierP;\n}\n\n//choose model parameters of best fit curve basis on randomly selected 3 points\nmodel mlesac(vector<Point> ptArray, float globMaxResid, model param)\n{\n int dataPt = ptArray.size();\n model tempParam;\n float MinPenalty;\n int maxInlier = 0;\n float sigma = sigmaVal;\n for(int i = 0; i < iteration; ++i)\n {\n float gamma = 0.5; //initialisation\n float currPenalty = 0;\n int d1 = rand() % dataPt;\n int d2 = rand() % dataPt;\n int d3 = rand() % dataPt;\n Point p1 = ptArray[d1];\n Point p2 = 
ptArray[d2];\n Point p3 = ptArray[d3];\n if((p1.x == p2.x) || (p2.x == p3.x) || (p3.x == p1.x)||(p1.y == p2.y) || (p2.y == p3.y) || (p3.y == p1.y))\n {\n continue;\n }\n //get the model parameters\n float temp_a = get_a(p1, p2, p3);\n float temp_b = get_b(p1, p2, p3);\n float temp_c = get_c(p1, temp_a, temp_b);\n //count inliers on predicted model\n int tempInlier = 0;\n\n //choose best gamma by EM algorithm\n for(int k = 0; k < emIter; ++k)\n {\n float inlierPB = 0;\n for(int j = 0; j < dataPt; ++j)\n {\n Point z = ptArray[j];\n float error = get_delX(z, temp_a, temp_b, temp_c);\n if(error < maxDist)\n {\n ++tempInlier;\n }\n float inlierP = inlierProb(error, sigma, gamma);\n float outlierP = outlierProb(gamma, globMaxResid);\n inlierPB += (inlierP)/(inlierP + outlierP);\n if(k == emIter - 1)\n {\n currPenalty -= log(inlierP + outlierP);\n }\n }\n gamma = inlierPB/dataPt;\n }\n if((MinPenalty > currPenalty) || (i == 0))\n {\n maxInlier = tempInlier;\n MinPenalty = currPenalty;\n tempParam.a1 = temp_a;\n tempParam.b1 = temp_b;\n tempParam.c1 = temp_c;\n }\n }\n if(maxInlier > minLaneInlier)\n {\n if(param.numModel == 0)\n {\n param.a1 = tempParam.a1;\n param.b1 = tempParam.b1;\n param.c1 = tempParam.c1;\n }\n else if(param.numModel == 1)\n {\n param.a2 = tempParam.a1;\n param.b2 = tempParam.b1;\n param.c2 = tempParam.c1;\n }\n param.numModel += 1;\n }\n return param;\n}\n\n//Check wheather a point is near lane1\nbool IsNearLane1(model param1, Point p)\n{\n float dist = get_delX(p, param1.a1, param1.b1, param1.c1);\n if(dist < removeDist)\n {\n return true;\n }\n else\n {\n return false;\n }\n}\n\n\nmodel getMlesacModel(Mat img)\n{\n float diaLen = sqrt(pow(img.rows, 2) + pow(img.cols, 2));\n //apply mlesac for first time it will converge for one lane\n vector<Point> ptArray1;\n for(int i = 0; i < img.rows; ++i)\n {\n for(int j = 0; j < img.cols; ++j)\n {\n int wVal = img.at<uchar>(i,j);\n if(wVal > wTh)\n {\n Point pt;\n pt.x = j;\n pt.y = i;\n ptArray1.push_back(pt);\n }\n }\n }\n\n //declare a model vaiable to store the model\n model param;\n\n //get parameters of first model form ransac function\n if(ptArray1.size() > 500)\n {\n param = mlesac(ptArray1, diaLen, param);\n }\n\n //Remove white pixel form image near lane1 and apply ransac to get lane2\n if(param.numModel > 0)\n {\n vector<Point> ptArray2;\n for(int i = 0; i < img.rows; ++i)\n {\n for(int j = 0; j < img.cols; ++j)\n {\n Point q;\n q.x = j;\n q.y = i;\n int wVal = img.at<uchar>(i,j);\n if(wVal > wTh)\n {\n if(!IsNearLane1(param, q))\n {\n ptArray2.push_back(q);\n }\n }\n }\n }\n\n //get parameters of second model form mlesac function\n if(ptArray2.size() > 500)\n {\n param = mlesac(ptArray2, diaLen, param);\n }\n }\n\n return param;\n}\n\n\n#endif\n" }, { "alpha_fraction": 0.3835635483264923, "alphanum_fraction": 0.40939226746559143, "avg_line_length": 29.041494369506836, "blob_id": "a7cbe0b3bc90ccf8ddb4b996bc76af31e1be6041", "content_id": "02016f5cd9d918761a2d7d026a31ccae5e0ff07b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 7240, "license_type": "no_license", "max_line_length": 126, "num_lines": 241, "path": "/src/houghP.cpp", "repo_name": "yash12khandelwal/Lanes_Mahindra", "src_encoding": "UTF-8", "text": "#include <opencv2/opencv.hpp>\n#include <iostream>\n\nusing namespace std;\nusing namespace cv;\n\n//Test HoughLinesP2\n/*Mat test_img=imread(\"images/test.png\", 0);\ncv::resize(test_img, test_img, cv::Size(500,500));\nvector<Vec4i> test_lines;\nvector<int> 
test_lines_len;\nHoughLinesP2(test_img, 1, CV_PI/1800, 150, 80, 200, test_lines, test_lines_len, 10);\ncout<<test_lines.size()<<\" \"<<test_lines_len.size()<<endl;\nfor(int o=0;o<test_lines.size();o++)\n{\n cout<<test_lines[o][0]<<\" \"<<test_lines[o][1]<<\" \"<<test_lines[o][2]<<\" \"<<test_lines[o][3]<<\"-\"<<test_lines_len[o]<<endl;\n circle(test_img, {test_lines[o][0], test_lines[o][1]}, 5, Scalar(255), 1, 8, 0);\n circle(test_img, {test_lines[o][2], test_lines[o][3]}, 5, Scalar(255), 1, 8, 0);\n}\nimshow(\"aaa\", test_img);\nwaitKey(0);*/\n\nvoid HoughLinesP2( Mat& image, std::vector<Vec4i>& lines, std::vector<int>& len,\n float rho, float theta, int threshold,\n int lineLength, int lineGap)\n{\n\n Point pt;\n float irho = 1 / rho;\n //RNG rng((uint64)-1);\n RNG rng(cv::getTickCount());\n\n CV_Assert( image.type() == CV_8UC1 );\n\n int width = image.cols;\n int height = image.rows;\n\n int numangle = cvRound(CV_PI / theta);\n int numrho = cvRound(((width + height) * 2 + 1) / rho);\n\n Mat accum = Mat::zeros( numangle, numrho, CV_32SC1 );\n Mat mask( height, width, CV_8UC1 );\n std::vector<float> trigtab(numangle*2);\n\n for( int n = 0; n < numangle; n++ )\n {\n trigtab[n*2] = (float)(cos((double)n*theta) * irho);\n trigtab[n*2+1] = (float)(sin((double)n*theta) * irho);\n }\n const float* ttab = &trigtab[0];\n uchar* mdata0 = mask.ptr();\n std::vector<Point> nzloc;\n\n // stage 1. collect non-zero image points\n for( pt.y = 0; pt.y < height; pt.y++ )\n {\n const uchar* data = image.ptr(pt.y);\n uchar* mdata = mask.ptr(pt.y);\n for( pt.x = 0; pt.x < width; pt.x++ )\n {\n if( data[pt.x] )\n {\n mdata[pt.x] = (uchar)1;\n nzloc.push_back(pt);\n }\n else\n mdata[pt.x] = 0;\n }\n }\n\n int count = (int)nzloc.size();\n\n // stage 2. process all the points in random order\n for( ; count > 0; count-- )\n {\n // choose random point out of the remaining ones\n int idx = rng.uniform(0, count);\n int max_val = threshold-1, max_n = 0;\n Point point = nzloc[idx];\n Point line_end[2];\n float a, b;\n int* adata = accum.ptr<int>();\n int i = point.y, j = point.x, k, x0, y0, dx0, dy0, xflag;\n int good_line;\n int line_len=0;\n const int shift = 16;\n\n // \"remove\" it by overriding it with the last element\n nzloc[idx] = nzloc[count-1];\n\n // check if it has been excluded already (i.e. belongs to some other line)\n if( !mdata0[i*width + j] )\n continue;\n\n // update accumulator, find the most probable line\n for( int n = 0; n < numangle; n++, adata += numrho )\n {\n int r = cvRound( j * ttab[n*2] + i * ttab[n*2+1] );\n r += (numrho - 1) / 2;\n int val = ++adata[r];\n if( max_val < val )\n {\n max_val = val;\n max_n = n;\n }\n }\n\n // if it is too \"weak\" candidate, continue with another point\n if( max_val < threshold )\n continue;\n\n // from the current point walk in each direction\n // along the found line and extract the line segment\n a = -ttab[max_n*2+1];\n b = ttab[max_n*2];\n x0 = j;\n y0 = i;\n if( fabs(a) > fabs(b) )\n {\n xflag = 1;\n dx0 = a > 0 ? 1 : -1;\n dy0 = cvRound( b*(1 << shift)/fabs(a) );\n y0 = (y0 << shift) + (1 << (shift-1));\n }\n else\n {\n xflag = 0;\n dy0 = b > 0 ? 
1 : -1;\n dx0 = cvRound( a*(1 << shift)/fabs(b) );\n x0 = (x0 << shift) + (1 << (shift-1));\n }\n\n for( k = 0; k < 2; k++ )\n {\n int gap = 0, x = x0, y = y0, dx = dx0, dy = dy0;\n\n if( k > 0 )\n dx = -dx, dy = -dy;\n\n // walk along the line using fixed-point arithmetics,\n // stop at the image border or in case of too big gap\n for( ;; x += dx, y += dy )\n {\n uchar* mdata;\n int i1, j1;\n\n if( xflag )\n {\n j1 = x;\n i1 = y >> shift;\n }\n else\n {\n j1 = x >> shift;\n i1 = y;\n }\n\n if( j1 < 0 || j1 >= width || i1 < 0 || i1 >= height )\n break;\n\n mdata = mdata0 + i1*width + j1;\n\n // for each non-zero point:\n // update line end,\n // clear the mask element\n // reset the gap\n if( *mdata )\n {\n gap = 0;\n line_len++;\n line_end[k].y = i1;\n line_end[k].x = j1;\n }\n else if( ++gap > lineGap )\n break;\n }\n }\n\n good_line = std::abs(line_end[1].x - line_end[0].x) >= lineLength ||\n std::abs(line_end[1].y - line_end[0].y) >= lineLength;\n\n for( k = 0; k < 2; k++ )\n {\n int x = x0, y = y0, dx = dx0, dy = dy0;\n\n if( k > 0 )\n dx = -dx, dy = -dy;\n\n // walk along the line using fixed-point arithmetics,\n // stop at the image border or in case of too big gap\n for( ;; x += dx, y += dy )\n {\n uchar* mdata;\n int i1, j1;\n\n if( xflag )\n {\n j1 = x;\n i1 = y >> shift;\n }\n else\n {\n j1 = x >> shift;\n i1 = y;\n }\n\n mdata = mdata0 + i1*width + j1;\n\n // for each non-zero point:\n // update line end,\n // clear the mask element\n // reset the gap\n if( *mdata )\n {\n if( good_line )\n {\n adata = accum.ptr<int>();\n for( int n = 0; n < numangle; n++, adata += numrho )\n {\n int r = cvRound( j1 * ttab[n*2] + i1 * ttab[n*2+1] );\n r += (numrho - 1) / 2;\n adata[r]--;\n }\n }\n *mdata = 0;\n }\n\n if( i1 == line_end[k].y && j1 == line_end[k].x )\n break;\n }\n }\n\n if( good_line )\n {\n Vec4i lr(line_end[0].x, line_end[0].y, line_end[1].x, line_end[1].y);\n lines.push_back(lr);\n //cout<<line_len<<endl;\n len.push_back(line_len);\n }\n }\n}\n" } ]
18
fabriciopirini/UNIFEI-Timetable
https://github.com/fabriciopirini/UNIFEI-Timetable
7d649ea4a2e353909c1dc3cb1ce8893c509223f3
c91cad28e472aa9c3e68bbeee8c21f09494dd218
9ebccaf9f7aed3e2bf8d15bbb67c76db8c0af2cd
refs/heads/master
2021-03-30T23:42:02.330153
2020-06-22T15:23:51
2020-06-22T15:23:51
125,092,292
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5174099206924438, "alphanum_fraction": 0.5218896269798279, "avg_line_length": 44.47222137451172, "blob_id": "a3c75d91aa9993ce4250940bc66a03bc7cf970a0", "content_id": "b49b9e127fb0ac9339473a40fdd815eea22cfd9c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4912, "license_type": "permissive", "max_line_length": 134, "num_lines": 108, "path": "/app/app.py", "repo_name": "fabriciopirini/UNIFEI-Timetable", "src_encoding": "UTF-8", "text": "from flask import Flask, request, render_template\nfrom scrap import SIGAA\nimport time\nfrom initial_config import *\nfrom models import *\nimport numpy as np\nimport logging\n\n\nclass FlaskService(object):\n\n def __init__(self):\n pass\n\n def do_function(self):\n @APP.route('/', methods=[\"POST\"])\n def pesquisa_disciplina():\n if(request.form.get('pesquisa') == \"\"):\n return render_template('index.html', disciplinas='', turmas='')\n texto = \"\".join(['%', request.form.get('pesquisa').upper(), '%'])\n resultadoTurmas = []\n resultadoDisciplinas = []\n try:\n logging.info(\"Inicio try\")\n qtde = Disciplina.query.filter(Disciplina.nome.like(texto)).count()\n logging.info(str(qtde))\n if qtde > 0:\n resultado = Disciplina.query.filter(Disciplina.nome.like(texto)).order_by(Disciplina.nome.asc()).all()\n\n for item in resultado:\n resultadoDisciplinas.append([item.id, item.nome, item.curso])\n # print(\"Dados disciplinas\")\n listaTurmas = Turma.query.filter_by(idDisciplina=item.id).order_by(Turma.curso.asc(), Turma.turma.asc()).all()\n # print(\"Turmas pesquisadas\")\n for item2 in listaTurmas:\n # print(' '.join([item]))\n resultadoTurmas.append([item2.idDisciplina, item2.turma, item2.docente, item2.horario])\n # print(\"Lista de turmas criadas\")\n # print(resultadoTurmas)\n # resultadoTurmas = Turma.query.filter_by(idDisciplina=resultado.id).all()\n # resultado = Disciplina.query.filter_by(nome='ECOI14.1 - Banco de Dados').all()\n # resultado = (str(disciplina.nome) for disciplina in resultado)\n # return (str(disciplina.nome) for disciplina in resultado)\n return render_template('index.html', disciplinas=resultadoDisciplinas, turmas=resultadoTurmas)\n else:\n return render_template('index.html', disciplinas='0', turmas='')\n except:\n logging.warning(\"Pesquisa nรฃo rolou\")\n return render_template('index.html', disciplinas='1', turmas='')\n\n @APP.route('/', methods=[\"GET\"])\n def inicio():\n return render_template('index.html', disciplinas='', turmas='')\n\n @APP.route('/disciplinas')\n def visualiza_disciplinas_registradas():\n disciplinas = Disciplina.query.all()\n return render_template('lista_disciplinas.html', disciplinas=disciplinas)\n\n @APP.route('/registra_disciplinas', methods=['GET'])\n def formulario_registra_disciplinas():\n return render_template('registra_disciplinas.html')\n\n @APP.route('/registra_disciplinas', methods=['POST'])\n def registra_disciplinas():\n # siglaCurso = request.form.get('siglaCurso')\n cursos = ['EAM', 'EMO', 'ECO', 'ECA',\n 'EMT', 'EPR', 'ESS', 'EEL', 'EME']\n ano = request.form.get('ano')\n semestre = request.form.get('semestre')\n for siglaCurso in cursos:\n x, y = SIGAA.retorna_turmas(siglaCurso, ano, semestre)\n d = SIGAA.format(x, y)\n for item, info in d.items():\n disciplina = Disciplina(\n item.upper(), 'Campus Itabira', siglaCurso)\n DB.session.add(disciplina)\n DB.session.commit()\n DB.session.refresh(disciplina)\n for i in np.arange(0, len(info), 8):\n turma = Turma(disciplina.id, info[i], info[i + 1], 
info[i + 2], info[i + 3], info[i + 4],\n info[i + 5], info[i + 6], info[i + 7], siglaCurso)\n DB.session.add(turma)\n # DB.session.commit()\n # print(': '.join([str(i % 8), str(elemento)]))\n print(' '.join(['Curso processado:', siglaCurso]))\n DB.session.commit()\n time.sleep(1)\n # for item in d:\n # turma = Turma(item, 'Campus Itabira')\n # DB.session.add(disciplina)\n # DB.session.commit()\n # name = request.form.get('name')\n # email = request.form.get('email')\n\n # guest = Guest(name, email)\n # DB.session.add(guest)\n # DB.session.commit()\n\n # return render_template('guest_confirmation.html', name=name, email=email)\n return render_template('confirma_registro.html')\n\n if __name__ == '__main__':\n app.run(debug=True)\n\n\np = FlaskService()\np.do_function()\n" }, { "alpha_fraction": 0.466292142868042, "alphanum_fraction": 0.48730751872062683, "avg_line_length": 33.575538635253906, "blob_id": "0bf1618d33cde56544f18294e88bdc1befc8ba35", "content_id": "618cdf7b5cc99ec605c283aca9e2df8312f8f2f0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4806, "license_type": "permissive", "max_line_length": 120, "num_lines": 139, "path": "/app/scrap.py", "repo_name": "fabriciopirini/UNIFEI-Timetable", "src_encoding": "UTF-8", "text": "import requests\nfrom lxml import html\nimport pickle\nimport re\nimport unicodedata\n\n\nclass SIGAA:\n \"\"\"Acessa SIGGA para retorno das turmas\"\"\"\n detalhes = []\n\n def __init__(self):\n pass\n\n def retorna_turmas(curso, ano, semestre):\n\n with requests.Session() as c:\n url = 'https://sigaa.unifei.edu.br/sigaa/logar.do?dispatch=logOn'\n USERNAME = '35072984818'\n PASSWORD = '@Ftp1992ftp'\n c.get(url)\n login_data = {\n 'user.login': USERNAME,\n 'user.senha': PASSWORD,\n 'width': '1366',\n 'heigth': '768',\n 'urlRedirect': '',\n 'subsistemaRedirect': '',\n 'acao': '',\n 'acessibilidade': ''\n }\n bla = c.post(url, data=login_data, headers={\n \"Referer\": \"https://sigaa.unifei.edu.br/sigaa/verTelaLogin.do\"})\n cursos = {\n 'EAM': '43969906',\n 'EMO': '43969909',\n 'ECO': '43969911',\n 'ECA': '43969913',\n 'EMT': '43969917',\n 'EPR': '43969918',\n 'ESS': '43969920',\n 'EEL': '43969921',\n 'EME': '43969925'\n }\n curso = cursos[curso]\n\n search_data = {\n 'form': 'form',\n 'form:checkNivel': 'on',\n 'form:selectNivelTurma': 'G',\n 'form:checkAnoPeriodo': 'on',\n 'form:inputAno': ano,\n 'form:inputPeriodo': semestre,\n 'form:checkUnidade': 'on',\n 'form:selectUnidade': '254',\n 'form:inputCodDisciplina': '',\n 'form:inputCodTurma': '',\n 'form:inputLocal': '',\n 'form:inputHorario': '',\n 'form:inputNomeDisciplina': '',\n 'form:inputNomeDocente': '',\n 'form:checkCurso': 'on',\n 'form:selectCurso': curso,\n 'form:selectSituacaoTurma': '1',\n 'form:selectTipoTurma': '0',\n 'form:selectModalidade': '0',\n 'form:checkRel': 'on',\n 'form:selectOpcaoOrdenacao': '1',\n 'turmasEAD': 'false',\n 'form:buttonBuscar': 'Buscar',\n 'javax.faces.ViewState': 'j_id2'\n }\n\n url_busca = 'https://sigaa.unifei.edu.br/sigaa/ensino/turma/busca_turma.jsf'\n c.get(url_busca)\n resposta = c.post(url_busca, data=search_data)\n\n # print(tryd.content)\n\n tree = html.fromstring(resposta.content)\n\n disciplinas = tree.xpath(\n '///*[@id=\"lista-turmas\"]/tbody/tr/td/h4/text()')\n\n # print(type(disciplinas))\n # print(disciplinas[0])\n disciplinas = [w.replace('\\t', '').replace(\n '\\n', '').strip() for w in disciplinas]\n # disciplinas = [w.split(\"(\") for w in disciplinas]\n\n # disciplinas = 
list(filter(None, disciplinas))\n\n # print(disciplinas)\n\n detalhes = tree.xpath('''//*[@id=\"lista-turmas\"]/tbody/tr[contains(@class,\"bordaBottonRelatorio\")]/td/text()\n\t\t\t\t\t\t\t\t\t| ///*[@id=\"lista-turmas\"]/tbody/tr/td/h4/text()''')\n\n detalhes = [w.replace('\\t', '').replace(\n '\\n', '').strip() for w in detalhes]\n\n detalhesNew = []\n for item in detalhes:\n detalhesNew.extend(re.split(r'[\\(\\)]+', item))\n #listaTurmas = [w.split(\"(\") for w in listaTurmas]\n\n # listaTurmas = list(filter(None, listaTurmas))\n\n # print('Lista das Turmas: \\n', listaTurmas)\n\n # codCursos = tree.xpath('//*[@id=\"form:selectCurso\"]/*/@value')\n # cursos = tree.xpath('//*[@id=\"form:selectCurso\"]/*/text()')\n\n # print('Codigos dos Cursos: \\n', codCursos)\n # print('Cursos: \\n', cursos)\n return detalhes, disciplinas\n\n def format(detalhes, disciplinas):\n from collections import defaultdict\n\n d = defaultdict(list)\n subjects = set(disciplinas)\n\n for item in detalhes:\n if item in subjects:\n current_item = unicodedata.normalize('NFKD', item).encode('ASCII', 'ignore')\n # current_item = item\n else:\n d[current_item].append(unicodedata.normalize('NFKD', item).encode('ASCII', 'ignore'))\n # d[current_item].append(item)\n return d\n\n def save_obj(obj, name):\n with open('obj/' + name + '.pkl', 'wb') as f:\n # pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)\n\n def load_obj(name):\n with open('obj/' + name + '.pkl', 'rb') as f:\n return pickle.load(f)\n" }, { "alpha_fraction": 0.7450199127197266, "alphanum_fraction": 0.7569721341133118, "avg_line_length": 25.89285659790039, "blob_id": "c07ded9eeb98ca414e33d0df95b5ca291b831410", "content_id": "93cffd8c31c8468604f5022e287c6e31a7e1ba47", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 753, "license_type": "permissive", "max_line_length": 109, "num_lines": 28, "path": "/app/initial_config.py", "repo_name": "fabriciopirini/UNIFEI-Timetable", "src_encoding": "UTF-8", "text": "from flask import Flask\nfrom flask_migrate import Migrate\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_debugtoolbar import DebugToolbarExtension\nimport psycopg2\n\nAPP = Flask(__name__)\n\nAPP.debug = True\n\nAPP.config['TEMPLATES_AUTO_RELOAD'] = True\n\nAPP.config['SECRET_KEY'] = 'abc123-'\n\nAPP.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\nAPP.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql+psycopg2://%s:%s@%s:%s/%s?sslmode=require' % (\n 'manager@unifei-timetable', 'supersecretpass', 'unifei-timetable.postgres.database.azure.com', '5432', 'eventregistration'\n)\n\n# initialize the database connection\nDB = SQLAlchemy(APP)\n\n# debug SQLAlchemy queries\n# toolbar = DebugToolbarExtension(APP)\n\n# initialize database migration management\nMIGRATE = Migrate(APP, DB)\n" }, { "alpha_fraction": 0.6384891867637634, "alphanum_fraction": 0.6696642637252808, "avg_line_length": 33.04081726074219, "blob_id": "f8dd0809f82ec0b00a1a1ce857e9e14ad76b8596", "content_id": "9e3745ef8f95164e409b8814bea66a983f87c82b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1668, "license_type": "permissive", "max_line_length": 68, "num_lines": 49, "path": "/app/migrations7329829483/versions/bfe49ec25577_.py", "repo_name": "fabriciopirini/UNIFEI-Timetable", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: bfe49ec25577\nRevises: 0ed182aac4ff\nCreate Date: 2018-04-30 
13:34:39.218818\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'bfe49ec25577'\ndown_revision = '0ed182aac4ff'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('disciplinas',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('nome', sa.String(length=150), nullable=True),\n sa.Column('campus', sa.String(length=30), nullable=True),\n sa.Column('curso', sa.String(length=5), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('turmas',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('idDisciplina', sa.Integer(), nullable=False),\n sa.Column('periodo', sa.String(length=10), nullable=True),\n sa.Column('nivel', sa.String(length=10), nullable=True),\n sa.Column('turma', sa.String(length=10), nullable=True),\n sa.Column('docente', sa.String(length=100), nullable=True),\n sa.Column('situacao', sa.String(length=10), nullable=True),\n sa.Column('horario', sa.String(length=1000), nullable=True),\n sa.Column('local', sa.String(length=30), nullable=True),\n sa.Column('matriculados', sa.String(length=30), nullable=True),\n sa.ForeignKeyConstraint(['idDisciplina'], ['disciplinas.id'], ),\n sa.PrimaryKeyConstraint('id', 'idDisciplina')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('turmas')\n op.drop_table('disciplinas')\n # ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6505376100540161, "alphanum_fraction": 0.7096773982048035, "avg_line_length": 16, "blob_id": "18f72e737ecf56caec8f79521dfbef8e6fecf619", "content_id": "ea66dd2fa9a03a10d704020b7174e986e2eb0860", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 186, "license_type": "permissive", "max_line_length": 52, "num_lines": 11, "path": "/Dockerfile", "repo_name": "fabriciopirini/UNIFEI-Timetable", "src_encoding": "UTF-8", "text": "FROM python:3.6.1\n\nCOPY requirements.txt /\nRUN pip install -r ./requirements.txt\n\nCOPY app/ /app/\n\nWORKDIR /app\n\nENV FLASK_APP=app.py\nCMD flask db upgrade && flask run -h 0.0.0.0 -p 5000" }, { "alpha_fraction": 0.6327043771743774, "alphanum_fraction": 0.6716980934143066, "avg_line_length": 25.5, "blob_id": "6ab203cc336cd5df8a31a9d391017eee8b3cca74", "content_id": "a9605ad4e769fc88c1fbb99496e3e57795b05ff4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 795, "license_type": "permissive", "max_line_length": 88, "num_lines": 30, "path": "/app/migrations7329829483/versions/0ed182aac4ff_.py", "repo_name": "fabriciopirini/UNIFEI-Timetable", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 0ed182aac4ff\nRevises: ce642cb311c7\nCreate Date: 2018-04-30 12:54:24.910697\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '0ed182aac4ff'\ndown_revision = 'ce642cb311c7'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('disciplinas', sa.Column('curso', sa.String(length=5), nullable=True))\n op.create_foreign_key(None, 'turmas', 'disciplinas', ['idDisciplina'], ['id'])\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_constraint(None, 'turmas', type_='foreignkey')\n op.drop_column('disciplinas', 'curso')\n # ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6242157816886902, "alphanum_fraction": 0.6411543488502502, "avg_line_length": 33.65217208862305, "blob_id": "46fcfc3822b4f93696cfd80e8c90c2a2110bec17", "content_id": "9f7779db02d07cc23fd6e6775b39d7473fbba806", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1594, "license_type": "permissive", "max_line_length": 117, "num_lines": 46, "path": "/app/models.py", "repo_name": "fabriciopirini/UNIFEI-Timetable", "src_encoding": "UTF-8", "text": "from initial_config import *\n\n\nclass Disciplina(DB.Model):\n \"\"\"Modelo de banco de dados para monitorar as disciplinas.\"\"\"\n\n __tablename__ = 'disciplinas'\n id = DB.Column(DB.Integer, primary_key=True)\n nome = DB.Column(DB.String(150))\n campus = DB.Column(DB.String(30))\n curso = DB.Column(DB.String(5))\n\n def __init__(self, nome=None, campus=None, curso=None):\n self.nome = nome\n self.campus = campus\n self.curso = curso\n\n\nclass Turma(DB.Model):\n \"\"\"Modelo de banco de dados para monitorar cada turma e linkar com as disciplinas.\"\"\"\n\n __tablename__ = 'turmas'\n id = DB.Column(DB.Integer, primary_key=True)\n idDisciplina = DB.Column(DB.Integer, DB.ForeignKey('disciplinas.id'), nullable=False)\n periodo = DB.Column(DB.String(10))\n nivel = DB.Column(DB.String(10))\n turma = DB.Column(DB.String(10))\n docente = DB.Column(DB.String(100))\n situacao = DB.Column(DB.String(10))\n horario = DB.Column(DB.String(1000))\n local = DB.Column(DB.String(300))\n matriculados = DB.Column(DB.String(30))\n curso = DB.Column(DB.String(5))\n\n def __init__(self, idDisciplina, periodo=None, nivel=None, turma=None, docente=None, situacao=None, horario=None,\n local=None, matriculados=None, curso=None):\n self.idDisciplina = idDisciplina\n self.periodo = periodo\n self.nivel = nivel\n self.turma = turma\n self.docente = docente\n self.situacao = situacao\n self.horario = horario\n self.local = local\n self.matriculados = matriculados\n self.curso = curso\n" }, { "alpha_fraction": 0.43976274132728577, "alphanum_fraction": 0.45039886236190796, "avg_line_length": 33.42753601074219, "blob_id": "86ca90eb5640b9b65e8e6864b7196dee96327dc0", "content_id": "659ac13958e58e521145335aab55c8968c096429", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 4892, "license_type": "permissive", "max_line_length": 240, "num_lines": 138, "path": "/app/templates/index.html", "repo_name": "fabriciopirini/UNIFEI-Timetable", "src_encoding": "UTF-8", "text": "<!DOCTYPE html>\r\n<html lang=\"pt-br\">\r\n<head>\r\n <meta charset='utf-8'/>\r\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1, shrink-to-fit=no\">\r\n <link href='https://use.fontawesome.com/releases/v5.0.6/css/all.css' rel='stylesheet'>\r\n <link href='../static/fullcalendar.min.css' rel='stylesheet'/>\r\n <link href='../static/fullcalendar.print.min.css' rel='stylesheet' media='print'/>\r\n <script src='../static/moment.min.js'></script>\r\n <script src='../static/jquery.min.js'></script>\r\n <script src='../static/fullcalendar.min.js'></script>\r\n <script src='../static/fullcalendar-pt-br.js'></script>\r\n <script src='../static/base.js'></script>\r\n <style>\r\n\r\n .fc-time-grid-event .fc-time,\r\n .fc-title {\r\n font-size: 1em;\r\n }\r\n\r\n td.fc-today {\r\n background: inherit !important;\r\n }\r\n\r\n 
.fc-title {\r\n font-weight: bolder;\r\n }\r\n\r\n #calendar {\r\n /* max-width: 900px; */\r\n /* margin: 40px auto; */\r\n padding: 0 10px;\r\n }\r\n\r\n #title {\r\n margin-bottom: 2rem;\r\n }\r\n\r\n h1, h2 {\r\n text-align: center;\r\n }\r\n\r\n #calendar > div.fc-toolbar.fc-header-toolbar {\r\n display: none;\r\n }\r\n\r\n #resultado_pesquisa {\r\n margin-top: 1em;\r\n margin-left: 1em;\r\n overflow-y: scroll;\r\n }\r\n\r\n .closeon {\r\n position: absolute;\r\n top: -2px;\r\n right: 0;\r\n cursor: pointer;\r\n background-color: #FFF;\r\n z-index: 2;\r\n }\r\n\r\n .rowTurma {\r\n padding-left: 1.5em !important;\r\n }\r\n\r\n .rowDisciplina {\r\n font-weight: bolder;\r\n }\r\n\r\n .fas {\r\n font-size: 1.2em;\r\n }\r\n\r\n .adiciona {\r\n cursor: pointer;\r\n }\r\n </style>\r\n <link rel=\"stylesheet\" type=\"text/css\" href=\"https://bootswatch.com/4/united/bootstrap.min.css\">\r\n</head>\r\n<body>\r\n<h1 id=\"title\" tabindex=\"0\">UNIFEI-Timetable</h1>\r\n<!-- $('#calendar').fullCalendar('removeEvents');\r\n$('#calendar').fullCalendar('addEventSource', events); -->\r\n<div class=\"container-fluid\">\r\n <div class=\"row justify-content-md-center\">\r\n <div class=\"col-3\">\r\n <div class=\"row justify-content-md-center\">\r\n <form action=\"/\" method=\"POST\">\r\n <label for=\"campoPesquisa\">Pesquisar disciplina</label>\r\n <input type=\"text\" class=\"form-control\" id=\"campoPesquisa\" name=\"pesquisa\"\r\n aria-describedby=\"ajudaPesquisa\" placeholder=\"Digite o nome da disciplina desejada\">\r\n <small id=\"ajudaPesquisa\" class=\"form-text text-muted\">Nos diga o nome da disciplina desejada.\r\n </small>\r\n <button type=\"submit\" class=\"btn btn-default\" onclick=\"$('#carregando').show();\">Pesquisar</button>\r\n <img id=\"carregando\" style=\"display: none\" width=\"90px\" src=\"/static/carregando.gif\" alt=\"Realizando pesquisa\">\r\n </form>\r\n </div>\r\n <div class=\"row justify-content-md-center\">\r\n <table id=\"resultado_pesquisa\" class=\"table table-striped\">\r\n <thead>\r\n <tr>\r\n <th tabindex=\"0\">Resultado</th>\r\n </tr>\r\n </thead>\r\n <tbody>\r\n\r\n {% if disciplinas == '0' %}\r\n <tr><td> Nenhuma disciplina encontrada </td></tr>\r\n {% elif disciplinas == '1' %}\r\n <tr><td> A pesquisa nรฃo pode ser concluรญda </td></tr>\r\n {% else %}\r\n {% for disciplina in disciplinas %}\r\n <tr>\r\n <td class=\"rowDisciplina\" tabindex=\"0\">{{ disciplina[1]|e }} - Curso: {{ disciplina[2]|e }}</td>\r\n {% for turma in turmas %}\r\n {% if turma[0] == disciplina[0] %}\r\n <tr>\r\n <td class=\"rowTurma\" tabindex=\"0\">{{ turma[1]|e }} - {{ turma[2]|e }} - {{ turma[3]|e }} <i class=\"fas fa-plus-circle adiciona\" onclick=\"insereEvento('{{ disciplina[1] }} ({{ turma[1] }})')\"></i></td>\r\n {% endif %}\r\n {% endfor %}\r\n </tr>\r\n </tr>\r\n\r\n {% endfor %}\r\n {% endif %}\r\n </table>\r\n </div>\r\n </div>\r\n <div class=\"col-9\">\r\n <h2 tabindex=\"0\">Horรกrio</h2>\r\n <div id='calendar' tabindex=\"0\"></div>\r\n </div>\r\n </div>\r\n</div>\r\n\r\n\r\n</body>\r\n</html>\r\n" }, { "alpha_fraction": 0.516846776008606, "alphanum_fraction": 0.7062937021255493, "avg_line_length": 16.09782600402832, "blob_id": "ac9f9ed59f1f13bc39b4fcaab15080134b082ce7", "content_id": "b37f3c332a60ea264e24383f44caf8b6cb28b59d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 1573, "license_type": "permissive", "max_line_length": 30, "num_lines": 92, "path": "/requirements.txt", "repo_name": 
"fabriciopirini/UNIFEI-Timetable", "src_encoding": "UTF-8", "text": "alembic==0.9.1\nappdirs==1.4.3\napturl==0.5.2\nasn1crypto==0.22.0\nastroid==1.6.4\nblinker==1.4\nBrlapi==0.6.5\ncaffeine==2.8.3\ncertifi==2017.4.17\nchardet==3.0.4\nclick==6.7\ncolorama==0.3.7\ncommand-not-found==0.3\ncryptography==2.3\ncupshelpers==1.0\ndefer==1.0.6\ndistro-info==0.17\nEasyProcess==0.2.3\nevdev==0.4.7\nFlask==1.0.2\nFlask-DebugToolbar==0.10.1\nFlask-Migrate==2.0.3\nFlask-Script==2.0.5\nFlask-SQLAlchemy==2.2\nhttplib2==0.18.0\nidna==2.5\nisort==4.3.4\nitsdangerous==0.24\nJinja2==2.10\nkeyring==10.4.0\nkeyrings.alt==2.2\nlanguage-selector==0.1\nlaunchpadlib==1.10.5\nlazr.restfulclient==0.13.5\nlazr.uri==1.0.3\nlazy-object-proxy==1.3.1\nlouis==3.0.0\nlutris==0.4.18\nlxml==4.2.1\nMako==1.0.6\nMarkupSafe==1.0\nmccabe==0.6.1\nmenulibre==2.1.3\nnotify2==0.3\nnumpy==1.14.3\noauth==1.0.1\nolefile==0.44\npackaging==16.8\npexpect==4.2.1\nPillow==5.1.0\npsutil==5.6.6\npsycopg2==2.7.4\npsycopg2-binary==2.7.4\nPyAutoGUI==0.9.36\npycrypto==2.6.1\npycups==1.9.73\npygobject==3.24.1\npyinotify==0.9.6\npylint==1.9.1\nPyMsgBox==1.0.6\npyOpenSSL==17.5.0\npyparsing==2.2.0\npyscreenshot==0.4.2\nPyScreeze==0.1.14\npython-apt==1.4.0b3\npython-debian==0.1.30\npython-editor==1.0.3\npython-xlib==0.14\nPyTweening==1.0.3\npyxdg==0.26\nPyYAML==5.1\nreportlab==3.4.0\nrequests==2.20.0\nscreen-resolution-extra==0.0.0\nSecretStorage==2.3.1\nsimplejson==3.11.1\nsix==1.11.0\nSQLAlchemy==1.1.9\nsystem-service==0.3\nsystemd-python==234\nubuntu-drivers-common==0.0.0\nufw==0.35\nunattended-upgrades==0.1\nurllib3==1.24.2\nvboxapi==1.0\nwadllib==1.3.2\nWerkzeug==0.15.3\nwrapt==1.10.11\nxdiagnose==3.8.8\nxkit==0.0.0\nyoutube-dl==2017.9.24\nzope.interface==4.3.2\n" }, { "alpha_fraction": 0.42848336696624756, "alphanum_fraction": 0.46979036927223206, "avg_line_length": 31.22516632080078, "blob_id": "a8c82eaf5ad28cf3981a995ef4518a6a265f9246", "content_id": "bf57495b04f4c402d4d0baa12ab0f8a13b774b30", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 4868, "license_type": "permissive", "max_line_length": 233, "num_lines": 151, "path": "/app/static/base.js", "repo_name": "fabriciopirini/UNIFEI-Timetable", "src_encoding": "UTF-8", "text": "\"use strict\";\nconst segunda = moment().weekday(1).set({hour: 0, minute: 0, second: 0, millisecond: 0});\nvar disciplinas = [];\n// console.log(segunda)\n$(function () {\n\n $('#calendar').fullCalendar({\n defaultView: 'agendaWeek',\n height: 'auto',\n allDaySlot: false,\n columnHeaderFormat: 'dddd',\n minTime: '07:00:00',\n maxTime: '23:30:00',\n slotDuration: '00:55:00',\n slotLabelFormat: 'H:mm',\n eventBackgroundColor: '#9BD1E9',\n slotEventOverlap: false,\n nowIndicator: false,\n header: {\n left: '',\n center: 'title',\n right: ''\n },\n defaultDate: moment().weekday(1),\n weekNumbers: false,\n navLinks: false,\n editable: false,\n eventLimit: false,\n firstDay: 1,\n timeFormat: 'H:mm',\n weekends: false,\n events: [\n // {\n // id: Math.floor(Math.random() * (100000 - 1) + 1).toString(),\n // title: 'ESSI39.2 - PERรCIAS EM PERICULOSIDADE, INSALUBRIDADE E ACIDENTES DO TRABALHO (PRรTICA)',\n // start: segunda.add(10, 'hours'),\n // end: segunda.add(2, 'hours')\n // }\n // ,{\n // title: 'Click for Google',\n // url: 'http://google.com/',\n // start: '2018-03-28'\n // }\n ],\n eventRender: function (event, element) {\n element.append(\"<span class='closeon'>X</span>\");\n element.find(\".closeon\").click(function () {\n var aux = 
localStorage.getItem(\"disciplinas\");\n var obj = JSON.parse(aux);\n var i = 0;\n while(i < obj.length){\n if (obj[i].id == event.id) {\n obj.splice(i, 1);\n }\n i += 1;\n }\n // console.log(event.id);\n $('#calendar').fullCalendar('removeEvents', event._id);\n localStorage.setItem(\"disciplinas\", JSON.stringify(obj));\n });\n },\n eventMouseover: function (calEvent, jsEvent) {\n var tooltip = '<div class=\"tooltipevent\" style=\"border-radius: 0.2em;border: 0.125em solid #84AE65;padding:0.5em;width:12.5em;height:auto;background:#B5EF8A;position:absolute;z-index:10001;\">' + calEvent.title + '</div>';\n var $tooltip = $(tooltip).appendTo('body');\n\n $(this).mouseover(function (e) {\n $(this).css('z-index', 10000);\n $tooltip.fadeIn('500');\n $tooltip.fadeTo('10', 1.9);\n }).mousemove(function (e) {\n $tooltip.css('top', e.pageY + 10);\n $tooltip.css('left', e.pageX + 20);\n });\n },\n\n eventMouseout: function (calEvent, jsEvent) {\n $(this).css('z-index', 8);\n $('.tooltipevent').remove();\n },\n\n eventAfterRender:function( event, element, view ) {\n $(element).attr(\"id\",\"event_id_\"+event._id);\n }\n });\n\n var eventosPassados = JSON.parse(localStorage.getItem(\"disciplinas\"));\n $('#calendar').fullCalendar('addEventSource', eventosPassados, true);\n});\n\nfunction formataHorario(horario) {\n var posDias = [\n [\"2\", \"Seg\"],\n [\"3\", \"Ter\"],\n [\"4\", \"Qua\"],\n [\"5\", \"Qui\"],\n [\"6\", \"Sex\"]\n ];\n var posHorarios = [\n [\"1\", \"7:00\"],\n [\"2\", \"7:55\"],\n [\"3\", \"8:50\"],\n [\"4\", \"10:10\"],\n [\"5\", \"11:05\"],\n [\"6\", \"13:30\"],\n [\"7\", \"14:25\"],\n [\"8\", \"15:45\"],\n [\"9\", \"16:40\"],\n [\"10\", \"17:35\"],\n [\"11\", \"19:00\"],\n [\"12\", \"19:50\"],\n [\"13\", \"21:00\"],\n [\"14\", \"21:50\"],\n [\"15\", \"22:40\"]\n ];\n var dias, horas;\n if (horario.search(\"M\") > -1) {\n dias = horario.split(\"M\")[0];\n horas = horario.split(\"M\")[1];\n // for (var i = 0; i < dias.length; i++) {\n // moment dias[i] CONTINUAR AQUI!!!!!\n //}\n }\n // else if (horario.search(\"T\") > -1) {\n // horario.split(\"T\");\n // }\n // else if (horario.search(\"N\") > -1) {\n // horario.split(\"N\");\n // }\n}\n\nfunction insereEvento(nome) {\n var idgerado = Math.floor(Math.random() * (100000 - 1) + 1).toString();\n event = [\n {\n id: idgerado,\n title: nome,\n start: moment().weekday(2).set({hour: 10, minute: 0, second: 0, millisecond: 0})\n }\n ];\n $('#calendar').fullCalendar('addEventSource', event, true);\n console.log(\"Disciplina \" + nome + \" inserida. 
ID: \" + idgerado);\n\n disciplinas.push(\n {\n id: idgerado,\n title: nome,\n start: moment().weekday(2).set({hour: 10, minute: 0, second: 0, millisecond: 0})\n }\n );\n localStorage.setItem(\"disciplinas\", JSON.stringify(disciplinas));\n}\n" }, { "alpha_fraction": 0.8199999928474426, "alphanum_fraction": 0.8199999928474426, "avg_line_length": 19, "blob_id": "0d8a619f31bf60ddb410cba502a48b3a532a29cb", "content_id": "790d31a07054ef88ec9e8a07ef5b8c879a158da7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 100, "license_type": "permissive", "max_line_length": 61, "num_lines": 5, "path": "/README.md", "repo_name": "fabriciopirini/UNIFEI-Timetable", "src_encoding": "UTF-8", "text": "# UNIFEI_Timetable\n\nOnline Timetable tool for students' efficient course planning\n\nOngoing project!\n" }, { "alpha_fraction": 0.542682945728302, "alphanum_fraction": 0.5731707215309143, "avg_line_length": 19.5, "blob_id": "624f14277f6de7a010c568a5403462ee823cf62c", "content_id": "4e0bd2c08cc3230baf4d94b36018722d497e24a6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 164, "license_type": "permissive", "max_line_length": 47, "num_lines": 8, "path": "/app/test.py", "repo_name": "fabriciopirini/UNIFEI-Timetable", "src_encoding": "UTF-8", "text": "from scrap import SIGAA\n\nx, y = SIGAA.retorna_turmas('ECO', '2018', '1')\n\nd = SIGAA.format(x, y)\n\nfor item, k in d.items():\n print(': '.join([str(len(k))]))\n" }, { "alpha_fraction": 0.6792452931404114, "alphanum_fraction": 0.7735849022865295, "avg_line_length": 16.66666603088379, "blob_id": "e538e2e257ce35a0b9d6c78ac500cb678908cfee", "content_id": "90992b86e4579e28a3e9e7307ab6328292fefc6a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 106, "license_type": "permissive", "max_line_length": 23, "num_lines": 6, "path": "/config_file.properties", "repo_name": "fabriciopirini/UNIFEI-Timetable", "src_encoding": "UTF-8", "text": "[DatabaseSection]\ndb.database=SIGAA\ndb.host=127.0.0.1\ndb.port=5432\ndb.user=pirini\ndb.password=abc123-\n" } ]
13
ayushchitrey/Think-Code
https://github.com/ayushchitrey/Think-Code
baa85c57c6815033ec2b7168ea38e68756909e1f
0e2dc25539a3920b8533b114a18066dd7c13692e
3c461e63575f5bfedb5ea858a21e7f425cb2b5a0
refs/heads/main
2023-04-05T09:39:07.465923
2021-04-02T17:04:18
2021-04-02T17:04:18
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5537790656089783, "alphanum_fraction": 0.555232584476471, "avg_line_length": 49.53845977783203, "blob_id": "8eff41c7ae0a0574ac8a81427d807a9dddb751db", "content_id": "0f4aec4d28c3017bfd6038bcc35cffebef862e6f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 694, "license_type": "permissive", "max_line_length": 85, "num_lines": 13, "path": "/Python_Programming - 360DigiTMG/Assignment5_Functions_Q2.py", "repo_name": "ayushchitrey/Think-Code", "src_encoding": "UTF-8", "text": "''' Q2. Write a simple user defined function that greets a person in such a way that:\r\n i) It should accept both name of person and message you want to deliver.\r\n ii) If no message is provided, it should greet a default message โ€˜How are youโ€™\r\n Ex: Hello ---xxxx---, How are you -๏ƒ  default message.\r\n Ex: Hello ---xxxx---, --xx your message xx--- '''\r\n \r\n\r\nname = input(\"Enter the name: \")\r\nmsg = input(\"Enter the message: \")\r\nif msg == \"\": #In-case of No personal message\r\n print(\"Hello \" + name + \". \" + \"How are you ?\")\r\nelse: #In-case of personal message\r\n print(\"Hello \" + name + \". \" + msg)\r\n \r\n \r\n \r\n" }, { "alpha_fraction": 0.5727109313011169, "alphanum_fraction": 0.5996409058570862, "avg_line_length": 25.600000381469727, "blob_id": "c078cc7aa9da0899fe44318be2b67ea0f72591c5", "content_id": "04526a32acd0e99dba8d6e62ee02ca039902d903", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 565, "license_type": "permissive", "max_line_length": 95, "num_lines": 20, "path": "/Python_Programming - 360DigiTMG/Assignment2_Operators_Q4.py", "repo_name": "ayushchitrey/Think-Code", "src_encoding": "UTF-8", "text": "''' Q4. A. How to check the presence of an alphabet โ€˜sโ€™ in word โ€œData Scienceโ€ .\r\n B. How can you obtain 64 by using numbers 4 and 3 .'''\r\n\r\n# A.\r\n\r\nword = \"Data Science\"\r\nprint(\"s in Data Science: \", \"s\" in word) # Python is case-sensitive, presence of 's' is False \r\nprint(\"S in Data Science: \", \"S\" in word) # Python is case-sensitive, presence of 'S' is True\r\n\r\n# B.\r\n\r\nx = 64\r\ny = 3\r\nz = 4\r\n# Equation : x+20y+z = 0\r\nequation = 20*y+4\r\nif equation == x:\r\n print('it is a valid relation')\r\nelse:\r\n print('It is not a valid relation')\r\n \r\n" }, { "alpha_fraction": 0.5154503583908081, "alphanum_fraction": 0.5417488217353821, "avg_line_length": 34.261905670166016, "blob_id": "0c41dbec7a709c3f258c329144852dc5c6fea244", "content_id": "9bbae77280b9c181c617bef0900b9064a2fd7496", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1521, "license_type": "permissive", "max_line_length": 111, "num_lines": 42, "path": "/Python_Programming - 360DigiTMG/Assignment1_DataTypes_Q1.py", "repo_name": "ayushchitrey/Think-Code", "src_encoding": "UTF-8", "text": "''' Q1. 
Construct 2 lists containing all the available data types (integer, float, string, complex and Boolean)\r\n and do the following.\r\na.\tCreate another list by concatenating above 2 lists\r\nb.\tFind the frequency of each element in the concatenated list.\r\nc.\tPrint the list in reverse order.'''\r\n\r\nlist1 = [10 , 12.5 , 10+2j , \"malika\" , True]\r\nlist2 = [23 , 12.5 , 17+4j , \"hafiza\" , False]\r\n\r\n# Creating another list by concatenating above 2 lists\r\nlist3 = list1 + list2\r\nprint(\"Concatenated list using + :\" + str(list3))\r\n\r\n# Finding the frequency of each element in the concatenated list\r\n\r\n #Array 'freq' will store frequencies of element \r\nfreq = [None] * len(list3); \r\nvisited = -1; \r\n \r\nfor i in range(0, len(list3)): \r\n count = 1; \r\n for j in range(i+1, len(list3)): \r\n if(list3[i] == list3[j]): \r\n count = count + 1; \r\n #To avoid counting same element again \r\n freq[j] = visited; \r\n \r\n if(freq[i] != visited): \r\n freq[i] = count; \r\n \r\n #Displays the frequency of each element present in array \r\nprint(\"---------------------\"); \r\nprint(\" Element | Frequency\"); \r\nprint(\"---------------------\"); \r\nfor i in range(0, len(freq)): \r\n if(freq[i] != visited): \r\n print(\" \" + str(list3[i]) + \" | \" + str(freq[i])); \r\nprint(\"---------------------\"); \r\n\r\n\r\n# Printing the list in reverse order\r\nprint (\"List in the reverse order: \" + str(list3[::-1]))" }, { "alpha_fraction": 0.6092253923416138, "alphanum_fraction": 0.6344647407531738, "avg_line_length": 41.846153259277344, "blob_id": "ee9bb7b00f747eb8b6f49ea4c00de7102334f1a8", "content_id": "70c90a9d0c1f8f81895fc5a2f87e73cc38f2ed8a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1149, "license_type": "permissive", "max_line_length": 86, "num_lines": 26, "path": "/Python_Programming - 360DigiTMG/Assignment4_ConditionalStatements_and_Keywords_Q2.py", "repo_name": "ayushchitrey/Think-Code", "src_encoding": "UTF-8", "text": "''' Q2.\tFind the final train ticket price with the following conditions. 
\r\n    a.\tIf male and senior citizen, 70% of fare is applicable\r\n    b.\tIf female and senior citizen, 50% of fare is applicable.\r\n    c.\tIf female and normal citizen, 70% of fare is applicable\r\n    d.\tIf male and normal citizen, 100% of fare is applicable\r\n    [Hint: First check for the gender, then calculate the fare based on age factor.\r\n    For both Male and Female, consider them as senior citizens if their age >=60] '''\r\n\r\ngender=str(input(\"Enter Gender: \")).strip().lower() # normalise so Male / FEMALE etc. also match\r\nif gender==\"male\":\r\n    age=int(input(\"Enter Age : \"))\r\n    if age<60:\r\n        print(\"Normal Citizen - 100% of fare is applicable\")\r\n    elif age>=60:\r\n        print(\"Senior Citizen - 70% of fare is applicable\")\r\n    else:\r\n        print(\"check that your input is an integer and try again\")\r\n    \r\nelif gender==\"female\":\r\n    age=int(input(\"Enter Age : \"))\r\n    if age<60:\r\n        print(\"Normal Citizen - 70% of fare is applicable\")\r\n    elif age>=60:\r\n        print(\"Senior Citizen - 50% of fare is applicable\")\r\n    else:\r\n        print(\"check that your input is an integer and try again\")\r\n    \r\n    \r\n" }, { "alpha_fraction": 0.5886157751083374, "alphanum_fraction": 0.6300129294395447, "avg_line_length": 26.535715103149414, "blob_id": "256243d330255a443c3af30bd7ce7624ac98e1f3", "content_id": "8d0062c9db114dceb13a1aa9a2434ba5bdfb7a16", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 777, "license_type": "permissive", "max_line_length": 105, "num_lines": 28, "path": "/Python_Programming - 360DigiTMG/Assignment5_Functions_Q1.py", "repo_name": "ayushchitrey/Think-Code", "src_encoding": "UTF-8", "text": "''' Q1.\tA) Find the magnitude of (3+5j)\r\n        B) list1 = [1,5.5, (10+20j),'data science'].. Print default functions and parameters that exist in list1.\r\n        C) How do we create a sequence of numbers in Python.\r\n        D) Read the input from keyboard and print a sequence of numbers up to that number. '''\r\n\r\n# A.\r\nimport numpy as np\r\nvector=np.array([3,5]) # real and imaginary parts of (3+5j)\r\nmagnitude=np.linalg.norm(vector) # same as abs(3+5j)\r\nprint(magnitude)\r\n\r\n# B. \r\nlist1 = [1,5.5, (10+20j),\"data science\"]\r\nprint(dir(list1)) # default functions (methods) available on list1\r\n\r\n# C. 
\r\nnumbers=range(1,10)\r\nsequence_of_numbers=[]\r\nfor number in numbers :\r\n    if number % 5 in (1,2):\r\n        sequence_of_numbers.append(number)\r\nprint(sequence_of_numbers) # print once, after the loop has built the sequence\r\n \r\n# D.\r\nimport numpy as np\r\na = int(input(\"Enter a number: \"))\r\nprint(np.arange(a))\r\n\r\n" }, { "alpha_fraction": 0.6851851940155029, "alphanum_fraction": 0.7444444298744202, "avg_line_length": 52, "blob_id": "e94f04933bb5df33727afed811438811ce89778c", "content_id": "8c131bc816a4ec1dc44ad215196ac3f54cee8860", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 812, "license_type": "permissive", "max_line_length": 119, "num_lines": 15, "path": "/Python_Programming - 360DigiTMG/Assignment1_DataTypes_Q3.py", "repo_name": "ayushchitrey/Think-Code", "src_encoding": "UTF-8", "text": "''' Q3.\tCreate a data dictionary of 5 states having state name as key and number of covid-19 cases as values.\r\na.\tPrint only state names from the dictionary.\r\nb.\tUpdate another country and its covid-19 cases in the dictionary.'''\r\n\r\ndictionary={\"Maharashtra\":990795,\"Andhra Pradesh\":537687,\"Tamil Nadu\":486052,\"Karnataka\":430947,\"Uttar Pradesh\":292029}\r\nprint(\"Dictionary :\" , dictionary)\r\n\r\n#Printing only the State names from the dictionary\r\nprint (\"State Names : %s\" % dictionary.keys()) # or print(dict.keys())\r\n\r\n# Updating another key and its value in the dictionary\r\ndictionary2={\"United States\":6990000}\r\nprint(\"Country that has to be updated in Dictionary :\", dictionary2)\r\ndictionary.update(dictionary2)\r\nprint(\"Dictionary after Updation of another Country & its Covid-19 cases : \", dictionary)\r\n" }, { "alpha_fraction": 0.5622837543487549, "alphanum_fraction": 0.5906210541725159, "avg_line_length": 32.043479919433594, "blob_id": "a9fb97903d8705c09450925510f2b6e0c41dbeca", "content_id": "24e9fcb67ea803dc2a27166a1b770b792a61c054", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 789, "license_type": "permissive", "max_line_length": 81, "num_lines": 23, "path": "/Python_Programming - 360DigiTMG/Assignment4_ConditionalStatements_and_Keywords_Q3.py", "repo_name": "ayushchitrey/Think-Code", "src_encoding": "UTF-8", "text": "''' Q3.\tCheck whether the given number is positive and divisible by 5 or not. 
'''\r\n\r\nnumber = float(input(\"Enter a number: \"))\r\nif number > 0:\r\n print(\"Positive number\")\r\n if(number % 5 == 0):\r\n print(\"Given Number {0} is Divisible by 5\".format(number))\r\n else:\r\n print(\"Given Number {0} is Not Divisible by 5\".format(number))\r\n \r\nelif number == 0:\r\n print(\"Zero\")\r\n if(number % 5 == 0):\r\n print(\"Given Number {0} is Divisible by 5\".format(number))\r\n else:\r\n print(\"Given Number {0} is Not Divisible by 5\".format(number))\r\n \r\nelse:\r\n print(\"Negative number\")\r\n if(number % 5 == 0):\r\n print(\"Given Number {0} is Divisible by 5\".format(number))\r\n else:\r\n print(\"Given Number {0} is Not Divisible by 5\".format(number))\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.477477490901947, "alphanum_fraction": 0.5285285115242004, "avg_line_length": 24.461538314819336, "blob_id": "508426e8d1c4c89e057a2fffd3c72be45eeb616b", "content_id": "3b669cad8bb45f146e46df89c321109aea0e80c5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 333, "license_type": "permissive", "max_line_length": 110, "num_lines": 13, "path": "/Python_Programming - 360DigiTMG/Assignment6_Loops_Q3.py", "repo_name": "ayushchitrey/Think-Code", "src_encoding": "UTF-8", "text": "''' Q3.\tConsider a list [2,3,4,5,6]. Find the total sum of cumulative products of all the numbers in the list.\r\n     (sum= 2*3 + 2*3*4 +....) '''\r\n \r\ndef cumprod(lst):\r\n    results = []\r\n    cur = 1\r\n    for n in lst:\r\n        cur *= n\r\n        results.append(cur)\r\n    return results\r\n\r\nlst = [2,3,4,5,6]\r\nresults = cumprod(lst) # [2, 6, 24, 120, 720]\r\nprint (sum(results[1:])) # 2*3 + 2*3*4 + ... = 870, skipping the lone first element per the formula\r\n\r\n" }, { "alpha_fraction": 0.6658228039741516, "alphanum_fraction": 0.6759493947029114, "avg_line_length": 63.83333206176758, "blob_id": "a5b7057c4b136784de53a96c443360979590fffc", "content_id": "a6d3d750347116e6aa160d2027b2ce766b9ac6a6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 397, "license_type": "permissive", "max_line_length": 110, "num_lines": 6, "path": "/Python_Programming - 360DigiTMG/Assignment1_DataTypes_Q5.py", "repo_name": "ayushchitrey/Think-Code", "src_encoding": "UTF-8", "text": "''' Q5.\tWrite a program to accept the user's first and last name and then print it in the reverse order with a\r\nspace between first name and last name'''\r\nfirst_name = input(\"Enter your First Name : \")\r\nlast_name = input(\"Enter your Last Name : \")\r\n# [::-1] takes the input entered and reverses the order\r\nprint (\"Full Name in the reverse order: \" + first_name[::-1] + \" \" + last_name[::-1])\r\n" }, { "alpha_fraction": 0.583038866519928, "alphanum_fraction": 0.620141327381134, "avg_line_length": 31.176469802856445, "blob_id": "34806c119d0978a571ab311fd4cffa8f2977c251", "content_id": "7bd552833c1212c67948ac24ee635fc433830594", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 570, "license_type": "permissive", "max_line_length": 80, "num_lines": 17, "path": "/Python_Programming - 360DigiTMG/Assignment4_ConditionalStatements_and_Keywords_Q1.py", "repo_name": "ayushchitrey/Think-Code", "src_encoding": "UTF-8", "text": "''' Q1.\t Take a variable age which is of positive value and check the following:\r\n         a. If age is less than 1, print 'infant'\r\n         b. If age is between 1 to 14, print 'child'\r\n         c. 
If age is between 15 to 60, print 'Adult'\r\n         b.\tIf age is more than 60, print 'senior citizens' '''\r\n         \r\nage=int(input(\"Enter Age : \"))\r\nif age<1:\r\n    print(\"INFANT\")\r\nelif age>=1 and age<=14:\r\n    print(\"CHILD\")\r\nelif age>=15 and age<60:\r\n    print(\"ADULT\")\r\nelif age>=60:\r\n    print(\"SENIOR CITIZEN\")\r\nelse:\r\n    print(\"check that your input is an integer and try again\")\r\n\r\n" }, { "alpha_fraction": 0.5220588445663452, "alphanum_fraction": 0.5808823704719543, "avg_line_length": 43.16666793823242, "blob_id": "22c98ca69fee6f9840603fe2bd47a552423c152e", "content_id": "18a30d88e6637286acaad066ab594952bdc20491", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 544, "license_type": "permissive", "max_line_length": 96, "num_lines": 12, "path": "/Python_Programming - 360DigiTMG/Assignment6_Loops_Q4.py", "repo_name": "ayushchitrey/Think-Code", "src_encoding": "UTF-8", "text": "''' Q4.\tCreate 2 lists.. one list contains 10 numbers (list1=[0,1,2,3....9]) \r\n         and other list contains words of those 10 numbers (list2=['zero','one','two',.... ,'nine']).\r\n         Create a dictionary such that list2 are keys and list 1 are values. '''\r\n \r\ndef Convert(keys, values): # avoid shadowing the built-in 'list'; use the arguments, not globals\r\n    res_dct = {keys[i]: values[i] for i in range(len(keys))}\r\n    return res_dct\r\n \r\n# Driver code\r\nlist1 = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\r\nlist2 = ['zero','one','two','three','four','five','six','seven','eight','nine']\r\nprint(\"Dictionary: \", Convert(list2, list1))\r\n\r\n" }, { "alpha_fraction": 0.40625, "alphanum_fraction": 0.4635416567325592, "avg_line_length": 11.428571701049805, "blob_id": "ecc8983655621bbfa74bf8856333eb05800a0362", "content_id": "f965037cd580ce207274e7bdb0cb88a58b02cb0d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 192, "license_type": "permissive", "max_line_length": 65, "num_lines": 14, "path": "/Python_Programming - 360DigiTMG/Assignment2_Operators_Q2.py", "repo_name": "ayushchitrey/Think-Code", "src_encoding": "UTF-8", "text": "''' Q2. a=5,b=3,c=10.. What will be the output of the following:\r\n        A. a/=b\r\n        B. c*=5 '''\r\n \r\n#Initializing inputs\r\na = 5\r\nb = 3\r\nc = 10\r\n\r\na /= b\r\nprint(a)\r\n\r\nc *= 5\r\nprint(c)\r\n\r\n\r\n" }, { "alpha_fraction": 0.6317365169525146, "alphanum_fraction": 0.6875, "avg_line_length": 36.71428680419922, "blob_id": "980e73b849f9e53ab76d5775d2d9105d53c4a0cf", "content_id": "7832dd1e787b9e62f6a04a71190174b02fa50b10", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 548, "license_type": "permissive", "max_line_length": 111, "num_lines": 14, "path": "/Python_Programming - 360DigiTMG/Assignment3_Variables_and_ExceptionHandling_Q3.py", "repo_name": "ayushchitrey/Think-Code", "src_encoding": "UTF-8", "text": "''' Q3.\tUsing Exception handling, write a program to find a reciprocal of the number in such a way that:\r\n     a.\tIf reciprocal exists, it should print reciprocal of that number\r\n        (take any integer) \r\n     b.\tIf reciprocal does not exist, it should print \"reciprocal cannot be found\" along with the type of error.\r\n        (take 0). 
'''\r\n\r\nimport numpy as np\r\nx=np.array([1.,2.,.2,.3])\r\nprint(\"original array:\")\r\nprint(x)\r\nr1=np.reciprocal(x)\r\nr2=1/x\r\nassert np.array_equal(r1,r2)\r\nprint(\"Reciprocal for all elements of the said array:\", r1)\r\n\r\n" }, { "alpha_fraction": 0.6436507701873779, "alphanum_fraction": 0.644444465637207, "avg_line_length": 32.75, "blob_id": "30af334a919375bda5d3302cbe3e7b7a9d42cf7d", "content_id": "0b9cfd6c5977031d0f72526286a620f26d54c034", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1260, "license_type": "permissive", "max_line_length": 114, "num_lines": 36, "path": "/Python_Programming - 360DigiTMG/Assignment8_OOPS_Concept_Q3.py", "repo_name": "ayushchitrey/Think-Code", "src_encoding": "UTF-8", "text": "''' Q3.\tExplain Polymorphism in Python? Construct an example for Polymorphism. '''\r\n\r\n# Polymorphism means the ability to take various forms.\r\n# It is the ability of an object to adapt the code to the type of the data it is processing.\r\n# It is a built-in feature.\r\n# Polymorphism allows us to define methods in the child class with the same name as defined in their parent class.\r\n# Polymorphism means same function name (but different signatures) being uses for different types.,\r\n# (i.e.)it helps to describe an action regardless of the type of objects.\r\n \r\nclass India(): \r\n def capital(self): \r\n print(\"New Delhi is the capital of India.\") \r\n \r\n def language(self): \r\n print(\"Hindi is the most widely spoken language of India.\") \r\n \r\n def type(self): \r\n print(\"India is a developing country.\") \r\n \r\nclass USA(): \r\n def capital(self): \r\n print(\"Washington, D.C. is the capital of USA.\") \r\n \r\n def language(self): \r\n print(\"English is the primary language of USA.\") \r\n \r\n def type(self): \r\n print(\"USA is a developed country.\") \r\n \r\nobj_ind = India() \r\nobj_usa = USA() \r\n\r\nfor country in (obj_ind, obj_usa): \r\n country.capital() \r\n country.language() \r\n country.type() \r\n \r\n\r\n" }, { "alpha_fraction": 0.6317365169525146, "alphanum_fraction": 0.7005987763404846, "avg_line_length": 64.80000305175781, "blob_id": "22fea4943221119a8b842e714b6089203263556a", "content_id": "c751dcf2d56ef0571ca19a58c7b4d461e46a1d69", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 334, "license_type": "permissive", "max_line_length": 113, "num_lines": 5, "path": "/Python_Programming - 360DigiTMG/Assignment1_DataTypes_Q4.py", "repo_name": "ayushchitrey/Think-Code", "src_encoding": "UTF-8", "text": "''' Q4.\tWrite a Python program which will find all such numbers which are divisible by 7 but are not a \r\nmultiple of 5, between 2000 and 3200 (both included). 
The numbers obtained should be printed in a comma-separated\r\nsequence on a single line'''\r\nlist = [ x for x in range(2000, 3201) if x % 7 == 0 and x % 5 != 0 ]\r\nprint (list)\r\n" }, { "alpha_fraction": 0.5622837543487549, "alphanum_fraction": 0.6107266545295715, "avg_line_length": 36.400001525878906, "blob_id": "72f7ef21737c53bdb67f788c83c667de8399e1d8", "content_id": "a1077e615c7d80d278d6e1b5b3f7d5b9b6a58a39", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 578, "license_type": "permissive", "max_line_length": 104, "num_lines": 15, "path": "/Python_Programming - 360DigiTMG/Assignment6_Loops_Q1.py", "repo_name": "ayushchitrey/Think-Code", "src_encoding": "UTF-8", "text": "''' Q1.\tPrint the Palindromes (reverse of the number is itself) that are present in between 100 and 250.\r\n (Ex: 121,131,212,222... etc.) '''\r\n\r\nminimum = int(input(\"please enter the minimum value:\"))\r\nmaximum = int(input(\"please enter the maximum value:\"))\r\nprint(\"palindrom Numbers between %d and %d are:\"%(minimum,maximum))\r\nfor num in range (minimum,maximum+1):\r\n temp=num\r\n reverse=0\r\n while(temp>0):\r\n Reminder=temp % 10\r\n reverse =(reverse * 10)+Reminder\r\n temp=temp//10\r\n if(num==reverse):\r\n print(\"%d\" %num,end=' ')\r\n\r\n" }, { "alpha_fraction": 0.5108358860015869, "alphanum_fraction": 0.5758513808250427, "avg_line_length": 16.647058486938477, "blob_id": "b33e656bb3f9f2dbba2d8f679f07048c7184bc30", "content_id": "3a83b374da7049f84d80fbbf3f1bccf0c70ff18e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 323, "license_type": "permissive", "max_line_length": 78, "num_lines": 17, "path": "/Python_Programming - 360DigiTMG/Assignment2_Operators_Q3.py", "repo_name": "ayushchitrey/Think-Code", "src_encoding": "UTF-8", "text": "''' Q3. A. How can you print a number between 55 and 75.\r\n B. 
How can you print a number which is either below 55 or above 75.'''\r\n\r\n# A.\r\n\r\nimport random\r\nn = random.randint(55,75)\r\nprint(n)\r\n\r\n \r\n# B.\r\n \r\na=int(input(random.randint(0, 100)))\r\nif a<55 or a>75:\r\n print(a)\r\nelse:\r\n print(\"Try again\")\r\n \r\n" }, { "alpha_fraction": 0.5858310461044312, "alphanum_fraction": 0.6430517435073853, "avg_line_length": 43.625, "blob_id": "8cff23e63bb08b29e5bb47ce0b75a2316e76fa7b", "content_id": "89dfb252a7f1c68dc4f24c61c98fe131f0cd632e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 367, "license_type": "permissive", "max_line_length": 74, "num_lines": 8, "path": "/Python_Programming - 360DigiTMG/Assignment5_Functions_Q3.py", "repo_name": "ayushchitrey/Think-Code", "src_encoding": "UTF-8", "text": "''' Q3.By using a filter function, find positive number present in a list.\r\n Lsit1= [2, 6, -5, 4, -8, -9, 10, 1] '''\r\n\r\n#By using a filter function, find positive number present in a list\r\nnums=[2, 6, -5,4,-8,-9,10,1]\r\nprint(\"original numbers in the list:\",nums)\r\nnew_nums=list(filter(lambda x: x>0,nums))\r\nprint(\"positive numbers in the list:\", new_nums)\r\n\r\n" }, { "alpha_fraction": 0.8235294222831726, "alphanum_fraction": 0.8235294222831726, "avg_line_length": 50, "blob_id": "1407e6c26eb70d35b14e5a41d794e1a13bfd0d5c", "content_id": "4a697e5892b84c0edb123481beb86c2f8fee921b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 102, "license_type": "permissive", "max_line_length": 88, "num_lines": 2, "path": "/README.md", "repo_name": "ayushchitrey/Think-Code", "src_encoding": "UTF-8", "text": "# Think-Code\nPractice Codes and Assignments solved by me on different topics from different websites.\n" }, { "alpha_fraction": 0.5561877489089966, "alphanum_fraction": 0.6102418303489685, "avg_line_length": 23.035715103149414, "blob_id": "b6ad231f9894ab9fa0b5151a54ff9dcb74eb4451", "content_id": "021433c57266e3f11d6b777a7da19b275ddf5030", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 709, "license_type": "permissive", "max_line_length": 103, "num_lines": 28, "path": "/Python_Programming - 360DigiTMG/Assignment2_Operators_Q1.py", "repo_name": "ayushchitrey/Think-Code", "src_encoding": "UTF-8", "text": "''' Q1.\tA. Write an equation which relates 399, 543 and 12345.\r\n B. โ€œWhen I divide 5 with 3, I got 1. 
But when I divide -5 with 3, I got -2\"\r\n - How would you justify it?'''\r\n \r\n# A.\r\n\r\nx = 12345\r\ny = 543\r\nz = 399\r\n# Equation : 22*y + z = x, since 22*543 + 399 = 12345\r\nequation = 22*y + z\r\nif equation == x:\r\n print('it is a valid relation')\r\nelse:\r\n print('It is not a valid relation')\r\n \r\n \r\n# B.\r\n\r\na=5\r\nb=-5\r\nc=3\r\nprint(a/c)\r\nprint(b/c)\r\nprint(a//c)\r\nprint(b//c) # gets rounded off towards the left side, so -2 is the answer\r\n# / - Divide left operand by the right one (always results into float)\r\n# // - Floor division - division that results in a whole number, adjusted to the left on the number line\r\n\r\n" }, { "alpha_fraction": 0.4386281669139862, "alphanum_fraction": 0.4891696870326996, "avg_line_length": 31.375, "blob_id": "fe0ab050260706528c5e3e08be30d4de75744bd9", "content_id": "0af894aae0f4110e903ee5182cbfe617b4db7070", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 554, "license_type": "permissive", "max_line_length": 87, "num_lines": 16, "path": "/Python_Programming - 360DigiTMG/Assignment6_Loops_Q2.py", "repo_name": "ayushchitrey/Think-Code", "src_encoding": "UTF-8", "text": "''' Q2.\t Consider a list1 [3,4,5,6,7,8]. Create a new list2 such that \r\n Add 10 to the even number and multiply with 5 if it is odd number in the list1. '''\r\n \r\n# list of numbers \r\nlist1 = [3, 4, 5, 6, 7, 8] \r\n \r\n# iterating each number in list \r\nfor num in list1: \r\n \r\n # checking condition \r\n \r\n if num % 2 == 0: # Checking for even condition\r\n print(num+10, end = \" \")\r\n \r\n if num % 2 != 0: # Checking for odd condition\r\n print(num*5, end = \" \") \r\n \r\n\r\n \r\n" }, { "alpha_fraction": 0.6421933174133301, "alphanum_fraction": 0.6979553699493408, "avg_line_length": 32.5625, "blob_id": "c3ee0763db40f6efacd91b6cfb420f72c022cf82", "content_id": "cae4443d104979b0b9e602ed67065d2dcf125b8e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1076, "license_type": "permissive", "max_line_length": 100, "num_lines": 32, "path": "/Python_Programming - 360DigiTMG/Assignment1_DataTypes_Q2.py", "repo_name": "ayushchitrey/Think-Code", "src_encoding": "UTF-8", "text": "''' Q2.\tCreate 2 Sets containing integers (numbers from 1 to 10 in one set and 5 to 15 in other set)\r\na.\tFind the common elements in above 2 Sets.\r\nb.\tFind the elements that are not common.\r\nc.\tRemove element 7 from both the Sets.'''\r\n\r\nlist1 = [1,2,3,4,5,6,7,8,9,10]\r\nlist2 = [5,6,7,8,9,10,11,12,13,14,15]\r\n\r\n# Converting the lists to sets\r\nlist1_as_set = set(list1)\r\nlist2_as_set = set(list2)\r\n\r\n# Finding the common elements of the sets and lists\r\nintersection = list1_as_set.intersection(list2)\r\nintersection_as_list = list(intersection)\r\nprint(\"Common Elements : \", intersection_as_list)\r\n\r\n# Finding the elements of the sets and lists that are not in common\r\nunion = list1_as_set.union(list2)\r\nunion_as_list = list(union)\r\nprint(\"Union of the 2 lists : \", union_as_list)\r\nunion_as_set = set(union_as_list)\r\n\r\nuncommon = list(union_as_set - intersection)\r\nprint(\"Uncommon Elements : \", uncommon)\r\n\r\n\r\n# Removing element 7 from both the sets\r\nlist1.remove(7)\r\nprint(\"Element 7 removed from list 1 : \", list1)\r\nlist2.remove(7)\r\nprint(\"Element 7 removed from list 2 : \", list2)\r\n\r\n" }, { "alpha_fraction": 0.5678732991218567, "alphanum_fraction": 0.5972850918769836, "avg_line_length": 54, "blob_id": 
"a704bfd0f1d47460db2ebf342e06b92ce0a48221", "content_id": "bb3f5a836a01b4116b4e5524239f788e052d9019", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 442, "license_type": "permissive", "max_line_length": 84, "num_lines": 7, "path": "/Python_Programming - 360DigiTMG/Assignment1_DataTypes_Q6.py", "repo_name": "ayushchitrey/Think-Code", "src_encoding": "UTF-8", "text": "''' Q6.\tWrite a python program to find the volume of a sphere with diameter 12 cm'''\r\n #### d = diameter = 12 cm. Therefore; r = radius = d/2 = 12/2 = 6 cm\r\n#importing libraries to get the value of pi\r\nfrom math import pi\r\nradius_of_sphere = float(input(\"Enter the radius of the Sphere : \"))\r\nvolume_of_sphere = (4/3)*pi*(radius_of_sphere)**3\r\nprint(\"Volume of Sphere = \", volume_of_sphere)\r\n " }, { "alpha_fraction": 0.674648642539978, "alphanum_fraction": 0.6793336868286133, "avg_line_length": 45.92499923706055, "blob_id": "cd9a8da68ede0adc728cb7773d6a2ca18bc1ec5f", "content_id": "35fe10dbc7eef70559c2f7a662b8495d617b2d8b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1925, "license_type": "permissive", "max_line_length": 472, "num_lines": 40, "path": "/Python_Programming - 360DigiTMG/Assignment7_Packages_Q3.py", "repo_name": "ayushchitrey/Think-Code", "src_encoding": "UTF-8", "text": "''' Q3.\tFor the data set โ€œIndian_citiesโ€\r\n a)\tConstruct histogram on literates_total and comment about the inferences\r\n b)\tConstruct scatter plot between male graduates and female graduates\r\n c)\tConstruct Boxplot on total effective literacy rate and draw inferences\r\n d)\tFind out the number of null values in each column of the dataset and delete them. 
'''\r\n\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\nindiancities = pd.read_csv(\"C:\\\\Users\\\\Malika Hafiza Pasha\\\\#Coding\\\\Data_Science\\\\#Python_Programming\\\\Indian_Cities_Dataset.csv\")\r\nindiancities\r\n\r\n# A.\r\nplt.hist(indiancities.literates_total) # histogram\r\n\r\n# B.\r\nx = indiancities.male_graduates\r\ny = indiancities.female_graduates\r\nplt.scatter(x,y) # scatterplot\r\n\r\n# C.\r\nplt.boxplot(indiancities.effective_literacy_rate_total) # boxplot\r\n\r\n# D.\r\n# count the missing values on the dataframe itself; re-indexing it to 22 arbitrary\r\n# letter labels would discard every city row and leave only NaN values to count\r\ndetails = indiancities.copy()\r\nprint(details)\r\n\r\n# show the boolean dataframe \r\nprint(\" \\nshow the boolean Dataframe : \\n\\n\", details.isnull()) \r\n \r\n# Count total NaN at each column in a DataFrame \r\nprint(\" \\nCount total NaN at each column in a DataFrame : \\n\\n\", details.isnull().sum()) \r\n\r\n# Count total NaN in a DataFrame \r\nprint(\" \\nCount total NaN in a DataFrame : \\n\\n\", details.isnull().sum().sum()) \r\n\r\n# drop all rows with any NaN and NaT values\r\ndetails1 = details.dropna()\r\nprint(details1)\r\n\r\n\r\n" }, { "alpha_fraction": 0.6937334537506104, "alphanum_fraction": 0.7334510087966919, "avg_line_length": 39.74074172973633, "blob_id": "a89724ba044d7349eb065c2dae6f2c4d5de3d0de", "content_id": "6de6f0101eb9d4a9fdd992387ca5fd2076101543", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1137, "license_type": "permissive", "max_line_length": 118, "num_lines": 27, "path": "/Python_Programming - 360DigiTMG/Assignment7_Packages_Q2.py", "repo_name": "ayushchitrey/Think-Code", "src_encoding": "UTF-8", "text": "''' Q2. For the dataset \"Indian_cities\", \r\n a)\tFind out top 10 states in female-male sex ratio\r\n b)\tFind out top 10 cities in total number of graduates\r\n c)\tFind out top 10 cities and their locations in respect of total effective_literacy_rate. 
'''\r\n\r\nimport pandas as pd\r\n\r\nindiancities = pd.read_csv(\"C:\\\\Users\\\\Malika Hafiza Pasha\\\\#Coding\\\\Data_Science\\\\#Python_Programming\\\\Indian_Cities_Dataset.csv\")\r\nindiancities\r\n\r\n# A.\r\nprint(\"Top 10 States in female-male sex ratio\")\r\ntop10_states = indiancities.sort_values(by='sex_ratio', ascending = False) # rank by the sex_ratio column, not alphabetically by state name\r\ntop10_states_sex_ratio = top10_states.head(10)\r\ntop10_states_sex_ratio\r\n\r\n# B.\r\nprint(\"Top 10 cities in total number of graduates\")\r\ntop10_cities = indiancities.sort_values(by='total_graduates', ascending = False)\r\ntop10_cities_total_graduates = top10_cities.head(10)\r\ntop10_cities_total_graduates\r\n\r\n# C.\r\nprint(\"Top 10 cities and their locations in respect of total effective_literacy_rate\")\r\ntop10_cities1 = indiancities.sort_values(by='effective_literacy_rate_total', ascending = False)\r\ntop10_cities_total_literacy = top10_cities1.head(10)\r\ntop10_cities_total_literacy\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.6023392081260681, "alphanum_fraction": 0.605847954750061, "avg_line_length": 36.59090805053711, "blob_id": "741b3699eb064f68f99451e1fbb4ef37862fa499", "content_id": "01b7c9083c6cc58ed1d3a0df01d426fe492f7bfa", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 855, "license_type": "permissive", "max_line_length": 105, "num_lines": 22, "path": "/Python_Programming - 360DigiTMG/Assignment8_OOPS_Concept_Q2.py", "repo_name": "ayushchitrey/Think-Code", "src_encoding": "UTF-8", "text": "''' Q2.\tConstruct an example for Inheritance. '''\r\n\r\n#Inheritance allows us to define a class that inherits all the methods and properties from another class.\r\n#Parent class is the class being inherited from, also called base class.\r\n#Child class is the class that inherits from another class, also called derived class.\r\n\r\nclass Parent:\r\n def __init__(self , fname, fage):\r\n self.firstname = fname\r\n self.age = fage\r\n def view(self):\r\n print(self.firstname , self.age)\r\nclass Child(Parent):\r\n def __init__(self , fname , fage):\r\n Parent.__init__(self, fname, fage)\r\n self.lastname = \"Pasha\"\r\n def view(self):\r\n print(\"First Name : \" , self.firstname)\r\n print(\"Last Name : \" , self.lastname) \r\n print(\"Age : \", self.age)\r\nob = Child(\"Malika Hafiza\" , '26')\r\nob.view()\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.6333724856376648, "alphanum_fraction": 0.6380728483200073, "avg_line_length": 38.33333206176758, "blob_id": "208770efe963a2184bbdc829b0f75fdd55a6d5db", "content_id": "14aeaaeea5475be9a1a51c2c60eb404234006d21", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 855, "license_type": "permissive", "max_line_length": 152, "num_lines": 21, "path": "/Python_Programming - 360DigiTMG/Assignment7_Packages_Q1.py", "repo_name": "ayushchitrey/Think-Code", "src_encoding": "UTF-8", "text": "''' Q1.\tWrite a function that eliminates all the punctuation symbols, html tags, etc. in the given text.\r\n It should return plain text containing alpha numeric characters.\r\n Text=\"NumPy,<1> pandas ?,matplotlib; |,seaborn# SciPy* etc.. are few &Python packages) that are) frequently (/used |@for text% preprocessing$.\" '''\r\n \r\nimport re\r\n\r\n# define punctuation\r\npunctuations = '''!()-[]{};:'\"\\,<>./?@#$%^&*_~|'''\r\n\r\ntext= ''' \"NumPy,<1> pandas ?,matplotlib; |,seaborn# SciPy* etc.. 
are few &Python packages) that are) frequently (/used |@for text% preprocessing$.โ€ '''\r\n\r\n# To take input from the user\r\n# my_str = input(\"Enter a string: \")\r\n\r\n# remove punctuation from the string\r\nno_punct = \"\"\r\nfor char in text:\r\n if char not in punctuations:\r\n no_punct = no_punct + char\r\n\r\n# display the unpunctuated string\r\nprint(no_punct)\r\n\r\n\r\n" } ]
27
euginiius/Phoenix
https://github.com/euginiius/Phoenix
b5a1b42a1ebc14249513326d9172e52015c21456
33a7ebcd7e4859ffc51ce5b66cb48491f3ecff03
4992688af443095ee9b05b96920e8ea50be2b706
refs/heads/master
2020-09-06T17:18:11.449428
2019-12-04T09:14:48
2019-12-04T09:14:48
220,491,694
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6853713989257812, "alphanum_fraction": 0.6853713989257812, "avg_line_length": 28.09433937072754, "blob_id": "bbb317b4718e8b5f0c0aace727b9b7feb0927c42", "content_id": "9d7f613e89926b3ab7f21c05a0e2fb509ddcbfb5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3083, "license_type": "no_license", "max_line_length": 81, "num_lines": 106, "path": "/mantenedor/app/views.py", "repo_name": "euginiius/Phoenix", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nfrom django.urls import reverse_lazy\nfrom django import forms\n\nfrom .models import Producto\nfrom .forms import ProductoForm, DatosClienteForm\n\nfrom .serializers import ProductoSerializer\nfrom rest_framework import generics\n# Create your views here.\n\nclass API_objects(generics.ListCreateAPIView):\n queryset = Producto.objects.all()\n serializer_class= ProductoSerializer\n\nclass API_objects_details(generics.RetrieveUpdateDestroyAPIView):\n queryset = Producto.objects.all()\n serializer_class= ProductoSerializer\n\ndef pagina_principal(request):\n return render(request, 'app/index.html', {})\n\n\ndef accesorios(request):\n return render(request, 'app/accesorios.html', {})\n\n\ndef carcasas(request):\n return render(request, 'app/carcasas.html', {})\n\n\ndef funkos(request):\n return render(request, 'app/funkos.html', {})\n\n\ndef llaveros(request):\n return render(request, 'app/llaveros.html', {})\n\n\ndef peluches(request):\n return render(request, 'app/peluches.html', {})\n\n\ndef ropa(request):\n return render(request, 'app/ropa.html', {})\n\n\ndef tazones(request):\n return render(request, 'app/tazones.html', {})\n\n\ndef contactenos(request):\n return render(request, 'app/contactenos.html', {})\n\n\ndef entregas(request):\n if request.method == 'POST':\n form = DatosClienteForm(request.POST)\n if form.is_valid():\n model_instance = form.save(commit=False)\n model_instance.save()\n return redirect('/entregas')\n else:\n return render(request, 'app/entregas.html', {'form': form})\n else:\n form = DatosClienteForm()\n return render(request,'app/entregas.html',{})\n\ndef registrar_producto(request):\n if request.method == \"POST\":\n form = ProductoForm(request.POST, request.FILES)\n if(form.is_valid):\n model_instance = form.save(commit=False)\n model_instance.save()\n return redirect('/registrarProducto')\n else:\n form = ProductoForm()\n return render(request, \"app/ins_producto.html\", {'form': form})\n\n\ndef listar_productos(request):\n productos = Producto.objects.all()\n return render(request, \"app/listar_productos.html\", {'productos': productos})\n\n\ndef editar_producto(request, codigo_producto):\n instancia = Producto.objects.get(id=codigo_producto)\n form = ProductoForm(instance=instancia)\n\n if request.method == \"POST\":\n form= ProductoForm(request.POST, instance= instancia)\n if form.is_valid():\n instancia = form.save(commit=False)\n instancia.save()\n return render(request, \"app/editar_producto.html\",{'form':form})\n\n\ndef borrar_producto(request, codigo_producto):\n instancia = Producto.objects.get(id=codigo_producto)\n instancia.delete()\n return redirect(\"/paginaPrincipal\")\n\ndef listar_marca(request, producto_marca):\n productos = Producto.objects.filter(marca=producto_marca)\n return render(request, \"app/listar_tipo.html\", {'productos': productos})" }, { "alpha_fraction": 0.729629635810852, "alphanum_fraction": 0.729629635810852, "avg_line_length": 
26.100000381469727, "blob_id": "c3e3734a538a02832550d30ac630062a3786b81", "content_id": "8c19dea73d1fc646866fe7d154aa0a25f87679d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 270, "license_type": "no_license", "max_line_length": 54, "num_lines": 10, "path": "/mantenedor/app/serializers.py", "repo_name": "euginiius/Phoenix", "src_encoding": "UTF-8", "text": "from .models import Producto\nfrom rest_framework import serializers\n\n# this class lets us convert the model\n# instance to JSON and back again\n\nclass ProductoSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = Producto\n        fields = '__all__'" }, { "alpha_fraction": 0.6901300549507141, "alphanum_fraction": 0.6901300549507141, "avg_line_length": 33.421051025390625, "blob_id": "1fb6d3462b0a1f3b9664d7e1f3b8ece29db35103", "content_id": "90586dc1aa54268a71cfa3568ee0dc6cd61641de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1307, "license_type": "no_license", "max_line_length": 72, "num_lines": 38, "path": "/mantenedor/app/urls.py", "repo_name": "euginiius/Phoenix", "src_encoding": "UTF-8", "text": "from django.urls import path, include\nfrom . import views\nfrom rest_framework.urlpatterns import format_suffix_patterns\nfrom .views import *\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\n# ----- the API routes ---------\nurlpatterns = [\n    path('api/', views.API_objects.as_view()),\n    path('api/<int:pk>/', views.API_objects_details.as_view()),\n]\n\nurlpatterns = format_suffix_patterns(urlpatterns)\n\nif settings.DEBUG:\n    urlpatterns += static(settings.MEDIA_URL,\n                          document_root=settings.MEDIA_ROOT)\n\n\nurlpatterns += [\n    path('registrarProducto', views.registrar_producto),\n    path('listarProducto', views.listar_productos),\n    path('listarProducto/<str:producto_marca>', views.listar_marca),\n    path('editarProducto/<int:codigo_producto>', views.editar_producto),\n    path('borrarProducto/<int:codigo_producto>', views.borrar_producto),\n    path('paginaPrincipal', views.pagina_principal),\n    path('accesorios', views.accesorios),\n    path('carcasas', views.carcasas),\n    path('funkos', views.funkos),\n    path('llaveros', views.llaveros),\n    path('peluches', views.peluches),\n    path('ropa', views.ropa),\n    path('tazones', views.tazones),\n    path('contactenos', views.contactenos),\n    path('entregas', views.entregas),\n\n]" }, { "alpha_fraction": 0.7096171975135803, "alphanum_fraction": 0.7273576259613037, "avg_line_length": 31.454545974731445, "blob_id": "0d04b9285454d5d443ba4fa32f40135227efad2c", "content_id": "d688aa9553ae31dde91b3fcf20813e8c3f4f3dbe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1071, "license_type": "no_license", "max_line_length": 53, "num_lines": 33, "path": "/mantenedor/app/models.py", "repo_name": "euginiius/Phoenix", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.utils import timezone\nfrom django.core.files import File\nfrom urllib.request import urlopen\nfrom tempfile import NamedTemporaryFile\n# Create your models here.\n\nclass Producto(models.Model):\n    codigo= models.IntegerField()\n    nombre= models.CharField(max_length=30)\n    precio= models.IntegerField()\n    marca= models.CharField(max_length=20)\n    modelo= models.CharField(max_length=20)\n    descripcion= models.CharField(max_length=250)\n    stock= models.IntegerField()\n    imagen = models.FileField(upload_to='static/img')\n\n    
def __str__(self):\n return self.nombre\n\nclass DatosCliente(models.Model):\n region= models.TextField()\n comuna= models.TextField()\n nombre_cliente= models.CharField(max_length=250)\n email= models.CharField(max_length=100)\n telefono= models.CharField(max_length=13)\n tipo_vivienda= models.TextField()\n rut= models.CharField(max_length=14)\n fecha_nacimiento= models.DateField()\n direccion= models.TextField()\n\n def __str__(self):\n return self.nombre_cliente\n" }, { "alpha_fraction": 0.5038265585899353, "alphanum_fraction": 0.5255101919174194, "avg_line_length": 36.33333206176758, "blob_id": "e22547d78d75d7ef09ee273856008194921a4f2b", "content_id": "2ee894de07bdefa91924c54367675b357519b51e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1568, "license_type": "no_license", "max_line_length": 114, "num_lines": 42, "path": "/mantenedor/app/migrations/0001_initial.py", "repo_name": "euginiius/Phoenix", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.6 on 2019-11-17 04:57\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='DatosCliente',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('region', models.TextField()),\n ('comuna', models.TextField()),\n ('nombre_cliente', models.CharField(max_length=250)),\n ('email', models.CharField(max_length=100)),\n ('telefono', models.CharField(max_length=13)),\n ('tipo_vivienda', models.TextField()),\n ('rut', models.CharField(max_length=14)),\n ('fecha_nacimiento', models.DateField()),\n ('direccion', models.TextField()),\n ],\n ),\n migrations.CreateModel(\n name='Producto',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('codigo', models.IntegerField()),\n ('nombre', models.CharField(max_length=30)),\n ('precio', models.IntegerField()),\n ('marca', models.CharField(max_length=20)),\n ('modelo', models.CharField(max_length=20)),\n ('descripcion', models.CharField(max_length=250)),\n ('stock', models.IntegerField()),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.6659528613090515, "alphanum_fraction": 0.6659528613090515, "avg_line_length": 38, "blob_id": "575c6695d94de46364bcc77d49b9628103a9e6c7", "content_id": "e58b7902d21f8c47a1f0f49d1d1555f2a38fcafa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 467, "license_type": "no_license", "max_line_length": 133, "num_lines": 12, "path": "/mantenedor/app/forms.py", "repo_name": "euginiius/Phoenix", "src_encoding": "UTF-8", "text": "from django import forms\nfrom .models import Producto, DatosCliente\n\nclass ProductoForm(forms.ModelForm):\n class Meta:\n model = Producto\n fields = ['codigo', 'nombre', 'precio', 'marca', 'modelo', 'descripcion', 'stock','imagen']\n\nclass DatosClienteForm(forms.ModelForm):\n class Meta:\n model = DatosCliente\n fields = ['region', 'comuna', 'nombre_cliente', 'email', 'telefono', 'tipo_vivienda', 'rut', 'fecha_nacimiento', 'direccion']" } ]
6
madfiretable/IOTcar
https://github.com/madfiretable/IOTcar
b229caeaaa318fb6435a622fad0b05d71773f03c
5d290c78d46336524374d019253a1a43b421b6f4
2826dd1736d3ec243ebf82c20e151cd3269a1685
refs/heads/main
2023-02-17T21:27:37.160359
2021-01-15T12:38:29
2021-01-15T12:38:29
329,901,620
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5806966423988342, "alphanum_fraction": 0.6091464757919312, "avg_line_length": 19.670330047607422, "blob_id": "c4ffe8b3996da51fa3092df9bf059b1842a1c28a", "content_id": "6831b773996c99f0af703801f02620906495ada9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3761, "license_type": "no_license", "max_line_length": 77, "num_lines": 182, "path": "/test1.py", "repo_name": "madfiretable/IOTcar", "src_encoding": "UTF-8", "text": "import RPi.GPIO as gpio\nimport time\nfrom flask import Flask, render_template, Response, request\nfrom camera_pi import Camera\n\nN1 = 17\nN2 = 22\nN3 = 23\nN4 = 24\nTRIG = 26\nECHO = 3\n\ngpio.setwarnings(False)\ngpio.setmode(gpio.BCM)\ngpio.setup(N1, gpio.OUT)\ngpio.setup(N2, gpio.OUT)\ngpio.setup(N3, gpio.OUT)\ngpio.setup(N4, gpio.OUT)\ngpio.setup(TRIG, gpio.OUT)\ngpio.setup(ECHO, gpio.IN)\npwm1 = gpio.PWM(N2, 100)\npwm2 = gpio.PWM(N3, 100)\n\n\napp = Flask(__name__)\n\n\[email protected]('/')\ndef index():\n return render_template('index.html')\n\n\ndef init():\n gpio.setmode(gpio.BCM)\n gpio.setup(N1, gpio.OUT)\n gpio.setup(N2, gpio.OUT)\n gpio.setup(N3, gpio.OUT)\n gpio.setup(N4, gpio.OUT)\n gpio.setup(TRIG, gpio.OUT)\n gpio.setup(ECHO, gpio.IN)\n\n\[email protected]('/forward', methods=['GET', 'POST'])\ndef forward():\n init()\n\n while(distance() > 25):\n gpio.output(N1, False)\n gpio.output(N2, True)\n gpio.output(N3, True)\n gpio.output(N4, False)\n time.sleep(0.5)\n\n stop()\n autoBack(0.5)\n return render_template('index.html')\n\n\[email protected]('/back', methods=['GET', 'POST'])\ndef back():\n gpio.cleanup()\n init()\n gpio.output(N1, True)\n gpio.output(N2, False)\n gpio.output(N3, False)\n gpio.output(N4, True)\n return render_template('index.html')\n\n\[email protected](\"/left\", methods=['GET', 'POST'])\ndef left():\n gpio.cleanup()\n init()\n gpio.output(N1, False)\n gpio.output(N2, True)\n gpio.output(N3, False)\n gpio.output(N4, True)\n return render_template('index.html')\n\n\[email protected](\"/right\", methods=['GET', 'POST'])\ndef right():\n gpio.cleanup()\n init()\n gpio.output(N1, True)\n pwm1.stop()\n pwm2.start(0)\n pwm2.ChangeDutyCycle(90)\n gpio.output(N4, False)\n return render_template('index.html')\n\n\[email protected](\"/stop\", methods=['GET', 'POST'])\ndef stop():\n\n init()\n gpio.output(N1, False)\n gpio.output(N4, False)\n gpio.cleanup()\n pwm1.stop()\n pwm2.stop()\n return render_template('index.html')\n\n\ndef autoBack(t):\n gpio.cleanup()\n init()\n gpio.output(N1, True)\n gpio.output(N2, False)\n gpio.output(N3, False)\n gpio.output(N4, True)\n time.sleep(t)\n gpio.cleanup()\n\n\ndef gen(camera):\n \"\"\"Video streaming generator function.\"\"\"\n while True:\n frame = camera.get_frame()\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')\n\n\[email protected]('/video_feed')\ndef video_feed():\n \"\"\"Video streaming route. 
Put this in the src attribute of an img tag.\"\"\"\n    return Response(gen(Camera()),\n                    mimetype='multipart/x-mixed-replace; boundary=frame')\n\n\ndef speed():\n    value = request.form['speed']\n    speed = int(value)*10\n    pwm1.ChangeDutyCycle(speed)\n    pwm2.ChangeDutyCycle(speed)\n    print(\"value\", value)\n    return render_template('index.html')\n\n\[email protected]('/controlSpeed', methods=['POST'])\ndef controlSpeed():\n    value = request.form['speed']\n    speed = int(value)*10\n\n    init()\n\n    while(distance() > 25):\n        gpio.output(N1, False)\n        pwm1.start(0)\n        pwm1.ChangeDutyCycle(speed)\n        pwm2.start(0)\n        pwm2.ChangeDutyCycle(speed)\n        gpio.output(N4, False)\n        time.sleep(0.5)\n\n    stop()\n    autoBack(0.5)\n    return render_template('index.html')\n\n\ndef distance():\n    gpio.output(TRIG, True)\n    time.sleep(0.00001)\n    gpio.output(TRIG, False)\n\n    start = time.time()\n    stop = time.time()\n\n    while gpio.input(ECHO) == 0:\n        start = time.time()\n    while gpio.input(ECHO) == 1:\n        stop = time.time()\n\n    timeElapsed = stop - start\n    distance = (timeElapsed*34300)/2\n    print(distance)\n    return distance\n\n\nif __name__ == '__main__':\n\n    app.run(host='0.0.0.0', port=80, debug=True, threaded=True)" }, { "alpha_fraction": 0.7110568284988403, "alphanum_fraction": 0.7361026406288147, "avg_line_length": 31.098039627075195, "blob_id": "d6f0ac71adb6afa45942b412d67d438e6d4e8517", "content_id": "b978cad70fd86ab86371bc58b12119984a0882a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1835, "license_type": "no_license", "max_line_length": 294, "num_lines": 51, "path": "/README.md", "repo_name": "madfiretable/IOTcar", "src_encoding": "UTF-8", "text": "# IOTcar\r\n\r\n# Introduction\r\n This is a remote-controlled car that you can drive from your phone or computer\r\n \r\n # Features\r\n You can drive the car forward, backward, left and right\r\n \r\n # Prepare\r\n Car kit\r\n \r\n DC motor*4\r\n \r\n Battery box\r\n \r\n Battery*4\r\n \r\n L298N motor driver\r\n \r\n Dupont Line\r\n\r\n# Build up your Car\r\nClick this link to learn how to build the car kit https://www.youtube.com/watch?v=uW8YVcBjPGU\r\n# L298N motor driver\r\nIn order to control your motors, you need to use an L298N. One motor driver can control motors in two groups, depending on the way you connect the Dupont lines. In this project, I divided the four motors into a left side and a right side, which is what lets the car turn left or right. A short sketch of this idea is shown below. 
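\r\nAs a rough illustration (the helper name turn_right is my own, but the pin pattern mirrors the right() handler in window.py, assuming BCM pins 17/22 drive the left pair and 23/24 drive the right pair of L298N inputs), a turn is just the two sides running in opposite directions:\r\n\r\n    def turn_right():\r\n        gpio.output(17, False)  # left pair forward (same levels as in forward())\r\n        gpio.output(22, True)\r\n        gpio.output(23, False)  # right pair reversed, so the car pivots to the right\r\n        gpio.output(24, True)\r\n\r\nSwapping which side is reversed gives the opposite turn.\r\n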
Note: Make sure that wheels on the same side turn in the same direction (which took me a lot of time to set up)\r\n\r\nClick this link for more detail https://www.youtube.com/watch?v=bNOlimnWZJE&list=PLc6fhBPeC6SBbZFcrHLlPXyR2svfxf1RZ&index=19&t=507s\r\nreference http://www.piddlerintheroot.com/l298n-dual-h-bridge/\r\n\r\n# Control wheels\r\nThis is an example that makes the car move forward \r\n\r\n \r\n    def forward():\r\n        gpio.output(17, False)\r\n        gpio.output(22, True)\r\n        gpio.output(23, True) \r\n        gpio.output(24, False)\r\n        print (\"forward\")\r\n    l_b = tk.Button(t_f, text = 'front', fg = 'blue', command = forward)\r\n    l_b.pack(side = tk.LEFT)\r\nreference https://blog.techbridge.cc/2019/09/21/how-to-use-python-tkinter-to-make-gui-app-tutorial/\r\n\r\n# Demo Video\r\nhttps://www.youtube.com/watch?v=iPJDIZXDp3M&ab_channel=Madfiretable\r\n\r\n# What I want to say\r\nTo be honest, I started preparing this project too late. Many thanks to the professor and the TA for giving me so much room and time, and to the many classmates who helped me.\r\nThere is still a lot I wanted to build but did not finish, which I really regret; at the same time I know I have to plan my time properly and not repeat this stumble.\r\n" }, { "alpha_fraction": 0.5975869297981262, "alphanum_fraction": 0.6107266545295715, "avg_line_length": 19.58333396911621, "blob_id": "709a6bb9d9df9d59b79cf34b5ce28f0ed22689b3", "content_id": "0deaaf73d513a9ee84abc81d48681cde50d580ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1417, "license_type": "no_license", "max_line_length": 61, "num_lines": 72, "path": "/window.py", "repo_name": "madfiretable/IOTcar", "src_encoding": "UTF-8", "text": "# import packages\nimport tkinter as tk\nimport RPi.GPIO as gpio\nimport time\n\nw = tk.Tk()\nt_f = tk.Frame(w)\n\nt_f.pack()\nb_f = tk.Frame(w)\nb_f.pack(side=tk.BOTTOM)\n\n\ngpio.setmode(gpio.BCM)\ngpio.setup(17, gpio.OUT)\ngpio.setup(22, gpio.OUT)\ngpio.setup(23, gpio.OUT)\ngpio.setup(24, gpio.OUT)\ndef forward():\n    gpio.output(17, False)\n    gpio.output(22, True)\n    gpio.output(23, True) \n    gpio.output(24, False)\n    print (\"forward\")\n    \ndef reverse():\n    gpio.output(17, True)\n    gpio.output(22, False)\n    gpio.output(23, False) \n    gpio.output(24, True)\n    print (\"reverse\")\n\n    \ndef right():\n    gpio.output(17, False)\n    gpio.output(22, True)\n    gpio.output(23, False)\n    gpio.output(24, True)\n    print (\"right\")\n    \ndef left():\n    gpio.output(17, True)\n    gpio.output(22, False)\n    gpio.output(23, True)\n    gpio.output(24, False)\n    print (\"left\")\n\ndef stop():\n    gpio.output(17, False)\n    gpio.output(22, False)\n    gpio.output(23, False)\n    gpio.output(24, False)\n\nl_b = tk.Button(t_f, text='front', fg='blue',command=forward)\nl_b.pack(side=tk.LEFT)\n\nm_b = tk.Button(t_f, text='left', fg='blue',command=left)\nm_b.pack(side=tk.LEFT)\n\nr_b = tk.Button(t_f, text='back', fg='blue',command=reverse)\nr_b.pack(side=tk.LEFT)\n\nt_b = tk.Button(t_f, text='right', fg = 'blue',command=right)\nt_b.pack(side=tk.TOP)\n\n\nb_b = tk.Button(b_f, text='stop', fg='blue', command=stop)\n\nb_b.pack(side=tk.BOTTOM)\n\n\nw.mainloop()" } ]
3
Tianhao-Byrce-LI/Naive-Bayesian-Model
https://github.com/Tianhao-Byrce-LI/Naive-Bayesian-Model
9242728b5c62a131e114a95830c4bb31f0c05468
747586608f783ca6582ded5b715a81420ac5c1a1
09003d50501d367b9c06dda603378c08289a764d
refs/heads/master
2022-07-14T09:28:41.330017
2020-05-14T06:46:47
2020-05-14T06:46:47
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7257462739944458, "alphanum_fraction": 0.7378731369972229, "avg_line_length": 29.41176414489746, "blob_id": "ab2e29de27c377cacccc311e8785a69c2a761eb6", "content_id": "7ae6f39902ee38ed93d86a6f2f34d6304417cd8b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1074, "license_type": "no_license", "max_line_length": 72, "num_lines": 34, "path": "/News_F.py", "repo_name": "Tianhao-Byrce-LI/Naive-Bayesian-Model", "src_encoding": "UTF-8", "text": "๏ปฟ#-*- encoding: utf-8 -*-\r\nimport pickle\r\n#import numpy as np\r\nfrom sklearn.naive_bayes import MultinomialNB\r\nfrom sklearn.naive_bayes import BernoulliNB\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn import metrics\r\n#import warnings\r\n#from gensim.models import Word2Vec\r\n#from sklearn.preprocessing import LabelEncoder\r\n#from sklearn.model_selection import GridSearchCV\r\n#import pandas as pd\r\n#import jieba\r\n\r\n\r\n\r\nwith open('tfidf_feature.model','rb') as file:\r\n tfidf_feature = pickle.load(file)\r\n X = tfidf_feature['featureMatrix']\r\n y = tfidf_feature['label']\r\n\r\ntrain_X, test_X, train_y, test_y = train_test_split(X, y, test_size=0.2)\r\n\r\n\r\nclf=MultinomialNB(alpha=1.0e-10).fit(train_X,train_y)\r\ndoc_class_predicted=clf.predict(test_X)\r\nprint(clf.score(test_X, test_y))\r\nprint(metrics.classification_report(test_y,doc_class_predicted))\r\n\r\n\r\nclf1=BernoulliNB().fit(train_X,train_y)\r\ndoc_class_predicted1=clf1.predict_proba(test_X)\r\nprint(clf1.score(test_X, test_y))\r\nprint(metrics.classification_report(test_y,doc_class_predicted1))\r\n\r\n\r\n" } ]
1
jimbobbennett/iot-hub-gps-route-simulator
https://github.com/jimbobbennett/iot-hub-gps-route-simulator
145d1df56310c7c1f2052e9ce67462bbf1dc83a3
c8f8110f8b8eee058bb8eb59ca1889eb79981def
ee61c646abe8a0cdfaafec6b500e960f55d33b9a
refs/heads/main
2023-04-16T18:13:59.472709
2021-04-30T23:41:09
2021-04-30T23:41:09
361,933,782
6
0
null
null
null
null
null
[ { "alpha_fraction": 0.7000467777252197, "alphanum_fraction": 0.7112774848937988, "avg_line_length": 43.52083206176758, "blob_id": "dcd30adf9e38a4c688660da3bdb57bca2e0a01ea", "content_id": "1c0bead93a9ed6dd530d0bed3a071b562ca185fb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2137, "license_type": "permissive", "max_line_length": 373, "num_lines": 48, "path": "/README.md", "repo_name": "jimbobbennett/iot-hub-gps-route-simulator", "src_encoding": "UTF-8", "text": "# Azure IoT Hub GPS route simulator\n\n[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](/LICENSE)\n[![Twitter: jimbobbennett](https://img.shields.io/twitter/follow/jimbobbennett.svg?style=social)](https://twitter.com/jimbobbennett)\n\nThis repo contains a helper Python file that can take a track in [GPX](https://wikipedia.org/wiki/GPS_Exchange_Format) format and send track data to [Azure IoT Hub](https://azure.microsoft.com/services/iot-hub/?WT.mc_id=academic-0000-jabenn). It reads the track information and send each point sequentially, with a defined pause (defaulting to 5 seconds) between each send.\n\n## To use this code\n\n1. Create a Python virtual environment\n\n1. Install the required Pip packages using the `requirements.txt` file\n\n1. Get the connection string for your device from Azure IoT Hub. You can either:\n\n * Create a .env file with the connection string in it with a key of `DEVICE_CONNECTION_STRING`\n * Pass the connection string when running this file with the command line argument `--connection_string`\n\n1. Run the `app.py` file in Python, passing in the file name as a parameter.\n\n1. The GPX file will be read, and the `trkprt` nodes will be read in order and sent to IoT Hub, one every 5 seconds.\n\nEach `trkprt` node will be sent as a telemetry message in the following format:\n\n```json\n{\n \"lat\": \"47.73481\",\n \"lon\": \"-122.257\"\n}\n```\n\n## Arguments\n\n```output\nusage: app.py [-h] [-cs connection_string] [-fq frequency] [-r] [-rv] file\n\npositional arguments:\n file The .gpx file to upload\n\noptional arguments:\n -h, --help show this help message and exit\n -cs connection_string, --connection_string connection_string\n The IoT Hub device connection string to use to connect. 
You can also set this in a .env file with the DEVICE_CONNECTION_STRING key\n -fq frequency, --frequency frequency\n The number of seconds to wait between sending each point\n -r, --repeat Set this to continuously send the file\n -rv, --reverse Set this to reverse the points in the file after they've all been sent\n```\n" }, { "alpha_fraction": 0.8600000143051147, "alphanum_fraction": 0.8799999952316284, "avg_line_length": 11.75, "blob_id": "440cca2c384e3014d7e3f17772b2abae2dc85da1", "content_id": "35d22fd46d33b0297610ddd25e3f0ce11a5378db", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 50, "license_type": "permissive", "max_line_length": 16, "num_lines": 4, "path": "/requirements.txt", "repo_name": "jimbobbennett/iot-hub-gps-route-simulator", "src_encoding": "UTF-8", "text": "azure-iot-device\nbeautifulsoup4\nlxml\npython-dotenv" }, { "alpha_fraction": 0.6923376321792603, "alphanum_fraction": 0.6931155323982239, "avg_line_length": 32.40259552001953, "blob_id": "97b46d1d9b3427c254ba2251ee7427f0edddeaad", "content_id": "5e2c2d6ea797e253e430750cc73f4869694e1c2e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2571, "license_type": "permissive", "max_line_length": 261, "num_lines": 77, "path": "/app.py", "repo_name": "jimbobbennett/iot-hub-gps-route-simulator", "src_encoding": "UTF-8", "text": "import argparse\nimport json\nimport os\nimport time\nfrom azure.iot.device import IoTHubDeviceClient\nfrom bs4 import BeautifulSoup\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\ntry:\n device_connection_string = os.environ['DEVICE_CONNECTION_STRING']\nexcept KeyError:\n device_connection_string = ''\n\nparser = argparse.ArgumentParser()\nparser.add_argument('file', metavar='file', type=str, help='The .gpx file to upload')\nparser.add_argument('-cs', '--connection_string', metavar='connection_string', type=str, default=device_connection_string, help='The IoT Hub device connection string to use to connect. 
You can also set this in a .env file with the DEVICE_CONNECTION_STRING key')\nparser.add_argument('-fq', '--frequency', metavar='frequency', type=int, default=5, help='The number of seconds to wait between sending each point')\nparser.add_argument('-r', '--repeat', action='store_true', help='Set this to continuously send the file')\nparser.add_argument('-rv', '--reverse', action='store_true', help='Set this to reverse the points in the file after they\\'ve all been sent')\n\nargs = parser.parse_args()\n\nfile_name = args.file\ndevice_connection_string = args.connection_string\nfrequency = args.frequency\nrepeat = args.repeat\nreverse = args.reverse\n\nif device_connection_string is None or device_connection_string == '':\n print('Missing connection string - either add it to a .env file with a key of DEVICE_CONNECTION_STRING, or pass it as a parameter using --connection_string <connection string>')\n exit()\n\ndevice_client = IoTHubDeviceClient.create_from_connection_string(device_connection_string)\n\n# Connect the client.\nprint('Connecting to Azure IoT Hub...')\ndevice_client.connect()\nprint('Connected!')\n\ndef send_track_part(track_part):\n telemetry = {\n 'lat' : track_part['lat'],\n 'lon' : track_part['lon']\n }\n\n print('Sending telemetry:', telemetry)\n\n device_client.send_message(json.dumps(telemetry))\n\ndef send_file():\n print('Processing route file:', file_name)\n\n with open(file_name, 'r') as gpx_file:\n soup = BeautifulSoup(gpx_file, 'lxml')\n track_parts = soup.find_all('trkpt')\n\n for track_part in track_parts:\n send_track_part(track_part)\n time.sleep(frequency)\n \n if reverse:\n print('Sending file in reverse')\n track_parts.reverse()\n\n for track_part in track_parts:\n send_track_part(track_part)\n time.sleep(frequency)\n\n\nif repeat:\n while True: send_file()\nelse:\n send_file()\n\nprint('Done!')" } ]
3
victorywys/CMU-MultimodalSDK
https://github.com/victorywys/CMU-MultimodalSDK
6b5e0a3c51f9fc900593499036ac237fca92e637
bf723d9c0ca0b48d0c57ed474299305134ed8cdf
3fb4bea727e238bd1419911f1a40365e494d95aa
refs/heads/master
2020-03-23T11:04:25.383886
2018-07-18T19:31:32
2018-07-18T19:31:32
141,481,009
0
0
null
2018-07-18T19:30:12
2018-07-18T19:23:07
2018-07-18T19:23:05
null
[ { "alpha_fraction": 0.7671840190887451, "alphanum_fraction": 0.7760531902313232, "avg_line_length": 74.16666412353516, "blob_id": "0226de36e0e0ddb232f192c518a44a815be25e88", "content_id": "90cff08273eb070bec6a2b69e98241732dcc9776", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 451, "license_type": "no_license", "max_line_length": 119, "num_lines": 6, "path": "/mmsdk/mmdatasdk/dataset/standard_datasets/CMU_MOSI/cmu_mosi.py", "repo_name": "victorywys/CMU-MultimodalSDK", "src_encoding": "UTF-8", "text": "raw={}\nraw[\"words\"]='http://immortal.multicomp.cs.cmu.edu/CMU-MOSI/language/CMU_MOSI_TimestampedWords.csd'\nraw[\"phonemes\"]='http://immortal.multicomp.cs.cmu.edu/CMU-MOSI/language/CMU_MOSI_TimestampedPhones.csd'\nhighlevel={}\nhighlevel[\"glove_vectors\"]='http://immortal.multicomp.cs.cmu.edu/CMU-MOSI/language/CMU_MOSI_TimestampedWordVectors.csd'\nhighlevel[\"FACET 4.1\"]='http://immortal.multicomp.cs.cmu.edu/CMU-MOSI/visual/CMU_MOSI_VisualFacet_4.1.csd'\n" } ]
1
peaceGui/Multi-Agent-Influence-Maximization
https://github.com/peaceGui/Multi-Agent-Influence-Maximization
b7f28e5caab2e463e92ebdba1b2ee365381c15ab
193c494b39d6ecdee6a77a7cff87113e6e711843
df6e67f0b60101bb14f02bb2acc80485abb223ec
refs/heads/master
2023-02-24T20:28:56.982737
2019-09-14T23:48:27
2019-09-14T23:48:27
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.502068281173706, "alphanum_fraction": 0.5160289406776428, "avg_line_length": 41.0217399597168, "blob_id": "918e88458ff280ed3c8e9cad0944e787500e16fe", "content_id": "a218abb7b4849d4c7527baeb21fa83adabf34d08", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1934, "license_type": "no_license", "max_line_length": 124, "num_lines": 46, "path": "/Env/Approach.py", "repo_name": "peaceGui/Multi-Agent-Influence-Maximization", "src_encoding": "UTF-8", "text": "import numpy as np\n\n\nclass Approach(object):\n def __init__(self, model, t=100, k=5, mode='IC'):\n self.time_steps = t\n self.k = k\n self.model = model\n self.mode = mode\n\n def write_results(self, name):\n print()\n # np.savetxt('../Results/total_benefits_' + name + '.txt', np.array(self.total_benefits), fmt='%.2f', delimiter=' ')\n # np.savetxt('../Results/ave_num_' + name + '.txt', np.array(self.average_req_num), fmt='%.2f', delimiter=' ')\n # np.savetxt('../Results/ratio_' + name + '.txt', np.array(self.ratio), fmt='%.2f', delimiter=' ')\n\n def greedy(self):\n seed_set, spread = [], []\n for _ in range(self.k):\n best_spread = 0\n for user in (set(self.model.G.nodes()) - set(seed_set)):\n count = self.model.diffusion(seed_set + [user], self.time_steps)\n\n if count > best_spread:\n best_spread, node = count, user\n seed_set.append(user)\n spread.append(count)\n np.savetxt('../Results/' + self.mode + '_greedy.txt', spread, fmt='%.2f', delimiter=' ')\n return spread\n\n def celf(self):\n marg_gain = [(user, self.model.diffusion([user], self.time_steps)) for user in set(self.model.G.nodes())]\n Q = sorted(marg_gain, key=lambda x: x[1], reverse=True)\n seed_set, spread = [Q[0][0]], [Q[0][1]]\n for _ in range(self.k - 1):\n Q = Q[1:]\n check = False\n while not check:\n current = Q[0][0]\n Q[0] = (current, self.model.diffusion(seed_set + [current], self.time_steps))\n Q = sorted(Q, key=lambda x: x[1], reverse=True)\n check = (Q[0][0] == current)\n seed_set.append(Q[0][0])\n spread.append(Q[0][1])\n np.savetxt('../Results/' + self.mode + '_celf.txt', spread, fmt='%.2f', delimiter=' ')\n return spread\n\n" }, { "alpha_fraction": 0.5414634346961975, "alphanum_fraction": 0.5463414788246155, "avg_line_length": 29.370370864868164, "blob_id": "ef6ad091311cafbb7ab722a5584e4281d6e19967", "content_id": "89e43f004d616a81419419ba9a1a6b424c695972", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 820, "license_type": "no_license", "max_line_length": 80, "num_lines": 27, "path": "/Env/Agents.py", "repo_name": "peaceGui/Multi-Agent-Influence-Maximization", "src_encoding": "UTF-8", "text": "import numpy as np\n\n\nclass UserAgent(object):\n def __init__(self, id_number):\n self.id = id_number\n self.total_sum = 1\n self.neighbors = dict()\n self.active = False\n\n def add_neighbor(self, id_number, weight=None, mode='IC'):\n if mode is 'IC':\n if weight is None:\n self.neighbors[id_number] = np.random.uniform(0, 1)\n else:\n self.neighbors[id_number] = weight\n elif mode is 'LT':\n if weight is None:\n self.neighbors[id_number] = np.random.uniform(0, self.total_sum)\n self.total_sum -= self.neighbors[id_number]\n else:\n self.neighbors[id_number] = weight\n\n\nclass SystemAgent(object):\n def __init__(self, id_number):\n self.id_number = id_number\n" }, { "alpha_fraction": 0.532004177570343, "alphanum_fraction": 0.5414480566978455, "avg_line_length": 33.03571319580078, "blob_id": 
"f5718ebcb7bab4e174fec3b5e161ec4d9727f1c2", "content_id": "ab096e094a7c5648b8b8e37f45798dfb274522c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 953, "license_type": "no_license", "max_line_length": 94, "num_lines": 28, "path": "/Dataset/PreProcess.py", "repo_name": "peaceGui/Multi-Agent-Influence-Maximization", "src_encoding": "UTF-8", "text": "import os\nimport numpy as np\nimport Env.Agents as ag\n\nusers = dict()\n\n\ndef process_data():\n files = os.listdir('../Dataset/Unprocessed Dataset/')\n print(files)\n index = int(input(\"Plz input the index of a dataset:\"))\n file = files[index][:-4]\n with open(file='../Dataset/Unprocessed Dataset/' + file + '.txt', mode='r') as f:\n for line in f.readlines():\n arr = line.strip().split()\n if arr[0] not in users:\n users[arr[0]] = ag.UserAgent(arr[0])\n if arr[1] not in users:\n users[arr[1]] = ag.UserAgent(arr[1])\n users[arr[0]].add_neighbor(arr[1], mode='IC')\n with open(file='../Dataset/' + file + '/' + file + '.txt', mode='w') as f:\n for user in users.values():\n for neighbor in user.neighbors:\n f.write(user.id + ' ' + neighbor + ' ' + str(user.neighbors[neighbor]) + '\\n')\n\n\nif __name__ == '__main__':\n process_data()\n" }, { "alpha_fraction": 0.5848032832145691, "alphanum_fraction": 0.5929443836212158, "avg_line_length": 20.676469802856445, "blob_id": "5411dcdd63293a77e88f7d209bc5f7bba86bec47", "content_id": "d07027000ef96a4a7f574951f318dcaa9365d877", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 737, "license_type": "no_license", "max_line_length": 44, "num_lines": 34, "path": "/Env/mainEnv.py", "repo_name": "peaceGui/Multi-Agent-Influence-Maximization", "src_encoding": "UTF-8", "text": "import time\nfrom Env.ICModel import ICModel\nfrom Approach import Approach\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nK = 10\n\n\ndef plot(results):\n fig = plt.figure()\n x = np.arange(0, K+1, 1)\n for item in results:\n plt.plot(x, [0]+results[item], 'bs')\n plt.legend(results.keys())\n plt.xlabel('Seed set size')\n plt.ylabel('Number of activated users')\n plt.show()\n\n\nif __name__ == '__main__':\n results = dict()\n IC = ICModel()\n app = Approach(IC, k=K)\n\n # start = time.time()\n # app.greedy()\n # print('\\n'+str(time.time()-start))\n\n start = time.time()\n results['CELF'] = app.celf()\n print('\\n' + str(time.time() - start))\n plot(results)\n # print(len(users), len(edges))\n" }, { "alpha_fraction": 0.5203251838684082, "alphanum_fraction": 0.5234521627426147, "avg_line_length": 32.3125, "blob_id": "aea9f92af34be7a1ed9127191ca66b0c7b1a3193", "content_id": "57e555a5031b3d913230d2cbd3bab9a3dfa67077", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1599, "license_type": "no_license", "max_line_length": 102, "num_lines": 48, "path": "/Env/ICModel.py", "repo_name": "peaceGui/Multi-Agent-Influence-Maximization", "src_encoding": "UTF-8", "text": "import os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport networkx as nx\nimport time\nimport Agents as ag\n\n\nclass ICModel(object):\n def __init__(self):\n self.G = self.init_graph()\n # pos = nx.random_layout(self.G)\n #\n # nx.draw_networkx(self.G, pos)\n # plt.show()\n # print(len(self.G.nodes()), len(self.G.edges()))\n\n def init_graph(self):\n dirs = os.listdir('../Dataset')\n print(dirs)\n index = int(input(\"Plz input the index of a dataset:\"))\n graph = 
nx.read_edgelist('../Dataset/' + dirs[index], nodetype=int, data=(('weight', float),),\n                                 create_using=nx.DiGraph)\n        for node in graph.nodes():\n            graph.node[node]['SA'] = node\n        print(graph.nodes(data=True))\n        return graph\n\n    def diffusion(self, original_users, times, p=0.01):\n        spread = []\n        for i in range(times):\n            np.random.seed(i)  # seed once per Monte Carlo run so draws differ between users\n            print('\\rTime step: %d' % (i), end='')\n            new_active, already_active = original_users[:], original_users[:]\n            new_ones = []\n            while new_active:\n                for user in new_active:\n                    for neighbor in self.G.neighbors(user):\n                        if np.random.uniform(0, 1) < p:\n                            new_ones.append(neighbor)\n                new_active = list(set(new_ones) - set(already_active))\n                already_active += new_active\n            spread.append(len(already_active))\n        return np.mean(spread)\n\n\nif __name__ == '__main__':\n    IC = ICModel()\n" } ]
5
JuanAI/weatherApi
https://github.com/JuanAI/weatherApi
cf6f664b6bdf16b7401853fd614d56b947142b70
ac5c19b8c2342f6bf4697a312492325c62977cca
f5fcc44f0cb6926be5ec1ec39ad85afc4cbf6e20
refs/heads/master
2020-03-21T05:30:20.348800
2019-02-06T18:16:05
2019-02-06T18:16:05
137,854,873
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6343283653259277, "alphanum_fraction": 0.638059675693512, "avg_line_length": 25.27450942993164, "blob_id": "fb21f0bf0d8b8753ba30d0c459a8a6ef41220c75", "content_id": "038d2c3d309419a1bd208329ad49f0c9e8769ff9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1340, "license_type": "no_license", "max_line_length": 101, "num_lines": 51, "path": "/resources/gen_sum.py", "repo_name": "JuanAI/weatherApi", "src_encoding": "UTF-8", "text": "from flask_restful import Resource, reqparse\nfrom flask_jwt import jwt_required\n\nimport datetime\n\nfrom common.utils import Utils\n\nimport json\nimport os\n\n\nclass GenSummary(Resource):\n\tparser = reqparse.RequestParser()\n\n\tdef get(self, name, surname, date, hour):\n\n\t\tif os.stat(\"api_password.txt\").st_size == 0:\n\t\t\treturn {'status': 'error', 'message': 'you have not saved any api password'}\n\n\t\telse:\n\n\t\t\twith open('api_password.txt') as f:\n\t\t\t\tlines = f.readlines()\n\n\t\t\tapi_user=json.loads(lines[0])\n\n\t\t\tif api_user['name'] == name and api_user['surname'] == surname:\n\t\t\t\tutils_aux=Utils(_id=api_user['id'])\n\t\t\t\tjson_aux=utils_aux.get_json()\n\n\t\t\t\tif json_aux['cod'] != '200':\n\t\t\t\t\treturn {'status': 'error', 'message': 'error api access'}\n\n\t\t\t\telse:\n\n\t\t\t\t\tfiltered=next(filter(lambda x: x['date'] == date and x['hour'] == hour, json_aux['data']), None)\n\t\t\t\t\tdate_ask=datetime.datetime.strptime(date+hour,'%Y%m%d%H%M').strftime('%Y-%m-%d %H:%M')\n\t\t\t\t\tstring='No data for '+str(date_ask)\n\n\t\t\t\t\tprint(filtered)\n\n\t\t\t\t\tif filtered == None:\n\t\t\t\t\t\treturn {'status': 'error', 'message': string}\n\n\t\t\t\t\treturn {'description':filtered['description'],\n\t\t\t\t\t'temperature':filtered['temp-C']+'C',\n\t\t\t\t\t'pressure':filtered['pressure'],\n\t\t\t\t\t'humidity':filtered['humidity']} \n\n\t\t\telse:\n\t\t\t\treturn {'status': 'error', 'message': 'your username do not match'}\n" }, { "alpha_fraction": 0.6517482399940491, "alphanum_fraction": 0.6559440493583679, "avg_line_length": 26.538461685180664, "blob_id": "5b348509f67cd8cd776c6a8e9a12ff6d7dc49d39", "content_id": "a979b8993ae2f82bd91ce50326591e0d2f2ce9cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 715, "license_type": "no_license", "max_line_length": 95, "num_lines": 26, "path": "/resources/huminfo.py", "repo_name": "JuanAI/weatherApi", "src_encoding": "UTF-8", "text": "from flask_restful import Resource, reqparse\nfrom flask_jwt import jwt_required\nimport datetime\n\nfrom common.utils import Utils\n\n\nclass HumInfo(Resource):\n\tparser = reqparse.RequestParser()\n\n\tdef get(self, date, hour):\n\t\tutils=Utils()\n\t\tjson=utils.get_json() \n\n\t\tif json['cod'] != '200':\n\t\t\treturn {'status': 'error', 'message': 'error api access'}\n\n\t\telse:\n\n\t\t\tfiltered=next(filter(lambda x: x['date'] == date and x['hour'] == hour, json['data']), None)\n\t\t\tdate_ask=datetime.datetime.strptime(date+hour,'%Y%m%d%H%M').strftime('%Y-%m-%d %H:%M')\n\t\t\tstring='No data for '+str(date_ask)\n\t\tif filtered == None:\n\t\t\treturn {'status': 'error', 'message': string}\n\n\t\treturn {'humidity':str(filtered['humidity'])+\"%\"}" }, { "alpha_fraction": 0.5995045304298401, "alphanum_fraction": 0.608587920665741, "avg_line_length": 21.0181827545166, "blob_id": "825a9566acb74a89329d6804a01dc57028d0c012", "content_id": "50a995fa4475a5db94aac60c752fb3160333e0b7", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1211, "license_type": "no_license", "max_line_length": 85, "num_lines": 55, "path": "/common/utils.py", "repo_name": "JuanAI/weatherApi", "src_encoding": "UTF-8", "text": "import json\nimport requests\nimport datetime\n\nclass Utils(object):\n\n\tdef __init__(self, file_path='data.txt', _id=None):\n\t\tself.file_path=file_path\n\t\tself._id=_id\n\n\n\t@staticmethod\n\tdef convert_data(raw_data):\n\n\t\tdata = []\n\t\tif raw_data['cod'] == '200':\n\n\t\t\tfor i in raw_data['list']:\n\n\t\t\t\tdate=i['dt_txt']\n\t\t\t\tdate=datetime.datetime.strptime(date,'%Y-%m-%d %H:%M:%S')\n\t\t\t\tnew_date=date.strftime('%Y%m%d')\n\t\t\t\tnew_hour=date.strftime('%H%M')\n\n\t\t\t\td={ \n\t\t\t\t'date': new_date,\n\t\t\t\t'hour': new_hour,\n\t\t\t\t'description': i['weather'][0]['description'],\n\t\t\t\t'temp-K': i['main']['temp'],\n\t\t\t\t'temp-C': str(round(float(i['main']['temp'])-273.15)),\n\t\t\t\t'humidity': i['main']['humidity'],\n\t\t\t\t'pressure': i['main']['pressure']\n\t\t\t\t}\n\n\t\t\t\tdata.append(d)\n\n\t\tfinal_data={'cod':raw_data['cod'],'data':data}\n\n\t\treturn final_data\n\n\n\tdef get_json(self):\n\t\tif self._id == None:\t\t\t\n\t\t\twith open('data.txt') as json_file: \n\t\t\t\traw_data = json.load(json_file)\n\n\t\t\tjson_data=self.convert_data(raw_data)\t\t\t\n\n\t\telse:\n\t\t\turl =\"http://api.openweathermap.org/data/2.5/forecast?q=London,uk&appid=\"+self._id\n\t\t\tresponse = requests.get(url)\n\t\t\tjson_data = json.loads(response.text)\n\t\t\tjson_data=self.convert_data(json_data)\n\n\t\treturn json_data\n" }, { "alpha_fraction": 0.6786389350891113, "alphanum_fraction": 0.6786389350891113, "avg_line_length": 23.090909957885742, "blob_id": "eb131172fa403135b9432df16a3a458313d899c3", "content_id": "2d0c4e1a7d76f572a5f4082f6c6f3ec450489a52", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 529, "license_type": "no_license", "max_line_length": 64, "num_lines": 22, "path": "/resources/api_id.py", "repo_name": "JuanAI/weatherApi", "src_encoding": "UTF-8", "text": "import json\nfrom flask_restful import Resource, reqparse\nfrom flask_jwt import jwt_required\n\nclass ApiId(Resource):\n\tparser = reqparse.RequestParser()\n\tparser.add_argument('id',\n\t type=str,\n\t required=True,\n\t help=\"This field cannot be left blank!\"\n\t)\n\n\tdef post(self, name, surname):\n\t\tdata = ApiId.parser.parse_args()\n\n\t\tapi_acc = {'name': name, 'surname': surname, 'id': data['id']}\n\n\t\twith open('api_password.txt', 'w') as file:\n\t\t\tfile.write(json.dumps(api_acc))\n\n\n\t\treturn {'message': 'Your Api id has been saved'}" }, { "alpha_fraction": 0.7471526265144348, "alphanum_fraction": 0.7501898407936096, "avg_line_length": 32.79487228393555, "blob_id": "0b1b1b003e47618cbbf35929541a2e3c8603ea3c", "content_id": "12ec45b4ef3b5676dba4a8eeb9288b8b7b659c40", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1317, "license_type": "no_license", "max_line_length": 82, "num_lines": 39, "path": "/app.py", "repo_name": "JuanAI/weatherApi", "src_encoding": "UTF-8", "text": "from flask import Flask, request\nfrom flask_restful import Resource, Api, reqparse\nfrom flask_jwt import JWT, jwt_required, current_identity\nfrom common.utils import Utils\n\nfrom security import authenticate, identity\n\nfrom resources.api_id import ApiId\nfrom resources.gen_sum import GenSummary\nfrom resources.gen_sum_def import 
GenSummaryDef\nfrom resources.gen_sum_rest import GenSummaryRestr\nfrom resources.huminfo import HumInfo\nfrom resources.pressinfo import PressInfo\nfrom resources.tempinfo import TempInfo\n\n\napp = Flask(__name__)\napp.secret_key = '1234'\napi = Api(app)\n\[email protected]_first_request\ndef deleteContent(fName='api_password.txt'):\n    text_file = open(fName, \"w\")\n    text_file.close()\n\n\njwt = JWT(app, authenticate, identity)\n\napi.add_resource(ApiId, '/weather/london/<name>/<surname>') #OK\napi.add_resource(GenSummary, '/weather/london/<name>/<surname>/<date>/<hour>') #OK\napi.add_resource(GenSummaryDef, '/weather/london/<date>/<hour>') #OK\napi.add_resource(GenSummaryRestr, '/weather/london/restricted/<date>/<hour>') #OK\napi.add_resource(TempInfo, '/weather/london/<date>/<hour>/temperature') #OK\napi.add_resource(HumInfo, '/weather/london/<date>/<hour>/humidity') #OK\napi.add_resource(PressInfo, '/weather/london/<date>/<hour>/pressure') #OK\n\n\nif __name__ == '__main__':\n    app.run(debug=True)" }, { "alpha_fraction": 0.6643430590629578, "alphanum_fraction": 0.6896916031837463, "avg_line_length": 33.30434799194336, "blob_id": "6d95c6a13fc7c2c6b4fa1e2d00d81fa92913dad3", "content_id": "8b86511fe00ccc71cecb5ad70292fd96efd15abd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4734, "license_type": "no_license", "max_line_length": 351, "num_lines": 138, "path": "/README.md", "repo_name": "JuanAI/weatherApi", "src_encoding": "UTF-8", "text": "# SC_Weather_API\r\n\r\nPython flask REST web service providing an API to query the data. \r\n\r\n## Author: Juan Carlos Hernández Repilado\r\n\r\n## Installation\r\n\r\nBefore installing the API you should have the following packages installed:\r\n\r\n* **Python 3.6**\r\n* **requests**\r\n* **Flask**\r\n* **Flask-JWT**\r\n\r\nOnce the previous packages are installed, for running the application just type : ```python app.py ```\r\n\r\n## Requirements\r\n\r\nWhen making calls to the API, you should obtain the following results:\r\n\r\n* ```http://<host:ip>/weather/london/<date>/<hour minute>/``` it obtains a general summary of the weather:\r\n\r\n```\r\nNote, temperature converted from Kelvin to C and rounded up.\r\n\r\ne.g. curl http://<host:ip>/weather/london/20160706/0900/\r\n\r\n{\r\n\r\n  \"description\": \"few clouds\",\r\n\r\n  \"temperature\": \"15C\",\r\n\r\n  \"pressure\": \"1028.12\",\r\n\r\n  \"humidity\": \"88%\"\r\n\r\n}\r\n```\r\n* ```http://<host:ip>/weather/london/<date>/<hour minute>/temperature```\r\n\r\n```\r\ne.g. curl http://<host:ip>/weather/london/20160706/0900/temperature/\r\n\r\n{\r\n\r\n  \"temperature\": \"15C\"\r\n\r\n}\r\n```\r\n* ```http://<host:ip>/weather/london/<date>/<hour minute>/pressure```\r\n\r\n```\r\ne.g. curl http://<host:ip>/weather/london/20160706/0900/pressure/\r\n\r\n{\r\n\r\n  \"pressure\": \"1028.12\"\r\n\r\n}\r\n```\r\n* ```http://<host:ip>/weather/london/<date>/<hour minute>/humidity```\r\n\r\n```\r\ne.g. 
curl http://<host:ip>/weather/london/20160706/0900/humidity/\n\n{\n\n \"humidity\": \"88%\"\n\n}\n```\n* When no data is found, I would like to see the response:\n\n```\ncurl http://<host:ip>/weather/london/17670812/0900/temperature\n\n{\n\n \"status\": \"error\", \"message\": \"No data for 1767-08-12 09:00\"\n\n}\n```\n## Questions\n\n* If I wanted the temperature in Kelvin rather than Celsius, how could I specify this in API calls?\nI think one way would be establishing the temperature in Celsius by default, and if the client wants the temperature in Kelvin we could offer it by adding a kelvin attribute to the url, for example: ```http://<host:ip>/weather/london/<date>/<hour minute>/temperature/kelvin```\n* How would you test this REST service?\nFor testing Python code I would use pytest, and a more complete tool such as SonarQube for automated testing.\n* How would you check the code coverage of your tests?\nUsing SonarQube; it offers a \"Coverage Tool\"\n* How could the API be documented for third-parties to use?\nI think a good way would be Sphinx, because it is a documentation generator written and used by the Python community.\n* How would you restrict access to this API?\nUsing **Flask-JWT**. This option is already implemented in the actual API. Now we are going to show the steps for using it:\n\n * First you should make a POST request to\n **``` http://<host:ip>/auth ```** with the following username and password in JSON format:\n ``` {\n\t\"username\": \"SuperCarer\",\n\t\"password\": \"CaringElderly\"\n }\n ```\n \n * After that copy your access token, and go to **``` http://<host:ip>/weather/london/restricted/<date>/<hour minute> ```**, then in the headers introduce the Key **\"Authorization\"** and the Value **\"JWT YourAccessToken\"**; after that you should see the same result as making a GET request to **```http://<host:ip>/weather/london/<date>/<hour minute>/```**.\n\n* What would you suggest is needed to do daily forecast recoveries from openweather.org, keeping the web service up to date?\nThe best way would be accessing the real API (www.openweathermap.org). This option is already implemented. Now we are going to show the steps for using it:\n\n * First you should make a POST request to\n **``` http://<host:ip>/weather/london/<name>/<surname> ```** with the following id in JSON format:\n ```\n {\n\t\"id\": \"c90b7bacc92dede3a46368905e286e\"\n }\n ```\n Please take into consideration that the provided \"id\" should be the id from the openweathermap API.\n \n * After that you should receive a message saying:\n ```\n {\n \"message\": \"Your Api id has been saved\"\n }\n ```\n * Then make a GET request to **``` http://<host:ip>/weather/london/<name>/<surname> ```** using the same name and surname as before; you should get the same output as **```http://<host:ip>/weather/london/<date>/<hour minute>/```** but it is going to be updated.\n * In case you write the wrong name or surname, the system is going to let you know:\n ```\n {\n \"status\": \"error\",\n \"message\": \"your username do not match\"\n }\n ```\n Please take into account that the API does not use a database, only a text file to save the data, but this data is deleted every time you run it.\n \n## Bonus Points (Optional):\n\n* This API is made public at the following IP address: http://138.68.146.96/. It has been uploaded to the servers of Digital Ocean. This first takes you to an HTML file where all the steps to run the API are described. 
**The Access restriction is not installed.**\n" }, { "alpha_fraction": 0.7209302186965942, "alphanum_fraction": 0.7906976938247681, "avg_line_length": 9.75, "blob_id": "679fc33f6b1f18801e6ac3e9a28c0fcd334e06d8", "content_id": "f5aa3c4b33d2699b7f5ca0a9171954be538c4a19", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 43, "license_type": "no_license", "max_line_length": 13, "num_lines": 4, "path": "/requirements.txt", "repo_name": "JuanAI/weatherApi", "src_encoding": "UTF-8", "text": "python-3.6.5\nFlask\nFlask-RESTful\nFlask-JWT\n" } ]
7
aaman007/Django-Docker
https://github.com/aaman007/Django-Docker
2e710c32581d5cd049d860891a0e4388f8e89056
5fc01f544b0ed6912c3ab9a97fe99057ae504269
f5c5af13e75fa18922e0ed9fb8531bf607220a21
refs/heads/master
2023-04-24T15:38:08.850855
2021-05-03T18:03:06
2021-05-03T18:03:06
364,017,964
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7886179089546204, "alphanum_fraction": 0.7886179089546204, "avg_line_length": 23.600000381469727, "blob_id": "b8868803a4d717e19dc45eee7f2f7d7180883a06", "content_id": "4862754a279b1d1526de3c50c3297177d1b1d429", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 123, "license_type": "no_license", "max_line_length": 45, "num_lines": 5, "path": "/core/views.py", "repo_name": "aaman007/Django-Docker", "src_encoding": "UTF-8", "text": "from django.views.generic import TemplateView\n\n\nclass HomeTemplateView(TemplateView):\n template_name = 'core/home.html'\n" } ]
1
X-KG-X/candles
https://github.com/X-KG-X/candles
8658ca0d14f39c12b4e26268115fb604e0be63ef
0620690ca219d74d9c387345e2d2f283b91d6726
4bc63c0497d3b0474b4ac26c2e6e0cea11559c6b
refs/heads/master
2020-07-11T14:27:04.688455
2019-08-30T20:58:22
2019-08-30T20:58:22
204,568,345
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6431870460510254, "alphanum_fraction": 0.6431870460510254, "avg_line_length": 38.40909194946289, "blob_id": "7bbe36f274a7ee4b3e88ec2d18e3f5b21275e2c1", "content_id": "b59a586b9b25733a1d1ecb4564c54141522f1656", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 866, "license_type": "no_license", "max_line_length": 101, "num_lines": 22, "path": "/apps/candle_app/urls.py", "repo_name": "X-KG-X/candles", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\nfrom . import views\n\nurlpatterns=[\n url(r'^buy$', views.buy),\n url(r'^remove/(?P<product_id>\\d+)$', views.remove),\n url(r'^history$', views.history),\n url(r'^cart$',views.cart),\n url(r'^add/(?P<product_id>\\d+)$', views.add),\n url(r'^detail/(?P<product_id>\\d+)$', views.detail),\n url(r'^$', views.index),\n url(r'^check_login$', views.check_login),\n url(r'^check_registration$',views.check_registration),\n url(r'^dashboard$',views.dashboard),\n url(r'^logoff$', views.logoff),\n\n url(r'^search_item$', views.search_item), # search item with string from search bar\n url(r'^search_ajax$', views.search_ajax), # show auto populated result while typing in search bar\n url(r'^update_select_options/(?P<product_id>\\d+)$', views.update_select_options),\n\n url(r'^google_login', views.google_login),\n]" }, { "alpha_fraction": 0.6406490802764893, "alphanum_fraction": 0.6457453370094299, "avg_line_length": 39.09139633178711, "blob_id": "3edea23389d0a8c9b87296c107cca729d5f200d7", "content_id": "d2f042b0be3d252fcf10832ae7921fd2e7fdfd08", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14913, "license_type": "no_license", "max_line_length": 221, "num_lines": 372, "path": "/apps/candle_app/views.py", "repo_name": "X-KG-X/candles", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, HttpResponse, redirect\nfrom django.contrib import messages\nimport bcrypt\nfrom .models import *\nfrom datetime import datetime\nfrom django.db.models import Q, Sum, Min, F\nimport random, string\n\nfrom django.core.mail import send_mail\nfrom django.conf import settings\n\n\ndef index(request):\n print(\"*\"*50, \"I am in index\")\n return render(request,\"candle_app/index.html\")\n\n# registration\ndef check_registration(request):\n print(\"*\"*50, \"I am in index\")\n errors=User.objects.login_validator(request.POST)\n if len(errors)>0:\n for key, value in errors.items():\n messages.error(request,value)\n return redirect(\"/\")\n else:\n right_user=User.objects.create(first_name=request.POST['f_name'],last_name=request.POST['l_name'], email=request.POST['register_email'],password=bcrypt.hashpw(request.POST['register_pwd'].encode(), bcrypt.gensalt()))\n request.session['right_user_id']=right_user.id\n request.session['cart_id']=1\n # messages.error(request, \"Successfully registered (or logined in)!\")\n return redirect(\"/dashboard\")\n # return HttpResponse(\"sdjyusdy\")\n\n# login\ndef check_login(request):\n print(\"*\"*50, \"I am in check_login\")\n try:\n user = User.objects.get(email=request.POST['login_email']) # hm...is it really a good idea to use the get method here?\n if bcrypt.checkpw(request.POST['login_pwd'].encode(), user.password.encode()):\n print(\"password match\")\n request.session['right_user_id']=User.objects.get(email=request.POST['login_email']).id\n print(request.session['right_user_id'])\n if user.order.all().count()<1:\n request.session['cart_id']=1\n else:\n 
request.session['cart_id']=user.order.last().id+1\n return redirect(\"/dashboard\")\n # return HttpResponse(\"sdkbfilusd\")\n else:\n messages.error(request,\"failed password\")\n return redirect(\"/\")\n except:\n messages.error(request, \"Wrong Email!!!\")\n return redirect(\"/\")\n\n# dashboard - all the products in our inventories\ndef dashboard(request):\n print(\"*\"*50, \"I am in dashboard\")\n if 'right_user_id' not in request.session:\n print (\"right user id not exist!!\")\n return redirect(\"/\")\n user=User.objects.get(id=request.session['right_user_id'])\n num_items_in_cart = Order.objects.filter(user=user).aggregate(total_quantity=Sum('quantity'))['total_quantity'] \n product=Product.objects.all()\n context={\n 'user':user,\n 'all_products':product,\n 'range' : range(1,11),\n 'num_items_in_cart':num_items_in_cart if num_items_in_cart != None else 0 \n } \n return render(request,\"candle_app/dashboard.html\", context)\n \n# logoff. not deleting items in the cart even after loggin out\ndef logoff(request):\n print(\"*\"*50, \"I am in logoff\")\n if 'right_user_id' not in request.session:\n return redirect(\"/\")\n # Order.objects.all().delete()\n request.session.clear()\n return redirect(\"/\")\n\n# detail - shows product detail when user clicks it.\ndef detail(request, product_id):\n print(\"*\"*50, \"I am in detail\")\n if 'right_user_id' not in request.session:\n return redirect(\"/\") \n user = User.objects.get(id=request.session['right_user_id'])\n num_items_in_cart = Order.objects.filter(user=user).aggregate(total_quantity=Sum('quantity'))['total_quantity'] \n context={\n 'product': Product.objects.get(id=product_id),\n 'user':User.objects.get(id=request.session['right_user_id']),\n 'num_items_in_cart':num_items_in_cart if num_items_in_cart != None else 0,\n 'range' : range(1,11),\n }\n return render(request, \"candle_app/detail.html\", context)\n\n# add - add items to cart\ndef add(request, product_id):\n print(\"*\"*50, \"I am in add\")\n if 'right_user_id' not in request.session:\n return redirect(\"/\")\n quantity=request.POST['quantity']\n user=User.objects.get(id=request.session['right_user_id'])\n product=Product.objects.get(id=product_id)\n # add items to the cart\n Order.objects.create(cart_id=request.session['cart_id'], user=user, product=product, quantity=quantity)\n \n # # keep track of stocks in the product table\n # product.inventory = product.inventory - int(quantity)\n # product.save()\n # print(\"product:\", product.name, product.inventory)\n # MOVE TO DO IN update_select_options\n\n # keep track of number of items in the cart\n num_items_in_cart = Order.objects.filter(user=user).aggregate(total_quantity=Sum('quantity'))['total_quantity'] \n context = {'num_items_in_cart':num_items_in_cart if num_items_in_cart != None else 0 }\n print (\"context: \", context)\n return render(request, 'candle_app/partials/num_items_cart.html', context)\n # return redirect(\"/dashboard\")\n\ndef update_select_options(request, product_id) :\n if 'right_user_id' not in request.session:\n return redirect(\"/\")\n user=User.objects.get(id=request.session['right_user_id'])\n product=Product.objects.get(id=product_id)\n \n # keep track of stocks in the product table\n quantity=request.POST['quantity']\n product.inventory = product.inventory - int(quantity)\n product.save()\n print(\"product:\", product.name, product.inventory)\n\n context={\n 'user':user,\n 'product':product,\n 'range' : range(1,11),\n } \n return render(request,\"candle_app/partials/update_select_options.html\", 
context)\n\n# cart - show items in the cart\ndef cart(request):\n print(\"*\"*50, \"I am in cart\")\n if 'right_user_id' not in request.session:\n return redirect(\"/\")\n user=User.objects.get(id=request.session['right_user_id'])\n orders=Order.objects.filter(user=user)\n total=0.0\n for order in orders:\n total += order.quantity*order.product.price\n num_items_in_cart = Order.objects.filter(user=user).aggregate(total_quantity=Sum('quantity'))['total_quantity'] \n \n # orders in the cart - group by product name / product size / fragrance and shows summed quantity, ..\n orders_grouped = orders.values('product__id', 'product__name', 'product__size', 'product__fragrance').annotate(num_q=Sum('quantity'), \n price=Min('product__price'),sub_total=Sum(F('quantity')*F('product__price')))\n context={\n # 'orders':orders,\n 'orders_grouped' : orders_grouped,\n 'user':user,\n 'total':f\"${total}\",\n 'num_items_in_cart':num_items_in_cart if num_items_in_cart != None else 0 \n }\n return render(request,\"candle_app/cart.html\", context)\n\n# history - show order (purchased) history\ndef history(request):\n print(\"*\"*50, \"I am in history\")\n if 'right_user_id' not in request.session:\n return redirect(\"/\")\n user=User.objects.get(id=request.session['right_user_id'])\n # user_orders=History.objects.filter(user=user)\n \n # collect order information by history_id\n # history_ids = History.objects.all().values_list('history_id').distinct()\n history_ids = History.objects.filter(user=user).values_list('history_id').distinct()\n history_order_set = []\n for history_id in history_ids :\n history_id = history_id[0] # get unique history id\n\n # get items ordered under history_id\n orders = History.objects.filter(history_id=history_id, user=user)\n orders_set = []\n total_price = 0.0\n for order in orders :\n subtotal_price = (order.quantity * order.product.price)\n product_name_with_size = f\"{order.product.name} ({order.product.size})\"\n orders_set.append((product_name_with_size, order.quantity, subtotal_price))\n total_price += subtotal_price\n\n history_order_set.append({\"history_id\" : history_id, \n \"orders_set\" : orders_set,\n \"total_price\" : total_price,\n \"created_at\" : order.created_at\n })\n # sort by created_at\n history_order_set = sorted(history_order_set, key=lambda i : i['created_at'], reverse=True)\n num_items_in_cart = Order.objects.filter(user=user).aggregate(total_quantity=Sum('quantity'))['total_quantity'] \n \n context={\n 'user':user,\n # 'user_histories':user_histories\n 'history_order_set' : history_order_set,\n 'num_items_in_cart':num_items_in_cart if num_items_in_cart != None else 0 \n }\n return render(request,\"candle_app/history.html\", context)\n\n# remove - remove from the cart\ndef remove(request, product_id):\n print(\"*\"*50, \"I am in cart\")\n if 'right_user_id' not in request.session:\n return redirect(\"/\")\n user = User.objects.get(id=request.session['right_user_id'])\n orders=Order.objects.filter(product=Product.objects.get(id=product_id), user=user)\n back_inventory = orders.aggregate(s=Sum('quantity'))['s']\n orders.delete()\n\n # put the quantity back to the inventory\n product = Product.objects.get(id=product_id)\n product.inventory = product.inventory + back_inventory\n product.save()\n print(\"product:\", product.name, product.inventory)\n\n return redirect(\"/cart\")\n\n# search keyword from search bar in product database\ndef search_item(request) :\n if 'search' not in request.POST :\n return redirect(\"/dashboard\")\n\n keyword = 
request.POST['search'].lower()\n user=User.objects.get(id=request.session['right_user_id'])\n\n if (keyword == \"\") :\n return redirect(\"/dashboard\")\n\n keyword_list = keyword.split()\n keyword_complete = keyword_list[0:len(keyword_list)-1]\n keyword_incomplete = keyword_list[len(keyword_list)-1]\n\n # get products has typed_incomplete in their name\n product_candidates = Product.objects.filter(Q(name__icontains=keyword_incomplete) | Q(size__icontains=keyword_incomplete))\n\n product_result = []\n # for each candidates - check if it has all the word in typed_complete\n for product in product_candidates :\n p_name = product.name + \" \" + product.size\n if (productStringContains(p_name, keyword_complete)) :\n product_result.append(product)\n\n num_search = len(product_result)\n num_items_in_cart = Order.objects.filter(user=user).aggregate(total_quantity=Sum('quantity'))['total_quantity'] \n context = {\n 'user':user,\n 'keyword' : keyword,\n 'num_search' : num_search,\n 'products_list' : product_result,\n 'range' : range(1,11),\n 'num_items_in_cart':num_items_in_cart if num_items_in_cart != None else 0 \n }\n return render(request, \"candle_app/dashboard_searched.html\", context)\n\n# search result while typing - AJAX\ndef search_ajax(request) :\n typed = request.POST['search'].lower()\n\n if (len(typed) == 0) :\n context = {\"products_list\" : Product.objects.all(), 'range': range(1,11)}\n return render(request, 'candle_app/search_ajax_img.html', context)\n\n typed_list = typed.split()\n typed_complete = typed_list[0:(len(typed_list)-1)]\n typed_incomplete = typed_list[len(typed_list)-1]\n\n # get products has typed_incomplete in their name\n product_candidates = Product.objects.filter(Q(name__icontains=typed_incomplete) | Q(size__icontains=typed_incomplete))\n\n product_result = []\n # for each candidates - check if it has all the word in typed_complete\n for product in product_candidates :\n p_name = product.name + \" \" + product.size\n if (productStringContains(p_name, typed_complete)) :\n product_result.append(product)\n\n context = {\"products_list\" : product_result, 'range': range(1,11)}\n return render(request, 'candle_app/search_ajax_img.html', context)\n\n# helper function for checking a product has list of strings in their name\ndef productStringContains(string, word_list) :\n # check if string contains all the word in word_list\n string = string.lower()\n string_list = string.split()\n for word in word_list :\n if not word in string_list :\n return False\n return True\n\n# buy - \ndef buy(request):\n print(\"*\"*50, \"I am in buy\")\n if 'right_user_id' not in request.session:\n return redirect(\"/\")\n user = User.objects.get(id=request.session['right_user_id'])\n\n # create a unique history id to be able to keep track of items ordered (purchased) at once\n purchased_at = str(datetime.now())\n history_id = f\"{user.id}_{purchased_at}_{randomword(10)}\"\n while (True) :\n history_with_id = History.objects.filter(history_id=history_id)\n if (len(history_with_id) == 0) :\n break\n # create another history id\n history_id = f\"{user.id}_{purchased_at}_{randomword(10)}\"\n\n # for order in Order.objects.all():\n for order in Order.objects.filter(user=user) :\n History.objects.create(history_id=history_id, user=user, product=order.product, quantity=order.quantity)\n\n #send confirmation email\n subject = 'Thank you for your purchase'\n message = 'It means a world to us that you have chosen our Brand! Enjoy our Candles. 
Thank you again!'\n email_from = settings.EMAIL_HOST_USER\n recipient_list = [user.email]\n send_mail( subject, message, email_from, recipient_list )\n \n # Order.objects.all().delete()\n Order.objects.filter(user=user).delete()\n num_items_in_cart = Order.objects.filter(user=user).aggregate(total_quantity=Sum('quantity'))['total_quantity'] \n context={\n 'user': user,\n 'num_items_in_cart':num_items_in_cart if num_items_in_cart != None else 0 \n }\n return render (request,\"candle_app/confirm.html\", context)\n\n# helper function to generate a random string\ndef randomword(length):\n letters = string.ascii_lowercase\n return ''.join(random.choice(letters) for i in range(length))\n\n# google login - test\ndef google_login(request) :\n print (\"## I'm in google login test\")\n print (request)\n print (\"email:\", request.GET['email'])\n print (\"name:\",request.GET['name'])\n email = request.GET['email']\n name = request.GET['name']\n\n name_list = name.split()\n first_name = name_list[0]\n last_name = \"XX\"\n \n if (len(name_list) > 1) : \n last_name = name_list[1]\n\n user = User.objects.filter(email=email)\n if (not user) :\n user = User.objects.create(\n first_name=first_name,\n last_name=last_name,\n email=email,\n password=randomword(8)\n )\n request.session['right_user_id'] = user.id\n request.session['cart_id']=1\n print (request.session['right_user_id'])\n # return redirect('/dashboard')'\n return HttpResponse(\"anything\")\n else : \n request.session['right_user_id'] = user[0].id\n request.session['cart_id']=1\n print (request.session['right_user_id'])\n # return redirect('/dashboard')\n return HttpResponse(\"anything\")\n # return HttpResponse(\"testing\")" } ]
2
shirashiki/pyplay
https://github.com/shirashiki/pyplay
a766e6197a7d586bc000fbe28e1eba1f76b671f7
33d94d89da4465c6ab582c45ca59a25ba669d696
ae002d9a315370345ff557c166dd87f6afad2cd2
refs/heads/master
2020-04-06T16:43:29.429604
2015-09-30T23:11:21
2015-09-30T23:11:21
30,511,210
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.550000011920929, "alphanum_fraction": 0.550000011920929, "avg_line_length": 13.5, "blob_id": "acab4b1bb2b8c2d0541fb1c11460347604d8d3ff", "content_id": "9b9aac6e34965aea09fe09b6a0366c9f75073ad1", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 60, "license_type": "permissive", "max_line_length": 21, "num_lines": 4, "path": "/pynoob/simpleprogram.py", "repo_name": "shirashiki/pyplay", "src_encoding": "UTF-8", "text": "__author__ = 'silvio'\n\ndef main(args):\n print('hello')\n\n\n" } ]
1
ZHercher/Period-Doubling-and-Chaos-Project-G-from-Chapter-3
https://github.com/ZHercher/Period-Doubling-and-Chaos-Project-G-from-Chapter-3
d3570490e6e78e3922086555a6097f3f79a5e848
52b096dd4a10db30fdc599ad369bc2c3f0a28af2
2e074b6cfb42e860fe46ff3710e461836eb62e4c
refs/heads/master
2020-04-26T02:24:42.819754
2019-03-01T04:21:01
2019-03-01T04:21:01
173,234,028
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4973214268684387, "alphanum_fraction": 0.5535714030265808, "avg_line_length": 17.6842098236084, "blob_id": "4243bf3b7e874872f0bb2d92674b151e7babb85f", "content_id": "e2e685b20509a274e94ab9f2e9b23868336f1599", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1120, "license_type": "no_license", "max_line_length": 65, "num_lines": 57, "path": "/bifurcation.py", "repo_name": "ZHercher/Period-Doubling-and-Chaos-Project-G-from-Chapter-3", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\r\n\r\n\"\"\"\r\nTHIS FILE IS USED TO DRAW THE BIFURCATION DIAGRAMS OF EACH METHOD\r\n\"\"\"\r\n\r\ndef eulers(n, h, p0=0.1):\r\n\tt_vals = [0]\r\n\tp_vals = [p0]\r\n\r\n\tfor i in range(n-1):\r\n\t\tt_n = t_vals[-1]\r\n\t\tp_n = p_vals[-1]\r\n\r\n\t\tt_vals.append(t_n + h)\r\n\t\tp_vals.append((1+10*h)*p_n - 10*h*p_n**2)\r\n\treturn p_vals\r\n\r\ndef runge_kutta(n, h, p_0=0.1):\r\n\r\n\tf = lambda p: 10*p*(1-p)\r\n\r\n\tp_vals = [p_0]\r\n\r\n\tfor i in range(n-1):\r\n\t\tp_n = p_vals[-1]\r\n\t\tk1 = h*f(p_n)\r\n\t\tk2 = h*f(p_n + k1/2)\r\n\t\tk3 = h*f(p_n + k2/2)\r\n\t\tk4 = h*f(p_n + k3)\r\n\r\n\t\tp_vals.append(p_n + (1/6)*(k1 + k2/2 + k3/2 + k4))\r\n\t\t\r\n\treturn p_vals\r\n\r\ndef distinct(l1):\r\n\talready_seen = []\r\n\toutput = []\r\n\tfor x in l1:\r\n\t\tif x not in already_seen:\r\n\t\t\toutput.append(x)\r\n\treturn output\r\n\r\nh_step = 0.0005\r\nx_lower = 0.15\r\nx_upper = 0.5\r\ndx = x_upper - x_lower\r\nx_count = int(dx//h_step)\r\nx_axis = [x_lower+x*h_step for x in range(x_count)]\r\nvals = [distinct(runge_kutta(400, h)[-40:]) for h in x_axis]\r\n\r\nfor te, pe in zip(x_axis, vals):\r\n\tplt.scatter([te]*len(pe), pe, s=1, c='#4286f4')\r\n\r\nplt.xlabel('h')\r\nplt.ylabel('approximate solution')\r\nplt.show()" }, { "alpha_fraction": 0.5100133419036865, "alphanum_fraction": 0.5674232244491577, "avg_line_length": 22.225807189941406, "blob_id": "d11937075df89b6792505eada083b94061868282", "content_id": "470b6bbb24559b87af17591a1fc7113c6bd86829", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 749, "license_type": "no_license", "max_line_length": 82, "num_lines": 31, "path": "/values.py", "repo_name": "ZHercher/Period-Doubling-and-Chaos-Project-G-from-Chapter-3", "src_encoding": "UTF-8", "text": "\"\"\"\r\nTHIS FILE IS USED TO GENERATE A LATEX-FORMATTED TABLE OF VALUES FROM EULERS METHOD\r\n\"\"\"\r\n\r\ndef eulers(n, h, p0=0.1):\r\n\tt_vals = [0]\r\n\tp_vals = [p0]\r\n\r\n\tfor i in range(n-1):\r\n\t\tt_n = t_vals[-1]\r\n\t\tp_n = p_vals[-1]\r\n\r\n\t\tt_vals.append(t_n + h)\r\n\t\tp_vals.append((1+10*h)*p_n - 10*h*p_n**2)\r\n\treturn p_vals\r\n\r\ncol1 = eulers(41, .18)\r\ncol2 = eulers(41, .23)\r\ncol3 = eulers(41, .25)\r\ncol4 = eulers(41, .30)\r\n\r\nlatex_table_string = ''\r\n\r\nfor i in range(len(col1)):\r\n\tlatex_table_string += str(i) + ' & '\r\n\tlatex_table_string += str(round(col1[i],3)) + ' & '\r\n\tlatex_table_string += str(round(col2[i],3)) + ' & '\r\n\tlatex_table_string += str(round(col3[i],3)) + ' & '\r\n\tlatex_table_string += str(round(col4[i],3)) + ' \\\\\\\\\\n'\r\n\r\nprint(latex_table_string)" }, { "alpha_fraction": 0.4813374876976013, "alphanum_fraction": 0.5443235039710999, "avg_line_length": 16.399999618530273, "blob_id": "716f638243ef65abf3cde5c50f9eeb58704eaf5d", "content_id": "1b421ccc343aa050d78c392d7706ecf2e8baac11", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 1286, "license_type": "no_license", "max_line_length": 100, "num_lines": 70, "path": "/dfield.py", "repo_name": "ZHercher/Period-Doubling-and-Chaos-Project-G-from-Chapter-3", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport math\r\n\r\n\"\"\"\r\nTHIS FILE IS USED TO DRAW THE DIRECTION FIELD OF THE DIFF EQ AND APPROXIMATION METHOD STEPS ALONG IT\r\n\"\"\"\r\n\r\ndef eulers(n, h, p0=0.1):\r\n\tt_vals = [0]\r\n\tp_vals = [p0]\r\n\r\n\tfor i in range(n-1):\r\n\t\tt_n = t_vals[-1]\r\n\t\tp_n = p_vals[-1]\r\n\r\n\t\tt_vals.append(t_n + h)\r\n\t\tp_vals.append((1+10*h)*p_n - 10*h*p_n**2)\r\n\treturn p_vals\r\n\r\ndef runge_kutta(n, h, p_0=0.1):\r\n\r\n\tf = lambda p: 10*p*(1-p)\r\n\r\n\tp_vals = [p_0]\r\n\r\n\tfor i in range(n-1):\r\n\t\tp_n = p_vals[-1]\r\n\t\tk1 = h*f(p_n)\r\n\t\tk2 = h*f(p_n + k1/2)\r\n\t\tk3 = h*f(p_n + k2/2)\r\n\t\tk4 = h*f(p_n + k3)\r\n\r\n\t\tp_vals.append(p_n + (1/6)*(k1 + k2/2 + k3/2 + k4))\r\n\tprint(p_vals)\r\n\treturn p_vals\r\n\r\n\r\n#h_vals=[0.18, 0.23, 0.25, 0.3]\r\nh_vals = [0.27]\r\ninit_vals = [0.1]\r\n\r\ndp_dt_str = \"10*p*(1-p)\"\r\n\r\nplane_step = 0.05\r\narr_len = 0.2\r\n\r\nX = np.arange(0, 4.5, plane_step)\r\nY = np.arange(-0.2, 1.4, plane_step)\r\n\r\nV = np.concatenate([np.vstack(10*Y*(1-Y)) for i in range(len(X))], axis=1)\r\nU = np.ones((len(V),len(V[0])))\r\n\r\nplt.quiver(X,Y,U,V)\r\n\r\n\r\nfor h in h_vals:\r\n\tfor init_val in init_vals:\r\n\t\thx = np.arange(0, 4.5, h)\r\n\t\thy = np.array(runge_kutta(len(hx), h, init_val))\r\n\r\n\t\tplt.plot(hx,hy)\r\n\r\nplt.xlabel('t')\r\nplt.ylabel('p')\r\n\r\naxes = plt.gca()\r\naxes.set_xlim([0,4])\r\n\r\nplt.show()" } ]
3
micahjones13/Sorting
https://github.com/micahjones13/Sorting
12be741fcf3a55e615eca0587076038d32cbb390
374c2682868c9b925e863ea58edce2f39aabfed6
a585c9917db20f20288f2d53116f7e1a8f62f317
refs/heads/master
2020-08-07T13:12:08.061171
2019-10-10T16:28:07
2019-10-10T16:28:07
213,464,382
0
0
null
2019-10-07T19:06:01
2019-10-07T19:06:03
2019-10-10T16:28:08
null
[ { "alpha_fraction": 0.5265687704086304, "alphanum_fraction": 0.5706275105476379, "avg_line_length": 31.284482955932617, "blob_id": "c3da611734d112d4e2dd31f8dfdeaf1ff96bab78", "content_id": "a49b6573c3af0e6d5bef08ea4d442d17945a18d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3769, "license_type": "no_license", "max_line_length": 159, "num_lines": 116, "path": "/src/iterative_sorting/iterative_sorting.py", "repo_name": "micahjones13/Sorting", "src_encoding": "UTF-8", "text": "# TO-DO: Complete the selection_sort() function below\ndef selection_sort(arr):\n # loop through n-1 elements\n for i in range(0, len(arr) - 1):\n # takes first index, therefore splitting it into sorted vs unsorted\n cur_index = i\n # sets smallest_index equal to cur_index, which means the first index [0] is smallest rn\n # same as not even using cur_index. could just set smallest_index = i\n smallest_index = cur_index\n # TO-DO: find next smallest element\n # (hint, can do in 3 loc)\n # loop again through arr, comparing the cur_index (i) to new index var j\n # if at any point in the loop arr[smallest_index] > arr[j], then it's no longer the smallest\n for j in range(cur_index, len(arr)):\n # print(i, 'i', j, 'j', arr[j], 'arrj', arr[i], 'arri')\n if arr[smallest_index] > arr[j]:\n # replace smallest with new smalles\n smallest_index = j\n # print(j, 'J after IF')\n\n # TO-DO: swap\n arr[cur_index], arr[smallest_index] = arr[smallest_index], arr[cur_index]\n # print(arr, 'after swap')\n\n return arr\n\n\n\"\"\"\n\nExample:\nFirst Pass:\n( 5 1 4 2 8 ) โ€“> ( 1 5 4 2 8 ), Here, algorithm compares the first two elements, and swaps since 5 > 1.\n( 1 5 4 2 8 ) โ€“> ( 1 4 5 2 8 ), Swap since 5 > 4\n( 1 4 5 2 8 ) โ€“> ( 1 4 2 5 8 ), Swap since 5 > 2\n( 1 4 2 5 8 ) โ€“> ( 1 4 2 5 8 ), Now, since these elements are already in order (8 > 5), algorithm does not swap them.\n\nSecond Pass:\n( 1 4 2 5 8 ) โ€“> ( 1 4 2 5 8 )\n( 1 4 2 5 8 ) โ€“> ( 1 2 4 5 8 ), Swap since 4 > 2\n( 1 2 4 5 8 ) โ€“> ( 1 2 4 5 8 )\n( 1 2 4 5 8 ) โ€“> ( 1 2 4 5 8 )\nNow, the array is already sorted, but our algorithm does not know if it is completed. The algorithm needs one whole pass without any swap to know it is sorted.\n\nThird Pass:\n( 1 2 4 5 8 ) โ€“> ( 1 2 4 5 8 )\n( 1 2 4 5 8 ) โ€“> ( 1 2 4 5 8 )\n( 1 2 4 5 8 ) โ€“> ( 1 2 4 5 8 )\n( 1 2 4 5 8 ) โ€“> ( 1 2 4 5 8 )\n\n\n\n\n\"\"\"\n# TO-DO: implement the Bubble Sort function below\n# Bubble sort compares neaighbors and rearannges based on the comparison\n\n\ndef bubble_sort(arr):\n # loop through the array\n for i in range(len(arr)):\n # print(i, 'i')\n # loop again so you can compare to neighbor\n # need len(arr)- 1 or else out of bounds error\n for j in range(0, len(arr)-1):\n # if j is bigger than it's neighbor j+1,\n if arr[j] > arr[j+1]:\n # swap the postions\n arr[j], arr[j+1] = arr[j+1], arr[j]\n # print(arr, 'iterative')\n # print(arr)\n return arr\n\n\n# 1. Loop through your array\n# - Compare each element to its neighbor\n# - If elements in wrong position (relative to each other, swap them)\n# 2. If no swaps performed, stop. Else, go back to the element at index 0 and repeat step 1.\n#! Made this just to test what it looked like with only 1 for loop. 
It rearranges the first number, but stops after that\n# def bubble_test(arr):\n#     for i in range(len(arr) - 1):\n#         if arr[i] > arr[i+1]:\n#             arr[i], arr[i+1] = arr[i+1], arr[i]\n#     print(arr)\n#     return arr\n\n# STRETCH: implement the Count Sort function below\n\n\ndef count_sort(arr, maximum=-1):\n\n    return arr\n\n\nmy_arr = [9, 6, 4, 7, 8, 1, 0, 2, 5, 3]\n\n# selection_sort(my_arr)\n# bubble_sort(my_arr)\n# bubble_test(my_arr)\n\n\n\"\"\"\ntried this for bubble sort first, but it's basically the select sort\ndef bubble_sort(arr):\n    for i in range(len(arr) - 1):\n        cur_index = i\n        for j in range(i, len(arr)):\n            if arr[cur_index] > arr[j]:\n                arr[cur_index], arr[j] = arr[j], arr[cur_index]\n                i += 1\n            else:\n                i += 1\n    print(arr)\n    return arr\n\n\n\"\"\"\n" }, { "alpha_fraction": 0.5660337209701538, "alphanum_fraction": 0.5852357745170593, "avg_line_length": 26.092485427856445, "blob_id": "f5b8d81e53a5945166cb1b0858ed33e7b5106607", "content_id": "93183d049e2785fad88f7959050dfec96c390455", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4687, "license_type": "no_license", "max_line_length": 114, "num_lines": 173, "path": "/src/recursive_sorting/recursive_sorting.py", "repo_name": "micahjones13/Sorting", "src_encoding": "UTF-8", "text": "# TO-DO: complete the helper function below to merge 2 sorted arrays\n#left = a, right = b\n\n\ndef merge(arrA, arrB):\n    # print(arrA, arrB, 'arrays')\n\n    elements = len(arrA) + len(arrB)\n    merged_arr = [0] * elements\n\n# starting values for the a and b array indexes\n    a = 0\n    b = 0\n    # loop as many times as elements is long to reshape the merged_arr\n    for i in range(0, elements):\n        # if a is greater than the length of arrA, then it's empty. Fill in merged with arrB automatically\n        if a >= len(arrA):\n            merged_arr[i] = arrB[b]\n            # up counters if the if statement is hit in order to move up in comparisons\n            b += 1\n        # same thing here, but b is empty\n        elif b >= len(arrB):\n            merged_arr[i] = arrA[a]\n            a += 1\n        # if the value at arrA[a] is less than arrB[b], store that value into merged_arr at that point in loop (i)\n        elif arrA[a] < arrB[b]:\n            merged_arr[i] = arrA[a]\n            a += 1\n        # otherwise a is greater or equal; using else here also handles ties,\n        # which an arrA[a] > arrB[b] check would skip, leaving 0s in merged_arr\n        else:\n            merged_arr[i] = arrB[b]\n            b += 1\n\n    # print(merged_arr, 'MERGEDARR')\n\n    return merged_arr\n\n\n# TO-DO: implement the Merge Sort function below USING RECURSION (5 lines)\ndef merge_sort(arr):\n    # TO-DO\n    # 1. 
While your data set contains more than one item, split it in half.\n    # eventually each item should be in its own little list\n    # base case\n    if len(arr) <= 1:\n        return arr\n    # find the middle of the array\n    mid = len(arr) // 2\n    # split the array into 2 - right and left of mid\n    split_right = arr[mid:]\n    split_left = arr[:mid]\n    # print('mid', mid)\n    # print(split_right, 'split_right')\n    # print(split_left, 'split left')\n    # keep splitting the right and left until they are all in their own little lists\n    left = merge_sort(split_left)\n    right = merge_sort(split_right)\n    # put them back together\n    arr = merge(left, right)\n    # print(arr, 'merge_sort arr return')\n    return arr\n\n\n# STRETCH: implement an in-place merge sort algorithm\ndef merge_in_place(arr, start, mid, end):\n    # TO-DO\n\n    return arr\n\n\ndef merge_sort_in_place(arr, l, r):\n    # TO-DO\n\n    return arr\n\n\n# STRETCH: implement the Timsort function below\n# hint: check out https://github.com/python/cpython/blob/master/Objects/listsort.txt\ndef timsort(arr):\n\n    return arr\n\n\nmy_arr = [9, 6, 4, 7, 8, 1, 0, 2, 5, 3]\narr1 = [6]\narr2 = [9]\n# Quicksort from lesson\n# pick a pivot, move to right spot\n\n\ndef quicksort(arr):\n    if len(arr) <= 1:\n        return arr\n    # select a pivot, often times this is the last or first element\n    pivot = arr[-1]\n    # move all elements smaller than pivot to left, all greater to right\n    left = []\n    right = []\n    # don't care about last item because that's the pivot\n    for i in range(len(arr) - 1):\n        item = arr[i]\n        if item < pivot:\n            left.append(item)\n        else:\n            right.append(item)\n\n    return quicksort(left) + [pivot] + quicksort(right)\n    # while LHS and RHS are greater than 1, repeat steps 1-3 on each\n\n# merge sort from class\n\n\ndef merge1(arrA, arrB):\n    # take 2 sorted arrays, merge them into a single sorted array containing all elements from both\n    # create new array of len(arrA + arrB)\n    elements = len(arrA) + len(arrB)\n    merged_arr = [0] * elements\n    # create markers for a and b starting at 0\n    a = 0\n    b = 0\n    # while a and b are < len(arrA) and len(arrB)\n    for i in range(0, elements):\n        # compare the items at indices a/b, add the smallest to the merged array\n        # increment a or b, whichever was smallest\n        if a >= len(arrA):\n            merged_arr[i] = arrB[b]\n            b += 1\n        elif b >= len(arrB):\n            merged_arr[i] = arrA[a]\n            a += 1\n        elif arrA[a] < arrB[b]:\n            merged_arr[i] = arrA[a]\n            a += 1\n        else: # arrA[a] >= arrB[b]:\n            merged_arr[i] = arrB[b]\n            b += 1\n    return merged_arr\n\n\ndef merge_sort1(arr):\n    # take an unsorted list, return a sorted list\n    if len(arr) <= 1:\n        return arr\n    else:\n        # split this in half, sort the halves\n        left_half = arr[0: len(arr) // 2]\n        right_half = arr[len(arr) // 2:]\n        # recurse with this same implementation (was calling merge_sort/merge\n        # from the other version above)\n        left_sorted = merge_sort1(left_half)\n        right_sorted = merge_sort1(right_half)\n        # merge sorted halves\n        return merge1(left_sorted, right_sorted)\n\n\n# print(quicksort(my_arr))\n# print(merge(arr1, arr2))\nprint(merge_sort(my_arr))\n\n\n\"\"\"\n\n\n9, 6, 4, 7, 8, 1, 0, 2, 5, 3\n\nleft: 9 6 4 7 8 ||| right: 1 0 2 5 3\nagain with left\nleft 9 6 || right 4 7 8\nagain with left \nleft: 9 right 6 \n\n\n\n6 9 4 8 7 0 1 2 3 5 \n\"\"\"\n" } ]
2
bitcoineazy/yamdb_final
https://github.com/bitcoineazy/yamdb_final
d8d9ae20fed1b5dbfbd1fdb58d4b9a8b8c882cb0
d1e37008d6c5ce76dc68dedd98e822a0c182d6e4
eeaa8da41ec3ad6dfd1b69c0625869f3cf26830e
refs/heads/master
2023-07-18T23:59:56.615702
2021-09-20T00:34:00
2021-09-20T00:34:00
406,472,770
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.7680798172950745, "alphanum_fraction": 0.7705735564231873, "avg_line_length": 29.846153259277344, "blob_id": "f121d86c2dedbbb42df04d356642aa85a3de63f1", "content_id": "06fc1958210382844cff851076fda04797a46d7e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 401, "license_type": "no_license", "max_line_length": 76, "num_lines": 13, "path": "/titles/urls.py", "repo_name": "bitcoineazy/yamdb_final", "src_encoding": "UTF-8", "text": "from django.urls import include, path\nfrom rest_framework.routers import DefaultRouter\n\nfrom . import views\n\nrouter = DefaultRouter()\nrouter.register(r'categories', views.CategoryViewSet, basename='categories')\nrouter.register(r'titles', views.TitleViewSet, basename='titles')\nrouter.register(r'genres', views.GenreViewSet, basename='genres')\n\nurlpatterns = [\n path('v1/', include(router.urls)),\n]\n" }, { "alpha_fraction": 0.7172312140464783, "alphanum_fraction": 0.7304860353469849, "avg_line_length": 35.702701568603516, "blob_id": "fcf3e4a7426c59a7ca5bf6c6c872e9b0e25cab8a", "content_id": "8c7d35f8778a54d48c31ba032fbda38175e9d15a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1358, "license_type": "no_license", "max_line_length": 71, "num_lines": 37, "path": "/reviews/views.py", "repo_name": "bitcoineazy/yamdb_final", "src_encoding": "UTF-8", "text": "from django.shortcuts import get_object_or_404\nfrom rest_framework import viewsets\nfrom rest_framework.permissions import IsAuthenticatedOrReadOnly\n\nfrom titles.models import Title\n\nfrom .models import Review\nfrom .permissions import IsAbleToChange\nfrom .serializers import CommentSerializer, ReviewSerializer\n\n\nclass ReviewViewSet(viewsets.ModelViewSet):\n serializer_class = ReviewSerializer\n permission_classes = (IsAbleToChange, IsAuthenticatedOrReadOnly)\n\n def get_queryset(self):\n title = get_object_or_404(Title, id=self.kwargs['title_id'])\n return title.reviews.all()\n\n def perform_create(self, serializer):\n title_id = get_object_or_404(Title, id=self.kwargs['title_id'])\n serializer.save(title_id=title_id, author=self.request.user)\n\n\nclass CommentViewSet(viewsets.ModelViewSet):\n permission_classes = (IsAbleToChange, IsAuthenticatedOrReadOnly)\n serializer_class = CommentSerializer\n\n def get_queryset(self):\n title = get_object_or_404(Title, id=self.kwargs['title_id'])\n review = get_object_or_404(\n Review, title_id=title, id=self.kwargs['review_id'])\n return review.comments.all()\n\n def perform_create(self, serializer):\n review = get_object_or_404(Review, id=self.kwargs['review_id'])\n serializer.save(review_id=review, author=self.request.user)\n" }, { "alpha_fraction": 0.6386554837226868, "alphanum_fraction": 0.6386554837226868, "avg_line_length": 21.66666603088379, "blob_id": "17027ab0306fb3fff5d042a2195f29415fc46705", "content_id": "e2ea2c20d66d915040cd3eabb95e597266da85fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 476, "license_type": "no_license", "max_line_length": 51, "num_lines": 21, "path": "/titles/resources.py", "repo_name": "bitcoineazy/yamdb_final", "src_encoding": "UTF-8", "text": "from import_export import resources\n\nfrom .models import Category, Genre, Title\n\n\nclass CategoryResource(resources.ModelResource):\n class Meta:\n model = Category\n fields = ('id', 'name', 'slug')\n\n\nclass GenreResource(resources.ModelResource):\n class Meta:\n model = 
Genre\n fields = ('id', 'name', 'slug')\n\n\nclass TitleResource(resources.ModelResource):\n class Meta:\n model = Title\n fields = ('id', 'name', 'year', 'category')\n" }, { "alpha_fraction": 0.6809248328208923, "alphanum_fraction": 0.6809248328208923, "avg_line_length": 34.306121826171875, "blob_id": "da9e730e0664daab096ed28eed15991ed0bfe633", "content_id": "dfa325eb17cdbc60a7985b424188960d552c7159", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1730, "license_type": "no_license", "max_line_length": 63, "num_lines": 49, "path": "/titles/views.py", "repo_name": "bitcoineazy/yamdb_final", "src_encoding": "UTF-8", "text": "from django_filters.rest_framework import DjangoFilterBackend\nfrom rest_framework import filters, mixins, viewsets\nfrom django.db.models import Avg\n\nfrom .filters import SlugRangeFilter\nfrom .models import Category, Genre, Title\nfrom .permissions import AdminOrReadOnly\nfrom .serializers import (CategorySerializer, GenreSerializer,\n TitleSafeSerializer, TitleSerializer)\n\n\nclass CategoryViewSet(mixins.CreateModelMixin,\n mixins.DestroyModelMixin,\n mixins.ListModelMixin,\n viewsets.GenericViewSet):\n queryset = Category.objects.all()\n permission_classes = [AdminOrReadOnly]\n serializer_class = CategorySerializer\n filter_backends = [filters.SearchFilter]\n search_fields = ['name', ]\n lookup_field = 'slug'\n\n\nclass GenreViewSet(mixins.CreateModelMixin,\n mixins.DestroyModelMixin,\n mixins.ListModelMixin,\n viewsets.GenericViewSet):\n queryset = Genre.objects.all()\n permission_classes = [AdminOrReadOnly]\n serializer_class = GenreSerializer\n filter_backends = [filters.SearchFilter]\n search_fields = ['name', ]\n lookup_field = 'slug'\n\n\nclass TitleViewSet(viewsets.ModelViewSet):\n queryset = Title.objects.annotate(\n rating=Avg('reviews__score')).order_by('id')\n permission_classes = [AdminOrReadOnly]\n serializer_class = TitleSafeSerializer\n filterset_class = SlugRangeFilter\n filter_backends = [DjangoFilterBackend]\n\n def get_serializer_class(self):\n method = self.request.method\n if method in ['POST', 'PATCH']:\n return TitleSerializer\n if method == 'GET':\n return TitleSafeSerializer\n" }, { "alpha_fraction": 0.7034631967544556, "alphanum_fraction": 0.7034631967544556, "avg_line_length": 27.875, "blob_id": "cddbd09ea1e4f3270dc1002b46258c1ae0436a72", "content_id": "7161581a454174db5862ccb305a9774a73cc923b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 462, "license_type": "no_license", "max_line_length": 69, "num_lines": 16, "path": "/users/admin.py", "repo_name": "bitcoineazy/yamdb_final", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin\nfrom import_export.admin import ImportMixin\n\nfrom .models import User\nfrom .resources import UserResource\n\n\[email protected](User)\nclass CustomUserAdmin(ImportMixin, UserAdmin):\n list_display = ('username', 'email', 'role', 'confirmation_code',\n 'first_name', 'last_name')\n readonly_fields = [\n 'date_joined',\n ]\n resource_class = UserResource\n" }, { "alpha_fraction": 0.6683416962623596, "alphanum_fraction": 0.6683416962623596, "avg_line_length": 21.11111068725586, "blob_id": "04dc083e091edc65f5963fe9d0bbc3dc4232dd48", "content_id": "1c7eab4196630b0e2d1d05b8653eb56cd56d555e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 199, "license_type": "no_license", 
"max_line_length": 52, "num_lines": 9, "path": "/users/resources.py", "repo_name": "bitcoineazy/yamdb_final", "src_encoding": "UTF-8", "text": "from import_export import resources\n\nfrom .models import User\n\n\nclass UserResource(resources.ModelResource):\n class Meta:\n model = User\n fields = ('id', 'username', 'email', 'role')\n" }, { "alpha_fraction": 0.6068702340126038, "alphanum_fraction": 0.6068702340126038, "avg_line_length": 31.75, "blob_id": "6df0f95179eac9c0d35919198579bdc452a92a35", "content_id": "79e282f64df00a68d5afa5ec6a5fc60922140f09", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1310, "license_type": "no_license", "max_line_length": 77, "num_lines": 40, "path": "/titles/serializers.py", "repo_name": "bitcoineazy/yamdb_final", "src_encoding": "UTF-8", "text": "from rest_framework import serializers\n\nfrom .models import Category, Genre, Title\n\n\nclass CategorySerializer(serializers.ModelSerializer):\n class Meta:\n fields = ('name', 'slug')\n model = Category\n\n\nclass GenreSerializer(serializers.ModelSerializer):\n class Meta:\n fields = ('name', 'slug')\n model = Genre\n\n\nclass TitleSafeSerializer(serializers.ModelSerializer):\n category = CategorySerializer(required=False, read_only=True)\n genre = GenreSerializer(Genre, required=False, many=True, read_only=True)\n rating = serializers.FloatField(read_only=True)\n\n class Meta:\n fields = ('id', 'name', 'year', 'rating', 'description', 'genre',\n 'category')\n model = Title\n\n\nclass TitleSerializer(serializers.ModelSerializer):\n category = serializers.SlugRelatedField(queryset=Category.objects.all(),\n slug_field='slug')\n genre = serializers.SlugRelatedField(queryset=Genre.objects.all(),\n many=True,\n slug_field='slug')\n rating = serializers.FloatField(read_only=True)\n\n class Meta:\n fields = ('id', 'name', 'year', 'rating', 'description', 'genre',\n 'category')\n model = Title\n" }, { "alpha_fraction": 0.6771844625473022, "alphanum_fraction": 0.6771844625473022, "avg_line_length": 30.69230842590332, "blob_id": "5122f5335418c3b6dae8e8967e9faed61395e0ee", "content_id": "c5b58fcb0a094e8a86fd38b6fbd486817376824f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 412, "license_type": "no_license", "max_line_length": 56, "num_lines": 13, "path": "/titles/permissions.py", "repo_name": "bitcoineazy/yamdb_final", "src_encoding": "UTF-8", "text": "from rest_framework import permissions\n\nfrom .models import User\n\n\nclass AdminOrReadOnly(permissions.BasePermission):\n def has_permission(self, request, view):\n if request.method in permissions.SAFE_METHODS:\n return True\n if not request.user.is_authenticated:\n return False\n return (request.user.role == User.UserRole.ADMIN\n or request.user.is_staff)\n" }, { "alpha_fraction": 0.5020297765731812, "alphanum_fraction": 0.7009472250938416, "avg_line_length": 15.795454978942871, "blob_id": "e2f0ade2126d7f66282a98dd30a947926fe89a66", "content_id": "8040fceea681d30e8646f044114f17a8679cef9f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 739, "license_type": "no_license", "max_line_length": 36, "num_lines": 44, "path": "/requirements.txt", "repo_name": "bitcoineazy/yamdb_final", "src_encoding": "UTF-8", "text": 
"asgiref==3.2.7\natomicwrites==1.4.0\nattrs==19.3.0\ncertifi==2020.4.5.1\nchardet==3.0.4\ncolorama==0.4.4\ndefusedxml==0.7.1\ndiff-match-patch==20200713\nDjango==3.0.5\ndjango-filter==2.4.0\ndjango-import-export==2.5.0\ndjangorestframework==3.11.0\ndjangorestframework-simplejwt==4.6.0\net-xmlfile==1.0.1\nidna==2.9\nimportlib-metadata==1.6.0\niniconfig==1.1.1\nisort==5.8.0\nMarkupPy==1.14\nmore-itertools==8.2.0\nodfpy==1.4.1\nopenpyxl==3.0.7\npackaging==20.3\npluggy==0.13.1\npy==1.8.1\nPyJWT==2.0.1\npyparsing==2.4.7\npytest==5.4.1\npytest-django==3.9.0\npytils==0.3\npsycopg2-binary==2.8.6\npytz==2019.3\nPyYAML==5.4.1\nrequests==2.23.0\nsix==1.14.0\nsqlparse==0.3.1\ntablib==3.0.0\ntoml==0.10.2\nurllib3==1.25.9\nwcwidth==0.1.9\nxlrd==2.0.1\nxlwt==1.3.0\nzipp==3.1.0\ngunicorn\n" }, { "alpha_fraction": 0.7348394989967346, "alphanum_fraction": 0.7348394989967346, "avg_line_length": 29.035715103149414, "blob_id": "8d21f7bcf45f34bed330d52c993049c7b098c7a7", "content_id": "2b2dea63d9b08e2e2398e6397f3b489203d441cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 856, "license_type": "no_license", "max_line_length": 69, "num_lines": 28, "path": "/titles/admin.py", "repo_name": "bitcoineazy/yamdb_final", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom import_export.admin import ImportMixin\n\nfrom .models import Category, Genre, Title\nfrom .resources import CategoryResource, GenreResource, TitleResource\n\n\nclass CategoryAdmin(ImportMixin, admin.ModelAdmin):\n list_display = ('pk', 'name', 'slug')\n empty_value_display = '-ะฟัƒัั‚ะพ-'\n resource_class = CategoryResource\n\n\nclass GenreAdmin(ImportMixin, admin.ModelAdmin):\n list_display = ('pk', 'name', 'slug')\n empty_value_display = '-ะฟัƒัั‚ะพ-'\n resource_class = GenreResource\n\n\nclass TitleAdmin(ImportMixin, admin.ModelAdmin):\n list_display = ('pk', 'name', 'year', 'description', 'category')\n empty_value_display = '-ะฟัƒัั‚ะพ-'\n resource_class = TitleResource\n\n\nadmin.site.register(Category, CategoryAdmin)\nadmin.site.register(Genre, GenreAdmin)\nadmin.site.register(Title, TitleAdmin)\n" }, { "alpha_fraction": 0.5960307121276855, "alphanum_fraction": 0.601792573928833, "avg_line_length": 27.925926208496094, "blob_id": "9064cc3e67b1f93de797f4a30018b92db6adb274", "content_id": "f8d510ad829bf65b162fac9b74921127ee61e7b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1629, "license_type": "no_license", "max_line_length": 69, "num_lines": 54, "path": "/titles/models.py", "repo_name": "bitcoineazy/yamdb_final", "src_encoding": "UTF-8", "text": "from django.contrib.auth import get_user_model\nfrom django.db import models\nfrom pytils.translit import slugify\n\nUser = get_user_model()\n\n\nclass Category(models.Model):\n name = models.CharField('ะะฐะทะฒะฐะฝะธะต ะบะฐั‚ะตะณะพั€ะธะธ', max_length=100)\n slug = models.SlugField(unique=True, blank=True, null=True)\n\n def save(self, *args, **kwargs):\n if self.slug in ('', None):\n self.slug = slugify(self.name)\n super(Category, self).save(*args, **kwargs)\n\n def __str__(self):\n return self.name\n\n class Meta:\n ordering = ['slug']\n\n\nclass Genre(models.Model):\n name = models.CharField('ะะฐะทะฒะฐะฝะธะต ะถะฐะฝั€ะฐ', max_length=100)\n slug = models.SlugField(unique=True, blank=True, null=True)\n\n def save(self, *args, **kwargs):\n if self.slug in ('', None):\n self.slug = slugify(self.name)\n super(Genre, self).save(*args, **kwargs)\n\n def 
__str__(self):\n        return self.name\n\n    class Meta:\n        ordering = ['slug']\n\n\nclass Title(models.Model):\n    name = models.CharField('Title name', max_length=100)\n    year = models.IntegerField('Release year', blank=True, null=True)\n    description = models.TextField('Description', blank=True, null=True)\n    category = models.ForeignKey(Category,\n                                 related_name='titles',\n                                 on_delete=models.SET_NULL,\n                                 blank=True, null=True)\n    genre = models.ManyToManyField(Genre)\n\n    def __str__(self):\n        return self.name\n\n    class Meta:\n        ordering = ['id']\n" }, { "alpha_fraction": 0.6233659982681274, "alphanum_fraction": 0.6282680034637451, "avg_line_length": 33.97142791748047, "blob_id": "e9fe154846bf549f14d26f1905c1ef4f4d5793ee", "content_id": "710f2aaf70f106629a85e54808c577bccf985205", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1240, "license_type": "no_license", "max_line_length": 72, "num_lines": 35, "path": "/reviews/serializers.py", "repo_name": "bitcoineazy/yamdb_final", "src_encoding": "UTF-8", "text": "from django.shortcuts import get_object_or_404\nfrom rest_framework import serializers\n\nfrom .models import Comment, Review, Title, User\n\n\nclass ReviewSerializer(serializers.ModelSerializer):\n    author = serializers.SlugRelatedField(\n        queryset=User.objects.all(),\n        slug_field='username', default=serializers.CurrentUserDefault())\n\n    class Meta:\n        fields = ('id', 'author', 'score', 'text', 'pub_date')\n        model = Review\n\n    def validate(self, data):\n        author = self.context['request'].user\n        title_id = get_object_or_404(\n            Title, id=self.context[\n                'request'].parser_context['kwargs'].get('title_id'))\n        if ((self.context['request'].method == 'POST'\n                and not Review.objects.filter(\n                    title_id=title_id, author=author).exists())\n                or self.context['request'].method == 'PATCH'):\n            return data\n        raise serializers.ValidationError('You have already left a review')\n\n\nclass CommentSerializer(serializers.ModelSerializer):\n    author = serializers.SlugRelatedField(\n        slug_field='username', read_only=True)\n\n    class Meta:\n        fields = ('id', 'author', 'text', 'pub_date')\n        model = Comment\n" }, { "alpha_fraction": 0.7107142806053162, "alphanum_fraction": 0.7133928537368774, "avg_line_length": 39, "blob_id": "df226a8ca05f8aea29b6c651f05b03c3290af123", "content_id": "ecaacc33450fb5f6e6d169ab8652378f25e5fa0f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1120, "license_type": "no_license", "max_line_length": 79, "num_lines": 28, "path": "/users/views.py", "repo_name": "bitcoineazy/yamdb_final", "src_encoding": "UTF-8", "text": "from rest_framework import filters, permissions, status, viewsets\nfrom rest_framework.decorators import action\nfrom rest_framework.response import Response\n\nfrom .models import User\nfrom .permissions import IsYAMDBAdministrator\nfrom .serializers import UserSerializer\n\n\nclass UserList(viewsets.ModelViewSet):\n    queryset = User.objects.all()\n    serializer_class = UserSerializer\n    lookup_field = 'username'\n    permission_classes = [IsYAMDBAdministrator]\n    filter_backends = [filters.SearchFilter]\n    search_fields = ['username', ]\n\n    @action(detail=False, methods=['get', 'put', 'patch'], url_path='me',\n            permission_classes=[permissions.IsAuthenticated])\n    def me(self, request):\n        user = self.request.user\n        if request.method == \"GET\":\n            serializer = self.get_serializer(user)\n            return Response(serializer.data)\n        serializer = 
self.get_serializer(user, data=request.data, partial=True)\n        serializer.is_valid(raise_exception=True)\n        serializer.save(role=user.role, partial=True)\n        return Response(serializer.data, status=status.HTTP_200_OK)\n" }, { "alpha_fraction": 0.7520581483840942, "alphanum_fraction": 0.7815587520599365, "avg_line_length": 58.23577117919922, "blob_id": "51353045ca43b112d52412570136d8a31283219f", "content_id": "d8a9e2c70ba9fa626e99eceab74104f42e6c27f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 11073, "license_type": "no_license", "max_line_length": 347, "num_lines": 123, "path": "/README.md", "repo_name": "bitcoineazy/yamdb_final", "src_encoding": "UTF-8", "text": "# YaMDB\n\n![yamdb_workflow](https://github.com/bitcoineazy/yamdb_final/actions/workflows/yamdb_workflow.yml/badge.svg)\n\n[![docker](https://img.shields.io/badge/-Docker-464646??style=flat-square&logo=docker)](https://www.docker.com/)\n[![PostgreSQL](https://img.shields.io/badge/-PostgreSQL-464646??style=flat-square&logo=PostgreSQL)](https://www.postgresql.org/)\n[![NGINX](https://img.shields.io/badge/-NGINX-464646??style=flat-square&logo=NGINX)](https://nginx.org/ru/)\n[![Python](https://img.shields.io/badge/-Python-464646??style=flat-square&logo=Python)](https://www.python.org/)\n[![Django](https://img.shields.io/badge/-Django-464646??style=flat-square&logo=Django)](https://www.djangoproject.com/)\n[![GitHub%20Actions](https://img.shields.io/badge/-GitHub%20Actions-464646??style=flat-square&logo=GitHub%20actions)](https://github.com/features/actions)\n[![GitHub](https://img.shields.io/badge/-GitHub-464646??style=flat-square&logo=GitHub)](https://github.com/)\n\n\n***REST API*** for collecting user reviews of creative works.<br>\n* The resource is available at [62.84.112.39/admin/](http://62.84.112.39/admin/), [62.84.112.39/redoc/](http://62.84.112.39/redoc/)\n* Login credentials for the admin interface: ```username:pass = admin:admin```\n\nAPI endpoints of the service:\n* http://62.84.112.39/api/v1/auth/token/\n* http://62.84.112.39/api/v1/users/\n* http://62.84.112.39/api/v1/titles/\n* http://62.84.112.39/api/v1/genres/\n* http://62.84.112.39/api/v1/categories/\n* http://62.84.112.39/api/v1/titles/{title_id}/reviews/\n* http://62.84.112.39/api/v1/titles/{title_id}/reviews/{review_id}/\n* http://62.84.112.39/api/v1/titles/{title_id}/reviews/{review_id}/comments/\n\n\n# Installing and running the service\n \n1. Install: [docker](https://www.docker.com/get-started), [docker-compose](https://docs.docker.com/compose/install/)\n2. Build the database from the resources: ```sudo docker-compose exec web python manage.py makemigrations && sudo docker-compose exec web python manage.py migrate```\n3. Create an administrator account: ```sudo docker-compose exec web python manage.py createsuperuser```\n4. Collect the static files: ```sudo docker-compose exec web python manage.py collectstatic```\n5. New database records can be created through the admin interface [0.0.0.0/admin](https://0.0.0.0/admin)\n6. 
ะกะพะฑั€ะฐั‚ัŒ ะฟั€ะพะตะบั‚ ะธ ะทะฐะฟัƒัั‚ะธั‚ัŒ: ```docker-compose up --build```\n\n# ะ”ะพะบัƒะผะตะฝั‚ะฐั†ะธั\n\n\n- ะะฐั…ะพะดะธั‚ัั ะฟะพ ะฐะดั€ะตััƒ [0.0.0.0/redoc](https://0.0.0.0/redoc/) ะธะปะธ ะฝะฐ ัะตั€ะฒะตั€ะต [62.84.112.39/redoc/](http://62.84.112.39/redoc/)\n- ะšะฐะถะดั‹ะน ั€ะตััƒั€ั ะพะฟะธัะฐะฝ ะฒ ะดะพะบัƒะผะตะฝั‚ะฐั†ะธะธ: ัƒะบะฐะทะฐะฝั‹ ัะฝะดะฟะพะธะฝั‚ั‹ (ะฐะดั€ะตัะฐ, ะฟะพ ะบะพั‚ะพั€ั‹ะผ ะผะพะถะฝะพ ัะดะตะปะฐั‚ัŒ ะทะฐะฟั€ะพั), ั€ะฐะทั€ะตัˆั‘ะฝะฝั‹ะต ั‚ะธะฟั‹ ะทะฐะฟั€ะพัะพะฒ, ะฟั€ะฐะฒะฐ ะดะพัั‚ัƒะฟะฐ ะธ ะดะพะฟะพะปะฝะธั‚ะตะปัŒะฝั‹ะต ะฟะฐั€ะฐะผะตั‚ั€ั‹, ะตัะปะธ ัั‚ะพ ะฝะตะพะฑั…ะพะดะธะผะพ.\n\n# ะ ะตััƒั€ัั‹ API\n\n- auth: ะฐัƒั‚ะตะฝั‚ะธั„ะธะบะฐั†ะธั.\n- users: ะฟะพะปัŒะทะพะฒะฐั‚ะตะปะธ.\n- titles: ะฟั€ะพะธะทะฒะตะดะตะฝะธั, ะบ ะบะพั‚ะพั€ั‹ะผ ะฟะธัˆัƒั‚ ะพั‚ะทั‹ะฒั‹ (ะพะฟั€ะตะดะตะปั‘ะฝะฝั‹ะน ั„ะธะปัŒะผ, ะบะฝะธะณะฐ ะธะปะธ ะฟะตัะตะฝะบะฐ).\n- categories: ะบะฐั‚ะตะณะพั€ะธะธ (ั‚ะธะฟั‹) ะฟั€ะพะธะทะฒะตะดะตะฝะธะน (ยซะคะธะปัŒะผั‹ยป, ยซะšะฝะธะณะธยป, ยซะœัƒะทั‹ะบะฐยป).\n- genres: ะถะฐะฝั€ั‹ ะฟั€ะพะธะทะฒะตะดะตะฝะธะน. ะžะดะฝะพ ะฟั€ะพะธะทะฒะตะดะตะฝะธะต ะผะพะถะตั‚ ะฑั‹ั‚ัŒ ะฟั€ะธะฒัะทะฐะฝะพ ะบ ะฝะตัะบะพะปัŒะบะธะผ ะถะฐะฝั€ะฐะผ.\n- reviews: ะพั‚ะทั‹ะฒั‹ ะฝะฐ ะฟั€ะพะธะทะฒะตะดะตะฝะธั. ะžั‚ะทั‹ะฒ ะฟั€ะธะฒัะทะฐะฝ ะบ ะพะฟั€ะตะดะตะปั‘ะฝะฝะพะผัƒ ะฟั€ะพะธะทะฒะตะดะตะฝะธัŽ.\n- comments: ะบะพะผะผะตะฝั‚ะฐั€ะธะธ ะบ ะพั‚ะทั‹ะฒะฐะผ. ะšะพะผะผะตะฝั‚ะฐั€ะธะน ะฟั€ะธะฒัะทะฐะฝ ะบ ะพะฟั€ะตะดะตะปั‘ะฝะฝะพะผัƒ ะพั‚ะทั‹ะฒัƒ.\n\nะžั‚ะทั‹ะฒั‹:\n+ ะฟะพะปัƒั‡ะธั‚ัŒ ัะฟะธัะพะบ ะฒัะตั… ะพั‚ะทั‹ะฒะพะฒ;\n+ ัะพะทะดะฐั‚ัŒ ะฝะพะฒั‹ะน ะพั‚ะทั‹ะฒ;\n+ ะฟะพะปัƒั‡ะธั‚ัŒ ะพั‚ะทั‹ะฒ ะฟะพ id;\n+ ั‡ะฐัั‚ะธั‡ะฝะพ ะพะฑะฝะพะฒะธั‚ัŒ ะพั‚ะทั‹ะฒ ะฟะพ id;\n+ ัƒะดะฐะปะธั‚ัŒ ะพั‚ะทั‹ะฒ ะฟะพ id.\n\nะšะพะผะผะตะฝั‚ะฐั€ะธะธ ะบ ะพั‚ะทั‹ะฒะฐะผ:\n\n+ ะŸะพะปัƒั‡ะธั‚ัŒ ัะฟะธัะพะบ ะฒัะตั… ะบะพะผะผะตะฝั‚ะฐั€ะธะตะฒ ะบ ะพั‚ะทั‹ะฒัƒ ะฟะพ id;\n+ ัะพะทะดะฐั‚ัŒ ะฝะพะฒั‹ะน ะบะพะผะผะตะฝั‚ะฐั€ะธะน ะดะปั ะพั‚ะทั‹ะฒะฐ, ะฟะพะปัƒั‡ะธั‚ัŒ ะบะพะผะผะตะฝั‚ะฐั€ะธะน ะดะปั ะพั‚ะทั‹ะฒะฐ ะฟะพ id;\n+ ั‡ะฐัั‚ะธั‡ะฝะพ ะพะฑะฝะพะฒะธั‚ัŒ ะบะพะผะผะตะฝั‚ะฐั€ะธะน ะบ ะพั‚ะทั‹ะฒัƒ ะฟะพ id;\n+ ัƒะดะฐะปะธั‚ัŒ ะบะพะผะผะตะฝั‚ะฐั€ะธะน ะบ ะพั‚ะทั‹ะฒัƒ ะฟะพ id.\n\nJWT-ั‚ะพะบะตะฝ:\n\n+ ะžั‚ะฟั€ะฐะฒะปะตะฝะธะต confirmation_code ะฝะฐ ะฟะตั€ะตะดะฐะฝะฝั‹ะน email;\n+ ะฟะพะปัƒั‡ะตะฝะธะต JWT-ั‚ะพะบะตะฝะฐ ะฒ ะพะฑะผะตะฝ ะฝะฐ email ะธ confirmation_code.\n\nะŸะพะปัŒะทะพะฒะฐั‚ะตะปะธ:\n\n+ ะฟะพะปัƒั‡ะธั‚ัŒ ัะฟะธัะพะบ ะฒัะตั… ะฟะพะปัŒะทะพะฒะฐั‚ะตะปะตะน;\n+ ัะพะทะดะฐะฝะธะต ะฟะพะปัŒะทะพะฒะฐั‚ะตะปั ะฟะพะปัƒั‡ะธั‚ัŒ ะฟะพะปัŒะทะพะฒะฐั‚ะตะปั ะฟะพ username;\n+ ะธะทะผะตะฝะธั‚ัŒ ะดะฐะฝะฝั‹ะต ะฟะพะปัŒะทะพะฒะฐั‚ะตะปั ะฟะพ username;\n+ ัƒะดะฐะปะธั‚ัŒ ะฟะพะปัŒะทะพะฒะฐั‚ะตะปั ะฟะพ username;\n+ ะฟะพะปัƒั‡ะธั‚ัŒ ะดะฐะฝะฝั‹ะต ัะฒะพะตะน ัƒั‡ะตั‚ะฝะพะน ะทะฐะฟะธัะธ;\n+ ะธะทะผะตะฝะธั‚ัŒ ะดะฐะฝะฝั‹ะต ัะฒะพะตะน ัƒั‡ะตั‚ะฝะพะน ะทะฐะฟะธัะธ.\n\nะšะฐั‚ะตะณะพั€ะธะธ (ั‚ะธะฟั‹) ะฟั€ะพะธะทะฒะตะดะตะฝะธะน:\n\n+ ะฟะพะปัƒั‡ะธั‚ัŒ ัะฟะธัะพะบ ะฒัะตั… ะบะฐั‚ะตะณะพั€ะธะน;\n+ ัะพะทะดะฐั‚ัŒ ะบะฐั‚ะตะณะพั€ะธัŽ;\n+ ัƒะดะฐะปะธั‚ัŒ ะบะฐั‚ะตะณะพั€ะธัŽ.\n\nะšะฐั‚ะตะณะพั€ะธะธ ะถะฐะฝั€ะพะฒ:\n\n+ ะฟะพะปัƒั‡ะธั‚ัŒ ัะฟะธัะพะบ ะฒัะตั… ะถะฐะฝั€ะพะฒ\n+ ัะพะทะดะฐั‚ัŒ ะถะฐะฝั€;\n+ ัƒะดะฐะปะธั‚ัŒ ะถะฐะฝั€.\n\nะŸั€ะพะธะทะฒะตะดะตะฝะธั, ะบ ะบะพั‚ะพั€ั‹ะผ ะฟะธัˆัƒั‚ ะพั‚ะทั‹ะฒั‹:\n\n+ ะฟะพะปัƒั‡ะธั‚ัŒ ัะฟะธัะพะบ ะฒัะตั… ะพะฑัŠะตะบั‚ะพะฒ;\n+ ัะพะทะดะฐั‚ัŒ ะฟั€ะพะธะทะฒะตะดะตะฝะธะต ะดะปั ะพั‚ะทั‹ะฒะพะฒ;\n+ ะธะฝั„ะพั€ะผะฐั†ะธั ะพะฑ ะพะฑัŠะตะบั‚ะต;\n+ ะพะฑะฝะพะฒะธั‚ัŒ ะธะฝั„ะพั€ะผะฐั†ะธัŽ ะพะฑ ะพะฑัŠะตะบั‚ะต;\n+ ัƒะดะฐะปะธั‚ัŒ ะฟั€ะพะธะทะฒะตะดะตะฝะธะต.\n\nะŸะพะปัŒะทะพะฒะฐั‚ะตะปัŒัะบะธะต ั€ะพะปะธ\n- ะะฝะพะฝะธะผ โ€” 
ะผะพะถะตั‚ ะฟั€ะพัะผะฐั‚ั€ะธะฒะฐั‚ัŒ ะพะฟะธัะฐะฝะธั ะฟั€ะพะธะทะฒะตะดะตะฝะธะน, ั‡ะธั‚ะฐั‚ัŒ ะพั‚ะทั‹ะฒั‹ ะธ ะบะพะผะผะตะฝั‚ะฐั€ะธะธ.\nะัƒั‚ะตะฝั‚ะธั„ะธั†ะธั€ะพะฒะฐะฝะฝั‹ะน ะฟะพะปัŒะทะพะฒะฐั‚ะตะปัŒ (user) โ€” ะผะพะถะตั‚ ั‡ะธั‚ะฐั‚ัŒ ะฒัั‘, ะบะฐะบ ะธ ะะฝะพะฝะธะผ, ะผะพะถะตั‚ ะฟัƒะฑะปะธะบะพะฒะฐั‚ัŒ ะพั‚ะทั‹ะฒั‹ ะธ ัั‚ะฐะฒะธั‚ัŒ ะพั†ะตะฝะบะธ ะฟั€ะพะธะทะฒะตะดะตะฝะธัะผ (ั„ะธะปัŒะผะฐะผ/ะบะฝะธะณะฐะผ/ะฟะตัะตะฝะบะฐะผ), ะผะพะถะตั‚ ะบะพะผะผะตะฝั‚ะธั€ะพะฒะฐั‚ัŒ ะพั‚ะทั‹ะฒั‹; ะผะพะถะตั‚ ั€ะตะดะฐะบั‚ะธั€ะพะฒะฐั‚ัŒ ะธ ัƒะดะฐะปัั‚ัŒ ัะฒะพะธ ะพั‚ะทั‹ะฒั‹ ะธ ะบะพะผะผะตะฝั‚ะฐั€ะธะธ, ั€ะตะดะฐะบั‚ะธั€ะพะฒะฐั‚ัŒ ัะฒะพะธ ะพั†ะตะฝะบะธ ะฟั€ะพะธะทะฒะตะดะตะฝะธะน. ะญั‚ะฐ ั€ะพะปัŒ ะฟั€ะธัะฒะฐะธะฒะฐะตั‚ัั ะฟะพ ัƒะผะพะปั‡ะฐะฝะธัŽ ะบะฐะถะดะพะผัƒ ะฝะพะฒะพะผัƒ ะฟะพะปัŒะทะพะฒะฐั‚ะตะปัŽ.\n- ะœะพะดะตั€ะฐั‚ะพั€ (moderator) โ€” ั‚ะต ะถะต ะฟั€ะฐะฒะฐ, ั‡ั‚ะพ ะธ ัƒ ะัƒั‚ะตะฝั‚ะธั„ะธั†ะธั€ะพะฒะฐะฝะฝะพะณะพ ะฟะพะปัŒะทะพะฒะฐั‚ะตะปั, ะฟะปัŽั ะฟั€ะฐะฒะพ ัƒะดะฐะปัั‚ัŒ ะธ ั€ะตะดะฐะบั‚ะธั€ะพะฒะฐั‚ัŒ ะปัŽะฑั‹ะต ะพั‚ะทั‹ะฒั‹ ะธ ะบะพะผะผะตะฝั‚ะฐั€ะธะธ.\n- ะะดะผะธะฝะธัั‚ั€ะฐั‚ะพั€ (admin) โ€” ะฟะพะปะฝั‹ะต ะฟั€ะฐะฒะฐ ะฝะฐ ัƒะฟั€ะฐะฒะปะตะฝะธะต ะฒัะตะผ ะบะพะฝั‚ะตะฝั‚ะพะผ ะฟั€ะพะตะบั‚ะฐ. ะœะพะถะตั‚ ัะพะทะดะฐะฒะฐั‚ัŒ ะธ ัƒะดะฐะปัั‚ัŒ ะฟั€ะพะธะทะฒะตะดะตะฝะธั, ะบะฐั‚ะตะณะพั€ะธะธ ะธ ะถะฐะฝั€ั‹. ะœะพะถะตั‚ ะฝะฐะทะฝะฐั‡ะฐั‚ัŒ ั€ะพะปะธ ะฟะพะปัŒะทะพะฒะฐั‚ะตะปัะผ.\n- ะกัƒะฟะตั€ัŽะทะตั€ Django ะดะพะปะถะตะฝ ะฒัะตะณะดะฐ ะพะฑะปะฐะดะฐั‚ัŒ ะฟั€ะฐะฒะฐะผะธ ะฐะดะผะธะฝะธัั‚ั€ะฐั‚ะพั€ะฐ, ะฟะพะปัŒะทะพะฒะฐั‚ะตะปั ั ะฟั€ะฐะฒะฐะผะธ admin. ะ”ะฐะถะต ะตัะปะธ ะธะทะผะตะฝะธั‚ัŒ ะฟะพะปัŒะทะพะฒะฐั‚ะตะปัŒัะบัƒัŽ ั€ะพะปัŒ ััƒะฟะตั€ัŽะทะตั€ะฐ โ€” ัั‚ะพ ะฝะต ะปะธัˆะธั‚ ะตะณะพ ะฟั€ะฐะฒ ะฐะดะผะธะฝะธัั‚ั€ะฐั‚ะพั€ะฐ. ะกัƒะฟะตั€ัŽะทะตั€ โ€” ะฒัะตะณะดะฐ ะฐะดะผะธะฝะธัั‚ั€ะฐั‚ะพั€, ะฝะพ ะฐะดะผะธะฝะธัั‚ั€ะฐั‚ะพั€ โ€” ะฝะต ะพะฑัะทะฐั‚ะตะปัŒะฝะพ ััƒะฟะตั€ัŽะทะตั€.\n\nะกะฐะผะพัั‚ะพัั‚ะตะปัŒะฝะฐั ั€ะตะณะธัั‚ั€ะฐั†ะธั ะฝะพะฒั‹ั… ะฟะพะปัŒะทะพะฒะฐั‚ะตะปะตะน\n1. ะŸะพะปัŒะทะพะฒะฐั‚ะตะปัŒ ะพั‚ะฟั€ะฐะฒะปัะตั‚ POST-ะทะฐะฟั€ะพั ั ะฟะฐั€ะฐะผะตั‚ั€ะฐะผะธ email ะธ username ะฝะฐ ัะฝะดะฟะพะธะฝั‚ /api/v1/auth/signup/.\n2. ะกะตั€ะฒะธั YaMDB ะพั‚ะฟั€ะฐะฒะปัะตั‚ ะฟะธััŒะผะพ ั ะบะพะดะพะผ ะฟะพะดั‚ะฒะตั€ะถะดะตะฝะธั (confirmation_code) ะฝะฐ ัƒะบะฐะทะฐะฝะฝั‹ะน ะฐะดั€ะตั email.\nะŸะพะปัŒะทะพะฒะฐั‚ะตะปัŒ ะพั‚ะฟั€ะฐะฒะปัะตั‚ POST-ะทะฐะฟั€ะพั ั ะฟะฐั€ะฐะผะตั‚ั€ะฐะผะธ username ะธ confirmation_code ะฝะฐ ัะฝะดะฟะพะธะฝั‚ /api/v1/auth/token/, ะฒ ะพั‚ะฒะตั‚ะต ะฝะฐ ะทะฐะฟั€ะพั ะตะผัƒ ะฟั€ะธั…ะพะดะธั‚ token (JWT-ั‚ะพะบะตะฝ).\n- ะŸะพะปัŒะทะพะฒะฐั‚ะตะปั ะผะพะถะตั‚ ัะพะทะดะฐั‚ัŒ ะฐะดะผะธะฝะธัั‚ั€ะฐั‚ะพั€ โ€” ั‡ะตั€ะตะท ะฐะดะผะธะฝ-ะทะพะฝัƒ ัะฐะนั‚ะฐ ะธะปะธ ั‡ะตั€ะตะท POST-ะทะฐะฟั€ะพั ะฝะฐ ัะฟะตั†ะธะฐะปัŒะฝั‹ะน ัะฝะดะฟะพะธะฝั‚ api/v1/users/ (ะพะฟะธัะฐะฝะธะต ะฟะพะปะตะน ะทะฐะฟั€ะพัะฐ ะดะปั ัั‚ะพะณะพ ัะปัƒั‡ะฐั โ€” ะฒ ะดะพะบัƒะผะตะฝั‚ะฐั†ะธะธ). 
ะ’ ัั‚ะพั‚ ะผะพะผะตะฝั‚ ะฟะธััŒะผะพ ั ะบะพะดะพะผ ะฟะพะดั‚ะฒะตั€ะถะดะตะฝะธั ะฟะพะปัŒะทะพะฒะฐั‚ะตะปัŽ ะพั‚ะฟั€ะฐะฒะปัั‚ัŒ ะฝะต ะฝัƒะถะฝะพ.\nะŸะพัะปะต ัั‚ะพะณะพ ะฟะพะปัŒะทะพะฒะฐั‚ะตะปัŒ ะดะพะปะถะตะฝ ัะฐะผะพัั‚ะพัั‚ะตะปัŒะฝะพ ะพั‚ะฟั€ะฐะฒะธั‚ัŒ ัะฒะพะน email ะธ username ะฝะฐ ัะฝะดะฟะพะธะฝั‚ /api/v1/auth/signup/ , ะฒ ะพั‚ะฒะตั‚ ะตะผัƒ ะดะพะปะถะฝะพ ะฟั€ะธะนั‚ะธ ะฟะธััŒะผะพ ั ะบะพะดะพะผ ะฟะพะดั‚ะฒะตั€ะถะดะตะฝะธั.\nะ”ะฐะปะตะต ะฟะพะปัŒะทะพะฒะฐั‚ะตะปัŒ ะพั‚ะฟั€ะฐะฒะปัะตั‚ POST-ะทะฐะฟั€ะพั ั ะฟะฐั€ะฐะผะตั‚ั€ะฐะผะธ username ะธ confirmation_code ะฝะฐ ัะฝะดะฟะพะธะฝั‚ /api/v1/auth/token/, ะฒ ะพั‚ะฒะตั‚ะต ะฝะฐ ะทะฐะฟั€ะพั ะตะผัƒ ะฟั€ะธั…ะพะดะธั‚ token (JWT-ั‚ะพะบะตะฝ), ะบะฐะบ ะธ ะฟั€ะธ ัะฐะผะพัั‚ะพัั‚ะตะปัŒะฝะพะน ั€ะตะณะธัั‚ั€ะฐั†ะธะธ.\n\n- ะŸะพัะปะต ั€ะตะณะธัั‚ั€ะฐั†ะธะธ ะธ ะฟะพะปัƒั‡ะตะฝะธั ั‚ะพะบะตะฝะฐ ะฟะพะปัŒะทะพะฒะฐั‚ะตะปัŒ ะผะพะถะตั‚ ะพั‚ะฟั€ะฐะฒะธั‚ัŒ PATCH-ะทะฐะฟั€ะพั ะฝะฐ ัะฝะดะฟะพะธะฝั‚ /api/v1/users/me/ ะธ ะทะฐะฟะพะปะฝะธั‚ัŒ ะฟะพะปั ะฒ ัะฒะพั‘ะผ ะฟั€ะพั„ะฐะนะปะต (ะพะฟะธัะฐะฝะธะต ะฟะพะปะตะน โ€” ะฒ ะดะพะบัƒะผะตะฝั‚ะฐั†ะธะธ).\n\nะกะพะทะดะฐะฝะธะต ะฟะพะปัŒะทะพะฒะฐั‚ะตะปั ะฐะดะผะธะฝะธัั‚ั€ะฐั‚ะพั€ะพะผ\n\n- ะŸะพะปัŒะทะพะฒะฐั‚ะตะปั ะผะพะถะตั‚ ัะพะทะดะฐั‚ัŒ ะฐะดะผะธะฝะธัั‚ั€ะฐั‚ะพั€ โ€” ั‡ะตั€ะตะท ะฐะดะผะธะฝ-ะทะพะฝัƒ ัะฐะนั‚ะฐ ะธะปะธ ั‡ะตั€ะตะท POST-ะทะฐะฟั€ะพั ะฝะฐ ัะฟะตั†ะธะฐะปัŒะฝั‹ะน ัะฝะดะฟะพะธะฝั‚ api/v1/users/ (ะพะฟะธัะฐะฝะธะต ะฟะพะปะตะน ะทะฐะฟั€ะพัะฐ ะดะปั ัั‚ะพะณะพ ัะปัƒั‡ะฐั โ€” ะฒ ะดะพะบัƒะผะตะฝั‚ะฐั†ะธะธ). ะ’ ัั‚ะพั‚ ะผะพะผะตะฝั‚ ะฟะธััŒะผะพ ั ะบะพะดะพะผ ะฟะพะดั‚ะฒะตั€ะถะดะตะฝะธั ะฟะพะปัŒะทะพะฒะฐั‚ะตะปัŽ ะพั‚ะฟั€ะฐะฒะปัั‚ัŒ ะฝะต ะฝัƒะถะฝะพ.\nะŸะพัะปะต ัั‚ะพะณะพ ะฟะพะปัŒะทะพะฒะฐั‚ะตะปัŒ ะดะพะปะถะตะฝ ัะฐะผะพัั‚ะพัั‚ะตะปัŒะฝะพ ะพั‚ะฟั€ะฐะฒะธั‚ัŒ ัะฒะพะน email ะธ username ะฝะฐ ัะฝะดะฟะพะธะฝั‚ /api/v1/auth/signup/ , ะฒ ะพั‚ะฒะตั‚ ะตะผัƒ ะดะพะปะถะฝะพ ะฟั€ะธะนั‚ะธ ะฟะธััŒะผะพ ั ะบะพะดะพะผ ะฟะพะดั‚ะฒะตั€ะถะดะตะฝะธั.\nะ”ะฐะปะตะต ะฟะพะปัŒะทะพะฒะฐั‚ะตะปัŒ ะพั‚ะฟั€ะฐะฒะปัะตั‚ POST-ะทะฐะฟั€ะพั ั ะฟะฐั€ะฐะผะตั‚ั€ะฐะผะธ username ะธ confirmation_code ะฝะฐ ัะฝะดะฟะพะธะฝั‚ /api/v1/auth/token/, ะฒ ะพั‚ะฒะตั‚ะต ะฝะฐ ะทะฐะฟั€ะพั ะตะผัƒ ะฟั€ะธั…ะพะดะธั‚ token (JWT-ั‚ะพะบะตะฝ), ะบะฐะบ ะธ ะฟั€ะธ ัะฐะผะพัั‚ะพัั‚ะตะปัŒะฝะพะน ั€ะตะณะธัั‚ั€ะฐั†ะธะธ.\n\n\n" }, { "alpha_fraction": 0.6769596338272095, "alphanum_fraction": 0.6769596338272095, "avg_line_length": 34.08333206176758, "blob_id": "20ea5fe78226a1bd1e55c0b80adeb3d688ffdf90", "content_id": "34daeaf78bc957f7879fb08c66304862e3fdda26", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 421, "license_type": "no_license", "max_line_length": 63, "num_lines": 12, "path": "/reviews/permissions.py", "repo_name": "bitcoineazy/yamdb_final", "src_encoding": "UTF-8", "text": "from rest_framework import permissions\n\nfrom .models import User\n\n\nclass IsAbleToChange(permissions.BasePermission):\n def has_object_permission(self, request, view, obj):\n if request.method in permissions.SAFE_METHODS:\n return True\n return (request.user.role == User.UserRole.ADMIN\n or request.user.role == User.UserRole.MODERATOR\n or obj.author == request.user)\n" }, { "alpha_fraction": 0.7060301303863525, "alphanum_fraction": 0.7211055159568787, "avg_line_length": 22.41176414489746, "blob_id": "636f4a0275a2187708fbc619509b0020dc8573e0", "content_id": "3171f0b275a5e2b53f67b4cab73179c934262dd3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 398, "license_type": "no_license", "max_line_length": 61, "num_lines": 17, 
"path": "/users/urls.py", "repo_name": "bitcoineazy/yamdb_final", "src_encoding": "UTF-8", "text": "from django.urls import include, path\nfrom rest_framework.routers import DefaultRouter\n\nfrom . import views\nfrom .user_auth.views import get_confirmation_code, get_token\n\nrouter_v1 = DefaultRouter()\n\nrouter_v1.register('', views.UserList)\n\nurlpatterns = [\n\n path('v1/users/', include(router_v1.urls)),\n path('v1/auth/email/', get_confirmation_code),\n path('v1/auth/token/', get_token),\n\n]\n" }, { "alpha_fraction": 0.6282919645309448, "alphanum_fraction": 0.6305493116378784, "avg_line_length": 34.91891860961914, "blob_id": "c2400c3a2ffa2bdfa68a67d50417c0f8c0381eef", "content_id": "d1fd3eb46e7efd55568c008fe20767a4203a28b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1369, "license_type": "no_license", "max_line_length": 74, "num_lines": 37, "path": "/reviews/models.py", "repo_name": "bitcoineazy/yamdb_final", "src_encoding": "UTF-8", "text": "from django.contrib.auth import get_user_model\nfrom django.core.validators import MaxValueValidator, MinValueValidator\nfrom django.db import models\n\nfrom titles.models import Title\n\nUser = get_user_model()\n\n\nclass Review(models.Model):\n title_id = models.ForeignKey(Title, related_name='reviews',\n on_delete=models.CASCADE)\n text = models.TextField('ะžั‚ะทั‹ะฒ')\n author = models.ForeignKey(User, on_delete=models.CASCADE,\n related_name='reviews')\n score = models.PositiveSmallIntegerField(\n validators=[MinValueValidator(1), MaxValueValidator(10)])\n pub_date = models.DateTimeField('ะดะฐั‚ะฐ ะพั‚ะทั‹ะฒะฐ', auto_now_add=True)\n\n class Meta:\n ordering = ['-pub_date']\n constraints = [\n models.UniqueConstraint(\n fields=['title_id', 'text'], name='unique_review'),\n ]\n\n\nclass Comment(models.Model):\n review_id = models.ForeignKey(Review, related_name='comments',\n on_delete=models.CASCADE)\n text = models.TextField('ะšะพะผะตะฝั‚ะฐั€ะธะน')\n author = models.ForeignKey(User, on_delete=models.CASCADE,\n related_name='comments')\n pub_date = models.DateTimeField('ะดะฐั‚ะฐ ะบะพะผะผะตะฝั‚ะฐั€ะธั', auto_now_add=True)\n\n class Meta:\n ordering = ['-pub_date']\n" }, { "alpha_fraction": 0.6372881531715393, "alphanum_fraction": 0.6406779885292053, "avg_line_length": 27.095237731933594, "blob_id": "da294d661278af7a84ff091fdab83a199cc6f74d", "content_id": "0262624d4197a55614bc7c015aa318d8f32a33a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 590, "license_type": "no_license", "max_line_length": 68, "num_lines": 21, "path": "/users/models.py", "repo_name": "bitcoineazy/yamdb_final", "src_encoding": "UTF-8", "text": "from django.contrib.auth.models import AbstractUser\nfrom django.db import models\n\n\nclass User(AbstractUser):\n class UserRole(models.TextChoices):\n USER = 'user'\n ADMIN = 'admin'\n MODERATOR = 'moderator'\n\n email = models.EmailField(unique=True)\n role = models.CharField(max_length=20, choices=UserRole.choices,\n default=UserRole.USER)\n bio = models.TextField(blank=True)\n confirmation_code = models.TextField(null=True, default='')\n\n def __str__(self):\n return self.username\n\n class Meta:\n ordering = ['id']\n" }, { "alpha_fraction": 0.6668133735656738, "alphanum_fraction": 0.67341548204422, "avg_line_length": 41.867923736572266, "blob_id": "ccf8eed6e13eeb08d35dea3e2b691460b47f36bc", "content_id": "537591ef66f4a96c85de4204856a7dd518765381", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 2272, "license_type": "no_license", "max_line_length": 78, "num_lines": 53, "path": "/users/user_auth/views.py", "repo_name": "bitcoineazy/yamdb_final", "src_encoding": "UTF-8", "text": "from django.contrib.auth import get_user_model\nfrom django.core.mail import send_mail\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework import permissions, status\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.response import Response\nfrom rest_framework_simplejwt.tokens import AccessToken\n\nfrom api_yamdb.settings import SERVER_EMAIL\nfrom users.serializers import (UserAuthSerializer, UserConfirmationSerializer)\n\nUser = get_user_model()\n\n\n@api_view(['POST'])\n@permission_classes([permissions.AllowAny])\ndef get_confirmation_code(request):\n serializer = UserAuthSerializer(data=request.data)\n if serializer.is_valid():\n username = serializer.validated_data['username']\n email = serializer.validated_data['email']\n user, created = User.objects.get_or_create(username=username,\n email=email)\n confirmation_code = User.objects.make_random_password()\n user.confirmation_code = confirmation_code\n user.save()\n send_mail('Confirmation', f'Your code: {user.confirmation_code}',\n SERVER_EMAIL, [email])\n if created:\n return Response({'Success registration data': serializer.data},\n status.HTTP_201_CREATED)\n return Response({'Success registration data': serializer.data,\n 'confirmation_code': confirmation_code},\n status=status.HTTP_200_OK)\n return Response(serializer.errors)\n\n\n@api_view(['POST'])\n@permission_classes([permissions.AllowAny])\ndef get_token(request):\n serializer = UserConfirmationSerializer(data=request.data)\n if serializer.is_valid():\n confirmation_code = serializer.validated_data['confirmation_code']\n email = serializer.validated_data['email']\n username = serializer.validated_data['username']\n user = get_object_or_404(User, email=email, username=username,\n confirmation_code=confirmation_code)\n token = AccessToken.for_user(user)\n user.confirmation_code = ''\n user.save()\n\n return Response({'token': str(token)}, status=status.HTTP_200_OK)\n return Response(serializer.errors)\n" } ]
19
maojj/learn
https://github.com/maojj/learn
83d80bfd0fe3a4dd66453ead5712456745eedd46
153b329fad16cef78107b5d832f1ca9d9988bf8d
41638c26127b551178aa4907fbce2da5392fb5b2
refs/heads/master
2021-01-01T20:34:56.410781
2015-03-18T14:45:36
2015-03-18T14:45:36
32,215,399
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5196137428283691, "alphanum_fraction": 0.5232347846031189, "avg_line_length": 30.884614944458008, "blob_id": "510057574fea33c08c77a9ab931fb84a0958934b", "content_id": "16df291da83e03741107a66893ef73c219ec3652", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1657, "license_type": "no_license", "max_line_length": 108, "num_lines": 52, "path": "/mail/DBUtils.py", "repo_name": "maojj/learn", "src_encoding": "UTF-8", "text": "# encoding: utf-8\n# !/usr/bin/python\n\nimport sqlite3\nimport DataObject\n\n\ndef init_database():\n connect = sqlite3.connect(\"messageDB.sqlite3\")\n connect.text_factory = str\n\n conn = connect.cursor()\n #init message table\n conn.execute('''CREATE TABLE IF NOT EXISTS EMAIL\n (INTERNALID \t TEXT PRIMARY KEY NOT NULL,\n SENDER\t TEXT NOT NULL,\n SENDTIME TEXT NOT NULL,\n SENDERIP TEXT NOT NULL,\n SENDERHOSTNAME TEXT,\n SUBJECT TEXT,\n ACTION\t\t INT,\n HITPOLICY INT);''')\n\n #init user table\n conn.execute('''CREATE TABLE IF NOT EXISTS USER\n (USERID INT PRIMARY KEY NOT NULL,\n EMAIL \tTEXT NOT NULL,\n DOMAIN\tTEXT);''');\n\n #init Send table\n conn.execute('''CREATE TABLE IF NOT EXISTS SEND\n (SENDERID INT NOT NULL,\n RECEIVERID INT NOT NULL,\n INTERNALID TEXT NOT NULL,\n PRIMARY KEY(SENDERID,RECEIVERID,INTERNALID));''')\n\n return conn\n\n\ndef insertAMail(newMail):\n # \"\"\"\n # :type newMail: DataObject.mailMeta\n # \"\"\"\n # newMail = DataObject.mailMeta(mail)\n newMail.SenderHostName = \"xxx\"\n connect = init_database()\n action = 0\n reason = 0\n connect.execute('INSERT INTO EMAIL Values (?,?,?,?,?,?,?,?)', [newMail.MailId, newMail.Sender,\n newMail.Timestamp, newMail.SenderIP,\n newMail.SenderHostName, newMail.Subject,\n action, reason])" }, { "alpha_fraction": 0.5438871383666992, "alphanum_fraction": 0.5485893487930298, "avg_line_length": 25.625, "blob_id": "b350c98406eb34d9fc639b1e5f40a575a7bd365a", "content_id": "73e2e1737a91ee10cdbdf1b9c372bea8274a2522", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 638, "license_type": "no_license", "max_line_length": 51, "num_lines": 24, "path": "/mail/DataObject.py", "repo_name": "maojj/learn", "src_encoding": "UTF-8", "text": "class mailMeta:\n Sender = ''\n Recipient = []\n Timestamp = 0\n Subject = ''\n SenderIP = ''\n SenderHostName = ''\n Mail = ''\n MailId = ''\n\n def __init__(self, row):\n self.Sender = row[\"Sender\"]\n self.Recipient = row['Recipient(s)']\n self.Timestamp = row['Timestamp']\n self.Subject = row['Subject']\n self.SenderIP = row['SenderIP']\n self.SenderHostName = row['SenderHostName']\n self.Mail = row['Mail']\n self.MailId = self.parsemainid(self.Mail)\n\n\n def parsemainid(self, normalId):\n array = normalId.split(\".\")\n return array[-2].split(\"\\\\\")[-1]" }, { "alpha_fraction": 0.5704989433288574, "alphanum_fraction": 0.574837327003479, "avg_line_length": 23, "blob_id": "6f743d09280588c6c1497050f5d6dccf02803387", "content_id": "8af03c106d741afb35b51b88e810934b4a834564", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 461, "license_type": "no_license", "max_line_length": 44, "num_lines": 19, "path": "/mail/CSVParser.py", "repo_name": "maojj/learn", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# !/usr/bin/python\n\nimport csv\nimport DBUtils\nimport DataObject\n\ndef parseFile(file_name):\n with open(file_name) as csvFile:\n reader = csv.DictReader(csvFile)\n conn = 
DBUtils.init_database()\n for row in reader:\n print 'get row%s' % row\n aMail = DataObject.mailMeta(row)\n ret = DBUtils.insertAMail(aMail)\n print \"ret %s\" % ret\n break\n\nparseFile(\"5.csv\")\n\n\n\n\n\n" } ]
3
Aakash7065/EasyHousing
https://github.com/Aakash7065/EasyHousing
b6c8617fe017447e949516d62fe9b802f7aeb35d
b141e9c09682f56fe6aa4238f04d2d04d0b771f5
3eafcf44602e7f8d6ff1dad8f749789a9cfd3818
refs/heads/master
2020-08-09T22:37:51.834570
2019-10-10T17:56:58
2019-10-10T17:56:58
214,192,385
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.3774145543575287, "alphanum_fraction": 0.4658246636390686, "avg_line_length": 19.363636016845703, "blob_id": "6f1e59e8f9cc61d3b77b56ad8dac1a7f20483c7b", "content_id": "3c76aeea4d6167c63d49144923164f19029eec75", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1346, "license_type": "no_license", "max_line_length": 35, "num_lines": 66, "path": "/listings/choice.py", "repo_name": "Aakash7065/EasyHousing", "src_encoding": "UTF-8", "text": "from collections import OrderedDict\n\nbedroom_choices = {\n '1': 1,\n '2': 2,\n '3': 3,\n '4': 4,\n '5': 5,\n '6': 6,\n '7': 7,\n '8': 8,\n '9': 9,\n '10': 10\n}\n\nprice_choices = {\n '10000': 'Rs.10,000',\n '20000': 'Rs.20,000',\n '30000': 'Rs.30,000',\n '40000': 'Rs.40,000',\n '50000': 'Rs.50,000',\n '60000': 'Rs.60,000',\n '70000': 'Rs.70,000',\n '80000': 'Rs.80,000',\n '90000': 'Rs.90,000',\n '100000': 'Rs.1L+',\n}\n\nstate_choices = {\n 'AN': 'Andaman and Nicobar',\n 'AP': 'Andhra Pradesh',\n 'AR': 'Arunachal Pradesh',\n 'AS': 'Assam',\n 'BR': 'Bihar',\n 'CG': 'Chattisgarh',\n 'CH': 'Chandigarh',\n 'DD': 'Daman and Diu',\n 'DL': 'Delhi',\n 'DN': 'Dadra and Nagar Haveli',\n 'GA': 'Goa',\n 'GJ': 'Gujarat',\n 'HP': 'Himachal Pradesh',\n 'HR': 'Haryana',\n 'JH': 'Jharkhand',\n 'JK': 'Jammu and Kashmir',\n 'KA': 'Karnataka',\n 'KL': 'Kerala',\n 'LD': 'Lakshadweep',\n 'MH': 'Maharashtra',\n 'ML': 'Meghalaya',\n 'MN': 'Manipur',\n 'MP': 'Madhya Pradesh',\n 'MZ': 'Mizoram',\n 'NL': 'Nagaland',\n 'OR': 'Orissa',\n 'PB': 'Punjab',\n 'PY': 'Pondicherry',\n 'RJ': 'Rajasthan',\n 'SK': 'Sikkim',\n 'TG': 'Telangana',\n 'TN': 'Tamil Nadu',\n 'TR': 'Tripura',\n 'UA': 'Uttarakhand',\n 'UP': 'Uttar Pradesh',\n 'WB': 'West Bengal'\n}\n\n\n" }, { "alpha_fraction": 0.6170731782913208, "alphanum_fraction": 0.6731707453727722, "avg_line_length": 26.33333396911621, "blob_id": "e7016e3c13b2e95ff66e6989a1bc661e4e918d1d", "content_id": "1247e891ba4bc99ffc67852ba7762c3ccec6fcf6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 410, "license_type": "no_license", "max_line_length": 83, "num_lines": 15, "path": "/realtors/migrations/0002_auto_20190925_1323.py", "repo_name": "Aakash7065/EasyHousing", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.5 on 2019-09-25 13:23\n\nfrom django.db import migrations\nfrom django.db import models\n\n\nclass Migration(migrations.Migration):\n dependencies = [\n ('realtors', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField('Realtor', 'phone', models.CharField(max_length=20)),\n migrations.AlterField('Realtor', 'email', models.CharField(max_length=20))\n ]\n" }, { "alpha_fraction": 0.8333333134651184, "alphanum_fraction": 0.8333333134651184, "avg_line_length": 17, "blob_id": "0d8a49a977f682a540bce134ac9ceb612b51b862", "content_id": "010fdeafd82c00402972a051f4df20788a2b7806", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 36, "license_type": "no_license", "max_line_length": 21, "num_lines": 2, "path": "/README.md", "repo_name": "Aakash7065/EasyHousing", "src_encoding": "UTF-8", "text": "# EasyHousing\nSample Django Project\n" }, { "alpha_fraction": 0.6118546724319458, "alphanum_fraction": 0.6118546724319458, "avg_line_length": 43.82857131958008, "blob_id": "b11ace0e61cc8a51e4fe2e7a0300fd8a8d85c064", "content_id": "cf9d27cd0cb03a6bfbe499002d9f1103b5c7eae2", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 1569, "license_type": "no_license", "max_line_length": 120, "num_lines": 35, "path": "/contacts/views.py", "repo_name": "Aakash7065/EasyHousing", "src_encoding": "UTF-8", "text": "from .models import Contacts\nfrom django.contrib import messages\nfrom django.shortcuts import redirect\nfrom django.core.mail import send_mail\nfrom django.conf import settings\n\ndef contacts(request):\n if request.method == \"POST\":\n listing_id = request.POST['listing_id']\n listing = request.POST['listing']\n name = request.POST['name']\n email = request.POST['email']\n phone = request.POST['phone']\n message = request.POST['message']\n user_id = request.POST['user_id']\n realtor_email = request.POST['realtor_email']\n # has user already contacted\n if request.user.is_authenticated:\n user_id = request.user.id\n has_contacted = Contacts.objects.filter(listing_id=listing_id, user_id=user_id).exists()\n if has_contacted:\n messages.error(request, \"already contacted\")\n return redirect('/listings/' + listing_id)\n\n contact = Contacts(listing=listing, listing_id=listing_id, name=name, email=email, phone=phone, message=message,\n user_id=user_id)\n contact.save()\n send_mail('Property Listing Inquiry',\n f'There has been inquiry for {listing} . Signin to admin pannel for more Info',\n settings.EMAIL_HOST,\n ['[email protected]',f'{listing.realtor.email}'],\n False\n )\n messages.success(request, \"your request has been saved successfully, we will contact you soon\")\n return redirect('/listings/' + listing_id)\n" } ]
4
dani75i/CovidData
https://github.com/dani75i/CovidData
18ae71cf9b9c3d8f1433fc93b6b69b81c9f0c1bc
1db6122950dd935355e55cd3999bb7c9a7169954
40e6daa0b18c9510d3311dc0aa666db8cdef349f
refs/heads/master
2022-07-31T21:07:35.588150
2020-05-24T14:34:43
2020-05-24T14:34:43
261,900,637
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.2831245958805084, "alphanum_fraction": 0.28707975149154663, "avg_line_length": 33.431819915771484, "blob_id": "9a73369d171f789cc1fd980b498efec6091bf43d", "content_id": "54d87d98d27651a2b67970e346a158d867c104b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3034, "license_type": "no_license", "max_line_length": 79, "num_lines": 88, "path": "/datas/static/datas/request_ajax.js", "repo_name": "dani75i/CovidData", "src_encoding": "UTF-8", "text": "\n\n\n$(document).ready(function(){\n $(\"#post-form-list\").change(function(){\n $(\"#p1\").empty();\n $.ajax({\n type:'POST',\n url:'test',\n data:{\n country:$('#post-form-list :selected').text(),\n csrfmiddlewaretoken:$('input[name=csrfmiddlewaretoken]').val(),\n action: 'post'\n },\n success: function(json){\n $(\"#p1\").append(\n\n \"<div id='country'>\" +\n json.country +\n \"</div>\" + \"<br>\" +\n\n \"<div class='container'>\" +\n \"<div class='row'>\" +\n \"<div class='col-sm-4' id='confirmed'>\" +\n \"<h3>Confirmed</h3>\" +\n \"<p>\" + json.confirmed + \"</p>\" +\n \"</div>\" +\n \"<div class='col-sm-4' id='deaths'>\" +\n \"<h3>Deaths</h3>\" +\n \"<p>\" + json.deaths + \"</p>\" +\n \"</div>\" +\n \"<div class='col-sm-4' id='recovered'>\" +\n \"<h3>Recovered</h3>\" +\n \"<p>\" + json.recovered + \"</p>\" +\n \"</div>\" + \"<br>\" +\n \"</div>\" +\n \"</div>\" + \"<br>\"\n )\n console.log(json.confirmed)\n\n var ctx = document.getElementById(\"myChart\");\n var myChart = new Chart(ctx, {\n type: 'bar',\n data: {\n// labels: [\"Confirmed\", \"Deaths\", \"Recovered\"],\n labels: [\"country\"],\n datasets: [\n {\n label: 'Confirmed',\n backgroundColor: [\"blue\"],\n data: [\n json.confirmed, ]\n },\n {\n label: 'Deaths',\n backgroundColor: [\"red\"],\n data: [\n json.deaths]\n },\n {\n label: 'Recovered',\n backgroundColor: [\"green\"],\n data: [\n json.recovered]\n }\n ]\n }\n })\n\n var ctx = document.getElementById(\"myChart2\");\n console.log(json.list_dates)\n var myChart = new Chart(ctx, {\n type: 'line',\n data: {\n labels: json.list_dates,\n datasets: [\n {\n label: 'Deaths',\n backgroundColor: [\"red\"],\n data: json.list_deaths\n },\n ]\n }\n })\n\n }\n\n });\n\n})\n})\n\n" }, { "alpha_fraction": 0.682692289352417, "alphanum_fraction": 0.682692289352417, "avg_line_length": 22.11111068725586, "blob_id": "adb7c0c56f3f069ef19beccb002ca96666795674", "content_id": "75b9a10c76f2876a5daa214ad26b39dfd419ae89", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 208, "license_type": "no_license", "max_line_length": 70, "num_lines": 9, "path": "/datas/urls.py", "repo_name": "dani75i/CovidData", "src_encoding": "UTF-8", "text": "from django.urls import path\n\n\nfrom datas import views\n\nurlpatterns = [\n # path('test', views.get_value_covid_by_country, name=''),\n path('', views.get_value_covid_by_country_dashboard, name='test'),\n]\n" }, { "alpha_fraction": 0.5588967800140381, "alphanum_fraction": 0.563345193862915, "avg_line_length": 41.26315689086914, "blob_id": "3073f9bf61d535188e6cf298f4c9a4c128f40140", "content_id": "eddf9596a1f178a08f21f7afc269c6f02fd8e57d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5620, "license_type": "no_license", "max_line_length": 82, "num_lines": 133, "path": "/datas/views.py", "repo_name": "dani75i/CovidData", "src_encoding": "UTF-8", "text": "import json\nfrom django.shortcuts import render\nfrom django.http 
import JsonResponse, HttpResponse\nfrom datas.forms import CountryForm\nfrom datas.controllers.Getdatas import *\n\n\ndef get_value_covid_by_country(request):\n response_data = {}\n\n if request.POST.get('action') == 'post':\n form = CountryForm(request.POST)\n if form.is_valid():\n country = form.cleaned_data['country']\n\n result = postman_get_data_by_countries(country)\n response_data['confirmed'] = result[\"confirmed\"]\n response_data['deaths'] = result[\"deaths\"]\n response_data['recovered'] = result[\"recovered\"]\n response_data['death_rate'] = result[\"death_rate\"]\n\n histogramme = postman_get_data_from_beginning(country)\n response_data[\"list_dates\"] = histogramme[0]\n response_data[\"list_confirmed\"] = histogramme[1]\n response_data[\"list_deaths\"] = histogramme[2]\n response_data[\"list_recovered\"] = histogramme[3]\n response_data[\"france_last_day_deaths\"] = histogramme[4]\n\n return JsonResponse(response_data)\n\n else:\n form = CountryForm()\n\n result = postman_get_data_by_countries(\"France\")\n france_confirmed = result[\"confirmed\"]\n france_deaths = result[\"deaths\"]\n france_recovered = result[\"recovered\"]\n france_death_rate = result[\"death_rate\"]\n\n # histogramme = postman_get_data_from_beginning(\"France\")\n # france_dates_list = histogramme[0]\n # france_deaths_list = histogramme[2]\n # france_last_day_deaths = histogramme[4]\n\n\n return render(request, 'datas/home.html',\n {\"form\": form,\n \"france_confirmed\": france_confirmed,\n \"france_deaths\": france_deaths,\n \"france_recovered\": france_recovered,\n # \"france_dates_list\": france_dates_list,\n # \"france_deaths_list\": france_deaths_list,\n # \"france_last_day_deaths\": france_last_day_deaths,\n \"france_death_rate\": france_death_rate,\n })\n\n\ndef get_value_covid_by_country_dashboard(request):\n response_data = {}\n\n if request.POST.get('action') == 'post':\n form = CountryForm(request.POST)\n if form.is_valid():\n country = form.cleaned_data['country']\n\n result = postman_get_data_by_countries(country)\n response_data['confirmed'] = result[\"confirmed\"]\n response_data['deaths'] = result[\"deaths\"]\n response_data['recovered'] = result[\"recovered\"]\n response_data['death_rate'] = result[\"death_rate\"]\n response_data['recovered_rate'] = result[\"recovered_rate\"]\n\n histogramme = postman_get_data_from_beginning(country)\n response_data[\"list_dates\"] = histogramme[0]\n response_data[\"list_confirmed\"] = histogramme[1]\n response_data[\"list_deaths\"] = histogramme[2]\n response_data[\"list_recovered\"] = histogramme[3]\n response_data[\"france_last_day_deaths\"] = histogramme[4]\n response_data[\"france_last_day_confirmed\"] = histogramme[5]\n response_data[\"france_last_day_recovered\"] = histogramme[6]\n\n return JsonResponse(response_data)\n\n else:\n\n try:\n form = CountryForm()\n\n result = postman_get_data_by_countries(\"France\")\n france_confirmed = result[\"confirmed\"]\n france_deaths = result[\"deaths\"]\n france_recovered = result[\"recovered\"]\n france_death_rate = result[\"death_rate\"]\n france_recovered_rate = result[\"recovered_rate\"]\n\n histogramme = postman_get_data_from_beginning(\"France\")\n france_last_day_deaths = histogramme[4]\n france_last_day_confirmed = histogramme[5]\n france_last_day_recovered = histogramme[6]\n\n world = postman_get_world_datas()[0]\n world_confirmed = world[\"TotalConfirmed\"]\n world_deaths = world[\"TotalDeaths\"]\n world_recovered = world[\"TotalRecovered\"]\n word_new_confirmed = world[\"NewConfirmed\"]\n 
word_new_deaths = world[\"NewDeaths\"]\n word_new_recovered = world[\"NewRecovered\"]\n\n # summary = tableau()\n summary = postman_get_world_datas()[1]\n\n\n return render(request, 'datas/dashboard.html',\n {\"form\": form,\n \"france_confirmed\": france_confirmed,\n \"france_deaths\": france_deaths,\n \"france_recovered\": france_recovered,\n \"france_death_rate\": france_death_rate,\n \"france_recovered_rate\": france_recovered_rate,\n \"world_confirmed\": world_confirmed,\n \"world_deaths\": world_deaths,\n \"world_recovered\": world_recovered,\n \"world_new_confirmed\": word_new_confirmed,\n \"world_new_deaths\": word_new_deaths,\n \"world_new_recovered\": word_new_recovered,\n \"summary\": summary,\n \"france_last_day_deaths\": france_last_day_deaths,\n \"france_last_day_confirmed\": france_last_day_confirmed,\n \"france_last_day_recovered\": france_last_day_recovered,\n })\n except:\n\n return HttpResponse(\"<h1>error 500: please reload the page</h1>\")" }, { "alpha_fraction": 0.5400516986846924, "alphanum_fraction": 0.5542635917663574, "avg_line_length": 32.65217208862305, "blob_id": "e88ddad5e5511bd4517433f533f3030b2f394bf1", "content_id": "00513a818de3017b4f2f7709d0be042eb1d782c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 774, "license_type": "no_license", "max_line_length": 80, "num_lines": 23, "path": "/datas/forms.py", "repo_name": "dani75i/CovidData", "src_encoding": "UTF-8", "text": "from django import forms\nfrom datas.controllers.Getdatas import *\n\n\nclass TestForm(forms.Form):\n name = forms.CharField(label='Your name', max_length=255)\n email = forms.EmailField(label='Your email', max_length=255, required=False)\n\nLIST_COUNTRIES = postman_get_all_countries()\n\n\nclass CountryForm(forms.Form):\n country = forms.CharField(label='Select a country : ',\n widget=forms.Select(\n choices=LIST_COUNTRIES,\n attrs={'style':\n 'font-size: 15px'},\n ),\n initial=\"France\")\n\n\nclass CountryFormAjax(forms.Form):\n country = forms.CharField(label='Your country', max_length=255)\n" }, { "alpha_fraction": 0.6769759654998779, "alphanum_fraction": 0.6907216310501099, "avg_line_length": 25.454545974731445, "blob_id": "1ed5ad1d7256951585362688f26ef8051a77f7c1", "content_id": "55fec4f7f0eacc94ebc8331ee96efde1d3151816", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 291, "license_type": "no_license", "max_line_length": 78, "num_lines": 11, "path": "/datas/models.py", "repo_name": "dani75i/CovidData", "src_encoding": "UTF-8", "text": "from django.db import models\n\n\nclass Song(models.Model):\n\n name = models.CharField(max_length=255, default='No Name')\n duration = models.IntegerField(default=0, help_text=\"Duration in seconds\")\n lyrics = models.TextField(blank=True)\n\n def __str__(self):\n return self.name\n" }, { "alpha_fraction": 0.6832740306854248, "alphanum_fraction": 0.6832740306854248, "avg_line_length": 20.615385055541992, "blob_id": "1e75e3293df23aafd224b03ad0154e2f273277f4", "content_id": "64efda2e4bfa75a1475223f151dc1c10397e6cc5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 281, "license_type": "no_license", "max_line_length": 55, "num_lines": 13, "path": "/datas/admin.py", "repo_name": "dani75i/CovidData", "src_encoding": "UTF-8", "text": "from django.contrib import admin\n\nfrom datas.models import Song\n\n# @admin.register(Song)\nclass SongAdmin(admin.ModelAdmin):\n\n list_display = 
('id', 'name', 'duration', 'lyrics')\n search_fields = ['name']\n # list_editable = ('name',)\n\n\nadmin.site.register(Song, SongAdmin)\n" }, { "alpha_fraction": 0.6453900933265686, "alphanum_fraction": 0.6560283899307251, "avg_line_length": 31.290077209472656, "blob_id": "097037fd8228869f28b08f49b5e74b2bf4af5602", "content_id": "0abd91e812f6392f18f4b03c8ef5e221b32fa25a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4230, "license_type": "no_license", "max_line_length": 103, "num_lines": 131, "path": "/datas/controllers/Getdatas.py", "repo_name": "dani75i/CovidData", "src_encoding": "UTF-8", "text": "import requests\n\n\ndef get_data_by_countries(country):\n url = \"https://covid19.mathdro.id/api/countries/\" + country\n response = requests.get(url=url).json()\n\n information = {\n \"confirmed\": response[\"confirmed\"][\"value\"],\n \"recovered\": response[\"recovered\"][\"value\"],\n \"deaths\": response[\"deaths\"][\"value\"],\n }\n return information\n\n\ndef get_all_countries():\n list_countries = []\n url = \"https://covid19.mathdro.id/api/countries/\"\n response = requests.get(url=url).json()[\"countries\"]\n for country in response:\n list_countries.append((country['name'], country['name']))\n return list_countries\n\n\ndef get_world_datas():\n url = \"https://covid19.mathdro.id/api\"\n response = requests.get(url=url).json()\n\n information = {\n \"confirmed\": response[\"confirmed\"][\"value\"],\n \"recovered\": response[\"recovered\"][\"value\"],\n \"deaths\": response[\"deaths\"][\"value\"],\n }\n\n return information\n\n\ndef postman_get_data_from_beginning(country):\n list_confirmed = []\n list_deaths = []\n list_recovered = []\n list_dates = []\n\n url_confirmed = \"https://api.covid19api.com/total/dayone/country/\" + country + \"/status/confirmed\"\n response_confirmed = requests.get(url=url_confirmed).json()\n for case in response_confirmed:\n list_confirmed.append(case[\"Cases\"])\n list_dates.append(case[\"Date\"][5:10])\n\n url_deaths = \"https://api.covid19api.com/total/dayone/country/\" + country + \"/status/deaths\"\n response_recovered = requests.get(url=url_deaths).json()\n for case in response_recovered:\n list_deaths.append(case[\"Cases\"])\n\n url_recovered = \"https://api.covid19api.com/total/dayone/country/\" + country + \"/status/recovered\"\n response_recovered = requests.get(url=url_recovered).json()\n for case in response_recovered:\n list_recovered.append(case[\"Cases\"])\n\n list_confirmed_not_cumulated = []\n for case in range(len(list_deaths) - 1):\n list_confirmed_not_cumulated.append(list_confirmed[case + 1] - list_confirmed[case])\n list_confirmed_not_cumulated.insert(0, 0)\n\n number_confirmed_last_day = list_confirmed_not_cumulated[-1]\n\n list_deaths_not_cumulated = []\n for case in range(len(list_deaths) - 1):\n list_deaths_not_cumulated.append(list_deaths[case + 1] - list_deaths[case])\n list_deaths_not_cumulated.insert(0, 0)\n\n number_deaths_last_day = list_deaths_not_cumulated[-1]\n\n list_recovered_not_cumulated = []\n for case in range(len(list_deaths) - 1):\n list_recovered_not_cumulated.append(list_recovered[case + 1] - list_recovered[case])\n list_recovered_not_cumulated.insert(0, 0)\n\n number_recovered_last_day = list_recovered_not_cumulated[-1]\n\n\n return list_dates, list_confirmed, list_deaths_not_cumulated, \\\n list_recovered, number_deaths_last_day, number_confirmed_last_day, number_recovered_last_day\n\n\ndef postman_get_all_countries():\n url = 
\"https://api.covid19api.com/countries\"\n response = requests.get(url=url).json()\n list_countries = []\n\n for country in response:\n list_countries.append((country[\"Country\"], country[\"Country\"]))\n\n return sorted(list_countries)\n\n\ndef postman_get_data_by_countries(country):\n url = \"https://api.covid19api.com/total/country/\" + country\n response = requests.get(url=url).json()[-1]\n\n\n death_rate = (int(response[\"Deaths\"]) / int(response[\"Confirmed\"])) * 100\n death_rate = \"%.2f\" % death_rate\n\n recovered_rate = (int(response[\"Recovered\"]) / int(response[\"Confirmed\"])) * 100\n recovered_rate = \"%.2f\" % recovered_rate\n\n information = {\n \"confirmed\": response[\"Confirmed\"],\n \"recovered\": response[\"Recovered\"],\n \"deaths\": response[\"Deaths\"],\n \"death_rate\": death_rate,\n \"recovered_rate\": recovered_rate,\n }\n\n return information\n\n\ndef postman_get_world_datas():\n summary = []\n url = \"https://api.covid19api.com/summary\"\n # response = requests.get(url=url)\n # return response.status_code, response.reason\n response = requests.get(url=url).json()\n\n for country in response[\"Countries\"]:\n summary.append(country)\n\n return response[\"Global\"], summary\n\nprint(postman_get_world_datas())\n" } ]
7
ysj1173886760/ScreenTranslater
https://github.com/ysj1173886760/ScreenTranslater
136b889556e5718e60267f50cd5dd264e228e20f
0d05d9f7b7bf28b51de9bdc5386accaf92110710
f95c9933fabc628fc9e96b396381f7f1da0c9a8a
refs/heads/main
2023-03-03T04:09:10.421580
2021-02-04T03:10:28
2021-02-04T03:10:28
335,529,145
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6649128794670105, "alphanum_fraction": 0.7004274725914001, "avg_line_length": 47.269840240478516, "blob_id": "2cf630c611ea8a46850ade6cb2afe037b67bb05c", "content_id": "2fa043ae0f32f98a0e763ff7ff9a5e12d4390b31", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3041, "license_type": "no_license", "max_line_length": 79, "num_lines": 63, "path": "/my_ui.py", "repo_name": "ysj1173886760/ScreenTranslater", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'untitled.ui'\n#\n# Created by: PyQt5 UI code generator 5.15.2\n#\n# WARNING: Any manual changes made to this file will be lost when pyuic5 is\n# run again. Do not edit this file unless you know what you are doing.\n\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass Ui_MainWindow(object):\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(890, 701)\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.pushButton = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton.setGeometry(QtCore.QRect(40, 10, 101, 51))\n self.pushButton.setObjectName(\"pushButton\")\n self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton_2.setGeometry(QtCore.QRect(540, 10, 101, 51))\n self.pushButton_2.setObjectName(\"pushButton_2\")\n self.textEdit = QtWidgets.QTextEdit(self.centralwidget)\n self.textEdit.setGeometry(QtCore.QRect(0, 60, 361, 601))\n self.textEdit.setObjectName(\"textEdit\")\n self.textEdit_2 = QtWidgets.QTextEdit(self.centralwidget)\n self.textEdit_2.setGeometry(QtCore.QRect(370, 60, 511, 621))\n font = QtGui.QFont()\n font.setPointSize(14)\n self.textEdit_2.setFont(font)\n self.textEdit_2.setObjectName(\"textEdit_2\")\n self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton_3.setGeometry(QtCore.QRect(190, 10, 141, 51))\n self.pushButton_3.setObjectName(\"pushButton_3\")\n self.checkBox = QtWidgets.QCheckBox(self.centralwidget)\n self.checkBox.setGeometry(QtCore.QRect(400, 30, 121, 23))\n self.checkBox.setObjectName(\"checkBox\")\n self.label = QtWidgets.QLabel(self.centralwidget)\n self.label.setGeometry(QtCore.QRect(700, 20, 161, 31))\n self.label.setObjectName(\"label\")\n MainWindow.setCentralWidget(self.centralwidget)\n self.menubar = QtWidgets.QMenuBar(MainWindow)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 890, 22))\n self.menubar.setObjectName(\"menubar\")\n MainWindow.setMenuBar(self.menubar)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName(\"statusbar\")\n MainWindow.setStatusBar(self.statusbar)\n\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\"))\n self.pushButton.setText(_translate(\"MainWindow\", \"Screenshot\"))\n self.pushButton_2.setText(_translate(\"MainWindow\", \"Translate\"))\n self.pushButton_3.setText(_translate(\"MainWindow\", \"GetFromClipBoard\"))\n self.checkBox.setText(_translate(\"MainWindow\", \"Paper Format\"))\n self.label.setText(_translate(\"MainWindow\", \"Made By heaven5heep\"))\n" }, { "alpha_fraction": 0.5502715110778809, "alphanum_fraction": 0.5705269575119019, "avg_line_length": 35.0476188659668, "blob_id": "a9cf2f127be0c3adc32b396cc0077015300110c4", "content_id": 
"6433fcbd19b7c51ffde792741a001617c324f157", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6823, "license_type": "no_license", "max_line_length": 113, "num_lines": 189, "path": "/version0.1.py", "repo_name": "ysj1173886760/ScreenTranslater", "src_encoding": "UTF-8", "text": "import tkinter as tk\nfrom tkinter import ttk\nimport time\nfrom PIL import ImageGrab\nimport os\nimport aip\nimport random\nimport hashlib\nimport urllib\nimport requests\nimport http.client\nimport json\nfrom PIL import Image\n\nclass GUI:\n def __init__(self) -> None:\n self.X = tk.IntVar(value=0)\n self.Y = tk.IntVar(value=0)\n self.flag = False\n self.capture_img = 'cap.png'\n self.ocr_text = ''\n self.translate_text = ''\n\n self.text_box = tk.Text(window)\n self.text_box.place(x=20, y=70, anchor='nw', width=170, height=330)\n\n self.tran_text_box = tk.Text(window)\n self.tran_text_box.place(x=210, y=70, anchor='nw', width=170, height=330)\n\n self.capture_btn = tk.Button(text='ScreenShot', command=self.capture_cmd)\n self.capture_btn.place(x=70, y=10, anchor='nw', width=80, height=30)\n\n self.trans_btn = tk.Button(text='Translate', command=self.translate_cmd)\n self.trans_btn.place(x=260, y=10, anchor='nw', width=80, height=30)\n\n self.from_lang = 'en'\n self.to_lang = 'zh'\n self.lang_dic = {'Chinese':'zh', 'English':'en', 'Japanese':'jp'}\n self.from_lang_label = tk.Label(window, text='from: ')\n self.from_lang_box = ttk.Combobox(window, state='readonly')\n self.from_lang_box['value'] = ('Chinese', 'English', 'Japanese')\n self.from_lang_box.current(1)\n self.from_lang_label.place(x=30, y=45, anchor='nw')\n self.from_lang_box.place(x=80, y=45, anchor='nw', width=80, height=20)\n\n self.to_lang_label = tk.Label(window, text='to: ')\n self.to_lang_box = ttk.Combobox(window, state='readonly')\n self.to_lang_box['value'] = ('Chinese', 'English', 'Japanese')\n self.to_lang_box.current(0)\n self.to_lang_label.place(x=240, y=45, anchor='nw')\n self.to_lang_box.place(x=270, y=45, anchor='nw', width=80, height=20)\n\n self.screenWidth = window.winfo_screenwidth()\n self.screenHeight = window.winfo_screenheight()\n self.tmp_img = 'tmp.png'\n window.bind('<Control-Alt-c>', self.get_img_from_clipboard)\n\n def create_canvas(self):\n im = ImageGrab.grab()\n im.save(self.tmp_img)\n self.top = tk.Toplevel(window, width=self.screenWidth, height=self.screenHeight)\n self.top.overrideredirect(True) # ้š่—ๆމ่พนๆก†\n self.canvas = tk.Canvas(self.top, bg='white', width=self.screenWidth, height=self.screenHeight)\n self.image = tk.PhotoImage(file=self.tmp_img)\n self.canvas.create_image(0, 0, anchor='nw', image=self.image)\n \n self.canvas.bind('<Button-1>', self.mouse_left_down)\n self.canvas.bind('<B1-Motion>', self.mouse_move)\n self.canvas.bind('<ButtonRelease-1>', self.mouse_left_up)\n\n self.canvas.pack(fill=tk.BOTH, expand=tk.YES)\n\n def mouse_left_down(self, event):\n self.X.set(event.x)\n self.Y.set(event.y)\n self.flag = True\n \n def mouse_move(self, event):\n if not self.flag:\n return\n try:\n self.canvas.delete(self.lastDraw)\n except Exception as e:\n pass\n self.lastDraw = self.canvas.create_rectangle(self.X.get(), self.Y.get(), event.x, event.y, outline='red')\n \n def mouse_left_up(self, event):\n self.flag = False\n try:\n self.canvas.delete(self.lastDraw)\n except Exception as e:\n pass\n x1, x2 = sorted([self.X.get(), event.x])\n y1, y2 = sorted([self.Y.get(), event.y])\n pic = ImageGrab.grab((x1 + 1, y1 + 1, x2, y2))\n pic.save(self.capture_img)\n 
self.top.destroy()\n\n def set_text(self):\n self.ocr_text = self.baidu_ocr(self.capture_img)\n if (self.ocr_text):\n self.text_box.delete(1.0, tk.END)\n self.tran_text_box.delete(1.0, tk.END)\n self.text_box.insert(1.0, self.ocr_text)\n window.deiconify()\n os.remove(self.capture_img)\n\n def get_img_from_clipboard(self, event):\n cb = QApplication.clipboard()\n if cb.mimeData().hashImage():\n qt_image = cb.image()\n image = Image.fromqimage(qt_image)\n image.save(self.capture_img)\n self.set_text()\n\n def capture_cmd(self):\n window.iconify()\n window.withdraw()\n self.create_canvas()\n self.capture_btn.wait_window(self.top)\n os.remove(self.tmp_img)\n self.set_text()\n \n def baidu_ocr(self, file):\n app_id = '19890128'\n api_key = 'BqvwEKnyBhHX6QAj0Ezc2KH7'\n secret_key = '2wQh1smTldG9qvKdUKvZe5uXhb6L1o59'\n ocr_text = ''\n if os.path.isfile(file):\n with open(file, 'rb') as f:\n image = f.read()\n ocr_ret = aip.AipOcr(app_id, api_key, secret_key).basicGeneral(image)\n words = ocr_ret.get('words_result')\n print(words)\n if words is not None and len(words):\n for word in words:\n ocr_text += word['words'] + '\\n'\n return ocr_text\n return None\n\n def translate_cmd(self):\n if self.ocr_text:\n self.translate_text = self.baidu_translate(self.ocr_text)\n self.tran_text_box.delete(1.0, tk.END)\n if self.translate_text:\n self.tran_text_box.insert('end', self.translate_text)\n\n def baidu_translate(self, content):\n app_id = '20200515000455294'\n secret_key = 'TH9UQhQX1yMCQzbVsPaa'\n http_client = None\n myurl = '/api/trans/vip/translate'\n q = content\n from_lang = self.from_lang\n to_lang = self.to_lang\n salt = random.randint(32768, 65536)\n sign = app_id + q + str(salt) + secret_key\n sign = hashlib.md5(sign.encode()).hexdigest()\n myurl = myurl + '?appid=' + app_id + '&q=' + urllib.parse.quote(q) + \\\n '&from=' + from_lang + '&to=' + to_lang + '&salt=' + str(salt) + '&sign=' + sign\n \n try:\n http_client = http.client.HTTPConnection('api.fanyi.baidu.com')\n http_client.request('GET', myurl)\n response = http_client.getresponse()\n json_response = response.read().decode('utf-8')\n js = json.loads(json_response)\n res = ''\n for result in js['trans_result']:\n res += result['dst'] + '\\n'\n return res\n except Exception as e:\n print(e)\n return None\n finally:\n if http_client:\n http_client.close()\n \n def get_from_lang(self, event):\n self.from_lang = self.lang_dic[self.from_lang_box.get()]\n \n def get_to_lang(self, event):\n self.to_lang = self.lang_dic[self.to_lang_box.get()]\n\nwindow = tk.Tk()\nwindow.title('GUI')\nwindow.geometry('400x420')\ngui = GUI()\nwindow.mainloop()\n" }, { "alpha_fraction": 0.6087613105773926, "alphanum_fraction": 0.6178247928619385, "avg_line_length": 30.539682388305664, "blob_id": "03ded9d0db77ab80a9038b628951e3e8cc1dba88", "content_id": "a0591ce9d8b71cdb6834de5ca417dd9a0be9faa8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1986, "license_type": "no_license", "max_line_length": 98, "num_lines": 63, "path": "/version0.2.py", "repo_name": "ysj1173886760/ScreenTranslater", "src_encoding": "UTF-8", "text": "#https://github.com/ysj1173886760/ScreenTranslater\nfrom PyQt5.QtWidgets import QApplication, QMainWindow\nimport my_ui\nimport sys\nimport utils\nfrom PIL import Image\nimport os\nfrom PyQt5.QtCore import Qt\nfrom screenshot_test import CaptureScreen\n\nclass GUI(QMainWindow, my_ui.Ui_MainWindow):\n \n def __init__(self):\n super(GUI, self).__init__()\n self.setupUi(self)\n # 
self.setWindowModality(Qt.ApplicationModal)\n self.pushButton_2.clicked.connect(self.translate_cmd)\n self.pushButton_3.clicked.connect(self.get_from_clipboard_cmd)\n self.pushButton.clicked.connect(self.capture_img)\n\n def capture_img(self):\n gui.hide()\n self.screenshot = CaptureScreen()\n self.screenshot.exec_()\n gui.show()\n self.set_text()\n\n def set_text(self):\n result = utils.baidu_ocr('tmp.png')\n if result:\n if self.checkBox.isChecked():\n result = utils.paper_format(result)\n self.textEdit.setText(result)\n os.remove('tmp.png')\n\n def get_from_clipboard_cmd(self):\n cb = QApplication.clipboard()\n if cb.mimeData().hasImage():\n qt_img = cb.image()\n qt_img.save('tmp.png', quality=95)\n self.set_text()\n\n def translate_cmd(self):\n content = self.textEdit.toPlainText()\n result = utils.baidu_translate(content)\n if result:\n self.textEdit_2.setText(result)\n else:\n self.textEdit_2.setText('Error')\n \n def keyPressEvent(self, event):\n if (event.key() == Qt.Key_C) and (event.modifiers() == Qt.ControlModifier|Qt.AltModifier):\n self.get_from_clipboard_cmd()\n if (event.key() == Qt.Key_Z) and (event.modifiers() == Qt.ControlModifier|Qt.AltModifier):\n self.translate_cmd()\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n gui = GUI()\n gui.setWindowTitle('ScreenTranslator')\n gui.show()\n sys.exit(app.exec_())" }, { "alpha_fraction": 0.8535565137863159, "alphanum_fraction": 0.857740581035614, "avg_line_length": 15, "blob_id": "89673a257b775e136447aede097032f9f9fb0038", "content_id": "425341c17f90bfaa48cce4f11230e552c1b873ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 559, "license_type": "no_license", "max_line_length": 42, "num_lines": 15, "path": "/README.md", "repo_name": "ysj1173886760/ScreenTranslater", "src_encoding": "UTF-8", "text": "# ScreenTranslater\n\nA simple screen-capture translation project, mainly to make reading English papers easier\n\nGUI built with PyQt5\n\nBoth translation and OCR use Baidu services; not sure why translating via the web page and via the API gives different results\n\nThe built-in screenshot feature is rough, so use it with caution; after taking a screenshot the window goes black, because it is waiting for the child window to finish\n\nIt is easier to use your own screenshot tool (e.g. QQ screenshot) to put the image on the clipboard, and then read it directly from the clipboard\n\nThere is no hotkey for screenshots; fetching an image from the clipboard is ctrl+alt+c, and translating is ctrl+alt+z\n\nSince my own screenshot key is ctrl+alt+x, the two work well together" }, { "alpha_fraction": 0.5440931916236877, "alphanum_fraction": 0.5668330788612366, "avg_line_length": 28.57377052307129, "blob_id": "de79566a9caafd72fedb35d3619ae6a84f6a1d5d", "content_id": "fc9b86742c75119b061ac57e20cc2bc8a2654af2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1803, "license_type": "no_license", "max_line_length": 92, "num_lines": 61, "path": "/utils.py", "repo_name": "ysj1173886760/ScreenTranslater", "src_encoding": "UTF-8", "text": "import os\nimport aip\nimport http.client\nimport hashlib\nimport json\nimport random\nimport urllib\nimport re\n\ndef baidu_ocr(file):\n app_id = '19890128'\n api_key = 'BqvwEKnyBhHX6QAj0Ezc2KH7'\n secret_key = '2wQh1smTldG9qvKdUKvZe5uXhb6L1o59'\n ocr_text = ''\n if os.path.isfile(file):\n with open(file, 'rb') as f:\n image = f.read()\n ocr_ret = aip.AipOcr(app_id, api_key, secret_key).basicGeneral(image)\n words = ocr_ret.get('words_result')\n print(words)\n if words is not None and len(words):\n for word in words:\n ocr_text += word['words'] + ' '\n return ocr_text\n return 
None\n\ndef baidu_translate(content):\n app_id = '20200515000455294'\n secret_key = 'TH9UQhQX1yMCQzbVsPaa'\n http_client = None\n myurl = '/api/trans/vip/translate'\n q = content\n from_lang = 'en'\n to_lang = 'zh'\n salt = random.randint(32768, 65536)\n sign = app_id + q + str(salt) + secret_key\n sign = hashlib.md5(sign.encode()).hexdigest()\n myurl = myurl + '?appid=' + app_id + '&q=' + urllib.parse.quote(q) + \\\n '&from=' + from_lang + '&to=' + to_lang + '&salt=' + str(salt) + '&sign=' + sign\n \n try:\n http_client = http.client.HTTPConnection('api.fanyi.baidu.com')\n http_client.request('GET', myurl)\n response = http_client.getresponse()\n json_response = response.read().decode('utf-8')\n js = json.loads(json_response)\n res = ''\n for result in js['trans_result']:\n res += result['dst'] + '\\n'\n return res\n except Exception as e:\n print(e)\n return None\n finally:\n if http_client:\n http_client.close()\n\ndef paper_format(content):\n \"\"\" remove all \"- \" formatted string \"\"\"\n content = re.sub('-\\s', '', content)\n return content" } ]
5
uniphil/IATI-Standard-SSOT
https://github.com/uniphil/IATI-Standard-SSOT
6fa92c31308131ca49a44a78ce9588c2284740a9
412cb61922894460f18524231a623c9b3e1224b6
0756094208dd451876e8da854265beaa5d018a17
refs/heads/master
2021-01-22T13:12:35.806437
2014-01-23T14:01:07
2014-01-23T14:01:07
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7550514936447144, "alphanum_fraction": 0.7572305798530579, "avg_line_length": 37.6080322265625, "blob_id": "3d5ca8c91b29443f65ad9de1043154a1107a9bbb", "content_id": "a50dcf6f7b299f6d6c1217c94e742f75829eb41a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 20259, "license_type": "no_license", "max_line_length": 156, "num_lines": 523, "path": "/meta-docs/index.rst", "repo_name": "uniphil/IATI-Standard-SSOT", "src_encoding": "UTF-8", "text": "A Single Source of Truth for the IATI Standard\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nThis is a work in progress, and any suggested changes and improvements\nare welcome.\n\nKnown tasks/issues are listed with the prefix TODO:\n\n\n.. contents::\n\nIntroduction\n============\n\n\nMotivation\n----------\n\nBy having a Single Source of Truth (SSOT) for the IATI standard, we create one\nset (or sets) of documents that define the standard at any point in\ntime.\n\nThis is a 'backend' system. Most people will not know, or need to know\nhow it works, or of its existence. Most people will access the\ninformation held in the SSOT via other systems (websites, documents,\napplications, etc)\n\nThose people that want to know, or are interested in it, may gain\nfurther knowledge of how it works and exploit that for their own use.\n(e.g. developers may go directly to the SSOT to fetch data for their\napplications)\n\nWe (IATI) should build our public facing services from the SSOT (eating\nour own dog food). This means we can tell a consistent story across\nplatforms, and it allows us to know that it is working.\n\nThe SSOT will alter and change overtime. It is important therefore that\nwe are able to track changes over time, and therefore see 'snapshots' of\nthe IATI Standard. These will normally be linked with versions of the\nIATI Standard, but not always.ย (see `Changes After Release`_).\n\nย \n\nBundling up the standard\n------------------------\n\nTo create or use data published to a version of the IATI Standard, you\nneed to know all about the SSOT that exists/existed for that version.\nFor a single source of truth to work with the IATI Standard you will\nneed to maintain a collection of documentation, guidance, XML schema,\ncodelists, and enforcement rules that define the standard at any one\ntime.\n\nThe least complicated way to achieve this is to 'bundle' versions of the\nstandard, so that everything for one version can be found in one place.\n\nAbout GitHub\n------------------\n\nAt itโ€™s core, `GitHub <https://github.com/>`__ย is a hosted service for\nthe git version control software. Everything on github is stored as flat\nfiles, but these can be in whatever format. Files are stored in\nโ€˜repositoriesโ€™. Versioning on GitHub is at on a per repository basis\n(not per file). 
Different types of versions are tracked via commits,\nbranches, and tags.\n\nPros and Cons of GitHub to store our Single Source of Truth\n-----------------------------------------------------------\n\nPros:\n\n- Can store all the data in one place\n- Very structured versioning information\n- Makes it easy to pull a copy of the entire SSOT (all versions etc.),\n provided you have git knowledge\n- Git (and GitHub) allow distributed teams to collaborate easily\n- Git workflows encourage independent changes that are then reviewed\n before publication.\n- Documentation is not in 'Word'.\n- Changes can be viewed publicly as we are working on them - this may\n allow us to reduce the time between upgrades.\n- It allows us to introduce non-functional changes between release\n cycles\n\nCons:\n\n- 'Single source' is split across multiple git repositories (however,\n we can use submodules to link these)\n- GitHub, while widely known and used in developer circles, is not\n necessarily something non-techies would be comfortable with. As this\n is a backend system this is not necessarily a large con.\n- Editing documentation for example would have to be done on the web\n (on GitHub, via its interface) or editors would need to learn how to\n check out documents locally, work on them and submit them for review\n- Documentation is not in 'Word'.\n- A simple markup language would be used for creating human-readable\n documents\n- It is easy, perhaps too easy, to make changes without thinking them\n through\n- Reliance on GitHub (the \"what if\" they go down/sell out etc.)\n\nGitHub Architecture\n===================\n\nSubmodules\n----------\n\n|image0|\n\nGit submodules are a way of including one git repository within another.\nOn GitHub they are indicated by the 'folder within a folder' icon.\nSubmodules track a specific commit on the remote repository. On GitHub,\nclicking on the name takes you to the general repository page, whereas\nclicking on the commit hash (f1b87ec etc.) takes you to the exact\ncommit.\n\nWe use git submodules to include all the other SSOT repositories into a\nparent IATI-Standard-SSOT repository. Since submodules track specific\ncommits, checking out an old version branch of the IATI-Standard-SSOT\nrepository will also pull in the old versions of the other repositories.\n\nRepositories\n------------\n\nThe IATI Standard is, in practice, a collection of schemas, rulesets,\ncodelists and explanatory text/extra documentation. These are all\nbrought together as submodules of the IATI-Standard-SSOT repository, as\ndescribed above.\n\nIATI-Standard-SSOT\n~~~~~~~~~~~~~~~~~~\n\n`https://github.com/IATI/IATI-Standard-SSOT <https://github.com/IATI/IATI-Standard-SSOT>`__ has\nfour submodules:\n\n- `https://github.com/IATI/IATI-Extra-Documentation <https://github.com/IATI/IATI-Extra-Documentation>`__\n- `https://github.com/IATI/IATI-Schemas <https://github.com/IATI/IATI-Schemas>`__\n- `https://github.com/IATI/IATI-Codelists <https://github.com/IATI/IATI-Codelists>`__\n- `https://github.com/IATI/IATI-Rulesets <https://github.com/IATI/IATI-Rulesets>`__\n\nIATI-Extra-Documentation\n~~~~~~~~~~~~~~~~~~~~~~~~\n\nThis holds any extra text documentation (i.e. what would have been on the\nwiki previously). 
This has been scraped (a one-off process) from the\nwiki and converted to reStructuredText format, in order to work with the\nnew `documentation generation`_ process.\n\nAlthough reStructuredText is different from wiki markup in many ways,\nthe main principle is the same - it is very human-readable and writeable\nmarkup that should be quite straightforward for non-programmers to\nwrite.\n\nThe documentation is structured such that each file is named after the\nxml element it describes, with subfolders for nested elements. This\nmeans that no extra mapping is needed to combine this documentation with\nthe information from the schema.\n\nIATI-Schemas\n~~~~~~~~~~~~\n\nThe schemas repository is unchanged from what it was previously.\n\nIATI-Codelists\n~~~~~~~~~~~~~~\n\nThe codelists in\n`https://github.com/IATI/IATI-Codelists <https://github.com/IATI/IATI-Codelists>`__ have\nbeen downloaded from the data.aidinfolabs.org site. The structure of the\nXML files was then updated to be more consistent, and conform to a\n`codelist\nschema <https://github.com/IATI/IATI-Codelists/blob/master/codelist.xsd>`__.\nThis new structure is not compatible with the old one - to provide\nbackwards compatibility with tools that expect the old codelist\nstructure, we could have our 'API' do this conversion, or just create a\nstatic mirror of the old codelists and deprecate it.\n\nSince codelists are now versioned as part of the Single Source of Truth,\nthe @version and @date-last-modified attributes are now redundant, so\nwill be removed.\n\n TODO: Discuss changes to codelists\n\nA machine-readable `mapping\nfile <https://github.com/IATI/IATI-Codelists/blob/master/mapping.xml>`__ describes\nwhat elements and attributes use which codelists. (This does not\ncurrently exist). It was not practical to do this using file naming\nconventions since a codelist can be used in several places.\n\nIATI-Rulesets\n~~~~~~~~~~~~~\n\nThe rulesets\n`https://github.com/IATI/IATI-Rulesets/blob/master/rulesets/standard.json <https://github.com/IATI/IATI-Rulesets/blob/master/rulesets/standard.json>`__ have\nbeen created afresh, in a new, easy-to-parse JSON format. This is very\nmuch a work in progress, and more information can be found at `<https://github.com/IATI/IATI-Rulesets/blob/master/README.rst>`__\n\nThere are `Python <https://github.com/IATI/IATI-Rulesets/blob/master/testrules.py>`__ and\n`PHP <https://github.com/IATI/IATI-Rulesets/blob/master/testrules.php>`__ libraries\nfor testing against these rulesets, with the idea that it should be\neasy to write one for any other programming language.\n\nThis is a drastic change from what we had previously, but I believe it\nis appropriate since a) the previous machine rulesets weren't official,\nand b) a JSON file containing xpath like this can be used in many\nprogramming languages (two examples above), unlike the constraints of\nthe current xquery.\n\nThe current rulesets in the draft SSOT have been created based on the previous\ncompliance tests. 
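\n\nAs a purely illustrative sketch of how a consumer could check such a rule\n(this mirrors the xpath-keyed shape of rule names and cases with paths that\ngen.py reads, but it is not the API of the testrules libraries above, and\nthe rule and filename shown here are made up)::\n\n    from lxml import etree\n\n    # hypothetical rule: every iati-activity must contain an iati-identifier\n    rulesets = {'//iati-activity': {'atleast_one': {'cases': [{'paths': ['iati-identifier']}]}}}\n\n    doc = etree.parse('activities.xml')  # made-up input file\n    for context, rules in rulesets.items():\n        for element in doc.xpath(context):\n            for case in rules['atleast_one']['cases']:\n                # the rule fails if none of the listed paths match\n                assert any(element.xpath(p) for p in case['paths'])\n\n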
However, the current rulesets are not necessarily what we want going\nforward, and should be split out into those rules that are definitely\npart of the standard (a start date is by definition before an end date)\nand those that should be moved into optional files.\n\n TODO: Decide what rules we should have.\n\nGenerated Repositories\n----------------------\n\nSome of the SSOT repositories can be used to generate extra text/data,\nwhich might also be useful to track in git repositories:\n\n- `<https://github.com/Bjwebb/IATI-Codelists-Output>`__\n- XML codelists converted into json and csv, and also an `automatically\n generated xml list of\n codelists <https://raw.github.com/Bjwebb/IATI-Codelists-Output/master/codelists.xml>`__\n\n TODO: the repository and link above are still under Bjwebb, not IATI\n\n- generated from\n `https://github.com/IATI/IATI-Codelists <https://github.com/IATI/IATI-Codelists>`__ (see\n `Machine Consumption`_)\n- This would form the basis of/be the new Codelist API\n\n- We could also have a GitHub repository for the generated\n documentation\n\nThese could be generated automatically using GitHub webhooks. They\nshould be tagged and branched in the same way as the source\nrepositories.\n\nThe advantages of using GitHub repositories for this are:\n\n- Allows people to download machine-readable data in their preferred\n format e.g. get all the codelists in json format\n- Allows people who are not familiar with our source formats (xml,\n reStructuredText) to easily view what the changes in the output\n (json, html) are\n\n- Similarly allows us to more easily keep track of changes to\n documentation pages, without having to check all relevant source\n repositories\n\n- Allows us to easily keep track of the generated text/data for\n different versions by using the same branches and tags as the source\n repositories\n\n(There may be other generated data repositories we have, unrelated to\nthe SSOT, such as `<https://github.com/Bjwebb/IATI-Data-Snapshot>`__)\n\nBranches\n--------\n\nMain branches:\n\n- master - the main development branch, where development for the next\n version of the standard takes place\n- version\\_1.03 etc. - a branch for each version.\n\nThese branches should be consistent across all the SSOT.\n\nCurrently only the master branch exists, as it's not clear what version\nof the standard we will target initially with the SSOT (1.03, or should\nit wait for 1.04). Also, previous versions of the standard are not yet\nin the SSOT.\n\nThere are also feature branches that are specific to individual\nrepositories. These are for any changes that need reviewing/testing\nbefore they are merged into one of the main branches.\n\n TODO: determine when feature branches should be used and when/if\n committing directly to the main branches is appropriate.\n\nTags\n----\n\nTags starting with a v refer to a released version of the IATI Standard\n(e.g. v1.03 or v1.03.1). This tagging scheme is already `used for the\nschemas <https://github.com/IATI/IATI-Schemas/releases>`__. Tags should\nbe consistent across all of the repositories, and the submodules should\npoint at commits with the same tag. We would need to manage this as part\nof our internal workflow/policy.\n\nThere are currently no other planned uses for tags, although we could\neasily do so (e.g. 
to tag a snapshot on a particular date).\n\nGitHub Issues\n-------------\n\nGitHub Issues are used for people to suggest changes to the IATI\nStandard.\n\n TODO: Think about how this fits in with our current use of the\n knowledge base\n\nIssues are categorised using Milestones and Labels - only people with\npush access can add these. This means there is a task for one of us to\nadd the appropriate labels and milestones for each issue that is added.\nThere are advantages to this, since we know that every label and\nmilestone has been determined by us.\n\nIt is also possible for us to edit the title of issues, to ensure that\nthey are usefully descriptive for us.\n\nAssignment\n~~~~~~~~~~\n\nIssues can be assigned to the person responsible for carrying out the\nnext action on them. A list of all issues assigned to your logged-in\nuser can be seen at\n`https://github.com/organizations/IATI/dashboard/issues/assigned <https://github.com/organizations/IATI/dashboard/issues/assigned>`__\n\nMilestones\n~~~~~~~~~~\n\nIssues can be grouped into Milestones, which will be used to build the\nroadmap for the next release of the IATI Standard. Each GitHub\nrepository will have a milestone for each new version of the standard,\nconsistently named. Only issues that we have decided to include in a\ngiven version of the standard should be tagged with that milestone.\n\nLabels\n~~~~~~\n\nDefault GitHub labels:\n\n- wontfix - issues that have been closed, because we have rejected the\n suggested change\n- bug - small changes that we want to fix ASAP, outside of the normal\n release process (such as typos)\n- enhancement - for improvements to the standard, rather than things\n that are currently 'broken'\n- duplicate - for duplicate issues - only one of the issues should be\n tagged with this, and closed\n\nCustom labels - these should be added:\n\n- Activity XML - for issues relating to the activity xml\n- Organisation XML - for issues relating to the organisation xml\n\n TODO: Decide if these custom labels make sense, then add them to all SSOT repositories.\n\nIssues in correct repository\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nIssues should be reported to the `repository <Repositories_>`_ that\nis most relevant - e.g. codelists issues in the IATI-Codelists\nrepository. If an issue is not in the most relevant place, we should\nmove it.\n\nGitHub does not allow issues to be moved automatically, but we will\nmanually move any issues that have been reported in the wrong\nrepository. This would involve labelling the old issue as a duplicate and\nclosing it, and adding a link to the newly opened issue.\n\nGitHub Team\n-----------\n\nA GitHub Team will be set up for the Single Source of Truth, with push\naccess to each of the repositories. Members of this team will also be\nable to have issues assigned to them.\n\nChanges After Release\n---------------------\n\nThere is regular pressure on the 'standard' to alter it between upgrade\nreleases. Sometimes this is to do with simple documentation errors,\ncodelist inaccuracies, or even a bug in the schema.\n\nOne way that we try to deal with this is to have regular release cycles;\nthat way, anything that 'can't be changed' need only wait a few (6?)\nmonths to be fixed.\n\nHowever, sometimes this means very minor changes get held up for no good\nreason. The GitHub approach outlined here may give us opportunities.\n\nA functional change is one which changes whether a given IATI XML file\nis considered to validate against the standard. 
This includes validation\nagainst the schema, conformance to rulesets and conformance to\nfunctional codelists.\n\n TODO: Determine which codelists are functional codelists. (This is\n likely to take place as part of a separate piece of work around\n codelists.)\n\nAfter a minor (decimal) release (e.g. 1.03) is tagged, there should be\nno further functional changes to that version's branch (e.g.\nversion\\_1.03). However, we may want to make non-functional changes,\nsuch as correcting documentation and adding to non-functional codelists.\n\nWe could make those changes to the git branches and the website, and not\nmake a new release, and tell people to check GitHub to see what has\nchanged since the release. Or, we could have patch releases for such changes\n(e.g. 1.03.1). Alternatively, we could do a mixture of both, depending\non how small the change is, and whether we want to announce it to\neveryone (e.g. correcting a typo vs. drastically rewording\ndocumentation).\n\n TODO: Decide approach on this.\n\nIf used, patch releases would always be non-functional, so SHOULD NOT be\nlisted in the @version attribute, in order to reduce the complexity for\ndata consumers. We should have explicit guidance about what values the\n@version attribute should contain.\n\n TODO: Write up 'Tracking Changes' - a guide for users/followers.\n\nUpdating the SSOT\n=================\n\nSince the SSOT is hosted on GitHub, it can be updated by developers\nusing git to pull/push from their local machine, or through GitHub's web\ninterface for editing.\n\nEditing through the web interface\n---------------------------------\n\nAnyone on the IATI Tech team who wants to edit the SSOT should be\ngranted push access to the `GitHub Team`_, on the\nunderstanding that they do not edit directly on any of the main\nbranches, but create a `feature branch <Branches_>`_ for the\nchange they are suggesting. A pull request can then be submitted to the\nrelevant branch.\n\nIt is also possible (for IATI Tech team members and anyone else) to fork \nthe SSOT and edit their own copy to incorporate the desired changes. A \npull request can then be submitted back to the relevant repository within\nthe SSOT. The details of including the changes can be discussed via \nGitHub's pull request queue.\n\n\nSuggested improvements through GitHub issues\n--------------------------------------------\n\nAdditionally, anyone can suggest changes by creating a GitHub\nissue in the relevant repository. 
See the `GitHub\nIssues`_ section for more.\n\nUsing the SSOT\n==============\n\n|image1|\n\nDocumentation generation\n------------------------\n\nThe documentation on\n`http://iatistandard.org/ <http://iatistandard.org/>`__ will be\ngenerated from the Single Source of Truth.\n\nText generated from the machine-readable sources is combined with the\nextra documentation, which is then fed into the Sphinx documentation tool.\nThe scripts for doing this are in the IATI-Standard-SSOT repository, and\nthe full technical instructions are in the\n`README <https://github.com/IATI/IATI-Standard-SSOT/blob/master/README.rst#building-the-documentation>`__ there.\n\nThis automated process for documentation generation makes it easy to\ngenerate the documentation in other formats, such as a single-page html\nfile, or a pdf.\n\nMachine Consumption\n-------------------\n\nMoving forward, the preferred method of fetching machine-readable\ninformation from the Single Source of Truth will be via\nGitHub. The advantage of this is that, via\nbranching, there will be a consistent way for people to query different\nversions of the standard, without any extra effort on our side. (The\ndownside is that GitHub doesn't look as authoritative as hosting it on\niatistandard.org ourselves - one compromise would be redirects from\n`http://iatistandard.org/api/something <http://iatistandard.org/api/something>`__ to\nthe appropriate GitHub page)\n\n TODO: Discuss.\n\nIn some cases, we will make the data easier to consume automatically by\npreprocessing it, and pushing it to 'Generated Data' repositories. See\nthe `Generated Repositories`_ section for more.\n\nCase Studies\n------------\n\nThe Validator\n~~~~~~~~~~~~~\n\nCurrently the validator only validates against the schema. Eventually,\nthe validator should be able to check files against all machine-readable\nparts of the SSOT - currently this is codelists and rulesets in addition\nto the schema.\n\nPlan\n====\n\nMuch of the technical backend of the Single Source of Truth is set up\nnow. The big next step is to work on the human processes of managing the\ngit repositories properly etc.\n\nWe can do this in tandem with our work on 1.04 - `<http://dev.iatistandard.org>`__\nwill be set up to build from the Single Source of Truth, and this will\nbecome the new website when 1.04 is released.\n\n\nCurrent versions of the standard\n--------------------------------\n\nDue to some of the changes proposed, it is cumbersome to move old\nversions of the Standard into the Single Source of Truth. I propose\ncontinuing to maintain these separately, using our current archiving\nprocess with WordPress.\n\n\n.. |image0| image:: images/image00.png\n.. |image1| image:: images/image01.png\n" }, { "alpha_fraction": 0.6997900605201721, "alphanum_fraction": 0.7018894553184509, "avg_line_length": 24.96363639831543, "blob_id": "705eb4901afdc5968f9e732ff28cd5657bddb669", "content_id": "704e4dcc2497eec3a8ba91a4d8aa47fb1c2f2163", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1429, "license_type": "no_license", "max_line_length": 220, "num_lines": 55, "path": "/README.rst", "repo_name": "uniphil/IATI-Standard-SSOT", "src_encoding": "UTF-8", "text": "IATI Standard SSOT\n==================\n\nThis is the main GitHub repository for the IATI Standard Single Source of Truth (SSOT). 
For more detailed information about the SSOT, please see https://github.com/IATI/IATI-Standard-SSOT/blob/master/meta-docs/index.rst \n\nThis repository is currently under development, and does not necessarily represent any current or future version of the IATI standard.\n\n\nBuilding the documentation\n==========================\n\nRequirements:\n\n* Git\n* Unix-based setup (e.g. Linux, Mac OS X) with bash etc.\n* Python 2.7\n* python-virtualenv\n* gcc\n* Development files for libxml and libxslt e.g. libxml2-dev, libxslt-dev\n\nFetch the source code:::\n\n git clone https://github.com/IATI/IATI-Standard-SSOT.git\n\nPull in the git submodules:::\n \n git submodule init\n git submodule update\n\nSet up a virtual environment:\n\n.. code-block:: bash\n\n # Create a virtual environment\n virtualenv pyenv\n\n # Activate the virtual environment\n # This must be repeated each time you open a new shell\n source pyenv/bin/activate\n\n # Install Python requirements\n pip install -r requirements.txt\n \nBuild the documentation:::\n\n ./gen.sh\n\nThe built documentation is now in ``docs/_build/html`` \n\n\nEditing the documentation\n=========================\n\nMake any changes in ``IATI-Extra-Documentation``, as the ``docs`` directory is generated from\nthis and other sources each time ``./gen.sh`` is run. \n" }, { "alpha_fraction": 0.6632173061370850, "alphanum_fraction": 0.6735653877258301, "avg_line_length": 30.235294342041016, "blob_id": "1c8e1c10ee5a6584bca729c0832e1b00476f91fc", "content_id": "36980e5c764830eb1249efa156e19fe0404562de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1063, "license_type": "no_license", "max_line_length": 204, "num_lines": 34, "path": "/gen.sh", "repo_name": "uniphil/IATI-Standard-SSOT", "src_encoding": "UTF-8", "text": "#!/bin/bash\nset -o nounset\n\n# Remove docs (the output directory), and recreate directory structure\nrm -r docs\nmkdir docs\ncd IATI-Extra-Documentation/ || exit 1\nfind -type d -exec mkdir ../docs/{} \\;\ncd .. || exit 1\n\n# Generate csvs etc. from codelists\ncd IATI-Codelists || exit 1\n./gen.sh || exit 1\ncd .. || exit 1\nmkdir docs/_static\ncp -r IATI-Codelists/out/ docs/_static/codelists/\n\n# Generate documentation from the Schema and Codelists etc\npython gen.py || exit 1\n\n# Append Extra-Documentation to the documentation we've just generated\ncd IATI-Extra-Documentation || exit 1\nfind \\( ! -path '*/.*' \\) -follow -type f -exec bash -c 'cat {} >> ../docs/{}' \\;\ncd .. || exit 1\n\n# Build the documentation (generate html etc.) using sphinx\ncd docs/en || exit 1\nln -s ../_static .\n(echo '.. 
raw:: html'; echo ''; curl \"https://raw.github.com/okfn/iati-datastore/master/iati_datastore/iatilib/frontend/docs/index.md\" | pandoc -f markdown_github -t html | sed 's/^/ /') > datastore.rst\nmake html\ncd ../../ || exit 1\ncd docs/fr || exit 1\nln -s ../_static .\nmake html\n\n" }, { "alpha_fraction": 0.5684210658073425, "alphanum_fraction": 0.5684210658073425, "avg_line_length": 20.538461685180664, "blob_id": "dbc115c2a302811e060fc3fbaabe35ce49a1de89", "content_id": "f394a0a5ef84e20ff936a5e159210d1fb685a155", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 285, "license_type": "no_license", "max_line_length": 59, "num_lines": 13, "path": "/templates/en/codelist.rst", "repo_name": "uniphil/IATI-Standard-SSOT", "src_encoding": "UTF-8", "text": "{{fname}}\n{{underline}}\n\n\n{{description}}\n\nDownload this codelist:\n`XML <../_static/codelists/xml/{{fname}}.xml>`_\n`CSV <../_static/codelists/csv/{{lang}}/{{fname}}.csv>`_\n`JSON <../_static/codelists/json/{{lang}}/{{fname}}.json>`_\n\n.. csv-table::\n :file: ../../../{{csv_file}}\n \n" }, { "alpha_fraction": 0.44117647409439087, "alphanum_fraction": 0.6470588445663452, "avg_line_length": 10, "blob_id": "b29dee263bcf7b527fbf40084a44e25f18878c74", "content_id": "31880cba797fab8e109992c849eca882433503bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 34, "license_type": "no_license", "max_line_length": 13, "num_lines": 3, "path": "/requirements.txt", "repo_name": "uniphil/IATI-Standard-SSOT", "src_encoding": "UTF-8", "text": "Sphinx==1.1.3\nlxml==3.2.3\nJinja2\n\n" }, { "alpha_fraction": 0.5680851340293884, "alphanum_fraction": 0.5706753134727478, "avg_line_length": 42.063743591308594, "blob_id": "d1ed8c90b22aa9051d65be2b8342a9d9ba4a7924", "content_id": "6ca9e7edebace272fcd4cc9332e5cb72418f48da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10810, "license_type": "no_license", "max_line_length": 151, "num_lines": 251, "path": "/gen.py", "repo_name": "uniphil/IATI-Standard-SSOT", "src_encoding": "UTF-8", "text": "from lxml import etree as ET\nimport os\nimport textwrap\nimport json\nimport jinja2\n\nlanguages = ['en','fr']\n\n# Namespaces necessary for opening schema files\nnamespaces = {\n 'xsd': 'http://www.w3.org/2001/XMLSchema'\n}\n# Attributes that have documentation that differs from that in the schema\ncustom_attributes = {\n 'xml:lang': 'ISO 2 letter code specifying the language of text in this element.'\n}\n\ndef human_list(l):\n \"\"\"\n Returns a human-friendly version of a list. Currently separates list items\n with commas, but could be extended to insert 'and'/'or' correctly.\n\n \"\"\"\n return ', '.join(l)\n\n# TODO - This function should be moved into the IATI-Rulesets submodule\nrulesets = json.load(open('./IATI-Rulesets/rulesets/standard.json'))\ndef ruleset_text(path):\n \"\"\" Return text describing the rulesets for a given path (xpath) \"\"\"\n out = ''\n for xpath, rules in rulesets.items():\n if xpath.startswith('//'):\n try:\n reduced_path = path.split(xpath[2:]+'/')[1]\n for rule in rules:\n cases = rules[rule]['cases']\n for case in cases:\n if 'paths' in case:\n for case_path in case['paths']:\n # Don't forget [@ ]\n if case_path == reduced_path:\n # copy the list, so the shared ruleset data is not mutated mid-iteration\n other_paths = list(case['paths'])\n other_paths.remove(case_path)\n if rule == 'only_one':\n out += 'This element must be present only once. 
'\n if other_paths:\n out += 'This element must not be present if {0} are present. '.format(human_list(other_paths))\n elif rule == 'atleast_one':\n if other_paths:\n out += 'Either this element or {0} must be present. '.format(human_list(other_paths))\n else:\n out += 'This element must be present. ' \n else: print case_path, rule, case['paths'] \n break\n except IndexError:\n pass\n if out != '':\n out += '(`see standard.json <https://github.com/IATI/IATI-Rulesets/blob/master/rulesets/standard.json>`_)'\n return out\n\n\n# TODO - This function should be moved into the IATI-Codelists submodule\ncodelist_mappings = ET.parse('./IATI-Codelists/mapping.xml').getroot().findall('mapping')\ndef match_codelist(path):\n \"\"\"\n Returns the name of the codelist that the given path (xpath) should be on.\n If there is no codelist for the given path, None is returned.\n\n \"\"\"\n for mapping in codelist_mappings:\n if mapping.find('path').text.startswith('//'):\n #print mapping.find('path').text.strip('/'), path\n if mapping.find('path').text.strip('/') in path:\n return mapping.find('codelist').attrib['ref']\n else:\n pass # FIXME\n return\n\n\n\nclass Schema2Doc(object):\n \"\"\"\n Class for converting an IATI XML schema to documentation in the\n reStructuredText format.\n\n \"\"\"\n def __init__(self, schema, lang):\n \"\"\"\n schema -- the filename of the schema to use, e.g.\n 'iati-activities-schema.xsd'\n\n \"\"\"\n self.tree = ET.parse(\"./IATI-Schemas/\"+schema)\n self.tree2 = ET.parse(\"./IATI-Schemas/iati-common.xsd\")\n self.jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader('templates'))\n self.lang = lang\n\n def get_schema_element(self, tag_name, name_attribute):\n \"\"\"\n Returns the specified element from the schema.\n\n tag_name -- the name of the tag in the schema, e.g. 'complexType'\n name_attribute -- the value of the 'name' attribute in the schema, ie.\n the name of the element/type etc. being described,\n e.g. 
iati-activities\n\n \"\"\"\n schema_element = self.tree.find(\"xsd:{0}[@name='{1}']\".format(tag_name, name_attribute), namespaces=namespaces)\n if schema_element is None:\n schema_element = self.tree2.find(\"xsd:{0}[@name='{1}']\".format(tag_name, name_attribute), namespaces=namespaces)\n return schema_element\n\n def output_docs(self, element_name, path, element=None):\n \"\"\"\n Output documentation for the given element, and its children.\n\n If element is not given, we try to find it in the schema using its\n element_name.\n\n path is the xpath of the context where this element was found; for the\n root context, this is the empty string \n\n \"\"\"\n if element is None:\n element = self.get_schema_element('element', element_name)\n if element is None:\n return\n\n url = element.base.replace('./IATI-Schemas/', 'https://github.com/IATI/IATI-Schemas/blob/master/') + '#L' + str(element.sourceline)\n try:\n os.makedirs('docs/'+self.lang+'/'+path)\n except OSError: pass\n with open('docs/'+self.lang+'/'+path+element_name+'.rst', 'w') as fp:\n t = self.jinja_env.get_template(self.lang+'/schema_element.rst')\n fp.write(t.render(\n element_name=element_name,\n element_name_underline='='*len(element_name),\n element=element,\n path=path,\n url=url,\n schema_documentation=textwrap.dedent(element.find(\".//xsd:documentation\", namespaces=namespaces).text),\n ruleset_text=ruleset_text(path+element_name),\n extended_types=element.xpath('xsd:complexType/xsd:simpleContent/xsd:extension/@base', namespaces=namespaces),\n attributes=self.attribute_loop(element),\n textwrap=textwrap, match_codelist=match_codelist, ruleset_text_=ruleset_text, #FIXME\n childnames = self.element_loop(element, path)\n ))\n\n def element_loop(self, element, path):\n \"\"\"\n Loop over the children of a given element, and run output_docs on each.\n\n Returns the names of the child elements.\n\n \"\"\"\n children = ( element.findall('xsd:complexType/xsd:choice/xsd:element', namespaces=namespaces)\n + element.findall(\"xsd:complexType/xsd:all/xsd:element\", namespaces=namespaces) )\n childnames = []\n for child in children:\n a = child.attrib\n if 'name' in a:\n self.output_docs(a['name'], path+element.attrib['name']+'/', child)\n childnames.append(a['name'])\n else:\n self.output_docs(a['ref'], path+element.attrib['name']+'/')\n childnames.append(a['ref'])\n return childnames\n\n def attribute_loop(self, element):\n \"\"\"\n Returns a list containing a tuple for each attribute the given element\n can have.\n\n The format of the tuple is (name, type, documentation)\n\n \"\"\"\n #if element.find(\"xsd:complexType[@mixed='true']\", namespaces=namespaces) is not None:\n # print_column_info('text', indent)\n \n a = element.attrib\n type_attributes = []\n type_attributeGroups = []\n if 'type' in a:\n complexType = self.get_schema_element('complexType', a['type'])\n if complexType is None:\n print 'Notice: No attributes for', a['type']\n else:\n type_attributes = (\n complexType.findall('xsd:attribute', namespaces=namespaces) +\n complexType.findall('xsd:simpleContent/xsd:extension/xsd:attribute', namespaces=namespaces)\n )\n type_attributeGroups = (\n complexType.findall('xsd:attributeGroup', namespaces=namespaces) +\n complexType.findall('xsd:simpleContent/xsd:extension/xsd:attributeGroup', namespaces=namespaces)\n )\n\n group_attributes = []\n for attributeGroup in ( \n element.findall('xsd:complexType/xsd:attributeGroup', namespaces=namespaces) +\n 
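# also gather attributeGroups referenced from a simpleContent extension,\n # so their attributes are documented like directly attached ones\n 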
element.findall('xsd:complexType/xsd:simpleContent/xsd:extension/xsd:attributeGroup', namespaces=namespaces) +\n type_attributeGroups\n ):\n group_attributes += self.get_schema_element('attributeGroup', attributeGroup.attrib['ref']).findall('xsd:attribute', namespaces=namespaces)\n\n out = []\n for attribute in (\n element.findall('xsd:complexType/xsd:attribute', namespaces=namespaces) +\n element.findall('xsd:complexType/xsd:simpleContent/xsd:extension/xsd:attribute', namespaces=namespaces) +\n type_attributes + group_attributes\n ):\n if 'ref' in attribute.attrib:\n if attribute.get('ref') in custom_attributes:\n out.append((attribute.get('ref'), '', custom_attributes[attribute.get('ref')]))\n continue\n attribute = self.get_schema_element('attribute', attribute.get('ref'))\n doc = attribute.find(\".//xsd:documentation\", namespaces=namespaces)\n if doc is not None:\n out.append((attribute.get('name'), attribute.get('type'), doc.text))\n else:\n print 'Ack', ET.tostring(attribute)\n return out\n\n\ndef codelists_to_docs(lang):\n dirname = 'IATI-Codelists/out/csv/'+lang\n try:\n os.mkdir('docs/'+lang+'/codelists/')\n except OSError: pass\n\n for fname in os.listdir(dirname):\n csv_file = os.path.join(dirname, fname)\n if not fname.endswith('.csv'): continue\n fname = fname[:-4]\n underline = '='*len(fname)\n xml = ET.parse('IATI-Codelists/combined-xml/{0}.xml'.format(fname))\n description = ''.join(xml.getroot().xpath('/codelist/@description'))\n with open('docs/{0}/codelists/{1}.rst'.format(lang, fname), 'w') as fp:\n jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader('templates'))\n t = jinja_env.get_template(lang+'/codelist.rst')\n fp.write(t.render(csv_file=csv_file, fname=fname, underline=underline, description=description, lang=lang))\n\n\nif __name__ == '__main__':\n for language in languages:\n activities = Schema2Doc('iati-activities-schema.xsd', lang=language)\n activities.output_docs('iati-activities', '')\n\n orgs = Schema2Doc('iati-organisations-schema.xsd', lang=language)\n orgs.output_docs('iati-organisations', '')\n \n codelists_to_docs(lang=language)\n\n" }, { "alpha_fraction": 0.6102719306945801, "alphanum_fraction": 0.611027181148529, "avg_line_length": 22.64285659790039, "blob_id": "e32fe61ea7bc44dde1523d1b42320afacb929c07", "content_id": "d112b2c0f5d1b53b176987450466480c3ce9d240", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1324, "license_type": "no_license", "max_line_length": 161, "num_lines": 56, "path": "/templates/en/schema_element.rst", "repo_name": "uniphil/IATI-Standard-SSOT", "src_encoding": "UTF-8", "text": "{{element_name}}\n{{element_name_underline}}\n\n``{{path}}{{element_name}}``\n\n.. 
index::\n single: {{element_name}}\n\nDRAFT\n-----\n\n\nDeveloper tools\n~~~~~~~~~~~~~~~\n\n`View this element in the schema source <{{url}}>`_\n\nFrom the schema\n~~~~~~~~~~~~~~~\n\n{{schema_documentation}}\n\n{{ruleset_text}}\n\n{% for extended_type in extended_types %}{% if extended_type.startswith('xsd:') %}The text in this element should be of type {{extended_type}}.\n\n\n{% endif %}{% endfor %}{% if element.get('type') and element.get('type').startswith('xsd:') %}The text in this element should be of type {{element.get('type')}}.\n\n\n{% endif %}{% if attributes %}Attributes\n~~~~~~~~~~\n\n{% for attribute, attribute_type, text in attributes %}@{{attribute}}\n {{ textwrap.dedent(text).strip().replace('\\n','\\n ') }}\n{% set codelist = match_codelist(path+element_name+'/@'+attribute) %}{% if attribute_type %} \n This value should be of type {{attribute_type}}.\n\n{% endif %}{% if codelist %} \n This value should be on the :doc:`{{codelist}} codelist </codelists/{{codelist}}>`.\n\n{% endif %} \n \n{{ ruleset_text_(path+element_name+'/@'+attribute) }}{% endfor %}\n\n{% endif %}{% if childnames %}\nSubelements\n~~~~~~~~~~~\n\n.. toctree::\n :titlesonly:\n :maxdepth: 1\n\n{% for childname in childnames %} {{element_name}}/{{childname}}\n{%endfor%}\n{% endif %}\n" } ]
7
shobhit-nigam/airbus_happy
https://github.com/shobhit-nigam/airbus_happy
365af3bc202b9cab1e51b2e6bce79b2706f1b91e
a66aa2d077fdb766b3645398ff81078f462f61b5
dbabc66b886a9aa337597651b4650a7978d2d253
refs/heads/main
2023-02-16T11:18:12.691402
2021-01-18T03:21:57
2021-01-18T03:21:57
327,232,657
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.5978260636329651, "alphanum_fraction": 0.5978260636329651, "avg_line_length": 14.5, "blob_id": "900eca748136b9cf40c97236feb0da0c10d37e52", "content_id": "670eae259d342a054e74280ad1b83ff79d9a032f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 92, "license_type": "no_license", "max_line_length": 24, "num_lines": 6, "path": "/day8/app/pack/welcome.py", "repo_name": "shobhit-nigam/airbus_happy", "src_encoding": "UTF-8", "text": "from .. import sample\n\ndef hello(la):\n print(\"hello \" + la)\n sample.funca()\n return" }, { "alpha_fraction": 0.7037037014961243, "alphanum_fraction": 0.7407407164573669, "avg_line_length": 52.5, "blob_id": "f7913ca3ab3ff2270c2677f2be4b6c717946732b", "content_id": "58c9888c606e9759db362b268724fd972410121e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 108, "license_type": "no_license", "max_line_length": 67, "num_lines": 2, "path": "/README.md", "repo_name": "shobhit-nigam/airbus_happy", "src_encoding": "UTF-8", "text": "## airbus python January 2021 (hap-py)\n### class notes, documents, codes, reference material, assignments. \n" }, { "alpha_fraction": 0.8235294222831726, "alphanum_fraction": 0.8235294222831726, "avg_line_length": 25, "blob_id": "510baab9575ef28e604fa30abf1f1f80d083c615", "content_id": "58448c4e32a928088349ad93e9f9b8efb1d6c6e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 51, "license_type": "no_license", "max_line_length": 25, "num_lines": 2, "path": "/day8/app/__init__.py", "repo_name": "shobhit-nigam/airbus_happy", "src_encoding": "UTF-8", "text": "from .pack import colours\nfrom .pack import welcome" }, { "alpha_fraction": 0.5805470943450928, "alphanum_fraction": 0.5927051901817322, "avg_line_length": 14.666666984558105, "blob_id": "92a598e2d85da0810ba5e3e21afb41bcb797854a", "content_id": "6b0fe4c7325c8c267fb8500c399116d22b033e73", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 329, "license_type": "no_license", "max_line_length": 37, "num_lines": 21, "path": "/day3/syed.py", "repo_name": "shobhit-nigam/airbus_happy", "src_encoding": "UTF-8", "text": "\"\"\"this is syed's colourful module\nwill help us extract some colours\n\"\"\"\n\n\ndef blue():\n \"this gets us blue\"\n print(\"cool blue\")\n\ndef yellow():\n print(\"bright yellow\") \n\ndef green():\n \"dependency: needs blue & yellow\"\n print(\"go green is a mixutre of\")\n blue()\n yellow() \n \n \ndata_a = 33\ndata_b = 44\n" }, { "alpha_fraction": 0.5032410025596619, "alphanum_fraction": 0.5038303136825562, "avg_line_length": 27.762712478637695, "blob_id": "b1709738342ffffc9fd1ef037d5bd52f2021fc3c", "content_id": "396771cf33ae60a1befe98cea1df35359b31bb15", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1697, "license_type": "no_license", "max_line_length": 69, "num_lines": 59, "path": "/day4/files.py", "repo_name": "shobhit-nigam/airbus_happy", "src_encoding": "UTF-8", "text": "\"\"\"library for working with files\n\"\"\"\n\nclass fileops:\n \"\"\"for working with files\"\"\"\n path = \"/Users/shobhit/Desktop/airbus_python/day4/\"\n \n def __init__(self, n):\n self.name = self.path + n\n import os\n \n if not os.path.exists(self.name):\n print(\"check the file name\")\n \n \n def read_col(self, num):\n \"\"\"reads a particular column from the opened text file\n 
the column number is zero-based\"\"\"\n \n with open(self.name, \"r\") as fa:\n stra = fa.read()\n \n lista = stra.splitlines()\n list_col = []\n\n for line in lista:\n temp = line.split()\n if len(temp) > num:\n list_col.append(temp[num])\n return list_col\n \n def read_col_num(self, num):\n \"\"\"reads a particular column from the opened text file,\n keeping only integer values; the column number is zero-based\"\"\"\n \n with open(self.name, \"r\") as fa:\n stra = fa.read()\n \n lista = stra.splitlines()\n list_col = []\n\n for line in lista:\n temp = line.split()\n if len(temp) > num:\n if temp[num].isdigit():\n list_col.append(int(temp[num])) \n return list_col\n \n def convert_to_csv(self):\n self.new_name = self.name.replace(\".txt\", \".csv\")\n with open(self.name, \"r\") as fa:\n stra = fa.read()\n with open(self.new_name, \"w\") as fa:\n strb = stra.replace(\" \", \",\")\n fa.write(strb)\n \n @classmethod\n def change_file(cls, new_path):\n cls.path = new_path\n" }, { "alpha_fraction": 0.5333333611488342, "alphanum_fraction": 0.5333333611488342, "avg_line_length": 14.5, "blob_id": "0ddb14a94414d426202c3a527ea2d96822ee7bee", "content_id": "10f37bb255aa0aa81b61c88b9d3130b60ec97d92", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 30, "license_type": "no_license", "max_line_length": 17, "num_lines": 2, "path": "/day8/app/sample.py", "repo_name": "shobhit-nigam/airbus_happy", "src_encoding": "UTF-8", "text": "def funca():\n print(\"in A\")" }, { "alpha_fraction": 0.6022727489471436, "alphanum_fraction": 0.6136363744735718, "avg_line_length": 24.14285659790039, "blob_id": "24fdabfd9a571f51bae95ac216e0f78d9ea30552", "content_id": "3ad4d4725b1a791d9de856ea4372d238d8593485", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 176, "license_type": "no_license", "max_line_length": 35, "num_lines": 7, "path": "/day8/app/setup.py", "repo_name": "shobhit-nigam/airbus_happy", "src_encoding": "UTF-8", "text": "from setuptools import setup\n\nsetup(name=\"pack\", version=\"2.1\", \n description=\"sample package\",\n url='#', author=\"airbus\",\n license=\"GPL\",\n zip_safe=False)\n" }, { "alpha_fraction": 0.774193525314331, "alphanum_fraction": 0.774193525314331, "avg_line_length": 30, "blob_id": "180296bfc4df0373d556202bfdc8558e04b370e7", "content_id": "a0eb08cfa12746e4c7c2f004fdf8068fe8e72d88", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 62, "license_type": "no_license", "max_line_length": 33, "num_lines": 2, "path": "/day8/app/pack/__init__.py", "repo_name": "shobhit-nigam/airbus_happy", "src_encoding": "UTF-8", "text": "#from .colours import green, blue\n#from .welcome import hello\n" }, { "alpha_fraction": 0.5169491767883301, "alphanum_fraction": 0.5169491767883301, "avg_line_length": 13.5, "blob_id": "7158b1265112863f9d5a7a37300d7a6bca73cd10", "content_id": "68ee15d8e7bcc28c6b9cb4ba1b4e13ba28e63c9f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 118, "license_type": "no_license", "max_line_length": 24, "num_lines": 8, "path": "/day8/app/pack/colours.py", "repo_name": "shobhit-nigam/airbus_happy", "src_encoding": "UTF-8", "text": "def blue():\n print(\"cool blue\")\n \ndef green():\n print(\"go green\")\n \ndef red():\n print(\"scarlet red\")\n " }, { "alpha_fraction": 0.4885057508945465, "alphanum_fraction": 0.4885057508945465, 
"avg_line_length": 20.25, "blob_id": "1ec2e8a6abb7d22682c8f815e5fe0619174b1759", "content_id": "0166d9667e596a112695bac3454a42915b6efb90", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 348, "license_type": "no_license", "max_line_length": 55, "num_lines": 16, "path": "/day3/one.py", "repo_name": "shobhit-nigam/airbus_happy", "src_encoding": "UTF-8", "text": "print(\"this module does so & so\")\n\nif __name__ == \"__main__\":\n def funca():\n print(\"funca when used within this module\")\n \n def funcb():\n print(\"funcb when used within this module\") \n\nelif __name__ == \"one\":\n def funca():\n print(\"funca when someone else uses\") \n \n \n \nfunca()\n \n\n " } ]
10
anhkhoa14592/flash
https://github.com/anhkhoa14592/flash
511f7a826e928bbfd8d50fa8c889c1d2c42ebcea
d33b0540b5b748e94f3362fcebfe3a9645decc19
ffffc15a83e6db02fe1142ee8655bd8ce82d8d21
refs/heads/master
2023-01-30T12:12:59.169357
2020-12-16T03:12:03
2020-12-16T03:12:03
317,742,733
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5926892757415771, "alphanum_fraction": 0.5926892757415771, "avg_line_length": 41.55555725097656, "blob_id": "11e14869e2626d17b8bafb518650ef69a3579bb2", "content_id": "7fe0baafb676603aa8872ee1dd87614053657685", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 383, "license_type": "no_license", "max_line_length": 111, "num_lines": 9, "path": "/README.md", "repo_name": "anhkhoa14592/flash", "src_encoding": "UTF-8", "text": "usage: [-h] [-r REF] [-f FILE] [-o OUTPUT]\n\noptional arguments:\n -h, --help show this help message and exit\n -r REF, --reference REF\n Choose the kind of dictionary as reference. Support [oxf, cambr] only, default is cambr\n -f FILE, --file FILE File name need to perform\n -o OUTPUT, --ouput OUTPUT\n Output result filename\n" }, { "alpha_fraction": 0.5803588628768921, "alphanum_fraction": 0.5873046517372131, "avg_line_length": 29.8511905670166, "blob_id": "c2d49e4a8ffc6510f0153bbb098b7e27ba5f3ca4", "content_id": "d2906e999dba82cc84615ead10530d0046227319", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5190, "license_type": "no_license", "max_line_length": 119, "num_lines": 168, "path": "/main.py", "repo_name": "anhkhoa14592/flash", "src_encoding": "UTF-8", "text": "\"\"\"\nusage: [-h] [-r REF] [-f FILE] [-o OUTPUT]\n\noptional arguments:\n -h, --help show this help message and exit\n -r REF, --reference REF\n Choose the kind of dictionary as reference. Support [oxf, cambr] only, default is cambr\n -f FILE, --file FILE File name need to perform\n -o OUTPUT, --ouput OUTPUT\n Output result filename\n\n\"\"\"\nimport argparse\nimport requests\nimport time\nimport re\n\n# Configuration for request\nDELAY = 0\nURL = \"https://dictionary.cambridge.org/vi/dictionary/english/\"\nHEADER = {\n \"Referer\": \"https://dictionary.cambridge.org/\",\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:80.0) Gecko/20100101 Firefox/80.0\",\n \"X-API-SOURCE\": \"PC\",\n \"Accept-Language\": \"en-US,en;q=0.5\",\n \"Accept-Encoding\": \"gzip, deflate\",\n}\n\n\"\"\"\nRead lines for files\nreturn: list of line\n\"\"\"\n\n\ndef read_file(filename):\n with open(filename) as f:\n lines = f.readlines()\n\n lines = [x.strip() for x in lines] # Remove breakline at the end of each line\n\n return lines\n\n\ndef fix_tense(word, origin, transcription):\n if origin == word:\n return transcription\n\n if word[len(word) - 2:] == \"ed\" or word[len(word) - 1:] == \"d\":\n if transcription[len(transcription) - 1:] in [\"s\", \"p\", \"k\", \"f\", \"สƒ\", \"tสƒ\"]:\n transcription = transcription + \"t\"\n elif transcription[len(transcription) - 2:] in [\"t\", \"d\"]:\n transcription = transcription + \"id\"\n else:\n transcription = transcription + \"d\"\n if word[len(word) - 2:] == \"es\" or word[len(word) - 1:] == \"s\":\n if transcription[len(transcription) - 1:] in [\"p\", \"k\", \"t\", \"f\", \"ฮธ\"]:\n transcription = transcription + \"s\"\n elif transcription[len(transcription) - 2:] in [\"tสƒ\", \"s\", \"สƒ\", \"z\", \"ส’\", \"dส’\"]:\n transcription = transcription + \"iz\"\n else:\n transcription = transcription + \"z\"\n\n # print(word, origin, transcription)\n\n return transcription\n\n\n\"\"\"\nExtract word by word from each line by based on whitespace, then make transcription.\nreturn: list of transcription.\n\"\"\"\n\n\ndef make_transcription(lines):\n transcription_lines = list()\n\n for line in lines:\n list_of_words = line.split(\" 
\")\n transcription_line = \"\"\n for word in list_of_words:\n\n # remove sepecial character in string like : \" , etc.\n w = \"\"\n for c in word:\n if c.isalnum():\n w += c\n\n word = w\n\n r = requests.get(url=URL + word, headers=HEADER)\n origin, transcription = extract_transcription(r)\n\n # In case not found transcription, keep original word\n if transcription:\n transcription = fix_tense(word=word, origin=origin,\n transcription=transcription) # Fixing case having s, es, d or ed\n transcription_line = transcription_line + transcription + \" \"\n else:\n transcription_line = transcription_line + word + \" \"\n\n print(origin)\n print(transcription)\n\n time.sleep(DELAY) # Bypass rate limit\n\n transcription_lines.append(transcription_line)\n return transcription_lines\n\n\n\"\"\"\nExtract transcription from response HTML using rexp\nreturn: string\n\"\"\"\n\n\ndef extract_transcription(response):\n pattern = \"/<span class=\\\"ipa dipa lpr-2 lpl-1\\\">(\\S+)</span>/\" # This pattern is used for transcription\n ori_pattern = \"<span class=\\\"hw dhw\\\">(\\S+)</span></span>\" # This pattern is used for matched word\n\n matched = re.search(pattern=pattern, string=response.text)\n ori_matched = re.search(pattern=ori_pattern, string=response.text)\n\n if matched:\n return ori_matched.group(1), matched.group(1)\n else:\n return None, None\n\n\n\"\"\"\nWrite to file the result\n\"\"\"\n\n\ndef write_to_file(lines, trans_lines, filename):\n with open(filename, \"w\", encoding=\"utf-8\") as f:\n for line in lines:\n f.write(line)\n f.write(\"\\n\") # Break line each line\n\n f.write(\"\\n---\\n\") # Break line between original and transcription\n\n for trans_line in trans_lines:\n f.write(trans_line)\n f.write(\"\\n\") # Break line each line\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\"\")\n parser.add_argument(\"-r\", \"--reference\", dest=\"ref\",\n help=\"Choose the kind of dictionary as reference. Support [oxf, cambr] only, default is cambr\",\n type=str,\n default=\"cambr\")\n parser.add_argument(\"-f\", \"--file\", dest=\"file\", help=\"File name need to perform\", type=str)\n parser.add_argument(\"-o\", \"--ouput\", dest=\"output\", help=\"Output result filename\", type=str)\n args = parser.parse_args()\n\n if args.ref:\n ref = args.ref\n if args.file:\n file = args.file\n output = args.file + \"_output.txt\"\n if args.output:\n output = args.output\n\n lines = read_file(filename=file)\n trans_lines = make_transcription(lines=lines)\n\n write_to_file(lines=lines, trans_lines=trans_lines, filename=output)\n" } ]
2