repo_name (string, 5-114 chars) | repo_url (string, 24-133 chars) | snapshot_id (string, 40 chars) | revision_id (string, 40 chars) | directory_id (string, 40 chars) | branch_name (string, 209 classes) | visit_date (timestamp[ns]) | revision_date (timestamp[ns]) | committer_date (timestamp[ns]) | github_id (int64, 9.83k-683M, nullable) | star_events_count (int64, 0-22.6k) | fork_events_count (int64, 0-4.15k) | gha_license_id (string, 17 classes) | gha_created_at (timestamp[ns]) | gha_updated_at (timestamp[ns]) | gha_pushed_at (timestamp[ns]) | gha_language (string, 115 classes) | files (list, 1-13.2k items) | num_files (int64, 1-13.2k) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
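Each data row below pairs repository-level metadata with a `files` list of per-file records, closed by a trailing `num_files` cell. A minimal sketch of how such rows could be consumed, assuming they are exported as JSON Lines with the field names from the header above (the file name `rows.jsonl` is a hypothetical placeholder):

```python
import json

# Hypothetical export: one JSON object per line, using the schema above.
with open("rows.jsonl") as fh:
    for line in fh:
        row = json.loads(line)
        print(row["repo_name"], row["branch_name"], row["num_files"])
        for f in row["files"]:
            # Each file record carries its path, language, size, and full text.
            print("  ", f["path"], f["language"], f["length_bytes"])
```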
AdamGreco/TMESOLO | https://github.com/AdamGreco/TMESOLO | f5e6c8350a760274649970d41eefa5c3765f492a | 62b211bd7138bdc6b8e51940e27b7445d8ea50d1 | 1b271b3dbe0496adf32ce60e4ebf26c4e6840603 | refs/heads/master | 2020-05-09T23:26:19.705968 | 2019-04-15T15:48:35 | 2019-04-15T15:48:35 | 181,501,814 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6147816181182861,
"alphanum_fraction": 0.6278462409973145,
"avg_line_length": 31.277109146118164,
"blob_id": "b7921747bafe68be38e081ced7d0afd879ab448c",
"content_id": "b50ae5adfe517e7df98cf2f075ed3eaf436a4476",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2679,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 83,
"path": "/strategie.py",
"repo_name": "AdamGreco/TMESOLO",
"src_encoding": "UTF-8",
"text": "\"\"\" les strategies prennent tous un superviseur. On joue avec les encoders et selon la strategie,\non essaie d'en tirer une certaine vitesse et vitesse angulaire qu'on transmettra au superviseur\n\"\"\"\n\nclass LigneStrat():\n \"\"\"cette strategie consiste a faire avancer le robot en ligne droite\n sur une certaine distance donnee en parametre\n \"\"\"\n\n def __init__(self,superviseur,distancemax):\n\n self.superviseur=superviseur\n self.distance=0\n self.distancemax=distancemax\n self.superviseur.robot.reset_encoder()\n\n def get_command(self):\n\n #si on a pas atteint la distance fixee\n if (self.distance < self.distancemax):\n self.distance+= self.superviseur.robot.get_encoder()[0]*self.superviseur.robot.rayonroue\n return self.superviseur.robot.vmax, 0\n else:\n return 0,0\n \n\n\nclass Turn90Strat():\n \"\"\"on tourne de 90 degrees\n \"\"\"\n\n def __init__(self,superviseur):\n\n self.superviseur=superviseur\n self.superviseur.robot.reset_encoder()\n self.angle_parcouru=0\n\n def get_command(self):\n\n if (self.angle_parcouru < 82): #on a une imprecision de 8 degrees dans la simulation et \n #il faut 82 degrees pour en faire 90, la source du probleme\n #doit encore etre trouvee\n self.angle_parcouru += self.superviseur.robot.get_encoder()[0]\n print(self.angle_parcouru)\n return 0,10\n else:\n return 0,0\n\nclass SquareStrat():\n \"\"\"on fait dessiner un carre au robot\n \"\"\"\n\n def __init__(self,superviseur):\n\n self.superviseur= superviseur\n\n #on a besoin de sous strategies ici\n self.strat_avancer = LigneStrat(self.superviseur,5)\n self.strat_tourner= Turn90Strat(self.superviseur)\n\n def get_command(self):\n #on fait alterner les sous strategies et on\n #base notre raisonnement sur leurs reponses respectives,\n #si une sous strategie renvoie 0,0 on passe a la sous strategie\n #suivante\n\n v_avancer, omega_avancer = self.strat_avancer.get_command()\n\n if (v_avancer==0 and omega_avancer==0):\n v_tourner, omega_tourner = self.strat_tourner.get_command()\n\n if (v_tourner==0 and omega_tourner==0):\n self.strat_avancer=LigneStrat(self.superviseur,5)\n\n return v_tourner, omega_tourner\n\n else:\n #on actualise le vecteur rotation au cas ou on commencerait la\n #rotation le prochain tour\n\n self.strat_tourner=Turn90Strat(self.superviseur)\n\n return v_avancer,omega_avancer\n"
}
] | 1 |
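The per-file `alpha_fraction` and `alphanum_fraction` fields presumably give the share of alphabetic and alphanumeric characters in `text`; a sketch of that computation under this assumption (the helper name is ours):

```python
# Assumed semantics: alpha_fraction / alphanum_fraction are the shares of
# alphabetic (resp. alphanumeric) characters among all characters in `text`.
def char_fractions(text: str) -> tuple[float, float]:
    n = len(text)
    if n == 0:
        return 0.0, 0.0
    alpha = sum(c.isalpha() for c in text)
    alnum = sum(c.isalnum() for c in text)
    return alpha / n, alnum / n
```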
oliviac12/Gild_challange | https://github.com/oliviac12/Gild_challange | ec0e6101c1f48eb3bd3d4de2a25f6cd1a2bed1e3 | e7e8613270d4cdd15f819d643144368d4ae5be9b | 2519cf04001532b33a0a4440c6c704a698b1e6f2 | refs/heads/master | 2021-01-20T20:18:08.322987 | 2016-07-09T09:05:15 | 2016-07-09T09:05:15 | 62,840,040 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6361910700798035,
"alphanum_fraction": 0.6361910700798035,
"avg_line_length": 30.610000610351562,
"blob_id": "fb0f58357f7b5e3f5aad991882aa08dc9a3ba886",
"content_id": "94ea41808a880a90a859e1cae41b173874c6fc32",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3161,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 100,
"path": "/test_name_parsing.py",
"repo_name": "oliviac12/Gild_challange",
"src_encoding": "UTF-8",
"text": "'''\nWrite a class that identifies the first and last name of a person in\nunstructured text representing a person's name. You should strive to\nhave your model work universally with other names beyond the examples below.\nFeel free to add any other tests or methods you feel are appropriate.\n\nPlease submit what you have by the deadline.\n\nrun the following to test:\n$ pip install pytest\n$ py.test -v test_name_parsing.py\n'''\nfrom name_parse import NameParse\n\n\nclass TestNameParsing:\n def setup_class(cls):\n cls.aaron = ('aaron', 'mangum')\n cls.naini = ('naini', 'mistry')\n cls.molly = ('molly', 'scott')\n cls.steven = ('steven', 'st. claire')\n\n def test_aaron_first_last(self):\n name = 'aaron mangum'\n assert NameParse(name).parsed_name == self.aaron\n\n def test_aaron_first_middle_last(self):\n name = 'aaron david mangum'\n assert NameParse(name).parsed_name == self.aaron\n\n def test_aaron_last_first(self):\n name = 'mangum aaron'\n assert NameParse(name).parsed_name == self.aaron\n\n def test_aaron_last_comma_first_middle(self):\n name = 'mangum, aaron david'\n assert NameParse(name).parsed_name == self.aaron\n\n def test_aaron_last_first_middle(self):\n name = 'mangum aaron david'\n assert NameParse(name).parsed_name == self.aaron\n\n def test_aaron_title_first_last(self):\n name = 'data scientist, aaron mangum'\n assert NameParse(name).parsed_name == self.aaron\n\n def test_aaron_first_last_title(self):\n name = 'aaron mangum, data scientist'\n assert NameParse(name).parsed_name == self.aaron\n\n def test_aaron_evil_twin(self):\n name = 'aaron david von mangum'\n assert NameParse(name).parsed_name == self.aaron\n\n def test_aaron_evil_twin_last_first(self):\n name = 'von mangum, aaron david'\n assert NameParse(name).parsed_name == self.aaron\n\n def test_naini_first_last(self):\n name = 'naini mistry'\n assert NameParse(name).parsed_name == self.naini\n\n def test_naini_last_first(self):\n name = 'mistry naini'\n assert NameParse(name).parsed_name == self.naini\n\n def test_naini_last_comma_first(self):\n name = 'mistry, naini'\n assert NameParse(name).parsed_name == self.naini\n\n def test_naini_first_last_title(self):\n name = 'naini mistry, vice president of product'\n assert NameParse(name).parsed_name == self.naini\n\n def test_double_first(self):\n name = 'molly scott'\n assert NameParse(name).parsed_name == self.molly\n\n def test_double_first_last_first(self):\n name = 'scott molly'\n assert NameParse(name).parsed_name == self.molly\n\n def test_double_first_last_comma_first(self):\n name = 'scott, molly'\n assert NameParse(name).parsed_name == self.molly\n\n def test_double_first_title(self):\n name = 'molly scott account executive'\n assert NameParse(name).parsed_name == self.molly\n\n def test_punctuated_double(self):\n name = 'steven st. claire'\n assert NameParse(name).parsed_name == self.steven\n\n def test_title_puctuated_double(self):\n name = 'user interface designer steven st. claire'\n assert NameParse(name).parsed_name == self.steven\n\n\nprint dir(TestNameParsing)\n"
},
{
"alpha_fraction": 0.7162743806838989,
"alphanum_fraction": 0.7236142754554749,
"avg_line_length": 43.82954406738281,
"blob_id": "806164ef605025331c8bb4d4215fd16d366570b6",
"content_id": "aeb4aa61f4938cd31911b8610c48e087a0ec3124",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3951,
"license_type": "no_license",
"max_line_length": 450,
"num_lines": 88,
"path": "/README.md",
"repo_name": "oliviac12/Gild_challange",
"src_encoding": "UTF-8",
"text": "## Gild_challenge\n\nI was able to pass all the test in the test set. I was using this moudule called [probablepeople](https://github.com/datamade/probablepeople) becasue it uses uses [parserator](https://github.com/datamade/parserator), a library for making and improving probabilistic parsers - specifically, parsers that use python-crfsuite's implementation of conditional random fields.Therefore, I was able to customize and added more example and train the moudule. \n\n## Following are steps to set up probablepeople and test if my script pass all the tests\n\n1. clone Gild_challenge and submodule probablepeople to your local machine\n\n ```\n $ git clone --recursive https://github.com/oliviac12/Gild_challenge.git \n $ cd Gild_challenge/probablepeople\n $ pip install -r requirements.txt \n $ python setup.py develop\n ```\n2. Another thing need to be fixed: downgrade the parserator to version 0.4.1 because the parserator installed with probablepepople is 5.0+ , it doesn't work with the train command we are using next step (ugh I know!) \n \n ```\n $ pip uninstall parserator\n $ pip install parserator==0.4.1\n ``` \n3. Do a test train with existing data\n ```\n $ parserator train name_data/labeled/labeled.xml,name_data/labeled/company_labeled.xml probablepeople\n ``` \nif no error pops up and you see something like this \n\"training model on 2078 training examples from ['name_data/labeled/labeled.xml']\"\n\"done training! model file created: probablepeople/learned_settings.crfsuite\"\nthen we are almost there. \n \n 4. After train sussusfully, cd back to the Gild_challange folder, make sure you have pytest installed and run. Dependencies for this script also including nltk, after install nltk, go to python shell and do ```import nltk, nltk.download()```\n \n ```\n $ cd .. \n $ py.test -v test_name_parsing.py\n ``` \n \n Hope everything works and my script passes all the test as on my machine! \n\n## How to train the module with new examples in the future (how did I do it as an example here)\n\n 1.-3. steps are same as the previous part\n \n \n 4. After the test training ran successfully, label new data. In this probablepeople folder, there's raw csv called newpeople.csv. That's the new data I wanted to add. Based on the note in [probablepeople](https://github.com/datamade/probablepeople)'s page, parserator doesn't need a lot of new example to learn about the new label. This is what the data looks like in the newpeople.csv\n ```\n Molly Scott\n Steven St.Claire\n Naini Mistry\n mistry naini\n St.Claire steven\n scott molly\n stephanie scott\n scott stephsnie\n aaron david von mangum\n serena van der woodsen\n ```\nTo Label:\n ```\n $ parserator label newpeople.csv name_data/labeled/labeled.xml probablepeople\n ``` \nLabeling example: (there's a list for tag with numbers to choose from, i.e. GivenName -2, SurName -6, please refer to labeling.txt for more details how I lable them)\n ```\n STRING: serena van der woodsen\n | serena | GivenName |\n | van | Surname |\n | der | Surname |\n | woodsen | Surname |\n Is this correct? (y)es / (n)o / (s)kip / (f)inish tagging / (h)elp\n n\n What is 'serena' ? If GivenName hit return\n \n What is 'van' ? If Surname hit return\n 4\n What is 'der' ? If Surname hit return\n 4\n What is 'woodsen' ? If Surname hit return\n 6\n ```\n 5. Re-train and test the performance! \n```\nparserator train name_data/labeled/labeled.xml probablepeople\n```\n\nThoughts:\n\n But... does this a bit arbitrary? 
What I did is basically telling the moudule that I want molly to be labeled as first name and scott as last name. So I mocked up a dataset with random people name and job title and test my method again. It's the test.py and test_data.csv. The AUC is about 93% \n \n I know that my method is pretty naive and straighforward, but I tried some other NLP tools like standford NER and NLTK stuff, doesn't seem working that well and I think it has something to do with the fact that I am dealing with short content here instead of artilces/paragraphs\n \n \n"
},
{
"alpha_fraction": 0.5139896273612976,
"alphanum_fraction": 0.5186528563499451,
"avg_line_length": 37.599998474121094,
"blob_id": "511723281a7e1d255cc140ad7a614bb699e9bdce",
"content_id": "2654061ff97f718f711e1d87d337e532391d1fc6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1930,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 50,
"path": "/name_parse.py",
"repo_name": "oliviac12/Gild_challange",
"src_encoding": "UTF-8",
"text": "import probablepeople as pp\nimport nltk\nfrom nltk.tokenize import PunktSentenceTokenizer\n\n\nclass NameParse(object):\n def __init__(self, name):\n self.name = name\n self.parsed_name = None\n self._parse()\n\n def _parse(self):\n '''\n Assign the first and last name in self.name to self.parsed_name\n using the following format. ('first_name', 'last_name')\n '''\n custom_sent_tokenize = PunktSentenceTokenizer()\n text_file = open(\"title.txt\", \"r\")\n title = text_file.read().split(',') #the file I used to detect job title\n name = self.name.replace('. ', '.').lower()\n tokenized = custom_sent_tokenize.tokenize(name)\n for i in tokenized:\n words = nltk.word_tokenize(i)\n jobtil = list(set(words)&set(title)) #see if there'a s job keyword in the string\n if len(jobtil) != 0:\n if ',' in words: #if there's a comma in the string, very likely that seperate the job and name\n if words.index(jobtil[0]) < words.index(','):\n newstr = name.split(',')[1]\n else:\n newstr = name.split(',')[0]\n else:\n if words.index(jobtil[0]) == len(words)-1:\n newstr = ' '.join(words[:-2])\n else:\n newstr = ' '.join(words[words.index(jobtil[0])+1:])\n self.ppname(newstr)\n\n\n else:\n self.ppname(name)\n\n def ppname(self, text):\n parsed_name = pp.parse(text)\n inv_parsed_name = {v: k for k, v in dict(parsed_name).items()}\n try:\n first_name = inv_parsed_name['GivenName']\n last_name = inv_parsed_name['Surname'].strip(',')\n self.parsed_name = (first_name, last_name.replace('.', '. '))\n except Exception as e:\n print str(e)\n"
},
{
"alpha_fraction": 0.569979727268219,
"alphanum_fraction": 0.5862069129943848,
"avg_line_length": 24.947368621826172,
"blob_id": "4329dc1f32b8b5624ffbd2706d9d457eb003451b",
"content_id": "11c06d7169ab57512beb0ad993c5529b1b1a2468",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 493,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 19,
"path": "/test.py",
"repo_name": "oliviac12/Gild_challange",
"src_encoding": "UTF-8",
"text": "from name_parse import NameParse\nimport pandas as pd\n\ntest = pd.read_csv('test_data.csv')\ntest['Result'] = [s.lower() for s in test['Result']]\nyes = 0\nno = 0\nfor index, row in test.iterrows():\n first = NameParse(row['Test']).parsed_name[0]\n last = NameParse(row['Test']).parsed_name[1]\n r_first = row['Result'].split(',')[0]\n r_last = row['Result'].split(',')[1]\n if (first == r_first) & (last == r_last.strip()) :\n yes += 1\n else:\n no += 1\n\nprint yes\nprint no\n"
}
] | 4 |
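The README and `name_parse.py` in the record above lean on probablepeople's tagging API; a minimal sketch of the call they build on (the input string is our own example, and the exact labels depend on the trained model):

```python
import probablepeople as pp

# pp.parse returns (token, label) pairs such as GivenName / Surname,
# which name_parse.py above inverts into a {label: token} lookup.
print(pp.parse("mangum, aaron david"))
```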
Agyraspide/congenial-octo-robot | https://github.com/Agyraspide/congenial-octo-robot | e42a26342e26de4ce75a52eaa3494ff256337a7f | 33621935b3330107ee70df0d98d6fecd120e57ad | fbd5acfc697ea6ab4730640955f39f46b1b4016d | refs/heads/master | 2020-11-26T15:25:00.032113 | 2020-01-21T00:40:38 | 2020-01-21T00:40:38 | 229,121,260 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.45764192938804626,
"alphanum_fraction": 0.5030567646026611,
"avg_line_length": 27.41025733947754,
"blob_id": "89a24695ee6128bade858dfc9f3bd3f301919622",
"content_id": "4f9c599f9ebd4ae823ec37c23a568f58baa15e4c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1145,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 39,
"path": "/euler27test.py",
"repo_name": "Agyraspide/congenial-octo-robot",
"src_encoding": "UTF-8",
"text": "ab_pairs = set()\r\npos_primes = [2, ]\r\n\r\ndef primes_tester(x):\r\n mods = [abs(x) % d for d in range(2, (round(abs(x)**0.5) + 1))]\r\n if 0 not in mods:\r\n return True\r\n\r\nfor n in range(3, 1000, 2):\r\n if primes_tester(n):\r\n pos_primes.append(n)\r\n\r\nneg_primes = [p * -1 for p in pos_primes]\r\nprimes = set(pos_primes + neg_primes)\r\n\r\n\r\nfor b in primes:\r\n\tfor a in range(-1000, 1000):\r\n\t\tif (a%2!=0) or a==2:\r\n\t\t\tif abs(b + a + 1) < 1000:\r\n\t\t\t\tif (b + a + 1) in primes:\r\n\t\t\t\t\tab_pairs.add((a, b))\r\n\t\t\tif abs(b + a + 1) >= 1000:\r\n\t\t\t\tif primes_tester((b + a + 1)):\r\n\t\t\t\t\tab_pairs.add((a, b))\r\n\r\nab_pair_lst = list(ab_pairs)\r\nn = 1\r\nwhile len(ab_pairs) > 1:\r\n n += 1 # initalizing n at n=2\r\n ab_pair_lst = list(ab_pairs) # updating list to remove pairs removed from ab_pairs in previous iteration\r\n for (a, b) in ab_pair_lst:\r\n if abs(n**2 + (n * a) + b) < 1000:\r\n if (n**2 + (n * a) + b) not in primes:\r\n ab_pairs.discard((a, b))\r\n if abs(n ** 2 + (n * a) + b) >= 1000:\r\n if not primes_tester(n**2 + (n * a) + b):\r\n ab_pairs.discard((a, b))\r\nprint(ab_pairs)"
},
{
"alpha_fraction": 0.6088957190513611,
"alphanum_fraction": 0.6226993799209595,
"avg_line_length": 17.235294342041016,
"blob_id": "f1f9d0505d22376f27dd42bc17dc1f43b99308cf",
"content_id": "61a186d15c149cee86872440cb1cc703568abccf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 652,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 34,
"path": "/euler24hrtest.py",
"repo_name": "Agyraspide/congenial-octo-robot",
"src_encoding": "UTF-8",
"text": "# Enter your code here. Read input from STDIN. Print output to STDOUT\r\nfactorial = {0:1}\r\ndef fact(n):\r\n\tif n in factorial:\r\n\t\treturn factorial[n]\r\n\telse:\r\n\t\tfactorial[n] = n*fact(n-1)\r\n\t\treturn factorial[n]\r\n\r\n#from itertools import permutations\r\n\r\ndef factoradic(n):\r\n\tl = []\r\n\tcount = 0\r\n\tfor i in reversed(range(13)):\r\n\t\twhile n/fact(i)>1:\r\n\t\t\tn-=fact(i)\r\n\t\t\tcount+=1\r\n\t\tl.append(count)\r\n\t\tcount = 0\r\n\treturn l\r\ndef convertFactoradic(n):\r\n\ta = list('abcdefghijklm')\r\n\ts = ''\r\n\tfor i in n:\r\n\t\ts+=a[i]\r\n\t\tdel a[i]\r\n\treturn s\r\n\t\r\n\r\n#l =sorted(list(permutations(a)))\r\n\r\nfor _ in range(int(input())):\r\n\tprint(convertFactoradic(factoradic(int(input()))))"
},
{
"alpha_fraction": 0.5098522305488586,
"alphanum_fraction": 0.5394088625907898,
"avg_line_length": 15,
"blob_id": "288c21e9c7822fbee8e8b75912cd1b952881dc07",
"content_id": "fd636382d10e3d688a870e23f7e04038300f3e9a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 406,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 24,
"path": "/euler2.py",
"repo_name": "Agyraspide/congenial-octo-robot",
"src_encoding": "UTF-8",
"text": "#!/bin/python3\r\nimport timeit\r\n\r\nimport sys\r\nfibs = {1:1,2:1}\r\ndef fib(n):\r\n\tif n in fibs:\r\n\t\treturn(fibs[n])\r\n\telse:\r\n\t\tfibs[n] = fib(n-1) + fib(n-2)\r\n\t\treturn(fibs[n])\r\n \r\nt = int(input().strip())\r\nfor a0 in range(t):\r\n\tn = int(input().strip())\r\n\tstart = timeit.timeit()\r\n\tc = []\r\n\ti = 1\r\n\twhile fib(i) < n:\r\n\t\tif fib(i)%2==0:\r\n\t\t\tc.append(fib(i))\r\n\t\ti+=1\r\n\tprint(sum(c))\r\n\tprint(start-timeit.timeit())"
},
{
"alpha_fraction": 0.5489078760147095,
"alphanum_fraction": 0.561253547668457,
"avg_line_length": 32.032257080078125,
"blob_id": "77929094b3ffa29495914be0b0a855b70f050b8b",
"content_id": "77aa09793f586d0411965a406ad614081412183f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1053,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 31,
"path": "/euler26.py",
"repo_name": "Agyraspide/congenial-octo-robot",
"src_encoding": "UTF-8",
"text": "def convert(numerator, denominator):\r\n #print(\"---->\", numerator, \"/\", denominator)\r\n result = [str(numerator//denominator) + \".\"]\r\n subresults = [numerator % denominator] ### changed ###\r\n numerator %= denominator\r\n while numerator != 0:\r\n #print(numerator)\r\n numerator *= 10\r\n result_digit, numerator = divmod(numerator, denominator)\r\n result.append(str(result_digit)) ### moved before if-statement\r\n\r\n if numerator not in subresults:\r\n subresults.append(numerator)\r\n #print(\"appended\", result_digit)\r\n\r\n else:\r\n result.insert(subresults.index(numerator) + 1, \"(\") ### added '+ 1'\r\n #print(\"index\", subresults.index(numerator), subresults, \"result\", result)\r\n result.append(\")\")\r\n #print(\"repeating\", numerator)\r\n break\r\n #print(result)\r\n return \"\".join(result)\r\nmax = 0\r\nanswer = 0\r\nfor i in range(1,1001):\r\n\ta = convert(1,i)\r\n\tif len(a)>max:\r\n\t\tmax = len(a)\r\n\t\tanswer = i\r\nprint(answer)"
},
{
"alpha_fraction": 0.3913043439388275,
"alphanum_fraction": 0.4749794900417328,
"avg_line_length": 21.037734985351562,
"blob_id": "aeeb540e2b84132ab87d88ab33a5539e54cc01e6",
"content_id": "65d80b0668e35041bdb849c44ec980f01bd3322f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1219,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 53,
"path": "/euler19hr.py",
"repo_name": "Agyraspide/congenial-octo-robot",
"src_encoding": "UTF-8",
"text": "for _ in range(int(input())):\r\n\ty1, m1, d1 = input().split()\r\n\ty2, m2, d2 = input().split()\r\n\ty1, m1, d1 = int(y1), int(m1), int(d1)\r\n\ty2, m2, d2 = int(y2), int(m2), int(d2)\r\n\tyear = 1900\r\n\tmon = {0: 'Jan', 1: 'Feb', 2:'Mar',3:'Apr', 4:'May', 5:'Jun', 6:'Jul', 7:'Aug',8:'Sep', 9:'Oct', 10:'Nov', 11:'Dec'}\r\n\tdays30 = {'Sep':0,'Apr':0, 'Jun':0, 'Nov':0}\r\n\tmonths = 0\r\n\tday = 1\r\n\tcount = 0\r\n\ttot_days = 1\r\n\t\r\n\twhile (year<=y2):\r\n\t\tif year == y2 and mon == m2-1 and day == d2:\r\n\t\t\tbreak\r\n\t\tm = mon[months%12]\r\n\t\tif m =='Feb' and ((year%4==0 and not year%100 ==0) or year%400==0):\r\n\t\t\tif day==30:\r\n\t\t\t\tmonths+=1\r\n\t\t\t\tday =1\r\n\t\telif m=='Feb':\r\n\t\t\tif day==29:\r\n\t\t\t\tmonths+=1\r\n\t\t\t\tday =1\r\n\t\telif m in days30:\r\n\t\t\tif day==31:\r\n\t\t\t\tmonths+=1\r\n\t\t\t\tday =1\r\n\t\telse:\r\n\t\t\tif day==32:\r\n\t\t\t\tmonths+=1\r\n\t\t\t\tif months%12==0:\r\n\t\t\t\t\tyear+=1\r\n\t\t\t\tday =1\r\n\t\t#if tot_days%7==0:\r\n\t\t\t#print(str(day) + ' ' + m + ' ' + str(year))\r\n\t\tif tot_days%7==0 and year>=y1 and day ==1:\r\n\t\t\tif year == y1:\r\n\t\t\t\tif months < m1:\r\n\t\t\t\t\tday+=1\r\n\t\t\t\t\ttot_days+=1\r\n\t\t\t\t\tcontinue\r\n\t\t\t\telse:\r\n\t\t\t\t\tif day < d1:\r\n\t\t\t\t\t\tday+=1\r\n\t\t\t\t\t\ttot_days+=1\r\n\t\t\t\t\t\tcontinue\r\n\t\t\t#print(str(day) + ' ' + m + ' ' + str(year))\r\n\t\t\tcount+=1\r\n\t\tday+=1\r\n\t\ttot_days+=1\r\n\tprint(count-2)"
},
{
"alpha_fraction": 0.5211267471313477,
"alphanum_fraction": 0.5809859037399292,
"avg_line_length": 21.5,
"blob_id": "a40c8663288dbf83f9620513a9f0d79deb24109e",
"content_id": "3eb19d4cac959c8da6082c47751f886b7a205705",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 284,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 12,
"path": "/euler32test.py",
"repo_name": "Agyraspide/congenial-octo-robot",
"src_encoding": "UTF-8",
"text": "from itertools import permutations\r\na = '123456789'\r\nn = int(input())\r\nd = {''.join(i):0 for i in list(permutations(a[:n]))}\r\nt = []\r\nfor i in range(100):\r\n\tfor j in range(2000):\r\n\t\tfoo = str(i)+str(j)+str(i*j)\r\n\t\tif foo in d:\r\n\t\t\tt.append(i*j)\r\n#print(set(t))\r\nprint(sum(set(t)))\r\n\t\t"
},
{
"alpha_fraction": 0.5,
"alphanum_fraction": 0.625,
"avg_line_length": 64,
"blob_id": "da5e6759668464463810c04673d9223acc3f9090",
"content_id": "e40d1d9691cce7b22d50585caa1286f77030e65a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 64,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 1,
"path": "/euler29test.py",
"repo_name": "Agyraspide/congenial-octo-robot",
"src_encoding": "UTF-8",
"text": "print(len({i**j for i in range(2, 101) for j in range(2, 101)}))"
},
{
"alpha_fraction": 0.4449999928474426,
"alphanum_fraction": 0.47749999165534973,
"avg_line_length": 16.212121963500977,
"blob_id": "2276ba3f506db0abdee105b5084dbef8b06c30d3",
"content_id": "607bb092b4d6f895f2c61cf123183bddee4c6139",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1200,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 66,
"path": "/euler12.py",
"repo_name": "Agyraspide/congenial-octo-robot",
"src_encoding": "UTF-8",
"text": "import sys\r\nfrom math import ceil\r\n\r\ndef isprime(n):\r\n\tif n == 1:\r\n\t\treturn False\r\n\telif n % 2 == 0:\r\n\t\treturn False\r\n\tfor i in range(3, int(ceil(n ** 0.5)), 2):\r\n\t\tif n % i == 0:\r\n\t\t\treturn False\r\n\treturn True\r\n\r\ndef prime_factors(n):\r\n i = 2\r\n factors = []\r\n while i * i <= n:\r\n if n % i:\r\n i += 1\r\n else:\r\n n //= i\r\n factors.append(i)\r\n if n > 1:\r\n factors.append(n)\r\n return factors\r\n\r\ndef p_fctr_exp(n):\r\n primes = prime_factors(n)\r\n exp=[]\r\n\r\n for p in primes:\r\n e=0\r\n while (n%p==0):\r\n n=n//p # since p still divides n,\r\n e+=1 # we divide n by p and increase the exponent\r\n exp.append(e)\r\n return exp\r\ndef factor(n):\r\n\ta = 1\r\n\tfor i in p_fctr_exp(n):\r\n\t\tif i != 0:\r\n\t\t\ta*=(i+1)\r\n\treturn(a)\r\n\t\r\nd = {}\r\n\t\r\ndef find(n):\r\n\tsumm = 0\r\n\tfor i in range(1, 842161320):\r\n\t\tsumm +=i\r\n\t\tif isprime(i) and i >15:\r\n\t\t\tcontinue\r\n\t\tif i not in d:\r\n\t\t\td[i] = factor(summ)\r\n\t\tif d[i] > n:\r\n\t\t\treturn int(summ)\r\n\r\nt = int(input().strip())\r\nfor a0 in range(t):\r\n n = int(input().strip())\r\n print(find(n))\r\n'''\r\nn = 17907120\r\nprint(p_fctr_exp(n))\r\nprint(factor(n))\r\n'''"
},
{
"alpha_fraction": 0.38461539149284363,
"alphanum_fraction": 0.6153846383094788,
"avg_line_length": 13,
"blob_id": "50c2b41b0bce8baefe7895eb3d61ed973d3dbd1e",
"content_id": "3d3f44d2e7974e18bba975d0e1a7d3d6166bad58",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13,
"license_type": "no_license",
"max_line_length": 13,
"num_lines": 1,
"path": "/euler30.py",
"repo_name": "Agyraspide/congenial-octo-robot",
"src_encoding": "UTF-8",
"text": "print(9**6*6)"
},
{
"alpha_fraction": 0.37828370928764343,
"alphanum_fraction": 0.4150612950325012,
"avg_line_length": 15.363636016845703,
"blob_id": "960f5ce6b60cef68d1cf6df579cd44c3d470b410",
"content_id": "a41f8d52704eb849b1163c5f3d25f6334293174a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 571,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 33,
"path": "/euler29.py",
"repo_name": "Agyraspide/congenial-octo-robot",
"src_encoding": "UTF-8",
"text": "n=int(input())\r\n\r\nk=1\r\nwhile(pow(2,k)<=n):\r\n k+=1\r\nlast=k\r\n\r\nlists={}\r\nfor i in range(1,last):\r\n lists[i]=[]\r\n for j in range(2,n+1):\r\n lists[i].append(j*i)\r\n if(i!=1):\r\n temp=list(set().union(lists[i-1],lists[i])) \r\n lists[i]=temp\r\nprint(lists)\r\n\r\ncheck=[0]*n\r\nans=0\r\nfor i in range(2,n+1):\r\n if(check[i-2]==0):\r\n check[i-2]=1\r\n j=i*i\r\n while(j<=n):\r\n check[j-2]=1\r\n j=j*i\r\n \r\n k=1\r\n while(pow(i,k)<=n):\r\n k+=1\r\n ans+=len(lists[k-1])\r\n\r\nprint(ans)"
},
{
"alpha_fraction": 0.8181818127632141,
"alphanum_fraction": 0.8181818127632141,
"avg_line_length": 22,
"blob_id": "96ed6d3a5545aaead576c5473196a3b6604bc069",
"content_id": "93b2aad4a2842ed7fac5178fc2fc110d559e98e3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 22,
"license_type": "no_license",
"max_line_length": 22,
"num_lines": 1,
"path": "/README.md",
"repo_name": "Agyraspide/congenial-octo-robot",
"src_encoding": "UTF-8",
"text": "# congenial-octo-robot"
},
{
"alpha_fraction": 0.48046308755874634,
"alphanum_fraction": 0.5137481689453125,
"avg_line_length": 19.65625,
"blob_id": "c98494fc23a235b22983b9fe2b86711140d61ec3",
"content_id": "2d7af4207570f940c4d122f46b04c22efe2f11eb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 691,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 32,
"path": "/euler25.py",
"repo_name": "Agyraspide/congenial-octo-robot",
"src_encoding": "UTF-8",
"text": "# Enter your code here. Read input from STDIN. Print output to STDOUT\r\nf = {0:1, 1:1}\r\ndef fib(x):\r\n if x in f:\r\n return f[x]\r\n else:\r\n f[x]= fib(x-1)+fib(x-2)\r\n return f[x]\r\ndef fibDigits(x):\r\n return(len(str(fib(x))))\r\n\r\nd = {}\r\n\r\ndef findFibDigits(x):\r\n if fibDigits(x) in d:\r\n return d[fibDigits(x)]\r\n else:\r\n d[fibDigits(x)] = x\r\n return d[fibDigits(x)]\r\nfoo = 0\r\nwhile findFibDigits(foo)<7000:\r\n #print(findFibDigits(foo))\r\n foo+=1\r\n '''\r\n if foo%1000==0:\r\n print(foo)'''\r\nfor _ in range(int(input())):\r\n n = int(input())\r\n while n not in d:\r\n n-=1\r\n print(d[n]+1)\r\n #print(fibDigits(7003))"
},
{
"alpha_fraction": 0.37361282110214233,
"alphanum_fraction": 0.3933415412902832,
"avg_line_length": 23.40625,
"blob_id": "d3fd3915a5e81ba5619be6645db4eccda42fc0af",
"content_id": "4a2e9c1f17365c2748fdf99c83c754f0754fc7d0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 811,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 32,
"path": "/euler21.py",
"repo_name": "Agyraspide/congenial-octo-robot",
"src_encoding": "UTF-8",
"text": "from functools import reduce\r\n\r\ndef factorsSum(n): \r\n return sum(set(reduce(list.__add__, \r\n ([i, n//i] for i in range(1, int(n**0.5) + 1) if n % i == 0))))-n\r\n \r\nd = {}\r\nf = {}\r\nfor i in range(1,2*10**5):\r\n d[i] = factorsSum(i)\r\n\r\ndef answer(n):\r\n if n in f:\r\n return(f[n])\r\n else:\r\n l = []\r\n for key in range(1,n):\r\n if d[key] in d and key!=d[key] and key ==d[d[key]]:\r\n l.append(key)\r\n \r\n if len(l)>=2:\r\n for i in range(1,len(l)):\r\n for j in range(l[i-1],l[i]):\r\n f[j] = sum(l[:i])\r\n for i in range(l[-1],n+1):\r\n \r\n f[i] = sum(l)\r\n return(f[n])\r\n \r\n \r\nfor _ in range(int(input())):\r\n print(answer(int(input())))"
},
{
"alpha_fraction": 0.29123711585998535,
"alphanum_fraction": 0.36082473397254944,
"avg_line_length": 25.714284896850586,
"blob_id": "203dcd9bbcf0083893c7f45f876e1f37e17b8dd9",
"content_id": "bf3ffb2166d104b3d57ff5ff89a3ded2f1fe183e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 388,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 14,
"path": "/euler24test.py",
"repo_name": "Agyraspide/congenial-octo-robot",
"src_encoding": "UTF-8",
"text": "s = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\r\n\r\ncnt = 1\r\nwhile cnt < 1000000:\r\n for i in range(len(s) - 1, -1, -1):\r\n if s[i] > s[i - 1]:\r\n m = min([m for m in s[i - 1:] if m > s[i - 1]])\r\n idx = s.index(m)\r\n s[i - 1], s[idx] = s[idx], s[i - 1]\r\n s[i:] = reversed(s[i:])\r\n break\r\n cnt += 1\r\n\r\nprint(''.join([str(i) for i in s]))\r\n"
},
{
"alpha_fraction": 0.40696409344673157,
"alphanum_fraction": 0.44940152764320374,
"avg_line_length": 25.08823585510254,
"blob_id": "3b04b117589cc4502923bce07e061ddcbd43de57",
"content_id": "80c519d1eef30fe3c0e85e08f3299e19a891cdd6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 919,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 34,
"path": "/euler27hrtest.py",
"repo_name": "Agyraspide/congenial-octo-robot",
"src_encoding": "UTF-8",
"text": "# Enter your code here. Read input from STDIN. Print output to STDOUT\r\n\r\ndef primes(n):\r\n sieve = [True] * (n>>1)\r\n for i in range(3,int(n**0.5)+1,2):\r\n if sieve[i>>1]:\r\n sieve[i*i>>1::i] = [False] * ((n-i*i-1)//(2*i)+1)\r\n return [2] + [2*i+1 for i in range(1,n>>1) if sieve[i]]\r\n \r\np = primes(2001)\r\ndef equations(n):\r\n l = []\r\n for i in p:\r\n if n%2==0:\r\n for j in range(-n+1,n-1,2):\r\n l.append([j,i])\r\n else:\r\n for j in range(-n,n,2):\r\n l.append([j,i])\r\n return l\r\ndef tester(l,n):\r\n tracker = 0\r\n mx = 0\r\n for i in range(len(l)):\r\n count = 0\r\n while (count**2+l[i][0]*count+l[i][1]) in p and l[i][0]<n and l[i][1]<n:\r\n count+=1\r\n if count>mx:\r\n mx = count\r\n tracker = i\r\n return(l[tracker])\r\n\r\nl = equations(2001)\r\nprint(*tester(l,int(input())))"
},
{
"alpha_fraction": 0.5248869061470032,
"alphanum_fraction": 0.5475113391876221,
"avg_line_length": 22.77777862548828,
"blob_id": "5c0afaef47731974541687b66e5c0a021ef2cfd3",
"content_id": "5f59765e5ecdd3095ffc2a76e6821169dd36ea40",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 221,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 9,
"path": "/euler22.py",
"repo_name": "Agyraspide/congenial-octo-robot",
"src_encoding": "UTF-8",
"text": "b = sorted(open('names.txt').read().rstrip().replace('\"', '').split(','))\r\nt = []\r\ncount = 0\r\nfor i in range(len(b)):\r\n\tfor j in range(len(b[i])):\r\n\t\tcount+=ord(b[i][j])-64\r\n\tt.append(count*(i+1))\r\n\tcount=0\r\nprint(sum(t))"
},
{
"alpha_fraction": 0.5114942789077759,
"alphanum_fraction": 0.5344827771186829,
"avg_line_length": 12.666666984558105,
"blob_id": "4c6bbb2cc6b5e2001697bed00d72f4671794fee9",
"content_id": "a63932b606407246b96c32f1359180e2823b6ec8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 174,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 12,
"path": "/euler34.py",
"repo_name": "Agyraspide/congenial-octo-robot",
"src_encoding": "UTF-8",
"text": "import math\r\nx = math.factorial(9)*7\r\nl = []\r\n\r\nfor i in range(3,x):\r\n\tb = 0\r\n\ta = str(i)\r\n\tfor j in a:\r\n\t\tb+=math.factorial(int(j))\r\n\tif b ==i:\r\n\t\tl.append(b)\r\nprint(sum(l))"
},
{
"alpha_fraction": 0.5488371849060059,
"alphanum_fraction": 0.5767441987991333,
"avg_line_length": 19.700000762939453,
"blob_id": "7c967d640fca191a0c62d39ebe1074d716c652fb",
"content_id": "48c07c7eb848e581a26805e08e73749836b18e20",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 215,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 10,
"path": "/euler17.py",
"repo_name": "Agyraspide/congenial-octo-robot",
"src_encoding": "UTF-8",
"text": "import inflect\r\np = inflect.engine()\r\nl =[]\r\nfor i in range(1,1001):\r\n\tl.append(p.number_to_words(i).replace('-','').split())\r\nt = 0\r\nfor i in range(len(l)):\r\n\tfor j in range(len(l[i])):\r\n\t\tt+=len(l[i][j])\r\nprint(t)"
},
{
"alpha_fraction": 0.41006097197532654,
"alphanum_fraction": 0.4603658616542816,
"avg_line_length": 17.939393997192383,
"blob_id": "e0e3270ce5d3065e3dc69f1fd2a930b675d31b06",
"content_id": "19aff15424a976d3b42b00b205076666e62a4cfc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 656,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 33,
"path": "/euler14.py",
"repo_name": "Agyraspide/congenial-octo-robot",
"src_encoding": "UTF-8",
"text": "# Enter your code here. Read input from STDIN. Print output to STDOUT\r\ncache_limit = 5000001\r\nd = [0] * cache_limit\r\nd[1]=1\r\ndef collatz(n):\r\n if n < cache_limit and d[n - 1] != 0:\r\n return d[n-1]\r\n if n%2==0:\r\n #print(\"even, %i\"%n)\r\n r = 1+ collatz(n>>1)\r\n else:\r\n #print(\"odd, %i\"%n)\r\n r = 1 + collatz(3*n+1)\r\n if n< cache_limit:\r\n d[n-1]=r\r\n return r\r\nf={}\r\na = 0\r\nb = 0\r\nfor i in range(1, 5000000):\r\n a = collatz(i)\r\n if a>=b:\r\n f[i]=i\r\n b = a\r\n else:\r\n f[i]=f[i-1]\r\n\r\n\r\n\r\nt = int(input().strip())\r\nfor a0 in range(t):\r\n n = int(input().strip())\r\n print(f[n])"
},
{
"alpha_fraction": 0.5652173757553101,
"alphanum_fraction": 0.570652186870575,
"avg_line_length": 16.5,
"blob_id": "26c8ce6ee337d2010504a40a9bad6c0e9c75758f",
"content_id": "c9ed98f8d774108032d3e144e797fdf338b394c4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 368,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 20,
"path": "/euler22test.py",
"repo_name": "Agyraspide/congenial-octo-robot",
"src_encoding": "UTF-8",
"text": "a = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\r\nd = {}\r\nfor i in range(len(a)):\r\n\td[a[i]] = i+1\r\n\r\ndef getScore(name, rank):\r\n\tfoo = sum([d[i] for i in name])\r\n\treturn rank*foo\r\n\t\r\nl = []\r\n\r\nfor _ in range(int(input())):\r\n\tl.append(input())\r\nl.sort()\r\nanswer = {}\r\nfor i in range(len(l)):\r\n\tanswer[l[i]] = getScore(l[i],i+1)\r\n\r\nfor j in range(int(input())):\r\n\tprint(answer[input()])"
},
{
"alpha_fraction": 0.39007091522216797,
"alphanum_fraction": 0.42907801270484924,
"avg_line_length": 16.25806427001953,
"blob_id": "8f3bd6d17fcb6e65d2f72490250788a3ec3353d4",
"content_id": "00427ed36b0b47c74ba495c19f95125f1f97d5ad",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 564,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 31,
"path": "/euler12test.py",
"repo_name": "Agyraspide/congenial-octo-robot",
"src_encoding": "UTF-8",
"text": "# Enter your code here. Read input from STDIN. Print output to STDOUT\r\ndef divisors(n):\r\n a = 0\r\n t = 1\r\n for i in range (2, int(n ** 0.5) + 1):\r\n while n%i==0:\r\n a+=1\r\n n//=i\r\n t*=(a+1)\r\n a=0\r\n if n!=1:\r\n t*=2\r\n return(t)\r\n\r\n\r\ndef num(n):\r\n return((n*(n+1))>>1)\r\nd={}\r\ntemp = 0\r\ninc = 0\r\nfor i in range(1,501):\r\n while(temp<=i):\r\n inc+=1\r\n temp = divisors(num(inc))\r\n d[i]= num(inc)\r\n\r\n\r\n \r\nfor a0 in range(int(input())):\r\n n = int(input().strip())\r\n print(d[n])"
},
{
"alpha_fraction": 0.4346330165863037,
"alphanum_fraction": 0.4954128563404083,
"avg_line_length": 18.325580596923828,
"blob_id": "98264dcf14846a899c0e143b6cdd4680a61116a8",
"content_id": "b8d6ac4a5e357548a918d535fe9d77bd45e5364a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 872,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 43,
"path": "/euler27.py",
"repo_name": "Agyraspide/congenial-octo-robot",
"src_encoding": "UTF-8",
"text": "import numpy\r\ndef primes(n):\r\n\t#finds all primes from 2 to n\r\n sieve = numpy.ones(n//3 + (n%6==2), dtype=numpy.bool)\r\n for i in range(1,int(n**0.5)//3+1):\r\n if sieve[i]:\r\n k=3*i+1|1\r\n sieve[ k*k//3 ::2*k] = False\r\n sieve[k*(k-2*(i&1)+4)//3::2*k] = False\r\n return numpy.r_[2,3,((3*numpy.nonzero(sieve)[0][1:]+1)|1)]\r\ndef consecPrimes(i,j):\r\n\tcount =0\r\n\tif i<0:\r\n\t\ti=-i\r\n\t\tfor x in range(i+1):\r\n\t\t\tif (x**2-i*x+j) not in p:\r\n\t\t\t\tbreak\r\n\t\t\tcount+=1\r\n\tfor x in range(i+1):\r\n\t\tif (x**2+i*x+j) not in p:\r\n\t\t\tbreak\r\n\t\tcount+=1\r\n\treturn(count)\r\np= primes(2000000)\r\nl = []\r\na = 0\r\nwhile a <1001:\r\n\tif a in p:\r\n\t\tl.append(a)\r\n\ta+=1\r\nmax = 0\r\na = 0\r\nb = 0\r\nfor j in l:\r\n\tfor i in range(-j,j):\r\n\t\tif i%2==1 or i==2:\r\n\t\t\tif consecPrimes(i,j)> max:\r\n\t\t\t\tmax = consecPrimes(i,j)\r\n\t\t\t\ta=i\r\n\t\t\t\tb=j\r\nprint(a)\r\nprint(b)\r\nprint(a*b)"
},
{
"alpha_fraction": 0.3469601571559906,
"alphanum_fraction": 0.3815513551235199,
"avg_line_length": 20.761905670166016,
"blob_id": "77dd3dab598f407cc342037c6ae2cc1d17ca9b93",
"content_id": "d171357332c64f6391bd23960f3b03fbd1c37cce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 954,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 42,
"path": "/euler12hrtest.py",
"repo_name": "Agyraspide/congenial-octo-robot",
"src_encoding": "UTF-8",
"text": "n = int(input())\r\n\r\ndividers = {1 : 3}\r\nanswers = {2: 2, 3 : 2}\r\n\r\ndef calcMinDividers():\r\n \r\n s = 3\r\n curMaxDiv = 2\r\n _curDiv = 1\r\n while(_curDiv < 1000):\r\n \r\n s += 1\r\n _temps = s\r\n _curDiv = 1\r\n\r\n for i in range (2, int(s ** 0.5) + 1):\r\n _countDiv = 0\r\n while _temps % i == 0:\r\n _countDiv += 1\r\n _temps //= i\r\n _curDiv *= _countDiv + 1\r\n\r\n if _temps != 1:\r\n _curDiv *= 2\r\n \r\n answers[s] = _curDiv\r\n \r\n if s % 2 == 0: \r\n _curDiv = answers[s // 2] * answers[s - 1] \r\n else: \r\n _curDiv *= answers[(s - 1) // 2]\r\n\r\n if _curDiv > curMaxDiv :\r\n for i in range ( curMaxDiv, _curDiv):\r\n dividers[i] = (s * (s - 1)) // 2\r\n curMaxDiv = _curDiv\r\n\r\ncalcMinDividers()\r\n\r\nfor i in range(n):\r\n print(dividers[int(input())])"
},
{
"alpha_fraction": 0.38327091932296753,
"alphanum_fraction": 0.4569288492202759,
"avg_line_length": 20.94285774230957,
"blob_id": "5f85eb9c483041a93a0572199394986757a15ed1",
"content_id": "6b21cab627a18b0b3c76f81459cb00501e1c8933",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 801,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 35,
"path": "/euler21hrtest.py",
"repo_name": "Agyraspide/congenial-octo-robot",
"src_encoding": "UTF-8",
"text": "from math import floor\r\n\r\ndef zeller(day, month, year) : \r\n if (month == 1) : \r\n month = 13\r\n year = year - 1\r\n \r\n if (month == 2) : \r\n month = 14\r\n year = year - 1\r\n q = day \r\n m = month \r\n k = year % 100; \r\n j = year // 100; \r\n h = q + 13 * (m + 1) // 5 + k + k // 4 + j // 4 + 5 * j \r\n h = h % 7\r\n return h \r\n\t\r\n#print(zeller(30, 12, 19, floor(2019/100)))\r\n\r\nfor _ in range(int(input())):\r\n\ty1, m1, d1 = list(map(int,input().split()))\r\n\ty2, m2, d2 = list(map(int,input().split()))\r\n\tcount = 0\r\n\tfor i in range(y1, y2+1):\r\n\t\tfor j in range (1, 13):\r\n\t\t\tif (i==y1 and j<m1) or (i==y1 and j==m1 and d1!=1):\r\n\t\t\t\tcontinue\r\n\t\t\telif (i==y2 and j>m2):\r\n\t\t\t\tbreak\r\n\t\t\telse:\r\n\t\t\t\tif zeller(1, j, i)==1:\r\n\t\t\t\t\tcount+=1\r\n\t\t\t\t\t#print(j, i)\r\n\tprint(count)"
},
{
"alpha_fraction": 0.5243902206420898,
"alphanum_fraction": 0.5487805008888245,
"avg_line_length": 40,
"blob_id": "827bf8580670055435b805f90d6ff592a9274630",
"content_id": "9caa4f82d7c15d3a23c53567e27b7f218e58e113",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 82,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 2,
"path": "/euler16.py",
"repo_name": "Agyraspide/congenial-octo-robot",
"src_encoding": "UTF-8",
"text": "for _ in range(int(input())):\r\n print(sum(map(int, str((2<< int(input())-1)))))"
},
{
"alpha_fraction": 0.4585091471672058,
"alphanum_fraction": 0.4767932593822479,
"avg_line_length": 18.314285278320312,
"blob_id": "323abc5859ea8dedd03156dad6952aa2e14f27d5",
"content_id": "b6986f51a70c15f18b9b7bde0f640895883f4781",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 711,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 35,
"path": "/euler3.py",
"repo_name": "Agyraspide/congenial-octo-robot",
"src_encoding": "UTF-8",
"text": "#!/bin/python3\r\n\r\nimport sys\r\n\r\ndef primes(limit):\r\n limitn = limit+1\r\n primes = dict()\r\n for i in range(2, limitn): primes[i] = True\r\n\r\n for i in primes:\r\n factors = range(i,limitn, i)\r\n for f in factors[1:]:\r\n primes[f] = False\r\n return [i for i in primes if primes[i]==True]\r\n'''\r\nt = int(input().strip())\r\nfor a0 in range(t):\r\n n = int(input().strip())\r\n for i in primes(int(n**0.5)):\r\n if n%i==0:\r\n n/=i\r\n while n%i==0:\r\n n/=i\r\n if n ==1:\r\n break\r\n if n ==1:\r\n print(i)\r\n else:\r\n print(int(n))\r\n\r\n'''\r\nimport time\r\nstart = time.time()\r\nprint(sum(primes(10)))\r\nprint(time.time()-start)\r\n"
},
{
"alpha_fraction": 0.4209989905357361,
"alphanum_fraction": 0.4954128563404083,
"avg_line_length": 27.787878036499023,
"blob_id": "bac606a4e614496d9da62d87d3316d76a4fe5a41",
"content_id": "c40ae8eebcf6ad7a053b88d11b61b239e6513db0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 981,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 33,
"path": "/euler17hr.py",
"repo_name": "Agyraspide/congenial-octo-robot",
"src_encoding": "UTF-8",
"text": "ones = (\"\", \"one\", \"two\", \"three\", \"four\", \"five\", \"six\", \"seven\", \"eight\", \"nine\")\r\ntens = (\"\", \"\", \"twenty\", \"thirty\", \"forty\", \"fifty\", \"sixty\", \"seventy\", \"eighty\", \"ninety\")\r\nteens = (\"ten\", \"eleven\", \"twelve\", \"thirteen\", \"fourteen\", \"fifteen\", \"sixteen\", \"seventeen\", \"eighteen\", \"nineteen\")\r\n\r\ndef word(w):\r\n\tif w>=10**12:\r\n\t\ta = w//(10**12)\r\n\t\treturn(word(a) + \"Trillion \" + word(w-(a*(10**12))))\r\n\telif w>=10**9:\r\n\t\ta = w//(10**9)\r\n\t\treturn(word(a) + \"Billion \" + word(w-(a*(10**9))))\r\n\telif w>=10**6:\r\n\t\ta = w//(10**6)\r\n\t\treturn(word(a) + \"Million \" + word(w-(a*(10**6))))\r\n\telif w>=10**3:\r\n\t\ta = w//(10**3)\r\n\t\treturn(word(a) + \"Thousand \" + word(w-(a*(10**3))))\r\n\telif w>=100:\r\n\t\ta = w//100\r\n\t\treturn(word(a) + \"Hundred \" + word(w-(a*100)))\r\n\telif w>=10:\r\n\t\tif w>20:\r\n\t\t\ta = w//10\r\n\t\t\treturn(tens[w//10]+ ' ' + word(w-(a*10)))\r\n\t\telse:\r\n\t\t\treturn(teens[w%10]+ ' ')\r\n\telse:\r\n\t\tif w>0:\r\n\t\t\treturn(ones[w]+ ' ')\r\n\t\telse:\r\n\t\t\treturn('')\r\n\r\nprint(word(104382426112).title())"
},
{
"alpha_fraction": 0.46253231167793274,
"alphanum_fraction": 0.524547815322876,
"avg_line_length": 19.44444465637207,
"blob_id": "a528d4e5be6d0fb314f4fbad6cc4424389627cc5",
"content_id": "025634a57832ac114485b78f258b43e13b85c527",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 387,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 18,
"path": "/euler31test.py",
"repo_name": "Agyraspide/congenial-octo-robot",
"src_encoding": "UTF-8",
"text": "c = [1,2,5,10,20,50,100,200]\r\nd = {0:1}\r\ndef keymeasure(key):\r\n return d[key] and key\r\n\r\ndef count(n): \r\n\tif n in d: \r\n\t\treturn d[n]\r\n\tdp = [1] + n*[0]\r\n\tfor i in range(len(c)):\r\n\t\tfor j in range(c[i],n+1): \r\n\t\t\tdp[j]+=dp[j-c[i]]\r\n\tfor i in range(max(d, key=keymeasure),len(dp)):\r\n\t\td[i] = dp[i]%(10**9+7)\r\n\treturn d[n]\r\n\r\nfor _ in range(int(input())):\r\n\tprint(count(int(input())))\r\n\t"
},
{
"alpha_fraction": 0.4708589017391205,
"alphanum_fraction": 0.5061349868774414,
"avg_line_length": 17.81818199157715,
"blob_id": "a8a13a7b0d70d82e42dc0b0a849c3b87a61f40d1",
"content_id": "7208e6ab370c0f12c6f81e6bbd7d52f138115676",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 652,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 33,
"path": "/euler35.py",
"repo_name": "Agyraspide/congenial-octo-robot",
"src_encoding": "UTF-8",
"text": "from math import sqrt\r\n\r\ndef rotate(n):\r\n n=str(n)\r\n return n[1:]+n[:1]\r\n\t\r\ndef isRotatable(n, p):\r\n for x in range(2,n):\r\n n = rotate(n)\r\n if not p[int(n)]:\r\n return False\r\n return True\r\n\r\n\r\ndef sieve(n):\r\n primes = [True] * (n+1)\r\n primes[0] = primes[1] = False\r\n\r\n for x in range(4,n+1,2):\r\n primes[x] = False\r\n\r\n for x in range(3,int(sqrt(n))+1,2):\r\n if(primes[x]):\r\n for y in range(x*x,n+1,x):\r\n primes[y] = False\r\n\r\n return primes\r\ncount = 0\r\nprimes = sieve(1000000)\r\nfor x in range(2, len(primes)):\r\n\tif(isRotatable(x,primes)):\r\n\t\tcount+=1\r\nprint(count)"
},
{
"alpha_fraction": 0.3965517282485962,
"alphanum_fraction": 0.47044333815574646,
"avg_line_length": 22,
"blob_id": "cc941c3a0555a9239913f0db4a9fe667e43fe9b7",
"content_id": "9077bfda5204f447228c33d43b3d98e2a784bc57",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 406,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 17,
"path": "/euler31.py",
"repo_name": "Agyraspide/congenial-octo-robot",
"src_encoding": "UTF-8",
"text": "def getWays(n, c):\r\n dp = [1] + n*[0]\r\n for i in range(len(c)):\r\n for j in range(c[i],n+1): dp[j]+=dp[j-c[i]]\r\n return(dp[-1])\r\n\r\nd = {0:1}\r\nc = [1,2,5,10,20,50,100,200]\r\ndef store(n):\r\n if n in d:\r\n return d[n]\r\n else:\r\n d[n] = getWays(n,c)%(10**9+7)\r\n return d[n]\r\n#for i in range(0,10**5,3): store(i)\r\nfor _ in range(int(input())):\r\n\tprint(store(int(input())))"
},
{
"alpha_fraction": 0.4185110628604889,
"alphanum_fraction": 0.4486921429634094,
"avg_line_length": 20.68181800842285,
"blob_id": "de00fa37bb65c608eb318b76f00eb6b00ae1a4dd",
"content_id": "8bda908d7890207fc90d219af5749b720addc2c6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 497,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 22,
"path": "/euler23hrtest.py",
"repo_name": "Agyraspide/congenial-octo-robot",
"src_encoding": "UTF-8",
"text": "from functools import reduce\r\n\r\n\r\ndef factorsSum(n): \r\n return sum(set(reduce(list.__add__, \r\n ([i, n//i] for i in range(1, int(n**0.5) + 1) if n % i == 0))))-n\r\nl = []\r\nd = {}\r\nfor i in range(1,100001):\r\n if i < factorsSum(i):\r\n l.append(i)\r\n d[i] = 1\r\n\r\nfor _ in range(int(input())):\r\n n = int(input())\r\n i = 0\r\n foo = False\r\n while l[i]<n:\r\n if n-l[i] in d:\r\n foo = True\r\n i+=1\r\n print(\"YES\") if foo else print(\"NO\")"
},
{
"alpha_fraction": 0.46252676844596863,
"alphanum_fraction": 0.5117772817611694,
"avg_line_length": 13.129032135009766,
"blob_id": "21b80e786122beb6aaa1c45b663a65192847bd6f",
"content_id": "e5c0079f22cd511eb581aedde64ccac9c9e14771",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 467,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 31,
"path": "/euler32.py",
"repo_name": "Agyraspide/congenial-octo-robot",
"src_encoding": "UTF-8",
"text": "def checkPan(a,p):\r\n\tpNew = p[:]\r\n\tfoo = True\r\n\tfor i in a:\r\n\t\tif i not in pNew:\r\n\t\t\tfoo=False\r\n\t\t\tbreak\r\n\t\telse:\r\n\t\t\tpNew.remove(i)\r\n\tif len(pNew)>0:\r\n\t\tfoo = False\r\n\treturn(foo)\r\n\r\na = '123456789'\r\nl=[]\r\nfor i in a:\r\n\tl.append(i)\r\nd = {}\r\nx = []\r\nfor i in range(1,10000):\r\n\tfor j in range(1,10000):\r\n\t\tk = i*j\r\n\t\tif k not in d:\r\n\t\t\tt = str(i)+str(j)+str(k)\r\n\t\t\tif len(t)<9:\r\n\t\t\t\tcontinue\r\n\t\t\tif checkPan(t,l):\r\n\t\t\t\td[k] =i\r\n\t\t\t\tx.append(k)\r\n\tprint(i)\r\nprint(sum(x))"
},
{
"alpha_fraction": 0.4768907427787781,
"alphanum_fraction": 0.5315126180648804,
"avg_line_length": 13.354838371276855,
"blob_id": "0f8d3aeb659a8b31dd6be331dbb26dd59608ff8f",
"content_id": "103d2d67e973a5e675fad6eb7ae4a91e994227b5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 476,
"license_type": "no_license",
"max_line_length": 29,
"num_lines": 31,
"path": "/euler26test.py",
"repo_name": "Agyraspide/congenial-octo-robot",
"src_encoding": "UTF-8",
"text": "def makeCoprime(n):\r\n\twhile n%2==0:\r\n\t\tn>>=1\r\n\twhile n%5==0:\r\n\t\tn//=5\r\n\treturn(n)\r\n\t\r\ndef getCycleLength(n):\r\n\tmv = 10%n\r\n\tcm = mv\r\n\tk =1\r\n\twhile True:\r\n\t\tif cm ==1:\r\n\t\t\treturn k\r\n\t\tcm = (cm*mv)%n\r\n\t\tk+=1\r\n\t\r\nd = {0:-1, 1:0, 2:0}\r\nmxLength= 0\r\nmxCycleAt =1\r\nfor i in range(3, 10001):\r\n\tcP = makeCoprime(i)\r\n\tif cP!=1:\r\n\t\tn = getCycleLength(cP)\r\n\t\tif(n>mxLength):\r\n\t\t\tmxLength = n\r\n\t\t\tmxCycleAt =i\r\n\td[i] = mxCycleAt\r\n\t\r\nfor _ in range(int(input())):\r\n\tprint(d[int(input())])\r\n"
},
{
"alpha_fraction": 0.428943932056427,
"alphanum_fraction": 0.46544981002807617,
"avg_line_length": 17.225000381469727,
"blob_id": "300da93363fe3da54040d8646acadb62c818d74e",
"content_id": "678cc80c7b7f3f670e7673f3667ba5e265fc4063",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 767,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 40,
"path": "/euler2test.py",
"repo_name": "Agyraspide/congenial-octo-robot",
"src_encoding": "UTF-8",
"text": "'''import sys\r\n#import timeit\r\nd = {}\r\ndef fib(x):\r\n\tif x in d:\r\n\t\treturn(d[x])\r\n\telse:\r\n\t\td[x]=(1/(5**0.5)*((((1+(5**0.5))/2)**x)-(((1-(5**0.5))/2)**x)))\r\n\t\treturn(d[x])\r\n \r\nt = int(input().strip())\r\nfor a0 in range(t):\r\n\tn = int(input().strip())\r\n\t#start = timeit.timeit()\r\n\tc = []\r\n\ti = 3\r\n\tr = int(fib(i))\r\n\tprint(r)\r\n\twhile r < n:\r\n\t\tif r%2==0:\r\n\t\t\tc.append(r)\r\n\t\ti+=3\r\n\t\tr = int(fib(i))\r\n\tprint(sum(c))\r\n\t#end = timeit.timeit()\r\n\t#print(start-end)'''\r\nimport sys\r\nd = {0:1, 1:1}\r\ndef fib(i):\r\n if i in d:\r\n return(d[i])\r\n else:\r\n d[i]= fib(i-1)+fib(i-2)\r\n return(d[i])\r\n\r\nt = int(input().strip())\r\nfor a0 in range(t):\r\n n = int(input().strip())\r\n l = [fib(i) for i in range(n) if (fib(i)<n and fib(i)%2==0)]\r\n print(sum(l))"
},
{
"alpha_fraction": 0.7010869383811951,
"alphanum_fraction": 0.7010869383811951,
"avg_line_length": 44.5,
"blob_id": "c9335b136b9fab74b7e20d252be2f2c1b488b3d1",
"content_id": "5f357c9cacf8c01ef5872fa614439a5b2d66c049",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 184,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 4,
"path": "/euler20.py",
"repo_name": "Agyraspide/congenial-octo-robot",
"src_encoding": "UTF-8",
"text": "# Enter your code here. Read input from STDIN. Print output to STDOUT\r\nfrom math import factorial\r\nfor _ in range(int(input())):\r\n print(sum(map(int, str(factorial(int(input()))))))"
},
{
"alpha_fraction": 0.3128654956817627,
"alphanum_fraction": 0.6608186960220337,
"avg_line_length": 19.5,
"blob_id": "85e33fe781766835737aa8ad416b68ed1ee5584c",
"content_id": "b2bfa915b607f7079822c568d65019ccb4798efc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 342,
"license_type": "no_license",
"max_line_length": 153,
"num_lines": 16,
"path": "/euler21test.py",
"repo_name": "Agyraspide/congenial-octo-robot",
"src_encoding": "UTF-8",
"text": "from functools import reduce\r\n\r\n\r\n\r\nfoo = [220,284,1184,1210,2620,2924,5020,5564,6232,6368,10744, 10856,12285,14595,17296,18416,63020,66928,66992,67095, 69615,71145,76084,79750,87633,88730]\r\n\r\nfor _ in range(int(input())):\r\n\tn = int(input())\r\n\tanswer = 0\r\n\tfor i in foo:\r\n\t\tprint(i)\r\n\t\tif i<n:\r\n\t\t\tanswer+=i\r\n\t\telse:\r\n\t\t\tbreak\r\n\tprint(answer)"
},
{
"alpha_fraction": 0.45264846086502075,
"alphanum_fraction": 0.4911717474460602,
"avg_line_length": 16.878787994384766,
"blob_id": "7a71bdc15c89b8d20ce714788dd1a600c7cf5b31",
"content_id": "1404bcc54bb6bce7263ec2aab26dddd5677e06ab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 623,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 33,
"path": "/euler29hrtest.py",
"repo_name": "Agyraspide/congenial-octo-robot",
"src_encoding": "UTF-8",
"text": "'''\r\nfrom fractions import Fraction as Fr\r\n \r\ndef bernoulli(n):\r\n A = [0] * (n+1)\r\n for m in range(n+1):\r\n A[m] = Fr(1, m+1)\r\n for j in range(m, 0, -1):\r\n A[j-1] = j*(A[j-1] - A[j])\r\n return A[0] # (which is Bn)\r\n\r\nf = {0:1}\r\ndef factorial(n):\r\n\tif n in f:\r\n\t\treturn f[n]\r\n\telse:\r\n\t\tf[n] = n*factorial(n-1)\r\n\t\treturn f[n]\r\n\t\t\r\ndef binomial(n,k):\r\n\treturn(factorial(n)/(factorial(k)*factorial(n-k)))\r\n\t\r\ndef sum(n,p):\r\n\ts = 0\r\n\tfor j in range(p):\r\n\t\ts+= binomial(p+1,j)*bernoulli(j)*n**(p-j+1)\r\n\treturn s/(p+1)\r\n'''\r\n\r\ndef sum(n,p):\r\n\treturn((n**(p+1)-1)/(n-1))\r\n\t\r\nprint(sum(5,5)-sum(5,1))\r\n"
},
{
"alpha_fraction": 0.4060606062412262,
"alphanum_fraction": 0.5292929410934448,
"avg_line_length": 20.590909957885742,
"blob_id": "143edfc752d4608c8a9c561611367f1afaeb39de",
"content_id": "e1fe5872262eba466b6a3cbcd29f057b0a456ce0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 495,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 22,
"path": "/euler19.py",
"repo_name": "Agyraspide/congenial-octo-robot",
"src_encoding": "UTF-8",
"text": "year = 1900\r\nmon = {0: 'Jan', 1: 'Feb', 2:'Mar',3:'Apr', 4:'May', 5:'Jun', 6:'Jul', 7:'Aug',8:'Sep', 9:'Oct', 10:'Nov', 11:'Dec'}\r\ndays30 = ['Sep','Apr', 'Jun', 'Nov']\r\nmonths = 0\r\nday = 0\r\ncount = 0\r\nwhile year<2001:\r\n\tm = mon[months%12]\r\n\tif m =='Feb' and ((year%4==0 and not year%100 ==0) or year%400==0):\r\n\t\tday+=29\r\n\telif m=='Feb':\r\n\t\tday+=28\r\n\telif m in days30:\r\n\t\tday+=30\r\n\telse:\r\n\t\tday+=31\r\n\tif day%7==6 and year>=1901:\r\n\t\tcount+=1\r\n\tmonths+=1\r\n\tif months%12==0:\r\n\t\tyear+=1\r\nprint(count)"
},
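The hand-rolled calendar in euler19.py produces the expected 171, but its day % 7 == 6 test actually inspects the first day of the *following* month, so 1 Jan 1901 itself is never examined (it happens to be a Tuesday, leaving the count unaffected). A cross-check with the standard library:

import datetime

count = sum(1 for year in range(1901, 2001)
              for month in range(1, 13)
              if datetime.date(year, month, 1).weekday() == 6)  # Monday == 0, Sunday == 6
print(count)  # 171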
{
"alpha_fraction": 0.5046296119689941,
"alphanum_fraction": 0.520061731338501,
"avg_line_length": 17.696969985961914,
"blob_id": "69cb761cc3703fc828c756e9b6503dd02afdb681",
"content_id": "c5810394eda1ff39654d454c3ffef9c1613a113f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 648,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 33,
"path": "/euler23.py",
"repo_name": "Agyraspide/congenial-octo-robot",
"src_encoding": "UTF-8",
"text": "import math\r\nimport time\r\n\r\ndef divisorGenerator(n):\r\n large_divisors = []\r\n for i in range(1, int(math.sqrt(n) + 1)):\r\n if n % i == 0:\r\n yield i\r\n if i*i != n:\r\n large_divisors.append(int(n / i))\r\n for divisor in reversed(large_divisors):\r\n yield divisor\r\n\t\t\r\nn = 28124\r\np = []\r\nd = {}\r\nstart = time.time()\r\nfor i in range(n):\r\n\tif (sum(list(divisorGenerator(i)))-i)>i:\r\n\t\tp.append(i)\r\n\t\td[i] = i\r\nsums = 1\r\nfor i in range(2,n):\r\n\tboo = True\r\n\tfor j in p:\r\n\t\tif j < i:\r\n\t\t\tif (i-j) in d:\r\n\t\t\t\tboo = False\r\n\t\t\t\tbreak\r\n\t\telse: break\r\n\tif boo: sums+=i\r\nprint(sums)\r\nprint(time.time()-start)"
}
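divisorGenerator above trial-divides each number separately. When divisor sums are needed for every n below a bound, a sieve is much faster; a sketch with the entry's 28124 limit:

limit = 28124
dsum = [0] * limit  # dsum[n] will hold the sum of proper divisors of n
for d in range(1, limit // 2 + 1):
    for m in range(2 * d, limit, d):
        dsum[m] += d

abundant = [i for i in range(1, limit) if dsum[i] > i]
print(len(abundant), abundant[0])  # the smallest abundant number is 12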
] | 39 |
wingofsnake/Blockchain_Project | https://github.com/wingofsnake/Blockchain_Project | b0abd42c532cf7e8d6345811d51d533464f4dfb4 | d22bef5c04d033e0bab0e85c49fc1e65cf5a23f5 | 0ae08cd727b5c9973fb3a4f9f654899cb79d6deb | refs/heads/master | 2020-07-22T18:16:31.040014 | 2019-12-20T07:50:19 | 2019-12-20T07:50:19 | 207,285,436 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7593361139297485,
"alphanum_fraction": 0.771784245967865,
"avg_line_length": 23.100000381469727,
"blob_id": "e6a2aa28d5f9bfce6ba35b7b53377a829fd243a7",
"content_id": "638d35a6c8a01fda4911de56b51f3266d4fa6ff3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 241,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 10,
"path": "/README.txt",
"repo_name": "wingofsnake/Blockchain_Project",
"src_encoding": "UTF-8",
"text": "# Blockchain_Project\n\n1. Project_Decen_Simple.py\n Main file for project_Decentralization in Blockchain\n \n2. Functions.py\n Moduel for functions that will be imported in Project_Decen_Simple.py\n \n3. simple_model.c\n Pilot code for project\n"
},
{
"alpha_fraction": 0.5820930004119873,
"alphanum_fraction": 0.5979069471359253,
"avg_line_length": 41.91999816894531,
"blob_id": "e8534c5a2748d4e26d2dde1367ce7a01ab83d0a0",
"content_id": "a720e54e3eb5a4903fe09910db2ff6e08e2cf8b1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4300,
"license_type": "no_license",
"max_line_length": 143,
"num_lines": 100,
"path": "/Project_Decent_Babomodel.py",
"repo_name": "wingofsnake/Blockchain_Project",
"src_encoding": "UTF-8",
"text": "import sys\nimport os\nimport time\n\nfrom multiprocessing import Process\n\nfrom Functions import Set_List, Mining, Investment, Reinvestment, FilePrint_babo, redistribution_babo\n\n#define functions\ndef set_parameter(Parameters):\n \"\"\"Set parameters with system argument values\"\"\"\n\n Parameters['Repeat'] = int(sys.argv[1])\n Parameters['NumCore'] = int(sys.argv[2])\n #Parameters['StaticOrNot'] = int(sys.argv[2])\n #Parameters['DistributionFormat'] = int(sys.argv[3])\n #Parameters['InitialParameter'] = int(sys.argv[4])\n #Parameters['NodeSize'] = int(sys.argv[5])\n #Parameters['ProcessingNumber'] = int(sys.argv[6])\n #Parameters['ReinvestmentParameter'] = 16\n\ndef processing_multi(Hash_list, Crypto_Wealth_list, Parameters, reinvestment_ratio_list):\n \"\"\"Call Set_List function, Mining function, Investment function,\n Reinvestment function, and fileIO function\"\"\"\n\n copied_dic = Parameters.copy()\n\n for repeat in range(Parameters['Repeat']) :\n for Dis in range(Parameters['DistributionFormat']) :\n #for ReinvORRedis in range(Parameters['ReinvOrRediv']) :\n for Growth in range(Parameters['StaticOrNot']) :\n for reinv in range(int((Parameters['ReinvestmentParameter']/Parameters['NumCore']))) :\n\n copied_dic['Repeat'] = repeat\n copied_dic['DistributionFormat'] = Dis\n copied_dic['StaticOrNot'] = Growth\n copied_dic['ReinvestmentParameter'] = (reinv * Parameters['NumCore'])\n #copied_dic['ReinvOrRediv'] = ReinvORRedis\n copied_dic['ReinvOrRediv'] = 1\n print(copied_dic)\n\n Hash_list = list()\n Crypto_Wealth_list = list()\n\n Set_List(Hash_list, Crypto_Wealth_list, copied_dic)\n\n\n procs = []\n for i in range(Parameters['NumCore']) :\n proc = Process(target = processing, args = (Hash_list, Crypto_Wealth_list, copied_dic, i, reinvestment_ratio_list))\n procs.append(proc)\n proc.start()\n\n for proc in procs :\n proc.join()\n #sys.exit()\n\ndef processing(Hash_list, Crypto_Wealth_list, copied_dic, index, reinvestment_ratio_list) :\n\n copied_dic['ReinvestmentParameter'] = copied_dic['ReinvestmentParameter'] + index\n pid = os.getpid()\n for i in range(copied_dic['ProcessingNumber']) :\n Miner = Mining(Hash_list, Crypto_Wealth_list, copied_dic)\n Investment(Hash_list, Crypto_Wealth_list, copied_dic)\n if copied_dic['ReinvOrRediv'] == 0:\n Reinvestment(Hash_list, Crypto_Wealth_list, copied_dic, reinvestment_ratio_list)\n elif copied_dic['ReinvOrRediv'] == 1:\n redistribution_babo(Hash_list, Crypto_Wealth_list, Miner, reinvestment_ratio_list, copied_dic)\n if (i % 1000) == 0:\n print('{0}th calculation by process id: {1}'.format(i, pid))\n #print('{0}th calculation by process id: {1}'.format(i, pid))\n\n Hash_list.sort(reverse = True)\n Crypto_Wealth_list.sort(reverse = True)\n\n FilePrint_babo(Hash_list, Crypto_Wealth_list, copied_dic)\n\n print('Dis = ' + str(copied_dic['DistributionFormat']) + \\\n ' Par = ' + str(copied_dic['InitialParameter']) + \\\n ' G = ' + str(copied_dic['StaticOrNot']) + \\\n ' Reinv = ' + str(copied_dic['ReinvestmentParameter'] + index) + \\\n 'Reinv or Redis = ' + str(copied_dic['ReinvOrRediv']))\n\nif __name__ == '__main__':\n\n #define dictionary for parameters\n Parameters = {'Repeat': 1, 'StaticOrNot': 2,\n 'DistributionFormat': 4, 'InitialParameter': 2, 'NodeSize': 100000,\n 'ProcessingNumber': 100000, 'ReinvestmentParameter': 8, 'NumCore':1, 'ReinvOrRediv': 2}\n reinvestment_ratio_list = [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.5]\n\n set_parameter(Parameters)\n print(Parameters)\n\n #define main two variables\n Hash_list = 
list()\n Crypto_Wealth_list = list()\n\n processing_multi(Hash_list, Crypto_Wealth_list, Parameters, reinvestment_ratio_list)\n print(Parameters['NodeSize'])\n\n\n\n\n\n\n\n\n"
},
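Project_Decent_Babomodel.py fans out one multiprocessing.Process per core, joins them, and lets each worker derive its own parameter slice from its core index. A stripped-down, self-contained sketch of that fan-out/join pattern (the worker body is a placeholder):

from multiprocessing import Process

def worker(index, params):
    # each worker offsets the shared parameter by its own index
    reinv = params['ReinvestmentParameter'] + index
    print('worker', index, 'handles ReinvestmentParameter', reinv)

if __name__ == '__main__':
    params = {'ReinvestmentParameter': 0, 'NumCore': 4}
    procs = [Process(target=worker, args=(i, params)) for i in range(params['NumCore'])]
    for p in procs:
        p.start()
    for p in procs:
        p.join()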
{
"alpha_fraction": 0.5902397036552429,
"alphanum_fraction": 0.6005390286445618,
"avg_line_length": 43.39743423461914,
"blob_id": "bbda96a2c3151d6094c5b4dc29296b55396bc9a5",
"content_id": "1d98e96aff6b2803d320c15380b67d3cefd7d1fe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10389,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 234,
"path": "/Functions.py",
"repo_name": "wingofsnake/Blockchain_Project",
"src_encoding": "UTF-8",
"text": "import random\nimport os\nimport time\nimport sys\nimport csv\nimport math\n\ndef Set_List(Hash_list, Crypto_Wealth_list, Parameters):\n \"\"\"Initialize Hash and Wealth lists with input parameters\"\"\"\n if Parameters['StaticOrNot'] == 0 :\n for i in range(Parameters['NodeSize']) :\n Hash_list += [RandomDisGen(Parameters)]\n Crypto_Wealth_list += [0]\n elif Parameters['StaticOrNot'] == 1 :\n Hash_list += [RandomDisGen(Parameters)]\n Crypto_Wealth_list += [0]\n else :\n print(\"Static or Not parameters should be 0 or 1\")\n\ndef Mining(Hash_list, Crypto_Wealth_list, Parameters):\n \"\"\"Decide which node will success mining, and get reward\"\"\"\n total = 0\n Reward = [1, 10, 30, 50]\n for i in range(len(Hash_list)) :\n total += Hash_list[i]\n index = 0\n Success_Possibility = random.uniform(0, total)\n Success_Indicator = 0\n for i in range(len(Hash_list)) :\n Success_Indicator += Hash_list[i]\n if Success_Indicator >= Success_Possibility:\n Crypto_Wealth_list[i] += Reward[Parameters['Reward']]\n index = i\n break\n return index\n\ndef Investment(Hash_list, Crypto_Wealth_list, Parameters):\n \"\"\"Decide new node enter the system\"\"\"\n if Parameters['StaticOrNot'] == 0 :\n for i in range(Parameters['NodeSize']) :\n if Hash_list[i] == 0 :\n Hash_list[i] += RandomDisGen(Parameters)\n break\n elif Parameters['StaticOrNot'] == 1 :\n if len(Hash_list) < Parameters['NodeSize'] :\n Hash_list += [RandomDisGen(Parameters)]\n Crypto_Wealth_list += [0]\n\ndef Reinvestment(Hash_list, Crypto_Wealth_list, Parameters, reinvestment_ratio_list):\n if Parameters['ReinvType'] == 0:\n Reinvestment_Simple(Hash_list, Crypto_Wealth_list, Parameters, reinvestment_ratio_list)\n elif Parameters['ReinvType'] == 1:\n Reinvestment_Chance(Hash_list, Crypto_Wealth_list, Parameters, reinvestment_ratio_list)\n\ndef Reinvestment_Simple(Hash_list, Crypto_Wealth_list, Parameters, reinvestment_ratio_list):\n \"\"\"Decide each node reinvest to their hash power and make distribution of wealth\"\"\"\n if len(Hash_list) > 1 :\n for i in range(len(Hash_list)):\n Hash_list[i] += reinvestment_ratio_list[Parameters['ReinvestmentParameter']] * Crypto_Wealth_list[i]\n Crypto_Wealth_list[i] -= reinvestment_ratio_list[Parameters['ReinvestmentParameter']] * Crypto_Wealth_list[i]\n\ndef Reinvestment_Chance(Hash_list, Crypto_Wealth_list, Parameters, reinvestment_ratio_list):\n \"\"\"Decide each node reinvest to their hash power and make distribution of wealth\"\"\"\n if len(Hash_list) > 1:\n maximum_wealth = 0\n for i in range(len(Hash_list)):\n if maximum_wealth < Crypto_Wealth_list[i]:\n maximum_wealth = Crypto_Wealth_list[i]\n\n reinvestment_chance = 0\n for i in range(len(Hash_list)) :\n reinvestment_chance = random.uniform(0,1)\n if reinvestment_chance <= (Crypto_Wealth_list[i] / maximum_wealth) :\n Hash_list[i] += reinvestment_ratio_list[Parameters['ReinvestmentParameter']] * Crypto_Wealth_list[i]\n Crypto_Wealth_list[i] -= reinvestment_ratio_list[Parameters['ReinvestmentParameter']] * Crypto_Wealth_list[i]\n\ndef Redistribution(Hash_list, Crypto_Wealth_list, Parameters, index, reinvestment_ratio_list):\n redistribution_babo(Hash_list, Crypto_Wealth_list, index, reinvestment_ratio_list, Parameters)\n\ndef redistribution_babo(Hash_list, Crypto_Wealth_list, index, reinvestment_ratio_list, Parameters):\n \"\"\"\n multi = 50.0 / (len(Hash_list) - 1)\n for i in range(len(Hash_list)):\n if i != index:\n Crypto_Wealth_list[i] += multi * reinvestment_ratio_list[Parameters['ReinvestmentParameter']]\n else:\n Crypto_Wealth_list[i] 
-= multi * reinvestment_ratio_list[Parameters['ReinvestmentParameter']]\n \"\"\"\n if len(Hash_list) > 1:\n temp_list = list()\n multi = 1.0/ (len(Hash_list) - 1)\n for i in range(len(Hash_list)):\n temp_list += [Crypto_Wealth_list[i] * reinvestment_ratio_list[Parameters['RedistributionParameter']]]\n Crypto_Wealth_list[i] -= Crypto_Wealth_list[i] * reinvestment_ratio_list[Parameters['RedistributionParameter']]\n\n temp = 0\n for i in range(len(Hash_list)):\n temp += temp_list[i]\n\n for i in range(len(Hash_list)):\n temp_list[i] = (temp - temp_list[i]) * multi\n Crypto_Wealth_list[i] += temp_list[i]\n\ndef FilePrint(Hash_list, Crypto_Wealth_list, Parameters):\n \"\"\"Print out CSV file of hash power and crypto-currency\"\"\"\n\n filename_hash = str(Parameters['Repeat']) + 'h' + \\\n 'di' + str(Parameters['DistributionFormat']) + \\\n 'dp' + str(Parameters['InitialParameter']) + \\\n 's' + str(Parameters['NodeSize']) + \\\n 'n' + str(Parameters['ProcessingNumber']) + \\\n 'G' + str(Parameters['StaticOrNot']) + \\\n 're' + str(Parameters['ReinvestmentParameter']) + \\\n 'rdi' + str(Parameters['RedistributionParameter']) + \\\n 'rw' + str(Parameters['Reward']) + \\\n 'rit' + str(Parameters['ReinvType']) + '.csv'\n fileout_hash = open(filename_hash, 'w')\n wrh = csv.writer(fileout_hash)\n wrh.writerow(['Hash', 'Accumulated frequency'])\n\n ach = 0\n for i in range(len(Hash_list) - 1) :\n ach += 1\n if Hash_list[i+1] < Hash_list[i] :\n wrh.writerow([Hash_list[i], ach])\n\n ach += 1\n wrh.writerow([Hash_list[Parameters['NodeSize'] - 1], ach])\n fileout_hash.close()\n\n filename_crypto = str(Parameters['Repeat']) + 'c' + \\\n 'di' + str(Parameters['DistributionFormat']) + \\\n 'dp' + str(Parameters['InitialParameter']) + \\\n 's' + str(Parameters['NodeSize']) + \\\n 'n' + str(Parameters['ProcessingNumber']) + \\\n 'G' + str(Parameters['StaticOrNot']) + \\\n 're' + str(Parameters['ReinvestmentParameter']) + \\\n 'rdi' + str(Parameters['RedistributionParameter']) + \\\n 'rw' + str(Parameters['Reward']) + \\\n 'rit' + str(Parameters['ReinvType']) + '.csv'\n fileout_crypto = open(filename_crypto, 'w')\n wrc = csv.writer(fileout_crypto)\n wrc.writerow(['Cryptocurrency, Accumulated frequency'])\n\n acc = 0\n for i in range(len(Crypto_Wealth_list) - 1) :\n acc += 1\n if Crypto_Wealth_list[i+1] < Crypto_Wealth_list[i] :\n wrc.writerow([Crypto_Wealth_list[i], acc])\n\n acc += 1\n wrc.writerow([Crypto_Wealth_list[Parameters['NodeSize'] - 1], acc])\n fileout_crypto.close()\n\ndef FilePrint_babo(Hash_list, Crypto_Wealth_list, Parameters):\n \"\"\"Print out CSV file of hash power and crypto-currency for Babo model\"\"\"\n\n filename_hash = str(Parameters['Repeat']) + 'h' + 'di' + \\\n str(Parameters['DistributionFormat']) + 'dp'+ \\\n str(Parameters['InitialParameter']) + 's' + \\\n str(Parameters['NodeSize']) + 'n' + \\\n str(Parameters['ProcessingNumber']) + 'G' + \\\n str(Parameters['StaticOrNot']) + 'rd' + \\\n str(Parameters['ReinvOrRediv']) + 're'+ \\\n str(Parameters['ReinvestmentParameter']) + '.csv'\n fileout_hash = open(filename_hash, 'w')\n wrh = csv.writer(fileout_hash)\n wrh.writerow(['Hash', 'Accumulated frequency'])\n\n ach = 0\n for i in range(len(Hash_list) - 1) :\n ach += 1\n if Hash_list[i+1] < Hash_list[i] :\n wrh.writerow([Hash_list[i], ach])\n\n ach += 1\n wrh.writerow([Hash_list[Parameters['NodeSize'] - 1], ach])\n fileout_hash.close()\n\n filename_crypto = str(Parameters['Repeat']) + 'c' + 'di' + \\\n str(Parameters['DistributionFormat']) + 'dp' + \\\n 
str(Parameters['InitialParameter']) + 's' + \\\n str(Parameters['NodeSize']) + 'n' + \\\n str(Parameters['ProcessingNumber']) + 'G' + \\\n str(Parameters['StaticOrNot']) + 'rd' + \\\n str(Parameters['ReinvOrRediv']) + 're' + \\\n str(Parameters['ReinvestmentParameter']) + '.csv'\n fileout_crypto = open(filename_crypto, 'w')\n wrc = csv.writer(fileout_crypto)\n wrc.writerow(['Cryptocurrency, Accumulated frequency'])\n\n acc = 0\n for i in range(len(Crypto_Wealth_list) - 1) :\n acc += 1\n if Crypto_Wealth_list[i+1] < Crypto_Wealth_list[i] :\n wrc.writerow([Crypto_Wealth_list[i], acc])\n\n acc += 1\n wrc.writerow([Crypto_Wealth_list[Parameters['NodeSize'] - 1], acc])\n fileout_crypto.close()\n\ndef RandomDisGen(Parameters):\n \"\"\"Create random number to make specific types of distribution\"\"\"\n Uniform_Parameters = [1, 5, 10]\n Exponential_Parameters = [0.1, 1, 5]\n Power_Parameters = [1.5, 2, 2.5]\n Regular_Parameters = [1, 5, 10, 30]\n\n if Parameters['DistributionFormat'] == 0 :\n return random.uniform(0, Uniform_Parameters[Parameters['InitialParameter']])\n elif Parameters['DistributionFormat'] == 1 :\n return RandomExp(Parameters, Exponential_Parameters)\n elif Parameters['DistributionFormat'] == 2 :\n return RandomPow(Parameters, Power_Parameters)\n elif Parameters['DistributionFormat'] == 3 :\n return Regular_Parameters[Parameters['InitialParameter']]\n else :\n print(\"Distribution Format should be 0 to 3\")\n\ndef RandomExp(Parameters, Exponential_Parameters):\n \"\"\"Create random number that follow exp distribution\"\"\"\n UncheckedChance = random.uniform(0,1)\n while UncheckedChance == 0 or UncheckedChance == 1:\n UncheckedChance = random.uniform(0,1)\n\n return (-1 / Exponential_Parameters[Parameters['InitialParameter']]) * math.log(1 - UncheckedChance)\n\ndef RandomPow(Parameters, Power_Parameters):\n \"\"\"Create random number that follow Power distribution\"\"\"\n UncheckedChance = random.uniform(0,1)\n while UncheckedChance == 0 or UncheckedChance == 1:\n UncheckedChance = random.uniform(0,1)\n\n return 1.0 / pow(1 - UncheckedChance, (1.0 / Power_Parameters[Parameters['InitialParameter']]))\n"
},
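Mining() in Functions.py implements roulette-wheel selection with a linear scan on every draw. The same proportional draw can reuse prefix sums plus a binary search, making each draw O(log N) once the cumulative array is built; an illustrative sketch, not the repo's code:

import bisect
import itertools
import random

def make_picker(hash_list):
    # build cumulative hash power once; each draw is then one binary search
    cumulative = list(itertools.accumulate(hash_list))
    return lambda: bisect.bisect_left(cumulative, random.uniform(0, cumulative[-1]))

pick = make_picker([1.0, 2.0, 7.0])
counts = [0, 0, 0]
for _ in range(10000):
    counts[pick()] += 1
print(counts)  # roughly proportional to 1:2:7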
{
"alpha_fraction": 0.5427230000495911,
"alphanum_fraction": 0.5692018866539001,
"avg_line_length": 44.82758712768555,
"blob_id": "e4c962ee9053a041ad7d288d58982dbf76d86166",
"content_id": "010f3e104eac3eed0de6f6c15789eb90c05486d0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5325,
"license_type": "no_license",
"max_line_length": 151,
"num_lines": 116,
"path": "/Project_Decent_Simple.py",
"repo_name": "wingofsnake/Blockchain_Project",
"src_encoding": "UTF-8",
"text": "import sys\nimport os\nimport time\n\nfrom multiprocessing import Process\n\nfrom Functions import Set_List, Mining, Investment, Reinvestment, FilePrint, Redistribution\n\n#define functions\ndef set_parameter(Parameters):\n \"\"\"Set parameters with system argument values\"\"\"\n\n Parameters['Repeat'] = int(sys.argv[1])\n Parameters['NumCore'] = int(sys.argv[2])\n #Parameters['StaticOrNot'] = int(sys.argv[2])\n #Parameters['DistributionFormat'] = int(sys.argv[3])\n #Parameters['InitialParameter'] = int(sys.argv[4])\n #Parameters['NodeSize'] = int(sys.argv[5])\n #Parameters['ProcessingNumber'] = int(sys.argv[6])\n #Parameters['ReinvestmentParameter'] = 16\n\ndef processing_multi(Hash_list, Crypto_Wealth_list, Parameters, reinvestment_ratio_list):\n \"\"\"Call Set_List function, Mining function, Investment function,\n Reinvestment function, and fileIO function\"\"\"\n\n copied_dic = Parameters.copy()\n\n for repeat in range(Parameters['Repeat']) :\n #for Dis in range(2, Parameters['DistributionFormat']) :\n for rw in range(Parameters['Reward']):\n for Ini in range(Parameters['InitialParameter']) :\n #for Growth in range(Parameters['StaticOrNot']) :\n for reinv in range(Parameters['ReinvestmentParameter']) :\n #for ReinvType in range(Parameters['ReinvType']):\n for redis in range(Parameters['RedistributionParameter']):\n\n copied_dic['Repeat'] = repeat\n copied_dic['DistributionFormat'] = 3\n copied_dic['InitialParameter'] = Ini\n #copied_dic['InitialParameter'] = 2\n #copied_dic['StaticOrNot'] = Growth\n copied_dic['StaticOrNot'] = 0\n copied_dic['ReinvestmentParameter'] = reinv\n #copied_dic['ReinvestmentParameter'] = 1\n copied_dic['ReinvType'] = 0\n copied_dic['RedistributionParameter'] = redis\n\n Hash_list = list()\n Crypto_Wealth_list = list()\n\n procs = []\n for i in range(Parameters['NumCore']) :\n proc = Process(target = processing, args = (Hash_list, Crypto_Wealth_list, copied_dic, i, reinvestment_ratio_list))\n procs.append(proc)\n proc.start()\n\n for proc in procs :\n proc.join()\n #sys.exit()\n\ndef processing(Hash_list, Crypto_Wealth_list, copied_dic, index, reinvestment_ratio_list) :\n\n #copied_dic['ReinvestmentParameter'] = copied_dic['ReinvestmentParameter'] + index\n #copied_dic['StaticOrNot'] = int(index / 4)\n \"\"\"\n copied_dic['ReinvType'] = int(index / 16)\n copied_dic['RedistributionParameter'] = index % 4\n if index < 16:\n copied_dic['ReinvestmentParameter'] = int(index / 4)\n elif 16 <= index < 32:\n copied_dic['ReinvestmentParameter'] = int(index/4) - 4\n \"\"\"\n #copied_dic['ReinvType'] = int(index/4)\n copied_dic['RedistributionParameter'] = index%4\n print(copied_dic)\n Set_List(Hash_list, Crypto_Wealth_list, copied_dic)\n\n pid = os.getpid()\n for i in range(copied_dic['ProcessingNumber']) :\n Miner = Mining(Hash_list, Crypto_Wealth_list, copied_dic)\n Investment(Hash_list, Crypto_Wealth_list, copied_dic)\n Redistribution(Hash_list, Crypto_Wealth_list, copied_dic, Miner, reinvestment_ratio_list)\n Reinvestment(Hash_list, Crypto_Wealth_list, copied_dic, reinvestment_ratio_list)\n if (i % 1000) == 0:\n print('{0}th calculation by process id: {1}'.format(i, pid))\n\n Hash_list.sort(reverse = True)\n Crypto_Wealth_list.sort(reverse = True)\n\n FilePrint(Hash_list, Crypto_Wealth_list, copied_dic)\n\n print('Dis = ' + str(copied_dic['DistributionFormat']) + \\\n ' Par = ' + str(copied_dic['InitialParameter']) + \\\n ' G = ' + str(copied_dic['StaticOrNot']) + \\\n ' Reinv = ' + str(copied_dic['ReinvestmentParameter']) + \\\n ' Redis = ' + 
str(copied_dic['RedistributionParameter']) +\\\n ' Reward = ' + str(copied_dic['Reward']) + \\\n ' Reinv Type = ' + str(copied_dic['ReinvType']))\n\nif __name__ == '__main__':\n\n #define dictionary for parameters\n Parameters = {'Repeat': 1, 'StaticOrNot': 2,\n 'DistributionFormat': 4, 'InitialParameter': 4, 'NodeSize': 100000,\n 'ProcessingNumber': 100000, 'ReinvestmentParameter': 4, 'RedistributionParameter': 4, 'ReinvType': 2,\n 'RedisType': 2, 'NumCore':1, 'Reward': 4}\n #reinvestment_ratio_list = [0.0001, 0.0005, 0.001, 0.00125, 0.0025, 0.005, 0.0075,\n # 0.00875, 0.01, 0.0125, 0.025, 0.05, 0.075, 0.0875, 0.1, 0.5]\n reinvestment_ratio_list = [0.0001, 0.001, 0.01, 0.1]\n set_parameter(Parameters)\n\n #define main two variables\n Hash_list = list()\n Crypto_Wealth_list = list()\n\n processing_multi(Hash_list, Crypto_Wealth_list, Parameters, reinvestment_ratio_list)\n\n\n\n\n\n\n\n\n\n"
},
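redistribution_babo taxes every node a fixed fraction of its wealth and hands each node an equal share of everyone else's contributions, so total wealth is conserved: node i receives (T - p_i)/(N - 1), where T is the pooled amount and p_i its own payment. A toy check of that invariant (same two-pass logic, standalone):

def redistribute(wealth, ratio):
    pots = [w * ratio for w in wealth]        # pass 1: everyone pays in
    total_pot = sum(pots)
    share = 1.0 / (len(wealth) - 1)
    # pass 2: each node gets an equal cut of every pot except its own
    return [w - p + (total_pot - p) * share for w, p in zip(wealth, pots)]

w = redistribute([100.0, 10.0, 1.0], 0.1)
print(w, sum(w))  # [90.55, 14.05, 6.4] -- the total stays 111.0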
{
"alpha_fraction": 0.5960544347763062,
"alphanum_fraction": 0.6204171180725098,
"avg_line_length": 45.35261535644531,
"blob_id": "a76a493a0f1cf78890f879c815a09bea8a23c453",
"content_id": "0652012583d3c8f2bc4aef01a0e9145e3d723553",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 16829,
"license_type": "no_license",
"max_line_length": 164,
"num_lines": 363,
"path": "/Drawing_Graph.py",
"repo_name": "wingofsnake/Blockchain_Project",
"src_encoding": "UTF-8",
"text": "import csv\nimport glob\nimport os\nimport sys\n\nimport matplotlib.pyplot as plt\nimport math\n\ndef Opening_files (Hash_list, Ac_list, i, input_files, index_range) :\n for index in range(index_range):\n csv_file = open(input_files[index + i], 'r', encoding='utf-8')\n csv_reader = csv.reader(csv_file)\n Hash_value = list()\n Ac_value = list()\n header = next(csv_reader)\n Making_list(Hash_value, Ac_value, csv_reader)\n Hash_list.append(Hash_value)\n Ac_list.append(Ac_value)\n csv_file.close()\n\ndef Opening_files_Babo (Hash_list, Ac_list, i, input_files) :\n csv_file = open(input_files[i], 'r', encoding='utf-8')\n csv_reader = csv.reader(csv_file)\n Hash_value = list()\n Ac_value = list()\n header = next(csv_reader)\n Making_list(Hash_value, Ac_value, csv_reader)\n Hash_list.append(Hash_value)\n Ac_list.append(Ac_value)\n csv_file.close()\n\ndef Making_list (Hash_value, Ac_value, csv_reader) :\n\n for line in csv_reader :\n if len(line) == 0 :\n continue\n else:\n Hash_value.append(float(line[0]))\n Ac_value.append(int(line[1]))\n\ndef ploting_LineartoLinear_Babo(Hash_list, Ac_list, input_file_name, reinvestment_ratio_list, HorC) :\n if HorC == 0:\n if len(input_file_name) == 99:\n output_file_name = input_file_name[68:95] + 't1.png'\n else:\n output_file_name = input_file_name[68:96] + 't1.png'\n else:\n if len(input_file_name) == 109:\n output_file_name = input_file_name[78:105] + 't1.png'\n else:\n output_file_name = input_file_name[78:106] + 't1.png'\n for index in range(len(Hash_list[0])):\n Hash_list[0][index] += 1\n plt.plot(Hash_list[0], Ac_list[0], linewidth = 0.5)\n plt.scatter(Hash_list[0], Ac_list[0], s = 1)\n if HorC == 0:\n plt.title('Hash-Accumulated Frequency')\n plt.xlabel('Hash')\n else:\n plt.title('Cryptocurrency-Accumulated Frequency')\n plt.xlabel('Cryptocurrency')\n plt.ylabel('Accumulated Frequency')\n plt.savefig(output_file_name, dpi = 350)\n\n plt.close()\n\ndef ploting_LineartoLog_Babo(Hash_list, Ac_list, input_file_name, reinvestment_ratio_list, HorC):\n if HorC == 0:\n if len(input_file_name) == 99:\n output_file_name = input_file_name[68:95] + 't2.png'\n else:\n output_file_name = input_file_name[68:96] + 't2.png'\n else:\n if len(input_file_name) == 109:\n output_file_name = input_file_name[78:105] + 't2.png'\n else:\n output_file_name = input_file_name[78:106] + 't2.png'\n\n for index in range(len(Hash_list[0])):\n Ac_list[0][index] = math.log(Ac_list[0][index])\n plt.plot(Hash_list[0], Ac_list[0], linewidth=0.5)\n plt.scatter(Hash_list[0], Ac_list[0], s=1)\n if HorC == 0 :\n plt.title('Hash-log(Accumulated Frequency)')\n plt.xlabel('Hash')\n else :\n plt.title('Cryptocurrency-log(Accumulated Frequency)')\n plt.xlabel('Cryptocurrency')\n plt.ylabel('log(Accumulated Frequency)')\n plt.savefig(output_file_name, dpi=350)\n\n plt.close()\n\ndef ploting_LogtoLog_Babo(Hash_list, Ac_list, input_file_name, reinvestment_ratio_list, HorC):\n if HorC == 0:\n if len(input_file_name) == 99:\n output_file_name = input_file_name[68:95] + 't3.png'\n else:\n output_file_name = input_file_name[68:96] + 't3.png'\n else:\n if len(input_file_name) == 109:\n output_file_name = input_file_name[78:105] + 't3.png'\n else:\n output_file_name = input_file_name[78:106] + 't3.png'\n\n for index in range(len(Hash_list[0])):\n Hash_list[0][index] = math.log(Hash_list[0][index])\n plt.plot(Hash_list[0], Ac_list[0], linewidth=0.5)\n plt.scatter(Hash_list[0], Ac_list[0], s=1)\n if HorC == 0:\n plt.title('log(Hash)-log(Accumulated Frequency)')\n plt.xlabel('log(Hash)')\n else:\n 
plt.title('log(Cryptocurrency)-log(Accumulated Frequency)')\n plt.xlabel('log(Cryptocurrency)')\n plt.ylabel('log(Accumulated Frequency)')\n plt.savefig(output_file_name, dpi=350)\n\n plt.close()\n\n\ndef ploting_LineartoLinear(Hash_list, Ac_list, input_file_name, reinvestment_ratio_list, HorC, total_length, path_length) :\n\n output_file_name = input_file_name[path_length + 1 : total_length - 12] + 't1.png'\n\n for i in range(len(reinvestment_ratio_list)) :\n Max_val = Hash_list[i][0]\n Min_val = Hash_list[i][len(Hash_list[i]) - 1]\n division_ratio = Max_val - Min_val\n if division_ratio == 0:\n division_ratio = 1\n for index in range(len(Hash_list[i])) :\n Hash_list[i][index] = (((Hash_list[i][index] - Min_val) * 9.0) / division_ratio) + 1\n #Hash_list[i][index] = Hash_list[i][index]/Min_val\n plt.plot(Hash_list[i], Ac_list[i], label = reinvestment_ratio_list[i], linewidth = 0.5)\n plt.scatter(Hash_list[i], Ac_list[i], s = 1)\n if HorC == 0:\n plt.title('Hash-Accumulated Frequency')\n plt.xlabel('Hash')\n else:\n plt.title('Cryptocurrency-Accumulated Frequency')\n plt.xlabel('Cryptocurrency')\n plt.ylabel('Accumulated Frequency')\n plt.legend()\n plt.savefig(output_file_name, dpi = 350)\n\n plt.close()\n\ndef ploting_LineartoLog(Hash_list, Ac_list, input_file_name, reinvestment_ratio_list, HorC, total_length, path_length):\n output_file_name = input_file_name[path_length + 1: total_length - 12] + 't2.png'\n\n for i in range(len(reinvestment_ratio_list)):\n for index in range(len(Hash_list[i])):\n Ac_list[i][index] = math.log(Ac_list[i][index])\n plt.plot(Hash_list[i], Ac_list[i], label=reinvestment_ratio_list[i], linewidth=0.5)\n plt.scatter(Hash_list[i], Ac_list[i], s=1)\n if HorC == 0 :\n plt.title('Hash-log(Accumulated Frequency)')\n plt.xlabel('Hash')\n else :\n plt.title('Cryptocurrency-log(Accumulated Frequency)')\n plt.xlabel('Cryptocurrency')\n plt.ylabel('log(Accumulated Frequency)')\n plt.legend()\n plt.savefig(output_file_name, dpi=350)\n\n plt.close()\n\ndef ploting_LogtoLog(Hash_list, Ac_list, input_file_name, reinvestment_ratio_list, HorC, total_length, path_length):\n output_file_name = input_file_name[path_length + 1 : total_length - 12] + 't3.png'\n\n\n for i in range(len(reinvestment_ratio_list)):\n for index in range(len(Hash_list[i])):\n Hash_list[i][index] = math.log(Hash_list[i][index])\n plt.plot(Hash_list[i], Ac_list[i], label=reinvestment_ratio_list[i], linewidth=0.5)\n plt.scatter(Hash_list[i], Ac_list[i], s=1)\n if HorC == 0:\n plt.title('log(Hash)-log(Accumulated Frequency)')\n plt.xlabel('log(Hash)')\n else:\n plt.title('log(Cryptocurrency)-log(Accumulated Frequency)')\n plt.xlabel('log(Cryptocurrency)')\n plt.ylabel('log(Accumulated Frequency)')\n plt.legend()\n plt.savefig(output_file_name, dpi=350)\n\n plt.close()\n\ndef ploting_LineartoLinear_SDis(Hash_list, Ac_list, input_file_name, reinvestment_ratio_list, HorC, total_length, path_length) :\n for j in range(4):\n output_file_name = input_file_name[path_length + 1: total_length - 14] + 'di' + str(j) + 't1.png'\n for i in range(len(reinvestment_ratio_list)) :\n Max_val = Hash_list[i*4 + j][0]\n Min_val = Hash_list[i*4 + j][len(Hash_list[i*4 + j]) - 1]\n division_ratio = Max_val - Min_val\n if division_ratio == 0:\n division_ratio = 1\n for index in range(len(Hash_list[i*4 + j])) :\n Hash_list[i*4 + j][index] = (((Hash_list[i*4 + j][index] - Min_val) * 9.0) / division_ratio) + 1\n #Hash_list[i][index] = Hash_list[i][index]/Min_val\n plt.plot(Hash_list[i*4 + j], Ac_list[i*4 + j], label = 
reinvestment_ratio_list[i], linewidth = 0.5)\n plt.scatter(Hash_list[i*4 + j], Ac_list[i*4 + j], s = 1)\n if HorC == 0:\n plt.title('Hash-Accumulated Frequency')\n plt.xlabel('Hash')\n else:\n plt.title('Cryptocurrency-Accumulated Frequency')\n plt.xlabel('Cryptocurrency')\n plt.ylabel('Accumulated Frequency')\n plt.legend()\n plt.savefig(output_file_name, dpi = 350)\n\n plt.close()\n\ndef ploting_LineartoLog_SDis(Hash_list, Ac_list, input_file_name, reinvestment_ratio_list, HorC, total_length, path_length):\n for j in range(4):\n output_file_name = input_file_name[path_length + 1: total_length - 14] + 'di' + str(j) + 't2.png'\n\n for i in range(len(reinvestment_ratio_list)):\n for index in range(len(Hash_list[i*4 + j])):\n Ac_list[i*4 + j][index] = math.log(Ac_list[i*4 + j][index])\n plt.plot(Hash_list[i*4 + j], Ac_list[i*4 + j], label=reinvestment_ratio_list[i], linewidth=0.5)\n plt.scatter(Hash_list[i*4 + j], Ac_list[i*4 + j], s=1)\n if HorC == 0 :\n plt.title('Hash-log(Accumulated Frequency)')\n plt.xlabel('Hash')\n else :\n plt.title('Cryptocurrency-log(Accumulated Frequency)')\n plt.xlabel('Cryptocurrency')\n plt.ylabel('log(Accumulated Frequency)')\n plt.legend()\n plt.savefig(output_file_name, dpi=350)\n\n plt.close()\n\ndef ploting_LogtoLog_SDis(Hash_list, Ac_list, input_file_name, reinvestment_ratio_list, HorC, total_length, path_length):\n for j in range(4):\n output_file_name = input_file_name[path_length + 1 : total_length - 14] + 'di' + str(j) + 't3.png'\n\n\n for i in range(len(reinvestment_ratio_list)):\n for index in range(len(Hash_list[i*4 + j])):\n Hash_list[i*4 + j][index] = math.log(Hash_list[i*4 + j][index])\n plt.plot(Hash_list[i*4 + j], Ac_list[i*4 + j], label=reinvestment_ratio_list[i], linewidth=0.5)\n plt.scatter(Hash_list[i*4 + j], Ac_list[i*4 + j], s=1)\n if HorC == 0:\n plt.title('log(Hash)-log(Accumulated Frequency)')\n plt.xlabel('log(Hash)')\n else:\n plt.title('log(Cryptocurrency)-log(Accumulated Frequency)')\n plt.xlabel('log(Cryptocurrency)')\n plt.ylabel('log(Accumulated Frequency)')\n plt.legend()\n plt.savefig(output_file_name, dpi=350)\n\n plt.close()\n\ndef ploting_LineartoLinear_Rewardbase(Hash_list, Ac_list, input_file_name, reinvestment_ratio_list, HorC, total_length, path_length):\n pass\n\ndef ploting_LineartoLog_Rewardbase(Hash_list, Ac_list, input_file_name, reinvestment_ratio_list, HorC, total_length, path_length):\n pass\n\ndef ploting_LogtoLog_Rewardbase(Hash_list, Ac_list, input_file_name, reinvestment_ratio_list, HorC, total_length, path_length):\n pass\n\nif __name__ == '__main__':\n\n #input_path_Hash = \"E:\\Dropbox\\PhD Research\\Blockchain_Code\\Python\\Data\\Babo model\\Hash\"\n #input_path_crypto = \"E:\\Dropbox\\PhD Research\\Blockchain_Code\\Python\\Data\\Babo model\\Cryptocurrency\"\n #input_path_Hash = \"E:\\Dropbox\\PhD Research\\Blockchain_Code\\Python\\Data\\Selective Reinvestment\\Hash\"\n #input_path_crypto = \"E:\\Dropbox\\PhD Research\\Blockchain_Code\\Python\\Data\\Selective Reinvestment\\Cryptocurrency\"\n #input_path_Hash = \"E:\\Dropbox\\PhD Research\\Blockchain_Code\\Python\\Data\\Simplest Model\\Hash\"\n #input_path_crypto = \"E:\\Dropbox\\PhD Research\\Blockchain_Code\\Python\\Data\\Simplest Model\\Cryptocurrency\"\n input_path_Hash = r\"C:\\Users\\wingo\\Workspace\\Dropbox\\PhD Research\\Blockchain_Code\\Python\\Data\\Exp1\\Hash\\Rit1\"\n input_path_crypto = r\"C:\\Users\\wingo\\Workspace\\Dropbox\\PhD Research\\Blockchain_Code\\Python\\Data\\Exp1\\Cryptocurrency\\Rit1\"\n input_files_Hash = 
glob.glob(os.path.join(input_path_Hash, '*.csv'))\n input_files_crypto = glob.glob(os.path.join(input_path_crypto, '*.csv'))\n #reinvestment_ratio_list = [0.0001, 0.0005, 0.025, 0.05, 0.075, 0.00875, 0.1,\n # 0.5, 0.001, 0.00125, 0.0025, 0.005, 0.0075, 0.00875, 0.01, 0.0125]\n reinvestment_ratio_list = [0.0001, 0.001, 0.01, 0.1]\n #reinvestment_ratio_list = ['0.0001, rt0', '0.0001, rt1', '0.001, rt0', '0.001, rt1', '0.01, rt0', '0.01, rt1', '0.1, rt0', '0.1, rt1']\n\n\"\"\"\n i = 0\n while i < len(input_files_Hash):\n Hash_list = list()\n Ac_list = list()\n Opening_files(Hash_list, Ac_list, i, input_files_Hash, 16)\n ploting_LineartoLinear_SDis(Hash_list, Ac_list, input_files_Hash[i], reinvestment_ratio_list, 0, len(input_files_Hash[i]), len(input_path_Hash))\n ploting_LineartoLog_SDis(Hash_list, Ac_list, input_files_Hash[i], reinvestment_ratio_list, 0,\n len(input_files_Hash[i]), len(input_path_Hash))\n ploting_LogtoLog_SDis(Hash_list, Ac_list, input_files_Hash[i], reinvestment_ratio_list, 0,\n len(input_files_Hash[i]), len(input_path_Hash))\n i = i +16\n\n j = 0\n while j < len(input_files_crypto):\n crypto_list = list()\n Ac_list = list()\n Opening_files(crypto_list, Ac_list, j, input_files_crypto, 16)\n ploting_LineartoLinear_SDis(crypto_list, Ac_list, input_files_crypto[j], reinvestment_ratio_list, 1, len(input_files_crypto[j]), len(input_path_crypto))\n ploting_LineartoLog_SDis(crypto_list, Ac_list, input_files_crypto[j], reinvestment_ratio_list, 1, len(input_files_crypto[j]), len(input_path_crypto))\n ploting_LogtoLog_SDis(crypto_list, Ac_list, input_files_crypto[j], reinvestment_ratio_list, 1, len(input_files_crypto[j]), len(input_path_crypto))\n j = j + 16\n\n\n i = 0\n while i < len(input_files_Hash):\n Hash_list = list()\n Ac_list = list()\n Opening_files(Hash_list, Ac_list, i, input_files_Hash, len(reinvestment_ratio_list))\n ploting_LineartoLinear(Hash_list, Ac_list, input_files_Hash[i], reinvestment_ratio_list, 0, len(input_files_Hash[i]), len(input_path_Hash))\n ploting_LineartoLog(Hash_list, Ac_list, input_files_Hash[i], reinvestment_ratio_list, 0, len(input_files_Hash[i]), len(input_path_Hash))\n ploting_LogtoLog(Hash_list, Ac_list, input_files_Hash[i], reinvestment_ratio_list, 0, len(input_files_Hash[i]), len(input_path_Hash))\n i = i + 4\n j = 0\n while j < len(input_files_crypto):\n crypto_list = list()\n Ac_list = list()\n Opening_files(crypto_list, Ac_list, j, input_files_crypto, len(reinvestment_ratio_list))\n ploting_LineartoLinear(crypto_list, Ac_list, input_files_crypto[j], reinvestment_ratio_list, 1, len(input_files_crypto[j]), len(input_path_crypto))\n ploting_LineartoLog(crypto_list, Ac_list, input_files_crypto[j], reinvestment_ratio_list, 1, len(input_files_crypto[j]), len(input_path_crypto))\n ploting_LogtoLog(crypto_list, Ac_list, input_files_crypto[j], reinvestment_ratio_list, 1, len(input_files_crypto[j]), len(input_path_crypto))\n j = j + 4\n \n for i in range(len(input_files_Hash)):\n Hash_list = list()\n Ac_list = list()\n Opening_files_Babo(Hash_list, Ac_list, i, input_files_Hash)\n ploting_LineartoLinear_Babo(Hash_list, Ac_list, input_files_Hash[i], reinvestment_ratio_list, 0)\n ploting_LineartoLog_Babo(Hash_list, Ac_list, input_files_Hash[i], reinvestment_ratio_list, 0)\n ploting_LogtoLog_Babo(Hash_list, Ac_list, input_files_Hash[i], reinvestment_ratio_list, 0)\n\n crypto_list = list()\n Ac_list2 = list()\n Opening_files_Babo(crypto_list, Ac_list2, i, input_files_crypto)\n ploting_LineartoLinear_Babo(crypto_list, Ac_list2, input_files_crypto[i], 
reinvestment_ratio_list, 1)\n ploting_LineartoLog_Babo(crypto_list, Ac_list2, input_files_crypto[i], reinvestment_ratio_list, 1)\n ploting_LogtoLog_Babo(crypto_list, Ac_list2, input_files_crypto[i], reinvestment_ratio_list, 1)\n\n \n i = 0\n while i < len(input_files_Hash) :\n Hash_list = list()\n Ac_list = list()\n Opening_files(Hash_list, Ac_list, i, input_files_Hash)\n ploting_LineartoLinear(Hash_list, Ac_list, input_files_Hash[i], reinvestment_ratio_list, 0)\n ploting_LineartoLog(Hash_list, Ac_list, input_files_Hash[i], reinvestment_ratio_list, 0)\n ploting_LogtoLog(Hash_list, Ac_list, input_files_Hash[i], reinvestment_ratio_list, 0)\n i = i + 16\n j = 0\n while j < len(input_files_crypto) :\n crypto_list = list()\n Ac_list = list()\n Opening_files(crypto_list, Ac_list, j, input_files_crypto)\n ploting_LineartoLinear(crypto_list, Ac_list, input_files_crypto[j], reinvestment_ratio_list, 1)\n ploting_LineartoLog(crypto_list, Ac_list, input_files_crypto[j], reinvestment_ratio_list, 1)\n ploting_LogtoLog(crypto_list, Ac_list, input_files_crypto[j], reinvestment_ratio_list, 1)\n j = j + 16\n\n \"\"\"\n\n\n\n"
},
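The value-versus-accumulated-frequency curves drawn in Drawing_Graph.py are rank(size) plots. A minimal self-contained log-log version on synthetic power-law data (the alpha = 2 inverse-transform draw mirrors RandomPow in Functions.py; the rest is illustrative):

import math
import random
import matplotlib.pyplot as plt

values = sorted((1.0 / (1.0 - random.random()) ** 0.5 for _ in range(1000)), reverse=True)
ranks = range(1, len(values) + 1)

plt.scatter([math.log(v) for v in values], [math.log(r) for r in ranks], s=1)
plt.xlabel('log(value)')
plt.ylabel('log(Accumulated Frequency)')
plt.savefig('rank_plot.png', dpi=350)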
{
"alpha_fraction": 0.5383530259132385,
"alphanum_fraction": 0.5707388520240784,
"avg_line_length": 46.75906753540039,
"blob_id": "159062fac1eb15f7a9a5e5412ed7dcc1f9c02592",
"content_id": "79ea9223222e223e3a234274e26d367187779d7d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 18434,
"license_type": "no_license",
"max_line_length": 157,
"num_lines": 386,
"path": "/ReRdiRw_Drawing_graph.py",
"repo_name": "wingofsnake/Blockchain_Project",
"src_encoding": "UTF-8",
"text": "import csv\nimport glob\nimport os\nimport sys\n\nimport matplotlib.pyplot as plt\nimport math\n\nfrom Drawing_Graph import Opening_files, Making_list\n\ndef Reshape_Data(Hash_list, Ac_list, Hash_loged_list, Ac_loged_list):\n for i in range(64):\n Max_val = Hash_list[i][0]\n Min_val = Hash_list[i][len(Hash_list[i]) - 1]\n division_ratio = Max_val - Min_val\n Hash_loged_value = list()\n Ac_loged_value = list()\n if division_ratio == 0:\n division_ratio = 1\n for index in range(len(Hash_list[i])):\n Hash_list[i][index] = (((Hash_list[i][index] - Min_val) * 9.0) / division_ratio) + 1\n Ac_loged_value += [math.log(Ac_list[i][index])]\n Hash_loged_value += [math.log(Hash_list[i][index])]\n Hash_loged_list.append(Hash_loged_value)\n Ac_loged_list.append(Ac_loged_value)\n\ndef ploting_LineartoLinear_RWB(Hash_list, Ac_list, input_file_name, ratio_list, HorC, total_length, path_length):\n output_file_name = input_file_name[path_length + 1: total_length - 18] + 're2rdi1' + 't1.png'\n for i in range(len(ratio_list)):\n plt.plot(Hash_list[i + 36], Ac_list[i + 36], label=ratio_list[i], linewidth=0.5)\n plt.scatter(Hash_list[i + 36], Ac_list[i + 36], s=1)\n\n if HorC == 0:\n plt.title('Hash-Accumulated Frequency')\n plt.xlabel('Hash')\n else:\n plt.title('Cryptocurrency-Accumulated Frequency')\n plt.xlabel('Cryptocurrency')\n plt.ylabel('Accumulated Frequency')\n plt.legend()\n plt.savefig(output_file_name, dpi=350)\n\n plt.close()\n\ndef ploting_LineartoLog_RWB(Hash_list, Ac_list, input_file_name, ratio_list, HorC, total_length, path_length):\n output_file_name = input_file_name[path_length + 1: total_length - 18] + 're2rdi1' + 't2.png'\n for i in range(len(ratio_list)):\n plt.plot(Hash_list[i + 36], Ac_list[i + 36], label=ratio_list[i], linewidth=0.5)\n plt.scatter(Hash_list[i + 36], Ac_list[i + 36], s=1)\n\n if HorC == 0:\n plt.title('Hash-Accumulated Frequency')\n plt.xlabel('Hash')\n else:\n plt.title('Cryptocurrency-Accumulated Frequency')\n plt.xlabel('Cryptocurrency')\n plt.ylabel('Accumulated Frequency')\n plt.legend()\n plt.savefig(output_file_name, dpi=350)\n\n plt.close()\n\ndef ploting_LogtoLog_RWB(Hash_list, Ac_list, input_file_name, ratio_list, HorC, total_length, path_length):\n output_file_name = input_file_name[path_length + 1: total_length - 18] + 're2rdi1' + 't3.png'\n for i in range(len(ratio_list)):\n plt.plot(Hash_list[i + 36], Ac_list[i + 36], label=ratio_list[i], linewidth=0.5)\n plt.scatter(Hash_list[i + 36], Ac_list[i + 36], s=1)\n\n if HorC == 0:\n plt.title('Hash-Accumulated Frequency')\n plt.xlabel('Hash')\n else:\n plt.title('Cryptocurrency-Accumulated Frequency')\n plt.xlabel('Cryptocurrency')\n plt.ylabel('Accumulated Frequency')\n plt.legend()\n plt.savefig(output_file_name, dpi=350)\n\n plt.close()\n\ndef ploting_LineartoLinear_Reinvbase(Hash_list, Ac_list, input_file_name, ratio_list, HorC, total_length, path_length):\n for j in range(4):\n output_file_name = input_file_name[path_length + 1 : total_length - 18] + 're' + str(j) + 't1.png'\n for i in range(len(ratio_list)):\n plt.plot(Hash_list[i + j*16], Ac_list[i + j*16], label=ratio_list[i], linewidth=0.5)\n plt.scatter(Hash_list[i + j*16], Ac_list[i + j*16], s=1)\n\n if HorC == 0:\n plt.title('Hash-Accumulated Frequency')\n plt.xlabel('Hash')\n else:\n plt.title('Cryptocurrency-Accumulated Frequency')\n plt.xlabel('Cryptocurrency')\n plt.ylabel('Accumulated Frequency')\n plt.legend()\n plt.savefig(output_file_name, dpi = 350)\n\n plt.close()\n\ndef ploting_LineartoLog_Reinvbase(Hash_list, Ac_list, 
input_file_name, ratio_list, HorC, total_length, path_length):\n for j in range(4):\n output_file_name = input_file_name[path_length + 1: total_length - 18] + 're' + str(j) + 't2.png'\n for i in range(len(ratio_list)):\n plt.plot(Hash_list[i + j * 16], Ac_list[i + j * 16], label=ratio_list[i], linewidth=0.5)\n plt.scatter(Hash_list[i + j * 16], Ac_list[i + j * 16], s=1)\n\n if HorC == 0:\n plt.title('Hash-Accumulated Frequency')\n plt.xlabel('Hash')\n else:\n plt.title('Cryptocurrency-Accumulated Frequency')\n plt.xlabel('Cryptocurrency')\n plt.ylabel('Accumulated Frequency')\n plt.legend()\n plt.savefig(output_file_name, dpi=350)\n\n plt.close()\n\n\ndef ploting_LogtoLog_Reinvbase(Hash_list, Ac_list, input_file_name, ratio_list, HorC, total_length, path_length):\n for j in range(4):\n output_file_name = input_file_name[path_length + 1: total_length - 18] + 're' + str(j) + 't3.png'\n for i in range(len(ratio_list)):\n plt.plot(Hash_list[i + j * 16], Ac_list[i + j * 16], label=ratio_list[i], linewidth=0.5)\n plt.scatter(Hash_list[i + j * 16], Ac_list[i + j * 16], s=1)\n\n if HorC == 0:\n plt.title('Hash-Accumulated Frequency')\n plt.xlabel('Hash')\n else:\n plt.title('Cryptocurrency-Accumulated Frequency')\n plt.xlabel('Cryptocurrency')\n plt.ylabel('Accumulated Frequency')\n plt.legend()\n plt.savefig(output_file_name, dpi=350)\n\n plt.close()\n\ndef ploting_LineartoLinear_Redibase(Hash_list, Ac_list, input_file_name, ratio_list, HorC, total_length, path_length):\n for j in range(4):\n output_file_name = input_file_name[path_length + 1: total_length - 18] + 'rdi' + str(j) + 't1.png'\n for i in range(len(ratio_list)):\n plt.plot(Hash_list[(i + (12 * int(i/4))) + j * 4], Ac_list[(i + (12 * int(i/4))) + j * 4], label=ratio_list[i], linewidth=0.5)\n plt.scatter(Hash_list[(i + (12 * int(i/4))) + j * 4], Ac_list[(i + (12 * int(i/4))) + j * 4], s=1)\n\n if HorC == 0:\n plt.title('Hash-Accumulated Frequency')\n plt.xlabel('Hash')\n else:\n plt.title('Cryptocurrency-Accumulated Frequency')\n plt.xlabel('Cryptocurrency')\n plt.ylabel('Accumulated Frequency')\n plt.legend()\n plt.savefig(output_file_name, dpi=350)\n\n plt.close()\n\ndef ploting_LineartoLog_Redibase(Hash_list, Ac_list, input_file_name, ratio_list, HorC, total_length, path_length):\n for j in range(4):\n output_file_name = input_file_name[path_length + 1: total_length - 18] + 'rdi' + str(j) + 't2.png'\n for i in range(len(ratio_list)):\n plt.plot(Hash_list[(i + (12 * int(i / 4))) + j * 4], Ac_list[(i + (12 * int(i / 4))) + j * 4],\n label=ratio_list[i], linewidth=0.5)\n plt.scatter(Hash_list[(i + (12 * int(i / 4))) + j * 4], Ac_list[(i + (12 * int(i / 4))) + j * 4], s=1)\n\n if HorC == 0:\n plt.title('Hash-Accumulated Frequency')\n plt.xlabel('Hash')\n else:\n plt.title('Cryptocurrency-Accumulated Frequency')\n plt.xlabel('Cryptocurrency')\n plt.ylabel('Accumulated Frequency')\n plt.legend()\n plt.savefig(output_file_name, dpi=350)\n\n plt.close()\n\ndef ploting_LogtoLog_Redibase(Hash_list, Ac_list, input_file_name, ratio_list, HorC, total_length, path_length):\n for j in range(4):\n output_file_name = input_file_name[path_length + 1: total_length - 18] + 'rdi' + str(j) + 't3.png'\n for i in range(len(ratio_list)):\n plt.plot(Hash_list[(i + (12 * int(i / 4))) + j * 4], Ac_list[(i + (12 * int(i / 4))) + j * 4],\n label=ratio_list[i], linewidth=0.5)\n plt.scatter(Hash_list[(i + (12 * int(i / 4))) + j * 4], Ac_list[(i + (12 * int(i / 4))) + j * 4], s=1)\n\n if HorC == 0:\n plt.title('Hash-Accumulated Frequency')\n plt.xlabel('Hash')\n 
else:\n plt.title('Cryptocurrency-Accumulated Frequency')\n plt.xlabel('Cryptocurrency')\n plt.ylabel('Accumulated Frequency')\n plt.legend()\n plt.savefig(output_file_name, dpi=350)\n\n plt.close()\n\ndef ploting_LineartoLinear_Rewardbase(Hash_list, Ac_list, input_file_name, ratio_list, HorC, total_length, path_length):\n for j in range(4):\n output_file_name = input_file_name[path_length + 1: total_length - 18] + 'rw' + str(j) + 't1.png'\n for i in range(len(ratio_list)):\n plt.plot(Hash_list[i*4 + j], Ac_list[i*4 + j],\n label= ratio_list[i], linewidth=0.5)\n plt.scatter(Hash_list[i*4 + j], Ac_list[i*4 + j], s=1)\n\n if HorC == 0:\n plt.title('Hash-Accumulated Frequency')\n plt.xlabel('Hash')\n else:\n plt.title('Cryptocurrency-Accumulated Frequency')\n plt.xlabel('Cryptocurrency')\n plt.ylabel('Accumulated Frequency')\n plt.legend()\n plt.savefig(output_file_name, dpi=350)\n\n plt.close()\n\ndef ploting_LineartoLog_Rewardbase(Hash_list, Ac_list, input_file_name, ratio_list, HorC, total_length, path_length):\n for j in range(4):\n output_file_name = input_file_name[path_length + 1: total_length - 18] + 'rw' + str(j) + 't2.png'\n for i in range(len(ratio_list)):\n plt.plot(Hash_list[i * 4 + j], Ac_list[i * 4 + j],\n label=ratio_list[i], linewidth=0.5)\n plt.scatter(Hash_list[i * 4 + j], Ac_list[i * 4 + j], s=1)\n\n if HorC == 0:\n plt.title('Hash-Accumulated Frequency')\n plt.xlabel('Hash')\n else:\n plt.title('Cryptocurrency-Accumulated Frequency')\n plt.xlabel('Cryptocurrency')\n plt.ylabel('Accumulated Frequency')\n plt.legend()\n plt.savefig(output_file_name, dpi=350)\n\n plt.close()\n\ndef ploting_LogtoLog_Rewardbase(Hash_list, Ac_list, input_file_name, ratio_list, HorC, total_length, path_length):\n for j in range(4):\n output_file_name = input_file_name[path_length + 1: total_length - 18] + 'rw' + str(j) + 't3.png'\n for i in range(len(ratio_list)):\n plt.plot(Hash_list[i * 4 + j], Ac_list[i * 4 + j],\n label=ratio_list[i], linewidth=0.5)\n plt.scatter(Hash_list[i * 4 + j], Ac_list[i * 4 + j], s=1)\n\n if HorC == 0:\n plt.title('Hash-Accumulated Frequency')\n plt.xlabel('Hash')\n else:\n plt.title('Cryptocurrency-Accumulated Frequency')\n plt.xlabel('Cryptocurrency')\n plt.ylabel('Accumulated Frequency')\n plt.legend()\n plt.savefig(output_file_name, dpi=350)\n\n plt.close()\n\nif __name__ == '__main__':\n\n input_path_Hash = \"D:\\Dropbox\\PhD Research\\Blockchain_Code\\Python\\Data\\Exp_Reward\\Hash\\Rit1\"\n input_path_crypto = \"D:\\Dropbox\\PhD Research\\Blockchain_Code\\Python\\Data\\Exp_Reward\\Cryptocurrency\\Rit1\"\n input_files_Hash = glob.glob(os.path.join(input_path_Hash, '*.csv'))\n input_files_crypto = glob.glob(os.path.join(input_path_crypto, '*.csv'))\n\n reinvestment_ratio_list = ['rdi=0.0001, rw=1', 'rdi=0.0001, rw=10', 'rdi=0.0001, rw=30', 'rdi=0.0001, rw=50',\\\n 'rdi=0.001 rw=1', 'rdi=0.001 rw=10', 'rdi=0.001 rw=30', 'rdi=0.001 rw=50',\\\n 'rdi=0.01 rw=1', 'rdi=0.01 rw=10', 'rdi=0.01 rw=30', 'rdi=0.01 rw=50',\\\n 'rdi=0.1 rw=1', 'rdi=0.1 rw=10', 'rdi=0.1 rw=30', 'rdi=0.1 rw=50']\n redistribution_ratio_list = ['re=0.0001, rw=1', 're=0.0001, rw=10', 're=0.0001, rw=30', 're=0.0001, rw=50', \\\n 're=0.001 rw=1', 're=0.001 rw=10', 're=0.001 rw=30', 're=0.001 rw=50', \\\n 're=0.01 rw=1', 're=0.01 rw=10', 're=0.01 rw=30', 're=0.01 rw=50', \\\n 're=0.1 rw=1', 're=0.1 rw=10', 're=0.1 rw=30', 're=0.1 rw=50']\n reward_ratio_list = ['re=0.0001, rdi=0.0001', 're=0.0001, rdi=0.001', 're=0.0001, rdi=0.01', 're=0.0001, rdi=0.1', \\\n 're=0.001, rdi=0.0001', 're=0.001, 
rdi=0.001', 're=0.001, rdi=0.01', 're=0.001, rdi=0.1',\\\n 're=0.01, rdi=0.0001', 're=0.01, rdi=0.001', 're=0.01, rdi=0.01', 're=0.01, rdi=0.1',\\\n 're=0.1, rdi=0.0001', 're=0.1, rdi=0.001', 're=0.1, rdi=0.01', 're=0.1, rdi=0.1']\n RW_ratio_list = [1, 10, 30, 50]\n i = 0\n while i < len(input_files_Hash):\n Hash_list = list()\n Ac_list = list()\n\n Hash_loged_list = list()\n Ac_loged_list = list()\n\n Opening_files(Hash_list, Ac_list, i, input_files_Hash, 64)\n Reshape_Data(Hash_list, Ac_list, Hash_loged_list, Ac_loged_list)\n\n ploting_LineartoLinear_RWB(Hash_list, Ac_list, input_files_Hash[i], RW_ratio_list, 0,\n len(input_files_Hash[i]), len(input_path_Hash))\n ploting_LineartoLog_RWB(Hash_list, Ac_loged_list, input_files_Hash[i], RW_ratio_list, 0,\n len(input_files_Hash[i]), len(input_path_Hash))\n ploting_LogtoLog_RWB(Hash_loged_list, Ac_loged_list, input_files_Hash[i], RW_ratio_list, 0,\n len(input_files_Hash[i]), len(input_path_Hash))\n i=i+64\n\n j = 0\n while j < len(input_files_Hash):\n Hash_list = list()\n Ac_list = list()\n\n Hash_loged_list = list()\n Ac_loged_list = list()\n\n Opening_files(Hash_list, Ac_list, j, input_files_crypto, 64)\n Reshape_Data(Hash_list, Ac_list, Hash_loged_list, Ac_loged_list)\n\n ploting_LineartoLinear_RWB(Hash_list, Ac_list, input_files_crypto[j], RW_ratio_list, 1,\n len(input_files_crypto[j]), len(input_path_crypto))\n ploting_LineartoLog_RWB(Hash_list, Ac_loged_list, input_files_crypto[j], RW_ratio_list, 1,\n len(input_files_crypto[j]), len(input_path_crypto))\n ploting_LogtoLog_RWB(Hash_loged_list, Ac_loged_list, input_files_crypto[j], RW_ratio_list, 1,\n len(input_files_crypto[j]), len(input_path_crypto))\n \n j = j + 64\n\n\n\n\n\"\"\"\n i = 0\n while i < len(input_files_Hash):\n Hash_list = list()\n Ac_list = list()\n\n Hash_loged_list = list()\n Ac_loged_list = list()\n\n Opening_files(Hash_list, Ac_list, i, input_files_Hash, 64)\n Reshape_Data(Hash_list, Ac_list, Hash_loged_list, Ac_loged_list)\n\n ploting_LineartoLinear_Reinvbase(Hash_list, Ac_list, input_files_Hash[i], reinvestment_ratio_list, 0, len(input_files_Hash[i]), len(input_path_Hash))\n ploting_LineartoLog_Reinvbase(Hash_list, Ac_loged_list, input_files_Hash[i], reinvestment_ratio_list, 0,\n len(input_files_Hash[i]), len(input_path_Hash))\n ploting_LogtoLog_Reinvbase(Hash_loged_list, Ac_loged_list, input_files_Hash[i], reinvestment_ratio_list, 0,\n len(input_files_Hash[i]), len(input_path_Hash))\n\n ploting_LineartoLinear_Redibase(Hash_list, Ac_list, input_files_Hash[i], redistribution_ratio_list, 0,\n len(input_files_Hash[i]), len(input_path_Hash))\n ploting_LineartoLog_Redibase(Hash_list, Ac_loged_list, input_files_Hash[i], redistribution_ratio_list, 0,\n len(input_files_Hash[i]), len(input_path_Hash))\n ploting_LogtoLog_Redibase(Hash_loged_list, Ac_loged_list, input_files_Hash[i], redistribution_ratio_list, 0,\n len(input_files_Hash[i]), len(input_path_Hash))\n\n ploting_LineartoLinear_Rewardbase(Hash_list, Ac_list, input_files_Hash[i], reward_ratio_list, 0,\n len(input_files_Hash[i]), len(input_path_Hash))\n ploting_LineartoLog_Rewardbase(Hash_list, Ac_loged_list, input_files_Hash[i], reward_ratio_list, 0,\n len(input_files_Hash[i]), len(input_path_Hash))\n ploting_LogtoLog_Rewardbase(Hash_loged_list, Ac_loged_list, input_files_Hash[i], reward_ratio_list, 0,\n len(input_files_Hash[i]), len(input_path_Hash))\n\n i = i + 64\n\n j = 0\n while j < len(input_files_Hash):\n Hash_list = list()\n Ac_list = list()\n\n Hash_loged_list = list()\n Ac_loged_list = list()\n\n 
Opening_files(Hash_list, Ac_list, j, input_files_crypto, 64)\n Reshape_Data(Hash_list, Ac_list, Hash_loged_list, Ac_loged_list)\n\n ploting_LineartoLinear_Reinvbase(Hash_list, Ac_list, input_files_crypto[j], reinvestment_ratio_list, 1,\n len(input_files_crypto[j]), len(input_path_crypto))\n ploting_LineartoLog_Reinvbase(Hash_list, Ac_loged_list, input_files_crypto[j], reinvestment_ratio_list, 1,\n len(input_files_crypto[j]), len(input_path_crypto))\n ploting_LogtoLog_Reinvbase(Hash_loged_list, Ac_loged_list, input_files_crypto[j], reinvestment_ratio_list, 1,\n len(input_files_crypto[j]), len(input_path_crypto))\n\n ploting_LineartoLinear_Redibase(Hash_list, Ac_list, input_files_crypto[j], redistribution_ratio_list, 1,\n len(input_files_crypto[j]), len(input_path_crypto))\n ploting_LineartoLog_Redibase(Hash_list, Ac_loged_list, input_files_crypto[j], redistribution_ratio_list, 1,\n len(input_files_crypto[j]), len(input_path_crypto))\n ploting_LogtoLog_Redibase(Hash_loged_list, Ac_loged_list, input_files_crypto[j], redistribution_ratio_list, 1,\n len(input_files_crypto[j]), len(input_path_crypto))\n\n ploting_LineartoLinear_Rewardbase(Hash_list, Ac_list, input_files_crypto[j], reward_ratio_list, 1,\n len(input_files_crypto[j]), len(input_path_crypto))\n ploting_LineartoLog_Rewardbase(Hash_list, Ac_loged_list, input_files_crypto[j], reward_ratio_list, 1,\n len(input_files_crypto[j]), len(input_path_crypto))\n ploting_LogtoLog_Rewardbase(Hash_loged_list, Ac_loged_list, input_files_crypto[j], reward_ratio_list, 1,\n len(input_files_crypto[j]), len(input_path_crypto))\n j = j + 64\n\"\"\""
}
] | 6 |
dibyanijaiswal/my-first-blog | https://github.com/dibyanijaiswal/my-first-blog | c3cbf0e2bf2bb16ea1aa6b3226441b78e282af50 | 4a93251fd9e6a239962ecc7494db2ddd4cbccef6 | b8ff3839580d3c308a565cd83c7957fe81f2935f | refs/heads/master | 2020-09-10T15:13:20.612768 | 2019-11-15T20:07:49 | 2019-11-15T20:07:49 | 221,734,776 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6769759654998779,
"alphanum_fraction": 0.6769759654998779,
"avg_line_length": 31.44444465637207,
"blob_id": "3546a8bca9ac54d50a9444745b3bf6bc984329cd",
"content_id": "6a4f9401d848d80577ef3ceecb538179ba6794f8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 291,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 9,
"path": "/blog/urls.py",
"repo_name": "dibyanijaiswal/my-first-blog",
"src_encoding": "UTF-8",
"text": "from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('',views.postlist,name='postlist'),\n path('post/<int:pk>/',views.postdetail,name='postdetail'),\n path('post/new/',views.postnew,name='postnew'),\n path('post/<int:pk>/edit/',views.postedit,name='postedit'),\n]"
}
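The four routes in urls.py above expect matching callables in views.py. A skeletal sketch of two of them, consistent with the <int:pk> converter (the Post model and the template paths are assumptions, not taken from this snapshot):

from django.shortcuts import get_object_or_404, render
from .models import Post  # assumed model; not shown in this repo snapshot

def postlist(request):
    return render(request, 'blog/post_list.html', {'posts': Post.objects.all()})

def postdetail(request, pk):
    post = get_object_or_404(Post, pk=pk)
    return render(request, 'blog/post_detail.html', {'post': post})

postnew and postedit would follow the same pattern with a form class.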
] | 1 |
iss4e/jurisdiction-profitability | https://github.com/iss4e/jurisdiction-profitability | 682fd769444c7c219006e06d6f73c2a73a0789e7 | 088ea54cacf5834d0b6d4f5031e3a31f20dab6e5 | a4ebdd19db6b5fb408d62efacda6164e815cea98 | refs/heads/master | 2021-01-11T14:02:12.633972 | 2017-06-23T15:57:34 | 2017-06-23T15:57:34 | 94,938,747 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5254582762718201,
"alphanum_fraction": 0.5403937697410583,
"avg_line_length": 25.781818389892578,
"blob_id": "72a32467fce44ea8ff5170789b3156283bd3bb1f",
"content_id": "25f403728c68f870cb33204ddcf1d99ed8b2aff2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1473,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 55,
"path": "/ontario_roi/runAMPL_ToUprice_ontario.py",
"repo_name": "iss4e/jurisdiction-profitability",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n\nfrom subprocess import call\nimport os\n\nmaxnum=100\n\nfname = \"ontario_data/PV.dat\"\nnum_lines = sum(1 for line in open(fname))\n\nfor i in range(0,maxnum):\n \n # Create script\n script_file = \"scripts/script\" + str(i) + \".ampl\"\n f = open(script_file, 'w')\n\n pns_file = open(\"results/results_ns_\"+str(i), 'r')\n pns = pns_file.read();\n\n f.write(\"model model_roi.mod;\\n\")\n f.write(\"model objective_ToUprice.mod;\\n\")\n f.write(\"data ontario_data/PV.dat;\\n\")\n f.write(\"data ontario_data/Load_\" + str(i) + \".dat;\\n\")\n f.write(\"data prices_ontario_USD.dat;\\n\")\n f.write(\"data data_roi.dat;\\n\") \n f.write(\"data z_ontario.dat;\\n\")\n f.write(\"\\n\")\n\n f.write(\"option solver cplex;\\n\")\n f.write(\"\\n\")\n \n f.write(\"let T := \" + str(num_lines-2) + \";\\n\")\n f.write(\"let PV := 10;\\n\")\n f.write(\"let PNS := \" + pns + \"*(-1);\\n\")\n f.write(\"\\n\") \n\n f.write(\"for {pv_factor in 1..5} {\\n\")\n f.write(\"let PV := pv_factor*2;\\n\")\n f.write(\"if PV > 6 then {let p_plus := 0.235;} else {let p_plus := 0.25;}\\n\")\n f.write(\"for {b_factor in 0..5} {\\n\")\n f.write(\"\\tlet B := b_factor;\\n\")\n f.write(\"\\tsolve;\\n\")\n f.write(\"\\tprintf \\\"%f %f %f\\\\n\\\", revenue, B, PV > results/results_\" + str(i) + \";\\n\")\n f.write(\"}\\n\") \n f.write(\"}\\n\")\n\n f.write(\"\\n\")\n f.write(\"quit;\\n\")\n f.close()\n\n # Run script\n cmd= \"ampl \" + script_file \n print cmd\n call(cmd, shell=True)\n print \"done.\"\n"
},
{
"alpha_fraction": 0.5882353186607361,
"alphanum_fraction": 0.5917471647262573,
"avg_line_length": 25.488372802734375,
"blob_id": "d8a376347ede69a5c04b20f809022ccb4ee564a4",
"content_id": "b08eb380e82aab4c4c61fe4b56f84fea6f8a48dd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1139,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 43,
"path": "/germany_roi/runAMPL_flatprice_nosystem.py",
"repo_name": "iss4e/jurisdiction-profitability",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n\nfrom subprocess import call\nimport os\n\nmaxnum=90\n\nfor i in range(maxnum):\n # Load and solar data is expected to be stored in the hourly_data folder.\n # Here, we check if the file with id=i exists, before proceeding.\n fname = \"AMPL_files/Load_\" + str(i) + \".dat\"\n if not(os.path.isfile(fname)):\n continue\n \n num_lines = sum(1 for line in open(fname))\n\n # Create script\n script_file = \"scripts/script_nosystem_\" + str(i) + \"_flatprice.ampl\"\n f = open(script_file, 'w')\n\n f.write(\"model model_nosystem.mod;\\n\")\n f.write(\"model objective_flatprice.mod;\\n\")\n f.write(\"data pv1_ampl.dat;\\n\")\n f.write(\"data AMPL_files/Load_\" + str(i) + \".dat;\\n\")\n f.write(\"data data_nosystem.dat;\\n\") \n f.write(\"data z_germany.dat;\\n\")\n f.write(\"\\n\")\n\n f.write(\"option solver cplex;\\n\")\n f.write(\"\\n\")\n \n f.write(\"\\tsolve;\\n\")\n f.write(\"\\tprintf \\\"%f\\\\n\\\", revenue > results/results_ns_\" + str(i) + \";\\n\")\n\n f.write(\"\\n\")\n f.write(\"quit;\\n\")\n f.close()\n\n # Run script\n cmd= \"ampl \" + script_file \n print cmd\n call(cmd, shell=True)\n print \"done.\"\n"
},
{
"alpha_fraction": 0.5559724569320679,
"alphanum_fraction": 0.5628517866134644,
"avg_line_length": 27.553571701049805,
"blob_id": "ca0f1e4ab5630c6804d8124256998ce62ab1b4e4",
"content_id": "222be13d04ea83c01a2b9a54552d2872705c0c8c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1599,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 56,
"path": "/germany_roi/runAMPL_flatprice.py",
"repo_name": "iss4e/jurisdiction-profitability",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n\nfrom subprocess import call\nimport os\n\nmaxnum=90\n\nfor i in range(maxnum):\n # Load and solar data is expected to be stored in the hourly_data folder.\n # Here, we check if the file with id=i exists, before proceeding.\n fname = \"hourly_data/Load_\" + str(i) + \".dat\"\n if not(os.path.isfile(fname)):\n continue\n \n num_lines = sum(1 for line in open(fname))\n\n # Create script\n script_file = \"scripts/script\" + str(i) + \"_flatprice.ampl\"\n f = open(script_file, 'w')\n\n # read the file containing the total grid payment with no system\n pns_file = open(\"results/results_ns_\"+str(i), 'r')\n pns = pns_file.read();\n\n f.write(\"model model_roi.mod;\\n\")\n f.write(\"model objective_flatprice_roi.mod;\\n\")\n f.write(\"data hourly_data/PV_ampl.dat;\\n\")\n f.write(\"data hourly_data/Load_\" + str(i) + \".dat;\\n\")\n f.write(\"data data_roi.dat;\\n\") \n f.write(\"data z_germany.dat;\\n\") \n f.write(\"\\n\")\n\n f.write(\"option solver cplex;\\n\")\n f.write(\"\\n\")\n \n f.write(\"let PV := 10;\\n\")\n\n f.write(\"for {pv_factor in 1..5} {\\n\")\n f.write(\"let PV := pv_factor*2;\\n\");\n f.write(\"for {b_factor in 0..5} {\\n\")\n f.write(\"\\tlet B := b_factor;\\n\")\n f.write(\"\\tlet PNS := \" + pns + \"*(-1);\\n\")\n f.write(\"\\tsolve;\\n\")\n f.write(\"\\tprintf \\\"%f %f %f\\\\n\\\", revenue, B, PV > results/results_\" + str(i) + \";\\n\")\n f.write(\"}\\n\") \n f.write(\"}\\n\") \n\n f.write(\"\\n\")\n f.write(\"quit;\\n\")\n f.close()\n\n # Run script\n cmd= \"ampl \" + script_file \n print cmd\n call(cmd, shell=True)\n print \"done.\"\n"
},
{
"alpha_fraction": 0.5678536891937256,
"alphanum_fraction": 0.5736284852027893,
"avg_line_length": 23.162790298461914,
"blob_id": "fe367246fbc689d0133365a585f1f77dcc37acec",
"content_id": "5b45e65b8601c4f78416de879a0f423476ca7270",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1039,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 43,
"path": "/ontario_roi/runAMPL_ToUprice_ontario_nosystem.py",
"repo_name": "iss4e/jurisdiction-profitability",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n\nfrom subprocess import call\nimport os\n\nmaxnum=100\n\nfname = \"ontario_data/PV.dat\"\nnum_lines = sum(1 for line in open(fname))\n\nfor i in range(0,maxnum):\n \n # Create script\n script_file = \"scripts/script_nosystem_\" + str(i) + \".ampl\"\n f = open(script_file, 'w')\n\n f.write(\"model model_nosystem.mod;\\n\")\n f.write(\"model objective_ToUprice_ns.mod;\\n\")\n f.write(\"data ontario_data/PV.dat;\\n\")\n f.write(\"data ontario_data/Load_\" + str(i) + \".dat;\\n\")\n f.write(\"data prices_ontario_USD.dat;\\n\")\n f.write(\"data data_nosystem.dat;\\n\") \n f.write(\"data z_ontario.dat;\\n\")\n f.write(\"\\n\")\n\n f.write(\"option solver cplex;\\n\")\n f.write(\"\\n\")\n \n f.write(\"let T := \" + str(num_lines-2) + \";\\n\")\n f.write(\"\\n\") \n\n f.write(\"\\tsolve;\\n\")\n f.write(\"\\tprintf \\\"%f\\\\n\\\", revenue > results/results_ns_\" + str(i) + \";\\n\")\n\n f.write(\"\\n\")\n f.write(\"quit;\\n\")\n f.close()\n\n # Run script\n cmd= \"ampl \" + script_file \n print cmd\n call(cmd, shell=True)\n print \"done.\"\n"
},
{
"alpha_fraction": 0.7891714572906494,
"alphanum_fraction": 0.7924528121948242,
"avg_line_length": 120.9000015258789,
"blob_id": "776b622dd7c35165855c7ea35348336f77c8f3b0",
"content_id": "2a177de513fdf2437d2a4e84550acdd6c18441c4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1219,
"license_type": "no_license",
"max_line_length": 462,
"num_lines": 10,
"path": "/README.md",
"repo_name": "iss4e/jurisdiction-profitability",
"src_encoding": "UTF-8",
"text": "# jurisdiction-profitability\nAMPL optimization models and scripts\n\nWe use python scripts to write and execute AMPL scripts, which then use CPLEX as the optimization solver.\n\nThe folders for each jurisdiction contain all the data needed to compute the ROI of a household EXCEPT for the solar and load data traces. Interested users should look up AMPL documentation for an explanation of how to format their data traces properly. The scripts expect hourly data, but can be easily configured to work with different time-slot durations (change the T_u value in the data.dat files, as well as the grid price files if they are time dependent)\n\nThe optimization is done as follows (it is the same for each jurisdiction):\n1. First, we compute the payment with no system (i.e. B = 0, PV = 0) using the runAMPL....nosystem.py script. It is possible to do this without using an optimization solver, however CPLEX does not take much time to calculate this value either and we found it easy to manage when all the computation is done using the same tools.\n2. Next, we compute the ROI using the runAMPL....py script. This script writes and executes AMPL scripts, one for each available load trace. The results are written to a file.\n"
},
{
"alpha_fraction": 0.6000000238418579,
"alphanum_fraction": 0.6089347004890442,
"avg_line_length": 28.693878173828125,
"blob_id": "9b1cd02d2618cdd9fe8c0604710dd675f01bf185",
"content_id": "94daa456e9b6dcfd16543f63bd2e950c1104bfc4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1455,
"license_type": "no_license",
"max_line_length": 150,
"num_lines": 49,
"path": "/texas_roi/runAMPL_tierprice_roi_nosystem.py",
"repo_name": "iss4e/jurisdiction-profitability",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n\nfrom subprocess import call\nimport os\n\n# This script is used to generate and AMPL scripts that get us the grid payments with no system (i.e., B=0, PV=0). There results are written to files.\n\nmaxnum=10000\n\nfor i in range(maxnum):\n # Load and solar data is expected to be stored in the hourly_data folder.\n # Here, we check if the file with id=i exists, before proceeding.\n fname = \"hourly_data/Load_\" + str(i) + \"_hourly.dat\"\n if not(os.path.isfile(fname)):\n continue\n\n num_lines = sum(1 for line in open(fname))\n\n # Create script\n script_file = \"scripts/script_ns_\" + str(i) + \"_tierprice\" + \".ampl\"\n f = open(script_file, 'w')\n\n f.write(\"model model_nosystem.mod;\\n\")\n f.write(\"model objective_tierprice.mod;\\n\")\n f.write(\"data hourly_data/PV_8046_hourly.dat;\\n\")\n f.write(\"data hourly_data/Load_\" + str(i) + \"_hourly.dat;\\n\")\n f.write(\"data data_nosystem.dat;\\n\") \n f.write(\"data data_tierprice.dat;\\n\") \n f.write(\"data z_texas_decline.dat;\\n\")\n f.write(\"\\n\")\n\n f.write(\"option solver cplex;\\n\")\n f.write(\"\\n\")\n\n f.write(\"let T := \" + str(num_lines-2) + \";\\n\")\n f.write(\"\\n\") \n\n f.write(\"\\tsolve;\\n\")\n f.write(\"\\tprintf \\\"%f\\\\n\\\", revenue > results_pricedecline/results_ns_\" + str(i) + \";\\n\")\n\n f.write(\"\\n\")\n f.write(\"quit;\\n\")\n f.close()\n\n # Run script\n cmd= \"ampl \" + script_file \n print cmd\n call(cmd, shell=True)\n print \"done.\"\n"
}
] | 6 |
hrokr/siml
|
https://github.com/hrokr/siml
|
d34d7c9363db10402f5f3426e33b876a0501b215
|
60e40d10f18412a127d0f599433d929d04c6ddce
|
c0991c469ed3f99745e8dada5d709bf75371ca90
|
refs/heads/master
| 2020-08-12T05:50:05.415381 | 2019-10-13T03:51:43 | 2019-10-13T03:51:43 | 214,700,938 | 0 | 0 | MIT | 2019-10-12T19:08:35 | 2019-10-04T21:59:42 | 2019-04-05T10:11:10 | null |
[
{
"alpha_fraction": 0.5228105187416077,
"alphanum_fraction": 0.5434565544128418,
"avg_line_length": 31.29032325744629,
"blob_id": "3d16f50be85c97fa49dfbed4eb35cd4c2d001adf",
"content_id": "cc80170c86e8b4835ec6968618b9b5c39e4d44cb",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3003,
"license_type": "permissive",
"max_line_length": 84,
"num_lines": 93,
"path": "/siml/perceptron.py",
"repo_name": "hrokr/siml",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport load_data as ld\nfrom evaluators import *\nimport random\n\ndef generate_data(no_points):\n X = np.zeros(shape=(no_points, 2))\n Y = np.zeros(shape=no_points)\n for ii in range(no_points):\n X[ii][0] = random.randint(1,9)+0.5\n X[ii][1] = random.randint(1,9)+0.5\n Y[ii] = 1 if X[ii][0]+X[ii][1] >= 13 else -1\n return X, Y\n\nclass Perceptron():\n \"\"\"\n Class for performing Perceptron.\n X is the input array with n rows (no_examples) and m columns (no_features)\n Y is a vector containing elements which indicate the class \n (1 for positive class, -1 for negative class)\n w is the weight-vector (m number of elements)\n b is the bias-value\n \"\"\"\n def __init__(self, b = 0, max_iter = 1000):\n self.max_iter = max_iter\n self.w = []\n self.b = 0\n self.no_examples = 0\n self.no_features = 0\n \n def train(self, X, Y):\n self.no_examples, self.no_features = np.shape(X)\n self.w = np.zeros(self.no_features)\n for ii in range(0, self.max_iter):\n w_updated = False\n for jj in range(0, self.no_examples):\n a = self.b + np.dot(self.w, X[jj])\n if np.sign(Y[jj]*a) != 1:\n w_updated = True\n self.w += Y[jj] * X[jj]\n self.b += Y[jj]\n if not w_updated:\n print(\"Convergence reached in %i iterations.\" % ii)\n break\n if w_updated:\n print(\n \"\"\"\n WARNING: convergence not reached in %i iterations.\n Either dataset is not linearly separable, \n or max_iter should be increased\n \"\"\" % self.max_iter\n )\n\n def classify_element(self, x_elem):\n return int(np.sign(self.b + np.dot(self.w, x_elem)))\n \n def classify(self, X):\n predicted_Y = []\n for ii in range(np.shape(X)[0]):\n y_elem = self.classify_element(X[ii])\n predicted_Y.append(y_elem)\n return predicted_Y\n\n\nX, Y = generate_data(100)\np = Perceptron()\np.train(X, Y)\nX_test, Y_test = generate_data(50)\npredicted_Y_test = p.classify(X_test)\nf1 = f1_score(predicted_Y_test, Y_test, 1)\nprint(\"F1-score on the test-set for class %s is: %s\" % (1, f1))\n\n\n\n#####\n \n# to_bin_y = { 1: { 'Iris-setosa': 1, 'Iris-versicolor': -1, 'Iris-virginica': -1 },\n # 2: { 'Iris-setosa': -1, 'Iris-versicolor': 1, 'Iris-virginica': -1 },\n # 3: { 'Iris-setosa': -1, 'Iris-versicolor': -1, 'Iris-virginica': 1 }\n # }\n\n# X_train, y_train, X_test, y_test = ld.iris()\n\n# Y_train = np.array([to_bin_y[1][x] for x in y_train])\n# Y_test = np.array([to_bin_y[1][x] for x in y_test])\n\n# p = Perceptron()\n# print(\"Training Perceptron Classifier\")\n# p.train(X_train, Y_train)\n\n# predicted_Y_test = p.classify(X_test)\n# f1 = f1_score(predicted_Y_test, Y_test, 1)\n# print(\"F1-score on the test-set for class %s is: %s\" % (1, f1))\n"
},
{
"alpha_fraction": 0.6642394661903381,
"alphanum_fraction": 0.6779935359954834,
"avg_line_length": 35.35293960571289,
"blob_id": "ab961646d09dd3d5c9cde67429443b4a882e8c83",
"content_id": "369874962eaf9a46bd86c8c206a1feea128c723c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1236,
"license_type": "permissive",
"max_line_length": 117,
"num_lines": 34,
"path": "/siml/evaluators.py",
"repo_name": "hrokr/siml",
"src_encoding": "UTF-8",
"text": "from collections import Counter\n\ndef true_positives(determined_Y, real_Y, label):\n true_positives = 0\n for ii in range(0,len(determined_Y)):\n if determined_Y[ii] == label and real_Y[ii] == label: \n true_positives+=1\n return true_positives\n\ndef all_positives(determined_Y, label):\n return Counter(determined_Y)[label]\n\ndef false_negatives(determined_Y, real_Y, label):\n false_negatives = 0\n for ii in range(0,len(determined_Y)):\n if determined_Y[ii] != label and real_Y[ii] == label: \n false_negatives+=1\n return false_negatives\n \ndef precision(determined_Y, real_Y, label):\n if float(all_positives(determined_Y, label)) == 0: return 0\n return true_positives(determined_Y, real_Y, label) / float(all_positives(determined_Y, label))\n\ndef recall(determined_Y, real_Y, label):\n denominator = float((true_positives(determined_Y, real_Y, label) + false_negatives(determined_Y, real_Y, label)))\n if denominator == 0: return 0\n return true_positives(determined_Y, real_Y, label) / denominator\n\ndef f1_score(determined_Y, real_Y, label = 1):\n p = precision(determined_Y, real_Y, label)\n r = recall(determined_Y, real_Y, label)\n if p + r == 0: return 0\n f1 = 2 * (p * r) / (p + r)\n return f1\n"
},
{
"alpha_fraction": 0.555377185344696,
"alphanum_fraction": 0.5727661848068237,
"avg_line_length": 29.639345169067383,
"blob_id": "3032244caaa9ec3aeb819222aa0d59cd85afd7e7",
"content_id": "7a7d348c810418eed56f775eb2764eb16badbbfe",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3738,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 122,
"path": "/siml/load_data.py",
"repo_name": "hrokr/siml",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nimport os\nimport numpy as np\nimport random\nfrom io import open\n\n###\ndef strip_quotations_newline(text):\n text = text.rstrip()\n if text[0] == '\"':\n text = text[1:]\n if text[-1] == '\"':\n text = text[:-1]\n return text\n\ndef expand_around_chars(text, characters):\n for char in characters:\n text = text.replace(char, \" \"+char+\" \")\n return text\n\ndef split_text(text):\n text = strip_quotations_newline(text)\n text = expand_around_chars(text, '\".,()[]{}:;')\n splitted_text = text.split(\" \")\n cleaned_text = [x for x in splitted_text if len(x)>1]\n text_lowercase = [x.lower() for x in cleaned_text]\n return text_lowercase\n\n###\ndef pow10(x):\n i = 1;\n while((i * 10) < x):\n i *= 10.0;\n return i\n \ndef normalize_col(col1, method):\n cc_mean = np.mean(col1)\n if method == 'pow10': \n return col1 / pow10(np.max(col1))\n else:\n return col1 - cc_mean\n \ndef normalize_matrix(X, method = 'mean'):\n no_rows, no_cols = np.shape(X)\n X_normalized = np.zeros(shape=(no_rows, no_cols))\n X_normalized[:,0] = X[:,0]\n for ii in range(1,no_cols):\n X_normalized[:, ii] = normalize_col(X[:, ii], method)\n return X_normalized \n\n###\ndef amazon_reviews():\n datafolder = '../datasets/amazon/'\n files = os.listdir(datafolder)\n Y_train, Y_test, X_train, X_test, = [], [], [], []\n for file in files:\n f = open(datafolder + file, 'r', encoding=\"utf8\")\n label = file\n lines = f.readlines()\n no_lines = len(lines)\n no_training_examples = int(0.7*no_lines)\n for line in lines[:no_training_examples]:\n Y_train.append(label)\n X_train.append(split_text(line))\n for line in lines[no_training_examples:]:\n Y_test.append(label)\n X_test.append(split_text(line))\n f.close()\n return X_train, Y_train, X_test, Y_test\n\ndef adult():\n datafile = '../datasets/adult/adult.data'\n file_test = '../datasets/adult/adult.test'\n df = pd.read_csv(datafile, header=None)\n Y_train = df[14].values\n del df[14]\n del df[2]\n X_train = df.values\n\n df_test = pd.read_csv(file_test, header=None)\n Y_test = df_test[14].values\n del df_test[14]\n del df_test[2]\n X_test = df_test.values\n return X_train, Y_train, X_test, Y_test\n\ndef myopia():\n datafile = '../datasets/myopia/myopia.dat'\n with open(datafile, 'r') as f:\n lines = f.readlines()\n random.shuffle(lines)\n start_col = 2\n no_lines = len(lines)\n no_training_examples = int(0.7*no_lines)\n no_test_examples = no_lines - no_training_examples\n no_features = len(lines[0].split())-start_col\n X, Y = np.zeros(shape=(no_lines, no_features)), np.zeros(shape=no_lines)\n X[:,0] = 1\n rownum = 0\n for line in lines:\n line = line.split()\n line = map(float, line)\n Y[rownum] = int(line[2])\n X[rownum, 1:16] = line[3:18]\n rownum+=1\n X_norm = normalize_matrix(X, 'pow10')\n X_train = X_norm[0:no_training_examples,:]\n Y_train = Y[0:no_training_examples]\n X_test = X_norm[no_training_examples:no_lines,:]\n Y_test = Y[no_training_examples:no_lines]\n return X_train, Y_train, X_test, Y_test\n\ndef iris():\n datafile = '../datasets/iris/iris.data'\n df = pd.read_csv(datafile, header=None)\n df_train = df.sample(frac=0.7)\n df_test = df.loc[~df.index.isin(df_train.index)]\n X_train = df_train.values[:,0:4].astype(float)\n Y_train = df_train.values[:,4]\n X_test = df_test.values[:,0:4].astype(float)\n Y_test = df_test.values[:,4]\n return X_train, Y_train, X_test, Y_test\n"
},
{
"alpha_fraction": 0.6710423231124878,
"alphanum_fraction": 0.6757445335388184,
"avg_line_length": 38.269229888916016,
"blob_id": "dde2026df05cee1935e4f684cccdd78a283764b5",
"content_id": "d5f6b227ac2f2d480911cf9dab4bf86339f37179",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5104,
"license_type": "permissive",
"max_line_length": 136,
"num_lines": 130,
"path": "/siml/sk_utils.py",
"repo_name": "hrokr/siml",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nimport numpy as np\nimport time\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom IPython.display import display\n\nfrom sklearn.preprocessing import LabelEncoder\n\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.gaussian_process import GaussianProcessClassifier\n\n###\n\ndict_classifiers = {\n \"Gradient Boosting Classifier\": GradientBoostingClassifier(),\n \"Random Forest\": RandomForestClassifier(),\n \"Logistic Regression\": LogisticRegression(),\n \"Nearest Neighbors\": KNeighborsClassifier(),\n \"Decision Tree\": DecisionTreeClassifier(),\n \"Linear SVM\": SVC(),\n \"Neural Net\": MLPClassifier(alpha = 1),\n \"Naive Bayes\": GaussianNB(), \n \"AdaBoost\": AdaBoostClassifier(),\n \"Gaussian Process\": GaussianProcessClassifier()\n}\n \ndef batch_classify(X_train, Y_train, X_test, Y_test, no_classifiers = 5, verbose = True):\n \"\"\"\n This method, takes as input the X, Y matrices of the Train and Test set.\n And fits them on all of the Classifiers specified in the dict_classifier.\n Usually, the SVM, Random Forest and Gradient Boosting Classifier take quiet some time to train. \n So it is best to train them on a smaller dataset first and \n decide whether you want to comment them out or not based on the test accuracy score.\n \"\"\"\n \n dict_models = {}\n for classifier_name, classifier in list(dict_classifiers.items())[:no_classifiers]:\n t_start = time.clock()\n classifier.fit(X_train, Y_train)\n t_end = time.clock()\n \n t_diff = t_end - t_start\n train_score = classifier.score(X_train, Y_train)\n test_score = classifier.score(X_test, Y_test)\n \n dict_models[classifier_name] = {'model': classifier, 'train_score': train_score, 'test_score': test_score, 'train_time': t_diff}\n if verbose:\n print(\"trained {c} in {f:.2f} s\".format(c=classifier_name, f=t_diff))\n return dict_models\n\n####\n\ndef label_encode(df, list_columns):\n \"\"\"\n This method one-hot encodes all column, specified in list_columns\n \n \"\"\"\n for col in list_columns:\n le = LabelEncoder()\n col_values_unique = list(df[col].unique())\n le_fitted = le.fit(col_values_unique)\n\n col_values = list(df[col].values)\n le.classes_\n col_values_transformed = le.transform(col_values)\n df[col] = col_values_transformed\n\ndef get_train_test(df, y_col, x_cols, ratio):\n \"\"\" \n This method transforms a dataframe into a train and test set, for this you need to specify:\n 1. the ratio train : test (usually 0.7)\n 2. 
the column with the Y_values\n \"\"\"\n mask = np.random.rand(len(df)) < ratio\n df_train = df[mask]\n df_test = df[~mask]\n \n Y_train = df_train[y_col].values\n Y_test = df_test[y_col].values\n X_train = df_train[x_cols].values\n X_test = df_test[x_cols].values\n return df_train, df_test, X_train, Y_train, X_test, Y_test\n \ndef display_dict_models(dict_models, sort_by='test_score'):\n cls = [key for key in dict_models.keys()]\n test_s = [dict_models[key]['test_score'] for key in cls]\n training_s = [dict_models[key]['train_score'] for key in cls]\n training_t = [dict_models[key]['train_time'] for key in cls]\n \n df_ = pd.DataFrame(data=np.zeros(shape=(len(cls),4)), columns = ['classifier', 'train_score', 'test_score', 'train_time'])\n for ii in range(0,len(cls)):\n df_.loc[ii, 'classifier'] = cls[ii]\n df_.loc[ii, 'train_score'] = training_s[ii]\n df_.loc[ii, 'test_score'] = test_s[ii]\n df_.loc[ii, 'train_time'] = training_t[ii]\n \n display(df_.sort_values(by=sort_by, ascending=False))\n \ndef display_corr_matrix(df):\n correlation_matrix = df.corr()\n fig, ax = plt.subplots(figsize=(10,10))\n ax = sns.heatmap(correlation_matrix, vmax=1, square=True,annot=False,cmap='RdYlGn')\n plt.title('Correlation matrix between the features')\n plt.colorbar(fig, orientation='vertical', fraction=0.03, label = 'Correlation strength')\n plt.show()\n\ndef display_corr_with_col(df, col):\n correlation_matrix = df.corr()\n correlation_type = correlation_matrix[col].copy()\n abs_correlation_type = correlation_type.apply(lambda x: abs(x))\n desc_corr_values = abs_correlation_type.sort_values(ascending=False)\n y_values = list(desc_corr_values.values)[1:]\n x_values = range(0,len(y_values))\n xlabels = list(desc_corr_values.keys())[1:]\n fig, ax = plt.subplots(figsize=(8,8))\n ax.bar(x_values, y_values)\n ax.set_title('The correlation of all features with {}'.format(col), fontsize=20)\n ax.set_ylabel('Pearson correlatie coefficient [abs waarde]')\n plt.xticks(x_values, xlabels, rotation='vertical')\n plt.show()"
},
{
"alpha_fraction": 0.5719723105430603,
"alphanum_fraction": 0.5889273285865784,
"avg_line_length": 34.679012298583984,
"blob_id": "c6625cc08ef449e8a59728d2e916f99b4de01bea",
"content_id": "20d9b910bf3f43798f9c5a6d2176eb782f4240c2",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2890,
"license_type": "permissive",
"max_line_length": 132,
"num_lines": 81,
"path": "/siml/logistic_regression.py",
"repo_name": "hrokr/siml",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport load_data as ld\nfrom evaluators import *\n\nclass LogisticRegression():\n \"\"\"\n Class for performing logistic regression.\n \"\"\"\n def __init__(self, learning_rate = 0.7, max_iter = 1000):\n self.learning_rate = learning_rate\n self.max_iter = max_iter\n self.theta = []\n self.no_examples = 0\n self.no_features = 0\n self.X = None\n self.Y = None\n \n def add_bias_col(self, X):\n bias_col = np.ones((X.shape[0], 1))\n return np.concatenate([bias_col, X], axis=1)\n \n def hypothesis(self, X):\n return 1 / (1 + np.exp(-1.0 * np.dot(X, self.theta)))\n\n def cost_function(self):\n \"\"\"\n We will use the binary cross entropy as the cost function. https://en.wikipedia.org/wiki/Cross_entropy\n \"\"\"\n predicted_Y_values = self.hypothesis(self.X)\n cost = (-1.0/self.no_examples) * np.sum(self.Y * np.log(predicted_Y_values) + (1 - self.Y) * (np.log(1-predicted_Y_values)))\n return cost\n \n def gradient(self):\n predicted_Y_values = self.hypothesis(self.X)\n grad = (-1.0/self.no_examples) * np.dot((self.Y-predicted_Y_values), self.X)\n return grad\n \n def gradient_descent(self):\n for iter in range(1,self.max_iter):\n cost = self.cost_function()\n delta = self.gradient()\n self.theta = self.theta - self.learning_rate * delta\n print(\"iteration %s : cost %s \" % (iter, cost))\n \n def train(self, X, Y):\n self.X = self.add_bias_col(X)\n self.Y = Y\n self.no_examples, self.no_features = np.shape(X)\n self.theta = np.ones(self.no_features + 1)\n self.gradient_descent()\n \n def classify(self, X):\n X = self.add_bias_col(X)\n predicted_Y = self.hypothesis(X)\n predicted_Y_binary = np.round(predicted_Y)\n return predicted_Y_binary\n\nto_bin_y = { 1: { 'Iris-setosa': 1, 'Iris-versicolor': 0, 'Iris-virginica': 0 },\n 2: { 'Iris-setosa': 0, 'Iris-versicolor': 1, 'Iris-virginica': 0 },\n 3: { 'Iris-setosa': 0, 'Iris-versicolor': 0, 'Iris-virginica': 1 }\n }\n\nX_train, y_train, X_test, y_test = ld.iris()\n\nY_train = np.array([to_bin_y[3][x] for x in y_train])\nY_test = np.array([to_bin_y[3][x] for x in y_test])\n\nprint(\"training Logistic Regression Classifier\")\nlr = LogisticRegression()\nlr.train(X_train, Y_train)\nprint(\"trained\")\npredicted_Y_test = lr.classify(X_test)\nf1 = f1_score(predicted_Y_test, Y_test, 1)\nprint(\"F1-score on the test-set for class %s is: %s\" % (1, f1))\n\n# from sklearn.linear_model import LogisticRegression\n# logistic = LogisticRegression()\n# logistic.fit(X_train,Y_train)\n# predicted_Y_test = logistic.predict(X_test)\n# f1 = f1_score(predicted_Y_test, Y_test, 1)\n# print(\"F1-score on the test-set for class %s is: %s\" % (1, f1))\n"
},
{
"alpha_fraction": 0.7001827955245972,
"alphanum_fraction": 0.7199268937110901,
"avg_line_length": 33.150001525878906,
"blob_id": "0a2bf473cd42833fa8329673fd447058f7626244",
"content_id": "6689bc4171ed75a759ae09bd92ef690bfacc5d3c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2735,
"license_type": "permissive",
"max_line_length": 139,
"num_lines": 80,
"path": "/datasets/japanese_credit/load_data.py",
"repo_name": "hrokr/siml",
"src_encoding": "UTF-8",
"text": "#This is code includes the logistic regression algorithm for the classification of the japanese credit dataset.\n#goto http://ataspinar.com for a detailed explanation of the math behind logistic regression\n#goto https://github.com/taspinar/siml for the full code\n#It was used during hackathon4 of the Eindhoven Data Science group: https://www.meetup.com/Eindhoven-Data-Science-Meetup/events/234115346/\n\nimport pandas as pd\nfrom sets import Set\nimport random\nimport numpy as np\n\ndatafile = './japanese_credit.data'\n\ndf = pd.read_csv(datafile, header=None)\n\ncolumn_values = list(df.columns.values)\ncategorical_columns = [0,3,4,5,6,8,9,11,12]\nstr_cols = [0,1,3,4,5,6,8,9,11,12,13]\nint_columns = [10,13,14]\nfloat_columns = [1,2,7]\n\n#first we select only the rows which do not contain any invalid values\nfor col in str_cols:\n df = df[df[col] != '?']\n\n#columns containing categorical values are expanded to k different columns with binary values (k is number of categories)\nfor col in categorical_columns:\n col_values = list(Set(df[col].values))\n for col_value in col_values:\n if col_value != '?':\n df.loc[df[col] == col_value, str(col)+'_is_'+col_value] = 1\n \n#remove original columns\nfor col in categorical_columns:\n del df[col]\n\n#rename the column with the label to 'label' and make it integer\ndf.loc[df[15] == '+', 'label'] = 1\ndel df[15]\n\n#normalize the columns with integer values by the mean value\nfor col in int_columns:\n df[col] = df[col].apply(int)\n col_values = list(df[col].values)\n mean = np.mean(map(int,col_values))\n df[col] = df[col].apply(lambda x: x/float(mean))\n\n#normalize the columns with float values by the mean value\nfor col in float_columns:\n df[col] = df[col].apply(float)\n col_values = list(df[col].values)\n mean = np.mean(map(float,col_values))\n df[col] = df[col].apply(lambda x: x/mean)\n \ndf = df.fillna(0)\n\n#create a training and a test set\nindices = df.index.values\nrandom.shuffle(indices)\nno_training_examples = int(0.7*len(indices))\ndf_training = df.ix[indices[:no_training_examples]]\ndf_test = df.ix[indices[no_training_examples:]]\n\n#create and fill the Y matrices of the training and test set\nY = df_training['label'].values\nY_test = df_test['label'].values\ndel df_training['label']\ndel df_test['label']\n\n#create the X matrices of the training and test set and initialize with zero\nno_features = len(df_training.columns.values)\nno_test_examples = len(df_test.index.values)\nX = np.zeros(shape=(no_training_examples, no_features))\nX_test = np.zeros(shape=(no_test_examples,no_features))\n\n#fill the X matrices\ncol_values = df_training.columns.values\nfor ii in range(0,len(col_values)):\n col = col_values[ii]\n X[:,ii] = df_training[col].values\n X_test[:,ii] = df_test[col].values\n \n"
}
] | 6 |
LukeKeltner/Primes-and-ML
|
https://github.com/LukeKeltner/Primes-and-ML
|
a5626a3e32053e2dbaa6e8ec756e146217b9a91a
|
e88600dee24478a5c717c62977b79c0cc351ee30
|
79996b20bfbe029bbb95517ff43d849b02883cd5
|
refs/heads/master
| 2020-03-11T04:53:55.778337 | 2018-04-16T20:58:57 | 2018-04-16T20:58:57 | 129,787,947 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6775635480880737,
"alphanum_fraction": 0.6932104229927063,
"avg_line_length": 28.3360652923584,
"blob_id": "7bcde79515c386c2bcf4410e13bfd637135fd376",
"content_id": "1e333f8cbc6d5b7facb83b40cfa347925563ae1c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3579,
"license_type": "no_license",
"max_line_length": 155,
"num_lines": 122,
"path": "/primes.py",
"repo_name": "LukeKeltner/Primes-and-ML",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nimport numpy as np\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.utils import shuffle\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import curve_fit\n\ntotal_numbers_considered = []\ntotal_scores = []\neven_scores = []\nodd_scores = []\nfifty_percent_scores = []\n\ndef run(total_number):\n\ttotal_numbers_considered.append(total_number)\n\tnumber = []\n\tdigits = []\n\tprimes = []\n\tmax_number = total_number\n\tmax_length = len(str(max_number))\n\n\tdef getPrimes(N):\n\t\tprimes_to_check = []\n\t\tprimes_to_return = []\n\n\t\tfor n in range(2, N+1):\n\n\t\t\tprime = True\n\n\t\t\tfor i in primes_to_check:\n\n\t\t\t\tif n%i==0:\n\n\t\t\t\t\tprime = False\n\n\t\t\tif(prime):\n\t\t\t\tprimes_to_return.append(1)\n\t\t\telse:\n\t\t\t\tprimes_to_return.append(0)\n\n\t\t\tif (n**2 < N and prime):\n\t\t\t\tprimes_to_check.append(n)\n\n\t\treturn primes_to_return\n\n\n\tprimes = getPrimes(max_number)\n\n\tfor i in range(2, max_number+1):\n\n\t\tnumbers = []\n\t\tfor d in str(i):\n\t\t\tnumbers.append(int(d))\n\n\t\tif len(str(i)) < max_length:\n\t\t\tfor neg_one in range(max_length-len(str(i))):\n\t\t\t\tnumbers.insert(0,-1)\n\t\tdigits.append(numbers)\n\t\tnumber.append(i)\n\n\tnew_digits = []\n\n\tfor digit in range(len(digits[0])):\n\t\tnew_digit_column = []\n\t\tfor num in range(len(number)):\n\t\t\tnew_digit_column.append(digits[num][digit])\n\t\tnew_digits.append(new_digit_column)\n\n\tdictionary = {\"Number\": number, \"Prime\": primes}\n\tfor digit_column in range(len(new_digits)):\n\t\tdictionary[str(10**(len(new_digits)-digit_column-1))] = new_digits[digit_column]\n\n\tprimes_df = pd.DataFrame(dictionary)\n\tfeatures = primes_df.drop(\"Prime\", axis=1).columns.values\n\tprimes_df = shuffle(primes_df)\n\n\ttrain = primes_df[0: round(max_number*.8)]\n\ttest = primes_df[round(max_number*.8)+1: max_number]\n\ttest_evens = test[test[\"Number\"]%2==0]\n\ttest_odds = test[test[\"Number\"]%2==1]\n\n\tfeatures = primes_df.drop(\"Prime\", axis=1).columns.values\n\tclf = RandomForestClassifier(max_depth=2, random_state=0)\n\tclf.fit(train[features], train[\"Prime\"])\n\n\ttotal_score = clf.score(test[features], test[\"Prime\"])\n\ttotal_scores.append(total_score)\n\n\teven_score = clf.score(test_evens[features], test_evens[\"Prime\"])\n\teven_scores.append(even_score)\n\n\todd_score = clf.score(test_odds[features], test_odds[\"Prime\"])\n\todd_scores.append(odd_score)\n\nmax_run = 9999\nmin_run = 50\n\nfor i in range(min_run, max_run):\n\tprint(str(round((i-min_run)/(max_run-min_run)*100,2))+\"% complete. 
Finished with number \"+str(i-1))\n\trun(i)\n\ndef fit_func(x, a, b, c):\n\treturn a * np.log(b * x) + c\n\ntotal_numbers_considered = np.array(total_numbers_considered)\ntotal_scores = np.array(total_scores)\neven_scores = np.array(even_scores)\nodd_scores = np.array(odd_scores)\n\npopt_total, pcov_total = curve_fit(fit_func, total_numbers_considered, total_scores)\npopt_even, pcov_even = curve_fit(fit_func, total_numbers_considered, even_scores)\npopt_odd, pcov_odd = curve_fit(fit_func, total_numbers_considered, odd_scores)\n\nplt.plot(total_numbers_considered, total_scores, label=\"Total Score\")\nplt.plot(total_numbers_considered, even_scores, label=\"Evens Score\")\nplt.plot(total_numbers_considered, odd_scores, label=\"Odds Score\")\nplt.plot(total_numbers_considered, fit_func(total_numbers_considered, *popt_total), 'b-', label='Total fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt_total))\nplt.plot(total_numbers_considered, fit_func(total_numbers_considered, *popt_even), 'r-', label='Even fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt_even))\nplt.plot(total_numbers_considered, fit_func(total_numbers_considered, *popt_odd), 'g-', label='Odd fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt_odd))\n\nplt.legend(loc=4)\nplt.show()\n"
},
{
"alpha_fraction": 0.6601483225822449,
"alphanum_fraction": 0.6985839605331421,
"avg_line_length": 58.2400016784668,
"blob_id": "cac9ee089acb0a4dbed70012e44b4235f46804df",
"content_id": "2b7182b1ff8b569f612dcf8a79981becb93a6737",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1483,
"license_type": "no_license",
"max_line_length": 187,
"num_lines": 25,
"path": "/README.md",
"repo_name": "LukeKeltner/Primes-and-ML",
"src_encoding": "UTF-8",
"text": "# Using Machine Learning to Predict Prime Numbers\n\n\n## Breakdown of Process\n1. Creates the DataFrame of the number, it's digits (-1's indicate no digit), and whether it is a prime number or not. (Python, NumPy, Pandas). A few example rows are shown below.\n\nid | Number | \"1000\" | \"100\" | \"10\" | \"1\" | Prime \n--- | --- | --- | --- | --- | --- | ---\n0 | 2 | -1 | -1 | -1 | 2 | 1\n32 | 34 | -1 | -1 | 3 | 4 | 0\n935 | 937 | -1 | 9 | 3 | 7 | 1\n\n2. Shuffles the DataFrame then splits it up into 80% going into a training set and the other 20% going into a testing set.\n3. Uses the training set to create a Random Forest Classifier to create a model of Prime Numbers given their digits. (Scikit-learn).\n4. Tests the models on the training set in three ways. It first tests its score on the total set of testing data, then on the even numbers in the testing data, then on the odd numbers. \n5. Plots the accuracy of the model given a certain amount of numbers you use to train the model. (Matplotlib).\n6. Fits each model to y = alog(bx)+c (SciPy) \n\n## Example Output\n<img src=\"example.JPG\" width=\"1000px\"/>\n\n## Notes\n* The total score accuracy gets around 84-85% from out-of-the-box Scikit-learn Random Forest Classifier.\n* With very little training data, the model learns that even numbers (except 2 of course) are never prime.\n* The same holds true for any given multiple. This statement begins to breakdown as the mutiple gets closer to the max number you use to train the data with. \n"
}
] | 2 |
KHAung-kha/GIS
|
https://github.com/KHAung-kha/GIS
|
8239d17f7e57e1bf6e69dc07ae4e6b47383bd36a
|
26d0b8a144c66035c53c7328972e3bf430e6471a
|
a00deca5b8f73dba5cf9f2df51c53dbe70c6fd7e
|
refs/heads/master
| 2020-09-17T18:15:00.560559 | 2020-08-07T04:45:48 | 2020-08-07T04:45:48 | 224,107,900 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5610427856445312,
"alphanum_fraction": 0.5873251557350159,
"avg_line_length": 34.03053283691406,
"blob_id": "e8045e18bd559f713fd5fd88f58f22659021fea0",
"content_id": "ecf536cc896ff3e18fffd1b79a650f40bf884b13",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4718,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 131,
"path": "/photo_location_ArcTool_with_flip.py",
"repo_name": "KHAung-kha/GIS",
"src_encoding": "UTF-8",
"text": "# looking for specified location to create a point and fill the attribute from excel (photo name)\r\n# https://gis.stackexchange.com/questions/9433/extracting-coordinates-of-polygon-vertices-in-arcmap\r\n# Author - Kyaw Htet Aung & Kyaw Naing Win(OneMap Myanmar)\r\n# Date - 20200813\r\n\r\n#Feature must be WGS84 Datum, otherwise calculated value will be wrong\r\n\r\nimport pandas as pd\r\nimport arcpy\r\nimport os\r\n\r\nfcOrig = arcpy.GetParameterAsText(0)\r\ncolName = arcpy.GetParameterAsText(1)\r\ntn = arcpy.GetParameterAsText(2)\r\ncol1 = arcpy.GetParameterAsText(3)\r\ncol2 = arcpy.GetParameterAsText(4)\r\ncol3 = arcpy.GetParameterAsText(5)\r\npath = arcpy.GetParameterAsText(6)\r\nname = arcpy.GetParameterAsText(7)\r\nisflip = arcpy.GetParameterAsText(8)\r\n\r\narcpy.env.overwriteOutput = True \r\n\r\n'''\r\nfc = r\"D:\\DOH\\doh_lrs_2d3d_analysis\\track_2D_10mile_utm47.shp\"\r\ncolName = \"rdId\"\r\ntn = r\"D:\\DOH\\photo_location.xlsx\\Sheet1$\"\r\npath = r\"D:\\DOH\\Finding_Photo_Location\"\r\nname = \"test010\"\r\n'''\r\n\r\ndoneList = [] #create a list not to add duplicate\r\n\r\n#change the Spatial Reference to projected System (UTM 47 N)\r\narcpy.CreateFileGDB_management(\"C:/\", \"fGDB.gdb\")\r\nfc = r\"C:\\fGDB.gdb\\Temp_fc\"\r\noutCS = arcpy.SpatialReference(\"WGS 1984 UTM Zone 47N\")\r\narcpy.Project_management(fcOrig,fc,outCS)\r\n\r\nif name.endswith(\".shp\"):\r\n name = name\r\nelse:\r\n name = name + \".shp\"\r\nfile_name = path + \"\\\\\" + name\r\n\r\n#creating feature class\r\narcpy.CreateFeatureclass_management(path, name, \"POINT\", has_z=\"ENABLED\", spatial_reference=fc)\r\n\r\n#adding new columns\r\narcpy.AddField_management(file_name,\"X\",\"DOUBLE\")\r\narcpy.AddField_management(file_name,\"Y\",\"DOUBLE\")\r\narcpy.AddField_management(file_name,\"threeD_len\",\"DOUBLE\")\r\narcpy.AddField_management(file_name,\"twoD_len\",\"DOUBLE\")\r\narcpy.AddField_management(file_name,\"phName\",\"TEXT\")\r\narcpy.AddField_management(file_name,\"rdId\",\"TEXT\")\r\n\r\n#what attributes will be added when every new feature is added\r\ncursor0 = arcpy.da.InsertCursor(file_name, [\"SHAPE@XY\",\"X\",\"Y\",\"twoD_len\",\"phName\",\"rdId\"])\r\n\r\n#reading the excel file by panda\r\n#df = pd.read_excel(r\"D:\\DOH\\photo_location.xlsx\")\r\n\r\nfile, sheet = os.path.split(tn)\r\ndf = pd.read_excel(file, sheet_name= sheet)\r\n\r\ncols = list(df.columns)\r\nid0 = cols.index(col1)\r\nid1 = cols.index(col2)\r\nid2 = cols.index(col3)\r\n#id0 = cols.index(\"road_id\")\r\n#id1 = cols.index(\"len\")\r\n#id2 = cols.index(\"photo\")\r\n\r\nfor index0, row1 in df.iterrows():\r\n row1 = list(row1)\r\n rdId = row1[int(id0)]\r\n rdLen = row1[int(id1)]\r\n phName = row1[int(id2)]\r\n\r\n expression = \"\\\"{}\\\" = \\'{}\\'\".format(colName,rdId) #to use in the where_clause(filtering)\r\n sum2D = 0\r\n\r\n #reading the feature class\r\n with arcpy.da.SearchCursor(fc, ['SHAPE@'],where_clause= expression) as cursor:\r\n for row in cursor:\r\n array1 = row[0].getPart()\r\n if isflip == \"true\":\r\n start = row[0].pointCount -1\r\n stop = -1\r\n step = -1\r\n\r\n else:\r\n start = 0\r\n stop = row[0].pointCount\r\n step = 1\r\n #for index in range(row[1].pointCount,-1,-1):\r\n for index in range(start,stop,step):\r\n if index:\r\n pnt = array1.getObject(0).getObject(index) #get 2nd point\r\n pnt0 = array1.getObject(0).getObject(index - 1) #get 1st point\r\n x1 = pnt.X\r\n y1 = pnt.Y\r\n x0 = pnt0.X\r\n y0 = pnt0.Y\r\n dx = x1 - x0\r\n dy = y1 - y0\r\n dist2D = math.sqrt((dx * dx) + (dy * 
dy))\r\n #if not(rdId in doneList): #blocking the duplicate values\r\n #if sum2D != rdLen :\r\n #cursor0.insertRow([(x0, y0), x0, y0, (sum2D),\"\",rdId]) #add old points\r\n\r\n if sum2D < rdLen < (sum2D + dist2D): #specifying the location (the extect point as wanted)\r\n s = rdLen -sum2D\r\n dX = x1 - x0\r\n dx = dX * (s / dist2D)\r\n dY = y1 - y0\r\n dy = dY * (s / dist2D)\r\n X = x0 + dx\r\n Y = y0 + dy\r\n cursor0.insertRow([(X, Y), X, Y, rdLen,phName,rdId]) #add new points and some attributes\r\n sum2D = sum2D + dist2D\r\n #doneList.append(rdId)\r\n\r\nprint(\"code was successfully run\")\r\n\r\n#add the feature to current ArcMap#add the feature to current ArcMap\r\narcpy.AddMessage(\"Finished\")\r\nmxd = arcpy.mapping.MapDocument(\"CURRENT\")\r\nlayer = arcpy.mapping.Layer(file_name)\r\ndf1 = arcpy.mapping.ListDataFrames(mxd)[0]\r\narcpy.mapping.AddLayer(df1,layer)"
},
{
"alpha_fraction": 0.6658519506454468,
"alphanum_fraction": 0.6784217953681946,
"avg_line_length": 55.31999969482422,
"blob_id": "a9be4462267d6460cf2a85727182351cc282f4ce",
"content_id": "3ab33afeba29fdc7ef68765b8dbb406ab4a2b38c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2864,
"license_type": "no_license",
"max_line_length": 632,
"num_lines": 50,
"path": "/KML_Towns_all_Regions_2.py",
"repo_name": "KHAung-kha/GIS",
"src_encoding": "UTF-8",
"text": "#required modules : shutil, pandas, time\r\n\r\n#turn the crrodinates from excel to kml with village names, state by state\r\n#define the head, tail, style, point start and point end of kml first\r\n#merge them with coordinate from excel\r\nimport shutil\r\nimport pandas as pd\r\nimport time as tm\r\nst_time = tm.time()\r\nhead = \"<?xml version=\\\"1.0\\\" encoding=\\\"UTF-8\\\"?><kml xmlns=\\\"http://www.opengis.net/kml/2.2\\\" xmlns:gx=\\\"http://www.google.com/kml/ext/2.2\\\" xmlns:kml=\\\"http://www.opengis.net/kml/2.2\\\" xmlns:atom=\\\"http://www.w3.org/2005/Atom\\\"><Document>\"\r\nstyle = \"<StyleMap id=\\\"msn_placemark_circle\\\"><Pair><key>normal</key><styleUrl>#sn_placemark_circle</styleUrl></Pair><Pair><key>highlight</key><styleUrl>#sh_placemark_circle_highlight</styleUrl></Pair></StyleMap><Style id=\\\"sh_placemark_circle_highlight\\\"><IconStyle><scale>1.2</scale><Icon><href>http://maps.google.com/mapfiles/kml/shapes/placemark_circle_highlight.png</href></Icon></IconStyle><ListStyle></ListStyle></Style><Style id=\\\"sn_placemark_circle\\\"><IconStyle><scale>1.2</scale><Icon><href>http://maps.google.com/mapfiles/kml/shapes/placemark_circle.png</href></Icon></IconStyle><ListStyle></ListStyle></Style><Folder>\"\r\ntail = \"</Folder></Document></kml>\"\r\nptend = \"</coordinates></Point></Placemark>\"\r\ndf = pd.read_excel(r\"C:\\Users\\Thinkpad\\Downloads\\Compressed\\Myanmar_PCodes_Release-IX_Sep2019_Countrywide\\Myanmar PCodes Release-IX_Sep2019_Countrywide.xlsx\",sheet_name=\"_07_Villages\")\r\ndf = df[['SR_Name_Eng','Village_Name_Eng','Latitude','Longitude']] #filtering the wanted colums only\r\ndf= df.dropna() #removing NA rows\r\n#print(df.shape)\r\n#print(df.head())\r\nalt = 0\r\nstName = df['SR_Name_Eng']\r\nstName= set(stName)\r\nprint(stName)\r\nfor eachst in stName:\r\n df1=df.loc[(df['SR_Name_Eng'] == eachst)]\r\n #print(df1.shape)\r\n points = \"\"\r\n for index,row in df1.iterrows():\r\n name = row['Village_Name_Eng']\r\n ptstart = f\"<Placemark><name>{name}</name><styleUrl>#msn_placemark_circle</styleUrl><Point><gx:drawOrder>1</gx:drawOrder><coordinates>\"\r\n lat = row['Latitude']\r\n lon = row['Longitude']\r\n coordinate = str(lon) + \",\" + str(lat) + \",\" + str(alt) + \" \"\r\n points = points + ptstart+ coordinate + ptend\r\n placemark = head + style + points + tail\r\n Namefile = \"kml_creation_KHA_\" + eachst + \".kml\"\r\n kml_file = open(Namefile,\"w\",encoding=\"utf-8\")\r\n kml_file.write(placemark)\r\n kml_file.close()\r\n print(eachst + \" has been written!\")\r\n source = \"E:\\Python_scrpits\\python_files\\\\\" + Namefile\r\n distination = \"E:\\KMLs\\\\\" + Namefile\r\n tm.sleep(1)\r\n shutil.move(source, distination) #moving the files to other directory\r\nend_time = tm.time()\r\nprint(str(round(end_time - st_time)))\r\nprint(\"finished\")\r\nimport pyttsx3 as pt\r\nsound = pt.init()\r\nsound.say(\"Hi, your code is successfully run and finished\")\r\nsound.runAndWait()"
}
] | 2 |
utokyo-hirata-lab/Earth-Neuro-Link
|
https://github.com/utokyo-hirata-lab/Earth-Neuro-Link
|
9ee34910307462ead450e52b4becf0230c69e35b
|
acd9f857480c6be7a18724cc918905330ed6d270
|
6a3ea306ec2dd844a27a673e8c7580b3114de696
|
refs/heads/master
| 2020-07-15T05:56:03.362762 | 2019-09-01T08:29:51 | 2019-09-01T08:29:51 | 205,494,500 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5443708896636963,
"alphanum_fraction": 0.739072859287262,
"avg_line_length": 57.07692337036133,
"blob_id": "07a0eceed16b2195e6947e45b648ab2d6223d8d5",
"content_id": "96f94a792e7977c7cdaf1b22388525aa318405f6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 755,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 13,
"path": "/main.py",
"repo_name": "utokyo-hirata-lab/Earth-Neuro-Link",
"src_encoding": "UTF-8",
"text": "from enl_map import locality\nlc = locality()\n\nlc.set_default(0,0,3)\n#lc.read_spreadsheet('https://docs.google.com/spreadsheets/d/1IUBUV8mGHp9lHNyCOh0FrJTupVpeLQA0W7sl6d7XiWM/edit#gid=1390728700')\nlc.auto_collect('1IUBUV8mGHp9lHNyCOh0FrJTupVpeLQA0W7sl6d7XiWM')\n#lc.manual_collect('Australia','Pilbara',-21.8097056,117.3049005,'zircon',4.2,'Etc et al., 2019')\n#lc.manual_collect('Australia','Pilbara',-23.8097056,119.3049005,'zircon',4.2,'Etc et al., 2018')\n#lc.manual_collect('Australia','Pilbara',-24.0097056,118.3000005,'zircon',4.3,'Etc et al., 2017')\n#lc.manual_collect('Canada','Labrador',53.682000,-60.746118,'zircon',2.7,'Etc et al., 2016')\n#lc.manual_collect('Gabon','Okandja',-0.704010,13.791012,'zircon',2.4,'Etc et al., 2015')\n\nlc.marker('all')\n"
},
{
"alpha_fraction": 0.4935064911842346,
"alphanum_fraction": 0.7922077775001526,
"avg_line_length": 50.33333206176758,
"blob_id": "dbb078ab33c10bd2f531dd1c68bba01844ab7337",
"content_id": "1377ff8679d2153657ba1683d558102991d8a26f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 172,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 3,
"path": "/README.md",
"repo_name": "utokyo-hirata-lab/Earth-Neuro-Link",
"src_encoding": "UTF-8",
"text": "# Earth-Neuro-Link\n\n\n"
},
{
"alpha_fraction": 0.6253482103347778,
"alphanum_fraction": 0.6330083608627319,
"avg_line_length": 41.235294342041016,
"blob_id": "1030a41925fd8a9ff2b36f6be7dcdefeb68b93ee",
"content_id": "d4221a3dfc428e21dc80b9a1092b776d8a82e946",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2872,
"license_type": "no_license",
"max_line_length": 216,
"num_lines": 68,
"path": "/enl_map.py",
"repo_name": "utokyo-hirata-lab/Earth-Neuro-Link",
"src_encoding": "UTF-8",
"text": "import os\nimport folium\nimport sqlite3\nimport pandas as pd\nfrom httplib2 import Http\nimport gspread\nfrom df2gspread import gspread2df as g2d\nfrom oauth2client.service_account import ServiceAccountCredentials\n\nclass locality:\n def __init__(self):\n self.locality = pd.DataFrame({'Nation':[], 'Location':[], 'Latitude':[], 'Longitude':[], 'Mineral':[], 'Age':[],'Reference':[]})\n self.zmap = folium.Map(location=[0, 0], zoom_start=3)\n self.conn = \"\"\n\n def set_default(self,latitude,longitude,zoom):\n self.zmap = folium.Map(location=[latitude, longitude], zoom_start=zoom)\n\n def download_as_df(self,sheet_id, sheet_name):\n scopes = ['https://www.googleapis.com/auth/spreadsheets']\n json_file = '/Users/watarut/Downloads/Earth-Neuro-Link.json'#Json Cliant ID for OAuth\n credentials = ServiceAccountCredentials.from_json_keyfile_name(json_file, scopes=scopes)\n http_auth = credentials.authorize(Http())\n df = g2d.download(sheet_id, wks_name=sheet_name, col_names=True, row_names=False, credentials=credentials)\n return df\n\n def sql(self,path):\n df = self.download_as_df(path, 'form1')\n dbname = \"enl.db\"\n self.conn = sqlite3.connect(dbname)\n df.to_sql(\"enl_data\", self.conn, if_exists=\"replace\")\n self.conn.close()\n\n def auto_collect(self,path):\n self.sql(path)\n dbname = \"enl.db\"\n self.conn = sqlite3.connect(dbname)\n c = self.conn.cursor()\n sql = 'select * from enl_data'\n for row in c.execute(sql):\n self.manual_collect(row[2],row[3],row[4],row[5],row[6],row[7],row[8])\n self.conn.close()\n\n def manual_collect(self,nation,location,latitude,longitude,mineral,age,reference):\n add_locality = pd.DataFrame({'Nation':[nation], 'Location':[location], 'Latitude':[latitude], 'Longitude':[longitude], 'Mineral':[mineral], 'Age':[age],'Reference':[reference]},index=[len(self.locality)+1])\n self.locality = pd.concat([self.locality,add_locality],sort=False)\n\n def google_map(self,latitude,longitude):\n url_bridge = 'https://www.google.com/maps/place/'+str(latitude)+'+'+str(longitude)\n return url_bridge\n\n def add_map(self,num):\n ind = 'index == '+str(num)\n latitude,longitude = self.locality.query(ind)['Latitude'],self.locality.query(ind)['Longitude']\n link = self.google_map(float(latitude), float(longitude))\n pop = str(float(self.locality.query(ind)['Age']))+' (GA)\\n'+link\n folium.Marker([latitude, longitude], popup=pop).add_to(self.zmap)\n self.zmap.save('mineral_age.html')\n\n def marker(self,num):\n if num == 'all':\n for i in range(len(self.locality)):\n self.add_map(i+1)\n else:\n self.add_map(num)\n\n def output(self):\n return self.locality\n"
}
] | 3 |
Juikyy/Nasa-Space-Apps
|
https://github.com/Juikyy/Nasa-Space-Apps
|
4ecb03cf26b95b1fbd0a285ad02c47aa2aebfb77
|
4ac7e7dbc573e008d918d7b6d1d1cbf04e16ec38
|
ab77adeb985a91063b0ddcbc2b6f1062a8a404d7
|
refs/heads/master
| 2020-08-22T13:02:18.050557 | 2019-10-20T17:34:57 | 2019-10-20T17:34:57 | 216,400,356 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.2847616672515869,
"alphanum_fraction": 0.4687577188014984,
"avg_line_length": 46.40238952636719,
"blob_id": "05ccaecb7cfdbdd2dff61147545c1d4177fefdd0",
"content_id": "65729ada5f80a30ef47cba59b5dfc04df756af97",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12147,
"license_type": "no_license",
"max_line_length": 464,
"num_lines": 251,
"path": "/Datos.py",
"repo_name": "Juikyy/Nasa-Space-Apps",
"src_encoding": "UTF-8",
"text": "import requests\r\nimport json\r\ntemp=[]\r\ncloud=[]\r\nhumidity=[]\r\nres=[]\r\nfor i in range(-90,90,10):\r\n for j in range(-180,180,10):\r\n response = requests.get(\"http://api.weatherstack.com/current?access_key=4dd1a884407a4df6d2a4ba2af6a40019&query=\"+str(i)+\",\"+str(j))\r\n print(response.status_code)\r\n x = response.json()\r\n print(x)\r\n temp.append(x['current']['temperature'])\r\n cloud.append(x['current'][\"cloudcover\"])\r\n humidity.append(x['current'][\"humidity\"])\r\n\r\n#print(temp)\r\n#print(cloud)\r\n#print(humidity)\r\n\r\nshading1=[100,100,90,80,70,60,50,40,40,30,30,30,20,20,20,10],[100,90,80,70,60,60,50,40,40,30,30,20,20,20,10,10],[100,90,80,70,60,50,50,40,30,30,30,20,20,20,10,10],[100,90,80,70,60,50,40,40,30,30,20,20,20,20,10,10],[100,80,70,60,60,50,40,40,30,30,20,20,20,10,10,10],[90,80,70,60,50,50,40,30,30,30,20,20,20,10,10,10],[90,80,70,60,50,40,40,30,30,20,20,20,10,10,10,10],[90,80,70,60,50,40,40,30,30,20,20,20,10,10,10,10],[90,70,60,60,50,40,40,30,30,20,20,20,10,10,10,10]\r\nshading2=[100,100,80,70,60,60,50,40,40,30,30,20,20,20,20,10],[100,90,80,70,60,50,50,40,40,30,30,20,20,20,10,10],[100,90,80,70,60,50,40,40,30,30,30,20,20,20,10,10],[100,90,80,70,60,50,40,40,30,30,20,20,20,10,10,10],[100,80,70,60,50,50,40,40,30,30,20,20,20,10,10,10],[90,80,70,60,50,50,40,30,30,20,20,20,20,10,10,10],[90,80,70,60,50,40,40,30,30,20,20,20,10,10,10,10],[90,80,70,60,50,40,40,30,30,20,20,20,10,10,10,10],[80,70,60,50,50,40,30,30,20,20,20,10,10,10,10,10]\r\nshading3=[100,90,80,70,60,50,50,40,40,30,30,20,20,20,10,10],[100,90,80,70,60,50,50,40,30,30,30,20,20,20,10,10],[100,80,80,70,60,50,40,40,30,30,20,20,20,10,10,10],[100,80,70,60,60,50,40,40,30,30,20,20,20,10,10,10],[90,80,70,60,50,50,40,30,30,30,20,20,20,10,10,10],[90,80,70,60,50,40,40,30,30,20,20,20,10,10,10,10],[90,80,70,6,50,40,40,30,30,20,20,20,10,10,10,10],[90,70,60,50,50,40,30,30,30,20,20,20,10,10,10,10],[80,70,60,50,50,40,30,30,20,20,20,10,10,10,10,10]\r\nshading4=[100,90,80,70,60,50,50,40,30,30,30,20,20,20,10,10],[100,90,80,70,60,50,40,40,30,30,20,20,20,20,10,10],[100,80,70,60,60,50,40,40,30,30,20,20,20,10,10,10],[100,80,70,60,60,50,40,30,30,30,20,20,20,10,10,10],[90,80,70,60,50,40,40,30,30,20,20,20,10,10,10,10],[90,80,70,60,50,40,40,30,30,20,20,20,10,10,10,10],[90,70,60,60,50,40,40,30,30,20,20,20,10,10,10,10],[90,70,60,50,50,40,30,30,20,20,20,10,10,10,10,10],[80,70,60,50,40,40,30,30,20,20,20,10,10,10,10,10]\r\n\r\nfor k in range(0,len(cloud)):\r\n if (cloud[k]==0 and humidity[k]==0 and temp[k]==0):\r\n res.append(0)\r\n elif cloud[k]<=10:\r\n if temp[k]>=((110-32)/1.8):\r\n if round(humidity[k]*17/100)<2:\r\n res.append=shading1[0][0]\r\n else:\r\n w = round(humidity[k]*17/100)\r\n res.append(shading1[0][w-2])\r\n elif (temp[k]<=((109-32)/1.8) and temp[k]>=((100-32)/1.8)):\r\n if round(humidity[k]*17/100)<2:\r\n res.append(shading1[1][0])\r\n else:\r\n w = round(humidity[k]*17/100)\r\n res.append(shading1[1][w-2])\r\n print(4)\r\n elif (temp[k]<=((99-32)/1.8) and temp[k]>=((90-32)/1.8)):\r\n if round(humidity[k]*17/100)<2:\r\n res.append(shading1[2][0])\r\n else:\r\n w = round(humidity[k]*17/100)\r\n res.append(shading1[2][w-2])\r\n print(6)\r\n elif (temp[k]<=((89-32)/1.8) and temp[k]>=((80-32)/1.8)):\r\n if round(humidity[k]*17/100)<2:\r\n res.append(shading1[3][0])\r\n else:\r\n w = round(humidity[k]*17/100)\r\n res.append(shading1[3][w-2])\r\n elif (temp[k]<=((79-32)/1.8) and temp[k]>=((70-32)/1.8)):\r\n if round(humidity[k]*17/100)<2:\r\n res.append(shading1[4][0])\r\n else:\r\n w = 
round(humidity[k]*17/100)\r\n res.append(shading1[4][w-2])\r\n elif (temp[k]<=((69-32)/1.8) and temp[k]>=((60-32)/1.8)):\r\n if round(humidity[k]*17/100)<2:\r\n res.append(shading1[5][0])\r\n else:\r\n w = round(humidity[k]*17/100)\r\n res.append(shading1[5][w-2])\r\n elif (temp[k]<=((59-32)/1.8) and temp[k]>=((50-32)/1.8)):\r\n if round(humidity[k]*17/100)<2:\r\n res.append(shading1[6][0])\r\n else:\r\n w = round(humidity[k]*17/100)\r\n res.append(shading1[6][w-2])\r\n elif (temp[k]<=((49-32)/1.8) and temp[k]>=((40-32)/1.8)):\r\n if round(humidity[k]*17/100)<2:\r\n res.append(shading1[7][0])\r\n else:\r\n w = round(humidity[k]*17/100)\r\n res.append(shading1[7][w-2])\r\n elif (temp[k]<=((39-32)/1.8)):\r\n if round(humidity[k]*17/100)<2:\r\n res.append(shading1[8][0])\r\n else:\r\n w = round(humidity[k]*17/100)\r\n res.append(shading1[8][w-2])\r\n elif cloud[k]>10 and cloud[k]<=50:\r\n if temp[k]>=((110-32)/1.8):\r\n if round(humidity[k]*17/100)<2:\r\n res.append(shading2[0][0])\r\n else:\r\n w = round(humidity[k]*17/100)\r\n res.append(shading2[0][w-2])\r\n elif (temp[k]<=((109-32)/1.8) and temp[k]>=((100-32)/1.8)):\r\n if round(humidity[k]*17/100)<2:\r\n res.append(shading2[1][0])\r\n else:\r\n w = round(humidity[k]*17/100)\r\n res.append(shading2[1][w-2])\r\n elif (temp[k]<=((99-32)/1.8) and temp[k]>=((90-32)/1.8)):\r\n if round(humidity[k]*17/100)<2:\r\n res.append(shading2[2][0])\r\n else:\r\n w = round(humidity[k]*17/100)\r\n res.append(shading2[2][w-2])\r\n elif (temp[k]<=((89-32)/1.8) and temp[k]>=((80-32)/1.8)):\r\n if round(humidity[k]*17/100)<2:\r\n res.append(shading2[3][0])\r\n else:\r\n w = round(humidity[k]*17/100)\r\n res.append(shading2[3][w-2])\r\n elif (temp[k]<=((79-32)/1.8) and temp[k]>=((70-32)/1.8)):\r\n if round(humidity[k]*17/100)<2:\r\n res.append(shading2[4][0])\r\n else:\r\n w = round(humidity[k]*17/100)\r\n res.append(shading2[4][w-2])\r\n elif (temp[k]<=((69-32)/1.8) and temp[k]>=((60-32)/1.8)):\r\n if round(humidity[k]*17/100)<2:\r\n res.append(shading2[5][0])\r\n else:\r\n w = round(humidity[k]*17/100)\r\n res.append(shading2[5][w-2])\r\n elif (temp[k]<=((59-32)/1.8) and temp[k]>=((50-32)/1.8)):\r\n if round(humidity[k]*17/100)<2:\r\n res.append(shading2[6][0])\r\n else:\r\n w = round(humidity[k]*17/100)\r\n res.append(shading2[6][w-2])\r\n elif (temp[k]<=((49-32)/1.8) and temp[k]>=((40-32)/1.8)):\r\n if round(humidity[k]*17/100)<2:\r\n res.append(shading2[7][0])\r\n else:\r\n w = round(humidity[k]*17/100)\r\n res.append(shading2[7][w-2])\r\n elif (temp[k]<=((39-32)/1.8)):\r\n if round(humidity[k]*17/100)<2:\r\n res.append(shading2[8][0])\r\n else:\r\n w = round(humidity[k]*17/100)\r\n res.append(shading2[8][w-2])\r\n elif cloud[k]>50 and cloud[k]<=90:\r\n if temp[k]>=((110-32)/1.8):\r\n if round(humidity[k]*17/100)<2:\r\n res.append(shading3[0][0])\r\n else:\r\n w = round(humidity[k]*17/100)\r\n res.append(shading3[0][w-2])\r\n elif (temp[k]<=((109-32)/1.8) and temp[k]>=((100-32)/1.8)):\r\n if round(humidity[k]*17/100)<2:\r\n res.append(shading3[1][0])\r\n else:\r\n w = round(humidity[k]*17/100)\r\n res.append(shading3[1][w-2])\r\n elif (temp[k]<=((99-32)/1.8) and temp[k]>=((90-32)/1.8)):\r\n if round(humidity[k]*17/100)<2:\r\n res.append(shading3[2][0])\r\n else:\r\n w = round(humidity[k]*17/100)\r\n res.append(shading3[2][w-2])\r\n elif (temp[k]<=((89-32)/1.8) and temp[k]>=((80-32)/1.8)):\r\n if round(humidity[k]*17/100)<2:\r\n res.append(shading3[3][0])\r\n else:\r\n w = round(humidity[k]*17/100)\r\n res.append(shading3[3][w-2])\r\n elif 
(temp[k]<=((79-32)/1.8) and temp[k]>=((70-32)/1.8)):\r\n if round(humidity[k]*17/100)<2:\r\n res.append(shading3[4][0])\r\n else:\r\n w = round(humidity[k]*17/100)\r\n res.append(shading3[4][w-2])\r\n elif (temp[k]<=((69-32)/1.8) and temp[k]>=((60-32)/1.8)):\r\n if round(humidity[k]*17/100)<2:\r\n res.append(shading3[5][0])\r\n else:\r\n w = round(humidity[k]*17/100)\r\n res.append(shading3[5][w-2])\r\n elif (temp[k]<=((59-32)/1.8) and temp[k]>=((50-32)/1.8)):\r\n if round(humidity[k]*17/100)<2:\r\n res.append(shading3[6][0])\r\n else:\r\n w = round(humidity[k]*17/100)\r\n res.append(shading3[6][w-2])\r\n elif (temp[k]<=((49-32)/1.8) and temp[k]>=((40-32)/1.8)):\r\n if round(humidity[k]*17/100)<2:\r\n res.append(shading3[7][0])\r\n else:\r\n w = round(humidity[k]*17/100)\r\n res.append(shading3[7][w-2])\r\n elif (temp[k]<=((39-32)/1.8)):\r\n if round(humidity[k]*17/100)<2:\r\n res.append(shading3[8][0])\r\n else:\r\n w = round(humidity[k]*17/100)\r\n res.append(shading3[8][w-2])\r\n elif cloud[k]>90:\r\n if temp[k]>=((110-32)/1.8):\r\n if round(humidity[k]*17/100)<2:\r\n res.append(shading4[0][0])\r\n else:\r\n w = round(humidity[k]*17/100)\r\n res.append(shading4[0][w-2])\r\n elif (temp[k]<=((109-32)/1.8) and temp[k]>=((100-32)/1.8)):\r\n if round(humidity[k]*17/100)<2:\r\n res.append(shading4[1][0])\r\n else:\r\n w = round(humidity[k]*17/100)\r\n res.append(shading4[1][w-2])\r\n elif (temp[k]<=((99-32)/1.8) and temp[k]>=((90-32)/1.8)):\r\n if round(humidity[k]*17/100)<2:\r\n res.append(shading4[2][0])\r\n else:\r\n w = round(humidity[k]*17/100)\r\n res.append(shading4[2][w-2])\r\n elif (temp[k]<=((89-32)/1.8) and temp[k]>=((80-32)/1.8)):\r\n if round(humidity[k]*17/100)<2:\r\n res.append(shading4[3][0])\r\n else:\r\n w = round(humidity[k]*17/100)\r\n res.append(shading4[3][w-2])\r\n elif (temp[k]<=((79-32)/1.8) and temp[k]>=((70-32)/1.8)):\r\n if round(humidity[k]*17/100)<2:\r\n res.append(shading4[4][0])\r\n else:\r\n w = round(humidity[k]*17/100)\r\n res.append(shading4[4][w-2])\r\n elif (temp[k]<=((69-32)/1.8) and temp[k]>=((60-32)/1.8)):\r\n if round(humidity[k]*17/100)<2:\r\n res.append(shading4[5][0])\r\n else:\r\n w = round(humidity[k]*17/100)\r\n res.append(shading4[5][w-2])\r\n elif (temp[k]<=((59-32)/1.8) and temp[k]>=((50-32)/1.8)):\r\n if round(humidity[k]*17/100)<2:\r\n res.append(shading4[6][0])\r\n else:\r\n w = round(humidity[k]*17/100)\r\n res.append(shading4[6][w-2])\r\n elif (temp[k]<=((49-32)/1.8) and temp[k]>=((40-32)/1.8)):\r\n if round(humidity[k]*17/100)<2:\r\n res.append(shading4[7][0])\r\n else:\r\n w = round(humidity[k]*17/100)\r\n res.append(shading4[7][w-2])\r\n elif (temp[k]<=((39-32)/1.8)):\r\n if round(humidity[k]*17/100)<2:\r\n res.append(shading4[8][0])\r\n else:\r\n w = round(humidity[k]*17/100)\r\n res.append(shading4[8][w-2])\r\nprint(res)"
}
] | 1 |
antonio0728/21FCS390Z
|
https://github.com/antonio0728/21FCS390Z
|
56124fea87999c99ce8cea08f0c6122cb6c81b38
|
dee9da8741711a64bedc7536187066ca26ab91f2
|
59818d0ee24993a561978b65234d20712fc69c03
|
refs/heads/main
| 2023-08-10T10:48:30.060171 | 2021-09-01T23:54:43 | 2021-09-01T23:54:43 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5306496024131775,
"alphanum_fraction": 0.5397987365722656,
"avg_line_length": 21.306121826171875,
"blob_id": "7f53c591bf70a75e1af0255a2a22694ebfaae147",
"content_id": "5e04adae85b4e83698e6522c29a58dee599727f9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1093,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 49,
"path": "/activity_01_csv_load/src/csv_load.py",
"repo_name": "antonio0728/21FCS390Z",
"src_encoding": "UTF-8",
"text": "# CS390Z - Introduction to Data Minining - Fall 2021\n# Instructor: Thyago Mota\n# Description: Activity 01 - CSV Data Load\n\nimport mysql.connector \nimport csv\nimport os\nimport sys\n\n# definitions/parameters\nDATA_FOLDER = '../data'\nCSV_FILE_NAME = 'employees.csv'\nDB_HOST = 'localhost'\nDB_NAME = 'hr'\n\nif __name__ == \"__main__\":\n\n # TODO: get db connection parameters\n db_user = \n db_passwd = \n\n try:\n # TODO: connect to db\n db = mysql.connector.connect(\n host=,\n database=,\n user=,\n password=\n )\n print('DB connection successful!')\n\n # TODO: check if csv file exists\n \n\n # TODO: process csv file\n cursor = db.cursor()\n sql = 'INSERT INTO Employees VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)'\n count = 0\n with open(os.path.join(DATA_FOLDER, CSV_FILE_NAME), 'rt') as csv_file:\n \n print(count, 'record(s) inserted.')\n\n # TODO: close db connection\n \n\n print('Done!')\n \n except Exception as err: \n print(err)\n"
},
{
"alpha_fraction": 0.6581325531005859,
"alphanum_fraction": 0.6897590160369873,
"avg_line_length": 25.559999465942383,
"blob_id": "f011a1cbc6fc3ceb9f75937498179d03b8c85c0c",
"content_id": "af2f441a83f0a860326afb1dafa433115d06d9ed",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 664,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 25,
"path": "/hwk_02_weather_api/src/weather_api.py",
"repo_name": "antonio0728/21FCS390Z",
"src_encoding": "UTF-8",
"text": "# CS390Z - Introduction to Data Minining - Fall 2021\n# Instructor: Thyago Mota\n# Description: Homework 02 - Weather API\n\nimport requests\nimport json\nimport os\nfrom urllib.parse import urlencode\nfrom datetime import datetime\nimport time\nimport math\n\n# definitions/parameters\nDATA_FOLDER = os.path.join('..', 'data')\nLOCATIONS_FILE_NAME = 'locations.csv'\nJSON_FILE_NAME = 'weather.json'\nOPEN_WEATHER_API = 'http://api.openweathermap.org/data/2.5/weather'\nSLEEP_TIME = 5\n\ndef kelvin_fahrenheit(k):\n return math.floor((k - 273.15) * 9 / 5 + 32)\n\nif __name__ == \"__main__\":\n api_key = os.getenv('API_KEY')\n today = datetime.today().strftime(\"%Y-%m-%d %H:%M:%S\")\n"
},
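The weather_api.py skeleton above stops right after reading the API key and timestamp. A minimal sketch of one way to finish it, meant to run inside the skeleton's `__main__` block where `api_key` and `today` are already defined; it assumes `locations.csv` holds plain `city,state` rows with no header, uses the documented OpenWeather current-weather fields (`main.temp`, `main.temp_min`, `main.temp_max`, returned in Kelvin), and the `US` country code is an assumption:

```
import csv

results = []
with open(os.path.join(DATA_FOLDER, LOCATIONS_FILE_NAME), 'rt') as csv_file:
    for city, state in csv.reader(csv_file):
        # free-tier query by city name; 'US' is assumed as the country code
        params = urlencode({'q': f'{city},{state},US', 'appid': api_key})
        data = requests.get(OPEN_WEATHER_API + '?' + params).json()
        results.append({
            'today': today,
            'city': city,
            'state': state,
            'temp_min': kelvin_fahrenheit(data['main']['temp_min']),
            'temp_max': kelvin_fahrenheit(data['main']['temp_max']),
            'temp': kelvin_fahrenheit(data['main']['temp']),
        })
        time.sleep(SLEEP_TIME)  # throttle requests to respect the rate limit

with open(os.path.join(DATA_FOLDER, JSON_FILE_NAME), 'wt') as json_file:
    json.dump(results, json_file)
```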
{
"alpha_fraction": 0.7214137315750122,
"alphanum_fraction": 0.7318087220191956,
"avg_line_length": 21.809524536132812,
"blob_id": "5fbe29f68de5bafed7513dfc946b5df9807081b6",
"content_id": "97f0e098f850d57bdcf5639bc418a55944dc37d7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 481,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 21,
"path": "/activity_01_csv_load/README.md",
"repo_name": "antonio0728/21FCS390Z",
"src_encoding": "UTF-8",
"text": "# Activity 01\n\n## CSV Data Load to MySQL Database\n\n## Goal\n\nTo illustrate how to perform a typical data load that reads a CSV file and inserts the data records into a MySQL database. \n\n## Steps\n\n### Step 1 - Database Creation\n\nUse the employees.sql script to create the hr database with the Employees table. \n\n### Step 2 - Data Load Coding \n\nFinish the TO-DO's embedded in the csv_load.py script. \n\n### Step 3 - Verification\n\nVerify that all data records were loaded into MySQL. \n\n"
},
{
"alpha_fraction": 0.6620469093322754,
"alphanum_fraction": 0.6695095896720886,
"avg_line_length": 23.05128288269043,
"blob_id": "cf4f0ce7a042225eb92541336ede73731c1f47a4",
"content_id": "6483467558f6754c08d86f7add10db21735a450f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 938,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 39,
"path": "/activity_07_puppeteer/src/indeed_scrape.py",
"repo_name": "antonio0728/21FCS390Z",
"src_encoding": "UTF-8",
"text": "# CS390Z - Introduction to Data Minining - Fall 2021\n# Instructor: Thyago Mota\n# Description: A Simple Puppeteer Web Scraper\n\nimport asyncio\nimport pyppeteer\nimport os.path \nimport json\n\nURL = 'https://www.indeed.com/'\nCHROME_PATH = '/Applications/Google Chrome.app/Contents/MacOS/Google Chrome'\nDATA_PATH = '../data'\nJSON_FILE = 'jobs.json'\n\nasync def main():\n\n # TODO: launch browser\n\n\n # TODO: open target URL\n\n # TODO: fill-in the \"What\" input field (e.g., Data Analyst)\n\n # TODO: click on the submit button\n \n await page.waitForNavigation( waitUntil = 'load' )\n\n # TODO: extract the name of the companies and their job positions url (if avaialable)\n \n\n # close the browser\n await browser.close()\n\n # export to json \n with open(os.path.join(DATA_PATH, JSON_FILE), 'wt') as json_file:\n json.dump(jobs, json_file)\n\nif __name__ == \"__main__\":\n asyncio.get_event_loop().run_until_complete(main())\n"
},
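A hedged sketch of the TO-DO's in indeed_scrape.py above, reusing the skeleton's imports and constants. The CSS selectors (`#text-input-what`, `#text-input-where`, `.job_seen_beacon`, `.jobTitle`, `.companyName`) are guesses about Indeed's markup at the time and must be verified with the browser inspector; the rest uses standard pyppeteer calls:

```
async def main():
    # launch a visible Chrome instance so the navigation can be watched
    browser = await pyppeteer.launch(executablePath=CHROME_PATH, headless=False)
    page = await browser.newPage()
    await page.goto(URL)

    # fill-in the "What"/"Where" fields; the input ids are assumptions
    await page.type('#text-input-what', 'Data Analyst')
    await page.type('#text-input-where', 'Denver, CO')

    # submit and wait for the results page; gather avoids racing the navigation
    await asyncio.gather(
        page.waitForNavigation(waitUntil='load'),
        page.keyboard.press('Enter'),
    )

    # extract job titles and company names; the card selectors are assumptions
    jobs = []
    for card in await page.querySelectorAll('.job_seen_beacon'):
        title = await page.evaluate('(e) => e.querySelector(".jobTitle").textContent', card)
        company = await page.evaluate('(e) => e.querySelector(".companyName").textContent', card)
        jobs.append({'title': title, 'company': company})

    # close the browser and export to json
    await browser.close()
    with open(os.path.join(DATA_PATH, JSON_FILE), 'wt') as json_file:
        json.dump(jobs, json_file)
```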
{
"alpha_fraction": 0.6234527826309204,
"alphanum_fraction": 0.6716612577438354,
"avg_line_length": 37.375,
"blob_id": "8ccc2eee3dc34a3b140bbb4d0f1c468bd5614b8a",
"content_id": "c6185b5f49430af1ebfc93cdea4e6e2f763bac39",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1535,
"license_type": "no_license",
"max_line_length": 307,
"num_lines": 40,
"path": "/activity_06_bs4/README.md",
"repo_name": "antonio0728/21FCS390Z",
"src_encoding": "UTF-8",
"text": "# Activity 06\n\n## Write a Simple Web Scraper\n\n## Goal\nThe goal of this activity is have you write a simple web scraper using Beautiful Soup, saving the extracted information in JSON format. \n \n## Instructions\n\nBecause pages are structured differently, every scraper is unique. Open [https://covidcheckcolorado.org/find-our-sites-testing](https://covidcheckcolorado.org/find-our-sites-testing/) and use your browser code inspection tools to strategize your scraper. Your scrape should produce the following JSON file:\n\n```\n{\n \"name\": \"16th Street Mall\", \n \"saliva_testing\": true, \n \"address\": \"1600 California St\", \n \"city\": \"Denver\", \n \"state\": \"CO\", \n \"zipcode\": \"80202\", \n \"hours_of_operation\": \"Monday \\u2013 Friday, 12pm \\u2013 6:30pm\"\n}, \n{ \"name\": \"All City Stadium\", \n \"address\": \"1495 S. Race Street\", \n \"city\": \"Denver\", \n \"state\": \"CO\", \n \"zipcode\": \"80210\", \n \"hours_of_operation\": \"Monday \\u2013 Wednesday, 7am \\u2013 5pm; Thursday \\u2013 Friday, 7am \\u2013 1pm\"\n},\n{ \"name\": \"Littleton Park and Walk\", \n \"indoor_testing\": true, \n \"address\": \"190 East Littleton Blvd.\", \n \"city\": \"Littleton\", \n \"state\": \"CO\", \n \"zipcode\": \"80120\", \n \"hours_of_operation\": \"Monday \\u2013 Friday: 7am \\u2013 12pm\"\n}, \n...\n```\n\nNote that \"16th Street Mall\" provides \"saliva testing\", \"All City Stadium\" does NOT provide \"saliva testing\", and \"Littleton Park and Walk\" provides \"indoor testing\". That information needs to be extracted from the page and saved in a structured way. Hint: use regular expressions. "
},
{
"alpha_fraction": 0.7048457860946655,
"alphanum_fraction": 0.7356828451156616,
"avg_line_length": 27.5,
"blob_id": "fbb97f96caa1f8832e90f49e8116adf09b559fae",
"content_id": "89e26f1065eb5496ddfa1d2b5fbe561807db5bf6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 227,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 8,
"path": "/activity_02_csv_export/files/employees.sql",
"repo_name": "antonio0728/21FCS390Z",
"src_encoding": "UTF-8",
"text": "USE hr;\n\nGRANT FILE ON *.* TO 'hr_admin';\n\nSELECT * FROM Employees\nINTO OUTFILE '/Users/tmota/devel/teach/__21FCS390Z_DM__/activities/activity_02_csv_export/data/employees.csv'\nFIELDS TERMINATED BY ','\nLINES TERMINATED BY '\\n';"
},
{
"alpha_fraction": 0.6631853580474854,
"alphanum_fraction": 0.667754590511322,
"avg_line_length": 51.86206817626953,
"blob_id": "f453f1300ab6ce201b288b363ab7644a5d7653ee",
"content_id": "184f945e793637ed8f068ef4c9edf21c011cf620",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1532,
"license_type": "no_license",
"max_line_length": 218,
"num_lines": 29,
"path": "/activity_07_puppeteer/README.md",
"repo_name": "antonio0728/21FCS390Z",
"src_encoding": "UTF-8",
"text": "# Activity 07\n\n## Write a Simple Web Scraper\n\n## Goal\nThe goal of this activity is have you write a simple web scraper using Puppeteer, saving the extracted information in JSON format. \n \n## Instructions\n\nFinish the TO-DO's embedded in the provided code to scrape \"Data Analyst\" job posts in Denver. The resulting JSON format should have (at a minimum) the name of the position and the company that is hiring. For example: \n\n```\n[\n {\"title\": \"Data Analyst\", \"company\": \"Datalot\"}, \n {\"title\": \"newData Analyst (IT)\", \"company\": \"GC Associates USA\"}, \n {\"title\": \"Training Quality Analyst\", \"company\": \"UCHealth\"}, \n {\"title\": \"Customer & Innovation Analyst (Entry Level)\", \"company\": \"Xcel Energy\"}, \n {\"title\": \"Data Analyst\", \"company\": \"Rocky Mountain Partnership\"}, \n {\"title\": \"SimioCloud - Data Analyst \\u2013 Production Support\", \"company\": \"SimioCloud\"}, \n {\"title\": \"newData Analyst I\", \"company\": \"Computershare\"}, \n {\"title\": \"newData Analyst\", \"company\": \"Insight Global\"}, \n {\"title\": \"Data Analyst\", \"company\": \"Brooksource\"}, \n {\"title\": \"Data Analyst\", \"company\": \"NakedWines.com\"}, \n {\"title\": \"newData Analyst\", \"company\": \"Secure Innovate Technology\"}, \n {\"title\": \"Business Analyst\", \"company\": \"ECCO Select\"}, \n {\"title\": \"newBusiness Analyst, Equipment and Accessories\", \"company\": \"The North Face\"}, \n {\"title\": \"Crime Data Analyst Associate - Denver Police Department\", \"company\": \"City and County of Denver\"}, {\"title\": \"Data Analyst\", \"company\": \"Location3\"}\n]\n```"
},
{
"alpha_fraction": 0.47007977962493896,
"alphanum_fraction": 0.5910904407501221,
"avg_line_length": 59.15999984741211,
"blob_id": "39c8d9576d77e5f8441a13d46834e129ebab602d",
"content_id": "428236be1decd4dbd7f7f7f6df386fa7b079b257",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1504,
"license_type": "no_license",
"max_line_length": 302,
"num_lines": 25,
"path": "/hwk_02_weather_api/README.md",
"repo_name": "antonio0728/21FCS390Z",
"src_encoding": "UTF-8",
"text": "# Homework 02\n\n## Open Weather API\n\n## Goal\n\nTo illustrate how to use an API to collect weather-related data. \n\n## Instructions \n\nIn this assignment you are asked to register a free account on [https://openweathermap.org/](https://openweathermap.org/). Then study the API's documentation to extract weather info from locations described in data/locations.csv. Your program should save the collected info in JSON format, similar to: \n\n```\n[\n {\"today\": \"2021-09-01 14:41:32\", \"city\": \"Denver\", \"state\": \"CO\", \"temp_min\": 80, \"temp_max\": 91, \"temp\": 86}, \n {\"today\": \"2021-09-01 14:41:32\", \"city\": \"Colorado Springs\", \"state\": \"CO\", \"temp_min\": 70, \"temp_max\": 89, \"temp\": 82}, \n {\"today\": \"2021-09-01 14:41:32\", \"city\": \"Aspen\", \"state\": \"CO\", \"temp_min\": 54, \"temp_max\": 73, \"temp\": 64}, \n {\"today\": \"2021-09-01 14:41:32\", \"city\": \"Phoenix\", \"state\": \"AR\", \"temp_min\": 86, \"temp_max\": 94, \"temp\": 90}, \n {\"today\": \"2021-09-01 14:41:32\", \"city\": \"Tucson\", \"state\": \"AR\", \"temp_min\": 80, \"temp_max\": 89, \"temp\": 85},\n {\"today\": \"2021-09-01 14:41:32\", \"city\": \"Los Angeles\", \"state\": \"CA\", \"temp_min\": 67, \"temp_max\": 85, \"temp\": 73}, \n {\"today\": \"2021-09-01 14:41:32\", \"city\": \"Bethlehem\", \"state\": \"PA\", \"temp_min\": 64, \"temp_max\": 72, \"temp\": 66}, \n {\"today\": \"2021-09-01 14:41:32\", \"city\": \"Miami\", \"state\": \"FL\", \"temp_min\": 86, \"temp_max\": 96, \"temp\": 91}, \n {\"today\": \"2021-09-01 14:41:32\", \"city\": \"Boston\", \"state\": \"MA\", \"temp_min\": 64, \"temp_max\": 69, \"temp\": 66}\n]\n```\n"
},
{
"alpha_fraction": 0.5355704426765442,
"alphanum_fraction": 0.5436241626739502,
"avg_line_length": 25.60714340209961,
"blob_id": "92e2259c841c8f96678968200438e1681c281b0e",
"content_id": "0fb3c762193b4b4b6756e981e4b150a59e836379",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1490,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 56,
"path": "/activity_01_csv_load/sol/csv_load.py",
"repo_name": "antonio0728/21FCS390Z",
"src_encoding": "UTF-8",
"text": "# CS390Z - Introduction to Data Minining - Fall 2021\n# Instructor: Thyago Mota\n# Description: Activity 01 - CSV Data Load\n\nimport mysql.connector \nimport csv\nimport os\nimport sys\n\n# definitions/parameters\nDATA_FOLDER = '../data'\nCSV_FILE_NAME = 'employees.csv'\nDB_HOST = 'localhost'\nDB_NAME = 'hr'\n\nif __name__ == \"__main__\":\n\n # TODO: get db connection parameters\n db_user = os.getenv('DB_USER')\n db_passwd = os.getenv('DB_PASSWD')\n\n try:\n # TODOd: connect to db\n db = mysql.connector.connect(\n host= DB_HOST,\n database= DB_NAME,\n user=db_user,\n password=db_passwd\n )\n print('DB connection successful!')\n\n # TODOd: check if csv file exists\n file_name = os.path.join(DATA_FOLDER, CSV_FILE_NAME)\n if not file_name:\n print('Couldn\\'t find ' + file_name)\n sys.exit(1)\n\n # TODOd: process csv file\n cursor = db.cursor()\n sql = 'INSERT INTO Employees VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)'\n count = 0\n with open(os.path.join(DATA_FOLDER, CSV_FILE_NAME), 'rt') as csv_file:\n reader = csv.reader(csv_file)\n for row in reader:\n cursor.execute(sql, row)\n db.commit()\n count += 1\n print(count, 'record(s) inserted.')\n\n # TODOd: close db connection\n db.close()\n\n print('Done!')\n \n except Exception as err: \n print(err)\n"
},
{
"alpha_fraction": 0.787009060382843,
"alphanum_fraction": 0.8157099485397339,
"avg_line_length": 164.25,
"blob_id": "3d916c13963b1e6de9ae017634653519d0d06f55",
"content_id": "5b9443a08015019798cfcf6a11b9999676029fd0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 662,
"license_type": "no_license",
"max_line_length": 589,
"num_lines": 4,
"path": "/README.md",
"repo_name": "antonio0728/21FCS390Z",
"src_encoding": "UTF-8",
"text": "# 21FCS390Z\nCode for CS 390Z (Introduction to Data Mining), Fall 2021\n\nThis repository has code shared with my students in CS 390Z (Introduction to Data Mining), Fall 2021, at MSU Denver. This course is an introduction to data mining, a discipline whose overall goal is to obtain knowledge through the systematic search for patterns and relationships in rather large and potentially unstructured data. The huge amounts of data generated today are too large and complex to be processed and analyzed by traditional methods. This course aims to discuss frameworks and algorithms that can be used to transform raw data into useful information for decision making. \n"
},
{
"alpha_fraction": 0.669741690158844,
"alphanum_fraction": 0.6780442595481873,
"avg_line_length": 21.978723526000977,
"blob_id": "9c0849472172a14d661f8f02a9a6f2e3abbf3171",
"content_id": "dc7d4bfe5df1cff051913e253f959c6113822592",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1084,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 47,
"path": "/activity_05_gcp_cv/src/cv-pdf-extraction.py",
"repo_name": "antonio0728/21FCS390Z",
"src_encoding": "UTF-8",
"text": "# CS390Z - Introduction to Data Minining - Fall 2021\n# Instructor: Thyago Mota\n# Description: GCP Cloud Vision API Example\n\nimport json \nimport re \nimport os\nfrom google.cloud import vision\nfrom google.cloud import storage\n\n# definitions/parameters\nDATA_FOLDER = '../data'\nPDF_FILE_NAME = 'Purchase Order.pdf'\nJSON_FILE_NAME_SUFFIX = '.txtoutput-1-to-1.json'\nBUCKET_NAME = 'interminent-drips'\n\ndef get_uri(file_name):\n return 'gs://' + BUCKET_NAME + '/' + file_name\n\n# requirements: API_KEY environment variable\nif __name__ == \"__main__\":\n\n # TODO: instantiate a GCP's storage API client and get a reference to bucket\n \n\n # TODO: upload pdf file to bucket\n \n\n # TODO: instantiate the GCP's cloud vision API client\n \n \n # TODO: gather information about the source file\n \n\n # TODOO: gather information about the destination file\n \n\n # TODO: make the API call\n \n\n #TODO: monitor the status of the request \n \n\n # TODO: wait for the opeation to complete\n \n\n # TODO: now download the json blob file with the results of the conversion\n "
},
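A sketch of the TO-DO's in cv-pdf-extraction.py above, meant to run inside the skeleton's `__main__` block. It assumes google-cloud-vision >= 2 and google-cloud-storage (older releases expose the same types under `vision.types`); the output blob name follows the `JSON_FILE_NAME_SUFFIX` convention already defined in the skeleton:

```
# instantiate a storage client and get a reference to the bucket
storage_client = storage.Client()
bucket = storage_client.bucket(BUCKET_NAME)

# upload the pdf file to the bucket
bucket.blob(PDF_FILE_NAME).upload_from_filename(
    os.path.join(DATA_FOLDER, PDF_FILE_NAME))

# instantiate the cloud vision client
client = vision.ImageAnnotatorClient()

# source file in the bucket
input_config = vision.InputConfig(
    gcs_source=vision.GcsSource(uri=get_uri(PDF_FILE_NAME)),
    mime_type='application/pdf')

# destination prefix; Vision appends 'output-1-to-1.json' to it
output_config = vision.OutputConfig(
    gcs_destination=vision.GcsDestination(uri=get_uri(PDF_FILE_NAME + '.txt')),
    batch_size=1)

# make the async API call and wait for the operation to complete
operation = client.async_batch_annotate_files(requests=[
    vision.AsyncAnnotateFileRequest(
        features=[vision.Feature(type_=vision.Feature.Type.DOCUMENT_TEXT_DETECTION)],
        input_config=input_config,
        output_config=output_config)])
operation.result(timeout=300)

# download the json blob with the results of the conversion
result_blob = bucket.blob(PDF_FILE_NAME + JSON_FILE_NAME_SUFFIX)
result_blob.download_to_filename(
    os.path.join(DATA_FOLDER, PDF_FILE_NAME + JSON_FILE_NAME_SUFFIX))
```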
{
"alpha_fraction": 0.6819839477539062,
"alphanum_fraction": 0.6863602995872498,
"avg_line_length": 35.078948974609375,
"blob_id": "e03f59fa67790b2b4d330f480d5ae628ff3ca509",
"content_id": "dc5fb073017d33208e135a4361523ee47f5da2e8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1371,
"license_type": "no_license",
"max_line_length": 356,
"num_lines": 38,
"path": "/hwk_01_xlsx_load/README.md",
"repo_name": "antonio0728/21FCS390Z",
"src_encoding": "UTF-8",
"text": "# Homework 01\n\n## XLSX Parse to JSON\n\n## Goal\n\nTo illustrate how to perform a typical XLSX parse to JSON\n\n## Instructions \n\nIn this assignment you are NOT allowed to use any external library to read the given XLSX file directly. Instead, you should unzip the XLSX file and extract the following XML files: sharedStrings.xml and sheet1.xml. To be clear, after unzipping athletes.xlsx you should see the following file structure: \n\n```\nArchive: Athletes.xlsx\n [Content_Types].xml \n _rels/.rels \n xl/_rels/workbook.xml.rels \n xl/workbook.xml \n xl/sharedStrings.xml \n xl/theme/theme1.xml \n xl/styles.xml \n xl/worksheets/sheet1.xml \n docProps/core.xml \n docProps/app.xml\n```\n\nThe shared strings file should be read first and be used to create a list of all strings referenced in the document. You should then read sheet1.xml and extract all of its rows. Note that the content of each cell maps to the index in the shared string list. Finally, your parser should then saved the extracted information in json, using the format below: \n\n```\n[\n {\"name\": \"AALERUD Katrine\", \"noc\": \"Norway\", \"discipline\": \"Cycling Road\"}, \n {\"name\": \"ABAD Nestor\", \"noc\": \"Spain\", \"discipline\": \"Artistic Gymnastics\"}, \n {\"name\": \"ABAGNALE Giovanni\", \"noc\": \"Italy\", \"discipline\": \"Rowing\"},\n ...\n]\n```\n\nHint: use the \"xml\" parser from beautiful soup. "
},
{
"alpha_fraction": 0.7040358781814575,
"alphanum_fraction": 0.713004469871521,
"avg_line_length": 16.153846740722656,
"blob_id": "4504955323c346b93b3039f1789022a4ec7b7895",
"content_id": "b0a0de02b8f708a93629bbef95997e3feae3253b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 446,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 26,
"path": "/activity_03_quotes_api/README.md",
"repo_name": "antonio0728/21FCS390Z",
"src_encoding": "UTF-8",
"text": "# Activity 03\n\n## Quotes API\n\n## Goal\n\nTo illustrate how to perform a typical web service consumption using a RESTful API\n\n## Steps\n\n### Step 1 - Setup Destination Folder\n\nCreate a 'data' folder\n\n\n### Step 2 - Code Part \n\nFinish the TO-DO's embedded in quotes_api.py. Below are the list of quote fields that we are interested in: \n\n* text\n* author\n* tags\n* category\n* date\n\nRemember, the output must be saved in a json file (one quote per line). "
},
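The quotes_api.py skeleton later in this repo leaves the request and parsing as TO-DO's. One hedged completion, reusing the skeleton's imports and constants; the `contents`/`quotes` nesting and the per-quote keys (`quote`, `author`, ...) are assumptions about the quotes.rest payload, so print the raw response once to confirm them (the free tier also rate-limits, so a non-200 status is common):

```
result = requests.get(QUOTES_API_URL)

if result.status_code == 200:
    raw_json = json.loads(result.content.decode('utf-8'))
    quotes = []
    # 'contents' -> 'quotes' and the per-quote keys are assumptions about
    # the quotes.rest response shape; inspect raw_json before relying on them
    for q in raw_json['contents']['quotes']:
        quotes.append({'text': q.get('quote'),
                       'author': q.get('author'),
                       'tags': q.get('tags'),
                       'category': q.get('category'),
                       'date': q.get('date')})

    # append quotes to the json file, one quote per line
    with open(os.path.join(DATA_FOLDER, JSON_FILE_NAME), 'at') as json_file:
        for quote in quotes:
            json_file.write(json.dumps(quote) + '\n')
else:
    print('Ops, something didn\'t work!')
    print(result.content)
```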
{
"alpha_fraction": 0.6690140962600708,
"alphanum_fraction": 0.6866196990013123,
"avg_line_length": 22.5,
"blob_id": "6452caf71327654c6e3cd8d3a1f2e547d6833655",
"content_id": "a0d68971c4ff6e8c70e61670b32e0ac1017a217e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 568,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 24,
"path": "/hwk_01_xlsx_load/src/xlsx_load.py",
"repo_name": "antonio0728/21FCS390Z",
"src_encoding": "UTF-8",
"text": "# CS390Z - Introduction to Data Minining - Fall 2021\n# Instructor: Thyago Mota\n# Student:\n# Description: Homework 01 - XLSX Data Load\n\nfrom bs4 import BeautifulSoup\nimport os\nimport json\n\n# definitions/parameters\nDATA_FOLDER = '../data'\nATHLETES_FILE_NAME = 'athletes.xml'\nSS_FILE_NAME = 'sharedStrings.xml'\nJSON_FILE_NAME = 'athletes.json'\n\nif __name__ == \"__main__\":\n\n # TODO: creates a list with all strings found in \"sharedStrings.xml\"\n \n\n # TODO: read contents of \"athletes.xml\" into a list of dictionaries\n \n \n # TODO: write list into json file\n "
},
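One way to approach the TO-DO's in xlsx_load.py above, reusing the skeleton's imports and constants. It assumes the unzipped XML files sit in `DATA_FOLDER`, that every cell in this particular workbook is a shared string (cells with `t="s"` store an index into the shared-string list), and that bs4's `'xml'` parser (which needs lxml) is available:

```
# build the shared-string list first: every referenced string is a <t> element
with open(os.path.join(DATA_FOLDER, SS_FILE_NAME), 'rt') as xml_file:
    strings = [t.text for t in BeautifulSoup(xml_file.read(), 'xml').find_all('t')]

# read the sheet rows; each <c t="s"> cell's <v> holds a shared-string index
athletes = []
with open(os.path.join(DATA_FOLDER, ATHLETES_FILE_NAME), 'rt') as xml_file:
    sheet = BeautifulSoup(xml_file.read(), 'xml')
for row in sheet.find_all('row')[1:]:  # skip the header row
    values = [strings[int(c.v.text)] if c.get('t') == 's' else c.v.text
              for c in row.find_all('c')]
    athletes.append(dict(zip(['name', 'noc', 'discipline'], values)))

# write the list into the json file
with open(os.path.join(DATA_FOLDER, JSON_FILE_NAME), 'wt') as json_file:
    json.dump(athletes, json_file)
```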
{
"alpha_fraction": 0.5916473269462585,
"alphanum_fraction": 0.6403712034225464,
"avg_line_length": 19.571428298950195,
"blob_id": "1629a6a3eef68f98266a95f701d70a4d6b556995",
"content_id": "a9e4df918dc7a73fa45f9e223a4e73818986e04c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 431,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 21,
"path": "/activity_01_csv_load/files/employees.sql",
"repo_name": "antonio0728/21FCS390Z",
"src_encoding": "UTF-8",
"text": "CREATE DATABASE hr;\n\nUSE hr;\n\nCREATE TABLE Employees (\n id INT PRIMARY KEY, \n name VARCHAR(20),\n gender CHAR(1),\n email VARCHAR(40), \n birth DATE, \n start DATE, \n salary INT, \n ssn VARCHAR(11), \n phone VARCHAR(12)\n);\n\nCREATE USER 'hr' IDENTIFIED BY '024680';\nCREATE USER 'hr_admin' IDENTIFIED BY '135791';\n\nGRANT SELECT ON TABLE Employees TO 'hr';\nGRANT ALL ON TABLE Employees TO 'hr_admin';"
},
{
"alpha_fraction": 0.738471269607544,
"alphanum_fraction": 0.7523689270019531,
"avg_line_length": 41.72972869873047,
"blob_id": "e60f300f48e9d6db1b03cc4791346743a0d7f5a8",
"content_id": "265bb3d168b4fbf95cfb969d8c5fb92885fd9f0b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1583,
"license_type": "no_license",
"max_line_length": 410,
"num_lines": 37,
"path": "/activity_05_gcp_cv/README.md",
"repo_name": "antonio0728/21FCS390Z",
"src_encoding": "UTF-8",
"text": "# Activity 05\n\n## PDF Content Extraction using GCP's Cloud Vision API\n\n## Goal\nThe goal of this activity is to illustrate how to use GCP's Cloud Vision API to extract information from a PDF document. \n \n## Steps\n\n### Step 1 - Setup a GCP Project\n\nFollow the steps described under Resources - GCP 101 to create a new project named \"cv-pdf\". Make sure to enable the Cloud Vision API on this project. This time you will need to create a service account (instead of an API KEY). \n\n* Choose Credentials\n* Manage service accounts\n* Create Service Account\n * Name: cv-pdf-account\n * Role: owner\n* Create private key for the service account (type json)\n\nCopy the json file to a protected folder (for example, files under this activity main folder). Then set the GOOGLE_APPLICATION_CREDENTIALS environment variable with the path to the credentials file. \n\n```\nexport GOOGLE_APPLICATION_CREDENTIALS=../files/cv-pdf-323419-a6ddc5a33453.json\n```\n\n### Step 2 - Create a GCP Bucket\n\nGCP's Cloud Vision API requires that converted pdf files to be stored in a GCP's storage bucket. Select \"Cloud Storage\" in your GCP web console and then browser. If this is the first time you are using this service you need to enable billing. After that, create a bucket with an unique name, e.g. \"interminent-drips\". Choose \"Region\" for the location type for your bucket and \"Standard\" for the storage class. \n\n### Step 3 - Write the Code\n\nComplete the TO-DO's in the code found in src. \n\n### Step 4 - Test\n\nBegin testing the Purchase Order pdf file found under data. Try other PDF files if you want to. \n"
},
{
"alpha_fraction": 0.6824034452438354,
"alphanum_fraction": 0.7038626670837402,
"avg_line_length": 17.83783721923828,
"blob_id": "0e3159f1f58b6357cb1515362d6692f814bd8103",
"content_id": "afccd39e43041a38c6ef26bbfcf2f9203aae29b3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 699,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 37,
"path": "/activity_02_csv_export/README.md",
"repo_name": "antonio0728/21FCS390Z",
"src_encoding": "UTF-8",
"text": "# Activity 02\n\n## MySQL Table Export to CSV\n\n## Goal\n\nTo illustrate how to perform a typical data export of a MySQL table to a CSV file\n\n## Steps\n\n### Step 1 - Setup Destination Folder\n\nCreate a 'data' folder and open its permission to all users. \n\n```\nmkdir data\nchmod 777 data\n```\n\n### Step 2 - Grant FILE Permissions to a DB User \n\n```\nGRANT FILE ON *.* TO 'hr_admin';\n```\n\n### Step 3 - Export Table Employees\n\nReplace the path to your data folder. \n\n```\nSELECT * FROM Employees\nINTO OUTFILE '/Users/tmota/devel/teach/__21FCS390Z_DM__/activities/activity_02_csv_export/data/employees.csv'\nFIELDS TERMINATED BY ','\nLINES TERMINATED BY '\\n';\n```\n\nVerify that the table was exported successfully. \n\n"
},
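After Step 3, a quick Python cross-check of the export is to compare the CSV row count against `SELECT COUNT(*) FROM Employees;` (the relative path below assumes the 'data' folder created in Step 1):

```
import csv

# count the exported rows; should match SELECT COUNT(*) FROM Employees;
with open('data/employees.csv', newline='') as csv_file:
    print(sum(1 for _ in csv.reader(csv_file)), 'row(s) exported')
```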
{
"alpha_fraction": 0.6872586607933044,
"alphanum_fraction": 0.7374517321586609,
"avg_line_length": 22.636363983154297,
"blob_id": "2177867bf2dbbbd1e656d244f1f9ee6961f8e94a",
"content_id": "8f98bc4fd10c8d2ae253eb099cfd7fdefefd0aea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 259,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 11,
"path": "/activity_04_gcp_maps/README.md",
"repo_name": "antonio0728/21FCS390Z",
"src_encoding": "UTF-8",
"text": "# Activity 04\n\n## Google's Map API\n\n## Goal\n\nTo illustrate how to use the Google Cloud Platform (GCP) Static Map API. \n\n## Steps\n\nFollow the steps described on Canvas at Resources - [GCP 101](https://msudenver.instructure.com/courses/47959/pages/cs-390z-gcp)."
},
{
"alpha_fraction": 0.6438188552856445,
"alphanum_fraction": 0.7037943601608276,
"avg_line_length": 30.423076629638672,
"blob_id": "6f715bf44291092ef627688cdac33c921d7bc965",
"content_id": "da7727091d92f0cc4b590790170ba86d2963f2c1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 817,
"license_type": "no_license",
"max_line_length": 144,
"num_lines": 26,
"path": "/activity_06_bs4/src/bs4_scraper.py",
"repo_name": "antonio0728/21FCS390Z",
"src_encoding": "UTF-8",
"text": "# CS390Z - Introduction to Data Minining - Fall 2021\n# Instructor: Thyago Mota\n# Description: A Simple Web Scraper\n\nimport json \nimport re \nimport os\nimport requests\nfrom bs4 import BeautifulSoup\n\n# definitions/parameters\nDATA_FOLDER = '../data'\nJSON_FILE_NAME = 'covid_testing_denver.json'\nBASE_URL = 'https://covidcheckcolorado.org/find-our-sites-testing/'\nHEADERS = {\"User-Agent\": \"Mozilla/5.0 (X11; CrOS x86_64 12871.102.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.141 Safari/537.36\"}\n\ndef remove_tags(s): \n tag = re.compile('<.*?>')\n return re.sub(tag, '', s)\n\nif __name__ == \"__main__\":\n\n # TODO: get list of covid-19 testing centers in Denver from BASE_URL, saving them in a dictionary with the structure described in README\n \n\n # TODO: save covid-19 testing centers in Denver to json\n"
},
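A hedged outline for the TO-DO's in bs4_scraper.py above, reusing the skeleton's imports and helpers. The covidcheckcolorado.org markup isn't reproduced here, so the `site-card` class and the inner `<h3>` tag are placeholders to replace after inspecting the live page; the regular-expression part follows the hint in the activity's README:

```
page = requests.get(BASE_URL, headers=HEADERS)
soup = BeautifulSoup(page.content, 'html.parser')

centers = []
# 'site-card' and the <h3> title tag are hypothetical selectors -- inspect
# the real page and substitute the actual classes before running this
for card in soup.find_all('div', class_='site-card'):
    text = remove_tags(str(card))
    center = {'name': card.find('h3').get_text(strip=True)}
    # the testing-type flags are only present when mentioned in the card text
    if re.search(r'saliva testing', text, re.I):
        center['saliva_testing'] = True
    if re.search(r'indoor testing', text, re.I):
        center['indoor_testing'] = True
    # city/state/zip pattern, e.g. 'Denver, CO 80202'
    match = re.search(r'([A-Za-z. ]+), ([A-Z]{2}) (\d{5})', text)
    if match:
        center['city'], center['state'], center['zipcode'] = match.groups()
    centers.append(center)

# save covid-19 testing centers in Denver to json
with open(os.path.join(DATA_FOLDER, JSON_FILE_NAME), 'wt') as json_file:
    json.dump(centers, json_file)
```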
{
"alpha_fraction": 0.5865671634674072,
"alphanum_fraction": 0.6059701442718506,
"avg_line_length": 21.200000762939453,
"blob_id": "5652ac4ffbc16cdf129d3623b87585b108e5fb1c",
"content_id": "c8a959552e32d8cc1692ad5ef03f268b9b0cb2c6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 670,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 30,
"path": "/activity_03_quotes_api/src/quotes_api.py",
"repo_name": "antonio0728/21FCS390Z",
"src_encoding": "UTF-8",
"text": "# CS390Z - Introduction to Data Minining - Fall 2021\n# Instructor: Thyago Mota\n# Description: Activity 03 - Quotes API\n\nimport requests\nimport json\nimport os\n\n# definitions/parameters\nDATA_FOLDER = '../data'\nJSON_FILE_NAME = 'quotes.json'\nQUOTES_API_URL = 'http://quotes.rest/qod'\n\nif __name__ == \"__main__\":\n\n # TODO: send the request to the API\n result = \n\n # TODO: process the response\n if result.status_code == 200:\n raw_json = json.loads(result.content.decode('utf-8'))\n quotes = []\n \n \n # TODO: append quotes to json file\n \n\n else:\n print('Ops, something didn\\'t work!')\n print(result.content)\n\n\n\n\n"
}
] | 20 |
doutiansheng/rengongshenjingwangluo
|
https://github.com/doutiansheng/rengongshenjingwangluo
|
afecd761f70e229c48af075c61810938130ad40c
|
8e848471b5b5e03802051baa769e208e926b313b
|
435edf6d0253f96a219c3dfaecc965f521f9b631
|
refs/heads/master
| 2020-08-02T20:47:11.133533 | 2019-10-02T07:03:47 | 2019-10-02T07:03:47 | 211,502,151 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5787709355354309,
"alphanum_fraction": 0.6279329657554626,
"avg_line_length": 14.701754570007324,
"blob_id": "2ed16ff6a7a3bd62c99e82630d3abf711d6e2c34",
"content_id": "be9ebb733243aaf402170feed9899fb5a8c80dbf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1089,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 57,
"path": "/42017070001network.py",
"repo_name": "doutiansheng/rengongshenjingwangluo",
"src_encoding": "UTF-8",
"text": "\"\"\"\n=——表示赋值\n==——表示判断是否相等\n感知器分类算法(部分代码):\neta:学习率\nn_iter:权重向量的训练次数\nw_:神经分叉权重向量\nerrors_:用于记录神经元判断出错次数\n\"\"\"\n import numpy as np\nclass Perceptron(object):\ndef__int__(self,eta=0.01,n_iter=10):\n\tself.eta=eta;\n\tself.n_iter=n_iter\n\tpass\ndef fit(self,x,y):\n\"\"\"\n输入训练数据,培训神经元,x输入样本向量,y对应样本分类\nx:shape[n_samples,n_features]\nx:[[1,2,3],[4,5,6]]\nn_samples:2\nn_features:3\n\ny:[1,-1]\n\"\"\"\n\"\"\"\n初始化权重向量为0\n\"\"\"\nself.w_=np.zero(X.shape[1])\nself.errors_=[] \nfor _ in range(self.n_iter):\nerrors=0\n\"\"\"\nx:[[1,2,3],[4,5,6]]\ny:[1,-1]\nzip(x,y)=[[1,2,3,1],[4,5,6,-1]]\n\"\"\"\nfor xi,target in zip(X,y):\n\"\"\"\nupdate=N*(y-y')\n\"\"\"\nupdate=self.eta*(target - self.predict(xi))\nself.w_[1:]*=update * xi\nself.w_[0]+=update;\nerrors += int(update !=0.0)\nself.errors_.append(errors)\n\tpass\n\n\tpass\ndef net_input(self,X):\nreturn np.dot(X,self.w_[1:]) + self.w_[0]\n\tpass\n\ndef predict(self,X):\nreturn np.where(self.net_input(X)>=0.0,1,-1)\npass\npass\n"
}
] | 1 |
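With the indentation and update rule fixed as above, a quick sanity check on a tiny linearly separable set (the point values are chosen arbitrarily for illustration):

```
import numpy as np

# four 2-D points, two per class
X = np.array([[2.0, 1.0], [1.5, 2.0], [-1.0, -1.5], [-2.0, -1.0]])
y = np.array([1, 1, -1, -1])

p = Perceptron(eta=0.1, n_iter=10)
p.fit(X, y)
print(p.errors_)      # misclassifications per pass; should drop to 0
print(p.predict(X))   # expected: [ 1  1 -1 -1]
```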
samarajackson/login_wall
|
https://github.com/samarajackson/login_wall
|
2f458327aca8e0c81e57d05802d62cefb5b24ed9
|
a2ff58ba46cc900fbc24811bafd448ba29245391
|
eca25cc62bfc6e25bcb48d9cccfc2b40a5229128
|
refs/heads/master
| 2022-01-29T07:32:35.718380 | 2019-11-14T19:43:28 | 2019-11-14T19:43:28 | 221,757,294 | 0 | 0 | null | 2019-11-14T18:00:13 | 2019-11-14T19:44:05 | 2022-01-21T20:06:40 |
Python
|
[
{
"alpha_fraction": 0.6405472755432129,
"alphanum_fraction": 0.6492537260055542,
"avg_line_length": 41.33333206176758,
"blob_id": "e33077dfaedf0973456609ed8028c851c1c3bd84",
"content_id": "6d2d2ea7662f46377af9324c96b139cd403a1292",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2412,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 57,
"path": "/apps/login_app/models.py",
"repo_name": "samarajackson/login_wall",
"src_encoding": "UTF-8",
"text": "from django.db import models\nfrom django.core.validators import validate_email\nfrom django.core.exceptions import ValidationError\nfrom datetime import datetime\n\nclass UserManager(models.Manager):\n def basic_validator(self,postData):\n errors = {}\n now = datetime.now()\n maxdate = now.replace(year=now.year - 13).date()\n bday = postData['bday']\n print(f\"bday from form: {bday}\")\n print(f\"max date is {maxdate}\")\n if len(postData['first'])<2 :\n errors['first'] = \"First name should be at least 2 characters\"\n if len(postData['last'])<2:\n errors['last']= \"Last name should be at least 2 characters\"\n if len(postData['pw'])<2:\n errors['pw']= \"Password should be at least 8 characters\"\n if postData['pw'] != postData['pw2']:\n errors['pw2'] = \"Passwords do not match\"\n if User.objects.filter(email = postData['email']):\n errors['email_not_unique']=\"Email address already is used for an existing account\"\n if bday == \"\":\n errors['bday_required']=\"Birthday is a required field\"\n else:\n bday = datetime.strptime(postData['bday'], \"%Y-%m-%d\").date()\n if maxdate <= bday:\n errors['age']=\"You must be at least 13 years old to register\"\n try:\n validate_email(postData['email'])\n except ValidationError:\n errors[\"email\"] = \"Invalid email\"\n return errors\n\nclass User(models.Model):\n first = models.CharField(max_length=45)\n last = models.CharField(max_length=45)\n email = models.CharField(max_length=45)\n bday = models.DateField(null=True)\n pw =models.CharField(max_length=180)\n objects = UserManager()\n #user_comments\n\nclass Message(models.Model):\n user_id= models.ForeignKey(User,related_name=\"user_messages\", on_delete=models.CASCADE)\n message = models.TextField()\n created_at=models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n #message_comments\n\nclass Comment(models.Model):\n message_id = models.ForeignKey(Message,related_name=\"message_comments\", on_delete=models.CASCADE)\n user_id=models.ForeignKey(User,related_name=\"user_comments\", on_delete=models.CASCADE)\n comment = models.TextField()\n created_at=models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)"
},
{
"alpha_fraction": 0.6243368983268738,
"alphanum_fraction": 0.6253315806388855,
"avg_line_length": 29.775510787963867,
"blob_id": "dfd5c199d5cc18685c6289f2cffb9a35d7229f6e",
"content_id": "5fb213f14dd1f414fe802e873b016051343aa831",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3016,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 98,
"path": "/apps/login_app/views.py",
"repo_name": "samarajackson/login_wall",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render, redirect, HttpResponse\nfrom .models import User, UserManager, Message, Comment\nfrom django.contrib import messages\nfrom datetime import datetime, timedelta\nimport bcrypt\n\n# Create your views here.\n\n\ndef index(request):\n return render(request, \"index.html\")\n\n\ndef register(request):\n errors = User.objects.basic_validator(request.POST)\n if len(errors) > 0:\n for key, value in errors.items():\n messages.error(request, value)\n return redirect(\"/\")\n else:\n first_name = request.POST[\"first\"]\n last = request.POST[\"last\"]\n email = request.POST[\"email\"]\n pw = request.POST[\"pw\"]\n bday = request.POST[\"bday\"]\n # bday = datetime.strptime(bday, \"%Y-%m-%d\").date()\n pw_hash = bcrypt.hashpw(pw.encode(), bcrypt.gensalt())\n user = User.objects.create(\n first=first_name, last=last, email=email, bday=bday, pw=pw_hash)\n request.session[\"userid\"] = user.id\n return redirect(\"/success\")\n\n\ndef success(request):\n if \"userid\" in request.session:\n user = User.objects.get(id=request.session[\"userid\"])\n messages = Message.objects.all()\n context = {\n \"user\": user,\n\n \"messages\": messages\n }\n return render(request, \"home.html\", context)\n else:\n return redirect(\"/\")\n\n\ndef logout(request):\n if \"userid\" in request.session:\n del request.session[\"userid\"]\n return redirect(\"/\")\n\n\ndef login(request):\n if User.objects.filter(email=request.POST['email']):\n user = User.objects.get(email=request.POST['email'])\n if bcrypt.checkpw(request.POST['pw'].encode(), user.pw.encode()):\n request.session[\"userid\"] = user.id\n return redirect(\"/success\")\n else:\n context = {\n \"error\": \"Password is incorrect.\"\n }\n else:\n context = {\n \"error\": \"There is no account with that email address.\"\n }\n return render(request, \"index.html\", context)\n\n\ndef post_message(request):\n userid = request.session[\"userid\"]\n user = User.objects.get(id=userid)\n message = request.POST[\"message\"]\n message = Message.objects.create(user_id=user, message=message)\n return redirect(\"/success\")\n\n\ndef post_comment(request, message_id):\n userid = request.session[\"userid\"]\n user = User.objects.get(id=userid)\n message = Message.objects.get(id=message_id)\n comment = request.POST[\"comment\"]\n comment = Comment.objects.create(\n user_id=user, message_id=message, comment=comment)\n return redirect('/success')\n\n\ndef delete(request, message_id):\n userid = request.session[\"userid\"]\n user = User.objects.get(id=userid)\n message = Message.objects.get(id=message_id)\n now = datetime.now()\n timediff = now - message.created_at\n timediff = timediff.total_minutes()\n if message.user_id.id == user.id and timediff < 30:\n message.delete()\n return redirect(\"/success\")\n"
},
{
"alpha_fraction": 0.5227765440940857,
"alphanum_fraction": 0.5965293049812317,
"avg_line_length": 22.049999237060547,
"blob_id": "06687bff85839e4f1d8a7bdba9dcdedb5bf7a33d",
"content_id": "9fa8644b2829c8abdee3af9367389329f2407924",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 461,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 20,
"path": "/apps/login_app/migrations/0004_auto_20191113_0420.py",
"repo_name": "samarajackson/login_wall",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10 on 2019-11-13 04:20\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('login_app', '0003_auto_20191113_0216'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='user',\n name='email',\n field=models.CharField(max_length=45, unique=True),\n ),\n ]\n"
}
] | 3 |
jcoffi/Deep-QNetwork
|
https://github.com/jcoffi/Deep-QNetwork
|
49eff39627d45042e3af19738a923dbf675dc615
|
e31d4ebc3ccd1f74c7d5bc97977b85559fdb6c95
|
7cb3d28a6a0cfe782a7c3e3440bc861aab824142
|
refs/heads/master
| 2021-12-20T19:19:08.389938 | 2017-04-25T02:07:56 | 2017-04-25T02:07:56 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5710073709487915,
"alphanum_fraction": 0.5874692797660828,
"avg_line_length": 34.094825744628906,
"blob_id": "2eaf3c94e072f907ebd60bc206313107b69b8892",
"content_id": "721b21a85bed0edb2eeac19eb8b3ae2f0848276c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4074,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 116,
"path": "/deep_qnetwork.py",
"repo_name": "jcoffi/Deep-QNetwork",
"src_encoding": "UTF-8",
"text": "from collections import deque\nfrom random import sample\n\nimport tensorflow as tf\nimport numpy as np\nimport gym\n\nfrom neural_network import NeuralNetwork\nfrom models import Model\n\n\nclass DeepQNetwork(object):\n \"\"\"\n\n \"\"\"\n def __init__(self, net_structure: list, exp_length, gamma):\n self.num_observations = net_structure[0]\n self.num_actions = net_structure[-1]\n self.gamma = gamma\n self.epsilon = 1\n\n #\n self.min_epsilon = 0.1\n self.delta__epsilon = 0.001\n self.learn_count = 0\n self.update_freq = 100\n\n self.experience = deque(maxlen=exp_length) # stored past experience for training\n\n self.q_network = NeuralNetwork(net_structure, scope='q_network')\n self.q_target_network = NeuralNetwork(net_structure, scope='target_network')\n\n def choose_action(self, observation):\n if np.random.uniform() > self.epsilon:\n assert observation.size == self.num_observations\n x = observation.reshape([1, self.num_observations])\n q_values = self.q_network.output.eval(feed_dict={self.q_network.x: x}) # use the q-net to estimate Q-values\n return np.argmax(q_values) # return the action corresponds to the highest estimated Q-value\n else: # random choose\n return np.random.randint(0, self.num_actions)\n\n def record(self, observation, action, reward, next_observation):\n self.experience.append((observation, action, reward, next_observation))\n\n def learn(self, batch_size, session):\n if self.learn_count % self.update_freq == 0:\n self.q_target_network.copy_network(self.q_network, session=session)\n print('q_target_network updated @ learn_count ', self.learn_count)\n\n batch = sample(self.experience, batch_size)\n\n state = np.array([exp[0] for exp in batch], dtype=np.float64) # φ_t\n action = np.array([exp[1] for exp in batch], dtype=np.int32)\n reward = np.array([exp[2] for exp in batch], dtype=np.float64)\n next_state = np.array([exp[3] for exp in batch], dtype=np.float64) # φ_{t+1}\n\n # compute max Q from the next states\n q_next = self.q_target_network.output.eval(feed_dict={self.q_target_network.x: next_state}) # Q(φ_{t+1}, a'; θ)\n target_reward = reward + np.max(q_next, axis=1) * self.gamma\n\n # build q_target\n q_target = self.q_network.output.eval(feed_dict={self.q_network.x: state})\n for row, a, r in zip(q_target, action, target_reward):\n row[a] = r\n\n # training\n loss = self.q_network.train(state, q_target, session=session)\n\n # update\n self.epsilon = max(self.min_epsilon, self.epsilon - self.delta__epsilon)\n self.learn_count += 1\n\n\ndef main():\n dqn = DeepQNetwork([4, 50, 50, 2], # structure of neural net\n exp_length=3000, # size of experience pool\n gamma=0.7) # discount factor\n\n env = gym.make('CartPole-v0')\n step_count = 0\n\n with tf.Session() as sess:\n Model.start_new_session(sess)\n\n for i_episode in range(10000):\n observation = env.reset()\n print('episode', i_episode)\n\n while True:\n env.render()\n\n action = dqn.choose_action(observation)\n new_observation, _, done, info = env.step(action)\n\n # compute reward\n x, x_dot, theta, theta_dot = new_observation\n r1 = (env.x_threshold - abs(x))/env.x_threshold - 0.8\n r2 = (env.theta_threshold_radians - abs(theta))/env.theta_threshold_radians - 0.5\n reward = r1 + r2\n\n # record\n dqn.record(observation, action, reward, new_observation)\n\n # learn\n if step_count>1000:\n dqn.learn(batch_size=320, session=sess)\n\n step_count += 1\n observation = new_observation\n\n if done:\n break\n\n\nif __name__ == '__main__':\n main()"
},
{
"alpha_fraction": 0.4541814923286438,
"alphanum_fraction": 0.47330960631370544,
"avg_line_length": 28.578947067260742,
"blob_id": "c56e1115135a6e17bbf73104a31d1efd761bf215",
"content_id": "d80dfe68455b380f35836b812a55a882a8668020",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2248,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 76,
"path": "/data_manager.py",
"repo_name": "jcoffi/Deep-QNetwork",
"src_encoding": "UTF-8",
"text": "import json\nimport socket\n\nimport requests\nfrom requests import ConnectionError, Timeout, HTTPError\n\n\nclass DataManager(object):\n \"\"\"\n\n \"\"\"\n def __init__(self):\n pass\n\n def get_prices(self, ticker):\n \"\"\"\n\n \"\"\"\n years = [('2017-01-01', '2017-03-05')] + \\\n [('{}-01-01'.format(y), '{}-12-31'.format(y)) for y in range(2016, 1999, -1)]\n\n quotes = []\n for start_date, end_date in years:\n query = 'select * from yahoo.finance.historicaldata where ' \\\n 'symbol = \"{}\" and startDate = \"{}\" and endDate = \"{}\"'.format(ticker, start_date, end_date)\n params = {'q': query,\n 'format': 'json',\n 'env': 'store://datatables.org/alltableswithkeys',\n 'callback': ''}\n url = 'https://query.yahooapis.com/v1/public/yql'\n while True:\n timeout = False\n try:\n r = requests.get(url, params=params, timeout=(3.05, 3.05))\n except (ConnectionError, Timeout, HTTPError) as e:\n print(e)\n timeout = True\n except Exception as e:\n print(e)\n print('type:', type(e))\n timeout = True\n\n if (not timeout) and r:\n break\n\n ans = r.json()\n count = ans['query']['count']\n\n if count > 0:\n if count == 1:\n quotes.append(ans['query']['results']['quote'])\n else:\n quotes.extend(ans['query']['results']['quote'])\n\n quotes.reverse()\n\n with open('prices/{}.json'.format(ticker), 'w') as file:\n json.dump(quotes, file)\n\n def download(self, companies_file):\n with open(companies_file) as file:\n companies = json.load(file)\n for i, company in enumerate(companies):\n symbol = company['Symbol']\n print('downloading data of', symbol, '{}/{}'.format(i+1, len(companies)))\n\n self.get_prices(symbol)\n\n\ndef main():\n dm = DataManager()\n dm.download('companies.json')\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.5783811211585999,
"alphanum_fraction": 0.5855532884597778,
"avg_line_length": 31.549999237060547,
"blob_id": "70e9acb78341a0c38305d43eb3a4aa2d1fd13724",
"content_id": "df105cee969637d44417493b523a4a339e60ffec",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1952,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 60,
"path": "/neural_network.py",
"repo_name": "jcoffi/Deep-QNetwork",
"src_encoding": "UTF-8",
"text": "\"\"\"\n\n\"\"\"\nimport tensorflow as tf\n\nfrom models import Model, FullyConnected\n\n\nclass NeuralNetwork(object):\n \"\"\"\n\n \"\"\"\n def __init__(self, neuron_list: list, scope):\n with tf.variable_scope(scope) as scope:\n # placeholder of input\n self.x = tf.placeholder(tf.float32, shape=[None, neuron_list[0]])\n\n # build hidden layers\n self.layers = []\n x = self.x\n for i, num_neurons in enumerate(neuron_list[1:], start=1):\n activation = tf.nn.relu if i < (len(neuron_list)-1) else None # NO activation for the last layer\n\n # create a fully connected layer\n fully_connected_layer = FullyConnected(num_neurons, activation=activation, scope='layer_{}'.format(i))\n self.layers.append(fully_connected_layer)\n x = fully_connected_layer(x)\n\n self.output = x\n\n self.y = tf.placeholder(tf.float32, shape=[None, neuron_list[-1]]) # training labels\n self.loss = tf.nn.l2_loss(self.y - self.output)\n self.training = tf.train.AdamOptimizer(1e-4).minimize(self.loss)\n # self.training = tf.train.RMSPropOptimizer(0.1).minimize(self.loss)\n\n\n def train(self, x, y, session: tf.Session):\n _, loss = session.run([self.training, self.loss], feed_dict={self.x: x, self.y: y})\n return loss\n\n def copy_network(self, network, session: tf.Session):\n \"\"\"\n\n :param network:\n :type network: NeuralNetwork\n :return:\n \"\"\"\n assert len(self.layers) == len(network.layers)\n for self_layer, source_layer in zip(self.layers, network.layers): # type: FullyConnected\n \"\"\":type : FullyConnected\"\"\"\n session.run(tf.assign(self_layer.weights, source_layer.weights))\n session.run(tf.assign(self_layer.bias, source_layer.bias))\n\n\ndef main():\n pass\n\n\nif __name__ == '__main__':\n main()"
}
] | 3 |
JacquesFernandes/py-nav
|
https://github.com/JacquesFernandes/py-nav
|
35568bb5e62f46ceee5630f06f3380d295f446a6
|
bce5f5f6ca97c0f4fec54d5514574dfc3e9ce7bc
|
1875d098001dc5788f51a79c1db0a10539d94f9e
|
refs/heads/master
| 2021-01-21T01:43:49.507108 | 2016-07-03T12:36:09 | 2016-07-03T12:36:09 | 62,494,960 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6825000047683716,
"alphanum_fraction": 0.6850000023841858,
"avg_line_length": 35.3636360168457,
"blob_id": "28e8428cf2c01673ae2e795a814bd0f001877511",
"content_id": "e198bb7bb2aa56eb59dfca57de0c210ead33067f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 400,
"license_type": "permissive",
"max_line_length": 95,
"num_lines": 11,
"path": "/README.md",
"repo_name": "JacquesFernandes/py-nav",
"src_encoding": "UTF-8",
"text": "# py-nav\nPyNav - Directory traversal management module for python3 (Linux)\n\n# Usage\n - importing:\n - from py-nav import DirectoryNavigator\n# TL;DR version of code:\n - See __main__ at bottom for example of usage\n - init flags:\n - root : path string (if blank, will take current directory as root)\n - verbose : if set (True) the code will be, well, verbose... (pretty obvious, isn't it?)\n"
},
{
"alpha_fraction": 0.6119951009750366,
"alphanum_fraction": 0.613831102848053,
"avg_line_length": 23.02941131591797,
"blob_id": "2bebb29cecc361dd497ada39513d0c3cf1397ac5",
"content_id": "1638f95b194debad3d0920872f0a72cc134b0684",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1634,
"license_type": "permissive",
"max_line_length": 90,
"num_lines": 68,
"path": "/py-nav.py",
"repo_name": "JacquesFernandes/py-nav",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n'''\nPyNav - Directory traversal management module for python3 (Linux)\nAuthor: Jacques \"Lawrenz\" Fernandes\n\nTL;DR version of code:\n - See __main__ at bottom for example of usage\n - init flags:\n - root : path string (if blank, will take current directory as root)\n - verbose : if set (True) the code will be, well, verbose... (pretty obvious, isn't it?)\n'''\n\n\nimport os;\n\nclass DirectoryNavigator:\n\t\n\tdef __init__(self,root=os.getcwd(),verbose=False):\n\t\tself.root = root+\"/\";\n\t\tself.verbose = verbose;\n\t\tself.print(\"Root : \"+self.root);\n\t\tself.dir_stack = list();\n\t\tself.print(\"Dir stack initialized...\");\n\t\tself.print(\"starting...\");\n\t\t#self.cd(root);\n\t\t\n\tdef print(self,msg,**args):\n\t\tif self.verbose:\n\t\t\tprint(msg,**args);\n\t\n\tdef cd(self,path):\n\t\tpath=path.strip(\" \").rstrip(\"/\");\n\t\tpath_set = path.split(\"/\");\n\t\tfor loc in path_set:\n\t\t\tif loc == \"..\":\n\t\t\t\tif len(self.dir_stack) > 0:\n\t\t\t\t\tself.dir_stack.pop();\n\t\t\t\telse:\n\t\t\t\t\tself.print(\" :: ERROR: Cannot go above set root...\");\n\t\t\telse:\n\t\t\t\tdir_list = os.listdir();\n\t\t\t\tif loc in dir_list:\n\t\t\t\t\tself.dir_stack.append(loc);\n\t\t\t\t\tself.print(\"moved to \",end=\"\");\n\t\t\t\t\tself.print_path();\n\t\t\t\t\tself.print(\"\");\n\t\t\t\telse:\n\t\t\t\t\tself.print(\" :: ERROR: Not a valid Directory...\");\n\t\n\t\n\tdef print_path(self):\n\t\tpath_string = self.root;\n\t\tfor loc in self.dir_stack:\n\t\t\tpath_string+=loc+\"/\";\n\t\tself.print(path_string);\n\t\n\tdef prompt(self):\n\t\tself.print_path();\n\t\tnew_path = input(\" -> \");\n\t\tself.cd(new_path);\n\t\t\t\t\nif __name__ == \"__main__\":\n\tnav = DirectoryNavigator(verbose=True);\n\ttry:\n\t\twhile True:\n\t\t\tnav.prompt();\n\texcept KeyboardInterrupt:\n\t\tprint(\"\\nExiting...\");\n"
}
] | 2 |
kamalesh101994/python_workspace
|
https://github.com/kamalesh101994/python_workspace
|
d774443dd2ff9021f47f11bfa56b6a85c01a05fa
|
c1375a99342c4009add97a1a511815ea093dfd6a
|
b05ad2bb15e8d6e1ad2a62b19d83bab92f8936b4
|
refs/heads/master
| 2023-06-27T16:50:59.757966 | 2021-07-29T09:30:31 | 2021-07-29T09:30:31 | 390,670,824 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5714285969734192,
"alphanum_fraction": 0.5714285969734192,
"avg_line_length": 12,
"blob_id": "d2350aa0b4d6f9c9e70c2015871b550f91601a72",
"content_id": "6b261fefcee54e2b226c1822b52edd63b530f390",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14,
"license_type": "no_license",
"max_line_length": 12,
"num_lines": 1,
"path": "/test.py",
"repo_name": "kamalesh101994/python_workspace",
"src_encoding": "UTF-8",
"text": "print(\"KKK\")\n\n"
}
] | 1 |
willbrom/python-script
|
https://github.com/willbrom/python-script
|
401a0868df2f8728a92fc5f3875978a9ec32ecda
|
3fa7e1009f405abab747977c5da48bb33d256ed3
|
c0162096e49115389f562532ad3ffd37d18bb790
|
refs/heads/master
| 2021-01-24T00:43:16.122346 | 2018-03-03T14:00:30 | 2018-03-03T14:00:30 | 122,775,732 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6381856799125671,
"alphanum_fraction": 0.6882911324501038,
"avg_line_length": 25.704225540161133,
"blob_id": "26cc4970f83d7169a69478537ad011764eaa3560",
"content_id": "a252b232f38f570828396a3bfe34aa17e3f9ba60",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1896,
"license_type": "no_license",
"max_line_length": 174,
"num_lines": 71,
"path": "/hello.py",
"repo_name": "willbrom/python-script",
"src_encoding": "UTF-8",
"text": "print(\"Hello Friend!\")\n\ndef cylinder_volume(height, radius):\n\tpi = 3.14159\n\treturn height * pi * radius**2\n\ndef days_to_weeks(days):\n\tweeks = days//7\n\tday = days%7\n\treturn \"{} week(s) and {} day(s)\".format(weeks, day)\n\ndef months_to_years(months):\n\tyears = months//12\n\tmonth = months%12\n\treturn \"{} year(s) and {} month(s)\".format(years, month)\n\ndef months_alive(age):\n\treturn age*12\n\nvolume = cylinder_volume(10, 3)\n\ndef get_rating_from_five_scores(score1, score2, score3, score4, score5):\n\tscore1 = int(score1)\n\tscore2 = int(score2)\n\tscore3 = int(score3)\n\tscore4 = int(score4)\n\tscore5 = int(score5)\n\n\trequired_total_score = remove_outliers(score1, score2, score3, score4, score5)\n\taverage_score = required_total_score / 3\n\n\treturn get_rating_from_avg_score(average_score)\n\ndef remove_outliers(score1, score2, score3, score4, score5):\n\ttotal_score = score1 + score2 + score3 + score4 + score5\n\ttotal_score_no_outlier = total_score - min(score1, score2, score3, score4, score5) - max(score1, score2, score3, score4, score5)\n\treturn total_score_no_outlier\n\ndef get_rating_from_avg_score(average_score):\n\tif 0 <= average_score < 1:\n\t\treturn \"Terrible\"\n\telif 1 <= average_score < 2:\n\t\treturn \"Bad\"\n\telif 2 <= average_score < 3:\n\t\treturn \"OK\"\n\telif 3 <= average_score < 4:\n\t\treturn \"Good\"\n\telif 4 <= average_score <= 5:\n\t\treturn \"Excellent\"\n\telse:\n\t\treturn \"Rating not recognized\"\n\nscore1 = 0\nscore2 = 1\nscore3 = 1\nscore4 = 1\nscore5 = 5\n\nprint(volume)\nprint(days_to_weeks(10))\nprint(months_to_years(32))\nprint(\"Approx {} months!\".format(months_alive(24)))\nprint(\"Rating of these scores({}, {}, {}, {}, {}) is: {}\".format(score1, score2, score3, score4, score5, get_rating_from_five_scores(score1, score2, score3, score4, score5)))\n\nname_list = ['john', 'mavrik', 'Legend']\nname_list_with_line_break = \"-\".join(name_list)\n\nprint(name_list_with_line_break)\n\nfor name in name_list:\n\tprint(name.title())\n"
},
{
"alpha_fraction": 0.7352941036224365,
"alphanum_fraction": 0.7352941036224365,
"avg_line_length": 16,
"blob_id": "c7ecfce65074c8cfbcfbd40b6160454fa05947db",
"content_id": "93bfed267048a4f8c01da5286334d1bcce42bee0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 34,
"license_type": "no_license",
"max_line_length": 17,
"num_lines": 2,
"path": "/README.md",
"repo_name": "willbrom/python-script",
"src_encoding": "UTF-8",
"text": "# python-script\nPython scripts...\n"
},
{
"alpha_fraction": 0.5694577693939209,
"alphanum_fraction": 0.5880436897277832,
"avg_line_length": 25.358585357666016,
"blob_id": "907c3a562ba2d3f5eedba12a27188e926a7ca4c7",
"content_id": "419befc8c6d81e05b5adf8021148f221a1760b6f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5219,
"license_type": "no_license",
"max_line_length": 138,
"num_lines": 198,
"path": "/script_1.py",
"repo_name": "willbrom/python-script",
"src_encoding": "UTF-8",
"text": "\"\"\"\nCapitalize each item of a list and update the list\n\"\"\"\nnames = ['charlotte hippopotamus turner', 'oliver st. john-mollusc',\n 'nigel incubator-jones', 'philip diplodocus mallory']\nprint(names)\n\nfor index in range(len(names)) :\n names[index] = names[index].title()\n\nprint(names)\n\n\n\"\"\"\nCreate Html list\n\"\"\"\ndef html_list(str_list):\n start_item_tag = \"<li>\"\n end_item_tag = \"</li>\"\n list_body = \"\"\n for each_str in str_list:\n list_body += start_item_tag + str(each_str) + end_item_tag + \"\\n\"\n return \"<ul>\\n\" + list_body + \"</ul>\"\n\nprint(html_list([\"strings\", 2.0, True, \"and other types too!\"]))\n\n\n\"\"\"\nSquare number\n\"\"\"\ndef square_num(limit):\n answer = 0\n while (answer+1)**2 < limit:\n answer += 1\n return answer**2\n\nprint(square_num(89))\n\n\n\"\"\"\nCargo problem\n\"\"\"\n# each item in the manifest is an item and its weight\nmanifest = [[\"bananas\", 15], [\"mattresses\", 34], [\"dog kennels\",42], [\"machine that goes ping!\", 120], [\"tea chests\", 10], [\"cheeses\", 0]]\n\ndef cargo_problem(manifest, weight_limit):\n weight_reached = 0\n for item in manifest:\n if weight_limit >= weight_reached + item[1]:\n weight_reached += item[1]\n else:\n break\n return weight_reached\n\nprint(cargo_problem(manifest, 52))\n\n\n\"\"\"\nCreate String from list exactly 140 char long\n\"\"\"\nheadlines = [\"Local Bear Eaten by Man\",\n \"Legislature Announces New Laws\",\n \"Peasant Discovers Violence Inherent in System\",\n \"Cat Rescues Fireman Stuck in Tree\",\n \"Brave Knight Runs Away\",\n \"Papperbok Review: Totally Triffic\"]\n\nnews_ticker = \"\"\n\nfor headline in headlines:\n news_ticker += headline + \" \"\n if len(news_ticker) >= 140:\n news_ticker = news_ticker[:140]\n break\n\nprint(len(news_ticker))\n\n\n\"\"\"\nRefactor code\n\"\"\"\n# Orignal code\ndef check_answers(my_answers,answers):\n \"\"\"\n Checks the five answers provided to a multiple choice quiz and returns the results.\n \"\"\"\n results= [None, None, None, None, None]\n if my_answers[0] == answers[0]:\n results[0] = True\n elif my_answers[0] != answers[0]:\n results[0] = False\n if my_answers[1] == answers[1]:\n results[1] = True\n elif my_answers[1] != answers[1]:\n results[1] = False\n if my_answers[2] == answers[2]:\n results[2] = True\n elif my_answers[2] != answers[2]:\n results[2] = False\n if my_answers[3] == answers[3]:\n results[3] = True\n elif my_answers[3] != answers[3]:\n results[3] = False\n if my_answers[4] == answers[4]:\n results[4] = True\n elif my_answers[4] != answers[4]:\n results[4] = False\n count_correct = 0\n count_incorrect = 0\n for result in results:\n if result == True:\n count_correct += 1\n if result != True:\n count_incorrect += 1\n if count_correct/5 > 0.7:\n return \"Congratulations, you passed the test! You scored \" + str(count_correct) + \" out of 5.\"\n elif count_incorrect/5 >= 0.3:\n return \"Unfortunately, you did not pass. You scored \" + str(count_correct) + \" out of 5.\"\n\nprint(check_answers(['a', 'a', 'a', 'd', 'e'], ['a', 'b', 'c', 'd', 'e']))\n\n# Refacored code\ndef check_answers_refactored(my_answers, answers):\n correct_count = 0\n for index in range(len(answers)):\n if my_answers[index] == answers[index]:\n correct_count += 1\n if correct_count/len(answers) > 0.7:\n return \"Congratulations, you passed the test! You scored \" + str(correct_count) + \" out of \" + str(len(answers)) + \".\"\n else:\n return \"Unfortunately, you did not pass. 
You scored \" + str(correct_count) + \" out of \" + str(len(answers)) + \".\"\n\nprint(check_answers_refactored(['a', 'b', 'c', 'a', 'e', 'f', 'g'], ['a', 'b', 'c', 'd', 'e', 'f', 'g']))\n\n\n\"\"\"\nRemoving duplicate\n\"\"\"\ndef remove_duplicates(list_wtih_duplicates):\n new_list = []\n contains = False\n for index in range(len(list_wtih_duplicates)):\n for index_new_list in range(len(new_list)):\n if new_list[index_new_list] == list_wtih_duplicates[index]:\n contains = True\n break\n if not contains:\n new_list.append(list_wtih_duplicates[index])\n else:\n contains = False\n return new_list\n\nprint(remove_duplicates(['john', 'lucy', 'anivia', 'john', 'james', 'harry']))\n\n\n\"\"\"\nRemoving duplicate refactor\n\"\"\"\ndef remove_duplicates_refactor(source):\n target = []\n for element in source:\n if element not in target:\n target.append(element)\n return target\n\nprint(remove_duplicates_refactor(['john', 'lucy', 'anivia', 'john', 'james', 'harry']))\n\n\n\"\"\"\nRemoving duplicates using a set\n\"\"\"\ndef remove_duplicates_with_set(source):\n source_set = set(source)\n return source_set\n\nprint(remove_duplicates_with_set(['john', 'lucy', 'anivia', 'john', 'james', 'harry']))\n\n\n\"\"\"\nNearest square with sets\n\"\"\"\ndef all_squares_till_limit(limit):\n square_set = set()\n square_num = 0\n while (square_num+1)**2 < limit:\n square_set.add((square_num+1)**2)\n square_num += 1\n return square_set\n\nprint(len(all_squares_till_limit(2000)))\n\n\n\"\"\"\nDictionary\n\"\"\"\npopulation = {'Istanbul': 13.3, 'Karachi': 13.0}\n\nprint(population['Mumbai'])\n"
},
{
"alpha_fraction": 0.5368111729621887,
"alphanum_fraction": 0.5803974866867065,
"avg_line_length": 28.13157844543457,
"blob_id": "068e0e1aeda048231e8ac6a43219de38b8d845c2",
"content_id": "f1706016bbc2c5903497cfe007e06f45d2719ee8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4428,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 152,
"path": "/script_2.py",
"repo_name": "willbrom/python-script",
"src_encoding": "UTF-8",
"text": "\"\"\"\nCountry problem\n\"\"\"\nfrom countries import country_list\n\ncountry_counts = {}\nfor country in country_list:\n if country not in country_counts:\n country_counts[country] = 1\n else:\n country_counts[country] += 1\n\nprint(len(country_counts))\n\n\n\n\"\"\"\nAlbums problem\n\"\"\"\ndef most_prolific(album_dict):\n year = {}\n for album in album_dict:\n album_year = album_dict[album]\n if album_year not in year:\n year[album_year] = 1\n else:\n year[album_year] += 1\n\n year_count = []\n for year_key in year:\n year_count.append(year[year_key])\n\n max_num = max(year_count)\n for year_key in year:\n if year[year_key] == max_num:\n return year_key\n\nBeatles_Discography = {\"Please Please Me\": 1963, \"With the Beatles\": 1963,\n \"A Hard Day's Night\": 1964, \"Beatles for Sale\": 1964, \"Twist and Shout\": 1964, \"Lollipop\": 1964,\n \"Help\": 1965, \"Rubber Soul\": 1965, \"Revolver\": 1966,\n \"Sgt. Pepper's Lonely Hearts Club Band\": 1967,\n \"Magical Mystery Tour\": 1967, \"The Beatles\": 1968,\n \"Yellow Submarine\": 1969 ,'Abbey Road': 1969,\n \"Let It Be\": 1970}\n\ndicc = {'Rubber Soul': 1965, 'Magical Mystery Tour': 1967, \"Sgt. Pepper's Lonely Hearts Club Band\": 1967, 'Revolver': 1966,\n 'The Beatles': 1968, 'With the Beatles': 1963, 'Beatles for Sale': 1964,\n 'Yellow Submarine': 1969, \"A Hard Day's Night\": 1964, 'Help': 1965,\n 'Let It Be': 1970, 'Abbey Road': 1969,\n 'Twist and Shout': 1964, 'Please Please Me': 1963}\n# 1964\nprint(most_prolific(Beatles_Discography))\nprint(most_prolific(dicc))\n\n\n\n\"\"\"\nCircus problem\n\"\"\"\ndef total_takings(monthly_takings):\n total_takings = 0\n for month in monthly_takings:\n takings = monthly_takings[month]\n for taking in takings:\n total_takings += taking\n return total_takings\n\nmonthly_takings = {'January': [54, 63], 'February': [64, 60], 'March': [63, 49],\n 'April': [57, 42], 'May': [55, 37], 'June': [34, 32],\n 'July': [69, 41, 32], 'August': [40, 61, 40], 'September': [51, 62],\n 'October': [34, 58, 45], 'November': [67, 44], 'December': [41, 58]}\n\nprint(total_takings(monthly_takings))\n\n\n\n\"\"\"\nRefactored Circus problem\n\"\"\"\ndef total_takings_ref(monthly_takings):\n #total is used to sum up the monthly takings\n total = 0\n for month in monthly_takings.keys():\n #I use the Python function sum to sum up over\n #all the elements in a list\n total = total + sum(monthly_takings[month])\n return total\n\n\n\n\"\"\"\nTuple example\n\"\"\"\ndef hours2days(hours):\n days = hours//24\n hour = hours%24\n return days, hour\n\nprint(hours2days(10000))\n\n\n\n\"\"\"\nDefault args\n\"\"\"\ndef print_list(l, numbered=False, bullet_character='-'):\n \"\"\"Prints a list on multiple lines, with numbers or bullets\n\n Arguments:\n l: The list to print\n numbered: set to True to print a numbered list\n bullet_character: The symbol placed before each list element. 
This is\n ignored if numbered is True.\n \"\"\"\n for index, element in enumerate(l):\n if numbered:\n print(\"{}: {}\".format(index+1, element))\n else:\n print(\"{} {}\".format(bullet_character, element))\n\nprint_list([\"cats\", \"in\", \"space\"])\n\n\n\n\"\"\"\nCreate list from txt file\n\n['Graham Chapman', 'Eric Idle', 'Terry Jones', 'Michael Palin', 'Terry Gilliam', 'John Cleese', 'Carol Cleveland', 'Ian Davidson',\n'John Hughman', 'The Fred Tomlinson Singers', 'Connie Booth', 'Bob Raymond', 'Lyn Ashley', 'Rita Davies', 'Stanley Mason', 'David Ballantyne',\n'Donna Reading', 'Peter Brett', 'Maureen Flanagan', 'Katya Wyeth', 'Frank Lester', 'Neil Innes', 'Dick Vosburgh', 'Sandra Richards',\n'Julia Breck', 'Nicki Howorth', 'Jimmy Hill', 'Barry Cryer', 'Jeannette Wild', 'Marjorie Wilde', 'Marie Anderson', 'Caron Gardner',\n'Nosher Powell', 'Carolae Donoghue', 'Vincent Wong', 'Helena Clayton', 'Nigel Jones', 'Roy Gunson', 'Daphne Davey', 'Stenson Falke',\n'Alexander Curry', 'Frank Williams', 'Ralph Wood', 'Rosalind Bailey', 'Marion Mould', 'Sheila Sands', 'Richard Baker', 'Douglas Adams',\n'Ewa Aulin', 'Reginald Bosanquet', 'Barbara Lindley', 'Roy Brent', 'Jonas Card', 'Tony Christopher', 'Beulah Hughes', 'Peter Kodak', 'Lulu',\n'Jay Neill', 'Graham Skidmore', 'Ringo Starr', 'Fred Tomlinson', 'David Hamilton', 'Suzy Mandel', 'Peter Woods']\n\"\"\"\ndef create_cast_list(filename):\n cast_list = []\n #use with to open the file filename\n #use the for loop syntax to process each line\n #and add the actor name to cast_list\n with open(filename) as f:\n for line in f:\n cast_list.append(line.strip())\n\n for index in range(len(cast_list)):\n cast = cast_list[index].split(',')\n cast_list[index] = cast[0]\n\n return cast_list\n\nprint(create_cast_list('txt files/flying_circus_cast.txt'))\n"
}
] | 4 |
Fors3cDream/py3_ml
|
https://github.com/Fors3cDream/py3_ml
|
36022b40858c3f55d18a378eb544a06591e9e913
|
b3b511aa69e0dde4b37c227f80951982ce2fb7b6
|
a4d9cc37fc207c988c10789d7a946974698b5aad
|
refs/heads/master
| 2020-04-17T21:52:23.819882 | 2019-01-28T07:47:51 | 2019-01-28T07:47:51 | 166,970,389 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7586206793785095,
"alphanum_fraction": 0.7931034564971924,
"avg_line_length": 28,
"blob_id": "cd45212ac98cdee1ee11bbf4c1d95d84c5263773",
"content_id": "273b526ea6f79fac2813cebd2579110853ad871d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 29,
"license_type": "no_license",
"max_line_length": 28,
"num_lines": 1,
"path": "/README.md",
"repo_name": "Fors3cDream/py3_ml",
"src_encoding": "UTF-8",
"text": "# py3 machine learning codes\n"
},
{
"alpha_fraction": 0.6593567132949829,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 31.619047164916992,
"blob_id": "c296dedcb9b3aa811cb78d38f4fc24e60603a5bb",
"content_id": "6ff697f9ddf0bf0a056fc6f96f4c55eb040bb58e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 702,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 21,
"path": "/Linear-Regression/05-Regression-Metrics-MSE-vs-MAE/playML/metrics.py",
"repo_name": "Fors3cDream/py3_ml",
"src_encoding": "UTF-8",
"text": "import numpy as np\n\ndef accuracy_score(y_true, y_predict):\n \"\"\"计算y_true和y_predict之间的准确率\"\"\"\n assert y_true.shape[0] == y_predict.shape[0], \"the size of y_true must be equal to the size of y_predict\"\n\n return sum(y_true == y_predict) / len(y_true)\n\ndef mean_squared_error(y_true, y_predict):\n \"\"\" MSE \"\"\"\n assert len(y_true) == len(y_predict), \"the size of y_true must be equal to the size of y_predict\"\n\n return np.sum((y_true - y_predict)**2)/len(y_true)\n\n\ndef root_mean_squared_error(y_true, y_predict):\n return mean_squared_error(y_true,y_predict)**0.5\n\n\ndef mean_absolute_error(y_true, y_predict):\n return np.sum(np.absolute(y_true - y_predict))/len(y_true)"
}
] | 2 |
AlphaLambdaMuPi/EECampTeaching
|
https://github.com/AlphaLambdaMuPi/EECampTeaching
|
8390f4723fa3cf922bda9091961fc6edc5e2cf65
|
6cea279d97974bf8045f8089160e92b873bcba34
|
e8ca8d00528e7a576e353fb6e37f0fc9fad25953
|
refs/heads/master
| 2021-01-20T12:21:24.734757 | 2015-07-12T17:48:12 | 2015-07-12T17:48:12 | 38,862,565 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.8421052694320679,
"alphanum_fraction": 0.8421052694320679,
"avg_line_length": 30.66666603088379,
"blob_id": "d33735b328c9ee8c2b529e7f0ad081527805acab",
"content_id": "19aed08103730298ad7d2c06ef11fd5ff702f626",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 95,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 3,
"path": "/mymath/__init__.py",
"repo_name": "AlphaLambdaMuPi/EECampTeaching",
"src_encoding": "UTF-8",
"text": "from .kalfil import ThetaOmegaKalmanFilter\nfrom .pid import PID\nfrom .momentum import Momentum\n"
},
{
"alpha_fraction": 0.5838425159454346,
"alphanum_fraction": 0.5947046875953674,
"avg_line_length": 26.79245376586914,
"blob_id": "8c9dd18be07697de3a747e05626c66fc476b0d88",
"content_id": "45d0b550aa9c56cba4dc05609f5d7b10496b2f51",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1473,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 53,
"path": "/simulator/simulator.py",
"repo_name": "AlphaLambdaMuPi/EECampTeaching",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python3\n\nimport asyncio\nimport logging\n# import matplotlib.pyplot as plt\nimport numpy as np\n\nfrom drone import SimpleVirtualDrone\n\nfrom controller import SimpleController\n\nlogger = logging.getLogger()\n\nnp.set_printoptions(precision=10, suppress=True)\n\nclass Simulator(object):\n def __init__(self):\n self._drone = SimpleVirtualDrone()\n self._controller = SimpleController(self._drone, log=True)\n self._loop = asyncio.get_event_loop()\n #self._drone.set_init([0., 0., 0.], [0., 0., 1.])\n self.started = asyncio.Future()\n # self._AOO = []\n # self._drone.dt = 5e-4\n # self._drone.noise_z = 1e-10\n\n @asyncio.coroutine\n def run(self):\n logger.info('starting simulation...')\n yield from self._controller.arm()\n self._loop.call_soon_threadsafe(\n self._loop.create_task,\n self._controller.start()\n )\n self.started.set_result(True)\n logger.info('started.')\n\n @asyncio.coroutine\n def get_data(self):\n pos = list(self._drone.pos)\n ori = list(self._drone.rot.flatten())\n # oori = ori[:, 2]\n # self._AOO.append(self._drone.acc_sensor[2])\n # self._AOO.append(oori)\n return pos, ori\n\n @asyncio.coroutine\n def stop(self):\n yield from self._controller.stop()\n yield from self._drone.stop()\n # logger.debug('plotting...')\n # plt.plot(self._AOO)\n # plt.show()\n"
},
{
"alpha_fraction": 0.6119295954704285,
"alphanum_fraction": 0.6385195851325989,
"avg_line_length": 30.625,
"blob_id": "a6929c87c574b5a2d97703e67f3c0891fe4b691b",
"content_id": "477099925e001a1125b4379e918a26757ebffc93",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 2783,
"license_type": "permissive",
"max_line_length": 168,
"num_lines": 88,
"path": "/WebDrone/js/drone.js",
"repo_name": "AlphaLambdaMuPi/EECampTeaching",
"src_encoding": "UTF-8",
"text": "// Generated by CoffeeScript 1.9.3\n(function() {\n var VEL, camVel;\n\n VEL = 0.2;\n\n camVel = new THREE.Vector3(0, 0, 0);\n\n this.moveCamera = function() {\n return this.camera.position.add(camVel);\n };\n\n this.render = function() {\n return this.renderer.render(scene, camera);\n };\n\n this.animate = function() {\n requestAnimationFrame(animate);\n this.controls.update();\n return render();\n };\n\n this.init = function() {\n var axisHelper, container, radius;\n this.scene = new THREE.Scene();\n this.camera = new THREE.PerspectiveCamera(75, window.innerWidth / window.innerHeight, 0.1, 1000);\n this.renderer = new THREE.WebGLRenderer({\n 'antialias': false\n });\n renderer.setSize(window.innerWidth * 0.6, window.innerHeight * 0.6);\n renderer.setClearColor(0xeeeeee);\n this.drone = new THREE.Object3D();\n this.loader = new THREE.ColladaLoader();\n loader.load('assets/ar-drone-2.dae', function(result) {\n result.scene.scale.divideScalar(200);\n drone.add(result.scene);\n return render();\n });\n scene.add(drone);\n axisHelper = new THREE.AxisHelper(1.5);\n drone.add(axisHelper);\n axisHelper = new THREE.AxisHelper(4);\n scene.add(axisHelper);\n camera.position.y = -5;\n camera.position.z = 2;\n camera.up.set(0, 0.5, 1);\n camera.lookAt(new THREE.Vector3(0, 0, 0));\n scene.add(camera);\n renderer.domElement.setAttribute(\"id\", \"main-canvas\");\n container = document.getElementById('canvas-wrapper');\n container.appendChild(renderer.domElement);\n radius = 60;\n this.controls = new THREE.TrackballControls(camera, container);\n controls.rotateSpeed = 5;\n controls.zoomSpeed = 5;\n controls.panSpeed = 1;\n controls.noZoom = false;\n controls.noPan = false;\n controls.staticMoving = true;\n controls.dynamicDampingFactor = 0.3;\n controls.keys = [65, 83, 68];\n return controls.addEventListener('change', render);\n };\n\n this.updateStatus = function(status) {\n var ori, pos, statusdiv, theta, x, y;\n statusdiv = document.getElementById('status');\n pos = new THREE.Vector3().fromArray(status.pos);\n statusdiv.innerHTML = \"(\" + pos.x + \", \" + pos.y + \", \" + pos.z + \")\";\n ori = new THREE.Matrix4();\n ori.set(status.ori[0], status.ori[1], status.ori[2], 0, status.ori[3], status.ori[4], status.ori[5], 0, status.ori[6], status.ori[7], status.ori[8], 0, 0, 0, 0, 1);\n theta = new THREE.Euler();\n theta.setFromRotationMatrix(ori);\n x = (new Date()).getTime();\n if (window.lastx === void 0 || window.lastx < x - 80) {\n y = theta._x;\n series.addPoint([x, y], true, false);\n window.lastx = x;\n }\n drone.position.copy(pos);\n return drone.rotation.setFromRotationMatrix(ori);\n };\n\n init();\n\n animate();\n\n}).call(this);\n"
},
{
"alpha_fraction": 0.805084764957428,
"alphanum_fraction": 0.8135592937469482,
"avg_line_length": 28.25,
"blob_id": "9c7f288bea1c8412bfab4326207e4c386e3e574d",
"content_id": "778e3a4f31c5ee41c0ffbed315ddab26392addc6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 118,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 4,
"path": "/controller/__init__.py",
"repo_name": "AlphaLambdaMuPi/EECampTeaching",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python3\n\nfrom .base_controller import BaseController\nfrom .simple_controller import SimpleController\n\n"
},
{
"alpha_fraction": 0.5140153765678406,
"alphanum_fraction": 0.5312832593917847,
"avg_line_length": 29.91575050354004,
"blob_id": "f3a986eb4d0ec71ed06024b9904b4d34d02e2d6a",
"content_id": "c15d17c4d65d9f3180fb4d424f416a64803a4877",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8455,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 273,
"path": "/controller/controller.py",
"repo_name": "AlphaLambdaMuPi/EECampTeaching",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python3\n\nimport asyncio\nimport logging\nimport json\nimport numpy as np\n\nfrom .pid import PID\nfrom .utils import Momentum\nfrom .kalfil import ThetaOmegaKalmanFilter\nfrom .constant import CONST\n\nlogger = logging.getLogger()\n\nclass Controller(object):\n def __init__(self, drone, *, loop=None, log=False):\n if not loop:\n loop = asyncio.get_event_loop()\n self._armed = False\n self._landing = True\n self._loop = loop\n self._drone = drone\n self._kf = []\n for i in range(3):\n self._kf.append(ThetaOmegaKalmanFilter(0.1, 0.1, 0.04))\n self._action = np.array([0., 0., 0., 0.])\n self._thrust0 = 400\n self._target_angle = np.array([0., 0., 0.])\n\n self.stop_signal = False\n self.stopped = asyncio.Future(loop=self._loop)\n\n self._restriction = 700\n\n # self.theta_mom = Momentum()\n # self.omega_mom = Momentum()\n self.z_mom = Momentum(tau=0.5)\n self.zacc_mom = Momentum(tau=0.5)\n\n self._pid_thetaxy = np.array([48., 40., 15.])\n self._pid_tweakper = np.array([1., 1., 1.])\n\n self._pids = {\n 'theta_x': PID(*self._pid_thetaxy, imax=60.),\n 'theta_y': PID(*self._pid_thetaxy, imax=60.),\n 'theta_z': PID(60., 20., 30., imax=60.),\n 'acc_z': PID(20., 1., 0., imax=800),\n }\n\n # logging\n self._datalogger = None\n if log:\n self._logger_setup()\n\n def _logger_setup(self):\n self._datalogger = logging.getLogger('data')\n self._datalogger.propagate = False\n fh = logging.FileHandler('action.log')\n self._datalogger.addHandler(fh)\n\n def set_despos(self, pos):\n self._despos = pos\n\n def get_despos(self):\n return self._despos\n\n def tweak_pid(self, type_, per):\n if type_ == 'P':\n self._pid_tweakper[0] = per\n elif type_ == 'I':\n self._pid_tweakper[1] = per\n elif type_ == 'D':\n self._pid_tweakper[2] = per\n gain = self._pid_thetaxy * self._pid_tweakper\n self._pids['theta_x'].set_gain(*gain)\n self._pids['theta_y'].set_gain(*gain)\n\n def get_control(self, thrust, angle_x, angle_y, omega_z):\n if not self._armed or self.stop_signal:\n self._thrust = -100\n return\n\n if self._thrust > CONST['armed_thrust']:\n dx = CONST['max_thrust'] - CONST['armed_thrust']\n else:\n dx = CONST['armed_thrust'] - CONST['disarmed_thrust']\n self._thrust = thrust * dx + CONST['armed_thrust']\n\n mxy, mz = CONST['max_anglexy'], CONST['max_anglez']\n self._target_angle = np.array([angle_x*mxy, angle_y*mxy, omega_z*mz])\n \n \n\n @asyncio.coroutine\n def run(self):\n try:\n yield from self._drone.start()\n #ready = yield from self._drone.pretest()\n #if not ready:\n #return False\n\n logger.info('controller start.')\n yield from self._run()\n except (asyncio.CancelledError, KeyboardInterrupt):\n logger.info('capture ctrl-C in controller.')\n finally:\n yield from self.landing()\n\n return True\n\n @asyncio.coroutine\n def _run(self):\n DTIME = 20e-3\n self._last_time = self._loop.time()\n\n while self._drone.alive() and not self.stop_signal:\n if self._armed:\n yield from self.update()\n yield from asyncio.sleep(0.01)\n\n\n @asyncio.coroutine\n def update(self):\n '''updates the motors according to the sensors' data\n '''\n if self._thrust < 10:\n return\n now = self._loop.time()\n dt = now - self._last_time\n\n acc, theta, omega, z = yield from self._drone.get_motion_sensors()\n\n theta_smooth = []\n omega_smooth = []\n for i in range(3):\n theome = np.array([theta[i], omega[i]])\n self._kf[i].update(now, theome)\n the, ome = self._kf[i].predict(now)\n theta_smooth.append(the)\n omega_smooth.append(ome)\n theta_smooth = np.array(theta_smooth)\n omega_smooth = 
np.array(omega_smooth)\n z_smooth = self.z_mom.append_value(now, z)\n zacc_smooth = self.zacc_mom.append_value(now, acc[2])\n # theta_smooth = self.theta_mom.append_value(now, theta)\n # omega_smooth = self.omega_mom.append_value(now, omega)\n #thetaxy_error = self._target_angle - theta_smooth[0:2]\n #thetaz_error = 0 - theta_smooth[2]\n theta_error = self._target_angle - theta_smooth\n\n theta_x_action = self._pids['theta_x'].get_control(\n now, theta_error[0], 0 - omega_smooth[0]\n )\n theta_y_action = self._pids['theta_y'].get_control(\n now, theta_error[1], 0 - omega_smooth[1]\n )\n theta_z_action = self._pids['theta_z'].get_control(\n now, theta_error[2], 0 - omega_smooth[2]\n )\n\n\n thrust_action = self._pids['acc_z'].get_control(\n now, 0 - z_smooth, 0 - zacc_smooth\n )\n\n self._action[0] = -theta_y_action + theta_z_action\n self._action[1] = theta_x_action + -theta_z_action\n self._action[2] = theta_y_action + theta_z_action\n self._action[3] = -theta_x_action + -theta_z_action\n self._thrust = self._thrust0 + thrust_action\n #self._thrust = 400\n #print(123)\n #print('thrust:', thrust_action)\n\n yield from self.send_control()\n\n #print('thrust:', thrust_action)\n self._last_time = now\n\n # logging\n if self._datalogger:\n self._datalogger.info(json.dumps({\n 'action': self._action.tolist(),\n 'accel': acc.tolist(),\n 'theta': theta.tolist(),\n 'omega': omega.tolist(),\n 'theta_smooth': theta_smooth.tolist(),\n 'omega_smooth': omega_smooth.tolist(),\n 'time': now,\n }))\n\n @asyncio.coroutine\n def send_control(self):\n if self._thrust >= 10:\n final_action = self._action + self._thrust\n final_action = np.maximum.reduce([final_action,\n np.full((4,), -100)])\n final_action = np.minimum.reduce(\n [final_action, np.full((4,), self._restriction)]\n )\n else:\n final_action = np.full((4, ), -100.)\n\n #final_action[0] += 20\n #final_action[1] += 4\n #final_action[2] += 10\n # final_action[1] = final_action[3] = -100.\n #print(final_action)\n #final_action = np.array([0., 0., 1e-4, 0.])\n yield from self._drone.set_motors(final_action)\n\n @asyncio.coroutine\n def takeoff(self):\n # TODO:\n # implement take off process and check if drone is stable.\n #self._thrust = 500\n return True\n\n @asyncio.coroutine\n def arm(self):\n self._armed = True\n self._thrust = CONST['armed_thrust']\n yield from self.send_control()\n return True\n\n @asyncio.coroutine\n def disarm(self):\n self._armed = False\n self._landing = True\n self._target_angle = np.array([0., 0., 0.])\n yield from self.landing()\n return True\n\n\n @asyncio.coroutine\n def landing(self):\n logger.info('landing...')\n\n # Land at speed 400 thrust per second\n while not self.stop_signal and self._thrust > 0:\n self._thrust -= 20\n yield from self.send_control()\n yield from asyncio.sleep(.05)\n\n self._thrust = -100\n self._landing = True\n logger.info('landed.')\n\n @asyncio.coroutine\n def stall(self):\n self._thrust = -100\n yield from self.send_control()\n\n\n @asyncio.coroutine\n def stop(self):\n self.stop_signal = True\n yield from self.stall()\n yield from self.stopped\n\n @asyncio.coroutine\n def preform_action(self, action, args):\n if action == 'stop':\n yield from self.stop()\n elif action == 'arm':\n print('Get Arm')\n yield from self.arm()\n elif action == 'disarm':\n yield from self.disarm()\n elif action == 'control':\n self.get_control(*args)\n elif action == 'tweak':\n self.tweak_pid(*args)\n \n\n\n"
},
{
"alpha_fraction": 0.42326492071151733,
"alphanum_fraction": 0.44086021184921265,
"avg_line_length": 24.5625,
"blob_id": "8da3b9464d40b84644a6cdda746cd07b06184444",
"content_id": "0161a3abd7417c82cd6dd3a7d0d299250d5625a2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2046,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 80,
"path": "/mymath/kalfil.py",
"repo_name": "AlphaLambdaMuPi/EECampTeaching",
"src_encoding": "UTF-8",
"text": "import numpy as np\nfrom time import time as curtime\n\nclass KalmanFilter:\n def __init__(self, dimx, dimz, dimu):\n self.dimx = dimx\n self.dimz = dimz\n self.dimu = dimu\n self.x = np.zeros(dimx)\n self.u = np.zeros(dimu)\n self.S = np.eye(dimx) * 1E10\n self.time = 0\n\n def set_u(self, u):\n self.u = u\n\n def predict(self, time):\n dt = time - self.time\n if dt < 1E-3:\n return self.x\n self.time = time\n A = self.A(dt)\n Bf = self.Bf(dt, self.u)\n Q = self.Q(dt)\n self.x = np.dot(A, self.x) + Bf\n self.S = np.dot(np.dot(A, self.S), A.T) + Q\n return self.x\n\n def update(self, time, z):\n self.predict(time)\n C = self.C()\n R = self.R()\n t1 = z - np.dot(C, self.x)\n t2 = np.dot(self.S, C.T)\n t3 = np.linalg.pinv(np.dot(C, t2) + R)\n K = np.dot(t2, t3)\n self.x += np.dot(K, t1)\n self.S -= np.dot(np.dot(K, C), self.S)\n\n def setA(self, f):\n self.A = f\n def setBf(self, f):\n self.Bf = f\n def setC(self, f):\n self.C = f\n def setQ(self, f):\n self.Q = f\n def setR(self, f):\n self.R = f\n\nclass ThetaOmegaKalmanFilter(KalmanFilter):\n def __init__(self, theta_std, omega_std, time_const):\n super().__init__(2, 2, 1)\n def a(dt):\n return np.array([\n [1, dt],\n [0, 1]\n ])\n def bf(dt, u):\n return 0.\n def c():\n return np.eye(2)\n def q(dt):\n return np.array([\n [theta_std**2 * dt / time_const, 0.],\n [0., omega_std**2 * dt / time_const]\n ])\n def r():\n return np.array([\n [theta_std**2, 0.],\n [0., omega_std**2]\n ])\n self.setA(a)\n self.setBf(bf)\n self.setC(c)\n self.setQ(q)\n self.setR(r)\n\nif __name__ == '__main__':\n kf = ThetaOmegaKalmanFilter(0.1, 0.1, 0.04)\n\n"
},
{
"alpha_fraction": 0.8322580456733704,
"alphanum_fraction": 0.8322580456733704,
"avg_line_length": 36.75,
"blob_id": "b9d330ca8c485bf1d479b9dd4d2f7e7a579dcb99",
"content_id": "663bf2a28ea1bd00c81010e14e42f7768170957f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 155,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 4,
"path": "/drone/__init__.py",
"repo_name": "AlphaLambdaMuPi/EECampTeaching",
"src_encoding": "UTF-8",
"text": "import asyncio\nfrom .base_drone import BaseDrone\nfrom .base_virtual_drone import BaseVirtualDrone\nfrom .simple_virtual_drone import SimpleVirtualDrone\n\n\n\n\n"
},
{
"alpha_fraction": 0.4555555582046509,
"alphanum_fraction": 0.48356807231903076,
"avg_line_length": 30.94499969482422,
"blob_id": "f97ea8e2d608c86718b78749de4eb601509c75c2",
"content_id": "e68969929c54a50ae1c3bd67315a116fd06d04af",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6390,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 200,
"path": "/simulator/num_model.py",
"repo_name": "AlphaLambdaMuPi/EECampTeaching",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python3 \nimport asyncio\nimport logging\nimport numpy as np\nimport scipy.linalg\n\nlogger = logging.getLogger()\n\nclass Drone(object):\n def __init__(self):\n self.dt = 2E-4\n self.time = 0.0\n self.g = 9.80\n self.gvec = np.array([0., 0., -self.g])\n self.M = 1.250\n self.R = 0.23\n self.Iz = 0.25 * self.M * self.R**2\n self.Ixy = self.Iz * 0.5\n self.I = np.diag([self.Ixy, self.Ixy, self.Iz])\n self.LIFT_K = 7.5e-3\n self.DRAG_B = 0.5\n\n mag_theta = -37.3 / 180 * np.pi\n mag_phi = 94.9 / 180 * np.pi\n self.B = np.array([\n np.sin(mag_theta) * np.cos(mag_phi),\n np.sin(mag_theta) * np.sin(mag_phi),\n np.cos(mag_theta),\n ])\n\n self.noise_acc = 1 * 0\n self.noise_omega = 0.05 * 0\n self.noise_z = 0.5\n\n self.pos = np.zeros(3)\n #self.rot = np.eye(3)\n q = 0.4\n self.rot = np.array([[1, 0, 0],\n [0, np.cos(q), -np.sin(q)],\n [0, np.sin(q), np.cos(q)]])\n self.vel = np.zeros(3)\n self.omega = np.zeros(3)\n self.acc_sensor = np.array([0, 0, self.g])\n self.motor = np.zeros(4)\n\n self.ppos = [\n np.array([self.R, 0., 0.]),\n np.array([0., self.R, 0.]),\n np.array([-self.R, 0., 0.]),\n np.array([0., -self.R, 0.]),\n ]\n self.pdir = [1., -1., 1., -1.]\n\n def invrot(self):\n return self.rot.T\n\n def diff_matrix(self, omega, dt):\n olen = np.linalg.norm(omega)\n if olen:\n wx, wy, wz = omega / olen\n else:\n wx, wy, wz = 0., 0., 0.\n th = olen * dt\n K = np.array([\n [0., -wz, wy], \n [wz, 0., -wx], \n [-wy, wx, 0.],\n ])\n return np.eye(3) + np.sin(th) * K + (1. - np.cos(th)) * np.dot(K, K)\n # Rodrigue's formula; equivalent to exponential map exp(th*K)\n\n def lift(self, pomega):\n return self.LIFT_K * pomega\n\n def force(self, lifts): # internal frame\n f = np.array([0., 0., sum(lifts)])\n f -= self.DRAG_B * np.dot(self.invrot(), self.vel)\n return f\n\n def torque(self, lifts, pomega): # internal frame\n tau = np.zeros(3)\n for i in range(4):\n lf = np.array([0., 0., lifts[i]])\n tau += np.cross(self.ppos[i], lf)\n tau += np.array(\n [0., 0., .1 * (1. if i % 2 == 0 else -1.) 
* pomega[i]]\n )\n return tau\n\n @asyncio.coroutine\n def set_motors(self, motor):\n self.motor = motor\n asyncio.sleep(0.01)\n\n def step(self, dt=5e-4):\n pomega = self.motor\n rot = self.rot\n lifts = [self.lift(x) for x in pomega]\n force_int = self.force(lifts)\n torque_int = self.torque(lifts, pomega)\n force_ref = np.dot(rot, force_int) + self.M * self.gvec\n #force_ref *= 0\n torque_ref = np.dot(rot, torque_int)\n I_ref = np.dot(np.dot(rot, self.I), self.rot.T)\n omega_ref = self.omega\n \n acc_ref = force_ref / self.M\n self.acc_sensor = np.dot(self.rot.T, acc_ref - self.gvec)\n rotacc_ref = np.dot(\n np.linalg.inv(I_ref),\n torque_ref - np.cross(omega_ref, np.dot(I_ref, omega_ref))\n )\n\n dmx = self.diff_matrix(self.omega + rotacc_ref * dt / 2., dt)\n self.rot = np.dot(dmx, self.rot)\n #self.pos += self.vel * dt + acc_ref * dt**2 / 2.\n #self.vel += acc_ref * dt\n self.omega += rotacc_ref * dt\n\n def get_time(self):\n return asyncio.get_event_loop().time()\n\n @asyncio.coroutine\n def get_motion_sensors(self):\n acc = self.acc_sensor + \\\n np.random.normal(size=(3,)) * self.noise_acc\n mag = np.dot(self.rot.T, self.B)\n\n def get_theta(a, m):\n ab = a / np.linalg.norm(a)\n Q = np.array([0, 0, 1])[np.newaxis].T * ab\n Q += self.B[np.newaxis].T * m\n U, S, V = np.linalg.svd(Q)\n M = np.diag([1, 1, np.linalg.det(U) * np.linalg.det(V)])\n R = np.dot(U, np.dot(M, V))\n yaw = np.arctan2(R[1, 0], R[0, 0])\n pitch = np.arctan2(-R[2, 0], np.sqrt(R[2, 1] ** 2 + R[2, 2] ** 2))\n roll = np.arctan2(R[2, 1], R[2, 2])\n return np.array([roll, pitch, yaw])\n\n def get_real_theta():\n R = self.rot\n yaw = np.arctan2(R[1, 0], R[0, 0])\n pitch = np.arctan2(-R[2, 0], np.sqrt(R[2, 1] ** 2 + R[2, 2] ** 2))\n roll = np.arctan2(R[2, 1], R[2, 2])\n return np.array([roll, pitch, yaw])\n\n\n theta = get_theta(acc, mag)\n theta_r = get_real_theta()\n omega = np.dot(self.invrot(), self.omega) + \\\n np.random.normal(size=(3,)) * self.noise_omega\n z = self.pos[2] + np.random.normal() * self.noise_z\n return acc + self.gvec, self.gettheta(), omega, z\n\n\n def set_init(self, vel, omega):\n self.vel = np.array(vel, dtype=np.float64)\n self.omega = np.array(omega, dtype=np.float64)\n\n def gettheta(self):\n R = self.rot\n yaw = np.arctan2(R[1, 0], R[0, 0])\n pitch = np.arctan2(-R[2, 0], np.sqrt(R[2, 1] ** 2 + R[2, 2] ** 2))\n roll = np.arctan2(R[2, 1], R[2, 2])\n return np.array([roll, pitch, yaw])\n\n @asyncio.coroutine\n def _run(self):\n last_time = self.get_time()\n while True:\n try:\n yield from asyncio.sleep(0.0002)\n now = self.get_time()\n dt = now - last_time\n self.step(dt)\n last_time = self.get_time()\n except asyncio.CancelledError:\n logger.debug('stop num_model simulation.')\n break\n except KeyboardInterrupt:\n logger.debug('capture ctrl-C in num_model.')\n break\n\n @asyncio.coroutine\n def start_control(self):\n self._worker = asyncio.get_event_loop().create_task(self._run())\n\n @asyncio.coroutine\n def get_ready(self):\n return True\n\n @asyncio.coroutine\n def stop(self):\n self._worker.cancel()\n logger.debug('stopping num model...')\n yield from asyncio.wait_for(self._worker, None)\n\n def alive(self):\n return len(self._worker.get_stack()) > 0\n\n"
},
{
"alpha_fraction": 0.5196195244789124,
"alphanum_fraction": 0.5755053758621216,
"avg_line_length": 26.129032135009766,
"blob_id": "caa6d6af96b4e5f0ddc6f84dec4dc9aa09ecbfea",
"content_id": "b845e3d568f1697c9ed282f0e715002aa419e08e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 841,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 31,
"path": "/plot.py",
"repo_name": "AlphaLambdaMuPi/EECampTeaching",
"src_encoding": "UTF-8",
"text": "import json\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndata = {}\n\nwith open('action.log') as f:\n for l in f:\n a = json.loads(l)\n\n for x, y in a.items():\n if x not in data:\n data[x] = [y]\n else:\n data[x].append(y)\n\nL, R = 0, 220000\ntheta = np.array(list(zip(*data['theta_smooth'])))\nomega = np.array(list(zip(*data['omega'])))\naction = np.array(list(zip(*data['action'])))\ntime = np.array(data['time'])\n#plt.plot(time[L:R], theta[0][L:R], label='tx')\nplt.plot(time[-100000:], theta[1][-100000:], label='ty')\n#plt.plot(time[L:R], theta[2][L:R], label='tz')\n#plt.plot(time[L:R], omega[1][L:R], label='tz')\nplt.legend()\npl2 = plt.twinx()\npl2.plot(time[-100000:], action[0][-100000:]-action[2][-100000:], color='black', label='a2')\n#pl2.legend()\nplt.grid()\nplt.show()\n"
},
{
"alpha_fraction": 0.5583229660987854,
"alphanum_fraction": 0.5608136057853699,
"avg_line_length": 27,
"blob_id": "deb2d63ce7121de6fb83940b70bf0f84fd2dbccf",
"content_id": "a3ff8808c0bc9fbaa68a261cdb51ca0dd94bd18b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2409,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 86,
"path": "/simulator/server.py",
"repo_name": "AlphaLambdaMuPi/EECampTeaching",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python\n\nimport asyncio\nimport websockets\nimport json\nimport logging\n\nfrom .simulator import Simulator\n\nlogger = logging.getLogger('websockets.protocol')\nlogger.setLevel(logging.WARNING)\nlogger = logging.getLogger()\n\nclass SimServer(object):\n def __init__(self, *, loop):\n self._sim = Simulator()\n self._conns = []\n self._loop = loop\n\n @asyncio.coroutine\n def _send(self, ws, mes):\n try:\n yield from ws.send(mes.encode())\n except websockets.exceptions.InvalidState:\n logger.warning(\"EInvalidState\")\n return \"EInvalidState\"\n except Exception:\n logger.warning(\"G__________G\")\n yield from ws.close()\n\n @asyncio.coroutine\n def _recv(self, ws):\n try:\n mesg = yield from ws.recv()\n except websockets.exceptions.InvalidState:\n logger.warning(\"EInvalidState\")\n return \"EInvalidState\"\n except Exception as e:\n print(e)\n logger.warning(\"G__________G\")\n yield from ws.close()\n return mesg\n\n @asyncio.coroutine\n def __call__(self, ws, uri):\n logger.info('concon connected')\n self._conns.append(ws)\n yield from self.run(ws)\n\n @asyncio.coroutine\n def run(self, ws):\n self._loop.create_task(self.send_loop(ws))\n self._loop.create_task(self.recv_loop(ws))\n while True:\n yield\n\n @asyncio.coroutine\n def recv_loop(self, ws):\n while ws.open:\n data = json.loads((yield from self._recv(ws)))\n if 'action' not in data:\n logger.warning('No action in data')\n return\n self._preform_action(data['action'])\n #yield from asyncio.sleep(0.02)\n\n def _preform_action(self, action):\n if action == 'start':\n self._loop.create_task(self._sim.run())\n\n @asyncio.coroutine\n def send_loop(self, ws):\n res = yield from self._sim.started\n if not res:\n return\n while ws.open:\n pos, ori = yield from self._sim.get_data()\n data = json.dumps({'pos':pos, 'ori':ori})\n yield from self._send(ws, data)\n yield from asyncio.sleep(0.02)\n\n @asyncio.coroutine\n def close(self):\n for ws in self._conns:\n yield from ws.close()\n yield from self._sim.stop()\n\n"
},
{
"alpha_fraction": 0.5806451439857483,
"alphanum_fraction": 0.5913978219032288,
"avg_line_length": 22.25,
"blob_id": "48bd5121548b00888924ba12acf74a6fbfd85176",
"content_id": "a9ffd6163b42eb8ac382315b84038ff5f9327a09",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 279,
"license_type": "permissive",
"max_line_length": 65,
"num_lines": 12,
"path": "/WebDrone/js/main.js",
"repo_name": "AlphaLambdaMuPi/EECampTeaching",
"src_encoding": "UTF-8",
"text": "// Generated by CoffeeScript 1.9.3\n(function() {\n document.addEventListener(\"DOMContentLoaded\", function(event) {\n connect();\n return $('#start-btn').click(function() {\n return ws.send(JSON.stringify({\n action: 'start'\n }));\n });\n });\n\n}).call(this);\n"
},
{
"alpha_fraction": 0.4852941036224365,
"alphanum_fraction": 0.5357142686843872,
"avg_line_length": 23.947368621826172,
"blob_id": "18073be063f8e2ffdf57ce8c12f7f00864b262dc",
"content_id": "e7baadb3312a57511058cc3a001f0afb5257f31a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 476,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 19,
"path": "/simulator/physical_model.py",
"repo_name": "AlphaLambdaMuPi/EECampTeaching",
"src_encoding": "UTF-8",
"text": "import asyncio\nimport logging\nimport numpy as np\nimport scipy.linalg\n\nlogger = logging.getLogger()\n\nclass Drone(object):\n def __init__(self):\n self.g = 9.80\n self.gvec = np.array([0., 0., -self.g])\n self.M = 1.250\n self.R = 0.23\n self.Iz = 0.1 * self.M * self.R**2\n self.Ixy = self.Iz * 0.5\n self.I = np.diag([self.Ixy, self.Ixy, self.Iz])\n self.LIFT_K = 7.5e-3\n self.TDRAG_K = 0.0\n self.DRAG_B = 0.5\n\n\n"
},
{
"alpha_fraction": 0.489761084318161,
"alphanum_fraction": 0.49658703804016113,
"avg_line_length": 22.897958755493164,
"blob_id": "799f3e278ca6653231e17de9706810a0abfb19f9",
"content_id": "1eef7dad69c607994db502875d80877c52be6305",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1172,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 49,
"path": "/mymath/pid.py",
"repo_name": "AlphaLambdaMuPi/EECampTeaching",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python3\n\nimport numpy as np\n\nclass PID(object):\n def __init__(self, kp, ki, kd, *, imax):\n self._last_time = None\n self._last_err = None\n self._int_err = 0\n self._imax = imax\n self.set_gain(kp, ki, kd)\n\n def set_gain(self, kp, ki, kd):\n self._kp = kp\n self._ki = ki\n self._kd = kd\n self._int_restriction = self._imax / (self._ki + 1e-10)\n\n def get_control(self, t, err, derr=None):\n\n up = err * self._kp \n\n if self._last_err is None:\n self._last_err = err\n\n if self._last_time is None:\n self._last_time = t\n return up\n\n dt = t - self._last_time + 1e-10\n if derr is None:\n derr = (err - self._last_err) / dt\n\n ud = derr * self._kd\n\n self._int_err += err * dt\n\n if self._int_err > self._int_restriction:\n self._int_err = self._int_restriction\n\n if self._int_err < -self._int_restriction:\n self._int_err = -self._int_restriction\n\n ui = self._int_err * self._ki\n\n self._last_err = err\n self._last_time = t\n\n return (up + ud + ui)\n\n"
},
{
"alpha_fraction": 0.6790123581886292,
"alphanum_fraction": 0.6975308656692505,
"avg_line_length": 12.5,
"blob_id": "5153d97ce1aab2838daceb47241c46612cf9bc67",
"content_id": "8452644fc1a6310943928e5be93c5f37c5128063",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 162,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 12,
"path": "/README.md",
"repo_name": "AlphaLambdaMuPi/EECampTeaching",
"src_encoding": "UTF-8",
"text": "# Dependency\n```\npip3 install websockets\npip3 install pyserial\n```\n# How to run\n```\npython3 main.py -m sim\n```\n\n* important\nThe code may contains lot of bug now.\n"
}
] | 14 |
dela3499/arc-chart
|
https://github.com/dela3499/arc-chart
|
d945edcbd265650476b5021d04c176920d7c80f8
|
dc8977c490254cf94946fbc9e96d503a8e99e9ab
|
a713cd2969490f09f6bfff4aa7b17761e337052a
|
refs/heads/gh-pages
| 2021-01-10T18:41:09.124129 | 2014-11-20T19:22:11 | 2014-11-20T19:22:11 | 22,131,871 | 2 | 0 | null | 2014-07-23T04:40:50 | 2014-11-04T21:31:08 | 2014-11-12T17:14:18 |
Python
|
[
{
"alpha_fraction": 0.7765726447105408,
"alphanum_fraction": 0.7765726447105408,
"avg_line_length": 153,
"blob_id": "3dc75614786ac5f719b6ebea128f42ac95f96d82",
"content_id": "55326ca0145918836517d83186882925a17415fc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 461,
"license_type": "no_license",
"max_line_length": 256,
"num_lines": 3,
"path": "/readme.md",
"repo_name": "dela3499/arc-chart",
"src_encoding": "UTF-8",
"text": "This is a small tool to visualize the structure in strings by drawing arcs connecting repeated substrings. The project is inspired by Martin Wattenberg's [Shape of Song](http://www.bewitched.com/song.html) project.\n\nSo far, I've just got a mockup, which you can see at the top-of-the-page link. It's got two panels. The large left panel contains the arc chart, and the right panel contains a text input, along with a few buttons to load sample strings into the text field."
},
{
"alpha_fraction": 0.529326319694519,
"alphanum_fraction": 0.5427637696266174,
"avg_line_length": 36.70547866821289,
"blob_id": "bd37fd091c7f7e7975b80498d25d71d5d2c3251f",
"content_id": "3ac33540914db1f4c356718546cfd043dec91277",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5507,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 146,
"path": "/repeated_substring.py",
"repo_name": "dela3499/arc-chart",
"src_encoding": "UTF-8",
"text": "import json\n\nclass ArcChart(object):\n\n def __init__(self, original_string):\n # original string\n self.org = original_string\n # identical substrings\n self.substrings = self.get_repeated_substring() \n # non-overlaiing identical substrings\n ArcChart._remove_overlapping_substrings(self.substrings)\n # list of consecutive, non-overlapping pairs\n self.matching_pairs = ArcChart.get_consecutive_pairs(self.substrings)\n # list of maximal, consecutive, non-overlapping pairs\n ArcChart._remove_nonmaximal_pairs(self.matching_pairs) \n\n class Pair(object):\n def __init__(self, first_substring, second_substring):\n self.value = (first_substring, second_substring)\n def contains(self, pair):\n \"\"\" returns true if the calling object contains the argument object \"\"\"\n def _1contains2(substring1, substring2):\n x_start = substring1[0]\n x_end = substring1[1]\n y_start = substring2[0]\n y_end = substring2[1]\n return x_start <= y_start and x_end >= y_end\n pair1 = self.value\n pair2 = pair.value\n for i in xrange(2):\n if not ( _1contains2( pair1[i], pair2[i] ) ):\n return False\n return True\n\n def export_json_format_pairs(self, file_name, pairs=None):\n if not pairs:\n pairs = self.matching_pairs\n list_of_pairs = []\n for each_pair in pairs:\n start1 = each_pair.value[0][0]\n start2 = each_pair.value[1][0]\n length = each_pair.value[0][1] - start1 \n pair = {'a': start1, 'b': start2,\\\n 'n': length}\n list_of_pairs.append( pair )\n with open(file_name, 'w') as f:\n json.dump( list_of_pairs, f) \n\n def get_matching_pairs_in_strings(self):\n retlist = []\n for each_pair in self.matching_pairs:\n str1_start = each_pair.value[0][0]\n str1_end = each_pair.value[0][1]\n str2_start = each_pair.value[1][0]\n str2_end = each_pair.value[1][1]\n retlist.append( ( self.org[str1_start:str1_end], self.org[str2_start:str2_end]))\n return retlist\n\n\n @staticmethod\n def _conver_format(file_name):\n content = ''\n with open(file_name) as f:\n content = f.read()\n content = content.replace('\"', '')\n content = content.replace(':', '=')\n with open(file_name, 'w') as f:\n f.write(content) \n\n\n def get_substrings(self):\n \"\"\" returns all identical substrings of the original string \"\"\"\n s = self.org\n \tall_substrings = {} # store all repeated substrings here in a dictionary of lists of tuples\n \tfor index in range(0,len(s)):\n \t\tfor substring_length in range(1, len(s)-index+1):\n \t\t\tstart = index\n \t\t\tend = index+substring_length\n \t\t\tsubstring = s[start:end]\n \t\t\tif substring not in all_substrings:\n \t\t\t\tall_substrings[substring] = [(start,end)]\n \t\t\telse:\n \t\t\t\tall_substrings[substring].append((start,end))\n \treturn all_substrings\n \n def get_repeated_substring(self):\n s = self.org\n \treturn dict((k,v) for k,v in self.get_substrings().iteritems() if len(v) > 1)\n\n @staticmethod\n def get_consecutive_pairs(dic):\n \"\"\" \n returns a list of consecutive pairs from the given dictionary dic.\n dic should be pre-processed so that it does not contain any ocerlapping pairs\n \"\"\"\n retlist = [] \n for each_key in dic.keys():\n sublist = sorted(dic[each_key])\n if ( len(sublist) > 1 ):\n for i in range(len(sublist)-1):\n retlist.append( ArcChart.Pair(sublist[i], sublist[i+1]) )\n return retlist\n\n @staticmethod\n def _remove_overlapping_substrings(dic):\n for each_key in dic.keys():\n dic[each_key].sort()\n sublist = dic[each_key]\n for i in range(len(sublist)-1):\n to_be_removed = []\n for j in range(i+1, len(sublist)):\n if 
ArcChart.is_overlapping(sublist[i], sublist[j]):\n to_be_removed.append(sublist[j])\n for each_element in to_be_removed:\n try:\n sublist.remove(each_element) \n except ValueError:\n pass\n\n @staticmethod\n def is_overlapping(x, y):\n \"\"\"take tuples of form (start,end) that denote start and end points for\n each instance of a repeated substring\n return True if instances overlap (either the start or endpoint of the\n second instance lie between the start and endpoint of the first instance)\"\"\"\n \n def is_between(x,y1,y2):\n \"\"\"return true if x is between y1 and y2\"\"\"\n if x > y1 and x < y2:\n return True\n else:\n return False \n \n if is_between(y[0],x[0],x[1]) or is_between(y[1],x[0],x[1]):\n return True\n else:\n return False\n\n @staticmethod\n def _remove_nonmaximal_pairs(pairs):\n for pair1 in pairs[:]: \n removable_pairs = []\n for pair2 in pairs:\n if ( pair1 is not pair2 ):\n if ( pair1.contains(pair2) ):\n pairs.remove(pair2)\n \n"
},
{
"alpha_fraction": 0.6752136945724487,
"alphanum_fraction": 0.6752136945724487,
"avg_line_length": 10.600000381469727,
"blob_id": "b5a25120bf80ece3abd72a1f23492301bf6c19ec",
"content_id": "b6796c5a3809e209a6f4e22d14f6d712775521f6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 117,
"license_type": "no_license",
"max_line_length": 32,
"num_lines": 10,
"path": "/test.py",
"repo_name": "dela3499/arc-chart",
"src_encoding": "UTF-8",
"text": "from repeated_substring import *\n\n\ns = \"ababab\"\na = ArcChart(s)\n\nx = a.matching_pairs\n\nfor xi in x:\n\tprint xi.value\n\n"
}
] | 3 |
AlikhanMussabekov/ProgrammingTechnologies
|
https://github.com/AlikhanMussabekov/ProgrammingTechnologies
|
d1c8e1ae968ac1fe58e3fca1b893dc9866e21daf
|
6983ff2591588f0f74ef49ff4f685ac911382b2a
|
bd995b84a8add9835b3e9fcaf121b6bda631a016
|
refs/heads/main
| 2023-01-23T10:53:53.962308 | 2020-11-30T14:04:35 | 2020-11-30T14:04:35 | 313,301,367 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6314554214477539,
"alphanum_fraction": 0.637910783290863,
"avg_line_length": 27.399999618530273,
"blob_id": "ebd553fae3b3c6ad1d74ef10d1c0ed205dbbce30",
"content_id": "7955a6910ca00a7f6d36decf091ad87e16ae7872",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2212,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 60,
"path": "/Lab4/file.py",
"repo_name": "AlikhanMussabekov/ProgrammingTechnologies",
"src_encoding": "UTF-8",
"text": "\"\"\"\nПрочитать из файла (имя - параметр командной строки)\nвсе слова (разделитель пробел)\n\nСоздать \"Похожий\" словарь который отображает каждое слово из файла\nна список всех слов, которые следуют за ним (все варианты).\n\nСписок слов может быть в любом порядке и включать повторения.\nнапример \"and\" ['best\", \"then\", \"after\", \"then\", ...] \n\nСчитаем , что пустая строка предшествует всем словам в файле.\n\nС помощью \"Похожего\" словаря сгенерировать новый текст\nпохожий на оригинал.\nТ.е. напечатать слово - посмотреть какое может быть следующим \nи выбрать случайное.\n\nВ качестве теста можно использовать вывод программы как вход.парам. для следующей копии\n(для первой вход.парам. - файл)\n\nФайл:\nHe is not what he should be\nHe is not what he need to be\nBut at least he is not what he used to be\n(c) Team Coach\n\n\n\"\"\"\n\nimport random\nimport sys\n\ndef generate_new_text(first_word, dict):\n print(first_word)\n if first_word in dict:\n nextWord = random.sample(dict[first_word], 1)[0]\n generate_new_text(nextWord, dict)\n\ndef mem_dict(filename):\n word_dict = dict()\n with open(filename, \"r\") as f:\n for line in f:\n words = line.split(\" \")\n for i in range(0, len(words) - 1):\n if words[i] in word_dict:\n word_dict[words[i]].add(words[i + 1].rstrip())\n else:\n word_dict[words[i]] = {words[i + 1].rstrip()}\n return word_dict\n\ndef main():\n if sys.argv[1] != \"--file\" and len(sys.argv[2]) < 1:\n print('use: [--file] file')\n sys.exit(1)\n \n first_word = \"He\"#input('Enter first word: ')\n generate_new_text(first_word, mem_dict(sys.argv[2]))\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.5503355860710144,
"alphanum_fraction": 0.5570470094680786,
"avg_line_length": 27.653846740722656,
"blob_id": "021061b959f80ad5e9cfe595ebe7e74d02595418",
"content_id": "76b31449bb3f6b9e01832200c832ea4223ea9767",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 852,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 26,
"path": "/Lab2/String.py",
"repo_name": "AlikhanMussabekov/ProgrammingTechnologies",
"src_encoding": "UTF-8",
"text": "# 1.\n# Вх: строка. Если длина > 3, добавить в конец \"ing\",\n# если в конце нет уже \"ing\", иначе добавить \"ly\".\n\ndef v(s):\n return s + \"ing\" if len(s) > 3 else s + \"ly\"\n\n# 2.\n# Вх: строка. Заменить подстроку от 'not' до 'bad'. ('bad' после 'not')\n# на 'good'.\n# Пример: So 'This music is not so bad!' -> This music is good!\n\ndef nb(s):\n return s[0:s.find(\"not\")] + \"good\" + s[s.find(\"bad\") + len(\"bad\"):]\n\ndef test(res, expt):\n result = f\"Test ✅: \\'{res}\\' equals \\'{expt}\\'\" if res == expt else f\"Test 🚫: expected \\'{expt}\\', but \\'{res}\\' found\"\n print(result)\n\ndef main():\n test(v(\"ooeoo\"), \"ooeooing\")\n test(v(\"oo\"), \"ooly\")\n test(nb(\"This music is not so bad!\"), \"This music is good!\")\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.5584872364997864,
"alphanum_fraction": 0.5857519507408142,
"avg_line_length": 29.7297306060791,
"blob_id": "18d3655ef6d6cdf7e1a25b8ba42dbf8827b0f1f4",
"content_id": "87bb067e16489d07a5f505857c9725d8b4fdb5a0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1306,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 37,
"path": "/Lab3/names.py",
"repo_name": "AlikhanMussabekov/ProgrammingTechnologies",
"src_encoding": "UTF-8",
"text": "import sys\nfrom bs4 import BeautifulSoup\n\n# для каждого переданного аргументом имени файла, вывести имена extr_name\n# напечатать ТОП-10 муж и жен имен из всех переданных файлов\n\ndef main():\n file_names = sys.argv[1:]\n \n if len(file_names) < 2:\n print('use: [--file] file [file ...]')\n sys.exit(1)\n \n for file_name in file_names[1:] : extr_name(file_name)\n\n# Вход: nameYYYY.html,\n# Выход: список начинается с года, продолжается имя-ранг в алфавитном порядке.\n# '2006', 'Aaliyah 91', Aaron 57', 'Abagail 895', ' и т.д.\n\ndef extr_name(filename):\n with open(filename, \"r\") as f:\n soup = BeautifulSoup(f.read(), 'lxml')\n year = soup.find('input', id=\"yob\")['value']\n top = top_10(soup.find('table', summary=\"Popularity for top 1000\"))\n top.sort()\n print(f\"{year}, {', '.join(map(str, top)) }\")\n\ndef top_10(table):\n top = []\n result = [row.text.replace('\\n', ' ').split(' ')[1:4] for row in table.find_all('tr')][1:11]\n for row in result:\n top.append(f\"{row[1]} {row[0]}\")\n top.append(f\"{row[2]} {row[0]}\")\n return top\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.5425220131874084,
"alphanum_fraction": 0.5865102410316467,
"avg_line_length": 27.41666603088379,
"blob_id": "234464277a83f8a16fd444b0f478d33e70e520be",
"content_id": "ad5180068e6f09bc323cecbb15e3a7538d25d77d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 838,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 24,
"path": "/Lab2/List.py",
"repo_name": "AlikhanMussabekov/ProgrammingTechnologies",
"src_encoding": "UTF-8",
"text": "# 1.\n# Вх: список чисел, Возвр: список чисел, где\n# повторяющиеся числа урезаны до одного\n# пример [0, 2, 2, 3] returns [0, 2, 3].\n\ndef rm_adj(nums):\n return list(set(nums))\n\n# 2. Вх: Два списка упорядоченных по возрастанию, Возвр: новый отсортированный объединенный список\ndef sort_two(first, second):\n first += second\n first.sort()\n return first\n\ndef test(res, expt):\n result = f\"Test ✅: \\'{res}\\' equals \\'{expt}\\'\" if res == expt else f\"Test 🚫: expected \\'{expt}\\', but \\'{res}\\' found\"\n print(result)\n\ndef main():\n test(rm_adj([0, 2, 2, 3, 4]), [0, 2, 3, 4])\n test(sort_two([0, 2, 4], [2, 3 , 1]), [0, 1, 2, 2, 3, 4])\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.6043643355369568,
"alphanum_fraction": 0.6068943738937378,
"avg_line_length": 26.258621215820312,
"blob_id": "9f7133fc6a32deadc1383a0c97a85a89a556eddb",
"content_id": "87608e28e74ef218ba67bd10aa82e38e0b40dacf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3591,
"license_type": "no_license",
"max_line_length": 141,
"num_lines": 116,
"path": "/Lab6/lab6.py",
"repo_name": "AlikhanMussabekov/ProgrammingTechnologies",
"src_encoding": "UTF-8",
"text": "\"\"\"\nПростой RSS reader\n\nПри добавлении ленты (например https://habrahabr.ru/rss/interesting/)\nзаписи из добавленной ленты сканируются и заносятся в базу (например sqlite)\n\nПри нажатии на кнопку обновить - новое сканирование и добавление новых записей (без дублрования существующих)\n\nОтображение ленты начиная с самых свежих записей с пагинацией (несколько записей на странице)\n\nЗаписи из разных лент хранить и показывать отдельно (по названию ленты).\n\nВгимание:\nПосле сдачи и визирования отчета принести его на лекцию (за 5 мин до начала)\n+ Продублировать отчет и исходник(.zip, github и т.п.) письмом на isu\n\"\"\"\n\nimport sqlite3\nimport requests\nimport bs4\nfrom bs4 import BeautifulSoup\n\nfrom flask import request\nfrom flask import Flask\nfrom flask_cors import CORS\n\napp = Flask(__name__)\nCORS(app)\n\ndb_path = \"/Users/a.musabekov/pylab6.db\"\n\ndef create_connection(db_file):\n try:\n conn = sqlite3.connect(db_file)\n return conn\n except Error as e:\n print(e)\n\n return conn\n \ndef create_table(conn, create_table_sql):\n try:\n c = conn.cursor()\n c.execute(create_table_sql)\n c.close()\n except Exception as e:\n print(e)\n\ndef create_table_if_needed(conn):\n sql_query = \"\"\" CREATE TABLE IF NOT EXISTS posts (\n guid text PRIMARY KEY,\n title text NOT NULL,\n description text\n ); \"\"\"\n create_table(conn, sql_query)\n \ndef feed_parse():\n article_list = []\n try:\n r = requests.get('https://habrahabr.ru/rss/interesting/')\n soup = BeautifulSoup(r.content, features = 'xml')\n articles = soup.findAll('item')\n for a in articles:\n article = {}\n article['guid'] = a.guid.get_text()\n article['title'] = a.title.get_text()\n \n dd = a.description.get_text()\n desc = BeautifulSoup(dd, features = 'xml')\n article['description'] = \" \".join([item for item in desc.find_all(text=True)])\n \n article_list.append(article)\n except Exception as e:\n print(e)\n\n return article_list\n\ndef save_feed(feed, conn):\n cursor = conn.cursor()\n for a in feed:\n query = f\"INSERT OR IGNORE INTO posts (guid, title, description) VALUES (\\\"{a['guid']}\\\", \\\"{a['title']}\\\", \\\"{a['description']}\\\");\"\n cursor.execute(query, a)\n conn.commit()\n cursor.close()\n\ndef main():\n conn = create_connection(db_path)\n cursor = conn.cursor()\n create_table_if_needed(conn)\n\[email protected]('/startgame', methods=['GET'])\ndef start_game():\n conn = create_connection(db_path)\n cursor = conn.cursor()\n \n feed = feed_parse()\n save_feed(feed, conn)\n \n sqlite_select_query = \"\"\"SELECT * from posts\"\"\"\n cursor.execute(sqlite_select_query)\n records = cursor.fetchall()\n cursor.close()\n \n articles = []\n \n for row in records:\n article = {}\n article['title'] = row[1]\n article['description'] = row[2]\n articles.append(article)\n \n return {'answer': articles}\n \nif __name__ == '__main__':\n main()\n app.run()\n"
},
{
"alpha_fraction": 0.5278733372688293,
"alphanum_fraction": 0.5609084367752075,
"avg_line_length": 32.79069900512695,
"blob_id": "ec1a69e53b5ee60b9117dfcb5bac19ac2d7c2b92",
"content_id": "4761f8e22728a163201639e3529c8ca5b7a42fbe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1710,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 43,
"path": "/Lab1/list.py",
"repo_name": "AlikhanMussabekov/ProgrammingTechnologies",
"src_encoding": "UTF-8",
"text": "# 1.\n# Вх: список строк, Возвр: кол-во строк\n# где строка > 2 символов и первый символ == последнему\ndef me(words):\n count = 0\n for word in words:\n if len(word) > 2 and word[0] == word[-1]:\n count += 1\n return count\n\n\n# 2.\n# Вх: список строк, Возвр: список со строками (упорядочено)\n# за искл всех строк начинающихся с 'x', которые попадают в начало списка.\n# ['tix', 'xyz', 'apple', 'xacadu', 'aabbbccc'] -> ['xacadu', 'xyz', 'aabbbccc', 'apple', 'tix']\ndef fx(words):\n x_list = []\n other_list = []\n for word in words:\n x_list.append(word) if word[0] == 'x' else other_list.append(word)\n return sorted(x_list) + sorted(other_list)\n\n\n# 3.\n# Вх: список непустых кортежей,\n# Возвр: список сортир по возрастанию последнего элемента в каждом корт.\n# [(1, 7), (1, 3), (3, 4, 5), (2, 2)] -> [(2, 2), (1, 3), (3, 4, 5), (1, 7)]\ndef sort_tuples_list(list):\n return sorted(list, key=lambda value: value[-1])\n\n\ndef test(res, expt):\n result = f\"Test ✅: \\'{res}\\' equals \\'{expt}\\'\" if res == expt else f\"Test 🚫: expected \\'{expt}\\', but \\'{res}\\' found\"\n print(result)\n\ndef main():\n test(me(['test ing', 'lorem ipsuml', 'hello world h', 'll']), 2)\n test(fx(['tix', 'xyz', 'apple', 'xacadu', 'aabbbccc']), ['xacadu', 'xyz', 'aabbbccc', 'apple', 'tix'])\n test(sort_tuples_list([(1, 7), (1, 3), (3, 4, 5), (2, 2)]), [(2, 2), (1, 3), (3, 4, 5), (1, 7)])\n return\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.6027919054031372,
"alphanum_fraction": 0.6218274235725403,
"avg_line_length": 26.172412872314453,
"blob_id": "ef8d60af9d3ce5d844d6028f284fc23c7da0a595",
"content_id": "9bdc31889bb61e2d9689cdfd49edc8d088280656",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1914,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 58,
"path": "/Lab1/string.py",
"repo_name": "AlikhanMussabekov/ProgrammingTechnologies",
"src_encoding": "UTF-8",
"text": "# 1.\n# Входящие параметры: int ,\n# Результат: string в форме\n# \"Number of: \", где число из вход.парам.\n# Если число равно 10 или более, напечатать \"many\"\n# вместо # Пример: (5) -> \"Number of: 5\"\n# (23) -> 'Number of: many'\n\ndef num_of_items(count):\n return f\"Number of: {count}\" if count < 10 else 'Many'\n\n\n# 2.\n# Входящие параметры: string s,\n# Результат: string из 2х первых и 2х последних символов s\n# Пример 'welcome' -> 'weme'.\ndef start_end_symbols(s):\n return s[0:2] + s[-2:]\n\n\n# 3.\n# Входящие параметры: string s,\n# Результат: string где все вхождения 1го символа заменяются на '*'\n# (кроме самого 1го символа)\n# Пример: 'bibble' -> 'bi**le'\n# s.replace(stra, strb)\n\ndef replace_char(s):\n first_char = s[0]\n return first_char + s[1:].replace(first_char, '*')\n\n\n# 4\n# Входящие параметры: string a и b,\n# Результат: string где и разделены пробелом\n# а превые 2 симв обоих строк заменены друг на друга\n# Т.е. 'max', pid' -> 'pix mad'\n# 'dog', 'dinner' -> 'dig donner'\ndef str_mix(a, b):\n return f\"{b[0:2] + a[2:]} {a[0:2] + b[2:]}\"\n\n\n# Provided simple test() function used in main() to print\n# what each function returns vs. what it's supposed to return.\ndef test(res, expt):\n result = f\"Test ✅: \\'{res}\\' equals \\'{expt}\\'\" if res == expt else f\"Test 🚫: expected \\'{expt}\\', but \\'{res}\\' found\"\n print(result)\n\n\ndef main():\n test(num_of_items(10), 'Many')\n test(start_end_symbols('welcome'), 'weme')\n test(replace_char('bibble'), 'bi**le')\n test(str_mix('dog', 'dinner'), 'dig donner')\n return\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.681085467338562,
"alphanum_fraction": 0.6953250765800476,
"avg_line_length": 30.016666412353516,
"blob_id": "72b4cab6a03770da900e84ad56fd82cc61d3dd7f",
"content_id": "111453c49c4cf90ddd09fd1d12818b0aefe26455",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4671,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 120,
"path": "/Lab5/lab5.py",
"repo_name": "AlikhanMussabekov/ProgrammingTechnologies",
"src_encoding": "UTF-8",
"text": "\"\"\"\nВход: файл guess.txt содержащий имена для угадывания\n(например из http://www.biographyonline.net/people/famous-100.html можно взять имена)\n\nНаписать игру \"Угадай по фото\"\n\n3 уровня сложности:\n1) используются имена только 1-10\n2) имена 1-50\n3) имена 1-100\n\n- из используемых имен случайно выбрать одно\n- запустить поиск картинок в Google по выбранному\n- получить ~30-50 первых ссылок на найденные по имени изображения\n- выбрать случайно картинку и показать ее пользователю для угадывания\n (можно выбрать из выпадающего списка вариантов имен)\n- после выбора сказать Правильно или Нет\n\nп.с. сделать серверную часть, т.е. клиент играет в обычном браузере обращаясь к веб-серверу.\n\nп.с. для поиска картинок желательно эмулировать обычный пользовательский запрос к Google\nили можно использовать и Google image search API\nhttps://ajax.googleapis.com/ajax/services/search/images? или др. варианты\nНО в случае API нужно предусмотреть существующие ограничения по кол-ву запросов\nт.е. кешировать информацию на случай исчерпания кол-ва разрешенных (бесплатных)\nзапросов или другим образом обходить ограничение. Т.е. игра не должна прерываться после N запросов (ограничение API)\n\nп.с. желательно \"сбалансировать\" параметры поиска (например искать только лица,\nиспользовать только первые 1-30 найденных и т.п.)\nдля минимизации того что найденная картинка не соответствует имени\n\"\"\"\n\nimport requests\nimport random\nimport os\nimport cv2\nimport numpy as np\n\nfrom PIL import Image\nfrom flask import request\nfrom flask import Flask\nfrom flask_cors import CORS\n\ngame_config = {'1': 10, '2': 20, '3': 30}\n\nimage_path = \"/Users/a.musabekov/labs/ProgrammingTechnologies/PythonLabs/Lab5/new_image.jpg\"\ncascade_path = \"/Users/a.musabekov/labs/ProgrammingTechnologies/PythonLabs/Lab5/haarcascade_frontalface_default.xml\"\nnames_path = \"/Users/a.musabekov/labs/ProgrammingTechnologies/PythonLabs/Lab5/names\"\n\napp = Flask(__name__)\nCORS(app)\n\ndef request_images(name):\n endpoint = \"https://www.googleapis.com/customsearch/v1\"\n params = {\n \"q\": name,\n \"num\": 10,\n \"imgSize\": \"medium\",\n \"searchType\": \"image\",\n \"filetype\": \"jpg\",\n \"key\": \"API_SECRET_KEY\",\n \"cx\": \"API_CX\"\n }\n \n response = requests.get(endpoint, params=params).json()\n if 'items' in response:\n try:\n return list(map(lambda x: x['link'], response['items']))\n except(IndexError, TypeError):\n return 'Server Error'\n return 'No Items in response'\n\n\ndef save_image(link):\n response = requests.get(link)\n if response.status_code == 200:\n with open(image_path, 'wb') as f:\n f.write(response.content)\n\n\ndef choose_image(images):\n for image in images:\n save_image(image)\n if (has_face_on_image(image_path)):\n return image\n\n\ndef has_face_on_image(path):\n faceCascade = cv2.CascadeClassifier(cascade_path)\n \n gray = Image.open(path).convert('L')\n image = np.array(gray, 'uint8')\n\n faces = faceCascade.detectMultiScale(image, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))\n return len(faces) > 0\n\n\ndef random_names(count):\n with open(names_path, \"r\") as f:\n names = set()\n names_file = list(f)\n while len(names) < count:\n names.add(random.choice(names_file).rstrip())\n return list(names)\n\n\[email protected]('/startgame', methods=['GET'])\ndef start_game():\n lvl = request.args.get('lvl')\n \n names = random_names(game_config[lvl])\n answer_name = random.choice(names)\n \n images = request_images(answer_name)\n image_link = choose_image(images)\n return 
{'image': image_link, 'names': names, 'answer': answer_name}\n\n \nif __name__ == '__main__':\n app.run()\n"
}
] | 8 |
erelin6613/ML_Logger | https://github.com/erelin6613/ML_Logger | e64a59ad0cb2cb9f83a2d7c4bccd72c3686674fa | 1c29cd8d6a1f65b6077e14228d6ca106f49a76f7 | 69ecf73040deec4fcb34cac22e15254a4d0a2f6e | refs/heads/master | 2022-11-19T12:33:03.765426 | 2020-07-25T07:41:15 | 2020-07-25T07:41:15 | 282,395,781 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7794970870018005,
"alphanum_fraction": 0.7794970870018005,
"avg_line_length": 73,
"blob_id": "72cf03b6a9661f806c5850665b742a89ea7afcd3",
"content_id": "309a54be4e6108d27aa72c1f620f56fe72c09786",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 517,
"license_type": "no_license",
"max_line_length": 244,
"num_lines": 7,
"path": "/README.md",
"repo_name": "erelin6613/ML_Logger",
"src_encoding": "UTF-8",
"text": "### ML_Logger\n\nML_Logger is a set of scripts (for now) developed to simplify logging procedures during data analysis, modeling and other related tasks.\n\n*Warning:* At this point of time only two decorators `@log_model(path)` and `@log_params(path)` are working correctly.\n\nIt is intetion of the author to build robust and easy to use tools to record model mertics, parameters, hyperparameters, key data transformations, etc. If you have any suggestion how to improve it or would like to contribute feel free to do so."
},
{
"alpha_fraction": 0.6790950894355774,
"alphanum_fraction": 0.6803519129753113,
"avg_line_length": 25.241758346557617,
"blob_id": "79c2b3fa152614e3c590109c2ddf024d5dcca380",
"content_id": "294529db21e4ec2b704db4c039c96f8b37a774f5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2387,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 91,
"path": "/loggers.py",
"repo_name": "erelin6613/ML_Logger",
"src_encoding": "UTF-8",
"text": "import os\nimport csv\nimport time\nimport threading\nimport functools\nimport logging\nimport json\nfrom datetime import datetime\n\ndef deduce_model(model):\n\tfrom torch.nn.modules.module import Module\n\tfrom sklearn.base import (ClassifierMixin,\n\t\tRegressorMixin, BaseEstimator, ClusterMixin)\n\tif isinstance(model, Module):\n\t\treturn model.state_dict\n\tif issubclass(model.__class__, ClassifierMixin):\n\t\treturn model.get_params\n\n\nclass BaseLogger:\n\n\tdef __init__(self, log_dir):\n\n\t\tself.log_dir = log_dir\n\n\tdef log(self, params, logfile='logfile.log'):\n\t\twith open(logfile, 'a') as f:\n\t\t\tf.write(str(time.time())+': '+str(params)+'\\n')\n\n\tdef print(self, logfile='logfile.log'):\n\t\twith open(logfile, 'r') as f:\n\t\t\tprint(f.read())\n\n\nclass ModelLogger(BaseLogger):\n\n\tdef __init__(self, model, log_dir='model_log'):\n\n\t\tsuper(ModelLogger, self).__init__(log_dir)\n\t\tself.model = model\n\t\tself.params = self.deduce_model()\n\t\tself.logfile = f'{str(self.model)}.log'\n\t\tthreading.Thread(target=self.monitor).start()\n\n\n\tdef monitor(self, timelaps=180):\n\t\tref = hash(self.model)\n\t\tself.log(self.model) #frozenset({self.model.__getstate__()}.items))\n\t\twhile True:\n\t\t\t# time.sleep(timelaps)\n\t\t\tchecksum = hash(self.model)\n\t\t\tif ref != checksum:\n\t\t\t\tself.log({self.model})\n\t\t\t\tprint('model object was changes')\n\ndef log_model(path=os.path.join('logs', 'model_log.log')):\n\tdef log_state(model):\n\n\t\[email protected](model)\n\t\tdef wrapper(*args, **kwargs):\n\t\t\tstate_func = deduce_model(model())\n\t\t\t# print(str(state_func()))\n\t\t\tlogger = logging.getLogger('LogModel')\n\t\t\tlogger.setLevel(logging.INFO)\n\t\t\tfile_handler = logging.FileHandler(path)\n\t\t\tlog_format = '{%(levelname)s %(asctime)s %(message)s}'\n\t\t\tlogger.addHandler(file_handler)\n\t\t\tlogger.info(str(datetime.now().strftime(\n\t\t\t\t'%Y-%m-%d %H:%M:%S'))+'\\n'+str(state_func())+'\\n')\n\t\t\treturn state_func\n\n\t\treturn wrapper\n\treturn log_state\n\ndef log_params(path=os.path.join('logs', 'params_log.log')):\n\tdef log_p(params):\n\n\t\[email protected](params)\n\t\tdef wrapper(*args, **kwargs):\n\t\t\tp = params.__defaults__\n\t\t\tlogger = logging.getLogger('LogParams')\n\t\t\tlogger.setLevel(logging.INFO)\n\t\t\tfile_handler = logging.FileHandler(path)\n\t\t\tlog_format = '{%(levelname)s %(asctime)s %(message)s}'\n\t\t\tlogger.addHandler(file_handler)\n\t\t\tlogger.info(str(datetime.now().strftime(\n\t\t\t\t'%Y-%m-%d %H:%M:%S'))+'\\n'+str(p)+'\\n')\n\t\t\t# return state_func\n\n\t\treturn wrapper\n\treturn log_p"
}
] | 2 |
BenjaminCollery/ROB | https://github.com/BenjaminCollery/ROB | 6505237577e0a2eab2b555c49e30bb7823d17a99 | 36eb98576d0c95ee878587845cc0bd4425adc43b | 234b8d4c7581c145b9f399f829c98b09982f1b67 | refs/heads/master | 2020-07-26T22:09:14.270735 | 2019-10-30T13:35:52 | 2019-10-30T13:35:52 | 208,780,152 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6400507092475891,
"alphanum_fraction": 0.6489226818084717,
"avg_line_length": 28.22222137451172,
"blob_id": "545b2bce65ca093bc4fd07ae3ce7dc019de2b49d",
"content_id": "a2faee17b4f2093c8ec652163241b59c6bb2dc15",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 789,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 27,
"path": "/ROB311/TP6/main.py",
"repo_name": "BenjaminCollery/ROB",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom sklearn.cluster import KMeans\nfrom sklearn.metrics import confusion_matrix\n\n\nif __name__ == \"__main__\":\n digits = pd.read_csv(\"optdigits.tra\", header=None).values\n X = digits[:,:-1]\n y = digits[:,-1]\n\n kmeans = KMeans(n_clusters=10)\n kmeans.fit(X)\n\n for i in range(10):\n pred = digits[kmeans.labels_==i][:,-1]\n print(f\"Predicted class {i}\")\n print(f\"\\tmost common element: {np.argmax(np.bincount(pred))} ({np.max(np.bincount(pred))} of {len(pred)})\")\n\n sns.heatmap(confusion_matrix(y, kmeans.labels_), annot=True)\n plt.title(\"Confusion matrix\")\n plt.xlabel(\"True class\")\n plt.ylabel(\"Predicted class\")\n plt.show()\n"
},
{
"alpha_fraction": 0.5785984992980957,
"alphanum_fraction": 0.5958333611488342,
"avg_line_length": 29.520231246948242,
"blob_id": "3fa72750210ad799620762ee569eb5a99bb5b014",
"content_id": "93520930beac23bc787b59a66368b4d07a48b5d3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5283,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 173,
"path": "/ROB311/TP1/KNN.py",
"repo_name": "BenjaminCollery/ROB",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\n\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.utils.multiclass import unique_labels\n\n\nclass KNN:\n \"\"\"\n Class implementing the k-nearest neighbor algorithm.\n \"\"\"\n # fonction qui permet d initialiser la classe\n # inputs : k, nombre de voisins pris en compte dans la methode\n def __init__(self, k):\n self.k = k\n\n # retourne le label predit pour une data\n # inputs : x, data\n def _predict(self, x):\n i_k = np.argpartition(self._distance(x), self.k)[:self.k]\n return np.argmax(np.bincount(self.y[i_k]))\n\n # calcule la distance d un point x avec tous les points du training set\n # inputs : x, data\n def _distance(self, x):\n return np.sqrt(np.sum((self.x-x)**2, axis=1))\n\n # fonction qui permet de definir les data et labels du training set\n # inputs : x, data\n # y, labels\n def fit(self, x, y):\n self.x = x\n self.y = y\n\n # retourne le label predit pour x\n # inputs : x, data\n def predict(self, x):\n return np.array([self._predict(el) for el in x])\n\n # calcul le score du test set\n # inputs : x, data\n # y, labels\n def score(self, x, y):\n y_pred = self.predict(x)\n return 100*sum(y_pred == y)/len(y)\n\n\n# permet de charger le dataset Breast cancer Wisconsin\ndef load_breast_cancer_wisconsin():\n \"\"\"Load the Breast cancer Wisconsin dataset.\"\"\"\n data = pd.read_csv(\"breast-cancer-wisconsin.data\",\n header=None,\n na_values=\"?\"\n )\n\n # Fix wrong types\n data.dropna(inplace=True)\n for col in data.columns:\n data[col] = data[col].astype(int)\n\n x = data.values[:, 1:10]\n y = data.values[:, 10]\n\n return x, y\n\ndef load_haberman():\n \"\"\"Load the Haberman dataset.\"\"\"\n data = pd.read_csv(\"haberman.data\")\n x = data.values[:, 0:3]\n y = data.values[:, 3]\n\n return x, y\n\ndef plot_confusion_matrix(y_true, y_pred, class_names):\n \"\"\"\n Plot a confusion matrix\n Input:\n y_true: array of true labels\n y_pred: array predicted labels\n class_names: list of all class names\n \"\"\"\n plt.title(\"Confusion matrix\")\n ax = sns.heatmap(confusion_matrix(y_true, y_pred),\n cmap=plt.cm.Blues,\n annot=True,\n fmt=\"d\",\n xticklabels=class_names,\n yticklabels=class_names\n )\n ax.set(xlabel=\"True label\", ylabel=\"Predicted label\")\n\n return ax\n\n\nif __name__ == \"__main__\":\n # Brest cancer Wisconsin dataset\n x, y = load_breast_cancer_wisconsin()\n class_names = [2, 4]\n x_train, x_test, y_train, y_test = train_test_split(x, y)\n knn = KNN(6)\n knn.fit(x_train, y_train)\n acc = knn.score(x_test, y_test)\n y_pred = knn.predict(x_test)\n\n fig = plt.figure(f\"Breast cancer Wisconsin dataset (accuracy={acc:.4}%)\")\n plt.subplots_adjust(\n hspace=0.5,\n wspace=0.5\n )\n # Confusion matrix\n plt.subplot(2, 2, 1)\n plot_confusion_matrix(y_test, y_pred, class_names)\n\n # Plots\n hue = [f\"Class {y}\" for y in y_test]\n plt.subplot(2, 2, 2)\n plt.title(\"Feature 1 vs 5\")\n ax = sns.scatterplot(x_test[:,1], x_test[:,5], hue=hue)\n ax.set(xlabel=\"Feature 1\", ylabel=\"Feature 5\")\n # The two classes does not overlap on this plot.\n # It explain the high accuracy on this dataset.\n plt.subplot(2, 2, 3)\n plt.title(\"Feature 1 vs 2\")\n ax = sns.scatterplot(x_test[:,1], x_test[:,2], hue=hue)\n ax.set(xlabel=\"Feature 1\", ylabel=\"Feature 2\")\n plt.subplot(2, 2, 4)\n plt.title(\"Feature 1 vs 5\")\n hue2 = [\"True prediction\" if y_test[i]==y_pred[i] else 
\"Wrong prediction\"\n for i in range(len(y_test))]\n ax = sns.scatterplot(x_test[:,1], x_test[:,5], hue=hue2)\n ax.set(xlabel=\"Feature 1\", ylabel=\"Feature 5\")\n # The misclassified points are those on the edge between the two classes.\n\n # Haberman dataset\n x, y = load_haberman()\n class_names = [1, 2]\n x_train, x_test, y_train, y_test = train_test_split(x, y)\n knn = KNN(6)\n knn.fit(x_train, y_train)\n acc = knn.score(x_test, y_test)\n y_pred = knn.predict(x_test)\n\n plt.figure(f\"Haberman dataset(accuracy={acc:.4}%)\")\n plt.subplots_adjust(\n hspace=0.5,\n wspace=0.5\n )\n # Confusion matrix\n plt.subplot(2, 2, 1)\n plot_confusion_matrix(y_test, y_pred, class_names)\n\n # Plots\n hue = [f\"Class {y}\" for y in y_test]\n plt.subplot(2, 2, 2)\n plt.title(\"Feature 0 vs 1\")\n ax = sns.scatterplot(x_test[:,0], x_test[:,1], hue=hue)\n ax.set(xlabel=\"Feature 0\", ylabel=\"Feature 1\")\n plt.subplot(2, 2, 3)\n plt.title(\"Feature 1 vs 2\")\n ax = sns.scatterplot(x_test[:,1], x_test[:,2], hue=hue)\n ax.set(xlabel=\"Feature 1\", ylabel=\"Feature 2\")\n plt.subplot(2, 2, 4)\n plt.title(\"Feature 0 vs 2\")\n ax = sns.scatterplot(x_test[:,0], x_test[:,2], hue=hue)\n ax.set(xlabel=\"Feature 0\", ylabel=\"Feature 2\")\n # In all the plots the two classes overlap. It explains why knn performs\n # worse on this dataset than on the previous one.\n\n plt.show()\n"
},
{
"alpha_fraction": 0.6468373537063599,
"alphanum_fraction": 0.6513554453849792,
"avg_line_length": 26.66666603088379,
"blob_id": "cfa39323ee48ad7ae6bbe9afffea656b8af3c13f",
"content_id": "4fc6a591c1ae60b9fdbc147dbaec7210f59a9308",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1328,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 48,
"path": "/ROB311/TP4/main.py",
"repo_name": "BenjaminCollery/ROB",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport time\n\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.datasets import fetch_openml\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import Normalizer\nfrom sklearn.svm import SVC\n\n\nif __name__ == \"__main__\":\n t0 = time.time()\n print(\"Loading the dataset...\", end=\"\")\n X_train = pd.read_csv(\"mnist_train.csv\")\n y_train = X_train[\"label\"]\n X_train.drop(\"label\", axis=\"columns\", inplace=True)\n X_test = pd.read_csv(\"mnist_test.csv\")\n y_test = X_test[\"label\"]\n X_test.drop(\"label\", axis=\"columns\", inplace=True)\n print(\"\\tOk\")\n\n print(\"Training the model...\", end=\"\")\n pipe = Pipeline([\n (\"transformer\", Normalizer()),\n (\"classifier\", SVC(gamma=\"scale\", C=10))])\n pipe.fit(X_train, y_train)\n print(\"\\tOk\")\n\n score = pipe.score(X_test, y_test)\n print(f\"Accuracy: {score}\")\n\n y_pred = pipe.predict(X_test)\n cm = confusion_matrix(y_test, y_pred)\n\n t1 = time.time()\n print(f\"Finished in {t1-t0}s\")\n\n sns.set()\n sns.heatmap(data=cm, annot=True)\n plt.title(\"Confusion matrix\")\n plt.xlabel(\"True values\")\n plt.ylabel(\"Predicted values\")\n plt.show()\n"
},
{
"alpha_fraction": 0.43032094836235046,
"alphanum_fraction": 0.4636824429035187,
"avg_line_length": 27.530120849609375,
"blob_id": "4ad0dedd8042ac97a4f6fa58af7b2ba7f0897fc4",
"content_id": "bacd5dd51b082081ee3349ba0d5023cbded9ca6f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2373,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 83,
"path": "/ROB311/TP2/value_iteration.py",
"repo_name": "BenjaminCollery/ROB",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python\nimport numpy as np\n\n\ndef close_enough(V, Vp, theta):\n \"\"\"\n Check if |V(S)-Vp(S)| < theta for all S.\n\n :param V: An array\n :param Vp: Another array\n :type V: numpy.ndarray\n :type Vp: numpy.ndarray\n :return: Whether V and Vp are close close enough\n :rtype: bool\"\"\"\n return np.max(np.abs(V - Vp)) < theta\n\ndef value_iteration(S, A, P, R, theta, gama):\n \"\"\"\n Implementation of the value iteration algorithm.\n\n :param S: The set of states\n :param A: The set of actions\n :param P: The transition matricies\n :param theta: Precision of the result\n :param gama: The gama parameter\n :type S: numpy.ndarray\n :type A: numpy.ndarray\n :type P: numpy.ndarray\n :type theta: float\n :type gama: float\n :returns: The utility of each state and the optimal policy\n :rtype: (numpy.ndarray, numpy.ndarray)\n \"\"\"\n V = np.zeros(S.shape)\n Vp = (1+theta)*np.ones(S.shape)\n # Compute iteratively the utility function\n X = np.zeros((S.shape[0], A.shape[0]))\n while not close_enough(V, Vp, theta):\n V = Vp\n for s in S:\n for a in A:\n X[s][a] = P[a][s]@V.reshape((-1, 1))\n Vp = R + gama*np.max(X, axis=1)\n # Compute the optimal policy for each state\n X = np.zeros((S.shape[0], A.shape[0]))\n for s in S:\n for a in A:\n X[s][a] += P[a][s]@V.reshape((-1, 1))\n pi = np.argmax(X, axis=1)\n\n return V, pi\n\n\nif __name__ == \"__main__\":\n x = 0.25\n y = 0.25\n gama = 0.9\n\n S = np.array([0, 1, 2, 3])\n A = np.array([0, 1, 2])\n # x: action\n # y: état de départ\n # z: état d'arrivé\n # P[x][y][z] = P(z|y,x)\n P = np.array([[[ 0, 0, 0, 0],\n [ 0, 1-x, 0, x],\n [1-y, 0, 0, y],\n [ 1, 0, 0, 0]],\n [[ 0, 1, 0, 0],\n [ 0, 0, 0, 0],\n [ 0, 0, 0, 0],\n [ 0, 0, 0, 0]],\n [[ 0, 0, 1, 0],\n [ 0, 0, 0, 0],\n [ 0, 0, 0, 0],\n [ 0, 0, 0, 0]]\n ])\n R = np.array([0, 0, 1, 10])\n theta = 1e-3\n\n V, pi = value_iteration(S, A, P, R, theta, gama)\n print(\"Utility of each state: \", V)\n print(\"Optimal policy for each state: \", pi)\n"
}
] | 4 |
keul/collective.microdata.contentlisting | https://github.com/keul/collective.microdata.contentlisting | 6aaf94b044a903bd6a3ed15cb977a78b343ad80a | e75852f70289c1b5887bd66de7a89908854ba948 | 979ef7882849150659fa49a0dcab27486187df8a | refs/heads/master | 2021-05-03T04:52:45.413643 | 2012-09-08T12:25:50 | 2012-09-08T12:25:50 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7486631274223328,
"alphanum_fraction": 0.751579999923706,
"avg_line_length": 38.55769348144531,
"blob_id": "b12a19e261cd7fc63dba76d6fdfeb2d8eed9a6eb",
"content_id": "5b8284d47c1268e96fa1283455aa9423452b4cd1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 2057,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 52,
"path": "/README.rst",
"repo_name": "keul/collective.microdata.contentlisting",
"src_encoding": "UTF-8",
"text": "Introduction\n============\n\nThis package is a Plone add-on for sites where `collective.microdata.core`__ is installed. See the\nproduct's page for more information about `microdata`__ and `schema.org implementation`__.\n\n__ https://github.com/keul/collective.microdata.core\n__ http://en.wikipedia.org/wiki/Microdata_%28HTML%29\n__ http://www.schema.org/\n\nYou could like to install this product when you need to get a set of microdata informations from your\nfolder contents pages.\n\nHowever this product only provide basic views implementation for the `Thing`__ vocabulary. A 3rd party\nproduct could extend it for additional vocabularies.\n\n__ http://www.schema.org/Thing\n\nAn implementation example could be taken from looking at `collective.microdata.event`__\n\n__ http://plone.org/products/collective.microdata.event\n\nLimitation and know issues\n==========================\n\nThis package is a bit experimental and it overrides a couple of Plone views:\n\n* \"Standard view\" (``folder_listing``)\n* \"Summary view\" (``folder_summary_view``)\n\nAlso:\n\n * (right now) those views can't be used anymore for full objects (only brains).\n * this is not working on Plone site root (original views will be loaded).\n\nHow to extend\n=============\n\nInstead of directly displaying how a single content looks like, the provided view of this product will look\nbefore for 3rd party implementations.\n\nThe customized view you are using on your folder will read from the current catalog brain the\n``microdata_itemtype`` metadata, then a view with a special name is searched.\n\nThe name must be something like \"``VOCABULARY_URL VIEW_ID_item``\".\n\nFor example, if the current content is providing the Person vocabulary (http://www.schema.org/Person)\nand you are listing it in the Plone \"Standard view\" (folder_listing), the view used for displaying the\nsingle entry will be named \"``http://www.schema.org/Person folder_listing_item``\".\n\nIf this view is not found, the brain will be displayed with default views, provided with the product, that\nwill display the content in the standard Plone way.\n"
},
{
"alpha_fraction": 0.7061728239059448,
"alphanum_fraction": 0.7086419463157654,
"avg_line_length": 32.83333206176758,
"blob_id": "92c2ac9b76b2f2a82a0e94aa1fd3d57078e07bb9",
"content_id": "940204f6519fb686878195ced14154a6cf6ae9a3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 405,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 12,
"path": "/collective/microdata/contentlisting/interfaces.py",
"repo_name": "keul/collective.microdata.contentlisting",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf8 -*-\n\nfrom zope.interface import Interface\n\nclass IMicrodataListingLayer(Interface):\n \"\"\"Marker interface for the collective.microdata.contentlisting layer\"\"\"\n\nclass IItemListingView(Interface):\n \"\"\"A view able to display a an item with microdatat informations\"\"\"\n \n def __call__(item, microdata, *args, **kwargs):\n \"\"\"Render an item and it's microdata informations\"\"\""
},
{
"alpha_fraction": 0.6549707651138306,
"alphanum_fraction": 0.6584795117378235,
"avg_line_length": 31.846153259277344,
"blob_id": "12da9256f795ef30af07dae56ee337c1119565f0",
"content_id": "91eaab632ee5f126834444b9af04fd595218c1f5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 855,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 26,
"path": "/collective/microdata/contentlisting/tests/base.py",
"repo_name": "keul/collective.microdata.contentlisting",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf8 -*-\n\nfrom Products.Five.browser import BrowserView\n\nclass NewsItemTestingMicrodataFolderListingAdapter(BrowserView):\n \n def __call__(self, item, microdata, *args, **kwargs):\n return \\\n\"\"\"<dt itemscope=\"itemscope\" itemtype=\"http://schema.org/Book\">\n<span itemprop=\"name\">Let\\'s display the \"%s\" book</span>\n</dt>\n\"\"\" % microdata.name\n\n\nclass NewsItemTestingMicrodataFolderSummaryViewAdapter(BrowserView):\n \n def __call__(self, item, microdata, *args, **kwargs):\n return \\\n\"\"\"<div class=\"tileItem visualIEFloatFix\" itemscope=\"itemscope\" itemtype=\"http://schema.org/Book\">\n <h2 class=\"tileHeadline\">\n <a href=\"http://nohost/plone/folder/news\" class=\"summary url\" itemprop=\"url\">\n <span itemprop=\"name\">Let\\'s summarize the \"%s\" book</span>\n </a>\n </h2>\n</div>\n\"\"\" % microdata.name\n\n"
},
{
"alpha_fraction": 0.6433430314064026,
"alphanum_fraction": 0.6438289880752563,
"avg_line_length": 30.66153907775879,
"blob_id": "6cd4d4337f0a5658be919c5d75a45214ff6a4469",
"content_id": "0cd5b2471ec9aedffdf70896a0f6e7958bbe2b1b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2058,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 65,
"path": "/collective/microdata/contentlisting/browser/view.py",
"repo_name": "keul/collective.microdata.contentlisting",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf8 -*-\n\nfrom zope.interface import implements\n\nfrom zope.component import queryAdapter, getMultiAdapter\nfrom zope.component.interfaces import ComponentLookupError\n\nfrom Products.Five.browser import BrowserView\nfrom plone.memoize import view\n\nfrom collective.microdata.core.interfaces import IMicrodataVocabulary\n\nfrom collective.microdata.contentlisting.interfaces import IItemListingView\n\nclass BaseListingView(BrowserView):\n\n TEMPLATE_ID = ''\n\n @view.memoize\n def get_microdata(self, brain):\n # look for a type-specific adapter, if any\n adapter = queryAdapter(brain, interface=IMicrodataVocabulary,\n name=brain.microdata_itemtype)\n if not adapter:\n # fallback to basic Thing adapter\n adapter = queryAdapter(brain, interface=IMicrodataVocabulary,\n name=u'') \n return adapter\n\n\n def render_item(self, item):\n microdata = self.get_microdata(item)\n try:\n view = getMultiAdapter ((self.context, self.request),\n name='%s %s_item' % (microdata.microdata_vocabulary, self.TEMPLATE_ID))\n except ComponentLookupError:\n view = getMultiAdapter ((self.context, self.request),\n name='%s_item' % self.TEMPLATE_ID)\n return view(item, microdata)\n\n\nclass ListingView(BaseListingView):\n \"\"\"View for the folder_listing template\"\"\"\n TEMPLATE_ID = 'folder_listing'\n\n\nclass SummaryView(BaseListingView):\n \"\"\"View for the folder_summary_view template\"\"\"\n\n TEMPLATE_ID = 'folder_summary_view'\n\n\nclass BaseItemListingView(BrowserView):\n \"\"\"Base class for listing a single item\"\"\"\n implements(IItemListingView)\n\n def __init__(self, context, request):\n self.context = context\n self.request = request\n self.item = None\n \n def __call__(self, item, microdata, *args, **kwargs):\n self.item = item\n self.microdata = microdata\n return self.index()\n"
},
{
"alpha_fraction": 0.6216683983802795,
"alphanum_fraction": 0.6251298189163208,
"avg_line_length": 46.344261169433594,
"blob_id": "5963582e456dde4ef2c09225cee2133731cca659",
"content_id": "04124226599ca51c3ea14e6d41c4ec1e9129dc3d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2889,
"license_type": "no_license",
"max_line_length": 141,
"num_lines": 61,
"path": "/collective/microdata/contentlisting/tests/test_microdata_contents.py",
"repo_name": "keul/collective.microdata.contentlisting",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nimport unittest\n\nfrom zope import interface\n\nfrom collective.microdata.contentlisting.testing import MICRODATA_CONTENTLISTING_INTEGRATION_TESTING\nfrom collective.microdata.core.interfaces import IMicrodataCoreLayer\n\nfrom collective.microdata.contentlisting.interfaces import IMicrodataListingLayer\n\nclass TestMicrodataFolderListing(unittest.TestCase):\n \n layer = MICRODATA_CONTENTLISTING_INTEGRATION_TESTING\n \n def markRequestWithLayer(self):\n # to be removed when p.a.testing will fix https://dev.plone.org/ticket/11673\n request = self.layer['request']\n interface.alsoProvides(request, IMicrodataCoreLayer)\n interface.alsoProvides(request, IMicrodataListingLayer)\n \n def setUp(self):\n self.markRequestWithLayer()\n portal = self.layer['portal']\n request = self.layer['request']\n portal.invokeFactory(type_name='Folder', id=\"folder\")\n self.folder = portal.folder\n self.folder.invokeFactory(type_name='News Item',\n id='news',\n title=\"The Lord of the Rings\",\n description=\"Boromir will die, sooner or later\",\n text=\"All begin in the Shrine...\",\n creator=\"J. R. R. Tolkien\")\n self.folder.invokeFactory(type_name='Document',\n id='front-page',\n title=\"A useless document\",\n description=\"This will be ignored\",\n text=\"Lorem ipsum\")\n request.set('ACTUAL_URL', 'http://nohost/plone/folder')\n\n def test_folder_listing_view_microdata_present(self):\n folder = self.folder\n folder.setLayout('@@folder_listing')\n self.assertTrue(folder().find('<dt itemscope=\"itemscope\" itemtype=\"http://schema.org/Book\">')>-1)\n self.assertTrue(folder().find('<dt itemscope=\"itemscope\" itemtype=\"http://schema.org/Thing\">')>-1)\n\n def test_folder_listing_view_custom(self):\n folder = self.folder\n folder.setLayout('@@folder_listing')\n self.assertTrue('Let\\'s display the \"The Lord of the Rings\" book' in folder())\n\n def test_folder_summary_view_microdata_present(self):\n folder = self.folder\n folder.setLayout('@@folder_summary_view')\n self.assertTrue(folder().find('<div class=\"tileItem visualIEFloatFix\" itemscope=\"itemscope\" itemtype=\"http://schema.org/Book\">')>-1)\n self.assertTrue(folder().find('<div class=\"tileItem visualIEFloatFix\" itemscope=\"itemscope\" itemtype=\"http://schema.org/Thing\">')>-1)\n\n def test_folder_summary_view_custom(self):\n folder = self.folder\n folder.setLayout('@@folder_summary_view')\n self.assertTrue('Let\\'s summarize the \"The Lord of the Rings\" book' in folder())\n\n"
},
{
"alpha_fraction": 0.7359004020690918,
"alphanum_fraction": 0.7370672821998596,
"avg_line_length": 38.53845977783203,
"blob_id": "a099da2c114fbd91b2f71901fbc60ea49b70cc61",
"content_id": "af7d18feaa321559337932ecc435ff0bc3494d39",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2571,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 65,
"path": "/collective/microdata/contentlisting/testing.py",
"repo_name": "keul/collective.microdata.contentlisting",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nimport zope.component\nimport zope.interface\n\nfrom zope.configuration import xmlconfig\n\nimport zope.publisher.interfaces.browser\n\nfrom plone.testing import z2\n\nfrom plone.app.testing import PLONE_FIXTURE\nfrom plone.app.testing import IntegrationTesting\nfrom plone.app.testing import FunctionalTesting\nfrom plone.app.testing import applyProfile\nfrom plone.app.testing import setRoles\nfrom plone.app.testing import TEST_USER_ID\n\nfrom collective.microdata.core.testing import MicrodataCore\n\nimport collective.microdata.contentlisting.interfaces\nimport collective.microdata.contentlisting.tests.base\n\nclass MicrodataContentListing(MicrodataCore):\n\n defaultBases = (PLONE_FIXTURE, )\n\n def setUpZope(self, app, configurationContext):\n MicrodataCore.setUpZope(self, app, configurationContext)\n # Load ZCML for this package\n import collective.microdata.contentlisting\n xmlconfig.file('configure.zcml',\n collective.microdata.contentlisting,\n context=configurationContext)\n z2.installProduct(app, 'collective.microdata.contentlisting')\n\n zope.component.provideAdapter(\n collective.microdata.contentlisting.tests.base.NewsItemTestingMicrodataFolderListingAdapter,\n (zope.interface.Interface,\n collective.microdata.contentlisting.interfaces.IMicrodataListingLayer),\n provides=zope.publisher.interfaces.browser.IBrowserView,\n name=u'http://schema.org/Book folder_listing_item'\n )\n\n zope.component.provideAdapter(\n collective.microdata.contentlisting.tests.base.NewsItemTestingMicrodataFolderSummaryViewAdapter,\n (zope.interface.Interface,\n collective.microdata.contentlisting.interfaces.IMicrodataListingLayer),\n provides=zope.publisher.interfaces.browser.IBrowserView,\n name=u'http://schema.org/Book folder_summary_view_item'\n )\n\n\n def setUpPloneSite(self, portal):\n applyProfile(portal, 'collective.microdata.contentlisting:default')\n setRoles(portal, TEST_USER_ID, ['Member', 'Manager'])\n\n\nMICRODATA_CONTENTLISTING_FIXTURE = MicrodataContentListing()\nMICRODATA_CONTENTLISTING_INTEGRATION_TESTING = \\\n IntegrationTesting(bases=(MICRODATA_CONTENTLISTING_FIXTURE, ),\n name=\"MicrodataContentListing:Integration\")\nMICRODATA_CONTENTLISTING_FUNCTIONAL_TESTING = \\\n FunctionalTesting(bases=(MICRODATA_CONTENTLISTING_FIXTURE, ),\n name=\"MicrodataContentListing:Functional\")\n\n"
}
] | 6 |
nanahou/LPS_extraction | https://github.com/nanahou/LPS_extraction | 0e59d9f1bf70cff817da3e461435efe969302da1 | 00992ef8d151f2c0a772432bd7d746d9b8e40a11 | 7a6face6008fa67081e00ad0ad2bf2a2801c2696 | refs/heads/master | 2020-12-28T03:04:22.804190 | 2020-06-11T02:00:45 | 2020-06-11T02:00:45 | 238,160,949 | 4 | 2 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5202109217643738,
"alphanum_fraction": 0.5536028146743774,
"avg_line_length": 21.760000228881836,
"blob_id": "91d46dd89fc1882e58133a50e6121e594300eaf4",
"content_id": "19cb15db1c02e18592517d7360536b76272132a0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 569,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 25,
"path": "/audioread.py",
"repo_name": "nanahou/LPS_extraction",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 16/11/17\n# @Author : NANA HOU\n# @Site : https://github.com/nanahou\n# @File : audioread.py\n\n\nimport scipy.io.wavfile as wav\nimport numpy as np\n\n\ndef audioread(filename):\n (rate, sig) = wav.read(filename)\n # print('just read:', sig)\n if sig.dtype == 'int16':\n nb_bits = 16\n elif sig.dtype == 'int32':\n nb_bits = 32\n else:\n print('no type match!', sig.dtype)\n \n max_nb_bit = float(2 ** (nb_bits - 1))\n sig = sig / (max_nb_bit + 1.0)\n return rate, sig, nb_bits\n"
},
{
"alpha_fraction": 0.6914893388748169,
"alphanum_fraction": 0.708776593208313,
"avg_line_length": 31,
"blob_id": "834c2a43035e31e7c5b76a262543d666ec454743",
"content_id": "616715605c97031417bff4b7d58b1dba48c69117",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1504,
"license_type": "no_license",
"max_line_length": 187,
"num_lines": 47,
"path": "/README.md",
"repo_name": "nanahou/LPS_extraction",
"src_encoding": "UTF-8",
"text": "# LPS_extraction\n============================================\n\nThe script is to extract log-power-spectrum (LPS) for speech enhancement and bandwidth extension.\n\n### Requirements\n\nThe model is implemented in PyTorch and uses several additional libraries. Specifically, we used:\n\n* `pytorch==1.0`\n* `python==3.6.8`\n* `numpy==1.15.4`\n* `scipy==1.2.0`\n\n### Setup\n\nTo install this package, simply clone the git repo:\n\n```\ngit clone https://github.com/nanahou/LPS_extraction.git;\ncd LPS_extraction;\n```\n\n### Contents\n\nThe repository is structured as follows.\n\n* `./data`: some audio samples from dataset[1]\n* `audioread.py`: the function to read audios\n* `extract_LPS.py`: the main scripts to extract features\n* `normhamming.py`: the function to apply a normalized square root hamming periodic window \n* `plot_spectrum.py`: the function to plot the LPS features\n* `sigproc.py`: including the functions to frame signals, deframe signals from [2]\n\n### Usage\n\n* If extracting LPS features, you only need to replace the path in `extract_LPS.py` with your own data path and run: \n\n ```python extract_LPS.py```\n\n* If plotting your features, you only need to call the function in `plot_spectrum.py`.\n\n### Reference\n```\n[1]. Valentini-Botinhao, C., Wang, X., Takaki, S. and Yamagishi, J., 2016. Speech Enhancement for a Noise-Robust Text-to-Speech Synthesis System Using Deep Recurrent Neural Networks. In Interspeech (pp. 352-356).\n[2]. https://github.com/jameslyons/python_speech_features/blob/master/python_speech_features/sigproc.py\n```\n"
},
{
"alpha_fraction": 0.6146496534347534,
"alphanum_fraction": 0.6305732727050781,
"avg_line_length": 25.125,
"blob_id": "e2ce59a6ae204b8f3d532b16229eaad217c0a0fe",
"content_id": "66763629ccfdd243b8b56a6895908e6b421e8791",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 628,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 24,
"path": "/plot_spectrum.py",
"repo_name": "nanahou/LPS_extraction",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 31/1/18 11:50 AM\n# @Author : HOU NANA\n# @Site : http://github.com/nanahou\n# @File : plot_spectrum.py\n\nimport torch\nimport matplotlib.pyplot as plt\n\n\ndef plot_spectrum(d_spectrum, name):\n plt.interactive(False)\n color_map = plt.get_cmap('jet')\n plt.figure()\n d_spectrum = torch.squeeze(d_spectrum)\n print(d_spectrum.size())\n d_spectrum = d_spectrum.data.cpu().numpy()\n plt.imshow(d_spectrum, cmap=color_map)\n # plt.imshow(d_spectrum)\n plt.title(str(name))\n plt.show(block=True)\n input(\"Press Enter to exit..\")\n plt.close('all')\n\n"
},
{
"alpha_fraction": 0.5404275059700012,
"alphanum_fraction": 0.5766728520393372,
"avg_line_length": 27.08108139038086,
"blob_id": "952d1eafb2ac27afc335171b900af3e23f337fcb",
"content_id": "5b6ad30bff4dca91b5315c58816f0f02060d9cfa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2152,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 74,
"path": "/extract_LPS.py",
"repo_name": "nanahou/LPS_extraction",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# @Time : 5/17/2019 3:33 PM\r\n# @Author : HOU NANA\r\n# @Site : http://github.com/nanahou\r\n# @File : extract_LPS.py\r\n\r\n\r\n'''\r\n This file is for extracting log-power-spectrum.\r\n'''\r\nimport numpy as np\r\nimport torch\r\nimport pickle\r\nimport time\r\nimport os\r\nimport sys\r\nfrom audioread import audioread\r\nfrom normhamming import normhamming\r\nfrom plot_spectrum import *\r\nfrom sigproc import *\r\n\r\n\r\ndef get_power_spec(filename, fft_len, frame_shift):\r\n # 1st process clean dataset\r\n rate, sig, nbits = audioread(filename)\r\n frames = framesig(sig, fft_len, frame_shift, lambda x: normhamming(x), True)\r\n # 2nd get power\r\n power_spec = powspec(frames, fft_len)\r\n power_spec = np.absolute(power_spec)\r\n return power_spec\r\n\r\n\r\ndef main():\r\n t_start = time.time()\r\n\r\n thred = -4\r\n fft_len_16k, frame_shift_16k = 512, 256\r\n data_path_16k = './data/wav/'\r\n LPS_path_16k = './data/LPS/'\r\n \r\n #create the output directory\r\n for dir in [LPS_path_16k]:\r\n if not os.path.exists(dir):\r\n os.makedirs(dir)\r\n \r\n #scan all the wav data under the path\r\n data_list = [x for x in os.listdir(data_path_16k) if x.endswith(\".wav\")]\r\n\r\n count = 1.0\r\n for item in data_list:\r\n item_16k = data_path_16k + item\r\n file_lps_16k = LPS_path_16k + item[:thred] + '.pkl'\r\n \r\n # extract magnitude and power\r\n power_16k = get_power_spec(item_16k, fft_len_16k, frame_shift_16k)\r\n power_16k = torch.from_numpy(power_16k.astype(np.float)).float()\r\n \r\n # convert to log space\r\n log_16k = torch.log(power_16k)\r\n # print(log_16k.size())\r\n \r\n #save the feature into .pkl file\r\n with open(file_lps_16k, 'wb') as out_dynamic_c:\r\n pickle.dump(log_16k, out_dynamic_c, True)\r\n \r\n if count % 1000 == 0:\r\n print('get features: [{}/{} ({:.0f}%)]'.format(count, len(data_list), 100. * count / len(data_list)))\r\n count = count + 1\r\n\r\n print('consuming: %f hours' % ((time.time() - t_start) / 3600))\r\n\r\nif __name__ == '__main__':\r\n main()\r\n"
},
{
"alpha_fraction": 0.5941498875617981,
"alphanum_fraction": 0.6361974477767944,
"avg_line_length": 21.79166603088379,
"blob_id": "992289d3b6adda2b3bdea1108ae58891b0238d19",
"content_id": "672e403168b744fac53c057b9f1b681c0d88317e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 547,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 24,
"path": "/normhamming.py",
"repo_name": "nanahou/LPS_extraction",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Copyright 2017 Chenglin Xu\n\n\"\"\"\nnormalized square root hamming periodic window\n\"\"\"\n\nimport numpy\nfrom scipy.signal import hamming\n\n\ndef normhamming(fft_len):\n if fft_len == 512:\n frame_shift = 160\n elif fft_len == 256:\n frame_shift = 128\n else:\n print(\"Wrong fft_len, current only support 16k/8k sampling rate wav\")\n exit(1)\n win = numpy.sqrt(hamming(fft_len, False))\n win = win/numpy.sqrt(numpy.sum(numpy.power(win[0:fft_len:frame_shift],2)))\n return win\n"
}
] | 5 |
hirunwe/stock-prediction-lstm | https://github.com/hirunwe/stock-prediction-lstm | 8380d25c5126f51dd36db475cc0312f4b8b97ee3 | 90c82575e17c7809cb216b35c749ca6eee39b4c9 | 31d40c0ac07a21fd9cfe32e57a7c31451ff21aca | refs/heads/main | 2023-08-24T18:13:49.704022 | 2021-11-06T13:57:51 | 2021-11-06T13:57:51 | 425,255,812 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6706796288490295,
"alphanum_fraction": 0.7133980393409729,
"avg_line_length": 23.990291595458984,
"blob_id": "7a6d3e1a5cdd91a224f721fe305c439669a0fbb4",
"content_id": "9c75867a099b1a0273b8611e2940f253eb1d46d0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2575,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 103,
"path": "/stock-prediction.py",
"repo_name": "hirunwe/stock-prediction-lstm",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport os\nimport pandas_datareader as data\nfrom keras.models import load_model\nfrom sklearn.preprocessing import MinMaxScaler\n\n\nimport streamlit as st\n\nstart = '2010-01-01'\nend = '2020-12-31'\n\nst.title(\"Stock Prediction App\")\n\nuser_input = st.text_input('Enter Stock Ticker', 'AAPL')\n\ndf = data.DataReader(user_input, 'yahoo', start, end)\n\n#Describing data\n\nst.subheader('Data from 2010 to 2020')\nst.write(df.describe())\n\n#visualizations\n\nst.subheader('Closing price vs. Time graph')\nfig = plt.figure(figsize=(12,6))\nplt.plot(df.Close)\nplt.xlabel('Year')\nplt.ylabel('Closing Price ($)')\nst.pyplot(fig)\n\n\nst.subheader('Closing price vs. Time graph with 100 Moving Average')\nfig = plt.figure(figsize=(12,6))\nma100 = df.Close.rolling(100).mean()\nplt.plot(df.Close, label= 'Actual Closing Price')\nplt.plot(ma100, 'r', label= 'Moving Average (100 days)')\nplt.xlabel('Year')\nplt.ylabel('Closing Price ($)')\nplt.legend()\nst.pyplot(fig)\n\n\nst.subheader('Closing price vs. Time graph with 100 and 200 Moving Averages')\nfig = plt.figure(figsize=(12,6))\nma100 = df.Close.rolling(100).mean()\nma200 = df.Close.rolling(200).mean()\nplt.plot(df.Close,label= 'Actual Closing Price')\nplt.plot(ma100, 'r',label= 'Moving Average (100 days)')\nplt.plot(ma200, 'g',label= 'Moving Average (200 days)')\nplt.xlabel('Year')\nplt.ylabel('Closing Price ($)')\nplt.legend()\nst.pyplot(fig)\n\n#splitting data intro training and testing\n\ndata_training = pd.DataFrame(df['Close'][0:int(len(df)*0.7)])\ndata_testing = pd.DataFrame(df['Close'][int(len(df)*0.7):int(len(df))])\nscaler = MinMaxScaler(feature_range=(0,1))\n\ndata_training_array = scaler.fit_transform(data_training)\n\n\n\n#loading the model\n\nmodel = load_model(\"ml_modal.h5\")\n\n#testing the model\npast_100_days = data_training.tail(100)\nfinal_df = past_100_days.append(data_testing, ignore_index = True)\ninput_data = scaler.fit_transform(final_df)\n\nx_test = []\ny_test = []\n\nfor i in range(100, input_data.shape[0]):\n x_test.append(input_data[i-100: i])\n y_test.append(input_data[i,0])\n\nx_test, y_test = np.array(x_test), np.array(y_test)\n\ny_predicted = model.predict(x_test)\n\nscaler = scaler.scale_\n\nscale_factor = 1/scaler[0]\ny_predicted = y_predicted * scale_factor\ny_test = y_test * scale_factor\n\n#Visualize the predictions\nst.subheader('Predictions vs. the Actual Value')\nfig2 = plt.figure(figsize=(12,6))\nplt.plot(y_test, 'b', label = 'Actual Value')\nplt.plot(y_predicted, 'r', label = 'Predicted Value')\nplt.xlabel('Time')\nplt.ylabel('Value $')\nplt.legend()\nst.pyplot(fig2)\n\n"
},
{
"alpha_fraction": 0.4689655303955078,
"alphanum_fraction": 0.6551724076271057,
"avg_line_length": 16.125,
"blob_id": "10d36166f159c0ad1e83b7bd547e05ee117c172b",
"content_id": "8251b72e870b6c70aab238ccd492230b0edfc909",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 145,
"license_type": "no_license",
"max_line_length": 25,
"num_lines": 8,
"path": "/requirements.txt",
"repo_name": "hirunwe/stock-prediction-lstm",
"src_encoding": "UTF-8",
"text": "keras==2.6.0\r\nmatplotlib==3.3.4\r\nnumpy==1.19.5\r\npandas==1.2.4\r\npandas_datareader==0.10.0\r\nplotly==5.3.0\r\nscikit_learn==1.0.1\r\nstreamlit==0.87.0\r\n"
},
{
"alpha_fraction": 0.5846862196922302,
"alphanum_fraction": 0.6164283752441406,
"avg_line_length": 28.47142791748047,
"blob_id": "03803cbef417cf55bd9cf0b618a936e49f9e9cd0",
"content_id": "1582879b66060163f6c8dcc8ceeefe7a69c6aee8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4127,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 140,
"path": "/stock.py",
"repo_name": "hirunwe/stock-prediction-lstm",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport os\nimport plotly.express as px\nimport pandas_datareader as data\nfrom keras.models import load_model\nfrom sklearn.preprocessing import MinMaxScaler\nimport plotly.graph_objects as go\nimport streamlit as st\n\nstart = '2010-01-01'\nend = '2020-12-31'\n\nst.title(\"Stock Prediction App\")\n\nuser_input = st.text_input('Enter Stock Ticker', 'AAPL')\n\ndf = data.DataReader(user_input, 'yahoo', start, end)\ndf['Date'] = df.index\ndf['MA100'] = df.Close.rolling(100).mean()\ndf['MA200'] = df.Close.rolling(200).mean()\n\n#Describing data\n\nst.subheader('Data from 2010 to 2020')\nst.write(df.describe())\n\n#visualizations\n#1st Plot\nfig = px.line(df,x=\"Date\",y=\"Close\",title = 'Closing price vs. Time graph')\nfig.update_layout(xaxis_title='Year',\n yaxis_title='Closing Price ($)',\n template=\"simple_white\")\nst.plotly_chart(fig)\n\n#2nd Plot\n\nfig2 = go.Figure()\nfig2.update_layout(title = 'Closing price vs. Time graph with 100 Moving Average', xaxis_title='Year',\n yaxis_title='Closing Price ($)',\n template=\"simple_white\")\nfig2.add_trace(go.Scatter(x=df.Date, y=df.Close, \n line=dict(color='blue', width=1),\n name='Actual Closing Price'))\nfig2.add_trace(go.Scatter(x=df.Date, y=df.MA100, \n line=dict(color='orange', width=1),\n name='Moving Average (100 days)'))\nfig2.update_layout(legend=dict(\n orientation=\"h\",\n yanchor=\"bottom\",\n y=1.02,\n xanchor=\"right\",\n x=1\n))\nst.plotly_chart(fig2)\n\n#3rd PLot\n\nfig3 = go.Figure()\nfig3.update_layout(title = 'Closing price vs. Time graph with 100 and 200 Moving Averages', xaxis_title='Year',\n yaxis_title='Closing Price ($)',\n template=\"simple_white\")\nfig3.add_trace(go.Scatter(x=df.Date, y=df.Close, \n line=dict(color='blue', width=1),\n name='Actual Closing Price'))\nfig3.add_trace(go.Scatter(x=df.Date, y=df.MA100, \n line=dict(color='red', width=1),\n name='Moving Average (100 days)'))\nfig3.add_trace(go.Scatter(x=df.Date, y=df.MA200, \n line=dict(color='green', width=1),\n name='Moving Average (200 days)'))\nfig3.update_layout(legend=dict(\n orientation=\"h\",\n yanchor=\"bottom\",\n y=1.02,\n xanchor=\"right\",\n x=1\n))\nst.plotly_chart(fig3)\n\n\n#splitting data intro training and testing\n\ndata_training = pd.DataFrame(df['Close'][0:int(len(df)*0.7)])\ndata_testing = pd.DataFrame(df['Close'][int(len(df)*0.7):int(len(df))])\nscaler = MinMaxScaler(feature_range=(0,1))\n\ndata_training_array = scaler.fit_transform(data_training)\n\n\n\n#loading the model\n\nmodel = load_model(r'C:\\Users\\hirun\\Documents\\Stock-Final-Project\\ml_modal.h5')\n\n#testing the model\npast_100_days = data_training.tail(100)\nfinal_df = past_100_days.append(data_testing, ignore_index = True)\ninput_data = scaler.fit_transform(final_df)\n\nx_test = []\ny_test = []\n\nfor i in range(100, input_data.shape[0]):\n x_test.append(input_data[i-100: i])\n y_test.append(input_data[i,0])\n\nx_test, y_test = np.array(x_test), np.array(y_test)\n\ny_predicted = model.predict(x_test)\n\n\nscaler = scaler.scale_\n\nscale_factor = 1/scaler[0]\ny_predicted = y_predicted * scale_factor\ny_test = y_test * scale_factor\ny_predicted = y_predicted.flatten() \n\n#Visualize the predictions\n\nfig5 = go.Figure()\nfig5.update_layout(title = 'Predictions vs. 
the Actual Value', xaxis_title='Time',\n yaxis_title='Value $',\n template=\"simple_white\")\nfig5.add_trace(go.Scatter(y=y_test, \n line=dict(color='blue', width=1),\n name='Actual Value'))\nfig5.add_trace(go.Scatter(y=y_predicted, \n line=dict(color='orange', width=1),\n name='Predicted Value'))\nfig5.update_layout(legend=dict(\n orientation=\"h\",\n yanchor=\"bottom\",\n y=1.02,\n xanchor=\"right\",\n x=1\n))\nst.plotly_chart(fig5)\n\n"
}
] | 3 |
CarlosViniMSouza/Projeto-Livro-Python
|
https://github.com/CarlosViniMSouza/Projeto-Livro-Python
|
d3bc609c4a3f0ca1671b9fe0434473aab181c50c
|
d9731712d611acd1ace9f3513c268bcf2ef38477
|
980c6eb12e62175bfb2991649c426655558ae817
|
refs/heads/master
| 2022-04-13T19:30:56.476393 | 2020-03-27T08:08:26 | 2020-03-27T08:08:26 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.706717848777771,
"alphanum_fraction": 0.7124760150909424,
"avg_line_length": 34.18055725097656,
"blob_id": "df43a0ebf3326f82b920e106d65cd679332bd6da",
"content_id": "0d48d34d49b759c2c42e7304f879f309316b06d0",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2683,
"license_type": "permissive",
"max_line_length": 100,
"num_lines": 72,
"path": "/Aulas01e02.py",
"repo_name": "CarlosViniMSouza/Projeto-Livro-Python",
"src_encoding": "UTF-8",
"text": "import tkinter as tk\r\n\r\nroot = tk.Tk()\r\nS = tk.Scrollbar(root)\r\nT = tk.Text(root, height=15, width=200)\r\nS.pack(side=tk.RIGHT, fill=tk.Y)\r\nT.pack(side=tk.LEFT, fill=tk.Y)\r\nS.config(command=T.yview)\r\nT.config(yscrollcommand=S.set)\r\nquote = \"\"\"Ok, Aqui é onde começamos a jornada na programação\r\n('Aula 01e02 - Introdução ao Python e Entendendo Primeiros conceitos')\r\n\r\n(Variaveis, Numeros, Matematica Basica)\r\n(As variáveis são nomes que damos para referenciar valores ao longo do nosso programa)\r\n(Aqui dizemos que x e y são variáveis, eles primeiro armazenaram os valores)\r\n(e depois os usamos para calcular operçaões)\r\n\r\n('arq02.txt','w')\r\n(além disso mudamos o valor que y representava atribuindo a ele o valor 7.)\r\n(podem facilitar a interpretação de um programa.)\r\n\r\n(Uma breve observação sobre variáveis em Python)\r\n(Variáveis servem para o programa reservar espaço na memoria para um determinado tipo de informação)\r\n(tudo em Python são objetos, então, as variáveis Python armazenam uma referência a um objeto.)\r\n\r\n(Como representar as variáveis)\r\n\r\n(podem possuir caracteres numéricos.)\r\n(não podem ter espaço entre elas)\r\n(não podem começar com números)\r\n(não podem ter caracteres de acentuação tais como ^, ~, !)\r\n(não podem ter caracteres especiais como @, & nem hifens)\r\n(determina vários métodos especiais e variáveis que usam esse padrão)\r\n(Na tabela abaixo exemplos de nomes validos e inválidos)\r\n\r\n(Nomes de variáveis validos e inválidos)\r\n(area_triangulo' - 'area triangulo' #não é permitido espaço em branco)\r\n(samuel_john ou samuel_&_john (não são validos caracteres especiais))\r\n(_inicio total-val (não são valido hifens))\r\n(Por fim, Python diferencia nomes de variáveis)\r\n(com letras maiúsculas e minúsculas exemplo: AREA, Area e area são 3 variáveis diferentes)\r\n\r\n(Operadores Aritméticos no Python)\r\n(+ - soma\\n)\r\n(– - subtração)\r\n(* - multiplicação)\r\n(/ - divisão)\r\n(// - Divisão trunca a parte fracionaria)\r\n(% - Produz o resto da divisão)\r\n(** - Exponenciação)\r\n\r\n(Operadores de comparação no Python)\r\n(abs(x) - Retorna o valor absoluto de x)\r\n(pow(x, y) - O mesmo x**y)\r\n(round(x, n) - Retorna um int ou float)\r\n\r\n(Operadores de Descrição no Python)\r\n( < - menor que)\r\n(<= - menor ou igual a)\r\n( > - maior que)\r\n(>= - maior ou igual a)\r\n(== - igual)\r\n(!= - diferente)\r\n\r\n(Operadores Lógicos no Python)\r\n(and P and Q - Resulta 'True' só se P e Q forem verdade)\r\n(or P or Q - Resulta 'False' só se P ou Q forem falsos; se não retorna True)\r\n(not P Se P - Resulta 'False' se P é falso; se não retorna True)\r\n##fim das aulas 1 e 2##\r\n\"\"\"\r\nT.insert(tk.END, quote)\r\ntk.mainloop()\r\n"
},
{
"alpha_fraction": 0.5342870950698853,
"alphanum_fraction": 0.6329551339149475,
"avg_line_length": 22.7439022064209,
"blob_id": "daaec720cd82b4afa842708b26947b574440b874",
"content_id": "3b201c6c286c4fe7500a7d25ad299048485d94c8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2029,
"license_type": "permissive",
"max_line_length": 116,
"num_lines": 82,
"path": "/FuncaoBotoes.py",
"repo_name": "CarlosViniMSouza/Projeto-Livro-Python",
"src_encoding": "UTF-8",
"text": "from tkinter import *\r\n\r\njanela = Tk()\r\njanela.geometry(\"450x450+300+300\")\r\n\r\ndef bt_click1():\r\n import Aula01e02.py\r\n\r\ndef bt_click2():\r\n import Aula3.py\r\n\r\ndef bt_click3():\r\n import Aula4.py\r\n\r\ndef bt_click4():\r\n import Aula5.py\r\n\r\ndef bt_click5():\r\n import Aula6.py\r\n\r\ndef bt_click6():\r\n import Aula7.py\r\n\r\ndef bt_click7():\r\n import Aula8.py\r\n\r\ndef bt_click8():\r\n import Aula9.py\r\n\r\ndef bt_click9():\r\n import Aula10.py\r\n\r\ndef bt_click10():\r\n import MenuAulasProntas.py\r\n\r\n\r\nbt1 = Button(janela, width=90, text=\"Aula01e02\", bg=\"red\", height=\"7\")\r\nbt1[\"command\"] = (bt_click1)\r\nbt1.place(x=10, y=60)\r\n\r\nbt2 = Button(janela, width=90, text=\"Aula03\", bg=\"blue\", height=\"7\")\r\nbt2[\"command\"] = (bt_click2)\r\nbt2.place(x=10, y=180)\r\n\r\nbt3 = Button(janela, width=90, text=\"Aula04\", bg=\"yellow\", height=\"7\")\r\nbt3[\"command\"] = (bt_click3)\r\nbt3.place(x=10, y=300)\r\n\r\nbt4 = Button(janela, width=90, text=\"Aula05\", bg=\"darkblue\", height=\"7\")\r\nbt4[\"command\"] = (bt_click4)\r\nbt4.place(x=10, y=420)\r\n\r\nbt5 = Button(janela, width=90, text=\"Aula06\", bg=\"gray\", height=\"7\")\r\nbt5[\"command\"] = (bt_click5)\r\nbt5.place(x=710, y=60)\r\n\r\nbt6 = Button(janela, width=90, text=\"Aula07\", bg=\"pink\", height=\"7\")\r\nbt6[\"command\"] = (bt_click6)\r\nbt6.place(x=710, y=180)\r\n\r\nbt7 = Button(janela, width=90, text=\"Aula08\", bg=\"orange\", height=\"7\")\r\nbt7[\"command\"] = (bt_click7)\r\nbt7.place(x=710, y=300)\r\n\r\nbt8 = Button(janela, width=90, text=\"Aula09\", bg=\"green\", height=\"7\")\r\nbt8[\"command\"] = (bt_click8)\r\nbt8.place(x=710, y=420)\r\n\r\nbt9 = Button(janela, width=90, text=\"Aula10\", bg=\"beige\", height=\"7\")\r\nbt9[\"command\"] = (bt_click9)\r\nbt9.place(x=710, y=540)\r\n\r\nbt10 = Button(janela, width=90, text=\"Ver todas as Aulas Juntas\", bg=\"darkgreen\", height=\"7\")\r\nbt10[\"command\"] = (bt_click10)\r\nbt10.place(x=10, y=540)\r\n\r\nlb = Label(janela, text=\"Testando Projeto de Linguagem de Programação Final - Python - Teste Concluido com Sucesso\",\r\nfont=('Times', '25'))\r\nlb.place(x=100, y=5)\r\n\r\njanela.geometry(\"1400x700+00+00\")\r\njanela.mainloop()"
},
{
"alpha_fraction": 0.8358209133148193,
"alphanum_fraction": 0.8358209133148193,
"avg_line_length": 32.5,
"blob_id": "377241d8883b7cadaac131340081e6a2ee59d579",
"content_id": "90972e2bc59d711c216aac8db7de86569bf38b3c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 68,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 2,
"path": "/README.md",
"repo_name": "CarlosViniMSouza/Projeto-Livro-Python",
"src_encoding": "UTF-8",
"text": "# Primeiro_Projeto_Python\nPrimeiro trabalho de extensão com Python\n"
},
{
"alpha_fraction": 0.5993572473526001,
"alphanum_fraction": 0.6181039214134216,
"avg_line_length": 33.22641372680664,
"blob_id": "96ad403135b2315671775868419dfae999d2d8a8",
"content_id": "d926847d6221b63791cb9ae753a79bf16336a753",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1900,
"license_type": "permissive",
"max_line_length": 114,
"num_lines": 53,
"path": "/Aula09.py",
"repo_name": "CarlosViniMSouza/Projeto-Livro-Python",
"src_encoding": "UTF-8",
"text": "import tkinter as tk\r\n\r\nroot = tk.Tk()\r\nS = tk.Scrollbar(root)\r\nT = tk.Text(root, height=15, width=200)\r\nS.pack(side=tk.RIGHT, fill=tk.Y)\r\nT.pack(side=tk.LEFT, fill=tk.Y)\r\nS.config(command=T.yview)\r\nT.config(yscrollcommand=S.set)\r\nquote = \"\"\"\r\n('Aula 09 - Modulo')\\n\")\r\n('Um módulo é simplesmente um arquivo contendo códigos Python que podem ser explorados por outros programas.')\\n\")\r\n('Um módulo completo pode ser chamado por outros programas através da declaração import (importar).')\\n\")\r\n\r\n('só podemos importar módulos para um programa se eles estiverem dentro do mesmo diretório.')\\n\")\r\n('editando código do programa area.py das funções retangulo, triangulo e circulo mostrados abaixo:')\\n\")\r\n\r\n(\"def __retangulo__(lado_a, lado_b):\\n\")\r\n(\" '''Calculando a área de um retângulo'''\\n\")\r\n(\" area = lado_a * lado_b\\n\")\r\n(\" return area\\n\")\r\n(\"def __triangulo__(lado, altura):\\n\")\r\n(\" '''Calculando a área de um triângulo'''\\n\")\r\n(\" area = (lado * altura) / 2\\n\")\r\n(\" return area\\n\")\r\n(\"def __circulo__(raio):\\n\")\r\n(\" '''Calculando a área de um círculo'''\\n\")\r\n(\" area = 3.14 * (raio ** 2)\\n\")\r\n(\" return area\\n\")\r\n(\"##Programa finalizado##\\n\")\r\n\r\n('O area.py esta pronto para servir como um módulo.')\\n\")\r\n('Se quisermos usar suas funções ')\\n\")\r\n('devemos usar a declaração 'import' para importar area.py.')\\n\")\r\n\r\n('arq011.txt', 'w')\\n\")\r\n('>>> import area')\\n\")\r\n('>>> ajuda(area)')\\n\")\r\n\r\n('>>> area.retangulo(20, 10)')\\n\")\r\n(\"arq.write('200')\\n\")\r\n('>>> area.triangulo(6, 5)\\n')\\n\")\r\n('15.0')\\n\")\r\n('>>> area.circulo(4)')\\n\")\r\n('50.24\\')\\n\")\r\n('>>>')\\n\")\r\n('Módulos podem ser muito grandes.)\\n\")\r\n('Python tem “baterias inseridas”, possuindo uma biblioteca de módulos')\\n\")\r\n('Busque um módulo na biblioteca padrão do Python ou no site PyPI')\\n\")\r\n(janela8, text=\"##fim da aula 09##\\n\")\r\n\"\"\"\r\nT.insert(tk.END, quote)\r\ntk.mainloop()\r\n"
}
] | 4 |
morLev/AdverseDrive
|
https://github.com/morLev/AdverseDrive
|
acfd36f4262846d0fce39c392bdd23d181b41a65
|
6cdd3cb1b60e79039dc534b20ffbfcd0d678572f
|
b451e4be6d9d0ab921897f381ba8f002dd56db29
|
refs/heads/master
| 2023-01-09T19:45:35.844446 | 2019-10-07T02:52:52 | 2019-10-07T02:52:52 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6223844289779663,
"alphanum_fraction": 0.65450119972229,
"avg_line_length": 31.109375,
"blob_id": "498f2fed62c9d3c361a01f4867f42eac41c1d7c5",
"content_id": "b6566b5d51fc3658883f0fdf7104cd93811abbaf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2055,
"license_type": "no_license",
"max_line_length": 224,
"num_lines": 64,
"path": "/docs/experiment_details.md",
"repo_name": "morLev/AdverseDrive",
"src_encoding": "UTF-8",
"text": "# Experiment Details\n\n## Notations\n\n<dl>\n <dt>Agent / Model </dt>\n <dd>It is the model that controls the vehicle. It takes in relevant input (eg. camera images) and outputs the vehicle control commands (steering, throttle, brake). The Imitation Learning model is an example an agent. </dd>\n\n <dt>Scenario / Episode</dt>\n <dd>It is where an agent drives a vehicle from one point to another, or until a timeout flag is triggered.</dd>\n\n <dt>Attack / Adversary</dt>\n <dd>In the context of this project, it is drawing black lines on the road within Carla. Eg. `../adversary/adversary_Town01_nemesisA.png`</dd>\n\n <dt>Baseline</dt>\n <dd>It is a scenario without any attack.</dd>\n\n <dt>Hijack</dt>\n <dd>It is defined as the ability to modify the route taken by a vehicle by using physical adversaries.</dd>\n</dl>\n\n## Scenario setup\n\nDifferent tasks and defined in `carla/driving_benchmark/adversary_suite.py` as follows:\n\n```python\ndef _poses_town01_nemesis(self):\n \"\"\"\n Each matrix is a new task. We have all the four tasks\n \"\"\"\n\n def _poses_straight():\n return [[31, 36], [144, 146]]\n\n def _poses_turn_right():\n return [[42, 47], [79, 103]]\n\n def _poses_turn_left():\n return [[70, 66], [85, 98]]\n\n def _poses_straight_intersection():\n return [[100, 120], [119, 99]]\n\n def _poses_right_intersection():\n return [[107, 99], [119, 96]]\n\n def _poses_left_intersection():\n return [[100, 96], [107, 120]]\n\n return [_poses_straight(),\n _poses_turn_right(),\n _poses_turn_left(),\n _poses_straight_intersection(),\n _poses_right_intersection(),\n _poses_left_intersection()]\n```\nThe numbers for each pose correspond to various starting and ending positions on the map. For example, if we look at three poses at the intersection, we have 6 paths that correspond to the following:\n\n<div align=\"center\">\n<figure>\n <img src=\"../media/hijack_poses.jpg\" alt=\"hijack_poses\" width=80%>\n <figcaption>Hijack Poses</figcaption>\n</figure>\n</div>\n"
},
{
"alpha_fraction": 0.5040431022644043,
"alphanum_fraction": 0.5350404381752014,
"avg_line_length": 24.586206436157227,
"blob_id": "db676ece014ac110c2a1e453402fb1344a6cb562",
"content_id": "1e905c490567cb2b72c7baa187a304b607897354",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 742,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 29,
"path": "/run_docker.sh",
"repo_name": "morLev/AdverseDrive",
"src_encoding": "UTF-8",
"text": "echo \"Checking docker version...\";\nif echo $(sudo docker --version) | grep -Eq '19.0[3-9]|19.[1-9][0-9]|20' ; then\n echo \"Docker version >= 19.03 detected...\";\n sudo docker run \\\n -it --rm \\\n --gpus 0 \\\n --user root \\\n --net host \\\n -v $(pwd)/:/AdverseDrive \\\n -e NB_UID=$(id -u) \\\n -e NB_GID=$(id -g) \\\n -e GRANT_SUDO=yes \\\n xzgroup/adversedrive:latest \\\n /bin/bash;\nelse\n echo \"Docker version < 19.03 detected...\";\n sudo docker run \\\n -it --rm \\\n --runtime=nvidia \\\n -e NVIDIA_VISIBLE_DEVICES=0 \\\n --user root \\\n --net host \\\n -v $(pwd)/:/AdverseDrive \\\n -e NB_UID=$(id -u) \\\n -e NB_GID=$(id -g) \\\n -e GRANT_SUDO=yes \\\n xzgroup/adversedrive:latest \\\n /bin/bash;\nfi\n"
},
{
"alpha_fraction": 0.6726886034011841,
"alphanum_fraction": 0.6811902523040771,
"avg_line_length": 16.10909080505371,
"blob_id": "dba36b9d0acb1959eb0b5579c850a127a6010643",
"content_id": "5a901e745f15cc996ddd72cfd55b21f8d27b5819",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 941,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 55,
"path": "/docker/README.md",
"repo_name": "morLev/AdverseDrive",
"src_encoding": "UTF-8",
"text": "# AdverseDrive Docker\n\n### Prerequisites\n\n1. Install `docker`\n2. Install `nvidia-docker2`\n3. Add user to docker group\n\n### Build Docker\n\nChange `groupname` and `version` as needed.\n\n```\nsudo docker build -t groupname/AdverseDrive:version .\n```\n\n### Use Built docker\n\nSimple command:\n```\nsudo docker run \\\n-it --rm \\\n--runtime=nvidia \\\n--user root \\\n--net host \\\n-v $(pwd)/:/AdverseDrive \\\ngroupname/AdverseDrive:version /bin/bash\n```\n\nServer ready command:\n```\nsudo docker run \\\n-it --rm \\\n--runtime=nvidia \\\n--user root \\\n--net host \\\n-v $(pwd)/:/AdverseDrive \\\n-e NB_UID=$(id -u) \\\n-e DISPLAY \\\n-v /tmp/.X11-unix:/tmp/.X11-unix:rw \\\n-e NB_GID=$(id -g) \\\n-e GRANT_SUDO=yes \\\ngroupname/AdverseDrive:version /bin/bash\n```\n\nAlternatively use (in root directory):\n\n```\nsh run_docker.sh\n```\n\nNote:\n- This docker is Carla ready\n- This docker is Jupyter Notebook ready\n- Docker allows pygame visualization on servers with VNC/desktop mode enabled\n"
},
{
"alpha_fraction": 0.5028248429298401,
"alphanum_fraction": 0.700564980506897,
"avg_line_length": 16.700000762939453,
"blob_id": "162433c19b346e1b05438abfc069666006057030",
"content_id": "f6f36dc7cb424b283dfa02dff48cd1c74313e599",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 177,
"license_type": "no_license",
"max_line_length": 28,
"num_lines": 10,
"path": "/requirements.txt",
"repo_name": "morLev/AdverseDrive",
"src_encoding": "UTF-8",
"text": "pandas==0.24.1\nnumpy==1.16.2\nscipy==1.2.1\ntensorflow==1.13.1\nbayesian_optimization==1.0.1\nopencv_python==4.0.0.21\nmatplotlib==3.0.3\nimutils==0.5.2\nPillow==6.1.0\nprotobuf==3.9.2\n"
},
{
"alpha_fraction": 0.661803126335144,
"alphanum_fraction": 0.6750870943069458,
"avg_line_length": 39.637168884277344,
"blob_id": "e889b15bc01235f7d466d8724adfc30fa88b3302",
"content_id": "d50d777fa20a260c627556250556106f18b44103",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4592,
"license_type": "no_license",
"max_line_length": 143,
"num_lines": 113,
"path": "/start_hijacking_experiments.py",
"repo_name": "morLev/AdverseDrive",
"src_encoding": "UTF-8",
"text": "import os\nimport json\nimport time\nimport numpy as np\nimport pandas as pd\nfrom carla_env import CarlaEnv\nfrom bayes_opt import UtilityFunction\nfrom bayes_opt import BayesianOptimization\nfrom carla.driving_benchmark import run_driving_benchmark\nfrom carla.driving_benchmark.experiment_suites import AdversarySuite\n\nwith open('config/hijacking_params.json') as json_file:\n args = json.load(json_file)\n\nbaseline_task = args['baseline_task']\ntarget_task = args['target_task']\nbaseline_scene = args['baseline_scene']\ntarget_scene = args['target_scene']\ncurr_port = args['port']\ncurr_gpu = args['GPU']\ncurr_town = args['town']\n\nrandom_points = args['random_points']\nsearch_points = args['search_points']\nacquisition_function = args['acquisition_function']\n\noverwrite_experiment = args['overwrite_experiment']\n\ndirectory_to_save = './_benchmarks_results/{}'.format(curr_town)\nif os.path.exists(directory_to_save):\n if overwrite_experiment:\n print(\"Removing {}\".format(directory_to_save))\n os.system(\"rm -rf {}\".format(directory_to_save))\n else:\n print(\"ERROR: A directory called {} already exists.\".format(directory_to_save))\n print(\"Please make sure to move the contents to a new location as running this program will overwrite the contents of this directory.\")\n exit()\n\nos.system(\"mkdir -p _benchmarks_results\")\nprint(\"Loading the Imitition Network and performing one simulation run for the target path..\")\nenv = CarlaEnv(task=target_task, town=curr_town, scene=target_scene,\n port=curr_port, save_images=False, gpu_num=curr_gpu)\nprint(\"Complete.\")\n\ntargetSteer = env.get_steer() # get the steering angles for the target run\nMAX_LEN = int(len(env.get_steer())*.8) # set maximum number of frames to 80 percent of target scenario\ntargetSteer = targetSteer[:MAX_LEN] # subset steering angles to maximum number of allowed frames\n\nenv.task = baseline_task\nenv.scene = baseline_scene\nenv.experiment_name = 'baseline'\n\n# reset experiment suite with base task + scene\nenv.experiment_suite = AdversarySuite(env.town, env.task, env.weather, env.iterations, env.scene)\n\n# run the baseline simulation\nprint(\"Running the simulation for the baseline path.\")\nrun_driving_benchmark(env.agent, env.experiment_suite, log_name=env.experiment_name,\n city_name=env.town, port=env.port, save_images=False)\nprint(\"Complete.\")\nbaseSteer = env.get_steer()\nMAX_LEN_B = int(len(baseSteer)*.8)\nbaseSteer = baseSteer[:MAX_LEN_B]\n\n\ndef target(pos1, rot1, pos2=0, rot2=0, width=10, length=200, colorR=0, colorG=0, colorB=0):\n # specify our attack (in this case double black lines) as a dictionary to pass to the CarlaEnv object.\n dict_params = {\n # the first line\n 0:{\n 'pos': int(pos1),\n 'rot': rot1,\n 'width': int(width),\n 'length': int(length),\n 'color': (int(colorB), int(colorG), int(colorR), 255)\n },\n # the second line\n 1:{\n 'pos': int(pos2),\n 'rot': rot2,\n 'width': int(width),\n 'length': int(length),\n 'color': (0, 0, 0, 255)\n }\n }\n\n # run the simulation with that attack and fetch the data from that run\n metrics = env.step(dict_params)\n\n # calculate the objective function we are trying to maximize\n attackSteer = metrics['steer'][:MAX_LEN]\n\n # if attackSteer vector is shorter than baseSteer, extend attackSteer with baseSteer.\n # This takes care of difference in vector lengths without changing the L1 value\n # as extended part of attackSteer will have zero difference with same part of baseSteer\n if len(attackSteer) < len(targetSteer):\n attackSteer = 
np.append(attackSteer, targetSteer[len(attackSteer):])\n\n # return objective function value for this particular run\n return -1 * np.sum(np.abs(attackSteer - targetSteer))\n\n\n# define the bounds for our attack parameters.\n# in our case, the position of both lines can start between pixel 0 and pixel 190.\n# the rotation of each line can over pi radians.\ncontrols = {'pos1': (0, 190),\n 'rot1': (0, 179),\n 'pos2': (0, 200),\n 'rot2': (0, 179)}\nprint(\"Running the Bayesian Optimizer for {} iterations.\".format(str(random_points + search_points)))\n# instantiate the bayesian optimizer\noptimizer = BayesianOptimization(target, controls, random_state=42)\noptimizer.maximize(init_points=random_points, n_iter=search_points, acq=acquisition_function)\n"
},
{
"alpha_fraction": 0.644444465637207,
"alphanum_fraction": 0.660277783870697,
"avg_line_length": 36.894737243652344,
"blob_id": "d50df90f0e5401b63f5a5ae3699f7ce1a3a96cd9",
"content_id": "bee33059a21d4057e6689f3f8eeadca1f375ca8e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3600,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 95,
"path": "/start_infraction_experiments.py",
"repo_name": "morLev/AdverseDrive",
"src_encoding": "UTF-8",
"text": "import os\nimport json\nimport time\nimport numpy as np\nimport pandas as pd\nfrom carla_env import CarlaEnv\nfrom bayes_opt import UtilityFunction\nfrom bayes_opt import BayesianOptimization\n\nwith open('config/infraction_params.json') as json_file:\n args = json.load(json_file)\n\n# CARLA parameters\ncurr_task = args['task']\ncurr_scene = args['scene']\ncurr_port = args['port']\ncurr_gpu = args['GPU']\ncurr_town = args['town']\n\n# bayesian parameters\nrandom_points = args['random_points']\nsearch_points = args['search_points']\nacquisition_function = args['acquisition_function']\n\noverwrite_experiment = args['overwrite_experiment']\n\ndirectory_to_save = './_benchmarks_results/{}'.format(curr_town)\nif os.path.exists(directory_to_save):\n if overwrite_experiment:\n print(\"Removing {}\".format(directory_to_save))\n os.system(\"rm -rf {}\".format(directory_to_save))\n else:\n print(\"ERROR: A directory called {} already exists.\".format(directory_to_save))\n print(\"Please make sure to move the contents as running this program will overwrite the contents of this directory.\")\n exit()\n\n\nnow = time.time()\nprint(\"Loading the Imitition Network and performing one simulation run for the baseline path..\")\nos.system(\"mkdir -p _benchmarks_results\")\nenv = CarlaEnv(task=curr_task, town='Town01_nemesisA', scene=curr_scene,\n port=curr_port, save_images=False, gpu_num=curr_gpu)\nprint(\"Complete.\")\n\nbaseSteer = env.baseline_steer # get the steering angles for the baseline run\nMAX_LEN = int(len(env.baseline_steer)*.8) # set maximum number of frames to 80 percent of baseline scenario\n\nbaseSteer = baseSteer[:MAX_LEN] # subset steering angles to maximum number of allowed frames\n\n\ndef target(pos1, rot1, pos2=0, rot2=0, width=10, length=200, colorR=0, colorG=0, colorB=0):\n # specify our attack (in this case double black lines) as a dictionary to pass to the CarlaEnv object.\n dict_params = {\n # the first line\n 0:{\n 'pos': int(pos1),\n 'rot': rot1,\n 'width': int(width),\n 'length': int(length),\n 'color': (int(colorB), int(colorG), int(colorR), 255)\n },\n # the second line\n 1:{\n 'pos': int(pos2),\n 'rot': rot2,\n 'width': int(width),\n 'length': int(length),\n 'color': (0, 0, 0, 255)\n }\n }\n\n # run the simulation with that attack and fetch the data from that run\n metrics = env.step(dict_params)\n\n # calculate the objective function we are trying to maximize\n attackSteer = metrics['steer'][:MAX_LEN]\n\n # if attackSteer vector is shorter than baseSteer, extend attackSteer with baseSteer.\n # This takes care of difference in vector lengths without changing the L1 value\n # as extended part of attackSteer will have zero difference with same part of baseSteer\n if len(attackSteer) < len(baseSteer):\n attackSteer = np.append(attackSteer, baseSteer[len(attackSteer):])\n\n # return objective function value for this particular run\n return np.sum(np.abs(attackSteer - baseSteer))\n\ncontrols = {'pos1': (0, 190),\n 'rot1': (0, 179),\n 'pos2': (0, 200),\n 'rot2': (0, 179)}\n\nprint(\"Running the Bayesian Optimizer for {} iterations.\".format(str(random_points + search_points)))\n# instantiate the bayesian optimizer\noptimizer = BayesianOptimization(target, controls, random_state=42)\noptimizer.maximize(init_points=random_points, n_iter=search_points, acq=acquisition_function)\n"
},
{
"alpha_fraction": 0.7418965697288513,
"alphanum_fraction": 0.7599999904632568,
"avg_line_length": 36.66233825683594,
"blob_id": "b6c896547fa952468a8d844f8ade9caf326fc6f8",
"content_id": "be06c69d562b41e32ff8c93a2d772d3970ec6c77",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 5800,
"license_type": "no_license",
"max_line_length": 290,
"num_lines": 154,
"path": "/README.md",
"repo_name": "morLev/AdverseDrive",
"src_encoding": "UTF-8",
"text": "# Adverse Drive\n\nThe goal of this project is to attack end-to-end self-driving models using physically realizable adversaries.\n\n|<center>Target Objective</center>| <center>Conceptual Overview</center>| <center>Example</center> |\n| :-: | :-: | :-: |\n|Collision Attack|<img src=\"media/collision_overview.png\" alt=\"collision_overview\"> | <img src=\"media/collision_adversary.gif\" alt=\"collision_adversary\"/>|\n|Hijacking Attack|<img src=\"media/hijack_overview.png\" alt=\"hijack_overview\">|<img src=\"media/hijack_adversary.gif\" alt=\"hijack_adversary\">|\n\n## Pre-requisites\n\n- Ubuntu 16.04\n- Dedicated GPU with relevant CUDA drivers\n- Docker-CE (for docker method)\n\n**Note: We highly recommend you use the [dockerized version](#docker-method-recommended) of our repository, due to being system independent. Furthermore, it would not affect the packages on your system.**\n\n## Installation\n1. Clone the AdverseDrive repository\n\n```bash\ngit clone https://github.com/xz-group/AdverseDrive\n```\n\n2. Export Carla paths to `PYTHONPATH`\n\n```bash\nsource export_paths.sh\n```\n\n3. Install the required Python packages\n\n```bash\npip3 install -r requirements.txt\n```\n\n4. Download the modified version of the Carla simulator[1], [carla-adversedrive.tar.gz](https://wustl.box.com/s/8k15yp7rb0ckcp7tqmhlh0rje1q1fcjm).\nExtract the contents of the directory and navigate into the extracted directory.\n\n```bash\ntar xvzf carla-adversedrive.tar.gz\ncd carla-adverserdrive\n```\n\n5. Run the Carla simulator on a terminal\n\n```bash\n./CarlaUE4.sh -windowed -ResX=800 -ResY=600\n```\nThis starts Carla as a server on port 2000. Give it about 10-30 seconds to start up depending on your system.\n\n6. On a new terminal, start a python HTTP server. This allows the Carla simulator to read the generated attack images and load it onto Carla\n\n```bash\nsh run_adv_server.sh\n```\n**Note: This requires port 8000 to be free.**\n\n7. On another new terminal, run the infraction objective python script\n\n```bash\npython3 start_infraction_experiments.py\n```\nNote: the Jupyter notebook version of this script, called `start_infraction_experiments.ipynb` describes each step in detail. It is recommended to use that while starting out with this repository. Use `jupyter notebook` to start a jupyter server in this directory.\n\n## How it Works\n\n1. The above steps sets up an experiment defined by the experiment parameters in `config/infraction_parameters.json`, including the Carla town being used, the task (straight, turn-left, turn-right), different scenes, the port number being used by Carla and Bayesian optimizer[3] parameters.\n2. Runs the `baseline scenario` where the Carla Imitation Learning[2] (IL) agent drives a vehicle from point A to point B as defined by the experiment scene and task. It returns a metric from the run (eg: sum of infraction for each frame). The baseline scenario is when there is no attack.\n3. The Bayesian Optimizer suggests parameters for the attack, based on the returned metric (which serves as the objective function that we are trying to maximize), the attack is generated by `adversary_generator.py` and placed in `adversary/adversary_{town_name}.png`.\n4. Carla reads the adversary image over the HTTP server and places in on pre-determined locations within the road.\n5. The IL model again runs through this `attack scenario` and returns a metric.\n6. 
Steps 3-5 are repeated for a set number of experiments, in which successful attacks would be found.\n\n\n### Docker Method (recommended)\n\nIt is expected that you have some experience with dockers, and have [installed](https://docs.docker.com/install/) and tested your installation to ensure you have GPU access via docker containers.\nA quick way to test it is by running:\n```bash\n# docker >= 19.03\ndocker run --gpus all,capabilities=utility nvidia/cuda:9.0-base nvidia-smi\n\n# docker < 19.03 (requires nvidia-docker2)\ndocker run nvidia/cuda:9.0-base --runtime=nvidia nvidia-smi\n```\nAnd you should get a standard `nvidia-smi` output.\n\n1. Clone the AdverseDrive repo\n\n```bash\ngit clone https://github.com/xz-group/AdverseDrive\n```\n\n2. Pull the modified version of the Carla simulator:\n\n```bash\ndocker pull xzgroup/carla:latest\n```\n\n3. Pull the `AdverseDrive` docker containing all the prerequisite packages for running experiments (also server-friendly)\n\n```bash\ndocker pull xzgroup/adversedrive:latest\n```\n\n4. Run the our dockerized Carla simulator on a terminal\n\n```bash\nsh run_carla_docker.sh\n```\nThis starts Carla as a server on port 2000. Give it about 10-30 seconds to start up depending on your system.\n\n6. On a new terminal, start a python HTTP server. This allows the Carla simulator to read the generated attack images and load it onto Carla\n\n```bash\nsh run_adv_server.sh\n```\nNote: This requires port 8000 to be free.\n\n7. On another new terminal, run the `xzgroup/adversedrive` docker\n\n```bash\nsh run_docker.sh\n```\n\n8. Run the infraction objective python script\n\n```bash\npython3 start_infraction_experiments.py\n```\n\n## More documentation\n- [Frequently Asked Questions](docs/faq.md)\n- [Experiment Details](docs/experiment_details.md)\n\n## References\n1. Carla Simulator: [https://github.com/carla-simulator/carla](https://github.com/carla-simulator/carla)\n2. Imitation Learning: [https://github.com/carla-simulator/imitation-learning](https://github.com/carla-simulator/imitation-learning)\n3. Bayesian Optimization: [https://github.com/fmfn/BayesianOptimization](https://github.com/fmfn/BayesianOptimization)\n\n## Citation\nIf you use our work, kindly cite us using the following:\n```\n@misc{boloor2019,\n title={Attacking Vision-based Perception in End-to-End Autonomous Driving Models},\n author={Adith Boloor and Karthik Garimella and Xin He and \n Christopher Gill and Yevgeniy Vorobeychik and Xuan Zhang},\n year={2019},\n eprint={1910.01907},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n```\n"
},
{
"alpha_fraction": 0.7398072481155396,
"alphanum_fraction": 0.7635285258293152,
"avg_line_length": 52.959999084472656,
"blob_id": "6b9db47098fddf072490249877af2fda814dde07",
"content_id": "533a50e3a7e54e32d439ad6b5e9b21ebba44b4af",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1349,
"license_type": "no_license",
"max_line_length": 357,
"num_lines": 25,
"path": "/docs/faq.md",
"repo_name": "morLev/AdverseDrive",
"src_encoding": "UTF-8",
"text": "# Frequently Asked Questions (FAQs)\n\n##### What version of Carla did you use?\n\nv0.8.2 [https://github.com/carla-simulator/carla/tree/0.8.2](https://github.com/carla-simulator/carla/tree/0.8.2)\n\n##### What kind of models did you attack?\n\nEnd-to-end camera based models like [Imitation Learning](https://github.com/carla-simulator/imitation-learning) and [Reinforcement Learning](https://github.com/carla-simulator/reinforcement-learning).\n\n##### How long does it take to run on episode?\n\n10-20 seconds per episode.\n\n##### What ports are used?\n\nBy default, the Carla simulator server-client runs port `2000`. This can be changed by modifying the `-world-port=2000` argument while starting the Carla simulator and `--port=2000` argument while running any Python client. The adversary communication takes place over an HTTP port on `8000`. This port currently cannot be changed without repackaging Carla.\n\n##### I keep seeing `[Errno 104] Connection reset by peer` during the experiment. How do I fix it?\n\nThis happens because Carla is reset after each episode. At this time, sometimes the client and server lose connection, and this error pops up. We didn't remove it because it is an otherwise important error message when Carla client refuses to communicate with the simulator.\n\n##### How do I contribute to this repo?\n\nKindly submit a pull request.\n"
},
{
"alpha_fraction": 0.6790123581886292,
"alphanum_fraction": 0.6790123581886292,
"avg_line_length": 15.199999809265137,
"blob_id": "fa5a2e28897e4db3aa0fb847ee2560944fefaf93",
"content_id": "055bbca9e38dff707bb939e4ee0470dfdf05bfa7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 81,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 5,
"path": "/docker/readme-for-jupyter.md",
"repo_name": "morLev/AdverseDrive",
"src_encoding": "UTF-8",
"text": "### Running Jupyter Notebook\n\n```\njupyter notebook --no-browser --allow-root\n```\n"
},
{
"alpha_fraction": 0.4937717914581299,
"alphanum_fraction": 0.5215080380439758,
"avg_line_length": 36.39751434326172,
"blob_id": "168ec16e1782be4e7e2e0584d6ad55d065c1c42a",
"content_id": "0489b8c706140eac5178838899b69430feeb81a8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6021,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 161,
"path": "/adversary_generator.py",
"repo_name": "morLev/AdverseDrive",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport cv2\nimport os\nimport imutils\n\nclass AdversaryGenerator:\n def __init__(self, city_name, sizeX=200, sizeY=200, transparency=True,\n path='adversary/', record=False):\n \"\"\"\n Library containing different shapes, along with the ability\n to create .png images with the shapes.\n adversary generated will be of form `adversary_city_name.png` in the path directory\n sizeX and sizeY pertain to the size of the canvas on which the\n attack pattern is drawn on CARLA (recommended 200x200).\n POSITION_RANGE = (0, 200)\n ROTATION_RANGE = (0, 180)\n WIDTH_RANGE = (0, 50)\n LENGTH_RANGE = (0, 200)\n COLOR_TUPLE_RANGE = (0, 255)\n \"\"\"\n self.city_name = city_name\n self.sizeX = sizeX\n self.sizeY = sizeY\n\n if transparency:\n self.channels = 4 # alpha channel\n else:\n self.channels = 3\n\n self.path = path\n self.record = record\n self.counter = 0\n\n if self.record:\n os.system('mkdir -p {}adversaries/'.format(self.path))\n\n self.clear_canvas()\n\n # adversary MUST be generated in the 'adversary' directory and MUST have\n # the naming convention 'adversary_townname.png'\n self.image_label = 'adversary_' + self.city_name + '.png'\n self.draw_image()\n\n def clear_canvas(self):\n \"\"\"\n Clears the canvas to make it available for a new pattern\n \"\"\"\n self.canvas = np.zeros((self.sizeX, self.sizeY, self.channels), dtype=np.uint8)\n\n def draw_image(self):\n \"\"\"\n Writes the canvas to a png file.\n Uncomment below code to save patterns at every 'draw_image' call in a\n separate directory\n \"\"\"\n if self.record:\n if self.counter == 0:\n cv2.imwrite(\"{}adversaries/baseline.png\".format(self.path, self.counter), self.canvas)\n else:\n cv2.imwrite(\"{}adversaries/adversary_{:04}.png\".format(self.path, self.counter), self.canvas)\n self.counter += 1\n cv2.imwrite(\"{}{}\".format(self.path, self.image_label), self.canvas)\n\n def lines_adversary(self, adversary_params):\n \"\"\"\n Generates a .png image of a line with parameters described in adversary_params\n adversary_params = {\n 'pos': 10, -> int (0 -> 200)\n 'rot': 20, -> float (0 -> 180)\n 'width': 20, -> int (1 -> 200)\n 'color': (0, 0, 0, 255) -> int tuple (0->255)\n }\n \"\"\"\n self.clear_canvas()\n\n pos = adversary_params['pos']\n rot = adversary_params['rot']\n width = adversary_params['width']\n color = adversary_params['color']\n\n cv2.rectangle(self.canvas, (pos, 0),\n (pos + width, self.sizeY), color, -1)\n self.canvas = imutils.rotate(self.canvas, angle=rot)\n self.draw_image()\n # cv2.imwrite(\"{}{}\".format(self.path, self.image_label), self.canvas)\n\n def multi_lines(self, adversary_params):\n \"\"\"\n generates a multi-lines .png image with the lines' parameters define in adversary_params.\n adversary_params in this case would be dictionary of dictionaries, more\n general version of 'lines_adversary'\n Ex:\n adversary_params = {\n 0:{\n 'pos': 10,\n 'rot': 20,\n 'width': 20,\n 'length': 100,\n 'color': (0, 0, 0, 255)\n },\n 1:{\n 'pos': 100,\n 'rot': 80,\n 'width': 40,\n 'length': 10,\n 'color': (0, 255, 0, 255)\n }\n }\n \"\"\"\n self.clear_canvas()\n\n for line_id in sorted(adversary_params.keys()):\n overlay = np.zeros((self.sizeX, self.sizeY, self.channels), dtype=np.uint8)\n pos = adversary_params[line_id]['pos']\n rot = adversary_params[line_id]['rot']\n width = adversary_params[line_id]['width']\n length = adversary_params[line_id]['length']\n color = adversary_params[line_id]['color']\n cv2.rectangle(overlay, (pos, 0),\n (pos + width, length), color, -1)\n overlay = 
imutils.rotate(overlay, angle=rot)\n overlay_pos = (0, 0)\n self.canvas = self.overlay_image_alpha(self.canvas, overlay, 0, 0)\n self.draw_image()\n # cv2.imwrite(\"{}{}\".format(self.path, self.image_label), self.canvas)\n\n def overlay_image_alpha(self, background, foreground, x=0, y=0):\n \"\"\"\n Overlay img_overlay on top of img at the position specified by\n pos and blend using alpha_mask.\n Source: https://stackoverflow.com/a/52742571\n \"\"\"\n rows, cols, channels = foreground.shape\n trans_indices = foreground[...,3] != 0 # Where not transparent\n overlay_copy = background[y:y+rows, x:x+cols]\n overlay_copy[trans_indices] = foreground[trans_indices]\n background[y:y+rows, x:x+cols] = overlay_copy\n return background\n\nif __name__ == '__main__':\n # Example usage: using adversary generator to create 2-line attacks\n # five times, while recording each of them\n advgen = AdversaryGenerator('example', record=True)\n for i in range(5):\n adversary_params = {\n 0:{\n 'pos': np.random.randint(200),\n 'rot': 20,\n 'width': 20,\n 'length': 100,\n 'color': (0, 0, 0, 255)\n },\n 1:{\n 'pos': np.random.randint(200),\n 'rot': 80,\n 'width': 40,\n 'length': 10,\n 'color': (0, 255, 0, 255)\n }\n }\n advgen.multi_lines(adversary_params)\n"
},
{
"alpha_fraction": 0.5838364958763123,
"alphanum_fraction": 0.6075243949890137,
"avg_line_length": 34.88333511352539,
"blob_id": "fe464b776f66a818df5fd114af8d139595071683",
"content_id": "e3ad8e3d6afd8939bed899e3e8eb4ca163cb617b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2153,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 60,
"path": "/utils.py",
"repo_name": "morLev/AdverseDrive",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nfrom carla.planner.map import CarlaMap\n\n\ndef plot_trajectories(trajectory_dict, title, add_legend=True):\n \"\"\"\n Plots a set of trajectories on CARLA Town 01\n inputs: trajectory_dict and title of plot\n example:\n trajectory_dict = {'baseline' : {'x' : [0,1,2], 'y' [3,4,5]}}\n Each key of the dictionary will correspond to the label for that trajectory\n and each value is a dictionary with keys 'x' and 'y' whose values are\n array-like. See supplied jupyter notebooks for more examples.\n \"\"\"\n carla_map = CarlaMap('Town01_nemesis', 0.1653, 50)\n image = mpimg.imread(\"carla/planner/Town01_nemesis.png\")\n fig, ax = plt.subplots(1)\n pad = 30\n\n fig.set_size_inches(10, 10)\n plt.rcParams.update({'font.size': 12})\n ax.imshow(image, alpha=0.4)\n\n all_x_pixels = []\n all_y_pixels = []\n\n for label, positions in trajectory_dict.items():\n x_position = positions['x']\n y_position = positions['y']\n\n pixelX = []\n pixelY = []\n for i in range(len(x_position)):\n pixel = carla_map.convert_to_pixel([x_position[i], y_position[i], 0])\n pixelX.append(pixel[0])\n pixelY.append(pixel[1])\n all_x_pixels.append(pixel[0])\n all_y_pixels.append(pixel[1])\n\n if len(x_position) == 1:\n plt.scatter(pixelX[0], pixelY[0], label=label, s=500)\n else:\n if label.lower() == 'baseline':\n plt.plot(pixelX, pixelY, linestyle='dashed', label=label, color='k',markersize=12, linewidth=4)\n else:\n plt.plot(pixelX, pixelY,linestyle='dashed', label=label, color='blue',markersize=12, linewidth=4)\n\n xmin = np.maximum(0, min(all_x_pixels) - pad)\n xmax = np.minimum(image.shape[1], max(all_x_pixels) + pad)\n ymin = np.maximum(0, min(all_y_pixels) - pad)\n ymax = np.minimum(image.shape[0], max(all_y_pixels) + pad)\n plt.axis([xmin, xmax, ymax, ymin])\n plt.title(title)\n if add_legend:\n plt.legend()\n plt.xlabel('x')\n plt.ylabel('y')\n return plt\n"
}
] | 11 |
Colby-England/CS_361_Project
|
https://github.com/Colby-England/CS_361_Project
|
b219c94e13467cfb2ff56dac06c4958c5f53c7de
|
3e1b9f163faa909dfaae77ff6dc3804e97d690ef
|
7bb8f208764c35b41847093d5ab78b747ad524ca
|
refs/heads/master
| 2023-07-04T04:00:28.368819 | 2021-08-14T01:40:56 | 2021-08-14T01:40:56 | 389,815,990 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7489471435546875,
"alphanum_fraction": 0.7536265850067139,
"avg_line_length": 82.82353210449219,
"blob_id": "423df7acc6416b0f4d90a98b790341ef2f232b6c",
"content_id": "ca5b4d0f985c7cc9f334699b18d8f751461b0cb9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 4276,
"license_type": "no_license",
"max_line_length": 440,
"num_lines": 51,
"path": "/service/app.js",
"repo_name": "Colby-England/CS_361_Project",
"src_encoding": "UTF-8",
"text": "const express = require(\"express\");\nconst app = express();\napp.set('port', process.argv[2]);\napp.use(express.json())\n\nconst defintions = {\n \"Chest Press\": \"The bench press, or chest press, is an upper-body weight training exercise in which the trainee presses a weight upwards while lying on a weight training bench. The exercise uses the pectoralis major, the anterior deltoids, and the triceps, among other stabilizing muscles. A barbell is generally used to hold the weight, but a pair of dumbbells can also be used.\",\n \"Lat Pulldown\": \"The pull-down exercise is a strength training exercise designed to develop the latissimus dorsi muscle. It performs the functions of downward rotation and depression of the scapulae combined with adduction and extension of the shoulder joint.\",\n \"Pectoral Fly\": \"A fly or flye is a strength training exercise in which the hand and arm move through an arc while the elbow is kept at a constant angle. Flies are used to work the muscles of the upper body.\",\n \"Biceps Curl\": \"The term biceps curl refers to any of a number of weight training exercises that primarily targets the biceps brachii muscle. It may be performed using a barbell, dumbbell, resistance band, or other equipment.\",\n \"Triceps Press\": \"Lying triceps extensions, also known as skull crushers and French extensions or French presses, are a strength exercise used in many different forms of strength training. Lying triceps extensions are one of the most stimulating exercises to the entire triceps muscle group in the upper arm.\",\n \"Shoulder Press\": \"The overhead press (abbreviated OHP), also referred to as a shoulder press, military press, or simply the press, is a weight training exercise with many variations. It is typically performed while either standing or sitting sometimes also when squatting, in which a weight is pressed straight upwards from racking position until the arms are locked out overhead, while the legs, lower back and abs maintain balance.\",\n \"Row\": \"In strength training, rowing (or a row, usually preceded by a qualifying adjective — for instance a seated row) is an exercise where the purpose is to strengthen the muscles that draw the rower's arms toward the body (latissimus dorsi) as well as those that retract the scapulae (trapezius and rhomboids) and those that support the spine (erector spinae).\",\n \"Leg Press\": \"The leg press is a compound weight training exercise in which the individual pushes a weight or resistance away from them using their legs. The term leg press machine refers to the apparatus used to perform this exercise.\",\n \"Leg Extension\": \"The leg extension is a resistance weight training exercise that targets the quadriceps muscle in the legs. The exercise is done using a machine called the Leg Extension Machine.\",\n \"Leg Curl\": \"The leg curl, also known as the hamstring curl, is an isolation exercise that targets the hamstring muscles. The exercise involves flexing the lower leg against resistance towards the buttocks.\",\n \"Hip Abduction\": \"Hip abduction is the movement of the leg away from the midline of the body. The hip abductors are important and often forgotten muscles that contribute to our ability to stand, walk, and rotate our legs with ease.\",\n \"Hip Adduction\": \"Hip adductors are the muscles in your inner thigh that support balance and alignment. 
These stabilizing muscles are used to adduct the hips and thighs or move them toward the midline of your body.\"\n}\n\n\napp.get('/', (req, res) => {\n res.sendFile(\"C:/Users/engla/Desktop/OSU Online Comp Sci/2021_Summer/CS_361/Project/service/index.html\")\n})\n\n// post route handles requests for the defined defintions of different workouts.\napp.post('/', (req, res, next) => {\n\n let response = {}\n response['description'] = defintions[req.body.workout]\n\n res.send(JSON.stringify(response))\n});\n\n// error routes\napp.use(function(req,res){\n res.status(404);\n // res.render('404');\n });\n \n app.use(function(err, req, res, next){\n console.error(err.stack);\n res.type('plain/text');\n res.status(500);\n // res.render('500');\n });\n\n// listen on port specified with node index.js XXXX\napp.listen(app.get('port'), () => {\n console.log(`Express started on port ${app.get('port')}`);\n});"
},
{
"alpha_fraction": 0.5896806120872498,
"alphanum_fraction": 0.6081396341323853,
"avg_line_length": 34.75225067138672,
"blob_id": "c99ac1573f81d39a3365e41ce2517b4a00619a8a",
"content_id": "81d84cf79de7830eb4c4e76ffadebfb91ff753b6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 15873,
"license_type": "no_license",
"max_line_length": 131,
"num_lines": 444,
"path": "/refactored_canvas.py",
"repo_name": "Colby-England/CS_361_Project",
"src_encoding": "UTF-8",
"text": "from tkinter import * \nfrom tkinter import messagebox\nfrom tkinter.colorchooser import askcolor\nfrom tkinter.filedialog import asksaveasfilename\nimport tkinter.tix as tix\nfrom PIL import ImageTk, Image, ImageGrab\nimport requests\nfrom urllib.request import urlopen\nimport uuid\nfrom io import BytesIO\nimport ctypes\nctypes.windll.shcore.SetProcessDpiAwareness(2)\n\nimport webbrowser\n\n\n# Define Contants for use on the microservice call\nSERVICE_URL = \"https://www.weshavens.info:443/uploadV2\"\nSERVICE_PATH = \"/CanvasApp/\"\n\n\n# Define tooltip constants\nURL_HELP = \"Enter the URL\\nfor the image\\nyou wish to\\nedit.\"\n\nBEGIN_HELP = \"Start your project\\nwith the image and size\\nspecified above.\"\n\nIMAGE_WIDTH_HELP = \"\"\"Select the width of your\nmodified image. Valid \nranges are from 100 pixels\nto 720 pixels.\"\"\"\n\nIMAGE_HEIGHT_HELP = \"\"\"Select the height of your\nmodified image. Valid \nranges are from 100 pixels\nto 576 pixels.\"\"\"\n\nBRUSH_HELP = \"\"\"This button toggles the\nbrush tool on and off.\nWhen toggled on it will\nallow you to click and\nhold to draw on the picture.\"\"\"\n\nLINE_HELP = \"\"\"This button toggles the\nline tool on and off.\nWhen toggled on it will\nallow you to draw a line\nbetween two points by clicking\nat the first point, dragging the \nmouse to the second point and\nreleasing the mouse button.\"\"\"\n\nCOLOR_HELP = \"\"\"This button opens a color\nselect window. The color will then\nbe used for the drawing tools.\n\"\"\"\n\nBRUSH_SIZE_HELP = \"\"\"This slide changes the width\nof the brush tool. The units\nare in pixels and range from\n1 to 100.\n\"\"\"\n\nUNDO_HELP = \"\"\"This button will remove\nedits from the canvas\nin reverse order of when\nthey were made.\"\"\"\n\nSAVE_HELP = \"\"\"This button will open\na dialog to save your image.\"\"\"\n\nclass CanvasPage():\n\n def __init__(self, master) -> None:\n\n # initialize variables for drawing tools\n self.left_mouse_position = \"up\"\n self.x_pos, self.y_pos = None, None\n self.x1, self.y1, self.x2, self.y2 = None, None, None, None\n self.selected_color = [None, \"Black\"]\n\n # initialize variables for tracking edits and undoing\n self.edit_num = 0\n self.undo_stack = []\n\n # create frame to hold canvas \n self.canvas_frame = Frame(master)\n self.canvas_frame.grid(row=0, column=0)\n\n # create frame to hold buttons\n self.button_frame = Frame(master)\n self.button_frame.grid(row=1, column=0, sticky='nsew')\n\n # populate canvas and button frames\n self.config_button_frame(master)\n self.setup_drawing_area()\n\n # open the start menu ontop of the root menu\n self.open_start_menu()\n\n def setup_buttons(self, master):\n\n self.line_scale = Scale(self.button_frame, from_=1, to=100, orient=HORIZONTAL)\n self.line_scale.grid(row=0, column=4)\n\n self.color_btn = Button(self.button_frame, text='Select a Color', command=self.choose_color, fg=self.selected_color[1])\n self.color_btn.grid(row=1, column=4)\n\n self.copy_image = Button(self.button_frame, text='Save Image', command= lambda: self.save_image(master, self.drawing_area))\n self.copy_image.grid(row=1, column=7)\n\n self.undo_btn = Button(self.button_frame, text='Undo', command=self.confrim_undo)\n self.undo_btn.grid(row=0, column=7)\n\n self.help_btn = Button(self.button_frame, text=\"Help\", command=self.open_help_menu)\n self.help_btn.grid(row=0, column=9)\n \n def setup_tooltips(self):\n\n help_text = [BRUSH_HELP, BRUSH_SIZE_HELP, UNDO_HELP, LINE_HELP, COLOR_HELP, SAVE_HELP]\n\n\n help_counter = 0\n for 
row_num in range(0, 2):\n for col_num in range(2, 11, 3):\n label = Label(self.button_frame, text=\"?\")\n tooltip = tix.Balloon(self.button_frame)\n tooltip.bind_widget(label, balloonmsg=help_text[help_counter])\n label.grid(row=row_num, column=col_num)\n\n help_counter += 1\n\n def setup_tool_radio(self):\n\n # intialize list of tool types\n self.tools = [\"brush\", \"line\"]\n\n # create radio buttons of tool types\n self.selected_tool = StringVar(self.button_frame)\n self.selected_tool.set(\"brush\")\n\n row_count = 0\n for tool in self.tools:\n Radiobutton(self.button_frame,\n text=tool,\n variable=self.selected_tool,\n value=tool).grid(row=row_count, column=1)\n row_count += 1\n\n def config_button_frame(self, master):\n\n self.button_frame.columnconfigure(0, weight=1)\n self.button_frame.columnconfigure(3, weight=1)\n self.button_frame.columnconfigure(6, weight=1)\n self.button_frame.columnconfigure(9, weight=1)\n\n self.setup_tooltips()\n self.setup_tool_radio()\n self.setup_buttons(master)\n \n def setup_drawing_area(self):\n # create default canvas\n self.drawing_area = Canvas(self.canvas_frame, width=600, height=600, bg='white')\n self.drawing_area.pack()\n\n # bind events to the canvas\n self.drawing_area.bind(\"<Motion>\", self.motion)\n self.drawing_area.bind(\"<ButtonPress-1>\", self.left_mouse_down)\n self.drawing_area.bind(\"<ButtonRelease-1>\", self.left_mouse_up)\n\n def open_start_menu(self):\n \n # create toplevel menu on top of root menu\n self.splash_screen = Toplevel(root)\n self.splash_screen.attributes('-topmost', 'true')\n self.splash_screen.grab_set()\n\n # place top level in middle of root window\n self.splash_screen.geometry(\"300x300\")\n\n # configure column and row weights to center the widgets\n self.splash_screen.grid_columnconfigure(0, weight=1)\n self.splash_screen.grid_columnconfigure(4, weight=1)\n self.splash_screen.grid_rowconfigure(0, weight=1)\n self.splash_screen.grid_rowconfigure(5, weight=1)\n\n # create url labels\n self.url_label = Label(self.splash_screen, text=\"Image URL\")\n self.url_help_icon = Label(self.splash_screen, text=\"?\")\n \n # create tooltip for url \n self.url_help = tix.Balloon(self.splash_screen)\n self.url_help.bind_widget(self.url_help_icon, balloonmsg=URL_HELP)\n\n # create entry for URL\n self.url = StringVar(self.splash_screen)\n self.url.set(\"Enter the image url:\")\n self.url_entry = Entry(self.splash_screen, textvariable=self.url, fg=\"grey\")\n self.url_entry.bind(\"<FocusIn>\", self.handle_focus_in)\n \n # place the url row in the grid\n self.url_entry.grid(row=1, column=2, pady=10)\n self.url_label.grid(row=1, column=1, pady=10)\n self.url_help_icon.grid(row=1, column=3, pady=10)\n\n # create image_sizes labels\n self.image_width_label = Label(self.splash_screen, text=\"Image Width\")\n self.image_height_label = Label(self.splash_screen, text=\"Image Height\")\n self.image_width_help_icon = Label(self.splash_screen, text=\"?\")\n self.image_height_help_icon = Label(self.splash_screen, text=\"?\")\n\n # create tooltip for image sizes\n self.image_width_help = tix.Balloon(self.splash_screen)\n self.image_width_help.bind_widget(self.image_width_help_icon, balloonmsg=IMAGE_WIDTH_HELP)\n\n self.image_height_help = tix.Balloon(self.splash_screen)\n self.image_height_help.bind_widget(self.image_height_help_icon, balloonmsg=IMAGE_HEIGHT_HELP)\n\n # create options for image size\n # self.image_sizes = ['320x240', '1024x768', '1280x1024', '720x576', '1280x720', '1920x1080']\n # self.image_size_string = 
StringVar(self.splash_screen)\n # self.image_size_string.set(\"Select Image Size\")\n # self.image_size_option = OptionMenu(self.splash_screen, self.image_size_string, *self.image_sizes)\n # self.image_size_option.grid(row=2, column=2, pady=10)\n\n # create options for image width and height\n self.width_spin_val = StringVar()\n self.width_spin = Spinbox(self.splash_screen, from_=100, to=720, textvariable=self.width_spin_val)\n\n self.height_spin_val = StringVar()\n self.height_spin = Spinbox(self.splash_screen, from_=100, to=576, textvariable=self.height_spin_val)\n \n # place image sizes row in grid\n self.width_spin.grid(row=2, column=2, pady=10)\n self.height_spin.grid(row=3, column=2, pady=10)\n\n self.image_width_help_icon.grid(row=2, column=3, pady=10)\n self.image_height_help_icon.grid(row=3, column=3, pady=10)\n\n\n self.image_width_label.grid(row=2, column=1, pady=10)\n self.image_height_label.grid(row=3, column=1, pady=10)\n\n # create button to open canvas page\n self.begin_help_icon = Label(self.splash_screen, text=\"?\")\n\n # create begin tooltip\n self.begin_help = tix.Balloon(self.splash_screen)\n self.begin_help.bind_widget(self.begin_help_icon, balloonmsg=BEGIN_HELP)\n\n # create begin project button\n self.begin_btn = Button(self.splash_screen, text='Open Canvas Page', command=self.start_project)\n \n # place begin project row in grid\n self.begin_btn.grid(row=4, column=2, pady=10)\n self.begin_help_icon.grid(row=4, column=3, pady=10)\n\n def open_help_menu(self):\n\n webbrowser.open(\"file://C:/Users/engla/Desktop/OSU_Online_Comp_Sci/2021_Summer/CS_361/Project/canvas_help.html\")\n\n def start_project(self):\n\n if self.url.get() == \"Enter the image url:\":\n messagebox.showerror(\"URL Error\", \"Please enter an image url!\")\n return\n \n try:\n int(self.width_spin_val.get())\n except:\n messagebox.showerror(\"Width Error\", \"Please enter a number between 100 & 720 for width!\")\n return\n\n try:\n int(self.height_spin_val.get())\n except:\n messagebox.showerror(\"Height Error\", \"Please enter a number between 100 & 576 for height!\")\n return\n\n if int(self.width_spin_val.get()) > 720 or int(self.width_spin_val.get()) < 100:\n messagebox.showerror(\"Width Error\", \"Please enter a number between 100 & 720 for width!\")\n return\n\n if int(self.height_spin_val.get()) > 576 or int(self.height_spin_val.get()) < 100:\n messagebox.showerror(\"Height Error\", \"Please enter a number between 100 & 576 for height!\")\n return\n\n self.resize_canvas()\n\n def resize_canvas(self):\n\n self.drawing_area.config(width=int(self.width_spin_val.get()), height=int(self.height_spin_val.get()))\n self.canvas_frame.config(width=int(self.width_spin_val.get()), height=int(self.height_spin_val.get()))\n\n self.image_width = self.width_spin_val.get()\n self.image_height = self.height_spin_val.get()\n\n self.open_image(self.width_spin_val.get(), self.height_spin_val.get())\n\n def open_image(self, width, height):\n\n self.image_type = self.url.get()[self.url.get().rfind(\".\"):]\n\n self.remote_url = self.request_image(self.url.get())[\"url\"]\n\n # self.img_url = 'https://pernetyp.sirv.com/CanvasApp/3fcd2d4c-7fb4-4919-851b-2426605178b3'\n\n self.modified_url = self.remote_url + \"?scale.width=\" + width + \"&scale.height=\" + height + \"&scale.option=ignore\"\n\n self.raw_image = urlopen(self.modified_url).read()\n\n self.img = ImageTk.PhotoImage(Image.open(BytesIO(self.raw_image))) # PIL solution\n self.drawing_area.create_image(0, 0, anchor=NW, image=self.img)\n 
self.splash_screen.destroy()\n \n def request_image(self, image_url):\n self.request_params = {\"url\": image_url,\n \"path\": SERVICE_PATH,\n \"file\": str(uuid.uuid4()) + self.image_type\n }\n \n request = requests.post(SERVICE_URL, data=self.request_params)\n\n return request.json()\n\n def left_mouse_down(self, event=None):\n\n self.left_mouse_position = \"down\"\n\n self.x1 = event.x\n self.y1 = event.y\n\n # print(f'x: {self.x1}, y: {self.y1}')\n\n def left_mouse_up(self, event=None):\n\n self.left_mouse_position = \"up\"\n\n self.x_pos = None\n self.y_pos = None\n\n self.x2 = event.x\n self.y2 = event.y\n\n current_tool = self.selected_tool.get()\n\n if current_tool == \"line\":\n self.line_draw(event)\n \n self.undo_stack.append(\"edit#\" + str(self.edit_num))\n print(self.undo_stack)\n self.edit_num += 1\n\n def motion(self, event=None):\n\n if self.selected_tool.get() == \"brush\":\n self.brush_draw(event)\n\n def line_draw(self, event=None):\n\n if None not in (self.x1, self.y1, self.x2, self.y2):\n event.widget.create_line(self.x1,\n self.y1,\n self.x2,\n self.y2,\n smooth=True,\n fill=self.selected_color[1],\n tags=(\"edit#\" + str(self.edit_num)))\n\n def brush_draw(self, event=None):\n\n if self.left_mouse_position == \"down\":\n\n if self.x_pos is not None and self.y_pos is not None:\n event.widget.create_line(self.x_pos, \n self.y_pos, \n event.x, \n event.y, \n smooth=True, \n width=self.line_scale.get(), \n capstyle=ROUND, \n joinstyle=ROUND,\n fill=self.selected_color[1],\n tags=(\"edit#\" + str(self.edit_num)))\n \n self.x_pos = event.x\n self.y_pos = event.y\n\n def handle_focus_in(self, event=None):\n if self.url.get() == \"Enter the image url:\":\n event.widget.delete(0, END)\n event.widget.config(fg=\"black\")\n \n def choose_color(self):\n self.selected_color = askcolor(title='Choose a color')\n self.color_btn.config(fg=self.selected_color[1])\n # print(self.selected_color)\n\n def save_image(self, master, widget):\n self.x = master.winfo_rootx() + widget.winfo_rootx()\n self.y = widget.winfo_rooty()\n\n self.canvas_geometry = widget.winfo_geometry().split(sep=\"+\")\n\n self.widget_x1 = int(self.canvas_geometry[1])\n self.widget_y1 = int(self.canvas_geometry[2])\n self.widget_x2 = self.widget_x1 + int(self.image_width)\n self.widget_y2 = self.widget_y1 + int(self.image_height)\n\n file_name = asksaveasfilename(initialdir=\"/\", title=\"Save as...\", filetypes=((\"png files\", \".png\"), (\"all files\", \"*.*\")))\n\n ImageGrab.grab().crop((self.x, self.y, self.x + self.widget_x2, self.y + self.widget_y2)).save(file_name)\n\n def undo(self):\n\n if len(self.undo_stack) > 0:\n self.drawing_area.delete(self.undo_stack.pop())\n\n def confrim_undo(self):\n\n answer = messagebox.askyesno(title='Confirm Undo', message=\"Are you sure that you want to undo?\")\n \n if answer:\n self.undo()\n \ndef on_closing():\n if messagebox.askokcancel(\"Quit\", \"Do you want to quit?\"):\n root.destroy()\n\nif __name__ == \"__main__\":\n # Create main window and set to full-screen\n root = tix.Tk()\n root.state('zoomed')\n root.protocol(\"WM_DELETE_WINDOW\", on_closing)\n\n # configure grid rows & columns to center the frames within the main window\n root.grid_rowconfigure(0, weight=1)\n root.grid_rowconfigure(1, weight=1)\n root.grid_columnconfigure(0, weight=1)\n\n # create a new Canvas page object \n canvas = CanvasPage(root)\n\n\n root.mainloop()"
}
] | 2 |
meyersbs/personality
|
https://github.com/meyersbs/personality
|
4e987891bd938dfc839ac24e9096a37f928f110f
|
69cdb45c002d9fd5c395d4632d38e2ccacbe1eff
|
7016451e0315c4eebcf76197850132eecf0e8665
|
refs/heads/master
| 2021-01-22T04:10:09.446705 | 2018-08-22T20:13:04 | 2018-08-22T20:13:04 | 92,436,773 | 4 | 2 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5042084455490112,
"alphanum_fraction": 0.5270541310310364,
"avg_line_length": 42.75438690185547,
"blob_id": "25694e1b6a56f07b0120cebb19e07cd2bb711712",
"content_id": "27f8e2b22eab4ae6517fd08d76fe3a636dc4c7f9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2495,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 57,
"path": "/personality/helpers.py",
"repo_name": "meyersbs/personality",
"src_encoding": "UTF-8",
"text": "import csv\n\nfrom .constants import CY, MA, YE, GR, OR, RE, XX, VERBS_PATH\n\ndef clean_list(list_to_clean):\n return [v for v in list_to_clean if v != '']\n\n#### FROM github.com/andymeneely/sira-nlp/app/lib/helpers.py#L95-L113\ndef get_verbs():\n \"\"\"\n Return the contents of verbs file pointed to by the filepath argument as a\n dictionary in which the key is the conjugate of a verb and the value is\n uninflected verb form of the conjugate verb.\n For example, {'scolded': 'scold', 'scolding': 'scold'}\n Adapted from code provided in NodeBox:\n https://www.nodebox.net/code/index.php/Linguistics#verb_conjugation\n \"\"\"\n verbs = dict()\n with open(VERBS_PATH) as file:\n reader = csv.reader(file)\n for row in reader:\n for verb in row[1:]:\n verbs[verb] = row[0]\n\n return verbs\n\ndef print_warning(label, sent, test, train):\n out = \"{:s}\\n==== WARNING: Invalid Prediction ===={:s}\".format(RE, XX)\n out += \" Label:\\t{:s}\".format(label)\n out += \" Sentence:\\t{:s}\".format(sent)\n out += \"{:s} Expected:\\t{:s}{:s}\".format(GR, test, XX)\n out += \"{:s} Predicted:\\t{:s}{:s}\".format(YE, train, XX)\n out += \"{:s}\\n=====================================\\n\".format(RE)\n\n print(out)\n\ndef print_preds(preds, title):\n out = \"\\n============================\"\n out += \"\\n\" + title\n out += \"{:s}\\n {:=<27s}{:s}\".format(CY, \"EXTRAVERSION: \", XX)\n out += \"\\n {: >13s} {: >11s}\".format(\"shy\", \"extraverted\")\n out += \"\\n %{: >13f} {: >11f}\".format(preds['eRatio_n'], preds['eRatio_y'])\n out += \"{:s}\\n {:=<27s}{:s}\".format(MA, \"NEUROTICISM: \", XX)\n out += \"\\n {: >13s} {: >11s}\".format(\"secure\", \"neurotic\")\n out += \"\\n %{: >13f} {: >11f}\".format(preds['nRatio_n'], preds['nRatio_y'])\n out += \"{:s}\\n {:=<27s}{:s}\".format(YE, \"AGREEABLENESS: \", XX)\n out += \"\\n {: >13s} {: >11s}\".format(\"uncooperative\", \"friendly\")\n out += \"\\n %{: >13f} {: >11f}\".format(preds['aRatio_n'], preds['aRatio_y'])\n out += \"{:s}\\n {:=<27s}{:s}\".format(GR, \"CONSCIENTIOUSNESS: \", XX)\n out += \"\\n {: >13s} {: >11s}\".format(\"careless\", \"precise\")\n out += \"\\n %{: >13f} {: >11f}\".format(preds['cRatio_n'], preds['cRatio_y'])\n out += \"{:s}\\n {:=<27s}{:s}\".format(OR, \"OPENNESS: \", XX)\n out += \"\\n {: >13s} {: >11s}\".format(\"unimaginative\", \"insightful\")\n out += \"\\n %{: >13f} {: >11f}\".format(preds['oRatio_n'], preds['oRatio_y'])\n out += \"\\n============================\"\n\n print(out)\n\n"
},
{
"alpha_fraction": 0.5719490051269531,
"alphanum_fraction": 0.5719490051269531,
"avg_line_length": 19.33333396911621,
"blob_id": "bd5cfec50e09f8fe3f179ac3df418c3df4344923",
"content_id": "6cf4caafe9b937e115a8cdd24d0fa3023fbc8727",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 549,
"license_type": "permissive",
"max_line_length": 61,
"num_lines": 27,
"path": "/personality/status.py",
"repo_name": "meyersbs/personality",
"src_encoding": "UTF-8",
"text": "from .word import *\n\n__all__ = ['Status', 'Statuses']\n\n\nclass Status(object):\n @classmethod\n def __init__(cls, status):\n instance = cls()\n\n instance.words = list()\n for word in status:\n instance.words.append(Words.__init__(word))\n\n return instance\n\nclass Statuses(object):\n @classmethod\n def __init__(cls, statuses):\n instance = cls()\n\n instance.statuses = list()\n\n for status in statuses:\n instance.statuses.append(Status.__init__(status))\n\n return instance\n"
},
{
"alpha_fraction": 0.5635980367660522,
"alphanum_fraction": 0.6303583979606628,
"avg_line_length": 30.600000381469727,
"blob_id": "353058ec81043ad83864aae85a9f7d3c5995131a",
"content_id": "fae0a68164fe070c8f12f558529e8cda3b82354a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1423,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 45,
"path": "/personality/constants.py",
"repo_name": "meyersbs/personality",
"src_encoding": "UTF-8",
"text": "from pkg_resources import resource_filename\n\nDATASET_ORIGINAL_PATH = resource_filename(\n 'personality', 'data/mypersonality_final.csv'\n )\nDATASET_CLEANED_PATH = resource_filename(\n 'personality', 'data/mypersonality_new.csv'\n )\nAGGREGATE_INFO_FILE = resource_filename(\n 'personality', 'data/aggregate_info.pkl'\n )\nVERBS_PATH = resource_filename('personality', 'data/verbs.txt')\n\nAGGREGATE_TRAINING = resource_filename(\n 'personality', 'data/aggregate_train.pkl'\n )\nAGGREGATE_TESTING = resource_filename(\n 'personality', 'data/aggregate_test.pkl'\n )\nAGGREGATE_TESTING_STATUSES = resource_filename(\n 'personality', 'data/aggregate_test_statuses.pkl'\n )\nTRAIN_PREDICTIONS = resource_filename(\n 'personality', 'data/training_predictions.pkl'\n )\nTEST_PREDICTIONS = resource_filename(\n 'personality', 'data/testing_predictions.pkl'\n )\n\nRATIO_KEYS = [\n 'eRatio_n', 'eRatio_y', 'nRatio_n', 'nRatio_y', 'aRatio_n', 'aRatio_y',\n 'cRatio_n', 'cRatio_y', 'oRatio_n', 'oRatio_y'\n ]\nFREQ_KEYS = [\n 'eFreq_n', 'eFreq_y', 'nFreq_n', 'nFreq_y', 'aFreq_n', 'aFreq_y',\n 'cFreq_n', 'cFreq_y', 'oFreq_n', 'oFreq_y'\n ]\n\nCY = \"\\033[1;48;5;51;38;5;232m\"\nMA = \"\\033[1;48;5;201;38;5;232m\"\nYE = \"\\033[1;48;5;220;38;5;232m\"\nGR = \"\\033[1;48;5;46;38;5;232m\"\nOR = \"\\033[1;48;5;202;38;5;232m\"\nRE = \"\\033[1;48;5;88;38;5;232m\"\nXX = \"\\x1b[0m\"\n\n"
},
{
"alpha_fraction": 0.49135929346084595,
"alphanum_fraction": 0.5073510408401489,
"avg_line_length": 43.56321716308594,
"blob_id": "8169482668790bc0a3b0f03c9bdc7f8dbd8738ef",
"content_id": "01e73b06c8f2c6a421b78248e4bb4426a8993776",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7754,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 174,
"path": "/personality/word.py",
"repo_name": "meyersbs/personality",
"src_encoding": "UTF-8",
"text": "__all__ = ['Word']\n\nfrom .constants import CY, MA, YE, GR, OR, XX\n\nclass Word(object):\n @classmethod\n def init(cls, word, word_data=None):\n \"\"\"\n Given a word (str) and it's associated data (list), create a new 'Word'\n object with 24 fields:\n word : the given word\n count : the number of occurences of this word within the dataset\n eFreq_n : the number of times this word is associated with a status\n labelled as 'shy'\n eFreq_y : the number of times this word is associated with a status\n labelled as 'extraverted'\n nFreq_n : the number of times this word is associated with a status\n labelled as 'secure'\n nFreq_y : the number of times this word is associated with a status\n labelled as 'neurotic'\n aFreq_n : the number of times this word is associated with a status\n labelled as 'uncooperative'\n aFreq_y : the number of times this word is associated with a status\n labelled as 'friendly'\n cFreq_n : the number of times this word is associated with a status\n labelled as 'careless'\n cFreq_y : the number of times this word is associated with a status\n labelled as 'precise'\n oFreq_n : the number of times this word is associated with a status\n labelled as 'unimaginative'\n oFreq_y : the number of times this word is associated with a status\n labelled as 'insightful'\n eRatio_n : the ratio of eFreq_n / count\n eRatio_y : the ratio of eFreq_y / count\n nRatio_n : the ratio of nFreq_n / count\n nRatio_y : the ratio of nFreq_y / count\n aRatio_n : the ratio of aFreq_n / count\n aRatio_y : the ratio of aFreq_y / count\n cRatio_n : the ratio of cFreq_n / count\n cRatio_y : the ratio of cFreq_y / count\n oRatio_n : the ratio of oFreq_n / count\n oRatio_y : the ratio of oFreq_y / count\n \"\"\"\n instance = cls()\n\n instance.word = word\n\n instance.count = 1\n\n if instance.word == '([{<NULL>}])':\n instance.eFreq_n = 0\n instance.eFreq_y = 0\n instance.nFreq_n = 0\n instance.nFreq_y = 0\n instance.aFreq_n = 0\n instance.aFreq_y = 0\n instance.cFreq_n = 0\n instance.cFreq_y = 0\n instance.oFreq_n = 0\n instance.oFreq_y = 0\n else:\n if word_data[0] == 'n':\n instance.eFreq_n = 1 # shy\n instance.eFreq_y = 0 # extravert\n else:\n instance.eFreq_n = 0 # shy\n instance.eFreq_y = 1 # extravert\n if word_data[1] == 'n':\n instance.nFreq_n = 1 # secure\n instance.nFreq_y = 0 # neurotic\n else:\n instance.nFreq_n = 0 # secure\n instance.nFreq_y = 1 # neurotic\n if word_data[2] == 'n':\n instance.aFreq_n = 1 # uncooperative\n instance.aFreq_y = 0 # friendly\n else:\n instance.aFreq_n = 0 # uncooperative\n instance.aFreq_y = 1 # friendly\n if word_data[3] == 'n':\n instance.cFreq_n = 1 # careless\n instance.cFreq_y = 0 # precise\n else:\n instance.cFreq_n = 0 # careless\n instance.cFreq_y = 1 # precise\n if word_data[4] == 'n':\n instance.oFreq_n = 1 # unimaginative\n instance.oFreq_y = 0 # insightful\n else:\n instance.oFreq_n = 0 # unimaginative\n instance.oFreq_y = 1 # insightful\n\n\n instance.eRatio_n = float( instance.eFreq_n / instance.count )\n instance.eRatio_y = float( instance.eFreq_y / instance.count )\n instance.nRatio_n = float( instance.nFreq_n / instance.count )\n instance.nRatio_y = float( instance.nFreq_y / instance.count )\n instance.aRatio_n = float( instance.aFreq_n / instance.count )\n instance.aRatio_y = float( instance.aFreq_y / instance.count )\n instance.cRatio_n = float( instance.cFreq_n / instance.count )\n instance.cRatio_y = float( instance.cFreq_y / instance.count )\n instance.oRatio_n = float( instance.oFreq_n / instance.count )\n instance.oRatio_y = 
float( instance.oFreq_y / instance.count )\n\n #print(instance)\n return instance\n\n\n def update_freqs(self, updates=['n','n','n','n','n']):\n self.count += 1\n if updates[0] == 'n':\n self.eFreq_n += 1 # shy\n else:\n self.eFreq_y += 1 # extravert\n if updates[1] == 'n':\n self.nFreq_n += 1 # secure\n else:\n self.nFreq_y += 1 # neurotic\n if updates[2] == 'n':\n self.aFreq_n += 1 # uncooperative\n else:\n self.aFreq_y += 1 # friendly\n if updates[3] == 'n':\n self.cFreq_n += 1 # careless\n else:\n self.cFreq_y += 1 # precise\n if updates[4] == 'n':\n self.oFreq_n += 1 # unimaginative\n else:\n self.oFreq_y += 1 # insightful\n\n self.update_ratios()\n #print(self)\n return True\n\n\n def update_ratios(self):\n self.eRatio_n = float( self.eFreq_n / self.count )\n self.eRatio_y = float( self.eFreq_y / self.count )\n self.nRatio_n = float( self.nFreq_n / self.count )\n self.nRatio_y = float( self.nFreq_y / self.count )\n self.aRatio_n = float( self.aFreq_n / self.count )\n self.aRatio_y = float( self.aFreq_y / self.count )\n self.cRatio_n = float( self.cFreq_n / self.count )\n self.cRatio_y = float( self.cFreq_y / self.count )\n self.oRatio_n = float( self.oFreq_n / self.count )\n self.oRatio_y = float( self.oFreq_y / self.count )\n\n\n def __str__(self):\n out = \"\\n=============================\"\n out += \"\\nWORD: \" + self.word\n out += \"{:s}\\n {:=<27s}{:s}\".format(CY, \"EXTRAVERSION: \", XX)\n out += \"\\n {: >13s} {: >11s}\".format(\"shy\", \"extraverted\")\n out += \"\\n # {: >13d} {: >11d}\".format(self.eFreq_n, self.eFreq_y)\n out += \"\\n % {: >13f} {: >11f}\".format(self.eRatio_n, self.eRatio_y)\n out += \"{:s}\\n {:=<27s}{:s}\".format(MA, \"NEUROTICISM: \", XX)\n out += \"\\n {: >13s} {: >11s}\".format(\"secure\", \"neurotic\")\n out += \"\\n # {: >13d} {: >11d}\".format(self.nFreq_n, self.nFreq_y)\n out += \"\\n % {: >13f} {: >11f}\".format(self.nRatio_n, self.nRatio_y)\n out += \"{:s}\\n {:=<27s}{:s}\".format(YE, \"AGREEABLENESS: \", XX)\n out += \"\\n {: >13s} {: >11s}\".format(\"uncooperative\", \"friendly\")\n out += \"\\n # {: >13d} {: >11d}\".format(self.aFreq_n, self.aFreq_y)\n out += \"\\n % {: >13f} {: >11f}\".format(self.aRatio_n, self.aRatio_y)\n out += \"{:s}\\n {:=<27s}{:s}\".format(GR, \"CONSCIENTIOUSNESS: \", XX)\n out += \"\\n {: >13s} {: >11s}\".format(\"careless\", \"precise\")\n out += \"\\n # {: >13d} {: >11d}\".format(self.cFreq_n, self.cFreq_y)\n out += \"\\n % {: >13f} {: >11f}\".format(self.cRatio_n, self.cRatio_y)\n out += \"{:s}\\n {:=<27s}{:s}\".format(OR, \"OPENNESS: \", XX)\n out += \"\\n {: >13s} {: >11s}\".format(\"unimaginative\", \"insightful\")\n out += \"\\n # {: >13d} {: >11d}\".format(self.oFreq_n, self.oFreq_y)\n out += \"\\n % {: >13f} {: >11f}\".format(self.oRatio_n, self.oRatio_y)\n out += \"\\n=============================\"\n return out\n"
},
{
"alpha_fraction": 0.5431292653083801,
"alphanum_fraction": 0.5541523694992065,
"avg_line_length": 37.13877487182617,
"blob_id": "ac7f92b83af9b46a158a0ba3bc84e42d94539834",
"content_id": "985c428d067848105b306c53b9a05556b0805f59",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9344,
"license_type": "permissive",
"max_line_length": 95,
"num_lines": 245,
"path": "/personality/predict.py",
"repo_name": "meyersbs/personality",
"src_encoding": "UTF-8",
"text": "import csv\nimport math\nimport os\nimport _pickle\nimport re\n\nfrom difflib import get_close_matches\n\nfrom . import aggregate, helpers, lemmatizer\nfrom . import word as WORD\nfrom .constants import *\n\nDATA = None\nDATA_TRAIN = None\nDATA_TEST = None\n\ndef normalize_ratios(preds):\n \"\"\"\n Given a well-defined dictionary of personality values, return a dictionary\n with normalized ratios. Ratios are normalized by taking the sum of the '_y'\n and '_n' ratios for a given personality class, and dividing the '_y' and\n '_n' ratios by the sum. This gives us a float between 0 and 1 denoting our\n certainty that the given sentence/text is each respective class.\n\n Example:\n INPUT: preds = {'eRatio_n': 0.6, 'eRatio_y': 1.4,\n 'nRatio_n': 0.3, 'nRatio_y': 1.7,\n ...\n }\n OUTPUT: preds_copy = {'eRatio_n': 0.3, 'eRatio_y': 0.7,\n 'nRatio_n': 0.15, 'nRatio_y': 0.85\n ...\n }\n \"\"\"\n norm_num = preds['eRatio_n'] + preds['eRatio_y']\n\n preds_copy = preds.copy()\n if norm_num != 0:\n for key in RATIO_KEYS:\n preds_copy[key] = float( preds[key] / norm_num )\n\n return preds_copy\n\ndef predict_sent(sent):\n \"\"\"\n Given a sentence (str), return a well-formed dictionary of personality\n values where the key is a unique identifier (int), and the value is a list,\n where index 0 is a word (str) and index 1 is its corresponding Word object.\n\n Prediction follows the following process:\n 1) Split the sentence on spaces and underscores.\n 2) For each word in the split sentence, if the word is present in the\n 'training' data, use the corresponding predictions from the 'training'\n data.\n 3) If any of the words in the sentence were not present in the 'training'\n data, find the lemma for each non-present word, and use the\n corresponding predictions from the 'training' data.\n 4) If any of the non-presents words had lemmas that were also not present,\n use the library difflib to find the word from the 'training' data that\n most closely matches the non-present word, and use its corresponding\n predictions from the 'training' data.\n \"\"\"\n global DATA\n preds = {}\n flag = False\n tokens = helpers.clean_list(re.split(r\"[\\s_\\-]+\", sent))\n for i, word in enumerate(tokens):\n pred = predict_str(word)\n if pred is None:\n flag = True\n preds[i] = [word, pred]\n\n if not flag:\n return preds\n else:\n lemmas = lemmatizer.NLTKLemmatizer(\n helpers.clean_list(re.split(r\"[\\s_\\-]+\", sent))\n ).execute()\n for key, val in preds.items():\n if val[1] is None:\n # Try to make a prediction using the lemma.\n pred = predict_str(lemmas[key])\n if pred is None:\n # Make a prediction with the closest matching word in the\n # training data.\n pred = get_close_matches(val[0], DATA.keys(), 1, 0.0)\n preds[key] = [val[0], predict_str(pred)]\n # This should never happen, but it's a safety net.\n if preds[key][1] is None:\n preds[key] = [\n \"([{<NULL>}])\",\n WORD.Word.init(\"([{<NULL>}])\", None)\n ]\n\n return preds\n\ndef aggregate_sent(preds):\n \"\"\"\n Aggregate the word-level personality predictions into a single dictionary\n with sentence-level predictions. Aggregation is simple: for each key in\n the given (well-formed) prediction dictionary, add its corresponding\n value to respective key in the 'out' dictionary.\n\n Example:\n INPUT: preds = {0: ['i' , <Word{'eFreq_n': 0.6, 'eFreq_y': 1.4, ...}>,\n 1: ['am', <Word{'eFreq_n': 0.3, 'eFreq_y': 1.7, ...}>,\n ...\n }\n OUTPUT: out = {'eFreq_n': 0.9, 'eFreq_y': 3.1, ... 
}\n \"\"\"\n count = 0\n out = {\n 'eFreq_n': 0, 'eFreq_y': 0, 'eRatio_n': 0, 'eRatio_y': 0,\n 'nFreq_n': 0, 'nFreq_y': 0, 'nRatio_n': 0, 'nRatio_y': 0,\n 'aFreq_n': 0, 'aFreq_y': 0, 'aRatio_n': 0, 'aRatio_y': 0,\n 'cFreq_n': 0, 'cFreq_y': 0, 'cRatio_n': 0, 'cRatio_y': 0,\n 'oFreq_n': 0, 'oFreq_y': 0, 'oRatio_n': 0, 'oRatio_y': 0,\n 'count': 0\n }\n for key, val in preds.items():\n word = val[1]\n out['count'] += word.count\n out['eFreq_n'] += word.eFreq_n\n out['eFreq_y'] += word.eFreq_y\n out['nFreq_n'] += word.nFreq_n\n out['nFreq_y'] += word.nFreq_y\n out['aFreq_n'] += word.aFreq_n\n out['aFreq_y'] += word.aFreq_y\n out['cFreq_n'] += word.cFreq_n\n out['cFreq_y'] += word.cFreq_y\n out['oFreq_n'] += word.oFreq_n\n out['oFreq_y'] += word.oFreq_y\n\n if out['count'] != 0:\n for r_key, f_key in zip(RATIO_KEYS, FREQ_KEYS):\n out[r_key] = float( out[f_key] / out['count'] )\n\n return out\n\ndef predict_str(word):\n \"\"\"\n Helper function to determine whether or not a given word exists within the\n 'training' data. If it does, return the corresponding Word object, else\n return None.\n \"\"\"\n global DATA\n # This should never happen, but it's a safety net.\n if type(word) != list and type(word) != str:\n return None\n # TODO: Figure out why the given 'word' is sometimes a list of length 1.\n elif type(word) == list:\n return DATA[word[0]] if word[0] in DATA.keys() else None\n elif type(word) == str:\n return DATA[word] if word in DATA.keys() else None\n\ndef predict_split(train_size):\n if os.path.getsize(TRAIN_PREDICTIONS) == 0:\n print(\"Splitting Training/Testing Data...\")\n _, _, statuses = aggregate.aggregate_train_test(train_size)\n statuses = \"\\n\".join(statuses)\n print(\"Gathering Features...\")\n train_preds = predict(statuses, text_type='str', data=AGGREGATE_TRAINING)\n print(\"==== DUMPING TRAINING ====\")\n with open(TRAIN_PREDICTIONS, 'wb') as f:\n _pickle.dump(train_preds, f)\n test_preds = predict(statuses, text_type='str', data=AGGREGATE_TESTING)\n print(\"==== DUMPING TESTING ====\")\n with open(TEST_PREDICTIONS, 'wb') as f:\n _pickle.dump(test_preds, f)\n\n with open(TRAIN_PREDICTIONS, 'rb') as f:\n train_preds = _pickle.load(f)\n helpers.print_preds(train_preds, \"TRAINING RESULTS\")\n with open(TEST_PREDICTIONS, 'rb') as f:\n test_preds = _pickle.load(f)\n helpers.print_preds(test_preds, \"TESTING RESULTS\")\n print(\"Reading Testing Data...\")\n with open(AGGREGATE_TESTING_STATUSES, 'r') as f:\n csv_reader = csv.reader(f, delimiter=',', quotechar='\"')\n\n predictions = {}\n print(\"Collecting Predictions...\\n\")\n for i, status in enumerate(csv_reader):\n print(\"\\r\" + str(i), end=\"\")\n train_pred = predict(status[0], text_type='str', data=AGGREGATE_TRAINING)\n test_pred = predict(status[0], text_type='str', data=AGGREGATE_TESTING)\n predictions[i] = [status[0], pred_to_labels(train_pred), pred_to_labels(test_pred)]\n\n #sys.exit()\n return predictions\n\ndef pred_to_labels(pred):\n labels = {\"e\": 'n', \"n\": 'n', \"a\": 'n', \"c\": 'n', \"o\": 'n'}\n if pred['eRatio_y'] > pred['eRatio_n']:\n labels[\"e\"] = 'y'\n if pred['nRatio_y'] > pred['nRatio_n']:\n labels[\"n\"] = 'y'\n if pred['aRatio_y'] > pred['aRatio_n']:\n labels[\"a\"] = 'y'\n if pred['cRatio_y'] > pred['cRatio_n']:\n labels[\"c\"] = 'y'\n if pred['oRatio_y'] > pred['oRatio_n']:\n labels[\"o\"] = 'y'\n\n #print(labels)\n return labels\n\ndef predict(file_in, text_type='file', data=AGGREGATE_INFO_FILE):\n \"\"\"\n Given a valid filepath, run predict_sent() for each line in the file and\n print 
out the prediction values for each personality class.\n \"\"\"\n global DATA\n # Load the 'training' data once\n with open(data, 'rb') as in_file:\n DATA = _pickle.load(in_file)\n\n if text_type == 'file':\n # Get the raw text from file_in\n with open(file_in, newline='') as in_file:\n csv_reader = csv.reader(in_file, delimiter=',', quotechar='\"')\n text = \"\\n\".join(in_file.readlines())\n else:\n text = file_in\n\n # Aggregate the file-level values\n preds = {'eFreq_n': 0, 'eFreq_y': 0, 'eRatio_n': 0, 'eRatio_y': 0,\n 'nFreq_n': 0, 'nFreq_y': 0, 'nRatio_n': 0, 'nRatio_y': 0,\n 'aFreq_n': 0, 'aFreq_y': 0, 'aRatio_n': 0, 'aRatio_y': 0,\n 'cFreq_n': 0, 'cFreq_y': 0, 'cRatio_n': 0, 'cRatio_y': 0,\n 'oFreq_n': 0, 'oFreq_y': 0, 'oRatio_n': 0, 'oRatio_y': 0,\n 'count': 0}\n for sent in aggregate._clean(text.lower()).split('\\n'):\n if sent != '':\n # Predict & Aggregate sentence-level values\n p = predict_sent(sent)\n aggs = aggregate_sent(p)\n # Update the file-level values\n for key, val in aggs.items():\n preds[key] += val\n\n preds = normalize_ratios(preds)\n helpers.print_preds(preds, text[0:50])\n\n return preds\n"
},
{
"alpha_fraction": 0.4582524299621582,
"alphanum_fraction": 0.4673139154911041,
"avg_line_length": 31.87234115600586,
"blob_id": "9f62d0b2a8ec230dbf9b6cedc75161b0d0327b86",
"content_id": "1710695720101d5f7aedb9321f79a7f5a1b62b45",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1545,
"license_type": "permissive",
"max_line_length": 88,
"num_lines": 47,
"path": "/personality/clean.py",
"repo_name": "meyersbs/personality",
"src_encoding": "UTF-8",
"text": "import csv\nimport os\n\nfrom . import constants\n\nHEADER = [\n # New Label # Old Label\n \"text\", # STATUS : a Facebook status\n \"eBin\", # cEXT : binary Extraversion flag\n \"nBin\", # cNEU : binary Neuroticism flag\n \"aBin\", # cAGR : binary Agreeableness flag\n \"cBin\", # cCON : binary Conscientiousness flag\n \"oBin\" # cOPN : binary Openness flag\n ]\n\ndef clean_data(data_in=constants.DATASET_ORIGINAL_PATH,\n data_out=constants.DATASET_CLEANED_PATH):\n\n with open(data_in, newline='') as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='\"')\n next(reader, None) # skip the header row\n\n with open(data_out, \"w\") as outfile:\n writer = csv.writer(outfile, delimiter=',', quotechar='\"',\n quoting=csv.QUOTE_MINIMAL)\n writer.writerow(HEADER)\n\n author = \"\"\n text = []\n for row in reader:\n if author == \"\":\n author = row[0]\n text.append(row[1])\n elif author == row[0]:\n text.append(row[1])\n else:\n new_row = [\" \".join(text), row[7], row[8], row[9], row[10], row[11]]\n writer.writerow(new_row)\n author = \"\"\n text = []\n\n# print(row)\n# writer.writerow(row[0:12])\n\n\nif __name__==\"__main__\":\n clean_data()\n"
},
{
"alpha_fraction": 0.7707316875457764,
"alphanum_fraction": 0.7707316875457764,
"avg_line_length": 54.90909194946289,
"blob_id": "6046ae6b413b6063344166d70888d79c1bb93459",
"content_id": "6af6062beaae6b9e0e5811e8193827fc26b0f56f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 615,
"license_type": "permissive",
"max_line_length": 400,
"num_lines": 11,
"path": "/README.md",
"repo_name": "meyersbs/personality",
"src_encoding": "UTF-8",
"text": "# personality\n\n### Description\n\nOne of the big issues in procuring personality insights from linguistic data is that the two major software systems that do so, [LIWC](https://liwc.wpengine.com/) and [IBM Watson](https://www.ibm.com/watson/developercloud/doc/personality-insights/index.html), are not open source. We have little, if any, idea what goes on behind-the-scenes. On top of that, those two systems are not (entirely) free.\n\nThis is my attempt to implement a system like LIWC or IBM Watson that can predict the personality aspects of the author of a piece of writing.\n\n### Dataset\n\n### Personality Classes\n"
},
{
"alpha_fraction": 0.5800561904907227,
"alphanum_fraction": 0.5854400992393494,
"avg_line_length": 31.12030029296875,
"blob_id": "96f543145272bec174edf3117bb6fb2bc80b2170",
"content_id": "ee0e2c462cef72298dabfa49a4f2ac7b68c88b86",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4272,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 133,
"path": "/main.py",
"repo_name": "meyersbs/personality",
"src_encoding": "UTF-8",
"text": "import argparse\nimport csv\nimport os\nimport _pickle\n\nfrom personality import constants, helpers\n\ndef aggregate(args):\n if os.path.exists(args.data):\n aggregate_data(data=args.data)\n else:\n pass\n\ndef clean(args):\n if os.path.exists(args.data_in) and os.path.exists(args.data_out):\n clean_data(data_in=args.data_in, data_out=args.data_out)\n else:\n pass\n\ndef predict(args):\n if args.str_in is None:\n if os.path.exists(args.file_in):\n pred.predict(os.path.abspath(args.file_in), text_type='file')\n else:\n pred.predict(args.str_in, text_type='str')\n\ndef performance(args):\n print(\"Starting Up...\")\n performance_data(args.train_split)\n\ndef print_data(args):\n # TODO: Move this elsewhere.\n with open(constants.AGGREGATE_INFO_FILE, 'rb') as f:\n preds = _pickle.load(f)\n\n results = {'eRatio_y': 0, 'eRatio_n': 0, 'nRatio_y': 0, 'nRatio_n': 0,\n 'aRatio_y': 0, 'aRatio_n': 0, 'cRatio_y': 0, 'cRatio_n': 0,\n 'oRatio_y': 0, 'oRatio_n': 0}\n for key, pred in preds.items():\n if pred.eRatio_y > pred.eRatio_n:\n results['eRatio_y'] += 1\n else:\n results['eRatio_n'] += 1\n if pred.nRatio_y > pred.nRatio_n:\n results['nRatio_y'] += 1\n else:\n results['nRatio_n'] += 1\n if pred.aRatio_y > pred.aRatio_n:\n results['aRatio_y'] += 1\n else:\n results['aRatio_n'] += 1\n if pred.cRatio_y > pred.cRatio_n:\n results['cRatio_y'] += 1\n else:\n results['cRatio_n'] += 1\n if pred.oRatio_y > pred.oRatio_n:\n results['oRatio_y'] += 1\n else:\n results['oRatio_n'] += 1\n\n helpers.print_preds(results, '')\n with open('results.pkl', 'wb') as f:\n _pickle.dump(results, f)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=''\n )\n subparsers = parser.add_subparsers(title='Commands', dest='command')\n subparsers.required = True\n\n\n parser_aggregate = subparsers.add_parser(\n 'aggregate', help=\"Aggregate word-level 'ENACO' scores from the \"\n \"provided 'training' data.\"\n )\n parser_aggregate.add_argument(\n '-d', '--data', default=constants.DATASET_CLEANED_PATH,\n help='The absolute path of the file to aggregate.'\n )\n parser_aggregate.set_defaults(handler=aggregate)\n\n parser_clean = subparsers.add_parser(\n 'clean', help=\"Remove irrelevant columns from the dataset.\",\n )\n parser_clean.add_argument(\n '-i', '--data_in', default=constants.DATASET_ORIGINAL_PATH,\n help=\"The absolute path of the file to be cleaned.\"\n )\n parser_clean.add_argument(\n '-o', '--data_out', default=constants.DATASET_CLEANED_PATH,\n help=\"The absolute path to save the cleaned input to.\"\n )\n parser_clean.set_defaults(handler=clean)\n\n parser_performance = subparsers.add_parser(\n 'performance', help=\"Get preformance for the 'classifier'.\",\n )\n parser_performance.add_argument(\n '-t', '--train_split', default=0.75,\n help=\"The percentage of the data to keep as 'training' data.\"\n )\n parser_performance.set_defaults(handler=performance)\n\n\n parser_print = subparsers.add_parser(\n 'print', help=\"Print the an 'ENACO' scores for the whole dataset.\",\n )\n parser_print.set_defaults(handler=print_data)\n\n\n parser_predict = subparsers.add_parser(\n 'predict', help=\"Predict an 'ENACO' score.\",\n )\n group = parser_predict.add_mutually_exclusive_group(required=True)\n group.add_argument(\n '-s', '--str_in', type=str,\n help=\"Use to specify a str input.\"\n )\n group.add_argument(\n '-f', '--file_in', type=str,\n help=\"Use to specify a filename.\"\n )\n parser_predict.set_defaults(handler=predict)\n\n\n args = parser.parse_args()\n\n from personality.clean 
import clean_data\n from personality.aggregate import aggregate_data\n import personality.predict as pred\n from personality.performance import performance_data\n args.handler(args)\n"
},
{
"alpha_fraction": 0.33974745869636536,
"alphanum_fraction": 0.3653036653995514,
"avg_line_length": 41.10126495361328,
"blob_id": "d578bf652ff4f896639bbb35dcb45fd023857158",
"content_id": "bffcb5783c99660de8324406b639d5e6e550c974",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3326,
"license_type": "permissive",
"max_line_length": 81,
"num_lines": 79,
"path": "/personality/performance.py",
"repo_name": "meyersbs/personality",
"src_encoding": "UTF-8",
"text": "from . import predict, helpers\nfrom .constants import CY, MA, YE, GR, OR, XX\n\ndef avg_tot(metric, support):\n tot, avg = (0.0,)*2\n for key, val in metric.items():\n avg += float(val*support[key])\n tot += support[key]\n\n return float( avg / tot ), tot\n\ndef prfs(predictions):\n results = {\n 'e': {'fp': 0, 'fn': 0, 'tp': 0, 'tn': 0},\n 'n': {'fp': 0, 'fn': 0, 'tp': 0, 'tn': 0},\n 'a': {'fp': 0, 'fn': 0, 'tp': 0, 'tn': 0},\n 'c': {'fp': 0, 'fn': 0, 'tp': 0, 'tn': 0},\n 'o': {'fp': 0, 'fn': 0, 'tp': 0, 'tn': 0}\n }\n print(\"Evaluating Predictions...\")\n for key, val in predictions.items():\n print(\"Prediction: \" + str(key))\n sentence = val[0]\n train = val[1]\n test = val[2]\n for label in ['e', 'n', 'a', 'c', 'o']:\n if train[label] == 'y':\n if test[label] == 'y':\n results[label]['tp'] += 1\n else:\n results[label]['fp'] += 1\n elif train[label] == 'n':\n if test[label] == 'y':\n results[label]['fn'] += 1\n else:\n results[label]['tn'] += 1\n else:\n helpers.print_warning(\n label, sentence, test[label], train[label]\n )\n\n p, r, f, s = ({},)*4\n for key, val in results.items():\n p[key] = float( val['tp'] / (val['tp'] + val['fp']) )\n r[key] = float( val['tp'] / (val['tp'] + val['fn']) )\n f[key] = float( (2*val['tp'])/((2*val['tp']) + val['fp'] + val['fn']) )\n s[key] = val['tp'] + val['tn'] + val['fp'] + val['fn']\n\n return p, r, f, s\n\ndef performance_data(train_size):\n predictions = predict.predict_split(train_size)\n p, r, f, s = prfs(predictions)\n print_preformance(p, r, f, s)\n\ndef print_preformance(p, r, f, s):\n p_avg, s_tot = avg_tot(p, s)\n r_avg, _ = avg_tot(r, s)\n f_avg, _ = avg_tot(f, s)\n\n out = \"\\n==== RESULTS: ===============================================\"\n out += \"\\n | precision | recall | f-score | support |\"\n out += \"\\n-------------------------------------------------------------\"\n out += \"\\n{:s}{: >16s}{:s} | {: >9.2f} | {: >6.2f} | {: >7.2f} | {: >7d} |\" \\\n .format(CY, \"EXTRAVERSION:\", XX, p['e'], r['e'], f['e'], s['e'])\n out += \"\\n{:s}{: >16s}{:s} | {: >9.2f} | {: >6.2f} | {: >7.2f} | {: >7d} |\" \\\n .format(MA, \"NEUROTICISM:\", XX, p['n'], r['n'], f['n'], s['n'])\n out += \"\\n{:s}{: >16s}{:s} | {: >9.2f} | {: >6.2f} | {: >7.2f} | {: >7d} |\" \\\n .format(YE, \"AGREEABLENESS:\", XX, p['a'], r['a'], f['a'], s['a'])\n out += \"\\n{:s}{: >16s}{:s} | {: >9.2f} | {: >6.2f} | {: >7.2f} | {: >7d} |\" \\\n .format(GR, \"CONSCIENTIOUSNESS:\", XX, p['c'], r['c'], f['c'], s['c'])\n out += \"\\n{:s}{: >16s}{:s} | {: >9.2f} | {: >6.2f} | {: >7.2f} | {: >7d} |\" \\\n .format(OR, \"OPENNESS:\", XX, p['o'], r['o'], f['o'], s['o'])\n out += \"\\n-------------------------------------------------------------\"\n out += \"\\n AVG / TOT: | {: >9.2f} | {: >6.2f} | {: >7.2f} | {: >7d} |\" \\\n .format(p_avg, r_avg, f_avg, int(s_tot))\n out += \"\\n=============================================================\\n\"\n\n print(out)\n"
},
{
"alpha_fraction": 0.5608803629875183,
"alphanum_fraction": 0.5654951930046082,
"avg_line_length": 37.58904266357422,
"blob_id": "57c9dfcb7521836c0fe86cbf6a6489f180756e89",
"content_id": "9cd7016c228db890069374c4fce489bd41940459",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2817,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 73,
"path": "/personality/aggregate.py",
"repo_name": "meyersbs/personality",
"src_encoding": "UTF-8",
"text": "import csv\nimport _pickle\nimport sys\nimport random\nimport re\n\nfrom . import constants, status, word\n\ndef _clean(word):\n new_word = re.sub(r\"[\\.!?,:;@%$\\^\\)\\(\\]\\[\\{\\}\\*=\\+\\<\\>\\'\\\"#]+\", '', word)\n return new_word.replace('\\r', '\\n')\n\ndef aggregate_train_test(train_size, data=constants.DATASET_CLEANED_PATH):\n with open(data, newline='') as csvfile:\n csv_reader = csv.reader(csvfile, delimiter=',', quotechar='\"')\n statuses = list(csv_reader)\n statuses = statuses[1:] # skip the header row\n size = len(statuses)\n\n train_words, test_words = dict(), dict()\n train_size = int(size*train_size)\n\n random.shuffle(statuses)\n\n for i in range(0, train_size):\n status = statuses[i]\n for w in re.split(r'[\\s_\\-]+', _clean(status[0].lower())):\n if _clean(w) not in train_words.keys():\n train_words[_clean(w)] = word.Word.init(_clean(w), status[6:])\n else:\n train_words[_clean(w)].update_freqs(status[6:])\n\n for i in range(train_size, size):\n status = statuses[i]\n for w in re.split(r'[\\s_\\-]+', _clean(status[0].lower())):\n if _clean(w) not in test_words.keys():\n test_words[_clean(w)] = word.Word.init(_clean(w), status[6:])\n else:\n test_words[_clean(w)].update_freqs(status[6:])\n\n with open(constants.AGGREGATE_TRAINING, 'wb') as out_file:\n _pickle.dump(train_words, out_file)\n with open(constants.AGGREGATE_TESTING, 'wb') as out_file:\n _pickle.dump(test_words, out_file)\n stats = []\n with open(constants.AGGREGATE_TESTING_STATUSES, 'w') as out_file:\n csv_writer = csv.writer(out_file, delimiter=',', quotechar='\"',\n quoting=csv.QUOTE_MINIMAL)\n for status in statuses:\n csv_writer.writerow([status[0]])\n stats.append(status[0])\n return train_words, test_words, stats\n\ndef aggregate_data(data=constants.DATASET_CLEANED_PATH):\n with open(data, newline='') as csvfile:\n csv_reader = csv.reader(csvfile, delimiter=',', quotechar='\"')\n next(csv_reader, None) # skip the header row\n\n words = dict()\n # for each line in the csv file\n for status in csv_reader:\n # for each word in the status\n for w in re.split(r'[\\s_\\-]+', _clean(status[0].lower())):\n # create a Word object if we don't have one\n if _clean(w) not in words.keys():\n words[_clean(w)] = word.Word.init(_clean(w), status[6:])\n # update the Word object if it exists\n else:\n words[_clean(w)].update_freqs(status[6:])\n\n with open(constants.AGGREGATE_INFO_FILE, 'wb') as out_file:\n _pickle.dump(words, out_file)\n return words\n"
}
] | 10 |
ixysoft/notes
|
https://github.com/ixysoft/notes
|
394c71754007950efa6d32fce8b81c482ae80156
|
cf38fa8a82357a49e36b80f1d5ec7a334ca45dcb
|
0ed3a95250bab04f122084d3ab6d45bd91e62aac
|
refs/heads/master
| 2021-08-16T03:01:20.223389 | 2020-07-21T06:06:32 | 2020-07-21T06:06:32 | 205,130,379 | 2 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.8253968358039856,
"alphanum_fraction": 0.8492063283920288,
"avg_line_length": 40.33333206176758,
"blob_id": "03724bf53e5d15c68b4121cf26b2d572c838ec1e",
"content_id": "e0c78764fe22f8f8d9b6e8e331d2cace4b38afda",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 334,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 3,
"path": "/算法/位运算/整数异号/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 判断两个整数是否异号\n已知数字的最高位表示符号,所以只需要判断最高位是否相反即可.我们很容易想到异或操作.不同数字返回1,相同数字返回0. \n我们对两个数字进行异或操作,得到的结果最高位如果是1(即该数是负数)时,说明需要判断的两个数异号. \n"
},
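A minimal Python sketch of the XOR trick described in the note above; the helper name `opposite_signs` is mine, not from the original:

```python
def opposite_signs(a: int, b: int) -> bool:
    # XOR carries the sign bit through: the result is negative exactly
    # when the two operands have different sign bits.
    return (a ^ b) < 0

assert opposite_signs(-4, 25)
assert not opposite_signs(3, 7)
assert not opposite_signs(-3, -7)
```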
{
"alpha_fraction": 0.5528249740600586,
"alphanum_fraction": 0.5558107495307922,
"avg_line_length": 28.625850677490234,
"blob_id": "82324d166354e58472fc6e92c6fc1e3e32665e71",
"content_id": "4c875013705ed9c12745b888c7a7041cfdb264dd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 4624,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 147,
"path": "/数据结构/树/字典树/term1/trie.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#include \"trie.h\"\n\ntriep trie(){\n triep tree = (triep)malloc(sizeof(Trie));\n tree->root = NULL;\n tree->size = 0;\n\n return tree;\n}\n\ntrienodep trienode(byte key,void* data,int size){\n trienodep node = (trienodep)malloc(sizeof(TrieNode));\n node->key = key;\n node->data = data;\n node->size = size;\n node->next = NULL;\n node->next_level = NULL;\n\n return node;\n}\n\nBoolean insertNode(triep tree,string key,void* data,int size){\n return _insertNode(tree,key,data,size,TRUE,FALSE);\n}\n\nBoolean _insertNode(triep tree,string key,void* data,int size,Boolean fail_when_exists,Boolean fail_when_not_exists){\n if(tree == NULL || key == NULL) return FALSE;\n int col,row;\n trienodep* nodep = &(tree->root);\n trienodep* last_nodep = NULL;\n trienodep node = tree->root;\n for(row = 0; key[row] != '\\0'; row++){ //遍历所有字符\n byte ch = (byte)key[row];\n for(;node != NULL && node->key != ch;node = node->next){\n nodep = &(node->next);\n }\n if(node == NULL){ //节点不存在,则创建节点\n if(fail_when_not_exists == TRUE){ //如果fail_when_not_exists设定为true\n return FALSE;\n }\n node = trienode(ch,NULL,size);\n *nodep = node;\n }\n last_nodep = nodep; //记录最后一级的指针\n nodep = &(node->next_level);\n node = node->next_level; //node移至下一级\n }\n if(last_nodep == NULL) return FALSE; //空字符串\n trienodep last = *last_nodep;\n if(fail_when_exists == TRUE && last->data != NULL) return FALSE; //设置fail_when_exists标志\n if(last->data != NULL){\n free(last->data);\n last->data = NULL;\n }else{ //当前元素不存在\n tree->size++;\n }\n last->data = malloc(size);\n memcpy(last->data,data,size);\n\n return TRUE; //插入成功\n}\n\ntrienodep searchNode(triep tree,string key){\n int col,row;\n trienodep node = tree->root;\n trienodep last = NULL;\n for(row = 0; key[row] != '\\0'; row++){ //遍历所有字符\n byte ch = (byte)key[row];\n for(;node != NULL && node->key != ch;node = node->next);\n if(node == NULL){ //节点不存在,则创建节点\n return NULL;\n }\n last = node;\n node = node->next_level; //node移至下一级\n }\n return last; //返回找到的节点\n}\n\nsearchresultp search(triep tree,string key){\n trienodep res = searchNode(tree,key);\n searchresultp result = (searchresultp)malloc(sizeof(SearchResult));\n result->data = NULL;\n result->status = (res != NULL ? 
TRUE : FALSE);\n if(result->status == TRUE){\n result->data = res->data;\n }\n return result;\n}\n\nBoolean updateNode(triep tree,string key,void* data,int size){\n return _insertNode(tree,key,data,size,FALSE,TRUE);\n}\n\nBoolean updateOrSetNode(triep tree,string key,void* data,int size){\n return _insertNode(tree,key,data,size,FALSE,FALSE);\n}\n\nBoolean deleteNode(triep tree,string key){\n return _deleteNode(tree,key,FALSE); //采用普通方式抹除\n}\n\nBoolean _deleteNode(triep tree,string key,Boolean recusive_delete){\n if(tree == NULL || key == NULL) return FALSE;\n int col,row;\n trienodep* path[8192]; //路径栈\n int path_idx = -1; //路径顶部\n trienodep node = tree->root;\n trienodep* nodep = &tree->root;\n trienodep* last_nodep = NULL;\n trienodep last = NULL;\n for(row = 0; key[row] != '\\0'; row++){ //遍历所有字符\n byte ch = (byte)key[row];\n for(;node != NULL && node->key != ch;node = node->next);\n if(node == NULL){ //节点不存在,则创建节点\n return FALSE;\n }\n path[++path_idx] = nodep; //记录当前路径节点的指针\n last = node;\n last_nodep = nodep;\n nodep = &(node->next_level);\n node = node->next_level; //node移至下一级\n }\n if(recusive_delete == TRUE){\n for(;path_idx >= 0;path_idx--){\n trienodep* tmp = path[path_idx]; //节点缓存\n if(tmp == NULL) return FALSE;\n if((*tmp) != last && (*tmp)->data != NULL) break; \n if((*tmp)->next_level == NULL){\n trienodep next = (*tmp)->next;\n if(*tmp != NULL)\n free(*tmp); //释放*tmp\n *tmp = next;\n }\n }\n }else{\n if(last->data != NULL)\n free(last->data);\n last->data = NULL;\n }\n tree->size--;\n\n return TRUE;\n}\n\nBoolean hardDeleteNode(triep tree,string key){\n return _deleteNode(tree,key,TRUE);\n}"
},
{
"alpha_fraction": 0.751861035823822,
"alphanum_fraction": 0.751861035823822,
"avg_line_length": 24.0625,
"blob_id": "09b7d6aae2d47fbff5ba12c8b50402ee800efe8c",
"content_id": "f15b21254e33d6a8ce03e2a972abfacd3639237c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 815,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 16,
"path": "/编程开发/Linux/命令/grep替代命令ag&ack.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# grep的替代命令\n以前需要查询包含某个字符串可以用grep命令\n比如:`grep -ri 需要查找的字符串 目录`\n现在在linux中有两个更好用的工具:`ag`和`ack`\n需要查询当前目录下包含某个字符串的文件可以执行: \n`ag 字符串`或`ack 字符串` \n需要查询指定目录下包含指定字符串的文件只需要执行: \n`ag 字符串 目录`或`ack 字符串 目录` \n指定-i选项可以忽略字符串的大小写 \ne.g. \n```\nag apple # 查询当前文件夹下包含apple的文件\nag apple /root/ # 查询/root文件夹下包含apple的文件\nag -i apple /root/ # 查询/root文件夹下包含apple的文件,忽略大小写\n```\n经过简单的测试,ag的速度比ack的查询速度快很多.可能是因为ag的名字短一点的缘故(手动滑稽) \n"
},
{
"alpha_fraction": 0.6658415794372559,
"alphanum_fraction": 0.6782178282737732,
"avg_line_length": 21.44444465637207,
"blob_id": "2f59488aaccbc745f831d90def6ac34c4c9dfa9a",
"content_id": "bbef5a8f3f418ecf28651d1691be088d55bd2c74",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 926,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 18,
"path": "/leetcode/17-电话号码的字母组合/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 九键电话号码的字母组合\n**题目:**\n给定一个仅包含数字`2-9`的字符串,返回所有它能表示的字母组合。 \n\n给出数字到字母的映射如下(与电话按键相同)。注意 1 不对应任何字母。 \n\n**示例:** \n```\n输入:\"23\"\n输出:[\"ad\", \"ae\", \"af\", \"bd\", \"be\", \"bf\", \"cd\", \"ce\", \"cf\"].\n```\n**说明:** \n尽管上面的答案是按字典序排列的,但是你可以任意选择答案输出的顺序。 \n\n**思路:** \n这个题目整体比较简单,采用深度遍历的思想即可. \n具体如下: \n先遍历处于第一个位置的数字按键上有的字母,在遍历每个字母时,将数字字符串指针后移,重复这个过程,直到最后运行到字符串尾部,此时我们获取到了一个有效的组合.此时我们把这个组合保存.最后一层函数退出,上一层遍历下一个字母.重复这个过程,最后所有的情况都遍历完毕.\n"
},
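A minimal Python sketch of the depth-first traversal described in the note above; the names (`KEYPAD`, `letter_combinations`, `dfs`) are mine:

```python
KEYPAD = {'2': 'abc', '3': 'def', '4': 'ghi', '5': 'jkl',
          '6': 'mno', '7': 'pqrs', '8': 'tuv', '9': 'wxyz'}

def letter_combinations(digits):
    if not digits:
        return []
    results = []

    def dfs(pos, prefix):
        if pos == len(digits):            # reached the end: one full combination
            results.append(prefix)
            return
        for letter in KEYPAD[digits[pos]]:
            dfs(pos + 1, prefix + letter)  # advance the pointer and recurse

    dfs(0, '')
    return results

print(letter_combinations('23'))
# ['ad', 'ae', 'af', 'bd', 'be', 'bf', 'cd', 'ce', 'cf']
```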
{
"alpha_fraction": 0.7777777910232544,
"alphanum_fraction": 0.7777777910232544,
"avg_line_length": 13.399999618530273,
"blob_id": "9f35775c2ffb540a0ab5b3e067112a4b7dd947cb",
"content_id": "d4093a56e5e7215b15c6754f7343209caae63723",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 126,
"license_type": "no_license",
"max_line_length": 25,
"num_lines": 5,
"path": "/编程开发/软件安装/php相关.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# PHP下常见的软件安装方式\n## pecl\n这个包含在php-pear中,用apt方式安装即可\n## phpize\n包含在php-dev中\n"
},
{
"alpha_fraction": 0.7828571200370789,
"alphanum_fraction": 0.800000011920929,
"avg_line_length": 42.25,
"blob_id": "49ca4db918f17bb76c90b74960fcd9d2c50b90e9",
"content_id": "72bfd07deac6b09888225ad262cc2dbc3e2e3cbd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 175,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 4,
"path": "/DailyCodingProblems/389_composition_and_inheritance_google/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 389_composition_and_inheritance_google\nThis problem was asked by Google. \n\nExplain the difference between composition and inheritance. In which cases would you use each? \n"
},
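The record above only states the interview prompt. As one possible illustration (my own sketch, not part of the dataset): inheritance models an is-a relationship and shares implementation through the class hierarchy, while composition models a has-a relationship and reuses behavior by delegating to contained parts. Composition is generally preferred when you only need to reuse behavior; inheritance fits when the subtype genuinely is a specialization of the base type.

```python
# Inheritance: Dog IS an Animal, so it inherits eat().
class Animal:
    def eat(self):
        return "eating"

class Dog(Animal):
    def bark(self):
        return "woof"

# Composition: Car HAS an Engine and delegates to it.
class Engine:
    def start(self):
        return "vroom"

class Car:
    def __init__(self):
        self.engine = Engine()

    def start(self):
        return self.engine.start()

print(Dog().eat(), Dog().bark())  # inherited behavior plus its own
print(Car().start())              # behavior delegated to a component
```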
{
"alpha_fraction": 0.8928571343421936,
"alphanum_fraction": 0.8928571343421936,
"avg_line_length": 27,
"blob_id": "d1ab5144f1863160d1a7c941d4fc2ffa65cc99ec",
"content_id": "01058e39f5a47fc2188fef65d22f9fd37396b24b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 104,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 2,
"path": "/编程开发/Linux/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# Linux方面的笔记\n可能会涉及Linux c编程,linux的shell及一些linux下系统维护的知识\n"
},
{
"alpha_fraction": 0.7932960987091064,
"alphanum_fraction": 0.7932960987091064,
"avg_line_length": 28.5,
"blob_id": "e262a839b3e415e2aa7b3182fe4681b02ff702cf",
"content_id": "57c5790b1503a68b0579fd114ab1897b96726bb5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 463,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 6,
"path": "/算法/排序算法/睡眠排序/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 睡眠排序 \n见名知意,通过睡眠的方式排序一个队列.理论上能得到一个排序好的队列. \n但是由于思想过于残暴,工作中几乎见不到,传说擅长使用这种算法的大佬被打死的不计其数. \n睡眠排序属于一种不正经的排序,在这里仅做笔记. \n**思路** \n利用很佛系的思想,睡觉.需要排序的数字当做当做睡眠时间,睡眠完毕放入新队列.所有元素睡醒排序即完成. \n"
},
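A toy Python sketch of the idea, assuming small non-negative integers; thread scheduling makes the result only probabilistically sorted, which is part of the joke. The names are mine:

```python
import threading
import time

def sleep_sort(numbers):
    result = []
    lock = threading.Lock()

    def waker(n):
        time.sleep(n * 0.05)      # each value sleeps proportionally to itself
        with lock:
            result.append(n)      # wake up and join the output queue

    threads = [threading.Thread(target=waker, args=(n,)) for n in numbers]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    return result

print(sleep_sort([3, 1, 4, 1, 5]))  # usually [1, 1, 3, 4, 5]
```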
{
"alpha_fraction": 0.6282051205635071,
"alphanum_fraction": 0.6794871687889099,
"avg_line_length": 11,
"blob_id": "cf557f237ec47c24bc61b244499a84fcb3cc7a0b",
"content_id": "18bfb7868d5ee47d8ce65dc1310a1d5d03a8bcd9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 256,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 13,
"path": "/数据结构/树/哈夫曼树/term2/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# huff文件格式\n前4个字节:HUF\\n\nsize: 4b key区域长度\nkey:len:code\n...\n\ncode区:\nsize: 4b 二进制code的大小\ncode的二进制数据\n\n需要的方法:\n字符的code转存储形式\n 二进制字符串转二进制数组 \"10\" -> 0b10\n"
},
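A small Python sketch of the two helpers the note calls for. The note does not pin down the byte packing, so the big-endian, left-zero-padded layout below is my assumption:

```python
def bits_to_int(bits):
    # Convert a binary string to its numeric form, e.g. "10" -> 0b10.
    return int(bits, 2)

def bits_to_bytes(bits):
    # Pack a binary string into whole bytes. Assumption: pad with zeros
    # on the left up to a multiple of 8 bits, big-endian byte order.
    n_bytes = -(-len(bits) // 8)          # ceiling division
    return int(bits, 2).to_bytes(n_bytes, "big")

assert bits_to_int("10") == 0b10
assert bits_to_bytes("10") == b"\x02"
assert bits_to_bytes("100000001") == b"\x01\x01"
```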
{
"alpha_fraction": 0.6231505870819092,
"alphanum_fraction": 0.6370757222175598,
"avg_line_length": 16.646154403686523,
"blob_id": "dd53754a7bdbd676cf0560a3680f211aadaeb7da",
"content_id": "f2ce465c73ddd55685d7f1a7266ac562156c023c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2581,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 65,
"path": "/外语/韩语/语法/韩语语法体系及用法2.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 韩语语法及用法2\n1. 主语 \n韩语句子主要分为: \n什么 (谁) 做什么 \n什么 (谁) 怎么样 \n什么 (谁) 是什么 (谁)\n等等. \n예:\n主语 宾语 谓语 \n그가 편지를 쓴다. \n준호가 똑똑하다. \n이것이 책상이다. \n谓语쓰다. 똑똑하다,책상이다.所表现的动作,状态,属性的主体就是主语 \n\n主语的表示: \n韩语主语在名词或具有名词性质的语言要素后加上主格助词이/가来表示. \n예: 준수 -가 뛰어왔다. 准洙跑来了. \n눈 -이 내린다. 眼睛往下看. \n사고 -가 일어났다. 出事了. \n\n有时候,不带主格助词也能表示主语. \n예: 너 어디 아프니?你哪里不舒服? \n누나 왔다. 어서 문 열어 워라.姐姐来了.赶快去开门. \n除此之外,께서和에서也能表示主语. \n께서表示尊敬主语,에서表示集体名词作主语. \n예: 선생님 -이 -> 선생님 -께서 나에게 질문하셨다.老师问我问题了. \n아버지 -가 -> 아버지 -께서 오셨다. 爸爸来了. \n우리나라 -가 -> 우리나라 -에서 2002년 월드컵을 개최했다. 我们国家举办了2002年的世界杯. \n이 회사 -가 -> 이 회사 -에서 신제품을 개발했다. 这家公司开发了新产品. \n\n关于主语的位置: \n主语一般出现在句子最前面,但是可以根据需要变换到其它位置. \n예: \n내가 민수를 사랑한다. \n민수를 내가 사랑한다. \n민수를 사랑한다,내가. \n사랑한다,내가 님수를. \n사랑한다,민수를 내가. \n\n主语省略的情况: \n主语是句子的必需成分,但是有时也可以省略.在可以从上下文中推断出主语时,主语就可以省略. \n예:\n() 볼고기를 먹어 봤는데 참 맛있었어요.(我)吃了烤肉,觉得很好吃. \n() 남대문 시장에 한번 가 보세요.(你)去过一次南大门市场吧. \n\n第一句省略了第一人称主语,第二句省略了第二人称主语. \n回答疑问句的时候也可以省略主语. \n예: \n준수: 영미 뭐 해요? \n영미 엄마: 공부하고 있다. \n\n在不能明确主语时,也可能省略主语: \n예: \n불이야!着火了. \n민수가 안 오면 큰일인데.民洙要是不来的话就糟了. \n셋에 둘을 더하면 다섯이다.3加2等于5. \n李居中的불이야,큰일이다,다섯이다.主语不明确当成惯用语来用. \n\n多主语的情况: \n韩语中有一些句子看上去有两个主语. \n예: \n1. 준호가 돈이 많다. \n2. 철수가 성격이 좋다. \n3. 민수가 키가 크다. \n此时形容词做谓语. \n"
},
{
"alpha_fraction": 0.7666666507720947,
"alphanum_fraction": 0.7666666507720947,
"avg_line_length": 18.33333396911621,
"blob_id": "c3d865f3985f59b75e67098f7ed45bf8d1d0d666",
"content_id": "04ff29f189659a2873b309ca11c3acd9a93bb933",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 90,
"license_type": "no_license",
"max_line_length": 25,
"num_lines": 3,
"path": "/外语/英语/词汇/20200102.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 今日词汇 \nchronological 年代,年代学的.\nchronological order按年代排序. \n"
},
{
"alpha_fraction": 0.3690369129180908,
"alphanum_fraction": 0.4032403230667114,
"avg_line_length": 22.14583396911621,
"blob_id": "87ab10d012c9d7dc6d1056b30058e9c335f9e30f",
"content_id": "2c0a060e3760a843e242542c410cc3d2efff7439",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1225,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 48,
"path": "/DailyCodingProblems/399_sum_same_value_Facebook/c/bruteforce/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "/**\n * 讲真,这个Hard类型的题目看着有点蒙,可能是我没看懂题吧.\n * 在这里我把我心里理解的这道题用我的思路解决出来\n * */\n#include<stdio.h>\n#define SIZE 1000\nint result[3][SIZE];\n\nint getResult(int arr[],int size,int ret[3][SIZE]){\n int i,j,sum = 0;\n for(i = 0; i < size; i++)\n sum+=arr[i];\n if(sum % 3 != 0) return 0; //不能被3整除\n sum /= 3;\n for(i = j = 0; i < 3; i++){\n int tmp = 0;\n int k = 0;\n for(;j < size && tmp < sum;j++,k++){\n tmp+=arr[j];\n ret[i][k] = arr[j];\n }\n if(tmp != sum) return 0;\n ret[i][k] = -1;\n }\n return j == size; //遍历到了最后\n}\n\nint main(){\n int arr[] = {\n 1,3,4,8,2,2,2,2\n };\n int size = sizeof(arr) / sizeof(arr[0]);\n int i,j;\n if(!getResult(arr,size,result)){\n printf(\"null\\n\");\n }else{\n printf(\"[\\n\");\n for(i = 0; i < 3; i++){\n printf(\" [\");\n for(j = 0; result[i][j] >= 0; j++){\n printf(\"%d%c\",result[i][j],result[i][j+1] == -1 ? '\\0':',');\n }\n printf(\"]%c\\n\",i == 2 ? '\\0' : ',');\n }\n printf(\"]\\n\");\n }\n return 0;\n}\n"
},
{
"alpha_fraction": 0.4192139804363251,
"alphanum_fraction": 0.44687044620513916,
"avg_line_length": 19.205883026123047,
"blob_id": "561c5a6e678ed1d3b2c9099608b2d274a11177db",
"content_id": "9fc1aabf9fb0eec7a02dded415735b8553407e14",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 725,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 34,
"path": "/算法/排序算法/选择排序/c/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#include<stdio.h>\n\n//交换两个数字\n#define swap(a,b) {int tmp=a;a=b;b=tmp;}\n\nint selection_sort(int arr[],int size){\n int i,j;\n int min = 0;\n for(i = 0; i < size - 1; i++){\n min = i;\n for(j = i+1; j < size; j++){\n if(arr[min] > arr[j]) min = j; //选择最小的元素\n }\n swap(arr[min],arr[i]);\n }\n return 0;\n}\n\nint main(){\n int arr[] = {\n 1,3,4,0,0,5,8,5,9,0\n };\n int size = sizeof(arr) / sizeof(arr[0]);\n int i;\n\n printf(\"排序前:\");\n for(i = 0; i < size; i++) printf(\"%d \",arr[i]);\n puts(\"\");\n selection_sort(arr,size);\n printf(\"排序后:\");\n for(i = 0; i < size; i++) printf(\"%d \",arr[i]);\n puts(\"\");\n return 0;\n}\n"
},
{
"alpha_fraction": 0.7578397393226624,
"alphanum_fraction": 0.803135871887207,
"avg_line_length": 37.13333511352539,
"blob_id": "c8dfa38052a2d5b25a1bbbce31160d24766b37a2",
"content_id": "e19d0abdc4add8ec9806d10d37d6696cbc3ef2b6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 810,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 15,
"path": "/编程开发/后端/ElasticSearch/Elaticsearch安装.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 安装(elasticsearch版本6.8.0)\n```\nwget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-6.8.0.zip\nunzip elasticsearch-6.8.0.zip\ncd elasticsearch-6.8.0\n``` \n执行\n```shell\nsudo sysctl -w vm.max_map_count=262144\nsudo sysctl -p\n``` \n之后运行`./bin/elasticsearch`即可启动elasticsearch. \n这个窗口是阻塞式的,如果需要在后台运行,可以执行`./bin/elasticsearch -d`以守护进程的形式运行elasticsearch. \nelasticsearch启动后,在终端中执行:`curl localhost:9200`可以得到一个与当前运行的elasticsearch相关的json对象. \nelasticsearch默认是在本机运行,如果需要外网可用,可以在config/elasticsearch.yml文件中,去掉network.host的注释,将值改成0.0.0.0或者本机的外网ip.重启elasticsearch之后配置生效. \n"
},
{
"alpha_fraction": 0.75,
"alphanum_fraction": 0.75,
"avg_line_length": 14,
"blob_id": "30a5c5cf09d621d4d4b6342e99aeca7693b00232",
"content_id": "3079d64423bde211a0295b6eee1c9c5c11b4448c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 70,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 2,
"path": "/编程开发/Linux/命令/ls替代命令exa.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# exa\n命令格式与ls差不多,详细的可以参考命令手册. \n"
},
{
"alpha_fraction": 0.8333333134651184,
"alphanum_fraction": 0.8333333134651184,
"avg_line_length": 43.5,
"blob_id": "27efc1ad7758bbb25e1339b05b4dd137c86896da",
"content_id": "c1ec765d45a05967b7ab2f80b2898f084ea110d1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 468,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 4,
"path": "/算法/排序算法/归并排序/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 归并排序:O(nlogn) \n这个算法理解起来比较简单,从知道原理到实现,其实还是有一定的距离的. \n算法的主要思路是分而治之,将一个大的排序问题一分为二变成两个小的排序子问题,这样递归下去,直到无法在细分,进行排序,然后逆向返回给上一级的排序问题,直到最后排序完毕. \n这种思路用递归比较好解释,迭代的话,可以从最小子问题开始向整个问题逆推. \n"
},
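A minimal recursive Python sketch of the divide-and-conquer idea described above; the names are mine:

```python
def merge_sort(arr):
    # Base case: a run of 0 or 1 elements cannot be divided further.
    if len(arr) <= 1:
        return arr
    mid = len(arr) // 2
    left = merge_sort(arr[:mid])    # solve the two smaller subproblems
    right = merge_sort(arr[mid:])

    # Merge the two sorted halves back up to the parent problem.
    merged, i, j = [], 0, 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged

print(merge_sort([1, 3, 4, 0, 0, 5, 8, 5, 9, 0]))
```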
{
"alpha_fraction": 0.89552241563797,
"alphanum_fraction": 0.89552241563797,
"avg_line_length": 43,
"blob_id": "192a43c5e1cd90f923dce38688a6e981d606c703",
"content_id": "a4e7e499ce20a04849814df391bbea0436e00022",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 280,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 3,
"path": "/编程开发/前端/css/fixed样式无法根据视图固定.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# fixed元素失效\n今天调整一个样式的时,需要在指定位置设定position属性为fixed,但是在设置之后发现使用fixed属性没有任何效果. \n经过google之后,了解到fixed属性的祖先元素的transform属性不为none会影响到元素的表现. \n"
},
{
"alpha_fraction": 0.458450049161911,
"alphanum_fraction": 0.48085901141166687,
"avg_line_length": 18.472726821899414,
"blob_id": "42018bdf3f964b87f1f543eeae1515b35b244654",
"content_id": "57eea9e87335d2e06206cc6b61d868acab6ea588",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1479,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 55,
"path": "/算法/排序算法/冒泡排序/c/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "/**\n * 算法思路:\n * 依次比较相邻的元素,判断是否符合给定规则(大小),对于不符合规则的两个元素,\n * 交换对应的值,遍历到最后一个没有排序好的元素.排序的序列中最后一个元素将\n * 会是序列中的最大或最小值(根据给定规则).重复这个过程,直到所有的元素都确定\n * 位置\n **/\n\n#include<stdio.h>\n\n//交换两个数字\n#define swap(a,b) {int tmp=a;a=b;b=tmp;}\n\n//冒泡算法\nint bubble_sort(int arr[],int size){\n int i,j;\n for(i = size-1; i > 0; i--){\n int swap_flag = 0; //交换标志,用来减少交换步奏\n for(j = 0; j < i; j++){\n if(arr[j] > arr[j+1]){ //这里排成从小到大的顺序,前数比后数大,就交换位置\n swap(arr[j],arr[j+1]);\n swap_flag = 1; //设置交换标志\n }\n }\n if(swap_flag == 0) return 0; //过程中没有出现交换的情况,说明arr整体已经有序\n }\n return 0;\n}\n\nint main(){\n int arr[] = {\n 1,3,4,0,0,5,8,5,9,0\n };\n\n int size = sizeof(arr)/sizeof(arr[0]); //获取数组的长度\n int i;\n\n //排序前输出数组\n printf(\"交换前:\\n\");\n for(i = 0; i < size; i++){\n printf(\"%d \",arr[i]);\n }\n printf(\"\\n\");\n\n bubble_sort(arr,size);\n \n //排序后输出数组\n printf(\"交换后:\\n\");\n for(i = 0; i < size; i++){\n printf(\"%d \",arr[i]);\n }\n printf(\"\\n\");\n\n return 0;\n}\n"
},
{
"alpha_fraction": 0.4505672752857208,
"alphanum_fraction": 0.4797406792640686,
"avg_line_length": 21.851852416992188,
"blob_id": "0be4360ccad6f9d3f1adf887821020524529e5d4",
"content_id": "1615420b55be9474784283264c1bc61134befdbf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 679,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 27,
"path": "/算法/排序算法/侏儒排序/c/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#include<stdio.h>\n\n#define swap(a,b) {int tmp=a;a=b;b=tmp;}\n\nvoid gnome_sort(int arr[],int size){\n int pos = 0;\n while(pos < size){\n if(pos == 0 || arr[pos] >= arr[pos-1]) pos++; //此处应该为>=,而不是>.否则有相同元素的情况下,会陷入死循环\n else{\n swap(arr[pos],arr[pos-1]);\n pos--;\n }\n }\n}\n\nint main(){\n int arr[] = {\n 1,3,4,0,0,5,8,5,9,0\n };\n int size = sizeof(arr) / sizeof(arr[0]);\n int i;\n puts(\"排序前:\");for(i=0;i<size;i++)printf(\"%d \",arr[i]);puts(\"\");\n gnome_sort(arr,size);\n puts(\"排序后:\");for(i=0;i<size;i++)printf(\"%d \",arr[i]);puts(\"\");\n\n return 0;\n}\n"
},
{
"alpha_fraction": 0.6363636255264282,
"alphanum_fraction": 0.6818181872367859,
"avg_line_length": 12.199999809265137,
"blob_id": "c8a591a0b6bcd65342bca4830d0195c92f394138",
"content_id": "10a0d021a8b0fdcad6ff0d91a11c78922cdbd2d6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 138,
"license_type": "no_license",
"max_line_length": 16,
"num_lines": 5,
"path": "/编译器/乔姆斯基文法分类.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 乔姆斯基文法分类 \n乔姆斯基将文法分为大致四类: \n1. 0型文法\n0型文法又称无限制文法.\n对于`$\\bigvee $`\n"
},
{
"alpha_fraction": 0.761904776096344,
"alphanum_fraction": 0.7644110321998596,
"avg_line_length": 18.950000762939453,
"blob_id": "bacc1460ac82aa7f5e2416039bc6fd380c0f5911",
"content_id": "e9e41371401b7e2bf2c37adc691792939686c06d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 979,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 20,
"path": "/算法/其他/TopK问题/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# TopK问题\n```\n在一个无序序列中,找到第K大的数字. \n```\n**思路一**: \n```\n读取前K个数,创建一个小根堆.\n继续遍历无序序列,如果元素元素比堆顶的元素大,\n说明至少存在K+1个元素比它大,堆顶元素出堆,当前元素入堆.\n继续遍历,重复上面的操作.直到最后堆顶元素即为第K大的元素. \n```\n**思路二**: \n```\n利用快排思想,指定一个基准数,这里我指定为当前趟的首元素.\n经过一趟(比基准数大的(不小于)放在基准数左边,比基准数小的(不大于)放在基准数右边),我们至少可以确定当前位置是有效的.\n在程序里直接使用了下标存储序列,所以很容易就可以获取到基准元素的位置.\n我们判断这个位置在是否等于k,如果等于k,说明这个数就是第k大的数.\n如果小于k,说明第k大的数在基准数字当前位置的右边,我们考虑右半部分就好了.\n当前位置比k大的情况也同理. \n```\n"
},
{
"alpha_fraction": 0.6698113083839417,
"alphanum_fraction": 0.6698113083839417,
"avg_line_length": 31.615385055541992,
"blob_id": "8fe877aa8636e2694c103025d3379bbad86c3f36",
"content_id": "b52421d11aecebb9fc86b1f05cd94a333e91c4f1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 706,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 13,
"path": "/编程开发/后端/Wordpress/函数/wp_enqueue_style.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# wp_enqueue_style函数 \n**作用** \n将css样式文件加入队列. \n**原型** \n```php\nwp_enqueue_style($handle,$src,$deps,$ver,$media);\n$handle 字符串 (必须) 样式表文件名 默认None\n$src 字符串 (可选) Wordpress根目录下的样式表路径(如/css/mystyle.css) 默认false\n$deps 数组 (可选) 样式表所依靠的句柄组成的数组;加载该样式前需要加载其他样式表.若没有依赖关系,返回false 默认[]\n$ver 字符串 (可选) 指示样式表版本号的字符串 默认false\n$media 布尔型 (可选) 该字符串指定了为样式表而定义的媒体.如'all','screen','handeld','print' 默认false\n该函数无返回值\n```\n"
},
{
"alpha_fraction": 0.6586102843284607,
"alphanum_fraction": 0.6737160086631775,
"avg_line_length": 13.954545021057129,
"blob_id": "4accca21bf64e17b1dbe9667a7e989b7ac913f3b",
"content_id": "04c86b4d155ee05a3d6507e1c8b4ea4e4b7ed99b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 529,
"license_type": "no_license",
"max_line_length": 29,
"num_lines": 22,
"path": "/编程开发/Linux/常见问题/Nginx配置BasicAuth登录.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# Nginx配置Basic Auth登录认证\n1. 安装httpd-tools \n```\napt install httpd-tools\n```\n2. 创建授权用户及密码 \n```\nhtpasswd -c -d 导出auth文件路径 用户名\n```\n3. 配置Nginx \n在对应配置文件的server配置中添加: \n```\nauth_basic \"名称\";\nauth_basic_user_file 认证文件路径;\n```\n4. 重新载入配置文件 \n```\nnginx -t # 测试配合文件是否存在错误\nnginx -s reload # 重新载入配置文件\n```\n5. 访问对应的url \n访问对应的URL,此时会要求输入用户名密码. \n"
},
{
"alpha_fraction": 0.75,
"alphanum_fraction": 0.75,
"avg_line_length": 34,
"blob_id": "a16443197606984718c9fa4c4b123cc8dcf211e4",
"content_id": "1881f66aa6aae29ee15b16cca73f260928c48bcb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 180,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 2,
"path": "/算法/排序算法/奇偶排序/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 奇偶排序\n首先按照下标奇偶-奇偶....的顺序排列相邻的数字到最后,然后按照偶奇-偶奇...的顺序排列相邻的数字到最后.直到整体有序. \n"
},
{
"alpha_fraction": 0.745591938495636,
"alphanum_fraction": 0.758186399936676,
"avg_line_length": 27.214284896850586,
"blob_id": "76708554c7183ce280ca50b942f7cc9c486b3584",
"content_id": "5812e5c432776f03b14bb20368bb518a9d56de2e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 565,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 14,
"path": "/编程开发/后端/问题排查/pecl安装yar出现curl问题.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# pecl安装yar出现的问题\n今天用pecl安装了一下yar,经过一系列过程之后,爆出了:\n```\nconfigure: error: Please reinstall the libcurl distribution - easy.h should be in <curl-dir>/include/curl/\n```\n问题. \n从描述中可以看出是curl库除了问题,经过一番谷歌,在github中找到了解决方案: \n```\n# 创建curl链接\nsudo ln -s /usr/include/x86_64-linux-gnu/curl /usr/local/include/curl\n# 下载curl库\nsudo apt-get install libcurl4-gnutls-dev\n```\n上面的过程一路正常之后,执行`pecl install yar`,一切安装顺利. \n"
},
{
"alpha_fraction": 0.7910447716712952,
"alphanum_fraction": 0.7910447716712952,
"avg_line_length": 33,
"blob_id": "2ca0c66d69b96eef530396c34bf093bcc8de88b3",
"content_id": "b507fd1b54c58cf340ede93d150fbe73dec76521",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 83,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 2,
"path": "/数据结构/树/红黑树/python/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 代码源于github\n[代码来源](https://github.com/JinheonBaek/DS-RedBlackTree)"
},
{
"alpha_fraction": 0.6482240557670593,
"alphanum_fraction": 0.6837431788444519,
"avg_line_length": 30.106382369995117,
"blob_id": "bd131e615acae8fbc67baccba70ea7b3ff2fdb0f",
"content_id": "aedf2d04c6ca8375707cdb345fcc5c8fe182e0c4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2984,
"license_type": "no_license",
"max_line_length": 206,
"num_lines": 47,
"path": "/算法/动态规划/高楼扔鸡蛋/高楼扔鸡蛋问题.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 高楼扔鸡蛋\n**题目** \n题设我们手里有K个鸡蛋,在我们面前有一栋N层高的楼,而我们手里的鸡蛋是加强蛋,并没有那么易碎。试求出在最坏的情况下至少需要扔多少次鸡蛋才能确定在某一层之后会碎。 \n虽然题目有点扯蛋的意味,但是作为一个拥有端正态度的孩子,就是是扯蛋,我也应该好好的研究研究。 \n**思考**\n我们知道如果鸡蛋不限量,我们很容易就能想到用二分法丢鸡蛋,很容易就能确定在哪一层鸡蛋开始会碎。但是在我们没有足够多鸡蛋的情况下,我们就不太可能这么奢侈了,比如有15层楼,但是只有两个鸡蛋,假设在第9层开始鸡蛋会碎,第一次,我们在(15/2 = 7)第7层扔,没有碎,接下来我们在((8+15)/2 = 11)第11层扔鸡蛋没了也没测出来,所以当我们还剩一个鸡蛋的时候,为了测出蛋碎点,我们就必须用遍历的方式。 \n这里我们知道: \n1. 当没有鸡蛋的时候,需要扔0次能确定在某一层会碎。 \n2. 当有一个鸡蛋的时候,最坏的情况下,需要遍历n层楼才能确定蛋碎点 \n上面两种特殊情况说完了之后,我们再讨论一般情况。 \n假设现在在第i层楼,现在有两种可能: \n1. 蛋碎了,现在有k-1个蛋,我们可以确定蛋碎点在1~i-1之间 \n2. 蛋没碎,现在有k个蛋,我们可以确定蛋碎点在i+1到n之间 \n这两种情况里面,我们应该选择到该层次数较大的那一个(最差的情况),而且还应该需要+1,因为需要多测试一次才能确定是否处于临界位置。 \n\n好了讨论完一般情况,我们可以写出最差情况下最少需要验证次数的表达式: \n```\ndef count(k,n):\n if k == 0:\n return 0\n if k == 1:\n return n\n\n res = n\n for i = 1 to n: # 遍历每一层的最差最少次数\n max_res = max(count(k-1,i-1),count(k,n-i))+1 # 当前楼层最差的情况下需要的次数\n res = min(res,max_res) # 到当前楼层位置取较小的那个\n return res\n```\n但是我们很容易看一看到,在上面的函数中,count方法被重复调用了很多次,而且很多调用是没有必要的,我们知道count(k,n)中调用了count(k-1,i-1),count(k,n-i)这两个狮子,而这两种情况都是在count(k,n)之前执行的,count(k,n)的值并不会影响到其他两个式子的值。这个时候,我们就可以把这两个不会再后面改变的值缓存起来。这个时候我们可以一个二维数组缓存。 \n```\ndim DP[1000][1000]\n\ndef count(k,n):\n if k == 0:\n return 0\n if k == 1:\n return n\n if DP[k][n] > 0:\n return DP[k][n]\n res = n\n for i = 1 to n:\n max_res = max(count(k-1,i-1),count(k,n-1))+1\n res = min(res,max_res)\n return DP[k][n] = res\n```\n经过这番操作之后,中间很多无用的过程就被略过了,大大提高了效率。 \n"
},
{
"alpha_fraction": 0.6111111044883728,
"alphanum_fraction": 0.7407407164573669,
"avg_line_length": 17,
"blob_id": "b8404a0d14e4df44bcba41d987caa19b22f9b03f",
"content_id": "548447873b049c4a23aa783914ac607c3b739a60",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 84,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 3,
"path": "/编程开发/软件安装/软件/百度云/BaiduGo常见问题.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 百度云常见问题\n1. appid错误:\n在软件输入`config set -appid=266719`\n"
},
{
"alpha_fraction": 0.7797356843948364,
"alphanum_fraction": 0.7797356843948364,
"avg_line_length": 24,
"blob_id": "a909608918daa1520f8838a98a72f9587b961a28",
"content_id": "d514c50cae9ab6d4c877d18c0e5283bcc3c891c1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 429,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 9,
"path": "/编程开发/后端/Wordpress/问题/后台不显示链接选项.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 后台不显示链接选项\n今天学习Wordpress主题开发,突然发现自己的后台跟老师的后台显示得有些不一样. \n老师提到了Wordpress的链接选项,我看了一下自己的后台并没有. \n经过一番查找,在百度上找到了一种可行的方案: \n```\n在functions.php中添加:\nadd_filter('pre_option_list_manager_enabled','__return_true');\n```\n刷新一下页面即可看到链接选项. \n"
},
{
"alpha_fraction": 0.72062087059021,
"alphanum_fraction": 0.72062087059021,
"avg_line_length": 16.346153259277344,
"blob_id": "7d6e4c1242e7ad20d5435737fe87fdbc03c58c38",
"content_id": "248280b7ef8347e0f4694a51139e7c7e455ec978",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 537,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 26,
"path": "/编程开发/后端/Composer/composer阿里源.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 阿里源\ncomposer.phar下载地址:\n```\nhttps://mirrors.aliyun.com/composer/composer.phar\n```\n\n全局配置阿里源:\n```\ncomposer config -g repo.packagist composer https://mirrors.aliyun.com/composer/\n```\n取消配置:\n```\ncomposer config -g --unset repos.packagist\n```\n当前项目配置阿里源:\n```\ncomposer config repo.packagist composer https://mirrors.aliyun.com/composer/\n```\n取消阿里源:\n```\ncomposer config --unset repos.packagist\n```\n调试:\n```\ncomposer -vvv require alibabacloud/sdk(这里是需要下载的包)\n```\n"
},
{
"alpha_fraction": 0.6904761791229248,
"alphanum_fraction": 0.6904761791229248,
"avg_line_length": 12.333333015441895,
"blob_id": "760e2949488036fb8227a4f3af2ec390b63634e7",
"content_id": "0aa5e00861956760277e92a350ba502673bf3912",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 94,
"license_type": "no_license",
"max_line_length": 20,
"num_lines": 3,
"path": "/数学/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 数学\n关于数学学习中的一些笔记和想法... \nMMP,现在一点想法都没有 \n"
},
{
"alpha_fraction": 0.8136042356491089,
"alphanum_fraction": 0.8138986825942993,
"avg_line_length": 42,
"blob_id": "7dff58e17abe57b864053d45e4029761606f5deb",
"content_id": "106725cfd2b978a488105756aad7729b9213ea65",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 7158,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 79,
"path": "/编程开发/后端/Laravel/Laravel官方教程笔记/5.8/1. 开始/3. 目录结构.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 目录结构\n## 介绍 \nLaravel默认的应用结构旨在为各种不同规模的应用提供一个好的起点.如果有必要,我们可以根据自己的需要组织项目的结构. \nLaravel几乎没有什么强制的限制,也不会限定你编写的类的位置,只需要符合composer可以自动载入的规则即可. \n### 关于模型目录的问题 \n在很多mvc框架中都会默认存在models目录,而这个目录在laravel中是不存在的,刚开始学的时候可能会有一些迷惑性. \n其实这是因为models这个词本身就具有迷惑性,很多人对models有不同的看法,有些人觉得模型是所有业务逻辑的统称,而另一部分人认为模型只是与相关的数据库的一种交互. \n因为这个原因,laravel作者干脆抛弃了这个目录,用户可以根据自己的喜好选择加与不加models目录. \n## 应用根目录 \n### app目录 \napp目录包含应用的核心代码.我们写的跟应用相关的代码也几乎都在这个目录之下. \n### bootstrap目录 \n`bootstrap`目录包含的`app.php`是整个框架的启动文件,这个目录也是框架为了优化性能(注入路由,服务缓存等)生成的文件的缓存(`cache`)目录. \n### config目录 \n这个目录包含应用中所有的配置文件.\n### database目录 \n这个目录中包含跟数据库相关的一些文件或者目录,比如数据库迁移文件,数据模型工厂,数据种子等. \n有时候我们也会将sqlite数据库的文件放到这个目录下\n### public目录 \n应用入口,包含`index.php`文件及一些静态文件. \n### resources目录 \n这个目录包含所有原始的,未编译的资源(LESS,SASS,Js文件等),语言文件,视图文件等与前端相关的文件. \n### routes目录 \n这个目录包含所有应用路由相关的文件,默认情况下,laravel会自动导入下面的几个路由文件:`web.php`,`api.php`,`console.php`及`channel.php` \n`web.php`包含的路由会在RouteServiceProvider中被放到web中间件组中,默认情况下,这个目录下的路由会提供会话状态,CSRF保护及cookie加密等功能. \n如果我们的应用没有提供一个无状态的,RESTful api.所有的路由最好放在`web.php`文件中. \n`api.php`包含的路由会在RouteServiceProvider中被放到api中间件中,默认提供限流的功能,这里的路由默认更倾向于无状态,用tokens认证并且不会访问会话状态的请求. \n`console.php`文件中可以定义我们在终端命令中访问的路由. \n`channels.php`文件中可以注册我们需要的事件广播频道. \n### storage目录 \n这个目录中包含编译之后的blade模板,基于文件的session,文件缓存及其他一些框架生成的缓存.这个目录被分成`app`,`framework`及`logs`目录. \n其中`app`目录可以被用于保存应用生成的文件,`framework`目录用于保存框架生成的文件或者缓存,`logs`包含应用中的所有日志信息. \n在`storage/app`下的public目录可以用作存储用户生成的文件(比如头像),这个文件需要所有的用户都能访问到.我们可以用户`php artisan storage:link`将此目录链接到`public/storage`. \n### tests目录 \n`tests`目录包含我们写好的所有自动测试文件.其中每一个文件都应该以`Test`后缀结尾. \n我们可以通过`phpunit`或者`php vendor/bin/phpunit`运行我们写好的测试. \n### vendor目录 \n包含所有的composer依赖 \n## App目录 \n项目中大多数的内容都处于这个目录之下.此目录默认被防止在`App`命名空间下,并遵循`PSR-4`的自动载入标准. \n其中默认包含很多附加目录,如`Console`,`Http`,`Providers`等.可以把`Console`和`Http`目录想成提供给应用核心的接口. \nHTTP协议和CLI都可以与应用交互,但是几乎并不包含实际的业务逻辑.其中`Console`目录包含所有的artisan命令,`Http`目录包含控制器,中间件及请求等. \n我们可以通过artisan的`make`命令在app目录中生成其他的目录,比如`make:job`生成`app/Jobs`. \n### Broadcasting目录 \n此目录包含应用中所有的广播频道类,我们可以通过`make:channel`命令生成目录下的广播类. \n默认情况下,这个目录并不存在. \n### Console目录 \n此目录包含应用中所有自定的artisan命令.我们可以通过`make:command`命令新的自定义命令. \n除了自定义的命令类外,这个目录还包含console和核心文件,我们可以再其中注册我们定义的定时任务. \n### Events目录 \n此目录默认不存在,我们可以通过`event:generate`或者`make:event`生成. \n此目录包含所有的事件类 \n### Exceptions目录 \n此目录包含应用中异常处理程序,我们也可以将应用中抛出的异常防止到此目录下. \n如果需要自定义异常的日志生成及渲染,我们可以修改此目录中的`Handler`类. \n### Http目录 \n此目录包含项目中的控制器,中间件及form请求.几乎所有的进入应用中的请求的处理逻辑都放在这里. \n### Jobs目录 \n此目录默认不存在.我们可以通过`make:job`命令生成. \n此目录是所有可排队的任务类的根据地. \n任务可能背排队或者在当前的生命周期同步执行.在当前请求中同步执行的任务,我们一般称为\"命令\",因为他们是在命令默认下运行的. \n### Listeners目录 \n这个目录默认不存在,我们可以通过`event:generate`或`make:listener`生成.该目录包含指定event触发时的处理逻辑. \n时间监听者接收一个事件实例,并且在事件被触发时执行响应的逻辑.比如`UserRegistered`时间可能被一个名为`SendWelcomeEmail`的监听者处理. \n### Mail目录 \n目录默认不存在,我们可以通过`make:mail`命令生成. \n此目录包含所有的应用需要发送的邮件类.邮件对象允许我们将所有的邮件构建逻辑封装到一个单独的简单的类中,我们可以通过`Mail::send`方法发送. \n### Notifications目录 \n此目录默认不存在,我们可以通过`make:notification`命令创建. \n这个目录包含应用中所有\"事务性的(transactional)\"通知(这里还不是特别理解....),比如与应用事件相关的简单的通知. \nLaravel的通知功能可以抽象的表示发送诸如邮件,Slack,SMS或者数据库保存等相关的功能. \n### Policies目录 \n此目录默认不存在,我们可以通过`make:policy`创建.此目录包含应用中的授权认证策略.我们可以使用这些策略决定用户是否拥有对某个资源进行某个动作的权限. \n### Providers目录 \n此目录包含所有应用中的服务提供者. \n这些服务提供者通过绑定服务绑定到对应的服务容器,注册事件或者执行其他为接下来请求准备的其他任务,来引导我们编写好的应用\n在新版的laravel应用中,此目录已经包含了几个提供者了,我们可以很自由的添加需要的服务提供者 \n### Rules目录 \n此目录默认不存在,我们可以通过`make:rule`生成该目录.该目录包含自定义的验证规则(validation rules)对象.这些规则一般都是用于封装一些复杂的校验逻辑."
},
{
"alpha_fraction": 0.6050955653190613,
"alphanum_fraction": 0.6242038011550903,
"avg_line_length": 13.272727012634277,
"blob_id": "9544a5d919c598e9bca8d29c175e392e3556c797",
"content_id": "2c3d7b952a75d5078ce568e76541d9da8ed06e8b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 347,
"license_type": "no_license",
"max_line_length": 32,
"num_lines": 11,
"path": "/外语/韩语/语法/지요.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 지요 \n表示一种双方都知道的事物,说出来只是希望得到对方肯定的回答. \n意为:...吧 \n```\n1)덥다 热\n덥지요. 热吧.\n2)한국말이 어렵습니다. 韩国话难\n한국말이 어렵지요. 韩国话难吧\n3)저 아이가 예쁩니다. 那孩子漂亮吧\n저 아이가 예쁩지요. 那孩子漂亮吧. \n```\n"
},
{
"alpha_fraction": 0.692307710647583,
"alphanum_fraction": 0.692307710647583,
"avg_line_length": 11,
"blob_id": "3fac953bf17184f32dc9625c832e6deac8b4aaa0",
"content_id": "d1888997b50aa808f4e3b5369ec696ca47daad13",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 54,
"license_type": "no_license",
"max_line_length": 16,
"num_lines": 2,
"path": "/编程开发/Linux/命令/top替代命令htop.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# htop\n界面更加友好,具体可以参考手册. \n"
},
{
"alpha_fraction": 0.8215222954750061,
"alphanum_fraction": 0.8320209980010986,
"avg_line_length": 30.75,
"blob_id": "4edb64f5ce9003c3d2cdfeca1bb0b2905ebcfae3",
"content_id": "30d2606e90bb3af79aa5a050e381de58702346d6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1005,
"license_type": "no_license",
"max_line_length": 184,
"num_lines": 12,
"path": "/算法/位运算/多数元素/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 寻找超过半数的元素\n一个数列中有一个元素出现的次数超过半数,找出超过半数的元素. \n目前看到两种解决的方法: \n1.剔除法\n```\n遍历所有的元素,两两挑选,不相同则把这两个元素从数列剔除.知道剩下的元素都相同.则剩下的元素必为超过半数的元素.\n``` \n这种方法虽然比较容易理解,但是用C语言实现起来比较麻烦....不过这种思路倒是可以借鉴一下. \n2.位掩法\n```\n因为数列中每个数字占用的空间都是相同的,所以我们记录每个位上,1出现的次数,超过半数的必定属于超过半数的数字,原因很简单,我们看极端情况,假设只有两个数字,一个元素超过半数,另一个元素假设比它少一个,假设这两个数字的二进制位没有重合的,即使这样,无论如何属于该数字的二进制位都不会超过半数.根据这个原理,我们可以对应二进制位上1出现半数以上的必然会属于超过半数的元素.\n```\n"
},
{
"alpha_fraction": 0.8478260636329651,
"alphanum_fraction": 0.8478260636329651,
"avg_line_length": 21,
"blob_id": "bfe5371cec1ed7db86bac703a69439a0dc8f4563",
"content_id": "378f7bfd4ef4f8bbb43c30a6eb0775a28f430d13",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 108,
"license_type": "no_license",
"max_line_length": 28,
"num_lines": 2,
"path": "/leetcode/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# Leetcode题目笔记\n按题号建立目录,目录中以编程语言为子目录存放源代码及思路 \n"
},
{
"alpha_fraction": 0.4926108419895172,
"alphanum_fraction": 0.5714285969734192,
"avg_line_length": 13,
"blob_id": "36e47b8d02d48ca0cd34aaaa5e0a741d5a02856b",
"content_id": "8cdd8658ef03dc5eaebe1ac148244e2e4fd09233",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 798,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 29,
"path": "/leetcode/78-子集/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 子集\n**题目:** \n给定一组不含重复元素的整数数组 nums,返回该数组所有可能的子集(幂集)。 \n\n**说明:**解集不能包含重复的子集。 \n\n**示例:** \n\n**输入:**`nums = [1,2,3]` \n**输出:** \n```\n[\n [3],\n [1],\n [2],\n [1,2,3],\n [1,3],\n [2,3],\n [1,2],\n []\n]\n```\n\n**思路:** \n这个题目可以利用二进制的思想解决,对于一个数列上的数字,它只有表现形式(出现或者不出现),分别对应的1,0. \ne.g.\n在[1,3,4]中有三个位置,我们可以将其抽象为3个二进制位,二进制位上的值可0可1,所以一共可以组合出的结果为2的3次方种. \n假设二进制位为001,则表示当前情况中1不存在,3不存在,4存在,即子序列[4]. \n能够理解这种思想,整个题目也就简单了.\n"
},
{
"alpha_fraction": 0.36274510622024536,
"alphanum_fraction": 0.4117647111415863,
"avg_line_length": 17.789474487304688,
"blob_id": "50f44ef8d9ec8eaaf39ab01bcb75ecdd50efea57",
"content_id": "c64bce198fcfcd1ba388e8395a532dca990ddfa2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 714,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 38,
"path": "/DailyCodingProblems/412_look_and_say_Epic/c/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#include<stdio.h>\n\nvoid look_and_say(char look[],int say[],int n){\n if(n <= 1){\n puts(look);\n return;\n }\n char tmp = look[0];\n int i = 0;\n int cnt = 0;\n int idx = 0;\n do{\n if(look[i] != tmp){\n say[idx<<1] = cnt;\n say[(idx<<1)|1] = tmp-'0';\n idx++;\n cnt = 0;\n }\n tmp = look[i];\n cnt++;\n i++;\n }while(look[i-1] != '\\0');\n int last = idx << 1;\n look[0] = '\\0';\n for(i = 0; i < last; i++){\n sprintf(look,\"%s%d\",look,say[i]);\n }\n look_and_say(look,say,n-1);\n}\n\nchar look[1000000] = \"1\";\nint say[1000000];\n \nint main(){\n int N = 50;\n look_and_say(look,say,N);\n return 0;\n}\n"
},
{
"alpha_fraction": 0.41999998688697815,
"alphanum_fraction": 0.4449999928474426,
"avg_line_length": 20.052631378173828,
"blob_id": "482a8055cbb747f4e75fd379765b746d0e87f28c",
"content_id": "ee88dd3e36315618415e59f195b951f94ac44372",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 462,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 19,
"path": "/leetcode/78-子集/c/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#include<stdio.h>\n\nint main(){\n int arr[] = {\n 1,2,3\n };\n\n int size = sizeof(arr) / sizeof(arr[0]);\n int i;\n int begin = 0;\n int end = 1 << size;\n int tmp,bit;\n for(i = begin; i < end; i++){ //遍历所有的情况\n for(tmp = i,bit=0; tmp > 0; tmp>>=1,bit++){ //依次遍历当前数字的有效二进制位\n if(tmp%2)printf(\"%d \",arr[bit]); //输出符合要求的序列\n }\n puts(\"\");\n }\n}\n"
},
{
"alpha_fraction": 0.84375,
"alphanum_fraction": 0.84375,
"avg_line_length": 15,
"blob_id": "977315ad6825186ecbfbb5af7c78ae36fab366ad",
"content_id": "ac6ed6b1d9a6fe840eb49c2032731e3ca849cd20",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 88,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 2,
"path": "/算法/动态规划/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 动态规划\n这里记录我遇到的有关动态规划的题目已经解题思路。\n"
},
{
"alpha_fraction": 0.699999988079071,
"alphanum_fraction": 0.699999988079071,
"avg_line_length": 23,
"blob_id": "1b4e9d85393dc3652016022c388837a4115b2c25",
"content_id": "ba34f5fbc55a24820cf5a6df611ce81cdd3207c1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 106,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 2,
"path": "/编程开发/Linux/命令/nnn工具.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# nnn\n这个命令可以用来替代`du -sh *`命令,用于查找各文件,目录的空间占用情况. \n"
},
{
"alpha_fraction": 0.6901960968971252,
"alphanum_fraction": 0.772549033164978,
"avg_line_length": 62.25,
"blob_id": "2cefb4f6bcef30cae9fe03349d413fef4143b363",
"content_id": "b63793e505da0cf973c1f20b1d1999431548de43",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 255,
"license_type": "no_license",
"max_line_length": 181,
"num_lines": 4,
"path": "/DailyCodingProblems/390_num_not_exists_Two_Sigma/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 390_num_not_exists_Two_Sigma\nThis problem was asked by Two Sigma. \n\nYou are given an unsorted list of 999,000 unique integers, each from 1 and 1,000,000. Find the missing 1000 numbers. What is the computational and space complexity of your solution? \n"
},
{
"alpha_fraction": 0.7772511839866638,
"alphanum_fraction": 0.7962085604667664,
"avg_line_length": 40.79999923706055,
"blob_id": "92b42914943f3b93d7405d17f2cfd5f3a097b672",
"content_id": "e64fd68952faeba9ab7d7920a0e77ef0a160ebd0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 331,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 5,
"path": "/硬件/c51/Linux上编译51程序.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# Linux上编译51程序\n在linux中,可以利用sdcc编译51的c程序.虽然与windows上的keil c有细微的差异,但是大体上相同. \n编译方法:`sdcc xxx.c` \n然后我们利用packihx生成hex文件:`packihx xxx.ihx > xxx.hex` \n之后利用stcflash(https://github.com/laborer/stcflash)进行hex文件烧录即可. \n"
},
{
"alpha_fraction": 0.8125,
"alphanum_fraction": 0.8125,
"avg_line_length": 22,
"blob_id": "45628708bbc8a68bbba7aa11e63dff370eedf528",
"content_id": "8664d7cfce72967195dd19bf2e50686767b6b0a5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 126,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 2,
"path": "/算法/排序算法/侏儒排序/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 侏儒排序\n类似于插入排序,但是实现类似于冒泡排序.实现起来非常简单,甚至不需要嵌套循环 \n"
},
{
"alpha_fraction": 0.4968183934688568,
"alphanum_fraction": 0.5022026300430298,
"avg_line_length": 24.22222137451172,
"blob_id": "64991cfdecb0967b6b415db7986f3ab31cfccde7",
"content_id": "1475c53a459c057b8408526aa453288a4148aec4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2453,
"license_type": "no_license",
"max_line_length": 186,
"num_lines": 81,
"path": "/编程开发/后端/Wordpress/函数/get_terms.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# get_terms\n**说明** \n检索分类法或分类法列表中的term. \n**用法** \n`get_term($taxonomies, $args = '')` \n传递变量按`wp_parse_args()`等函数所用的格式. \n`$myterms = get_terms('orderby=count&hide_empty=false');` \n未指定值的变量使用以下的默认值(下文中有说明). \n下面的列表中含有$args,将讲些默认值. \n```\n$args = [\n 'orderby' => 'name', //排序依据\n 'order' => 'ASC', //升序降序\n 'hide_empty'=> true, //隐藏空条目\n 'exclude' => [], //排除\n 'exclude_tree' => [],\n 'include' => [],\n 'number' => 10, //返回条目\n 'fields' => 'all', //默认all\n 'slug' => '', //任何含有slug的term都可以作为该变量的值\n 'parent' => '',\n 'hierarchical'=>true, //是否返回层级分类法,默认为true\n 'child_of' => 0, //默认为0,获取该term的所有后代\n 'get' => , //可以通过为'all'复制来改写'hide_empty'和'child_of'\n 'name_like' => '', //默认为''\n 'pad_counts'=> false, //默认为false,为true时将计算包括$terms在内的所有子元素\n 'offset' => 0,\n 'search' => ,\n 'cache_domain'=>'core'\n];\n```\n**e.g.** \n字符串形式: \n```php\n$categories = get_terms('category','orderby=count&hide_empty=1');\n``` \n数组形式: \n```php\n$categories = get_terms('category',[\n 'orderby' => 'count',\n 'hide_empty' => 0\n]);\n```\n获取所有的友情链接的分类: \n```php\n$link_categories = get_terms('link_category','orderby=count&hide_empty=0');\n```\n列出所有不带链接的自定义分类: \n```php\n$terms = get_terms('my_taxonomy');\n$count = count($terms);\nif($count > 0){\n echo \"<ul>\";\n foreach ( $terms as $term ){\n echo \"<li>\" . $term->name . \"</li>\";\n }\n echo \"</ul>\";\n}\n``` \n列出所有带上链接的自定义分类: \n```php\n$args = ['taxonomy'=>'my_term'];\n\n$terms = get_terms('my_term',$args);\n\n$count = count($terms);\n$i = 0;\nif($count > 0){\n $cape_list = '<p class=\"my_term-archive\">';\n foreach($terms as $term){\n $i++;\n $term_list .= '<a href=\"/term-base/\"' . $term->slug . '\" title=\"'. sprintf(__('View all post filed under %s', 'my_localization_domain'),$term->name). '\">' . $term->name . '</a>';\n if($count != $i){\n $term_list .= ' - ';\n }else{\n $term_list .= '</p>';\n }\n }\n echo $term_list;\n}\n```\n"
},
{
"alpha_fraction": 0.495626837015152,
"alphanum_fraction": 0.5218659043312073,
"avg_line_length": 25.384614944458008,
"blob_id": "44695f88cf1189a57fbc738afd3bfb0fec00c81b",
"content_id": "0af8665adb6c03b5723836f9cc96d64c09d64c57",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 3284,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 78,
"path": "/算法/位运算/只出现一次的元素3/c/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#include<stdio.h>\n\n//模拟位运算的方式\nint getSingle(int arr[],int size){\n int ones = 0, twos = 0; //出现1次,出现2次的二进制位\n int threes; //出现三次的二进制位\n int i;\n for(i = 0; i < size; i++){\n /**\n * 这里的ones代表以往结果中出现一次的二进制位结果.\n * 可能第一次会让人迷茫,其实第一次也是一样的,ones为0,\n * 表示之前没有出现过一次的元素.\n * 我们利用ones & 当前遍历到的元素即可得到出现过两次的元素.\n * 原因:我们知道ones各个二进制位表示出现过1次的二进制位,那么在\n * 之前出现过一次,在当前的元素中依然存在的,是不是就出现了两次呢. \n * twos前面的或也很好理解,之前出现过两次的元素不一定在当前元素中\n * 存在,ones是之前出现过一次的,ones&arr[i]是之前出现过一次,\n * 当前又出现了一次的,所以属于新的出现两次的元素.\n **/\n twos |= ones & arr[i];\n /**\n * 从异或运算的性质可知,两二进制位相同为0,不同为1.\n * 所以之前出现过一次的(ones中二进制位出现的1),当前元素不存在,\n * 或者之前元素(ones)没出现过,当前元素出现的二进制位,我们都看成新的\n * 出现一次的元素.\n **/\n ones ^= arr[i];\n /**\n * ones表示出现过一次的二进制位,twos表示出现过两次的二进制位,\n * 如果一个二进制位同时被标记为出现1次和出现两次,\n * 说明这个二进制位出现了三次.这里体现了twos放在ones前面的效果.\n * 由异或运算的性质可知,同一个数字连续异或另一个数字两次的结果依然是当前数字\n * e.g.\n * 12^13^13 = 12\n * twos中的二进制位表示其至少出现两次,\n * ones中的二进制位表示其出现过奇数次.\n * 同时满足该两件的最小整数就是3.\n * 自然而然,我们需要把同时满足条件的二进制位归0,自然而然的,\n * 我们就用到了取反操作.把原来是0的二进制位变成1,是1的地方变成0\n **/\n threes = ones & twos;\n ones &= ~threes;\n twos &= ~threes;\n }\n //遍历完整个数列,ones即是我们需要的结果\n return ones;\n}\n\n//统计二进制位的方式\nint getSingle2(int arr[],int size){\n int i,j;\n int one = 0; //唯一的数\n for(i = 0; i < 32; i++){ //int类型为32位\n int count = 0; //当前位置二进制位上1出现的次数\n int mask = 1 << i; //位掩码\n for(j = 0; j < size; j++){\n if((arr[j] & mask) > 0){ //当前遍历的数字对应二进制位上为1\n count++;\n }\n }\n if(count%3 == 1){ //当前二进制位出现次数不为3的倍数,其属于只出现1次的数字\n one|=mask;\n }\n\n }\n return one;\n}\n\nint main(){\n int arr[] = {\n 1,2,1,1,2,3,3,4,2\n };\n int size = sizeof(arr)/sizeof(arr[0]);\n printf(\"模拟法:%d\\n\",getSingle(arr,size));\n\n printf(\"计数法:%d\\n\",getSingle2(arr,size));\n return 0;\n}\n"
},
{
"alpha_fraction": 0.6909090876579285,
"alphanum_fraction": 0.6909090876579285,
"avg_line_length": 20.799999237060547,
"blob_id": "1c6bc9627c1269c81faefeb0f0c2c22ead4c14cf",
"content_id": "70192e0c9db24a431bf6cafbf8d50eaf3a1820af",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 364,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 10,
"path": "/编程开发/后端/Wordpress/问题/安装出现重定向过多.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "安装wordpress时,chrome上出现`ERR_TOO_MANY_REDIRECTS`标志,无法正常访问网站. \n**解决:** \n```\n在wp-config.php文件最前面加上下面两句\ndefine('WP_HOME','网站域名');\ndefine('WP_SITEURL','网站域名');\n即可\n```\n**原因分析:** \n由于网站域名未定义,导致访问网站时,一直在尝试访问/index.php,造成了循环. \n"
},
{
"alpha_fraction": 0.7399267554283142,
"alphanum_fraction": 0.7509157657623291,
"avg_line_length": 33.125,
"blob_id": "9f74ac66c785165b97202c2aeb6d1e26bdd43d8c",
"content_id": "6a7aa789057a16ec79d6356b96057d6da3b20cd3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 273,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 8,
"path": "/DailyCodingProblems/398_deleteKthElemInList_Amazon/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 398_deleteKthElemInList_Amazon\nThis problem was asked by Amazon.\n\nGiven a linked list and an integer `k`, remove the `k-th` node from the end of the list and return the head of the list.\n\n`k` is guaranteed to be smaller than the length of the list.\n\nDo this in one pass.\n"
},
{
"alpha_fraction": 0.6465378403663635,
"alphanum_fraction": 0.6626409292221069,
"avg_line_length": 24.346939086914062,
"blob_id": "284d60406cd7b7ff875fe65f83be513cec4ceb85",
"content_id": "92127a2bbaeaec5d9d537bc3dde08b298a8be836",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2118,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 49,
"path": "/编程开发/后端/调度/限流.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 限流\n>笔记摘自掘金 \n**限流的意义:** \n防止访问超过网站承受的程度,导致崩溃. \n**常见的限流方式:** \n- 令牌桶算法 \n- 漏桶算法 \n一. 漏桶算法 \n可以把漏桶想象成一个浴缸,浴缸漏水的速度是一定的,容量也是一定的,当流入浴缸的水超过浴缸的容纳量时,浴缸中的水就会溢出. \n漏桶算法的两种实现: \n1. 不允许突发流量的情况,平缓的处理请求,如果进水的速度大于出水的速度,直接舍弃多余的水. \n2. 允许一定的突发流量的情况.允许桶被装满,但是装满后如果继续装入,就会被限流.相当于加了一层Buffer. \n漏铜算法需要考虑的两个参数: \n- 桶的容量 \n- 漏水的速度 \n漏铜速度可以平滑网络上的突发流量(因为漏水速度固定) \n二. 令牌桶算法 \n令牌桶中的桶与漏桶的不同之处在于,令牌桶并不用来装流量,而是用来装令牌.每个桶可以装固定个数的令牌,以一定的速率向桶中扔令牌,而每个请求进来时,会去桶中拿一块令牌,当桶中没有令牌可拿时,当前请求等待或者被抛弃. \n三. 分布式限流 \n可以利用redis来实现 \n**大致思路:** \n设定一个请求上限和一个当前请求计数.如果当前请求计数超过设置的请求上限,当前请求等待或者被抛弃. \n```lua\nlocal key = 'rate.limit:' .. KEYS[1] --限流KEY\nlocal limit = tonumber(ARGV[1]) --限流大小\nlocal current = tonumber(redis.call('get',key) or '0')\nif current + 1 > limit then --如果先出限流大小\n return 0\nelse --请求数加1,并设置1s过期\n redis.call('INCRBY',key,'1')\n redis.call('expire',key,'1')\n return current + 1\nend\n```\n**PHP代码:** \n```php\n<?php\nfunction accquire($limit){\n $redis = new Redis();\n define('KEY','rate.limit:CURRENT');\n $redis->connect('localhost',6379);\n $current = $redis->get(KEY); //当前计数\n if(empty($current)) $current = 0;\n if($current >= $limit) return 0; //计数已满\n $redis->incr(KEY);\n $redis->expire(KEY,1);\n return $current+1);\n}\n```\n"
},
{
"alpha_fraction": 0.3837934136390686,
"alphanum_fraction": 0.48174533247947693,
"avg_line_length": 22.39583396911621,
"blob_id": "057343d2e77cadf856883fc7f787ddca03c19e91",
"content_id": "e49b9960f5392783103d6a3ab649703d7e941b3b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1139,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 48,
"path": "/算法/其他/寻找两个有序数组的中位数/c/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#include<stdio.h>\n#include<limits.h>\n\nint max(int a,int b){\n return a>b?a:b;\n}\n\nint min(int a,int b){\n return a>b?b:a;\n}\n\ndouble find_middle(int arr1[],int size1,int arr2[],int size2){\n if(size1 > size2) return find_middle(arr2,size2,arr1,size1); //确保size2 >= size1\n int size = size1 + size2;\n int L1,L2,R1,R2,c1,c2,lo = 0,hi = 2 * size1;\n while(lo <= hi){ //二分法\n c1 = (lo+hi) / 2;\n c2 = size - c1;\n L1 = (c1 == 0) ? INT_MIN:arr1[(c1-1)/2];\n R1 = (c1 == 2*size1) ? INT_MAX:arr1[c1/2];\n L2 = (c2 == 0)?INT_MIN:arr2[(c2-1)/2];\n R2 = (c2 == 2*size2) ? INT_MAX:arr2[c2/2];\n\n if(L1 > R2)\n hi = c1 - 1;\n else if(L2 > R1)\n lo = c1+1;\n else\n break;\n }\n return (max(L1,L2)+min(R1,R2))/2.0;\n}\n\nint main(){\n int arr1[] = {\n 1\n };\n int arr2[] = {\n 2,3,4,5,7,8,8,9\n };\n //1 2 3 3 4 4 5 5 7 7 8 8 9 9 -> 5\n int size1 = sizeof(arr1) / sizeof(arr1[0]);\n int size2 = sizeof(arr2) / sizeof(arr2[0]);\n\n printf(\"中位数:%0.6g\\n\",find_middle(arr1,size1,arr2,size2));\n\n return 0;\n}\n"
},
{
"alpha_fraction": 0.4148148000240326,
"alphanum_fraction": 0.5962963104248047,
"avg_line_length": 10.65217399597168,
"blob_id": "68a93e0d455d741019fe6a87d91304783d2b0e25",
"content_id": "029664b8d606a9c690b4ff7e421f5d5187e35678",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 490,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 23,
"path": "/算法/排序算法/基数排序/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 基数排序 \n话说这个算法我都有点忘了 \n这个写一下大致思路吧..... \n从低位到高位,以每个数位上的数字为标准确定位置,依次移动数位,知道所有的数位都排列完毕 \ne.g. \n有数列`1,11,2,3,42,34,54`\n1. 按个位分组(个位相同) \n```\n1,11\n2,42 \n3\n54\n``` \n合并得到一个新的数组:`1,11,2,42,3,54` \n2. 按十位分组(十位相同)\n```\n1 2 3\n11\n34\n54\n``` \n合并:`1,2,3,11,34,54` \n得到最后排序完毕的数列 \n"
},
{
"alpha_fraction": 0.8125,
"alphanum_fraction": 0.8203125,
"avg_line_length": 41.66666793823242,
"blob_id": "5407dcbccb6e576b38aa6a8bad16a6235d26c691",
"content_id": "87f7a36aeda51d2f138937d1a342613f0892a927",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 332,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 3,
"path": "/算法/排序算法/选择排序/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 选择排序:(时间复杂度:O(N^2))\n这个算法很容易理解,你可以想想自己有一副牌,每次把牌里面最小的拿出来,与他所能放的最靠前的那个数交换,直到最后整副牌都变得有序了. \n(当然整理牌的过程不像这么脑残,实际的过程更像是插入排序.这都是题外话了)\n"
},
{
"alpha_fraction": 0.6368421316146851,
"alphanum_fraction": 0.6526315808296204,
"avg_line_length": 16.537036895751953,
"blob_id": "44036af5831f7d1e66eeb0008724c9b72d6710a0",
"content_id": "58bdb152db614ca37998d5a22adbad725137c015",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1962,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 54,
"path": "/博弈论/博弈的思维看世界/博弈的术语.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 博弈的术语\n1. 参与人(players):理性选择的主体 \n2. 信息(information):参与者有关博弈的知识.\"知己知彼,百战不殆\" \n3. 行动(action):参与者能够选择的变量\n4. 策略(strategies):参与者在行动之前,事先准备好的一套完整的行动方案(预案) \n```\n 预则立,不预则废.\n (1).完整性 \n (2).多样性 \n (3).不可观察性 \n```\n```\n 好的应急预案: \n 1. what?(信息分类) \n 2. who?(责任到人) \n 3. action?(措施具体) \n 4. when?(时效性)\n``` \n5. 损益(payoff): 参与者的得与失 \n6. 结局(outcome):所有参与者选择各自策略后的结果 \n7. 均衡(equilibrium):所有参与者最优策略组合 \n8. 博弈的规则(rules of the game):参与者,行动和结果合起来称为博弈的规则. \n博弈论(game theory):是一种研究人们怎么做策略(行动)选择及其最后的均衡结果会是什么的理论. \n\n# 博弈的分类: \n```\n合作博弈和非合作博弈 \n合作博弈:\n 指参与者能够达成一种具有约束力的协议,在协议范围内选择有利于双方的策略.\n非合作博弈:\n 指参与者无法达成有利于双方的策略. \n```\n```\n静态博弈和动态博弈\n静态博弈:\n 指在博弈中,参与者同时选择,或虽非同时选择,但是在逻辑时间上是同时的.\n动态博弈:\n 指在泊一中,参与者的行动有先后顺序,且后行动者能够观察到先行动者的行动. \n```\n```\n完全信息博弈和不完全信息博弈:\n完全信息博弈:\n 指博弈双方能够获取到的信息相同\n不完全信息博弈:\n 指博弈双方能够获取到的信息不对称.\n```\n```\n零和博弈和非零和博弈:\n零和博弈:\n 指博弈前的损益总和与博弈后的损益总和相等.\n非零和博弈:\n 指博弈后的损益大于(小于)博弈前的损益总和.(正和或负和)\n```\n总结: 不玩负和游戏,少玩零和游戏,多玩正和游戏. \n\n"
},
{
"alpha_fraction": 0.43918246030807495,
"alphanum_fraction": 0.4576271176338196,
"avg_line_length": 22.057470321655273,
"blob_id": "311f75563b009963e4eda9606674f06b3d21d851",
"content_id": "7902e93be2ab9d5f2c519f95b00a8ab345e090c4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 2244,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 87,
"path": "/算法/其他/TopK问题/c/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#include<stdio.h>\n\n//交换两个数字\nvoid swap(int *a, int *b){\n int c = *a;\n *a = *b;\n *b = c;\n}\n\n//调整堆\nvoid adjust(int arr[],int i,int size){\n if(i >= size) return;\n int left = i*2+1; //左孩子\n int right = i*2+2; //右孩子\n int min = i;\n if(left < size && arr[min] > arr[left]) min = left;\n if(right < size && arr[min] > arr[right]) min = right;\n if(min != i){ //出现调整的情况\n swap(&arr[min],&arr[i]);\n adjust(arr,min,size);\n }\n}\n\n//创建堆\nvoid build_heap(int arr[],int size){\n int i;\n for(i = size/2 - 1; i >= 0; i--){\n adjust(arr,i,size);\n }\n}\n\n\n//通过建堆获取最大的第K个数\nint find_top_k_by_heap(int arr[],int k,int size){\n if(k > size) return -1; //k大于size,返回-1\n build_heap(arr,k); //创建一个大小为k的堆\n int i;\n for(i = k; i < size; i++){\n if(arr[0] < arr[i]){ //当前的数比堆顶的数更大\n swap(&arr[0],&arr[i]);\n adjust(arr,0,k); //调整堆\n }\n }\n return arr[0]; //返回堆顶元素\n}\n\n\n//Quick Select\nint find_top_k_by_quick_select(int arr[],int k,int size){\n if(k > size) return -1; //没有第k大的数\n int left = 0, right = size - 1;\n int i = left,j = right;\n int tmp;\n do{ //i不是当前的数\n i = left;\n j = right;\n tmp = arr[i]; //将当前的元素作为基准元素\n while(i < j){\n for(;i < j && arr[j] <= tmp;j--);arr[i] = arr[j];\n for(;i < j && arr[i] >= tmp;i++);arr[j] = arr[i];\n }\n arr[i] = tmp;\n if(i > k - 1){ //左边元素比较多\n right = i-1;\n }else if(i < k - 1){ //右边元素比较多\n left = i+1;\n }\n }while(i != k - 1); //i == k - 1时退出\n\n return tmp;\n}\n\nint main(){\n int arr[] = {1,3,4,0,0,5,8,5,9,0};\n int size = sizeof(arr) / sizeof(arr[0]);\n int k = 6;\n \n puts(\"---By Heap---\");\n for(k = 1; k <= size; k++)\n printf(\"第%d大的数:%d\\n\",k,find_top_k_by_heap(arr,k,size));\n puts(\"---By Quick Select---\");\n for(k = 1; k <= size; k++)\n printf(\"第%d大的数:%d\\n\",k,find_top_k_by_quick_select(arr,k,size));\n \n\n return 0;\n}\n"
},
{
"alpha_fraction": 0.4545454680919647,
"alphanum_fraction": 0.46993008255958557,
"avg_line_length": 21.34375,
"blob_id": "4301482848aa4b210c210e4692020f2c4dc14ed6",
"content_id": "9691da2ea2f72adb44f2687945c5ac2bac2cef87",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 753,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 32,
"path": "/算法/贪心算法/最多可以参与几项活动/c/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#include<stdio.h>\n#include<stdlib.h>\n\nstruct activity{\n int start; //开始时间\n int end; //结束时间\n}activity[100007];\n\n//交换依据\nint cmp(const void * a, const void * b){\n return ((struct activity*)a)->end > ((struct activity*)b)->end;\n}\n\nint main(){\n int N;\n int i,j;\n int sum;\n while(~scanf(\"%d\",&N)){\n for(i = 0; i < N; i++)\n scanf(\"%d%d\",&activity[i].start,&activity[i].end);\n qsort(activity,N,sizeof(struct activity),cmp); //按结束时间排序\n sum = N>0;\n for(i = 1,j = 0; i < N && j < N; i++){\n if(activity[j].end <= activity[i].start){\n j=i;\n sum++;\n }\n }\n printf(\"%d\\n\",sum);\n }\n return 0;\n}\n"
},
{
"alpha_fraction": 0.49719327688217163,
"alphanum_fraction": 0.5060144066810608,
"avg_line_length": 18.184616088867188,
"blob_id": "d93980daba8466cc95872dcb16df84a9c1ac5af9",
"content_id": "2e8e88561308ed8761fa33fb2e1115934e8decbf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1303,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 65,
"path": "/DailyCodingProblems/398_deleteKthElemInList_Amazon/c/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#include<stdio.h>\n#include<stdlib.h>\n\ntypedef struct Node{\n int val;\n struct Node *next;\n}Node,*node;\n\nvoid insert(node* proot,int val);\nnode delete(node root,int k);\nvoid visit(node root);\n\nint main(){\n node root = NULL;\n insert(&root,1);\n insert(&root,2);\n insert(&root,3);\n insert(&root,4);\n insert(&root,5);\n root = delete(root,0);\n insert(&root,5);\n visit(root);\n return 0;\n}\n\n//遍历元素\nvoid visit(node root){\n if(root == NULL) return;\n node tmp = NULL;\n for(tmp = root; tmp ; tmp = tmp->next){\n printf(\"%d%s\",tmp->val,tmp->next ? \"->\" : \"\");\n }\n puts(\"\");\n}\n\n//插入元素\nvoid insert(node* proot,int val){\n node root = *proot;\n root = (node)malloc(sizeof(Node));\n root->val = val;\n root->next = *proot;\n *proot = root;\n}\n\n//这里假设k从0开始\nnode delete(node root,int k){\n node tmp = NULL;\n if(k == 0){ //删除第一个元素\n tmp = root->next;\n free(root);\n root = tmp;\n }else{ //删除其他元素\n int i;\n tmp = root;\n for(i = 1; i < k && tmp != NULL; i++){\n tmp = tmp->next;\n }\n if(tmp->next){\n node next = tmp->next;\n tmp->next = tmp->next->next;\n free(next);\n }\n }\n return root;\n}\n"
},
{
"alpha_fraction": 0.8776119351387024,
"alphanum_fraction": 0.8805969953536987,
"avg_line_length": 82.25,
"blob_id": "7e6430cdb257d2b2335aea4209ba426a2e278ec4",
"content_id": "e884eced9a5cdb4ee5958b7437bccc50bd1f3a60",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 919,
"license_type": "no_license",
"max_line_length": 201,
"num_lines": 4,
"path": "/算法/排序算法/希尔排序/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 希尔排序\n这个算法的时间复杂度比较难分析. \n这里讲讲思路,希尔排序可以看成插入排序的一种改良版本,跟常规的插入排序不同的是,希尔排序的步长是递减的(所以又被叫做递减增量排序算法),排序中最核心的部分在于步长的选择,希尔排序的效率跟步长有很大的关系. \n初始转态下,步长设置为数组总长度的一般,然后用插入排序的思想,遍历后半边的序列,这样保证了间隔当前步长的元素是有序的(虽然整体不一定有序),然后将步长减半,逐渐使控制更加精细,直到最后排序完毕.虽然表面上每一趟都是借用了插入排序,但是算法整体采用了一种分而治之的手法,使得整体的运算次数减少了很多,加上插入排序本身并没有像冒泡排序选择排序一样,频繁的调用交换,所以性能上整体优于O(N^2)的排序算法. \n"
},
{
"alpha_fraction": 0.5699481964111328,
"alphanum_fraction": 0.5730569958686829,
"avg_line_length": 19.125,
"blob_id": "12fd3301d030df49f4389b21f528c7deb6539b0d",
"content_id": "0dae5a7a0586d0b52551a131a62233271ab6c9a6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 977,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 48,
"path": "/数据结构/树/2-3树/term1/tree23.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#include \"tree23.h\"\n\ntreep tree(){\n treep ret = (treep)malloc(sizeof(Tree));\n ret->root = NULL;\n ret->size = 0;\n\n return ret;\n}\n\nnodep create_node(treep t,keyp first_key,nodep first_child,int size){ //创建新的节点\n if(t == NULL) return NULL;\n nodep ret = (nodep)malloc(sizeof(Node));\n ret->parent = NULL;\n ret->keys = first_key;\n ret->children = first_child;\n ret->size = size;\n ret->next = NULL;\n if(first_child != NULL)\n ret->next = first_child->next;\n\n return ret;\n}\n\nBoolean insertNode(treep t,int key){\n return _insertNode(t,t->root,key);\n}\n\nBoolean _insertNode(treep t,nodep root,int key){\n if(t == NULL) return NULL;\n nodep node = NULL;\n keyp key = NULL;\n while(root != NULL){\n while(root->keys->key > key){\n node = root;\n root = root->next;\n }\n \n }\n}\n\nBoolean fixNode(treep t,nodep n){\n int size = n->size;\n}\n\nkeyp searchNode(treep t,int key){\n \n}"
},
{
"alpha_fraction": 0.3865814805030823,
"alphanum_fraction": 0.4121405780315399,
"avg_line_length": 22.475000381469727,
"blob_id": "425ff8f311c2bc86509aec6edb6ec0939ad0bf7e",
"content_id": "c55a8aa85308f239428589e389ffc54f2a3b63ef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 989,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 40,
"path": "/算法/排序算法/鸡尾酒排序/c/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#include<stdio.h>\n\n#define swap(a,b) {int tmp=a;a=b;b=tmp;}\n\n//鸡尾酒排序\nvoid cocktail_sort(int arr[],int size){\n int i,j;\n int bottom = 0;\n int top = size - 1;\n char swaped = 1; //交换标志\n while(swaped){\n swaped = 0;\n for(i = bottom; i < top; i++){\n if(arr[i] > arr[i+1]){\n swaped = 1;\n swap(arr[i],arr[i+1]);\n }\n }\n top--; //右边界左移\n for(i = top; i > bottom; i--){\n if(arr[i-1] > arr[i]){\n swaped = 1;\n swap(arr[i],arr[i-1]);\n }\n }\n bottom++; //左边界右移\n }\n}\n\nint main(){\n int arr[] = {\n 1,3,4,0,0,5,8,5,9,0\n };\n int size = sizeof(arr) / sizeof(arr[0]);\n int i;\n puts(\"排序前:\");for(i = 0; i < size; i++)printf(\"%d \",arr[i]);puts(\"\");\n cocktail_sort(arr,size);\n puts(\"排序后:\");for(i = 0; i < size; i++)printf(\"%d \",arr[i]);puts(\"\");\n return 0;\n}\n"
},
{
"alpha_fraction": 0.469991534948349,
"alphanum_fraction": 0.4911242723464966,
"avg_line_length": 20.509090423583984,
"blob_id": "86c51d63c1909f755f73300b019191292fa2aa82",
"content_id": "d185efdca73da47a805a63eb8c99373516105431",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1395,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 55,
"path": "/算法/排序算法/堆排序/c/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#include<stdio.h>\n\n//数字交换\nvoid swap(int *a,int *b){\n int c = *a;\n *a = *b;\n *b = c;\n}\n\n//调整某个节点\nvoid adjust(int arr[],int i,int size){\n if(i >= size) return;\n int left = i*2+1; //左子节点\n int right = i*2+2; //又子节点\n int max = i; //假设当前节点为最大节点\n if(left < size && arr[max] <= arr[left]) max = left;\n if(right < size && arr[max] <= arr[right]) max = right;\n if(max != i){ //存在节点调整\n swap(&arr[max],&arr[i]);\n\n adjust(arr,max,size); //递归调整下一层\n }\n}\n\n//初始化堆\nvoid heap_init(int arr[],int size){\n int i;\n for(i = size/2 - 1; i >= 0; i--){ //从最后一个元素的父元素逆序遍历调整\n adjust(arr,i,size);\n }\n}\n\n//堆排序\nvoid heap_sort(int arr[],int size){\n heap_init(arr,size); //初始化堆,此时堆顶最大\n int i;\n for(i = size-1; i > 0; i--){ //将堆顶元素与最后一个元素对调,从堆顶冲\n swap(&arr[0],&arr[i]); //交换两个数字\n adjust(arr,0,i);\n }\n}\n\nint main(){\n int arr[] = {\n 1,3,4,0,0,5,8,5,9,0\n };\n int size = sizeof(arr)/sizeof(arr[0]);\n int i;\n\n puts(\"排序前\");for(i = 0; i < size; i++) printf(\"%d \",arr[i]);puts(\"\");\n heap_sort(arr,size);\n puts(\"排序后\");for(i = 0; i < size; i++) printf(\"%d \",arr[i]);puts(\"\");\n\n return 0;\n}\n"
},
{
"alpha_fraction": 0.73828125,
"alphanum_fraction": 0.73828125,
"avg_line_length": 24.600000381469727,
"blob_id": "bce92dce7c4e5e12b44ce71453f9188cd0cb8514",
"content_id": "f629fac0363536a42213836e64c370219ff6b399",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 366,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 10,
"path": "/编程开发/前端/css/chrome滚动条样式.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "**参数说明:** \n```\n::-webkit-scrollbar 滚动条整体部分,可以设置宽度啥的\n::-webkit-scrollbar-button 滚动条两端的按钮\n::-webkit-scrollbar-track 外层轨道\n::-webkit-scrollbar-track-piece 内层滚动槽\n::-webkit-scrollbar-thumb 滚动的滑块\n::-webkit-scrollbar-corner 边角\n::-webkit-resizer 定义右下角拖动块的样式\n```\n"
},
{
"alpha_fraction": 0.46916890144348145,
"alphanum_fraction": 0.5013405084609985,
"avg_line_length": 14.22449016571045,
"blob_id": "690fbb630e0c8b7c1671d66d4d30847882cf98dd",
"content_id": "95794fa33c04c2a856a8890657e51b8f0e37f9d1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 746,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 49,
"path": "/DailyCodingProblems/400_sublist_sum_Goldman_Sachs/c/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#include<stdio.h>\n\n#define SIZE 10000\nint table[SIZE];\n\nint lowbit(int num);\nvoid update(int i,int size,int val);\nint getSum(int i);\nint sum(int i,int j);\n\nint main(){\n int arr[] = {\n 1,2,3,4,5,6\n };\n int size = sizeof(arr) / sizeof(arr[0]);\n int i;\n for(i = 0; i < size; i++)\n update(i,size,arr[i]);\n\n printf(\"%d\\n\",sum(1,3));\n\n return 0;\n}\n\nint lowbit(int val){\n return val & (-val);\n}\n\nvoid update(int i,int size,int val){\n i+=1;\n while(i <= size){\n table[i-1] += val;\n i += lowbit(i);\n }\n}\n\nint getSum(int i){\n i+=1;\n int s = 0;\n while(i > 0){\n s+=table[i-1];\n i-=lowbit(i);\n }\n return s;\n}\n\nint sum(int i,int j){\n return getSum(j-1)-getSum(i-1);\n}\n"
},
{
"alpha_fraction": 0.4867549538612366,
"alphanum_fraction": 0.5132450461387634,
"avg_line_length": 25.676469802856445,
"blob_id": "a36c7fab9ebb1b2ae7cb4d253be7bd0d02a0111a",
"content_id": "fd0199f3274d58ee454cf92b2caa75de6ab949df",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1034,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 34,
"path": "/leetcode/正则表达式/python/main.py",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#coding:utf8\nclass Solution:\n '''\n 判断是否匹配给定的模式\n\n >>> import sys\n >>> sys.setrecursionlimit(10000000)\n >>> s = Solution()\n >>> s.isMatch('apppppppl','ap*l')\n True\n >>> s.isMatch('apa','app')\n False\n >>> a = 'a{b}c{d}e'.format(b='j'*10000,d='o'*5)\n >>> s.isMatch(a,'a')\n False\n >>> s.isMatch(a,'ab*cd*e')\n False\n >>> s.isMatch(a,'aj*co*e')\n True\n '''\n def isMatch(self,s,p):\n if not p: # p为空\n return not s # 如果s为空,则匹配成功,否则失败\n \n # 获取第一个字节的匹配情况,s为空,返回false,s不为空,字符相同或为.匹配成功,否则失败\n first_char_match = bool(s) and p[0] in {s[0],'.'}\n if len(p) > 1 and p[1] == '*': # p的长度大于1\n return (self.isMatch(s,p[2:]) or first_char_match and self.isMatch(s[1:],p))\n else:\n return first_char_match and self.isMatch(s[1:],p[1:])\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod()"
},
{
"alpha_fraction": 0.44877344369888306,
"alphanum_fraction": 0.5064935088157654,
"avg_line_length": 18.25,
"blob_id": "ba84fcedf66c8c05c4a29f674cd58b2c01de823f",
"content_id": "838d9bf4e76bd0794cce9ef8fb6de0a14a1068eb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 693,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 36,
"path": "/算法/动态规划/高楼扔鸡蛋/code/c/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#include<stdio.h>\n#include<stdlib.h>\n#include<string.h>\n\nint DP[1000][1000];\n\nint max(int a,int b){\n return a>b?a:b;\n}\n\nint min(int a,int b){\n return a>b?b:a;\n}\n\nint count(int k,int n){\n if(k == 0) return 0;\n if(k == 1) return n;\n //if(DP[k][n]) return DP[k][n];\n\n int i;\n int res = n;\n for(i = 1; i <= n; i++){\n res = min(res,max(count(k-1,i-1),count(k,n-i))+1);\n }\n return DP[k][n] = res;\n}\n\nint main(int argc,char** argv){\n if(argc != 3) return -1;\n int k = atoi(argv[1]);\n int n = atoi(argv[2]);\n if(k < 0 || k >= 1000 || n < 0 || n >= 1000) return -1;\n memset(DP,0,sizeof(int)*1000*1000);\n printf(\"%d\\n\",count(k,n));\n return 0;\n}\n"
},
{
"alpha_fraction": 0.6809954643249512,
"alphanum_fraction": 0.7081447839736938,
"avg_line_length": 19.952381134033203,
"blob_id": "ce091fda07f4390932228b18e9adbf70e911d02a",
"content_id": "fea67773b73a6e8c4a458f10083e596dea3be169",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 966,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 21,
"path": "/编程开发/面试问题/数亿用户统计独立用户访问量(UV).md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 大量用户访问去重统计\n在某些情形下,我们可能需要统计某个网站或者网页的用户访问量.此时有哪些方案可以考虑. \n1. 使用Hash \n利用hash统计不重复的用户访问数量. \n**优点:** \n简单,易实现,准确. \n**缺点:** \n内存占用大,随着key的增加,性能会下降,对于巨量数据,可能不是一种好方式. \n2. 利用bitset \n一个二进制位代表一个用户坑,一个长度为32位变量里面可以表示32个用户的状态. \n**优点:** \n空间占用相对较少,查询方便. \n**缺点:** \n对于用户量不多的情况,其占用空间较大. \n除此之外,需要维护一个非登录用户的bitset. \n3. 使用概率算法 \n利用HyperLogLog算法,不存储具体的值,只存放计算概率的相关数据. \n**优点:** \n内存占用少,对于一个指定的key,只需要占用12kb的空间. \n**缺点:** \n计算的数据并不是特别准确,理论上有0.89%的误差. \n"
},
{
"alpha_fraction": 0.5672383904457092,
"alphanum_fraction": 0.5948297381401062,
"avg_line_length": 21.857954025268555,
"blob_id": "e1d1096d987ace8151272cdfde56b2a59e61c842",
"content_id": "cbe02927140a077cf609e672e1078649b230a41e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 5235,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 176,
"path": "/编程开发/后端/Yii2/Rules规则.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# Rules验证规则\n1. **required**:必须验证属性,CRequiredValidator的别名,确保特性不为空 \n```\n[['字段名1','字段名2'],'required'] //字段1,字段2必填\n[['字段名'],'required','requiredValue'=>'必填值','message'=>'提示信息']\n```\n2. **email**:邮箱验证||CEmailValidator的别名,确保特性的值为有效的邮箱地址 \n```\n['email','email']\n```\n3. **match**:正则验证||CRegularExpressionValidator的别名,确保特性匹配一个正则表达式 \n```\n[['字段名'],'match','pattern'=>'正则表达式','message'=>'提示信息']\n[['字段名'],'match','not'=>'true','pattern'=>'正则表达式','message'=>'正则取反']\n['username','match','pattern'=>'/^[a-z]\\w*$/i']\n```\n4. **url**:网址||CUrlValidator的别名,确保特性是一个有效的路径 \n```\n[['website'],'url','defaultScheme'=>'http']\n```\n5. **captcha**:验证码||CCaptchaValidator的别名,确保了特性的值始于CAPTCHA显示出来的验证码 \n```\n['verificationCode','captcha']\n```\n6. **safe**:安全,对指定数据不进行验证 \n['description','safe']\n7. **campare**:比较||CCompareValidator的别名,确保一个值等于另一个特性或者常量 \n```\n['repassword','compare','compareAttribute'=>'password','message'=>'两次密码不一致']\n['age','compare','compareAttribute'=>30,'operator'=>'>=']\n```\n8. **default**:默认值||CDefaultValidator的别名,为特性指派了一个默认值 \n```\n['age','default','value'=>null]\n```\n9. **exist**:存在||CExistValidator的别名,确保属性值存在于指定的数据表字段中. \n```\n['字段名','exist']\n[\n 'email',\n 'exist',\n 'targetClass'=>'\\common\\model\\User',\n 'filter'=>['status'=>User::STATUS_ACTIVE],\n 'message'=>'没有符合条件的条目'\n]\n```\n10. **file**:文件||CFileValidator的别名,确保了特性包含了一个上传文件的名称 \n```\n['primaryImage','file','extensions'=>['png','jpg','gif']]\n['textFile','file','extensions'=>['txt','pdf'],'maxSize'=>1024*1024*1024]\n```\n11. **filter**:过滤器||CFilterValidator的别名,使用一个filter转换属性 \n```\n[['username','email'],'filter','filter'=>'trim','skipOnArray'=>'true']\n['email','filter','filter'=>function($value){\n return strtolower($value); //返回处理过的值\n}]\n```\n12. **in**:范围||CRangeValidator的别名,确保了特性出现在一个预定的值列表中 \n```\n['level','in','range'=>[1,2,3]]\n```\n13. **unique**:唯一性||CUniqueValidator的别名,确保了特性在数据表字段中是唯一的 \n```\n['字段名','unique']\n``` \n**唯一联合索引:** \n```\n[\n ['app_id','group_id'],\n 'unique',\n 'targetAttribute'=>['app_id','group_id'],\n 'message'=>'app_id和group_id已经被占用'\n]\n```\n14. **integer**: 整数 \n```\n['age','integer']\n```\n15. **number**: 数字 \n```\n['salary','number']\n```\n16. **double**: 双精度浮点数 \n['salary','double']\n17. **date**: 日期 \n```\n[['from','to'],'date']\n```\n18. **string**: 字符串 \n```\n['username','string','length'=>[4,24]]\n```\n19. **boolean**: 布尔值||CBooleanValidator的别名 \n```\n['字段名','boolean','trueValue'=>true,'falseValue'=>false,'strict'=>true]\n[\n ['selected','boolean'],\n [\n 'deleted',\n 'boolean',\n 'trueValue'=>true,\n 'falseValue'=>false,\n 'strict'=>true //是否要求待测输入必须严格匹配trueValue和falseValue,默认为false\n ]\n]\n```\n20. **image**:是否为一个有效的图片文件 \n```\n[\n 'primaryImage',\n 'image',\n 'extensions'=>'png,jpg',\n 'minWidth'=>100,\n 'maxWidth'=>1000,\n 'minHeight'=>100,\n 'maxHeight'=>1000\n]\n```\n21. **each**: 遍历,ids和product_ids是数字的集合 \n```\n[['ids','product_ids'],'each','rule'=>['integer']]\n```\n22. **自定义rules**:\n```\n['password','validatePassword']\n/**\n * 创建验证密码发方法\n * @param string $attribute 正在被验证的字段\n * @param array $params 规则中附加的其他键值对\n */\npublic function validatePassword($attribute, $params)\n{\n if (!$this->hasErrors()) {\n $user = $this->getUser(); //获取当前的用户\n if ( !$user || !$user->validatePassword($this->password)) {\n $this->addError($attribute, '账号或者密码错误');\n }\n }\n}\n```\n23. **trim**: 去除首尾空白字符 \n```\n['email','trim']\n等同于\n['email','filter','trim']\n```\n24. 
**ip**: IP地址\n```\n[\n //检查ip是否有效\n ['ip_addr','ip'],\n //检查ip_addr是否为一个有效的ipv6或子网地址\n //被检查的值将会被展开成一个完整的IPv6表示方法\n ['ip_addr','ip','ipv4'=>false,'subnet'=>null,'expandIPV6'=>true],\n //价差ip_addr是否为一个有效的ip地址(v4或v6)\n //允许地址存在一个表示非的字符!\n ['ip_addr','ip','negation'=>true]\n]\n```\n**该验证器存在以下参数:**\n- ipv4:ipv4检测,默认true\n- ipv6:ipv6检测,默认true\n- subnet:是否启用CIDR子网监测\n + true:子网是必须的,如果不是标准的CIDR格式将被拒绝\n + false:子网不能是CIDR\n + null:CIDR可选\n 默认值为false\n- ranges:允许或禁止的ipv4或ipv6范围的数组.\n```\n['client_ip','ip','ranges'=>[\n '192.168.10.128',\n '!192.168.10.0/24',\n 'any' //允许任何其他IP地址\n]]\n```\n"
},
{
"alpha_fraction": 0.7534246444702148,
"alphanum_fraction": 0.767123281955719,
"avg_line_length": 22.66666603088379,
"blob_id": "51c2f6970b8f78848335e1449e0a382f9f0d9b99",
"content_id": "e326f878388ce054488c08a121da5485c878dbd1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 183,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 3,
"path": "/DailyCodingProblems/399_sum_same_value_Facebook/c/bruteforce/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 暴力法解决思路.\n遍历一遍得出总数,除以3得到目标值.(可能不能整除,此时直接失败) \n再一次遍历,求出某段连续区间的和与目标值比较. \n"
},
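A minimal C sketch of the brute force described above, assuming the underlying problem asks whether the array can be cut into three contiguous parts with equal sums; the name `can_split3` and the test data are illustrative, not taken from this repo.

```
#include <stdio.h>

/* Returns 1 if arr can be cut into three contiguous parts with equal sums. */
int can_split3(const int arr[], int n) {
    long total = 0, run = 0;
    int cuts = 0, i;
    for (i = 0; i < n; i++) total += arr[i];   /* first pass: total */
    if (total % 3 != 0) return 0;              /* not divisible: fail immediately */
    for (i = 0; i < n && cuts < 2; i++) {      /* second pass: contiguous segments */
        run += arr[i];
        if (run == total / 3) { run = 0; cuts++; }
    }
    return cuts == 2;  /* the leftover tail is then forced to sum to total/3 */
}

int main(void) {
    int a[] = {1, 3, 4, 0, 4};
    printf("%d\n", can_split3(a, 5));  /* 1: [1,3] [4] [0,4] */
    return 0;
}
```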
{
"alpha_fraction": 0.49193549156188965,
"alphanum_fraction": 0.524193525314331,
"avg_line_length": 11.399999618530273,
"blob_id": "01fbe1ab983eac65113cb48f7f28e9649c8c51f4",
"content_id": "5a0dbfff79dbef43e90c9436a608fc6f4a322983",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 124,
"license_type": "no_license",
"max_line_length": 30,
"num_lines": 10,
"path": "/算法/位运算/整数异号/c/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#include<stdio.h>\n\nint isOp(int a,int b){\n return (a^b)<0;\n}\n\nint main(){\n printf(\"%d\\n\",isOp(-1,2));\n return 0;\n}\n"
},
{
"alpha_fraction": 0.7639751434326172,
"alphanum_fraction": 0.7919254899024963,
"avg_line_length": 28.090909957885742,
"blob_id": "51ca7ecc2b5c3e48a564210337aafede5075fd4e",
"content_id": "0ea558f7b83f832ada1630f3ee7e496869070290",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 520,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 11,
"path": "/编程开发/后端/Wordpress/问题/安装主题出现curl error.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 问题描述\n今天在家里构建了一套Wordpress系统,安装主题时,出现\n```\ncURL error 18: transfer closed with 1600284 bytes remaining to read\n```\n的问题,经过一番谷歌找到了解决方法:\n将/var/lib/nginx目录所有者改为www-data \n```\nchown -R www-data:www-data /var/lib/nginx\n```\n这个问题的原因可能是wordpress在利用curl下载文件时,需要借助nginx做中间缓存,而此时php-fpm的运行者www-data没有对/var/lib/nginx的操作权限.导致一直安装不上主题. \n"
},
{
"alpha_fraction": 0.8108108043670654,
"alphanum_fraction": 0.8108108043670654,
"avg_line_length": 16.5,
"blob_id": "a146f826aa90c0905e2d84b92c1d5f77c3e57022",
"content_id": "34ab33717b1dac9d3630bd7fdb5d53b74b655573",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 87,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 2,
"path": "/编程开发/Linux/常见问题/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 常见问题\n这个目录记录着本人在Linux中遇到的问题及解决方案. \n"
},
{
"alpha_fraction": 0.5210084319114685,
"alphanum_fraction": 0.5210084319114685,
"avg_line_length": 22.866666793823242,
"blob_id": "8f085e504f79460d51856d8c2b4190e4e730e9b9",
"content_id": "2735a4e7f6258b36b62c6b83f8b55fc825146c83",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 357,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 15,
"path": "/数据结构/树/红黑树/python/node.py",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "class Node:\n def __init__(self,newval):\n self.val = newval\n self.left = NullNode()\n self.right = NullNode()\n self.parent = None\n self.color = 'red'\n\nclass NullNode:\n def __init__(self):\n self.val = None\n self.left = None\n self.right = None\n self.parent = None\n self.color = 'black'"
},
{
"alpha_fraction": 0.7078384757041931,
"alphanum_fraction": 0.7695962190628052,
"avg_line_length": 43.31578826904297,
"blob_id": "bb5e793186574b4c36542176f2ca26e427ea0b52",
"content_id": "995a429ef88607bdb0e0213e3c666c53ef60f70d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1222,
"license_type": "no_license",
"max_line_length": 153,
"num_lines": 19,
"path": "/编程开发/Linux/常见问题/安装mysql签名无效.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# Mysql签名无效\n今天重新安装mysql的时候,有一步需要执行apt update. \n执行之后显示: \n```\nW: GPG 错误:http://repo.mysql.com/apt/ubuntu bionic InRelease: 下列签名无效: EXPKEYSIG 8C718D3B5072E1F5 MySQL Release Engineering <[email protected]>\nE: 仓库 “http://repo.mysql.com/apt/ubuntu bionic InRelease” 没有数字签名。\nN: 无法安全地用该源进行更新,所以默认禁用该源。\nN: 参见 apt-secure(8) 手册以了解仓库创建和用户配置方面的细节。\nW: GPG 错误:http://repo.mongodb.org/apt/ubuntu xenial/mongodb-org/3.6 Release: 下列签名无效: EXPKEYSIG 58712A2291FA4AD5 MongoDB 3.6 Release Signing Key <[email protected]>\nE: 仓库 “http://repo.mongodb.org/apt/ubuntu xenial/mongodb-org/3.6 Release” 没有数字签名。\nN: 无法安全地用该源进行更新,所以默认禁用该源。\nN: 参见 apt-secure(8) 手册以了解仓库创建和用户配置方面的细节。\n```\n跟上方错误类似的提示,经过一番查资料,发现是签名的问题,重新加上签名就好了. \n```\n\nsudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 8C718D3B5072E1F5\nsudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 58712A2291FA4AD5\n```\n"
},
{
"alpha_fraction": 0.5984848737716675,
"alphanum_fraction": 0.6136363744735718,
"avg_line_length": 14.84000015258789,
"blob_id": "15b934d803a2b6c1408afb0ccda77f3d5461ca71",
"content_id": "d5c9a9b07edc7bf91ed7f5495df40d9c9bcd0d9f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 862,
"license_type": "no_license",
"max_line_length": 32,
"num_lines": 25,
"path": "/外语/韩语/语法/기가.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "- 기가 어떻습니까? \n相当于汉语的\"(做起来)...怎么样?\" \ne.g. \n```\n1)가:공부하다. 学习\n나:공부하기가 어떱습니까? 学起来怎么样?\n2)가:기다리다. 等待\n나:기다리기가 어떻습니까? 等待的感觉如何?\n3)가:일하다. 工作\n나:일하기가 어떱습니까? 工作感觉怎么样?\n```\n\n-기가 -ㅂ니다/습니다. \n惯用句型: \n`-기가`与尊敬阶式陈述终结词尾\"-ㅂ니다/습니다.\"构成. \n相当于韩语的\"做起来...\" \ne.g.\n```\n1)가:공부하기가 어떻습니까? 学起来如何?\n나:공부하기가 재미있습니다. 学起来很有意思.\n2)가:기다리기가 어떻습니까? 等得感觉如何?\n나:기다리기가 피곤합니다. 等起来很累.\n3)가:일하기가 어떻습니까? 工作起来如何?\n나:일하기가 어렵습니다. 工作起来很累.\n```\n"
},
{
"alpha_fraction": 0.8600000143051147,
"alphanum_fraction": 0.8600000143051147,
"avg_line_length": 23,
"blob_id": "0b4a558627f40e5c0344d97231c16b04dd72c7e1",
"content_id": "888a756b88590ee4f746ea0147d8d7ca19d43a72",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 116,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 2,
"path": "/编程开发/数据库/mysql/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# MySQL相关的笔记\n这里记录着我在工作和学习中遇到的一些MySQL相关的问题及解决方案. \n"
},
{
"alpha_fraction": 0.5788113474845886,
"alphanum_fraction": 0.6236003637313843,
"avg_line_length": 28.605262756347656,
"blob_id": "c1befae4dfb408ea416101347ce7e34c4a957424",
"content_id": "0e2787818dc19b2b7826be2e64f389cb132be2d0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 2077,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 38,
"path": "/leetcode/696-计数二进制子串/c/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "/**\r\n* 解题思路:\r\n* 解题的关键点在于字符串状态转化的这个点,从这个点向两边延伸.总情况数总是倾向于最小的一遍.\r\n* 比如:00011 这个字符串的转折点在0到1之间,从这个转折点向两边延伸,因为右边比较少,所以只可能是两种情况(比较少的连续数字的长度).\r\n* 有了这个思路,这道题目也就简单了,我这里的思路是记录下0,1字符连续的个数.每次遇到转折点(从一个字符转到另一个字符),我们进行一次统计.\r\n* 这里统计的并不是当前遍历到的转折点与其前面连续数字的情况数,而是计算上一个连续数字与上上个连续数字的情况数.\r\n* 比如: 0001111000 第一个转折点的时候 0的计数为3,虽然已经遍历到了1,但是此时的1还没有计数.直到下一个转折点0,此时新一轮的0也还没有计数,\r\n* 但是上一轮的一对数据已经产生,我们在0的计数和1的计数中选取一个最小值,加入总计数.\r\n* 这里有两种特殊情况需要考虑,一种是第一个转折点,很容易知道,在第一个转折点的时候,另外一个还没有计数,所以0和1的计数最小值依然还是0,对结果无影响.\r\n* 第二种特殊情况是遍历结束后,因为在遍历到结尾的\\0时,循环结束,并不会处理当前已经计数的数字,所以在返回结果时,加上上一对数据.\r\n*/\r\n\r\n#include<stdio.h>\r\n\r\n#define min(a,b) ((a)>(b)?(b):(a))\r\n\r\nint subcount(char *str){\r\n int count[2] = {0}; //初始化数组记录两个字符的计数\r\n int i;\r\n int sum = 0; //总数目\r\n for(i = count[str[0]&0x01] = 1; str[i] != '\\0'; i++){\r\n char idx = str[i] & 0x01; //下标\r\n if(str[i-1] == str[i]) count[idx]++; //字符相同则\r\n else{ //出现不相同元素\r\n sum += min(count[0],count[1]);\r\n count[idx] = 1; //重新设置count值\r\n }\r\n }\r\n\r\n return sum+min(count[0],count[1]);\r\n}\r\n\r\nint main(){\r\n char str[256];\r\n while(~scanf(\"%s\",str)){\r\n printf(\"%d\\n\",subcount(str));\r\n }\r\n}"
},
{
"alpha_fraction": 0.774193525314331,
"alphanum_fraction": 0.774193525314331,
"avg_line_length": 13.5,
"blob_id": "86f1ec125f191ca4ee9ed9d58cd10a328cf38b4d",
"content_id": "82877dd2ee058c129694f167946f950b6ae09a39",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 71,
"license_type": "no_license",
"max_line_length": 19,
"num_lines": 2,
"path": "/编程开发/Linux/命令/cloc.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# cloc命令\n用于统计目录中文件的代码数和代码语言. \n"
},
{
"alpha_fraction": 0.39055299758911133,
"alphanum_fraction": 0.5299538969993591,
"avg_line_length": 23.799999237060547,
"blob_id": "64cf5d2c84dd995a30b64e6a150842cd9b80b340",
"content_id": "5c7eccd99fcf9a4a3d1d1118bb8e6b055d9e5ca1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 868,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 35,
"path": "/DailyCodingProblems/385_decrpt_apple/c/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#include<stdio.h>\n\nconst unsigned char keys[1024] = \"2222222222222222222222222222222222222\";\n\nunsigned char hex(unsigned char * s){\n int i = 0;\n unsigned char res = 0;\n unsigned char tmp = 0;\n for(i = 0; i < 2; i++){\n if(s[i] <= '9') tmp = s[i] - '0';\n else tmp = s[i] - 'a' + 10;\n // printf(\"[%c]\",s[i]);\n res = (res << 4) | tmp;\n }\n return res;\n}\n\nvoid encrypt(char str[],char out[]){\n int i;\n for(i = 0; keys[i] != '\\0'; i++){\n unsigned char tmp = hex(str+i*2);\n out[i] = tmp ^ keys[i];\n //printf(\"%c\", keys[i] ^ tmp);\n //printf(\"%d,%d\\n\",tmp,tmp^str[i]);\n }\n out[i] = '\\0';\n}\n\nint main(){\n unsigned char dec[1024] = \"7a575e5e5d12455d405e561254405d5f1276535b5e4b12715d565b5c551262405d505e575f\";\n unsigned char out[1024];\n encrypt(dec,out);\n puts(out);\n return 0;\n}\n"
},
{
"alpha_fraction": 0.7272727489471436,
"alphanum_fraction": 0.7272727489471436,
"avg_line_length": 9,
"blob_id": "f04faae8ddaeacf6005427e99b77838134c7374b",
"content_id": "b20ac0e392ddb358bfe911475611091ab9f708bf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 46,
"license_type": "no_license",
"max_line_length": 10,
"num_lines": 2,
"path": "/编程开发/Linux/UNP/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# Unix网络编程\n按照章节创建目录 \n"
},
{
"alpha_fraction": 0.8648648858070374,
"alphanum_fraction": 0.8648648858070374,
"avg_line_length": 54.5,
"blob_id": "f90a58cf7c55687b02739914955787ebf576b449",
"content_id": "9ea7525fd886d20e08510eddff0b7f1a7efa8fd7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 460,
"license_type": "no_license",
"max_line_length": 135,
"num_lines": 4,
"path": "/编程开发/Linux/常见问题/切换启动环境.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 切换启动环境\n在Linux管理中,有时候可能会碰到需要切换默认启动环境的情况. \n比如从默认命令行环境切换到默认桌面环境或者从默认桌面环境切换到默认命令行启动. \n如果需要从命令行启动切换到桌面环境启动,我们可以使用:`systemctl set-default graphical.target`,如果需要从默认桌面环境切换到命令行启动,我们可以使用`systemctl set-default multi-user.target`\n"
},
{
"alpha_fraction": 0.4065420627593994,
"alphanum_fraction": 0.43691587448120117,
"avg_line_length": 18.454545974731445,
"blob_id": "60d9ea4fc6b83d92c430e217af5b6b8e68452d57",
"content_id": "9090a525321ed09b387ca36ca95745813b03af40",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 428,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 22,
"path": "/DailyCodingProblems/380_divide_Nextdoor/python/main.py",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#coding=utf-8\n#python3\n\ndef divide(a,b):\n if b == 0:\n return None\n elif b == 1:\n return a\n left = 0\n right = a\n while left < right:\n cur = (left + right)>>1\n res = cur * b\n if res <= a and a - res < b:\n left = right = cur\n elif res > a:\n right = cur - 1\n else:\n left = cur + 1\n return (left,a - left * b)\n\nprint(divide(100,31))\n"
},
{
"alpha_fraction": 0.7538265585899353,
"alphanum_fraction": 0.7678571343421936,
"avg_line_length": 42.55555725097656,
"blob_id": "9f139890a8df8de4e3a9028757159bc722c5610e",
"content_id": "d708e4b5bb9d83b40c8b79d14ff09c7685f71c58",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1144,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 18,
"path": "/编程开发/后端/Wordpress/问题/wordpress迁移并切换域名.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# wordpress迁移并切换域名\n[本文来源](https://www.centos.bz/2017/08/move-wordpress-change-domain/)\n这个方法给了我很大的帮助: \n首先我们需要先迁移Wordpress: \n1. 打包源数据库与网站整站源码\n2. 源码上传到新服务器上,数据库新建(重新安装一遍wordpress)后恢复(用打包的数据库sql恢复). \n3. 将域名解析到新服务器上\n4. 删除新服务器wordpress根目录下的wp-config.php文件 \n5. 打开域名,重新安装.(这一步有待确认) \n\n进行了上述操作后,我们接下来需要做得就是更改域名了,这一步涉及到数据库的操作.具体如下: \n```\nUPDATE wp_options SET option_value = replace(option_value, 'www.mydomain.com','www.newdomain.com') ;\nUPDATE wp_posts SET post_content = replace(post_content, 'www.mydomain.com','www.newdomain.com') ;\nUPDATE wp_comments SET comment_content = replace(comment_content, 'www.mydomain.com', 'www.newdomain.com') ;\nUPDATE wp_comments SET comment_author_url = replace(comment_author_url, 'www.mydomain.com', 'www.newdomain.com') ;\n```\n之后再访问网站就会发现原来的链接全部迁移完成了\n"
},
{
"alpha_fraction": 0.44252872467041016,
"alphanum_fraction": 0.6839080452919006,
"avg_line_length": 173,
"blob_id": "1310b891749a62a226e3bf7f3b2c36c6007499f2",
"content_id": "5ad274c27e95f2f70d3248880e884ae3a4beceff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 182,
"license_type": "no_license",
"max_line_length": 173,
"num_lines": 1,
"path": "/leetcode/137-只出现一次的数字2/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "[解题思路](https://github.com/ixysoft/notes/tree/master/%E7%AE%97%E6%B3%95/%E4%BD%8D%E8%BF%90%E7%AE%97/%E5%8F%AA%E5%87%BA%E7%8E%B0%E4%B8%80%E6%AC%A1%E7%9A%84%E5%85%83%E7%B4%A03)\n"
},
{
"alpha_fraction": 0.4384133517742157,
"alphanum_fraction": 0.4384133517742157,
"avg_line_length": 6.693548202514648,
"blob_id": "a99ff1d408dc5aae3c800b20e2de0e2744c6d86c",
"content_id": "511fc73bcee5f74021a0bc50d3c038309a1745ba",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 885,
"license_type": "no_license",
"max_line_length": 14,
"num_lines": 62,
"path": "/外语/韩语/2019-09-06.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "- 동호회 \n同好会,兴趣协会 \n- 모임 \n集会,聚会 \n- 축가 \n祝贺歌 \n- 별로 \n不怎么 \n- 포기하다 \n抛弃,放弃 \n- 연세 \n年岁,年龄 \n- 청첩장 \n请牒状,请帖 \n- 피로연 \n披露宴,喜宴 \n- 후배 \n后辈 \n- 촬열을 하다 \n摄影,拍摄 \n- 환영회 \n欢迎会 \n- 종일 \n整天 \n- 미안하다 \n不好意思,抱歉(未安) \n- 마음씨\n心肠 \n- 동네 \n村子里 \n- 남산 도서관 \n南山图书馆 \n- 노선도 \n路线图 \n- 케이티엑스(KTX) \n韩国高速列车 \n- 잡다 \n抓 \n- 낚시하다 \n钓鱼 \n- 서울 타워 \n首尔塔 \n- 캐인 택시 \n私人出租车 \n- 놀이공원 \n游乐园 \n- 하차 \n下车 \n- 무궁화호 \n木槿花号 \n- 음악회 \n音乐会 \n- 번 \n番,次,号 \n- 벨 \nbell,钟,铃 \n- 승진 \n升进,晋升. \n- 토끼 \n兔子 \n- 승강장 \n升降场,月台 \n"
},
{
"alpha_fraction": 0.46825939416885376,
"alphanum_fraction": 0.4873720109462738,
"avg_line_length": 23.016393661499023,
"blob_id": "e06964be85cf9104eacd20a8d202f00621b7f2ef",
"content_id": "5a3964b5fa38097b4d9b8e45b210464d1ba90358",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 2021,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 61,
"path": "/算法/排序算法/快速排序/c/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "/**\n * C语言里面是内置了快速排序函数的....\n * 详情参考qsort函数\n *\n * 我们主要是研究这个算法的思想,所以对一些东西进行了假定,\n * 比如qsort中的cmp函数,我们已经假定成排序成从小到大的顺序,所以省略了cmp函数\n * 又在示例中使用的是int类型的数组,所以我们没必要指定每个数据块的大小.\n * 了解了原理之后,我们可以很容易将函数实现成满足需求的方式\n **/\n\n#include<stdio.h>\n\nint quick_sort(int arr[],int size){\n //快速排序排序的边界\n int left = 0;\n int right = size - 1;\n int stack[10000]; //这里用数组实现栈结构,用于存储需要排序的边界,根据自己的需求可以改成对应的结构\n int st = 0; //栈下标\n int i,j;\n //将初始的边界压入栈中\n stack[st++] = left;\n stack[st++] = right;\n while(st > 0){ //栈非空\n //从栈中弹出表示边界的元素\n j = right = stack[--st];\n i = left = stack[--st];\n //将第一个元素当成标兵元素,所有的元素跟它比较\n int tmp = arr[i];\n while(i < j){\n for(;i<j && arr[j] >= tmp; j--);arr[i] = arr[j];\n for(;i<j && arr[i] <= tmp; i++);arr[j] = arr[i];\n }\n arr[i] = tmp;\n //经过上面的过程,标兵元素的位置被选出\n\n //分解成子问题,求解tmp左边部分\n if(left < i - 1){\n stack[st++] = left;\n stack[st++] = i-1;\n }\n //以相同的原理,排列tmp右边的部分\n if(right > j + 1){\n stack[st++] = j+1;\n stack[st++] = right;\n }\n }\n return 0;\n}\n\nint main(){\n int arr[] = {\n 1,3,4,0,0,5,8,5,9,0\n };\n int size = sizeof(arr)/sizeof(arr[0]);\n int i;\n\n printf(\"排序前:\");for(i = 0; i < size; i++) printf(\"%d \",arr[i]);puts(\"\");\n quick_sort(arr,size);\n printf(\"排序后:\");for(i = 0; i < size; i++) printf(\"%d \",arr[i]);puts(\"\");\n return 0;\n}\n"
},
{
"alpha_fraction": 0.478515625,
"alphanum_fraction": 0.48388671875,
"avg_line_length": 25.953947067260742,
"blob_id": "0dc5c84078d806aa238b1b165c8eb9f01434c74f",
"content_id": "de343e63aa1a5c3dbe415627a3100db4016e66fc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 4478,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 152,
"path": "/数据结构/树/红黑树/c/term2/rb.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#include \"rb.h\"\n\nnodep new_node(int key){\n nodep tmp = (nodep)malloc(sizeof(Node));\n tmp->parent = tmp->left = tmp->right = NULL;\n tmp->key = key;\n tmp->color = RED;\n return tmp;\n}\n\nint visit(nodep root){ //中序遍历\n // if(root != NULL){\n // visit(root->left);\n // printf(\"%d \",root->key);\n // visit(root->right);\n // }\n nodep stack[1024];\n int len = 0;\n stack[len++] = root;\n stack[len++] = NULL;\n nodep tmp = NULL;\n int i;\n for(i = 0; i < len; i++){\n tmp = stack[i];\n if(tmp == NULL){\n if(len > 0 && stack[len-1] != NULL)\n stack[len++] = NULL;\n printf(\"\\n\");\n }else{\n printf(\"%d[%s]{%d} \",tmp->key,isRed(tmp) ? \"R\" : \"B\",tmp->parent == NULL ? -1 : tmp->parent->key);\n // printf(\"%p\\n\",tmp);\n if(tmp->left != NULL)\n stack[len++ % 1024] = tmp->left;\n\n if(tmp->right != NULL)\n stack[len++ % 1024] = tmp->right;\n }\n }\n\n}\n\n/**\n * 绕node左旋\n */\nnodep rotate_left(nodep node){\n if(node == NULL || node->right == NULL) return node; //左旋失败\n nodep right = node->right; //右节点\n if(node->parent != NULL){\n if(isLeft(node)){ //当前节点为父节点的左节点\n parent(node)->left = right;\n }else{\n parent(node)->right = right;\n }\n }\n right->parent = node->parent;\n node->right = right->left;\n if(right->left != NULL)\n right->left->parent = node;\n right->left = node; //将right的左节点设定为node\n node->parent = right; //将node的父节点设定为right\n\n return right;\n}\n\n/**\n * 绕node右旋\n */\nnodep rotate_right(nodep node){\n if(node == NULL || node->left == NULL) return node; //左旋失败\n nodep left = node->left; //右节点\n if(node->parent != NULL){\n if(isLeft(node)){ //当前节点为父节点的左节点\n parent(node)->left = left;\n }else{\n parent(node)->right = left;\n }\n }\n left->parent = node->parent;\n node->left = left->right;\n if(left->right != NULL)\n left->right->parent = node;\n left->right = node; //将right的左节点设定为node\n node->parent = left; //将node的父节点设定为right\n\n return left;\n}\n\n\nint insert(nodep* rootp,int key){\n return _insert(rootp,key,NULL,rootp);\n}\n\nint _insert(nodep* entryp,int key,nodep pa,nodep* rootp){\n if(rootp == NULL || entryp == NULL) return 0;\n nodep entry = *entryp;\n nodep root = *rootp;\n if(entry == NULL){\n entry = new_node(key);\n entry->parent = pa;\n *entryp = entry;\n insert_fix(rootp,entry);\n return 1;\n }\n\n if(entry->key == key) return 0; //插入失败\n if(entry->key > key) return _insert(&(entry->left),key,entry,rootp);\n return _insert(&(entry->right),key,entry,rootp);\n}\n\nint insert_fix(nodep* rootp,nodep node){\n if(rootp == NULL) return 0; //rootp不允许为NULL\n nodep root = *rootp;\n if(node != NULL && isRed(parent(node))){ //父节点为红色\n while(isRed(parent(node))){ //父节点为红色\n if(isRed(uncle(node))){ //叔父节点为红色\n parent(node)->color = uncle(node)->color = BLACK;\n node = grandparent(node);\n if(node == NULL)\n break;\n node->color = RED;\n }else{ //叔父节点为黑色或不存在,(不存在两层时,根节点是红色的情况)\n nodep p = parent(node);\n nodep g = grandparent(node);\n if(isLeft(p)){ //左孩子\n if(isRight(node)){ //LR\n rotate_left(node);\n }\n node = rotate_right(g);\n }else{\n if(isLeft(node)){\n rotate_right(node);\n }\n node = rotate_left(g);\n }\n //父节点变黑色,祖父节点变红色,旋转后,父节点作为根节点,此时黑节点高度不破坏\n p->color = BLACK;\n g->color = RED;\n }\n }\n }\n if(node != NULL){\n if(parent(node) == NULL) root = node; //设定新的根节点\n }\n root->color = BLACK; //根节点设定为黑色\n *rootp = root;\n return 1;\n}\n\n//删除指定的键\nint delete(nodep* rootp,int key){\n\n}"
},
{
"alpha_fraction": 0.6666666865348816,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 6,
"blob_id": "86acc2e537642579558e546895cd10c1a26b5fe9",
"content_id": "5c88b932f3c66969bf9f04ab58ae13fe5edde5cc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 23,
"license_type": "no_license",
"max_line_length": 7,
"num_lines": 2,
"path": "/编程开发/Mac/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# Mac\nMac使用笔记\n\n"
},
{
"alpha_fraction": 0.7885881662368774,
"alphanum_fraction": 0.7988295555114746,
"avg_line_length": 21.04838752746582,
"blob_id": "68cf4e23f8ffee337664e8a5a2a02b603944939a",
"content_id": "3b09ed21208e42213c04a15dffb466fd1aa916b1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3421,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 62,
"path": "/数据结构/树/红黑树/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 红黑树\n**红黑树性质:** \n1. 节点是红色或者黑色\n2. 根节点为黑色\n3. 叶子节点为黑色\n4. 红色节点的子节点为黑色\n5. 任何一条根节点到叶子节点路径上的黑色节点数相同 \n**节点的插入:** \n```\n新节点默认设置为红色\n如果树为空,则插入一个根节点,颜色设置为黑色。 \n如果当前插入节点的父节点为黑色,不进行调整\n如果当前插入节点的父节点为红色,根据红黑树的性质可知必定存在祖父节点。判断叔父节点是否为红色\n如果叔父节点为红色,则反转叔父,父,祖父节点的颜色,此事祖父节点为红色,将祖父节点视作新插入的节点进行判断。直到整体满足红黑树的性质 \n如果叔父节点为黑色,反转父,祖父节点的颜色。\n判断当前节点与祖父节点的关系:\n1. 左子节点的左子节点\n对祖父节点进行右旋,我们记为(LL)\n2. 右子节点的右子节点\n对祖父节点进行左旋,我们记为(RR)\n3. 左子节点的右子节点\n对当前节点的父节点进行左旋,对祖父节点进行右旋操作。这个过程我们记为(LR)\n4. 右子节点的左子节点\n对当前节点的父节点进行右旋,对祖父节点进行左旋操作。这个过程我们记为(RL)\n进过此四步后,树整体符合红黑树的性质\n```\n**节点删除:** \n```\n假设需要删除节点的前驱节点或后继节点为U,具体选择前驱或者后继看情况. \n前驱节点的左孩子为V或后继节点的右孩子为V,这里根据上一步确定. \n我们把U称为待删除节点.(在BST中我们一般不直接删除某个节点,而是删除其前驱或后继节点,然后将前驱或者后继节点的值传递给要删除的节点)\n待删除节点U的兄弟节点我们称之为S,S的左孩子我们称之为S_l,右孩子我们称为S_r.\nU的父节点我们称为P. \n接下来的情况中,我们分析前驱节点存在的情况.后继节点存在的情况同理. \n不同的情况分析:\n1. 当前节点为根节点且没有后代\n直接删除就ok了. \n2. U为红节点.\n此时,替死鬼节点U是红接地,我们可以确定P和V都是黑色的节点,由红黑树的性质可以,删除红节点不会破坏红黑树的性质,所以我们可以节点U删除,并把V提到U的位置. \n3. U为黑节点.\n此时删除该节点黑导致红黑树失衡.U一侧子树少了一层.\n我们分情况讨论:\n(1) S为红节点\n此时我们可以确定P和S_l,S_r都是黑节点.\n我们将右子树绕P左旋,并将S反转为黑色.此时以S为根的子树左右两侧重新平衡.\n(2) S为黑节点\n这里的情况比较复杂......\n我们继续分情况讨论:\na. S至少存在一个红色的子节点.\nai. RR.\n 此时我们将S_r设为黑色,并以P节点左旋.\naii. RL\n 此时我们先以绕S节点,将S_l右旋成RR的情况,再进行左旋\nb. S没有红色的子节点....\n此时S也没有红节点可借,此时我们需要讨论\n我们将S设置为红色.此时以P为根的子树少了一层.\nbi. P为黑节点\n此时我们需要重新调整以P为子树的上级树结构的平衡.\n这里我之前有点懵逼,现在突然想到,此时相当于P节点变成了之前的V节点,可以继续套用上面的分析操作,直到将树调整到平衡的状态**********\nbii. P为红节点\n我们可以将P设置为黑节点,此时以P为根的子树高度补齐.\n```\n"
},
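The rotations referenced throughout the insertion and deletion cases are the only structural operations. Here is a minimal C sketch of a left rotation (the right rotation is the mirror image); the `struct rbnode` layout and names are illustrative, not taken from the c/ or python/ implementations alongside this README.

```
#include <stddef.h>

struct rbnode { struct rbnode *left, *right, *parent; int key; };

/* Rotate left around x: x's right child y takes x's place, x becomes y's left child. */
static struct rbnode *rotate_left(struct rbnode **root, struct rbnode *x) {
    struct rbnode *y = x->right;
    x->right = y->left;                      /* y's left subtree moves under x */
    if (y->left) y->left->parent = x;
    y->parent = x->parent;                   /* y replaces x under x's parent */
    if (!x->parent)                *root = y;
    else if (x == x->parent->left) x->parent->left = y;
    else                           x->parent->right = y;
    y->left = x;                             /* x hangs off y's left side */
    x->parent = y;
    return y;
}
```

Note that a rotation by itself leaves black heights untouched; only the recoloring steps described above restore property 5.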
{
"alpha_fraction": 0.6042154431343079,
"alphanum_fraction": 0.6276346445083618,
"avg_line_length": 11.142857551574707,
"blob_id": "9975013ae66bf8f5c755793d487ab87c981f0b8d",
"content_id": "3a9ebaa98c90d4a1474d9973f15d68b06d3c5da1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 943,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 35,
"path": "/外语/韩语/语法/2019-10-24.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 基本结构\n1. 主谓\n아이가 운다. 小孩哭了 \n꽃이 핀다. 花开了 \n2. 主宾谓\n학생이 편지를 쓴다. 学生写信 \n그가 선물을 산다. 他买礼物 \n## 语序特征\n1. 修饰语 被修饰语\n새 차新车 \n내가 사랑하 여자.我心爱的女人 \n준호의 책. 准浩的书 \n\n준호가 천천히 걷는다.准浩慢慢的走 \n영미는 무척 아름답다.英美非常漂亮 \n철수는 매우 빠르게 달린다.哲淑跑的很快. \n\n2. 名词 助词\n철수가 영희를 길에서 만났다. \n\n3. 自立动词 辅助动词\n나는 문을 열어 보았다. \n바쁘실 텐데 와 주셔서 감사합니다. \n\n## 基本句型\n1. 主谓\n꽃이 핀다.\n2. 主状谓\n영미가 의자에 앉았다.\n3. 主宾谓\n영미는 준호를 사랑한다.\n4. 主补谓\n준호는 어른이 되었다.准浩是成年人了. \n5. 主宾状谓\n영미는 준호를 천재로 여긴다.英美觉得准浩是个天才. \n"
},
{
"alpha_fraction": 0.4079528748989105,
"alphanum_fraction": 0.4418262243270874,
"avg_line_length": 20.90322494506836,
"blob_id": "44c8aada5e14a6c3baad5e43ab77ec16da194273",
"content_id": "dc58f1bdece13872e6c5cbcd1fbec99407e9cb70",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 703,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 31,
"path": "/算法/排序算法/奇偶排序/c/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#include<stdio.h>\n\n#define swap(a,b) {int tmp=a;a=b;b=tmp;}\n\nvoid oe_sort(int arr[],int size){\n char sorted = 0; //有序标志\n int i,base = 0;\n while(!sorted){\n sorted = 1; //有序\n for(i=base;i<size-1;i+=2){\n if(arr[i] > arr[i+1]){\n swap(arr[i],arr[i+1]);\n sorted = 0;\n }\n }\n base = 1 - base;\n }\n}\n\nint main(){\n int arr[] = {\n 1,3,4,0,0,5,8,5,9,0\n };\n int size = sizeof(arr) / sizeof(arr[0]);\n int i;\n puts(\"排序前\");for(i=0;i<size;i++)printf(\"%d \",arr[i]);puts(\"\");\n oe_sort(arr,size);\n puts(\"排序后\");for(i=0;i<size;i++)printf(\"%d \",arr[i]);puts(\"\");\n\n return 0;\n}\n"
},
{
"alpha_fraction": 0.5481927990913391,
"alphanum_fraction": 0.5722891688346863,
"avg_line_length": 34.53571319580078,
"blob_id": "5758e3423f486875a9bf5bc70ed2f1e1544e4aef",
"content_id": "aa636135044bce4117fecb8ab0f009a49e12ff7b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1754,
"license_type": "no_license",
"max_line_length": 142,
"num_lines": 28,
"path": "/数学/高等数学/函数的性质.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 函数的几种性质\n## 1. 函数的有界性 \n设函数$$f(x)$$的定义域为$D$,数集$$X \\subset D$$.如果存在数$K_1$,使得\n\t\t$$f(x) \\le K_1$$\n对任一$x \\in X$都成立,那么称$f(x)$在$X$上有上界,而$K_1$称为$f(x)$在$X$上的一个上界.如果存在数$K_2$,使得\n\t\t$f(x) \\ge K_2$\n对任一$x \\in X$都成立,那么称$f(x)$在$X$上有下界,而$K_2$称为$f(x)$在$X$上的一个下界.如果存在一个正数$M$,使得\n\t\t$|f(x)| \\le M$\n对于任一$x \\in X$都成立,那么称$f(x)$在$X$上有界,如果这样的$M$不存在,就称$f(x)$在$X$上无界;也就是说,如果对于任何正数M,总存在$x_1 \\in X$,使得$|f(x_1) \\gt M|$,那么函数$f(x)$在$X$上无界.单有上界或者下界都算无界.\n\n## 2. 函数的单调性 \n设函数$f(x)$的定义域为$D$,如果区间$I \\subset D$,如果对于区间$I$上任意两点$x_1$及$x_2$,当$x_1 \\lt x_2$时,恒有 \n\t\t$f(x_1) \\lt f(x_2)$\n则称$f(x)$在区间$I$上单调递增;如果对于区间$I$上任意两点$x_1$及$x_2$,当$x_1 \\lt x_2$时,恒有 \n\t\t$f(x_1) \\gt f(x_2)$\n则称$f(x)$在区间$I$上是单调减少的.单调递增或单调递减的函数统称为单调函数. \n## 3. 函数的奇偶性 \n设函数$f(x)$的定义域$D$关于原点对称.如果对于任一$x \\in D$, \n\t\t$f(-x)=f(x)$\n则称$f(x)$为偶函数,如果对于任一$x \\in D$, \n\t\t$f(-x)=-f(x)$\n恒成立,则称$f(x)$为奇函数. \n偶函数关于$y$轴对称,奇函数关于原点对称. \n\n## 4.函数的周期性 \n设函数$f(x)$的定义域为$D$,如果存在一个正数$l$,使得任一$x \\in D$有$x \\pm l \\in D$,且 \n\t\t$f(x+l) = f(x)$\n恒成立,那么称$f(x)$为一个周期函数,$l$称为$f(x)$的周期,通常我们说的周期指的是函数的最小正周期. "
},
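As an added illustration of the boundedness definition (not part of the original note): $\sin x$ is bounded on $\mathbb{R}$ since $M=1$ works, while $f(x)=1/x$ is unbounded on $(0,1)$, matching the negated condition above.

```
% \sin x is bounded: |\sin x| \le 1 for every real x, so M = 1 suffices.
% f(x) = 1/x is unbounded on (0,1): for any M > 0, take x_1 = 1/(M+1) \in (0,1);
% then |f(x_1)| = M + 1 > M, which is exactly the negation of boundedness.
\[
  |\sin x| \le 1 \quad (x \in \mathbb{R}), \qquad
  \left| f(x_1) \right| = M + 1 > M \ \text{ for } f(x) = \tfrac{1}{x},\ x_1 = \tfrac{1}{M+1}.
\]
```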
{
"alpha_fraction": 0.6000000238418579,
"alphanum_fraction": 0.6000000238418579,
"avg_line_length": 4,
"blob_id": "a1cd1b50337cc63e756aabf7b993b713f852e1bd",
"content_id": "e2040fa165e7c88e2d1cee27e435871921863297",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 22,
"license_type": "no_license",
"max_line_length": 4,
"num_lines": 2,
"path": "/外语/韩语/书籍/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 书籍\n韩语书籍\n"
},
{
"alpha_fraction": 0.7661691308021545,
"alphanum_fraction": 0.7810945510864258,
"avg_line_length": 38.79999923706055,
"blob_id": "85253e75457d057f9ce3616fe97b64287de62ae1",
"content_id": "08f0e2fe8a575cbfbdc81df690e52447e9e49c6a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 303,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 5,
"path": "/DailyCodingProblems/388_cookie_Airtable/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 388_cookie_Airtable\nThis problem was asked by Airtable. \nHow would you explain web cookies to someone non-technical? \n**思路:** \n我觉得cookie可以类比成我们的身份证,身份证是政府(server)放在我们这里的可以用来识别我们(client)身份的一种工具。 \n"
},
{
"alpha_fraction": 0.4644886255264282,
"alphanum_fraction": 0.6633522510528564,
"avg_line_length": 26,
"blob_id": "adf6c6077ed959aa1961b881e4d0d23d158969e6",
"content_id": "09721cd31f091bc2f00d61bb550ec74fc39fb298",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1336,
"license_type": "no_license",
"max_line_length": 272,
"num_lines": 26,
"path": "/算法/位运算/异或和/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 按位异或的\n**题目**: \n计算1^2^3^....^n之后的结果. \n**思路**: \n这个题目直接用遍历的话,也是可以的,但是由于n的大小不确定,所以时间复杂度为O(N),在这个题目中其实还是有可优化的空间的. \n我们列一下前次运算: \n```\n1 0001 ->0001 1\n2 0010 ->0011 3\n3 0011 ->0000 0\n4 0100 ->0100 4\n5 0101 ->0001 1\n6 0110 ->0111 7\n7 0111 ->0000 0\n8 1000 ->1000 8\n```\n仔细观察我们就能发现,在这些数字中,每四个数字能构成一次比较有规律的循环. \n我们单独提出低二位(4个数字作为一个分组),每四个数字会依次出现00,01,10,11,当数字最后两位为01时,我们能知道前面的所有数字必定出现了偶数次(除去低两位的所有二进制都是4的倍数次才会改变),当低二位为00时,表示新进了以为,此时我们应该研究11时的情况,除去低二位,我们知道除去低二位的所有的数字必定已经出现了偶数次(否则不可能出现低二位的11),所以高位为0,低二位一次00^01^10^11 == 0,即低二位为11时,结果为0,所以当n低二位为00是,结果为0^n==n,同理当低二位为10时,结果为1^n == n+1. \n故我们可以列出所有情况: \n```\n00 n\n01 1\n10 n+1\n11 0\n```\n11-推导出->00-推导出->01-推导出->10 \n"
},
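A constant-time C sketch of the `n % 4` table derived above; the function name `xor_upto` is illustrative.

```
#include <stdio.h>

/* XOR of 1..n in O(1), using the table: 00 -> n, 01 -> 1, 10 -> n+1, 11 -> 0. */
unsigned xor_upto(unsigned n) {
    switch (n % 4) {
        case 0:  return n;
        case 1:  return 1;
        case 2:  return n + 1;
        default: return 0;
    }
}

int main(void) {
    unsigned x = 0, n;
    for (n = 1; n <= 8; n++) {   /* cross-check against the O(N) loop */
        x ^= n;
        printf("%u: %u %u\n", n, x, xor_upto(n));
    }
    return 0;
}
```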
{
"alpha_fraction": 0.8026315569877625,
"alphanum_fraction": 0.8157894611358643,
"avg_line_length": 23.66666603088379,
"blob_id": "c8937638f99a115ff54a2b1f4213dc4887e6e5d9",
"content_id": "34ac6b38807ea2b757808f1e7dce5d498c386808",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 180,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 3,
"path": "/数据结构/树/AVL树/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# AVL树\nAVL树是一棵高度平衡的二叉搜索树(BST),其任意节点的左右子树高度差不大于1. \n我觉得最核心的部分在于数的树的自我调整部分. \n"
},
{
"alpha_fraction": 0.5643776655197144,
"alphanum_fraction": 0.6974248886108398,
"avg_line_length": 23.526315689086914,
"blob_id": "d3414887861c71a6d06694fa4b0a84928c6d78ae",
"content_id": "bf7c7f0d3521f4f6cb27a7097d16003f965cac08",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 988,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 19,
"path": "/算法/排序算法/计数排序/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 计数排序\n基数排序是一个典型的用空间换时间的算法,在需要排序的区间范围不大的时候.其效率远高于比较排序(快速排序等) \n思路:\n给定一个可能容纳所有元素的缓存空间,初始化所有值为0,遍历待排序数组,每遍历一个数字,在数字对应的下标元素值+1.遍历完成待排序数组后,我们再遍历一遍缓存空间,想待排数组放入缓存数组下标对应的值的个数的下标.这句话说得有点糊涂,这里举个例子就懂了. \ne.g. \n`1,3,4,0,0,5,8,5,9,0`\n申请一个临时空间,用于存储待排数组中数字出现的次数 \n```\n0 1 2 3 4 5 6 7 8 9 (申请的临时空间的下标值)\n0 0 0 0 0 0 0 0 0 0\n``` \n按照上面的方法遍历:(申请了十个值) \n```\n0 1 2 3 4 5 6 7 8 9\n3 1 0 1 1 2 0 0 1 1\n``` \n下标对应待排数组中的数字,里面的值代表对应数字出现的次数 \n从左到右遍历缓存数组即得到排好的数据: \n```0 0 0 1 3 4 5 5 8 9```\n"
},
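A minimal C sketch of the two passes described above, assuming non-negative values below a known bound; `count_sort` and `RANGE` are illustrative names (the implementation next to this README may differ).

```
#include <stdio.h>
#include <string.h>

#define RANGE 10  /* exclusive upper bound on the values */

/* Counting sort for ints in [0, RANGE): tally, then write back in index order. */
void count_sort(int arr[], int n) {
    int count[RANGE], i, j, k = 0;
    memset(count, 0, sizeof(count));
    for (i = 0; i < n; i++) count[arr[i]]++;      /* tally pass */
    for (i = 0; i < RANGE; i++)                   /* write-back pass */
        for (j = 0; j < count[i]; j++) arr[k++] = i;
}

int main(void) {
    int a[] = {1, 3, 4, 0, 0, 5, 8, 5, 9, 0};
    int n = sizeof(a) / sizeof(a[0]), i;
    count_sort(a, n);
    for (i = 0; i < n; i++) printf("%d ", a[i]);  /* 0 0 0 1 3 4 5 5 8 9 */
    puts("");
    return 0;
}
```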
{
"alpha_fraction": 0.811965823173523,
"alphanum_fraction": 0.8148148059844971,
"avg_line_length": 28.16666603088379,
"blob_id": "a8e9cefc1375b23e1ae73a13999609768e97496a",
"content_id": "28f46f2d4b8245da3ee6860094a16abff8096781",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1842,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 24,
"path": "/编程开发/数据库/分库分表.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 分库分表\n分库分表是为了解决由于数据量过大而导致数据库性能降低的问题,将原来独立的数据库,数据表拆分为若干数据库数据表,降低单一数据库,数据表的压力. \n## 分库分表的方式:\n### 垂直分表\n1. 将原来一个表的字段拆分到不同的表中,减小锁表的几率,减小读写冲突的几率.\n2. 可以将某些热门字段单独提取出来,是的这些字段操作的效率不至于被其他无关信息拖累. \n### 垂直分库\n为原来处于一个数据库中的不同表创建不同的数据库,使得各个数据库的分工更明确. \n### 水平分表\n按照一定的规则,将原来一个数据表中的数据拆分到不同的数据表中,水平分表不会改变表的结构. \n### 水平分库\n按照一定的规则,将数据库中的表中的数据放到不同的数据库中.水平分库也不会该表数据库的结构. \n## 分库分表带来的问题\n### 事务一致性问题\n分库分表可能将数据放在不同的服务器上,此时不可避免的就需要将事务一致性的要求置于考虑范围之内.\n### 跨节点关联查询\n在没有分库前,可以很方便的进行关联查询,但是在分库后,由于两个表不在一个数据库,甚至没有在一个服务器,此时无法进行关联查询.\n需要解决这个问题,可以将获取的数据进行二次拼接.\n### 跨节点分页,排序函数\n与上面的问题相同的是,在分库分表之后,分页,排序这些操作也需要对获取的数据进行二次操作. \n### 主键重复\n分库分表使得原有的自增长主键消除,为了解决这个问题,需要设计算法,生成全局主键.\n### 高频,数据量小的表\n比如数据字典,参数表,地理位置表,可以在每个数据库中各保存一份,更新时同时发送到所有的分库进行更新. \n"
},
{
"alpha_fraction": 0.7172130942344666,
"alphanum_fraction": 0.7827869057655334,
"avg_line_length": 39.66666793823242,
"blob_id": "a65401a66fe68f4fc7a0a9c2c41f493c0a9e81ee",
"content_id": "3cce27fd6298084397e2d329f6d9d7f3c8943b88",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 432,
"license_type": "no_license",
"max_line_length": 142,
"num_lines": 6,
"path": "/编程开发/数据库/mysql/远程无法连接mysql.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 问题描述\n由于在执行mysql_secure_installation时,选择了禁止远程访问,导致远程无法访问. \n这个问题之前已经遇到过,所以很快就有了方案,这里记录一下: \n```\n进入mysql配置目录下的/etc/mysql/mariadb.conf.d目录,编辑50-server.cnf文件,将其中的bind-address = 127.0.0.1改成bind-address = 0.0.0.0即可,这里的0.0.0.0也可以改成能被外部访问的其他本机ip\n```\n"
},
{
"alpha_fraction": 0.6976743936538696,
"alphanum_fraction": 0.7151162624359131,
"avg_line_length": 42,
"blob_id": "0bca4f3a3e195f32ef1e0ace04ca44a18155e731",
"content_id": "4f687ba14baf2ed2e3affb67614d953feb54d932",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 284,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 4,
"path": "/编程开发/Linux/常见问题/ssh免密登陆配置无效解决方案.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# SSH免密登陆配置正常但是无法登陆的问题 \n1. 查看安全日志`cat /var/log/secure`,定位到是因为autothorized_keys文件及其所在目录的问题。 \n2. 修改权限:`chmod g-w ~/.ssh/authorized_keys`,`chmod g-w ~/.ssh/` \n3. 再次登陆,此时成功\n"
},
{
"alpha_fraction": 0.6785714030265808,
"alphanum_fraction": 0.6785714030265808,
"avg_line_length": 12,
"blob_id": "45f39859417f37c9ec8f312181a8ec33d7dbc746",
"content_id": "2e7c2001457a3d4e1d81a62afdd92032c6dc17e7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 58,
"license_type": "no_license",
"max_line_length": 14,
"num_lines": 2,
"path": "/算法/排序算法/猴子排序/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 猴子排序(Bogo排序)\n来不及解释了,快上车 \n"
},
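Since the README leaves it unexplained: bogo sort simply shuffles until the array happens to be sorted. A hedged C sketch follows; `bogo_sort` and `is_sorted` are illustrative names, and the shuffle ignores modulo bias.

```
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static int is_sorted(const int a[], int n) {
    for (int i = 1; i < n; i++) if (a[i - 1] > a[i]) return 0;
    return 1;
}

/* Fisher-Yates shuffle until the array is sorted; expected O(n * n!) time. */
void bogo_sort(int a[], int n) {
    while (!is_sorted(a, n)) {
        for (int i = n - 1; i > 0; i--) {
            int j = rand() % (i + 1);
            int t = a[i]; a[i] = a[j]; a[j] = t;
        }
    }
}

int main(void) {
    int a[] = {3, 1, 2};
    srand((unsigned)time(NULL));
    bogo_sort(a, 3);
    printf("%d %d %d\n", a[0], a[1], a[2]);  /* eventually prints 1 2 3 */
    return 0;
}
```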
{
"alpha_fraction": 0.5837320685386658,
"alphanum_fraction": 0.5933014154434204,
"avg_line_length": 18,
"blob_id": "dda295b8aedae3b2e24cac2a944d7a2fedc9f463",
"content_id": "d29b7e00ba3cefd5228c2245b61b0eedddd761ae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 441,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 11,
"path": "/外语/韩语/语法/에 갑시다.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# -에 갑시다 \n接在体词后面,惯用型.\n`에 가다`后面接尊敬阶共同式终结词尾\"-(으)ㅂ시다.\"表示与对方一起去哪里. \n可以推出一般形式:`ㅂ시다`((一起)...吧) \ne.g. \n```\n1)가배가 고픕니다.肚子饿了.\n나:아!그렇습니까?식당에 갑시다. 啊!是吗?(一起)去饭店(吃饭)吧 \n2)가:피곤합니다. 累了\n아!그렇습니까?쉽시다.啊!是吗?(一起)休息一下吧\n```\n"
},
{
"alpha_fraction": 0.41418564319610596,
"alphanum_fraction": 0.46234676241874695,
"avg_line_length": 24.954545974731445,
"blob_id": "2238b075b06d2c43041da17357aa2a7bf5ec2865",
"content_id": "3301962638938b49a0e8e52e2793e3b05183d9dd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1402,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 44,
"path": "/算法/排序算法/基数排序/c/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "/**\n * 至少需要分配10*size的空间,因为存在极端情况所有的数字都相同\n */\n#include<stdio.h>\n\nint tmp[10][10000]; //这里为了方便理解代码,设置了一个固定的空间的数组,每行第一个元素表示该行数据的数目,所以这里最多能排9999个数字\n\nint radix_sort(int arr[],int size){\n int ok = 0; //排序完成标志\n int radix;\n int i,j;\n for(i = 0; i < 10; i++) tmp[i][0] = 0; //计数归0\n for(radix = 1;ok == 0;radix *= 10){\n ok = 1; //假设排序完毕\n for(j = 0; j < size; j++){\n int left = arr[j]/radix;\n if(left >= 10) ok = 0; //存在没有除尽的数字,需要继续排序\n int idx = left%10; //下标\n int offset = ++tmp[idx][0]; //偏移\n tmp[idx][offset] = arr[j]; //将当前元素存入tmp中\n }\n\n //整理在tmp中的数据\n int idx = 0;\n for(i = 0; i < 10; i++){\n for(j = 1; j <= tmp[i][0]; j++) arr[idx++] = tmp[i][j];\n tmp[i][0] = 0;\n }\n }\n return 0;\n}\n\nint main(){\n int arr[] = {\n 1,3,4,0,0,5,8,5,9,0\n };\n\n int size = sizeof(arr)/sizeof(arr[0]);\n int i;\n printf(\"排序前:\");for(i = 0; i < size; i++) printf(\"%d \",arr[i]);puts(\"\");\n radix_sort(arr,size);\n printf(\"排序后:\");for(i = 0; i < size; i++) printf(\"%d \",arr[i]);puts(\"\");\n return 0;\n}\n"
},
{
"alpha_fraction": 0.7342767119407654,
"alphanum_fraction": 0.7452830076217651,
"avg_line_length": 24.11842155456543,
"blob_id": "244e8b08ba7e757ede34636dcfd50def1ad06b35",
"content_id": "355a645b6a63e18c00351b658ebefb4365a04253",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3020,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 76,
"path": "/编程开发/后端/Laravel/Laravel官方教程笔记/5.8/1. 开始/1. 安装.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# Laravel安装\n## 1. 服务器要求: \n如果没有使用`Homestead`,我们需要确保下面的这些软件及插件存在. \n- PHP>=7.1.3\n- BCMath扩展\n- Ctype扩展\n- JSON扩展\n- Mbstring扩展\n- Openssl扩展\n- PDO扩展\n- Tokenizer扩展\n- XML扩展\n## 2. 安装Laravel\n### 1. 通过Laravel Installer \n需要在系统中安装composer之后才能使用composer命令. \n```\ncomposer global require laravel/installer\n```\n将installer所在的目录加入PATH中. \n- macOS and GNU / Linux Distributions: $HOME/.composer/vendor/bin \n- Windows: %USERPROFILE%\\AppData\\Roaming\\Composer\\vendor\\bin \n### 2. 通过Composer直接创建 \n```\ncomposer create-project --prefer-dist laravel/larevel blog \"5.8.*\"\n下载5.8版本的laravel\n```\n### 3. 启动本地开发服务器 \n在laravel中,我们可以通过下面的命令调用php自带的开发服务器启动我们的项目. \n```\nphp artisan serve\n```\n其中artisan在laravel项目的根目录下. \n## 3. 配置\n### 1. 公共目录 \nlaravel是一个典型的单入口应用框架,其公共位置为laravel根目录下的public目录. \n其中存在一个index.php文件作为整个应用的入口. \n### 2. 配置文件 \nlaravel中所有的配置文件放在根目录下的config目录中.官方的每个配置文件都会有相应的注释. \n### 3. 目录权限 \n在安装好laravel之后,我们需要配置一些必要的权限,比如storage和bootstrap/cache权限.我们应该给这两个目录加上写权限. \n这点对初学者极其不友好..... \n### 4. application key\n对于composer正常安装的laravel,这个key一般都是安装好的,在有些情况下,这个key可能不存在 \n这个时候我们可以利用`php artisan key:generate`生成一个新的key. \n**如果这个key不存在,所有的用户会话及其他加密数据都将变得不安全**\n### 5. 附加配置 \n一般情况下,在下载好laravel并配置好权限之后,我们就可以直接使用了. \n但是在某些情况下,我们可能需要进行一些定制的配置,比如修改修改`config/app.php`中的`timezone`或者`locale`等配置. \n当然我们可能也需要定制配置下面的一些配置. \n- 缓存配置 \n- 数据库配置 \n- 会话配置 \n## 4. 在服务器端的配置 \n### 美化URL(URL静态化) \n#### 1. Apache \nlaravel包含了一个public/.htaccess文件用于提供一个不包含index.php在路径中的URL. \n如果我们的laravel运行在apache服务器中,我们首先需要确定mod_rewrite模块被启用. \n在.htaccess在apache中没有正常运行的时候,我们可以尝试下面的配置: \n```\nOptions +FollowSymLinks -Indexes\nRewriteEngine On\n\nRewriteCond %{HTTP:Authorization} .\nRewriteRule .* - [E=HTTP_AUTHORIZATION:%{HTTP:Authorization}]\n\nRewriteCond %{REQUEST_FILENAME} !-d\nRewriteCond %{REQUEST_FILENAME} !-f\nRewriteRule ^ index.php [L]\n```\n#### 2. Nginx \n我们可以使用下面的配置将所有的请求导入到`index.php`文件: \n```\nlocation / {\n try_files $uri $uri/ /index.php?$query_string;\n}\n```"
},
{
"alpha_fraction": 0.5130434632301331,
"alphanum_fraction": 0.533695638179779,
"avg_line_length": 24.55555534362793,
"blob_id": "151c93a32612acab40dc7169af67ea875ded97f6",
"content_id": "a34115fceb85c66a1026a6194bf31c643054d36f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 2346,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 72,
"path": "/leetcode/17-电话号码的字母组合/c/number.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#include<stdio.h>\n#include<string.h>\n#include<stdlib.h>\n\nchar letters[][5] = {\n \"abc\",\n \"def\",\n \"ghi\",\n \"jkl\",\n \"mno\",\n \"pqrs\",\n \"tuv\",\n \"wxyz\"\n};\n\n/**\n * numbers 输入的数字字符串\n * words 用于生成最后的字母组合的字符串\n * cur words当前位置的下标\n * ret 最后的结果二维数组\n * index 当前ret的下标指针\n **/\nvoid trip(char *numbers,char *words,int cur,char **ret,int *index){\n int i = *index,j;\n if(*numbers != '\\0'){ //numbers当前的字符不为尾部\n char idx = *numbers - '2'; //获取当前数字在letters数组中的下标\n for(j = 0; letters[idx][j]!='\\0'; j++){ //遍历当前数字按键的所拥有的字母\n words[cur] = letters[idx][j]; //保存当前字母到words的当前位置\n trip(numbers+1,words,cur+1,ret,index); //在当前字母的状态下,遍历下一个数字的所有情况\n }\n }else{ //已经到了numbers的尾部,此时生成了一个有效的组合\n words[cur] = '\\0'; //字符串尾部填0\n ret[i] = (char*)malloc(cur+1); //为当前的组合申请一块空间\n strcpy(ret[i],words); //将当前组合存放到刚刚申请的空间\n *index = i+1; //ret的下标加1\n //printf(\"%s\\n\",words);\n }\n}\n\n//拨号\nchar** dial(char *numbers,int *returnSize){\n if(numbers == NULL || numbers[0] == '\\0'){ //不存在numbers的情况\n *returnSize = 0;\n return NULL;\n }\n char word[1024];\n int l = strlen(numbers);//输入数字的长度\n int size = 1; //结果的总数\n int i;\n for(i = 0; numbers[i] != '\\0'; i++)\n size*=strlen(letters[numbers[i]-'2']); //获取所有可能的组合总数\n *returnSize = size; //将size赋给*returnSize\n char **ret = (char**)malloc(sizeof(char*)*size); //申请存放所有可能情况指针的空间\n int index = 0; //ret当前的下标\n trip(numbers,word,0,ret,&index);\n \n return ret;\n}\n\nint main(){\n int size = 0;\n char** ret = dial(\"94664\",&size); //传入94664\n int i;\n for(i = 0; i < size; i++){ //输出所有可能的结果\n printf(\"%s\\n\",ret[i]);\n free(ret[i]); //释放掉申请的空间\n ret[i] = NULL;\n }\n free(ret); //释放掉所有的情况\n ret = NULL;\n return 0;\n}\n"
},
{
"alpha_fraction": 0.4968602955341339,
"alphanum_fraction": 0.5141287446022034,
"avg_line_length": 23.037734985351562,
"blob_id": "459ed0a320f77ae88438df727c0447837b0a11e0",
"content_id": "373ed4cf71451e73b5ea107d1fcd56c8a77042de",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1298,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 53,
"path": "/leetcode/78-子集/c/code.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#if 0\n 这里是构造的leetcode形式的源代码\n#endif\n\n#include<stdio.h>\n#include<stdlib.h>\n\nint ones(int n){\n int count = 0;\n for(;n > 0;n&=n-1,count++);\n return count;\n}\n\nint **subsets(int *nums,int numsSize,int *returnSize,int **returnColumnSizes){\n int end = 1<<numsSize;\n int **ret = (int**)malloc(sizeof(int*)*end);\n *returnColumnSizes = (int*)malloc(sizeof(int)*end);\n int tmp;\n for(int begin = 0;begin < end;begin++){\n int cnt = ones(begin);\n (*returnColumnSizes)[begin] = cnt;\n if(cnt == 0) ret[begin] = NULL;\n else{\n ret[begin] = (int*)malloc(sizeof(int)*cnt);\n tmp = begin;\n for(int i=0,j=0;tmp > 0; tmp>>=1,i++){\n if(tmp&0x01) ret[begin][j++] = nums[i]; \n }\n }\n }\n *returnSize = end;\n return ret;\n}\n\nint main(){\n int nums[] = {\n 1,2,3\n };\n int size = sizeof(nums)/sizeof(nums[0]);\n int i,j;\n int returnSize;\n int *returnColSize = NULL;\n int **ret = subsets(nums,size,&returnSize,&returnColSize);\n printf(\"%d,%d\\n\",returnSize,returnColSize[0]);\n for(i = 0; i < returnSize; i++){\n for(j = 0; j < returnColSize[i]; j++){\n printf(\"%d \",ret[i][j]);\n }\n puts(\"\");\n }\n \n return 0;\n}\n"
},
{
"alpha_fraction": 0.6679920554161072,
"alphanum_fraction": 0.6739562749862671,
"avg_line_length": 49.29999923706055,
"blob_id": "e712c2156974d3929fb9d00a41e78979321fe13d",
"content_id": "1e189303b6c64b778ea420df3090fed21cf24ab8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 503,
"license_type": "no_license",
"max_line_length": 171,
"num_lines": 10,
"path": "/DailyCodingProblems/383_embolden_gusto/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 383_embolden_gusto\nThis problem was asked by Gusto.\n\nImplement the function `embolden(s, lst)` which takes in a string `s` and list of substrings `lst`, and wraps all substrings in `s` with an HTML bold tag `<b>` and `</b>`.\n\nIf two bold tags overlap or are contiguous, they should be merged.\n\nFor example, given `s = abcdefg` and `lst = [\"bc\", \"ef\"]`, return the string `a<b>bc</b>d<b>ef</b>g`.\n\nGiven `s = abcdefg` and `lst = [\"bcd\", \"def\"]`, return the string `a<b>bcdef</b>g`, since they overlap.\n"
},
{
"alpha_fraction": 0.7702702879905701,
"alphanum_fraction": 0.7702702879905701,
"avg_line_length": 17,
"blob_id": "20bd0c8f5af716f6bbffd5667b7e41e86532bf03",
"content_id": "89b1c236178a493312398d8f38a39937e78c3118",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 174,
"license_type": "no_license",
"max_line_length": 30,
"num_lines": 4,
"path": "/DailyCodingProblems/399_sum_same_value_Facebook/c/fenwick/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# fenwick\n树状数组解决思路. \n利用树状数组整理出整个序列的一棵二叉索引树(树状数组). \n在分别获取某段区间的和是否为目标值. \n"
},
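A minimal C sketch of the Fenwick-tree primitives the note relies on: a 1-based point update and prefix sum, with the interval sum obtained from two prefix queries. The size `N` and all names are illustrative; the implementation next to this README may differ.

```
#include <stdio.h>

#define N 16
long bit[N + 1];  /* 1-based binary indexed tree over a[1..N] */

void add(int i, long v) {          /* point update: a[i] += v */
    for (; i <= N; i += i & -i) bit[i] += v;
}

long prefix_sum(int i) {           /* sum of a[1..i] */
    long s = 0;
    for (; i > 0; i -= i & -i) s += bit[i];
    return s;
}

long range_sum(int l, int r) {     /* sum of a[l..r], compared against the target */
    return prefix_sum(r) - prefix_sum(l - 1);
}

int main(void) {
    int a[] = {0, 3, 1, 2, 3, 3};  /* a[1..5] */
    for (int i = 1; i <= 5; i++) add(i, a[i]);
    printf("%ld\n", range_sum(2, 4));  /* 1 + 2 + 3 = 6 */
    return 0;
}
```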
{
"alpha_fraction": 0.3610038757324219,
"alphanum_fraction": 0.37451738119125366,
"avg_line_length": 18.923076629638672,
"blob_id": "6cb16fb551e6f9a4b525e92d9ab23b7937ecc7ff",
"content_id": "10a1119dc31bc68996e77053bf924fe6d0163823",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 574,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 26,
"path": "/算法/其他/最长连续非递减序列/c/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "/**\n * 连续1次的我们看成0次,这里需要该的话,只需要把max的初始值改为1即可\n **/\n#include<stdio.h>\n#include<limits.h>\n\nint main(){\n int N;\n while(~scanf(\"%d\",&N)){\n int max = 0;\n int cur = INT_MAX;\n int last;\n int move = 1;\n for(int i = 0; i < N; i++){\n last = cur;\n scanf(\"%d\",&cur);\n if(cur >= last){\n move++;\n if(move > max) max = move;\n }else{\n move = 1;\n }\n }\n printf(\"%d\\n\",max);\n }\n}\n"
},
{
"alpha_fraction": 0.47860991954803467,
"alphanum_fraction": 0.48245903849601746,
"avg_line_length": 28.911184310913086,
"blob_id": "5d2fd3a21b27fbf3d3d2104520d9402a06db94c9",
"content_id": "f100834806355b6a887d81ab88d7f904ea9333dd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 9639,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 304,
"path": "/数据结构/表/跳跃表/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#include<stdio.h>\n#include<stdlib.h>\n#include<sys/time.h>\n#include<time.h>\n\n#define SIZE 1000\nint arr[SIZE];\n\ntypedef struct Data{\n int val;\n}Data,*pdata; //data类型\n\ntypedef struct Node{\n int key; //键\n pdata data; //数据\n struct Node * next;\n struct Node * next_level;\n}Node,*pnode; //普通节点\n\ntypedef struct Head{\n pnode head; //头结点\n struct Head * prev_level; //前驱指针\n struct Head * next_level; //后继指针\n}Head,*phead; //头结点\n\ntypedef struct SkipList{\n phead head;\n phead tail;\n int level;\n}SkipList,*pskiplist;\n\n//创建一个普通节点\npnode node(int key,pdata data){\n pnode ret = (pnode)malloc(sizeof(Node));\n ret->key = key;\n ret->next = ret->next_level = NULL;\n ret->data = data;\n return ret;\n}\n\n//创建新的一层\nvoid create_new_level(pskiplist* plist,pnode next_level,int key,pdata data){\n pskiplist list = *plist;\n phead head = (phead)malloc(sizeof(Head));\n head->prev_level = NULL;\n pnode root = node(key,data);\n root->next_level = next_level;\n head->head = root;\n if(list == NULL){ //空表\n list = (pskiplist)malloc(sizeof(SkipList));\n list->head = list->tail = head;\n list->level = 1;\n head->next_level = NULL;\n *plist = list;\n }else{\n head->next_level = list->head;\n list->head->prev_level = head;\n list->head = head;\n list->level++;\n }\n\n}\n\nint should_up(){\n struct timeval tv;\n gettimeofday(&tv,NULL);\n return tv.tv_usec & 1;\n}\n\npnode _insert(pskiplist * pList,phead * pHead,pnode * pCur,int key,int val){\n pskiplist list = * pList;\n if(list == NULL){\n pdata data = (pdata)malloc(sizeof(Data));\n data->val = val;\n create_new_level(pList,NULL,key,data); //创建根节点\n return NULL;\n }\n phead head = (pHead == NULL?NULL:*pHead);\n pnode cur = (pCur == NULL ? NULL : *pCur);\n if(head == NULL || cur == NULL){ //头指针或当前节点为空\n return NULL;\n }\n\n if(cur->key == key){ //已经存在该键值\n cur->data->val = val;\n return NULL;\n }else if(cur->key < key){ //当前键值小于需要查找的键\n //printf(\"找%d\\n\",key);\n for(;cur->next != NULL && cur->next->key <= key;cur = cur->next);\n if(cur->key == key){ //存在该节点\n cur->data->val = val;\n return NULL;\n }else{ //尾部或者到临界节点\n pnode ret = NULL;\n if(cur->next_level == NULL){\n //printf(\"最后一层停在:%d\\n\",cur->key);\n pdata data = (pdata)malloc(sizeof(Data));\n data->val = val;\n pnode root = node(key,data);\n root->next = cur->next;\n cur->next = root;\n if(should_up()){\n if(head->prev_level == NULL) //第一层\n create_new_level(pList,root,key,root->data);\n else\n return root;\n }\n return NULL;\n }\n\n ret = _insert(pList,&(head->next_level),&(cur->next_level),key,val);\n if(ret != NULL){ //存在返回值\n pnode root = node(key,ret->data);//当前节点的下一节点\n root->next = cur->next;\n root->next_level = ret;\n cur->next = root;\n if(should_up()){\n if(head->prev_level == NULL) //第一层\n create_new_level(pList,root,key,ret->data);\n else\n return root;\n }\n }\n return NULL;\n }\n }else{ //当前键值大于需要查找的键\n //printf(\"最左端找%d\\n\",key);\n pnode ret = NULL;\n if(head->next_level == NULL){\n pdata data = (pdata)malloc(sizeof(Data));\n data->val = val;\n ret = node(key,data);\n ret->next = head->head;\n }else{\n ret = _insert(pList,&(head->next_level),&(head->next_level->head),key,val);\n if(ret != NULL){\n //printf(\"添加:%d\\n\",ret->key);\n pnode tmp = ret;\n ret = node(key,ret->data);\n ret->next = head->head;\n ret->next_level = tmp;\n }\n }\n\n if(head->head == cur && ret != NULL){ //当前为第一个元素\n head->head = ret;\n }\n if(should_up()){\n if(head->prev_level == NULL && ret != NULL)\n create_new_level(pList,ret,key,ret->data);\n else\n return ret;\n }\n }\n return 
NULL;\n}\n\nvoid insert(pskiplist * pList,int key,int val){\n pskiplist list = *pList;\n phead * pHead = list == NULL ? NULL:&(list->head);\n pnode * pCur = pHead == NULL ? NULL : &((*pHead)->head);\n _insert(pList,pHead,pCur,key,val);\n}\n\nvoid visit(pskiplist list){\n if(list == NULL) return;\n phead head = list->head;\n for(;head != NULL; head = head->next_level){\n pnode tmp = NULL;\n for(tmp = head->head; tmp != NULL; tmp = tmp->next){\n //printf(\"[%d]%d\",tmp->key,tmp->data->val);\n printf(\"[%d]\",tmp->key);\n //printf(\"[%d]%p\",tmp->key,tmp->next_level,tmp->data->val);\n if(tmp->next) printf(\"->\");\n }\n puts(\"\");\n }\n}\n\nvoid gen(int arr[],int size,int scale){\n int i;\n for(i = 0; i < size; i++){\n arr[i] = rand() % scale;\n }\n}\n\nint search(pskiplist list,int key,int * ret){\n if(list == NULL || list->head == NULL) return 0;\n phead head = list->head;\n pnode tmp = head->head;\n while(tmp != NULL){\n if(key == tmp->key){\n *ret = tmp->data->val;\n return 1;\n }else if(key < tmp->key || tmp->next == NULL || tmp->next->key > key){\n if(head->head == tmp){\n head = head->next_level;\n tmp = head == NULL ? NULL:head->head;\n }else{\n head = head->next_level;\n tmp = tmp->next_level;\n }\n }else{\n tmp = tmp->next;\n }\n }\n return 0;\n}\n\n//删除元素\nint _del(pskiplist *pList,phead *pHead,pnode * proot,int key){\n //如果为首元素,则将首元素设置为key元素的下一个元素\n pskiplist list = * pList;\n if(list == NULL) return 0; //删除失败\n phead head = pHead == NULL ? NULL : *pHead;\n if(head == NULL) return 0; //删除失败\n pnode tmp = proot == NULL ? NULL : *proot;\n if(tmp == NULL) return 0; //没有找到\n if(head->head->key == key){ //第一个元素\n pnode next = head->head->next;\n if(head->next_level == NULL)\n free(head->head->data);\n free(head->head);\n head->head = next;\n if(head->head == NULL){ //当前层最后一个元素\n phead nextHead = list->head->next_level;\n if(nextHead)\n nextHead->prev_level = NULL;\n free(list->head);\n list->head = nextHead;\n list->level--;\n }\n\n if(list->head == NULL){ //删除该表\n free(list);\n *pList = NULL;\n }\n if(head->next_level){ //非最后一层\n return _del(pList,&(head->next_level),&(head->next_level->head),key); //递归删除\n }\n return 1;\n }else if(head->head->key > key){ //首元素过大\n if(head->next_level == NULL) return 0;\n return _del(pList,&(head->next_level),&(head->next_level->head),key);\n }else{ //存在下一个节点\n if(tmp->next != NULL){ //下一个元素存在\n for(;tmp->next != NULL && tmp->next->key < key;tmp = tmp->next);\n\n printf(\"Find %d,Cur: %d\\n\",key,tmp->key);\n if(tmp->next == NULL){ //到达队尾\n if(tmp->next_level == NULL) return 0; //删除失败\n return _del(pList,&(head->next_level),&(tmp->next_level),key); //没有到达最后一层,查找下一层\n }else if(tmp->next->key == key){ //下一个元素为key\n pnode keyNode = tmp->next;\n tmp->next = keyNode->next;\n if(tmp->next == NULL)\n free(keyNode->data);\n free(keyNode);\n return _del(pList,&(head->next_level),&(tmp->next_level),key);\n }else if(tmp == head->head){//第一个元素\n return _del(pList,&(head->next_level),&(head->next_level->head),key);\n }else{ //后面的节点比需要找的节点大\n return _del(pList,&(head->next_level),&(tmp->next_level),key);\n }\n }else if(tmp == head->head){\n return _del(pList,&(head->next_level),&(head->next_level->head),key);\n }\n }\n return -1;\n}\n\nint del(pskiplist *pList,int key){\n pskiplist list = *pList;\n phead * pHead = (list == NULL ? NULL : &(list->head));\n pnode * pRoot = (pHead == NULL ? 
NULL :&(list->head->head));\n return _del(pList,pHead,pRoot,key);\n}\n\nint main(){\n srand(time(NULL));\n gen(arr,SIZE,40);\n int i;\n pskiplist list = NULL;\n int d[] = {\n 1,3,4,0,0,5,8,5,9,0\n };\n int size = sizeof(d) / sizeof(d[0]);\n for(i = 0; i < size; i++){\n insert(&list,d[i],i);\n //visit(list);\n //puts(\"\");\n }\n del(&list,3);\n visit(list);\n int ret = 0;\n int key = 4;\n if(search(list,key,&ret)){\n printf(\"key:%d=>val:%d\\n\",key,ret);\n }else{\n printf(\"没有找到:%d\\n\",key);\n }\n\n return 0;\n}\n"
},
{
"alpha_fraction": 0.5085790753364563,
"alphanum_fraction": 0.5237712264060974,
"avg_line_length": 27.04511260986328,
"blob_id": "398a8901e4452fa374e403170dd9bb169deb3894",
"content_id": "b1356f76497d5b729c59e2f77033ba6185107a88",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 11554,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 399,
"path": "/数据结构/树/哈夫曼树/term2/huffman.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#include \"huffman.h\"\n\nhuffnodep huffnode(int key,int weight){\n huffnodep ret = (huffnodep)malloc(sizeof(HuffNode));\n ret->key = key;\n ret->weight = weight;\n ret->left = ret->right = NULL;\n\n return ret;\n}\n\nhufflistp hufflist(){\n hufflistp ret = (hufflistp)malloc(sizeof(HuffList));\n ret->head = NULL;\n memset(ret->keys,0,sizeof(ret->keys[0])*256);\n ret->size = 0;\n\n return ret;\n}\n\nBoolean insertHuffNode(hufflistp list,huffnodep node){\n if(list == NULL || node == NULL || node->weight <= -256) return FALSE;\n hufflistnodep cur = list->head;\n hufflistnodep* rootp = &(list->head);\n hufflistnodep* last = NULL; //当前指针的前驱指针\n hufflistnodep tmp = (hufflistnodep)malloc(sizeof(HuffListNode));\n tmp->node = node;\n tmp->next = NULL;\n if(node->key >= 0 && node->key < 256){\n list->keys[node->key] = node->weight; //添加key到keys字典\n }\n list->size++;\n\n for(;cur != NULL && cur->node->weight < node->weight; cur = cur->next){\n last = rootp;\n rootp = &(cur->next);\n }\n\n tmp->next = cur;\n if(last == NULL){ //第一个元素\n list->head = tmp;\n }else{ //向当前节点前面插入tmp节点\n (*last)->next = tmp;\n }\n\n return TRUE;\n}\n\nhuffnodep shiftHuffNode(hufflistp list){\n if(list == NULL || list->head == NULL) return NULL;\n huffnodep ret = list->head->node;\n hufflistnodep next = list->head->next;\n free(list->head);\n list->head = next;\n list->size--;\n\n return ret;\n}\n\n//通过huffman list构建\nhufftreep hufftree(hufflistp list){\n hufftreep tree = (hufftreep)malloc(sizeof(HuffTree));\n tree->root = NULL;\n tree->size = 0;\n memset(tree->codes,0,sizeof(tree->codes));\n\n huffnodep a = NULL;\n huffnodep b = NULL;\n huffnodep c = NULL;\n tree->size = 2 * list->size - 1;\n while(list->size > 1){ //hufflistp长度大于1\n a = shiftHuffNode(list);\n b = shiftHuffNode(list);\n c = huffnode(-256,a->weight+b->weight); //新的节点\n c->left = a;\n c->right = b;\n insertHuffNode(list,c); //将c压回list\n }\n tree->root = c;\n\n //生成所有key的huffman编码\n char codes[8092]; //huffman编辑路径\n\n return genhuffcodes(tree,tree->root,codes,0);\n}\n\n//获取文件内容的BUF\nhuffbufp getFileBuf(const char* filename){\n FILE* fp = fopen(filename,\"r\");\n if(fp == NULL) return NULL;\n fseek(fp,0L,SEEK_END);\n int len = ftell(fp);\n rewind(fp); //重设\n huffbufp ret = (huffbufp)malloc(sizeof(HuffBuf));\n ret->code = (char*)malloc(len+1);\n ret->size = len;\n fread(ret->code,1,len,fp);\n fclose(fp);\n\n return ret;\n}\n\nhufftreep genhuffcodes(hufftreep tree,huffnodep node,char codes[],int idx){\n if(tree == NULL || node == NULL){ //到达底部\n return NULL;\n }\n\n if(node->left == NULL && node->right == NULL){ //叶子节点\n int key = node->key;\n huffcodep code = (huffcodep)malloc(sizeof(HuffCode));\n code->code = (char*)malloc(idx+1);\n code->size = idx;\n memcpy(code->code,codes,code->size);\n code->code[code->size] = '\\0';\n tree->codes[key] = code;\n }{\n codes[idx] = '1'; //右\n genhuffcodes(tree,node->right,codes,idx+1);\n codes[idx] = '0'; //左\n genhuffcodes(tree,node->left,codes,idx+1);\n }\n\n return tree;\n}\n\n//通过文件生成huffman list\nhufflistp getHuffListByFile(const char* filename){\n huffbufp buf = getFileBuf(filename);\n if(buf == NULL) return NULL;\n\n hufflistp list = getHuffListByBuf(buf);\n free(buf->code);\n buf->code = NULL;\n free(buf);\n buf = NULL;\n\n return list;\n}\n\nhufflistp getHuffListByBuf(huffbufp buf){\n if(buf == NULL || buf->code == NULL) return NULL;\n\n char* code = buf->code;\n\n hufflistp list = hufflist();\n for(int i = 0; code[i] != '\\0'; i++){\n unsigned char ch = code[i];\n list->keys[ch]++;\n }\n\n for(int i = 0; i 
< 256; i++){\n if(list->keys[i] > 0){ //插入存在的字符\n insertHuffNode(list,huffnode(i,list->keys[i]));\n }\n }\n\n return list;\n}\n\nhuffcodep getHuffCode(hufftreep tree,int key){\n if(key < 256 && key >= 0 && tree->codes[key] > 0){\n return tree->codes[key];\n }\n return NULL;\n}\n\nhuffresultp getHuffCodesByFile(const char* filename){\n huffresultp result = (huffresultp)malloc(sizeof(HuffResult));\n result->code = NULL;\n huffbufp buf = getFileBuf(filename); //文件缓存\n if(buf == NULL) return NULL;\n\n hufflistp list = getHuffListByBuf(buf); //huffman list\n\n result->tree = hufftree(list);\n int buf_len = buf->size;\n int len = 0;\n for(int i = 0; buf->code[i] != '\\0'; i++){\n int key = (unsigned char)buf->code[i];\n huffcodep code = getHuffCode(result->tree,key);\n if(code == NULL){\n printf(\"LLL:%c{%d}\\n\",key,key);\n return NULL;\n }\n len+=code->size;\n }\n result->code = (char*)malloc(len+1);\n result->code[0] = '\\0';\n for(int i = 0; buf->code[i] != '\\0'; i++){\n unsigned char key = buf->code[i];\n huffcodep code = getHuffCode(result->tree,key);\n strncat(result->code,code->code,code->size);\n }\n\n return result;\n}\n\nhuffbufp getOriginBuf(huffresultp result){\n if(result == NULL || result->code == NULL || result->tree == NULL) return NULL;\n hufftreep tree = result->tree;\n char* code = result->code;\n int len = 0;\n for(int i = 0; code[i] != '\\0';){\n huffnodep root = tree->root; //根节点\n while(root->left != NULL && root->right != NULL && code[i] != '\\0'){ //双子节点存在\n root = (code[i] == '0' ? root->left : root->right);\n i++;\n }\n if((root->left != NULL || root->right != NULL) && code[i] == '\\0'){ //错误\n return NULL;\n }\n len++;\n // printf(\"解析:%c{%s}\\n\",root->key,tree->codes[root->key]->code);\n }\n\n huffbufp ret = (huffbufp)malloc(sizeof(HuffBuf));\n ret->code = (char*)malloc(len+1);\n ret->code[0] = '\\0';\n ret->size = len;\n\n int idx = 0;\n for(int i = 0; code[i] != '\\0';){\n huffnodep root = tree->root; //根节点\n while(root->left != NULL && root->right != NULL && code[i] != '\\0'){ //双子节点存在\n root = (code[i] == '0' ? 
root->left : root->right);\n i++;\n }\n ret->code[idx++] = root->key;\n }\n ret->code[idx] = '\\0';\n\n return ret;\n}\n\nint putOriginToFile(huffresultp result,const char* filename){\n if(result == NULL) return 0;\n // printf(\"res1[%d]:%s\\n\",(int)strlen(result->code),result->code);\n // huffbufp b = str2bin(result->code);\n // printf(\"%d\\n\",b->size);\n // printf(\"res2:%s\\n\",bin2str(b));\n // return 0;\n\n huffbufp buf = str2bin(result->code); //huffman code转成buf\n int i = 0;\n int len = 0; \n for(i = 0; i < 256; i++){\n if(result->tree->codes[i] > 0){ //\n len+= 5+result->tree->codes[i]->size; //key[1]:len[4]:size\n }\n }\n huffbufp keys = (huffbufp)malloc(sizeof(HuffBuf));\n keys->code = (char*)malloc(len);\n keys->size = 0;\n //获取keys\n int idx = 0;\n for(i = 0; i < 256; i++){\n if(result->tree->codes[i] > 0){ //\n keys->code[idx++] = i; //key\n int len = result->tree->codes[i]->size;\n memcpy(keys->code+idx,&len,4); //key size\n // printf(\"%c[%d]:%d{%s}\\n\",i,i,len,result->tree->codes[i]->code);\n idx+=4;\n huffbufp tmp = str2bin(result->tree->codes[i]->code);\n // printf(\"%d,%d\\n\",tmp->code[0],tmp->size);\n int tsize = toByte(tmp->size);\n memcpy(keys->code+idx,tmp->code,tsize);\n idx+=tsize;\n }\n }\n\n keys->size = idx; //诸多键的总空间\n \n //写出标准文件\n //HUF\\n\n //size: 4b\n //keys\n //size: 4b\n //codes\n FILE* fp = fopen(filename,\"w\");\n if(fp == NULL) return -1;\n fwrite(\"HUF\\n\",1,4,fp);\n fwrite(&idx,1,4,fp); //size\n fwrite(keys->code,1,keys->size,fp); //写入code\n fwrite(&(buf->size),1,4,fp); //size\n fwrite(buf->code,1,toByte(buf->size),fp);\n fclose(fp);\n\n return 4+4+keys->size+4+buf->size;\n}\n\n\nhuffbufp str2bin(char* str){ //二进制字符串转二进制数组\n // printf(\"bin:%s\\n\",str);\n if(str == NULL) return NULL;\n huffbufp buf = (huffbufp)malloc(sizeof(HuffBuf));\n int l = strlen(str);\n int size = (l / 8) + (l % 8 > 0);\n\n buf->code = (char*)malloc(l);\n memset(buf->code,0,l);\n for(int i = 0; i < l; i++){\n int idx = i/8;\n int bi = i%8;\n buf->code[idx] |= (str[i] == '0' ? 0:1) << bi;\n }\n buf->size = l;\n\n return buf;\n}\n\nchar* bin2str(huffbufp buf){\n char* ret = (char*)malloc(buf->size+1);\n for(int i = 0; i < buf->size; i++){\n int idx = i / 8;\n int offset = i % 8;\n ret[i] = (buf->code[idx] & (0x01 << offset)) ? 
'1' : '0';\n }\n ret[buf->size] = '\\0';\n\n return ret;\n}\n\nhuffbufp readHuffFile(const char* filename){\n huffbufp buf = getFileBuf(filename);\n if(buf == NULL) return NULL;\n\n if(memcmp(buf->code,\"HUF\\n\",4) != 0) return NULL; //文件不以BUF\\n开头\n huffresultp result = (huffresultp)malloc(sizeof(HuffResult));\n //BUF\\n\n //key size\n int key_size = *(int*)(buf->code+4);\n int base = 8; //偏移量\n hufftreep tree = (hufftreep)malloc(sizeof(HuffTree));\n tree->root = NULL;\n tree->size = 0;\n huffcodep* codes = tree->codes; //key对应代码\n memset(codes,0,sizeof(huffcodep)*256);\n \n int oft = 0;\n for(;oft < key_size;){\n int offset = base+oft;\n unsigned char key = buf->code[offset];\n // printf(\"%d[%c]\\n\",key,key);\n int size = *(int*)(buf->code+offset+1); //长度\n int byte = toByte(size);\n huffbufp htmp = (huffbufp)malloc(sizeof(HuffBuf));\n //键对应代码\n htmp->code = buf->code+offset+5; //缓存代码\n htmp->size = size; //缓存大小\n // printf(\"[%c]%d\\n\",key,key);\n huffcodep tmp = (huffcodep)malloc(sizeof(HuffCode));\n tmp->size = size; //key的大小\n tmp->code = bin2str(htmp);\n tree->codes[key] = tmp;\n tree->size++; //树的大小增加\n huffnodep root = tree->root;\n if(root == NULL){\n tree->root = huffnode(-256,0);\n root = tree->root;\n }\n for(int i = 0; i < tmp->size; i++){\n char ch = tmp->code[i];\n huffnodep node = NULL;\n if(ch == '0'){\n node = root->left;\n if(node == NULL){\n node = huffnode(-256,0);\n }\n root->left = node;\n }else{\n node = root->right;\n if(node == NULL){\n node = huffnode(-256,0);\n }\n root->right = node;\n }\n if(i == tmp->size - 1)\n node->key = key;\n root = node;\n }\n oft+=5+byte;\n }\n\n huffbufp tmp = (huffbufp)malloc(sizeof(HuffBuf));\n tmp->code = buf->code+base+oft+4;\n tmp->size = *(int*)(buf->code+base+oft);\n // printf(\"tmp size:%d\\n\",tmp->size);\n result->tree = tree;\n result->code = bin2str(tmp);\n // printf(\"%s\\n\",result->code);\n\n // for(int i = 0; i < 256; i++){\n // if(codes[i]!=NULL){\n // printf(\"%c[%d]:%s\\n\",i,i,codes[i]->code);\n // }\n // }\n\n return getOriginBuf(result);\n}\n"
},
{
"alpha_fraction": 0.25517240166664124,
"alphanum_fraction": 0.28045976161956787,
"avg_line_length": 17.913043975830078,
"blob_id": "74774fb9f0b2dedeb4e881e3d44311fabfbd305c",
"content_id": "11fc6421bf56595acff8d791cdb9da5e87a5db4e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 435,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 23,
"path": "/算法/位运算/异或和/c/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#include<stdio.h>\n\nint main(){\n int n;\n while(~scanf(\"%d\",&n)){\n int res = 0;\n switch(n&0x03){\n case 0:\n res = n;\n break;\n case 1:\n res = 1;\n break;\n case 2:\n res = n+1;\n break;\n case 3:\n res = 0;\n break;\n }\n printf(\"%d\\n\",res);\n }\n}\n"
},
{
"alpha_fraction": 0.8275862336158752,
"alphanum_fraction": 0.8571428656578064,
"avg_line_length": 49.25,
"blob_id": "baf008ccc65b74935a2dcd8c9d72005b3f859dea",
"content_id": "c5aeae1a40f6947b14977b2cc6e55d1c83c69c6c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 539,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 4,
"path": "/算法/排序算法/鸡尾酒排序/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 鸡尾酒排序\n鸡尾酒排序是冒泡排序的一种变种,与冒泡排序的区别在于,鸡尾酒排序是双向的,从两边开始排序,而不是冒泡排序中的单项排序. \n一般来说鸡尾酒排序可以获得比冒泡排序稍好的性能,比如整体大致有序且与冒泡排序方向相同,这个时候冒泡的性能就会比较低,而鸡尾酒排序的效果就会好一点. \n比如序列:1,3,4,5,0.利用冒泡排序就需要至少4趟才能整体有序,但是鸡尾酒排序只需要一趟即可达到效果. \n"
},
{
"alpha_fraction": 0.5925925970077515,
"alphanum_fraction": 0.7407407164573669,
"avg_line_length": 13,
"blob_id": "af717b8b003100275b8973ad5cfc83bccd5f508f",
"content_id": "9873a78ecdd76d251c16eb907bb8383eefb5ccfa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 61,
"license_type": "no_license",
"max_line_length": 18,
"num_lines": 2,
"path": "/数据结构/树/2-3树/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 2-3树 \n指定的节点可以存放1个或者2个节点。"
},
{
"alpha_fraction": 0.819327712059021,
"alphanum_fraction": 0.8424369692802429,
"avg_line_length": 117.5,
"blob_id": "19a61120844da734ae1ddac33c3467c4cf644b5a",
"content_id": "ce9a1bafce8df19ef2fd8323b7521ba7366bd903",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 832,
"license_type": "no_license",
"max_line_length": 280,
"num_lines": 4,
"path": "/编程开发/后端/消息队列/rabbitmq-server安装.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# Debian上安装RabbitMQ-Server\n我在RabbitMQ在debian上安装过程中遇到了很多坑,官网网站上的安装方法隐藏的比较深,而且讲的并不是特别详细,特别是,按照上面的方法安装还无法启动RabbitMQ.最后这个问题在Google上找到了答案: \n可以利用:`http://www.rabbitmq.com/releases/rabbitmq-server/v3.4.3/rabbitmq-server_3.4.3-1_all.deb`这个网址下载到一个可用的rabbitmq-server安装包,将这个安装包下载到本地之后,再利用`dpkg -i rabbitmq-server_3.4.3-1_all.deb`安装这个包,在安装过程中,可能会出现依赖问题,此时执行一下`sudo apt install -f`,该命令执行成功后,再重复执行刚刚的dpkg命令即可安装好rabbitmq-server. \n为了验证rabbitmq-server安装成功,可以执行`rabbitmqctl status`查看当前的状态. \n"
},
{
"alpha_fraction": 0.7591623067855835,
"alphanum_fraction": 0.7879580855369568,
"avg_line_length": 28.384614944458008,
"blob_id": "75e0de0bd4c186add26d31dcfe0f7212a40d5366",
"content_id": "9a4725a226e4a17705c531269d38610c7388e091",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 632,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 13,
"path": "/编程开发/Linux/常见问题/debian9安装mysql.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# Debian9安装mysql\n由于社区在debian中移除了mysql的源,改为了mariadb源,这里如果需要安装mysql的话,我们需要加入对应的apt源,具体步奏如下: \n```\n# 切入家目录(这里不一定是家目录,不影响当前工作的地方即可)\ncd\n# 下载apt配置文件\nwget https://dev.mysql.com/get/mysql-apt-config_0.8.14-1_all.deb\n# 执行这个deb文件\nsudo dpkg -i mysql-apt-config_0.8.14-1_all.deb\n# 将第一项修改为你需要的mysql版本即可\n```\n完成上述步奏后,我们需要执行一下`sudo apt update`更新一下源. \n之后再执行`sudo apt install mysql-server`即可安装mysql\n"
},
{
"alpha_fraction": 0.4402810335159302,
"alphanum_fraction": 0.4871194362640381,
"avg_line_length": 18.363636016845703,
"blob_id": "ed564b900c9714d0b367625d736423a20aabc72e",
"content_id": "606299382160f1a1025783de117d84102665a757",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 427,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 22,
"path": "/数据结构/树/红黑树/c/term2/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#include<stdio.h>\n#include \"rb.h\"\n\nint main(){\n nodep root = NULL;\n // insert(&root,3);\n // insert(&root,5);\n // insert(&root,7);\n // insert(&root,9);\n // insert(&root,11);\n // insert(&root,13);\n // insert(&root,15);\n // for(int i = 0; i < 10; i++)\n // insert(&root,i*2+1);\n insert(&root,1);\n insert(&root,3);\n insert(&root,4);\n insert(&root,0);\n\n visit(root);\n return 0;\n}\n\n"
},
{
"alpha_fraction": 0.5082781314849854,
"alphanum_fraction": 0.5198675394058228,
"avg_line_length": 17.303030014038086,
"blob_id": "24a5553fdd705602c3755982b04382cc76cfc66c",
"content_id": "60de17b20f31b5cf56c3353d01ae895f9e3a8450",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 604,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 33,
"path": "/数据结构/树/红黑树/c/term1/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#include \"rb.h\"\n#include<time.h>\n\nint cmp(const void* a,const void* b){\n return *(int*)a > *(int*)b;\n}\n\n#define SIZE 5\nint data[SIZE];\n\nvoid genData(int arr[],int size){\n int i;\n for(i = 0; i < size; i++){\n arr[i] = rand() % size;\n }\n qsort(arr,size,sizeof(int),cmp);\n}\n\nint main(){\n srand(time(NULL));\n node root = NULL;\n int size = sizeof(data) / sizeof(data[0]);\n genData(data,size);\n int i;\n for(i = 0; i < size; i++){\n insert(&root,i,data[i]);\n }\n\n for(i = 0; i < 1; i++)\n delete(&root,root->key);\n preOrderVisit(root);\n return 0;\n}\n"
},
{
"alpha_fraction": 0.7387387156486511,
"alphanum_fraction": 0.7387387156486511,
"avg_line_length": 14.714285850524902,
"blob_id": "78b64af410024f0b84043bc5eeaf72ecedc43bd5",
"content_id": "060818ea7b0ab84a5d6d7610357f072cefdf426e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 310,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 14,
"path": "/编程开发/Linux/常见问题/selinux工具包安装.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# selinux工具包安装\n今天sftp的时候需要用到setenforce命令,结果显示command not found. \n经过一番google,查到需要安装selinux工具包. \nRedhat: \n```\nyum -y install libselinux-utils\n```\n\nDebian: \n```\napt install -y selinux-utils\n```\n\n安装好工具后,setenforce即可使用了. \n"
},
{
"alpha_fraction": 0.45551130175590515,
"alphanum_fraction": 0.4842850863933563,
"avg_line_length": 25.89285659790039,
"blob_id": "01855fbdc9822cc9fb4e23eaab712d6b176f41f6",
"content_id": "b16c1aae61c3a99004c06d36c7a81287e6e3b883",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 2525,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 84,
"path": "/算法/排序算法/归并排序/c/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "/**\n * 代码来源于维基百科.\n * 额外的空间消耗可以通过一些手法去除\n **/\n#include<stdio.h>\n#include<stdlib.h>\n\n//获取x,y中的最小值\nint min(int x,int y){\n return x < y ? x : y;\n}\n\nvoid merge_sort(int arr[],int len){\n int *a = arr;\n int *b = (int*)malloc(len*sizeof(int));\n int seg,start; //seg为段的大小,start为开始位置\n for(seg = 1; seg < len; seg<<=1){ //从最小段(长度为1)逆推到整个问题\n for(start = 0; start < len; start+= seg<<1){\n int low = start; //最低位开始下标\n int k = low;\n int mid = min(start+seg,len); //中间位置下标\n int high = min(start+2*seg,len); //最高位置下标\n int start1 = low,end1 = mid;\n int start2 = mid,end2 = high;\n while(start1 < end1 && start2 < end2){\n b[k++] = a[start1] < a[start2] ? a[start1++] : a[start2++];\n }\n while(start1 < end1)\n b[k++] = a[start1++];\n while(start2 < end2)\n b[k++] = a[start2++];\n }\n int *temp = a;\n a = b;\n b = temp; //交换a,b的指针值\n }\n if(a != arr){ //a与数组的地址不相同,说明a只想b的指针\n int i;\n for(i = 0; i < len; i++) //将临时空间的值赋给数组\n b[i] = a[i];\n b = a; //将临时空间的地址赋给b\n }\n free(b);//释放临时空间\n}\n\n//递归部分\nvoid merge_sort_rec(int arr[],int reg[],int start,int end){\n if(start >= end) return; //结束条件\n int len = end - start;\n int mid = (len >> 1) + start;\n int start1 = start,end1 = mid;\n int start2 = mid+1,end2 = end;\n merge_sort_rec(arr,reg,start1,end1);\n merge_sort_rec(arr,reg,start2,end2);\n int k = start;\n while(start1 <= end1 && start2 <= end2)\n reg[k++] = arr[start1] < arr[start2] ? arr[start1++] : arr[start2++];\n while(start1 <= end1)\n reg[k++] = arr[start1++];\n while(start2 <= end2)\n reg[k++] = arr[start2++];\n for(k = start; k <= end; k++)\n arr[k] = reg[k];\n}\n\nvoid merge_sort2(int arr[],int size){\n int *reg = (int*)malloc(size*sizeof(int));\n merge_sort_rec(arr,reg,0,size-1);\n free(reg);\n}\n\nint main(){\n int arr[] = {\n 1,3,4,0,0,5,8,5,9,0\n };\n int size = sizeof(arr) / sizeof(arr[0]);\n int i;\n\n //merge_sort(arr,size);\n merge_sort2(arr,size);\n for(i = 0; i < size; i++)\n printf(\"%d \",arr[i]);\n puts(\"\");\n}\n"
},
{
"alpha_fraction": 0.5555555820465088,
"alphanum_fraction": 0.7777777910232544,
"avg_line_length": 8,
"blob_id": "c8f4a4e30b326593d1f359f50c09825413406b73",
"content_id": "fd31d18f89508b879641dc4997a30f7c9d15db51",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 38,
"license_type": "no_license",
"max_line_length": 10,
"num_lines": 2,
"path": "/硬件/c51/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 51笔记\n51单片机相关的笔记\n"
},
{
"alpha_fraction": 0.7099999785423279,
"alphanum_fraction": 0.75,
"avg_line_length": 22.076923370361328,
"blob_id": "06168e554de54b04af4981e38ec180b29d589410",
"content_id": "272603ae076fb5f9d59787ac03fb58e4d72e19c7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 300,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 13,
"path": "/DailyCodingProblems/381_base64_paypal/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 381_base64_paypay \nGood morning! Here's your coding interview problem for today.\nThis problem was asked by Paypal.\nRead this Wikipedia article on Base64 encoding.\nImplement a function that converts a hex string to base64.\nFor example, the string:\n```\ndeadbeef\n```\nshould produce:\n```\n3q2+7w==\n```\n"
},
{
"alpha_fraction": 0.8583691120147705,
"alphanum_fraction": 0.8583691120147705,
"avg_line_length": 45.599998474121094,
"blob_id": "7373a1687e715ba5aae134657040ffde4dbdd71f",
"content_id": "0528aadef27ba439755e916f7f17473416c3086e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 633,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 5,
"path": "/算法/排序算法/快速排序/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 快速排序\n思路: \n选举出一个标兵,比它小的放到它左边,比它大的放右边,这样经过一趟操作,虽然整体不一定有序,但是我们可以确定标兵的位置肯定是对的. \n这样排序的好处是,无论在什么时候,要求解的序列都会被标兵分成两部分(标兵恰好是边界的,我们把另一半看成空的),这样处理之后,我们可以很容易的把分出来的两部分作为一个子问题重复上面的步奏.直到所有的元素的位置都是正确的. \n这个算法可以用递归写,比较好理解,我这里方便用了迭代的方式.这里注意一下就好了.\n"
},
{
"alpha_fraction": 0.5762711763381958,
"alphanum_fraction": 0.598870038986206,
"avg_line_length": 16.700000762939453,
"blob_id": "4cdc8b03d370edfde5ff667ac88371bf97cdc2af",
"content_id": "0d89fcc46396fd85696a92daedd1ffd1eba9c17a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 285,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 10,
"path": "/编程开发/vim/常见的值.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# vim中常见的值\n1. % 文件名 \n2. %:r 文件名 \n3. %:e 文件扩展名 \n4. %:p 文件绝对路径 \n**事件:** \nBufWrite 缓存写入文件\nBufWritePre 缓存开始写入文件\nBufWritePost 缓存写入文件完毕\nBufReadPost 文件读入缓存完毕\n"
},
{
"alpha_fraction": 0.837837815284729,
"alphanum_fraction": 0.837837815284729,
"avg_line_length": 16.5,
"blob_id": "9f54c92d531c4a7aa72875b25899b0b9fbc66704",
"content_id": "7a1c50db10105da015e7d7362f7058cc8fa00df6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 87,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 2,
"path": "/编程开发/git/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# Git学习笔记\n这里记录着我在编程开发中遇到的跟Git相关的问题 \n"
},
{
"alpha_fraction": 0.5626283288002014,
"alphanum_fraction": 0.6714578866958618,
"avg_line_length": 18.360000610351562,
"blob_id": "70934e49df1ba89b79eb52ed392377fb8fd442cb",
"content_id": "4cd1af542063d03a2294fc1a52c4c46c1bb30549",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1021,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 25,
"path": "/leetcode/201-数字范围按位与/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "**题目:**\n给定范围 [m, n],其中 0 <= m <= n <= 2147483647,返回此范围内所有数字的按位与(包含 m, n 两端点)。 \n**示例 1: **\n```\n输入: [5,7]\n输出: 4\n```\n**示例 2:**\n```\n输入: [0,1]\n输出: 0\n```\n\n很显然,这个题目用爆破一定会超时....... \n嗯,我还试了一下,果然超时了,就算优化了退出条件依然超时. \n这个题目我们应该从二进制的角度去思考: \n从左边开始观察二进制,只有代表范围边界数字的二进制位相同的地方,其二进制位才不会变化.其余地方,不论如何,二进制位必定改变. \ne.g.\n观察二进制数\n00111010 a\n00110010 b\n0011 相同的前缀.\n后面的四位,首先看第一个不相同的位置,a&b在该位在一开始的情况下必定为0. \n我们继续观察,其后的位置,必定存在一个从0111编程1000的过程,1000这个数的后三个二进制位与其他的二进制与运算必定为0. \n所以边界数字的公共前缀即为我们需要的结果. \n\n"
},
{
"alpha_fraction": 0.42560553550720215,
"alphanum_fraction": 0.46885812282562256,
"avg_line_length": 18.931034088134766,
"blob_id": "fcb00ae10461972e0e3698507c6c65e134eeb45d",
"content_id": "8b86d331326e8119e9c1bd155c42a2360a400beb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 776,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 29,
"path": "/算法/位运算/多数元素/c/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "/**\n * 这个解法假设一定存在超过半数的元素.\n * 如果没有这个假设,可以在找出符合条件的元素后,验证该元素是否超过半数.\n * */\n#include<stdio.h>\n\nint main(){\n int arr[] = {\n 1,1,1,1,1,2,2,2,2\n };\n int size =sizeof(arr)/sizeof(arr[0]);\n int i,j;\n\n int num = 0;\n //int类型在32位的系统上长度为32位\n for(i = 0; i < 32; i++){\n int count = 0; //对应数位上的1计数\n int mask = 1<<i; //掩码\n for(j = 0; j < size; j++){ //遍历每个数字\n if(arr[j] & mask) count++;\n }\n if(count > size / 2) //对应二进制位出现1的次数超过半数\n num |= mask;\n }\n\n printf(\"超过半数的元素:%d\\n\",num);\n\n return 0;\n}\n"
},
{
"alpha_fraction": 0.869369387626648,
"alphanum_fraction": 0.869369387626648,
"avg_line_length": 72.33333587646484,
"blob_id": "f58568d3bc71ce625538a03eb6d9652080271de9",
"content_id": "1e5f01ce0431882106d13cfe8c857534c2efc84e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 598,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 3,
"path": "/编程开发/Linux/常见问题/linux下搜狗输入法皮肤外框发黑.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# linux下搜狗输入法皮肤外框发黑解决 \n今天决定改用linux进行编程办公,在进行中文输入时,突然发现搜狗输入法的四个角原本应该是透明的区域变成黑框。我一开始猜测是在输入发中设置,但是找了一番,没有任何收获,最后借助在网上找到了对应的解决方案: \n启用linux中窗口管理器中的合成器。这个东西可能在不同的版本中名称有细微差别。在mint中可以在`系统》控制中心》窗口》混成管理器`中勾选`启用软件混成窗口管理器`。问题即解决。 \n"
},
{
"alpha_fraction": 0.7727272510528564,
"alphanum_fraction": 0.7727272510528564,
"avg_line_length": 19,
"blob_id": "bd234869a48039a1efe9aaf089b1f26cc79f19f4",
"content_id": "7945d5cb08715e97dc2876be39eba9e5a958ebc6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 56,
"license_type": "no_license",
"max_line_length": 19,
"num_lines": 1,
"path": "/编程开发/谷歌问题/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 一些在开发中遇到的与谷歌有关的问题 \n"
},
{
"alpha_fraction": 0.5502315759658813,
"alphanum_fraction": 0.557356595993042,
"avg_line_length": 23.622806549072266,
"blob_id": "971cdb40b3e77d3aca61ca35c98914281c2c069b",
"content_id": "e6875384798e5c873cf9d55d289c8ac5b8dde501",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 6356,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 228,
"path": "/数据结构/树/AVL树/c/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "/**\n * AVL树的c语言实现 \n **/\n#include<stdio.h>\n#include<stdlib.h>\n#include<time.h>\n\ntypedef struct Node{\n int key; //键\n int val; //值,实际中键和值都可以用替换为其他类型\n int height; //节点的高度,从1算起\n struct Node *left; //左子节点\n struct Node *right; //右子节点\n}Node,*pnode;\n\n//获取某个节点的高度\n#define height(p) ((p) == NULL ? 0 : (p)->height)\n//取两个数字中的较大值\n#define max(a,b) ((a) > (b) ? (a) : (b))\n//获取某个节点的平衡因子\n#define bf(p) ((p) == NULL ? 0 : height(p->left) - height(p->right))\n\nvoid LL(pnode* proot);\nvoid RR(pnode* proot);\nvoid LR(pnode* proot);\nvoid RL(pnode* proot);\nvoid insert(pnode* proot,int key,int val); //插入节点\nint delete(pnode* proot,int key); //删除节点\nint search(pnode root,int key); //查找节点\nint cmp(const void* a,const void* b);\nint batchInsert(pnode* proot,int arr[],int size);//批量插入\nvoid preOrderVisit(pnode root); //先序遍历\nvoid genRandom(int arr[],int size); //生成随机数据\nvoid deleteMax(pnode *proot,int* key,int* val); //删除左边最大元素\nvoid deleteMin(pnode *proot,int* key,int* val); //删除右边最小元素\npnode adjust(pnode root);\n\n#define SIZE 10000000\n//int arr[SIZE];\nint arr[] = {\n 1,3,4,0,0,5,8,5,9,0\n};\n\nint main(){\n srand(time(NULL));\n size_t size = sizeof(arr) / sizeof(arr[0]);\n //genRandom(arr,size);\n pnode root = NULL;\n int i;\n\n //在batchInsert中对arr进行了一次排序\n batchInsert(&root,arr,size);\n for(i = 0; i < 5; i++)delete(&root,arr[i]);;\n\n preOrderVisit(root);\n //int finder = 4;\n //printf(\"search %d:%d\\n\",finder,search(root,finder));\n return 0;\n}\n\nvoid genRandom(int arr[],int size){ //生成随机数据\n int i;\n for(i = 0; i < size; i++){\n arr[i] = random() % size;\n }\n}\n\nint batchInsert(pnode* proot,int arr[],int size){\n int i;\n qsort(arr,size,sizeof(int),cmp);\n for(i = 0; i < size; i++)\n insert(proot,arr[i],arr[i]);\n return size;\n}\n\nint cmp(const void* a,const void* b){\n return *(int*)a > *(int*)b;\n}\n\n//父子孙左左排布\nvoid LL(pnode * proot){ //选择二重指针的原因是跟节点的地址需要变动并传递到外界\n pnode root = *proot;\n if(root == NULL) return;\n pnode left = root->left;\n root->left = left->right;\n left->right = root; //根节点变为左节点的右孩子\n \n //先更新根节点的高度,因为此时根节点位于左孩子下方\n root->height = max(height(root->left),height(root->right))+1;\n //更新左节点的高度\n left->height = max(height(left->left),root->height)+1;\n *proot = left;\n}\n\n//父子孙右右排列\nvoid RR(pnode * proot){\n pnode root = *proot;\n if(root == NULL) return;\n pnode right = root->right;\n root->right = right->left;\n right->left = root;\n\n root->height = max(height(root->left),height(root->right))+1;\n right->height = max(height(root),height(right->right))+1;\n *proot = right;\n}\n\n//LR排列\nvoid LR(pnode * proot){\n pnode root = * proot;\n if(root == NULL) return;\n RR(&(root->left));\n LL(proot);\n}\n\n//RL排列\nvoid RL(pnode * proot){\n pnode root = * proot;\n if(root == NULL) return;\n LL(&(root->right));\n RR(proot);\n}\n\n//调整节点\npnode adjust(pnode root){\n root->height = max(height(root->left),height(root->right))+1; //更新当前节点的高度\n if(bf(root) == 2){ //树向左倾斜\n if(height(root->left) > height(root->right)){ //向左倾斜\n LL(&root);\n }else{\n LR(&root);\n }\n }else if(bf(root) == -2){ //向右倾斜\n if(height(root->left) > height(root->right)){ //向左倾斜\n RL(&root);\n }else{\n RR(&root);\n }\n }\n return root;\n}\n\nvoid insert(pnode * proot,int key,int val){\n pnode root = * proot;\n if(root == NULL){ //根节点为空,创建新节点\n root = (pnode)malloc(sizeof(Node));\n root->left = root->right = NULL;\n root->height = 1;\n root->key = key;\n root->val = val;\n }else if(root->key == key){\n root->val = val; //替换为当前值\n }else if(root->key < key){ //key可能在当前节点右边\n 
insert(&(root->right),key,val);\n }else{ //key可能在当前节点左边\n insert(&(root->left),key,val);\n }\n\n *proot = adjust(root);\n}\n\nvoid preOrderVisit(pnode root){\n if(root == NULL) return;\n printf(\"[%d]=>%d\\n\",root->key,root->val);\n preOrderVisit(root->left);\n preOrderVisit(root->right);\n}\n\n\nint delete(pnode* proot,int key){\n pnode root = *proot;\n if(root == NULL) return 0; //删除失败\n if(root->key == key){ //找到节点\n int key,val;\n //如果找到节点,则使用左侧最大的节点或者右侧最小的节点替换当前节点\n if(root->left != NULL){ //左节点存在\n deleteMax(&(root->left),&key,&val);\n }else if(root->right != NULL){\n deleteMin(&(root->right),&key,&val);\n }else{\n free(root);\n *proot = NULL;\n return 1;\n }\n root->key = key;\n root->val = val;\n *proot = adjust(root);\n return 1;\n }else if(root->key > key){\n return delete(&(root->left),key);\n }else{\n return delete(&(root->right),key);\n }\n}\n\nint search(pnode root,int key){\n if(root == NULL) return -1;\n if(root->key == key) return root->val;\n else if(root->key < key) return search(root->right,key); //向右查找\n else return search(root->left,key); //向左查找\n}\n\n//删除左边最大元素并返回\nvoid deleteMax(pnode *proot,int* key,int* val){ //删除左边最大元素\n pnode root = *proot;\n if(root->right == NULL){ //这里假定root节点存在\n *key = root->key;\n *val = root->val;\n free(root);\n root = NULL;\n }else{\n deleteMax(&(root->right),key,val);\n root = adjust(root);\n }\n *proot = root;\n}\nvoid deleteMin(pnode *proot,int* key,int* val){ //删除右边最小元素\n pnode root = *proot;\n if(root->left == NULL){\n *key = root->key;\n *val = root->val;\n free(root);\n root = NULL;\n }else{\n deleteMin(&(root->left),key,val);\n }\n *proot = root;\n}\n"
},
{
"alpha_fraction": 0.7786885499954224,
"alphanum_fraction": 0.7909836173057556,
"avg_line_length": 47.79999923706055,
"blob_id": "2a1252a8b9ae7e3b78b424ff1d029de2dff90698",
"content_id": "ba718fe7f2df01a15acd69dabc38e8393a4b5e35",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 406,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 5,
"path": "/DailyCodingProblems/387_API_SDK_Amazon/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 387_API_SDK_Amazon\nThis problem was recently asked by Amazon. \nHow would you explain the difference between an API and SDK to a non-technical person? \n**思路:** \n我目前的思路是,可以把调用这两种事物类比成在家上班和到公司上班。在家工作,需要的东西自己都是可控的;在公司上班,所有的东西都由公司控制,自己只有使用的权利。\n"
},
{
"alpha_fraction": 0.6988235116004944,
"alphanum_fraction": 0.7529411911964417,
"avg_line_length": 25.4375,
"blob_id": "47c176ea039131caa9868b273448285906f029cc",
"content_id": "6206d82caf9f8335b8c4f84b71e3913e57c1a418",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 999,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 16,
"path": "/leetcode/260-只出现一次的数字3/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 只出现一次的数字3\n**题目:**\n给定一个整数数组 nums,其中恰好有两个元素只出现一次,其余所有元素均出现两次。 找出只出现一次的那两个元素。\n\n**示例 :**\n```\n输入: [1,2,1,3,2,5]\n输出: [3,5]\n```\n**注意:** \n1. 结果输出的顺序并不重要,对于上面的例子, [5, 3] 也是正确答案。 \n2. 你的算法应该具有线性时间复杂度。你能否仅使用常数空间复杂度来实现? \n\n**思路:** \nleetcode136中,可以从一堆双人狗中区分出一个单生狗,但是在这个题目里面存在两个单生狗,这个时候,这种策略看起来就不靠谱了. \n其实leetcode136中的思路也是可以用在这个题目中的.利用136中的思路,我们得到的是两个单独的数字的按位异或的结果,根据按位异或的特点,我们可以知道,结果中二进制位出现1的地方表示两个数字对应二进制位不同,根据这个特点,我们可以进一步区分出两个数字. \n"
},
{
"alpha_fraction": 0.8355140089988708,
"alphanum_fraction": 0.8420560956001282,
"avg_line_length": 55.31578826904297,
"blob_id": "75cb8197d4064d3534845c9aa6fcb1a7f97375e7",
"content_id": "e54b8e0e2b084dee0bac5ce87e5d87853e5398cd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2348,
"license_type": "no_license",
"max_line_length": 180,
"num_lines": 19,
"path": "/硬件/华为平板/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 记一次华为平板的刷机过程 \n之前一段时间在国内的rom网站上找了一个看起来像模像样的ROM,刷完之后发现耗电特别快,想要刷回去,结果发现官方的刷机包不给下载了。。。。绝对是老用户与狗。。。 \n无奈,这件事情搁置了很长时间,今天突然想到好久没用平板了,就折腾了一下,用中文关键词找了半天都没有有用的线索,无奈硬着头皮去找官方客服,给的答复就是不给。 \n无奈就转为搜索英文关键词,终于在[http://huawei-update.com/device-list/m2-a01l](http://huawei-update.com/device-list/m2-a01l)找到了我需要的华为官方刷机包,刷机包的地址都是华为官方的。。。真的是对洋大人十分贴心的说,,,,,。\n下载之后里面有2个pdf文件,一个dload文件夹,文件夹里面有一个UPDATE.APP文件,我看教程上说dload放在tf卡根目录会自动读取,然后我的并没有。 \n继续搜索,发现UPDATE.APP可以解包(有一个官方的update.app解包工具)出来一堆的img文件,其中就包含`boot.img`,`recovery.img`,`system.img`,我把这三个单独提出来,放到电脑里,再利用提前装好的fastboot,adb命令,执行了如下操作: \n```\nfastboot devices # 确认设备是否连接上\nadb reboot bootloader # 进入bootloader准备载入img文件\n# 进入后\nfastboot flash system system.img\n# 这里我发现每次拷入一个img文件都需要重新拔插一下usb口,否则无法写入数据\nfastboot flash boot boot.img\nfastboot flash recovery recovery.img\n\n```\n进行了这三步之后,fastboot reboot重启。\n此时可以正常进入系统了,但是有一个问题,平板奇卡无比,等了好久都没加载出来桌面,而且也没有初始化的意思,所以我想应该是老的数据影响到了现有系统了,所以利用关机键+音量加键进入recovery,清空cache,恢复出厂设置。这两步做完之后,重启系统,此时系统就很流畅了。此时像是刚买平板时一样设置即可,因为是国外版本,所以自带了google全家桶,美滋滋。。。。\n其中好多步骤并不像我说的这么云淡风轻的,好几次在变砖的边缘徘徊,好在最后的结果是好的,所以这里记一下笔记。防止下次忘记\n"
},
{
"alpha_fraction": 0.733798623085022,
"alphanum_fraction": 0.7507477402687073,
"avg_line_length": 24.71794891357422,
"blob_id": "6844c27389c2949747f3fe64539ca169d44d8a42",
"content_id": "ac9c76f1bbd120142d2eb39dbd7db9c8109604aa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1477,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 39,
"path": "/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# notes\n算法,数据结构及各种小想法 \n以后可能还会增加其他新的笔记内容 \n[Gitee](https://gitee.com/francisx/notes.git): \n[](https://gitee.com/francisx/notes/stargazers)\n[](https://gitee.com/francisx/notes/members) \n[Github](https://github.com/ixysoft/notes.git): \n\n \n\n## 目录说明\n1. 算法 \n算法相关的笔记\n2. 数据结构 \n数据结构相关的笔记\n3. 硬件 \n硬件相关的笔记\n4. 编译器 \n编译器相关的笔记\n5. 操作系统 \n操作系统相关\n6. 数据库 \n数据库相关\n7. leetcode \nLeetcode解题相关的笔记 \n**注意:**\n采用ACM输入的形式表现问题的源代码,而不是leetcode上的单纯的一个函数的形式,理解主要思路之后,可以很方便的转换成leetcode能够识别的形式.\n8. 数学 \n数学相关\n9. 人工智能 \nAI相关的笔记\n10. 外语 \n外语相关的笔记\n11. Linux \nLinux C,Linux脚本及一些其他工具的笔记\n12. 网络 \n网络编程,网络协议,爬虫等相关的笔记\n13. 编程开发 \n编程开发的一些笔记经验.包括前后端,Android,Qt等方面的一些笔记\n"
},
{
"alpha_fraction": 0.7643097639083862,
"alphanum_fraction": 0.7794612646102905,
"avg_line_length": 28.600000381469727,
"blob_id": "f668089ec34bd14f05b46f9752b59442b28ba3fd",
"content_id": "e5af5cb9d2a56ff1df8a93d9e95eb52d7faeb949",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1450,
"license_type": "no_license",
"max_line_length": 165,
"num_lines": 20,
"path": "/算法/排序算法/堆排序/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 堆排序(大根堆) \n这里实现的是一个大根堆,对于小根堆实现原理相同. \n利用堆实现的排序方式 \n堆的物理结构:`数组` \n**常见的一些关系**: \n```\n元素n的子元素下标:x*2+1(左子元素),x*2+2(右子元素) \n元素n的父元素下标:(x-1)/n \n长度为n的堆最后一个元素的父元素下标:n/2-1\n判断下标n超出堆底:n>=size(size为堆的长度)\n```\n**建堆的过程**: \n```\n从最后一个元素的父元素作为当前元素开始逆序遍历至队列头部.重复下面的过程:\n调整堆步骤: 比较当前元素与其左右子元素的大小.如果子元素里面有比当前元素大的,那我们就将子元素最大的那个与当前元素交换,切换到交换过子元素下标,然后递归调整交换的那个子元素直到堆底或者当前元素比两个子元素大. \n```\n上面的过程执行完毕后一个经过大顶堆就出来了. \n**排序的过程**: \n这个过程需要在建堆操作执行后运行. \n根据大根堆的特点(所有的父元素都不小于子元素)可知,堆顶的元素是整个堆中的最大值,此时我们进行的操作是将堆顶元素与堆最底部的元素交换,此时最大的元素沉底,我们只需要将堆顶元素作为当前元素,堆的大小变成size-1,重复调整堆的步奏.调整完成后,当前的堆变成了大小为size-1的大顶堆重复排序过程,最后得到的序列即为排序好的序列. \n"
},
{
"alpha_fraction": 0.7395833134651184,
"alphanum_fraction": 0.7395833134651184,
"avg_line_length": 30,
"blob_id": "a2884849c7d52bc3a784c64f214cbf678701e31e",
"content_id": "0b1786ded028cea4fca5072696b2796986d0fbd2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 222,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 3,
"path": "/算法/其他/寻找两个有序数组的中位数/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 寻找两个有序数组的中位数\n**题目**: \n现在给定两个长度分别为M,N的有序数组(从小到大的顺序),设计一个算法在O(log(M+N))时间复杂度下求出两个数组合并后的中位数. \n\n"
},
{
"alpha_fraction": 0.4588744640350342,
"alphanum_fraction": 0.523809552192688,
"avg_line_length": 22.100000381469727,
"blob_id": "b0451f5a78aa6cf2ce928985f99816d37b183541",
"content_id": "c0ece1f03d26d3a4aa9bc5612be3ba09f9a0a0ec",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 462,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 20,
"path": "/DailyCodingProblems/382_base64_google/python/main.py",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#coding=utf-8\n\ndef base64decode(st):\n latex = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'\n lcap = {}\n idx = 0\n for i in latex:\n lcap[i] = idx\n idx+=1\n ret = 0\n cnt = 0\n for i in st:\n if i == '=':\n cnt += 1\n else:\n ret = (ret << 6) | lcap[i]\n ret = ret >> (12 - 4 * cnt)\n return format(ret,'x')\nprint(base64decode('3q2+7w=='))\nprint(base64decode('bmV3IHlvcms='))\n"
},
{
"alpha_fraction": 0.5850732922554016,
"alphanum_fraction": 0.5877388119697571,
"avg_line_length": 34.74603271484375,
"blob_id": "53136a4a1812cb866331a3df3405ca23f9202a7c",
"content_id": "602ea5127dedf6d1177d31672b27a85e56b60790",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 2569,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 63,
"path": "/数据结构/树/红黑树/c/term3/rb.h",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#ifndef __RB_H__\n#define __RB_H__\n#include<stdio.h>\n#include<stdlib.h>\n\n// #define DEBUG\n#ifdef DEBUG\n#define ralloc(n) (printf(\"line[%d]: alloc %d bytes\\n\",__LINE__,(int)(n)),malloc(n))\n#else\n#define ralloc(n) (malloc(n))\n#endif\n#define isNull(n) ((n) == NULL) //判断是否为空\n#define isLeaf(n) (!isNull(n) && isNull((n)->key)) //判断是否为叶子节点\n#define isRoot(n) (!isNull(n) && ((n)->parent == NULL || isLeaf((n)->parent))) //判断是否为根节点\n#define isRed(n) (!isNull(n) && n->color == RED) //判断是否为红节点\n#define isBlack(n) (!isRed(n)) //判断是否为黑节点\n#define isLeft(n) (!isNull(parent(n)) && parent(n)->left == (n))\n#define isRight(n) (!isNull(parent(n)) && parent(n)->right == (n))\n#define Key(n) (*(n)->key)\n#define key_cmp(a,b) ((isNull(a) || isNull(b) || Key(a) < Key(b)) ? 0 : 1)\n#define parent(n) (isNull(n) ? NULL : (n)->parent)//父节点\n#define sibling(n) (isRoot(n) ? NULL : isLeft(n) ? n->parent->right : n->parent->left)\n#define grandpa(n) (parent(parent(n))) //祖父节点\n#define uncle(n) (sibling(parent(n))) //叔父节点\n\ntypedef enum{\n BLACK = 0,\n RED = 1,\n}Color;\n\ntypedef enum{\n FALSE = 0,\n TRUE = 1,\n}Boolean;\n\ntypedef struct Node{\n Color color; //颜色\n int* key; //键值\n struct Node *parent,*left,*right;\n}Node,*nodep;\n\ntypedef struct Tree{\n nodep root; //根节点\n int size; //节点数目\n}Tree,*treep;\n\nnodep node(int key); //创建一个节点\nnodep nullnode(nodep pa); //创建一个nil节点\ntreep tree(); //创建一棵空树\nBoolean insertNode(treep t,int key); //向树中插入\nBoolean _insertNode(treep t,nodep* rootp,int key); //原始的insert方法\nBoolean insertFix(treep t,nodep n); //插入修复\nBoolean deleteNode(treep t,int key); //删除树中指定的元素\nBoolean _deleteNode(treep t,nodep* rootp,int key); //原始的insert方法\nBoolean deleteFix(treep t,nodep n); //删除修复\nnodep rotate_left(treep t,nodep nd); //左旋nd节点\nnodep rotate_right(treep t,nodep nd); //右旋nd节点\nBoolean transplant(treep t,nodep dest,nodep src); //使用src子树替换dest子树\nnodep minimum(nodep root); //获取以root为根节点的最小键\nvoid visit(treep t); //遍历指定的数\nvoid _visit(nodep root); //遍历指定根节点子树\nnodep search(treep t,int key); //查找指定的键\n#endif"
},
{
"alpha_fraction": 0.623711347579956,
"alphanum_fraction": 0.6649484634399414,
"avg_line_length": 16.636363983154297,
"blob_id": "53a634d78c2d597c2db5608946f2e2c500e64a7d",
"content_id": "7461b7c5889841b9048b5d3c09d4fef48c5a75dd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 272,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 11,
"path": "/编程开发/数据库/mysql/mysql_1366问题.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#mysql 1366字符问题\n1. 查看不正确的字符编码 \n```\nshow variables like '%char%'\n```\n2. 修改编码 \n在配置文件中(/etc/mysql/mysql.conf.d/mysqld.cnf)中修改服务端编码. \n3. 转换表编码 \n```\nalter table `表名` convert to charset utf8;\n```\n"
},
{
"alpha_fraction": 0.7777777910232544,
"alphanum_fraction": 0.7777777910232544,
"avg_line_length": 8,
"blob_id": "d546ec3c7e4559279d6e19d1716a7f1112ab6985",
"content_id": "bf15796b6df888727f009f88ef4a976194502f7b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 46,
"license_type": "no_license",
"max_line_length": 10,
"num_lines": 2,
"path": "/外语/德语/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 德语学习\n与德语相关的学习资料\n"
},
{
"alpha_fraction": 0.8142856955528259,
"alphanum_fraction": 0.8142856955528259,
"avg_line_length": 33,
"blob_id": "2620cbcaf018464ca83c4a303c28d2be76759b11",
"content_id": "d9ba317df28d465be3144627bc2cf93698e75e2a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 186,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 2,
"path": "/DailyCodingProblems/400_sublist_sum_Goldman_Sachs/c/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 解题思路 \n这道题很显然应该用树状数组解决。常规暴力解法最坏情况下的时间复杂度为O(N),而且对于插入新元素及新查询非常不友好。 \n"
},
{
"alpha_fraction": 0.772849440574646,
"alphanum_fraction": 0.7876344323158264,
"avg_line_length": 32.818180084228516,
"blob_id": "609d2ebb2610bc7a38a343c351115b725d684d51",
"content_id": "f3d305eadbc875cbc9eac41dc824824ac1c63c11",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1798,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 22,
"path": "/编程开发/后端/缓存/缓存常见问题.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 缓存常见问题及解决方案\n## 1. 缓存穿透\n产生原因:`一直访问数据库中不存在的数据` \n由于访问的数据在数据库中不存在,那么每次访问缓存时,缓存中也获取不到指定的数据,所以每次访问该数据时都会访问一次数据库,如果有大量的请求访问到该数据,就会造成严重的性能问题,这种情况称作缓存穿透. \n**解决方案:** \n1. 对请求中的某个字段(例如id)作限定.\n2. 数据库中未查询到的值,也在缓存中做相应的记录,比如记为null\n3. 利用布隆过滤器(个人认为这种方式并不合适.) \n## 2. 缓存击穿 \n产生原因:`一个热点数据突然失效,导致大量数据库请求涌入` \n某个访问量特别大的数据,比如秒杀活动访问到的key,如果该key突然失效,就会导致大量依赖该key的请求查询不到数据,转而请求数据库,导致大量请求涌入数据库. \n**解决方案:** \n1. 对于热点数据,不设置过期时间或者保证在热点期间不过期. \n2. 设置互斥锁,请求某个key时,该key锁住,如果该key正好失效,当前请求转而请求数据库,重新设置key.由于在同一时间只有一个请求能访问到key,所以key的失效并不会影响到过多的请求. \n## 3. 缓存雪崩 \n产生原因:`多个key同时失效` \n多个正在被使用的key同时失效,会导致大量的请求涌入数据库.产生的原因有很多,比如key设置的过期时间相同或者服务器突然崩溃. \n**解决方案:** \n1. 为key设置随机的过期时间,这样可以尽可能的减小key同时过期的概率\n2. 使用高可用分布式缓存集群,确保缓存的高可用性. \n## 4. 双写不一致 \n原则: 先删除缓存,再更新数据库.这样可以尽可能的减小出错概率.\n"
},
{
"alpha_fraction": 0.5968000292778015,
"alphanum_fraction": 0.6000000238418579,
"avg_line_length": 20.482759475708008,
"blob_id": "3821400dd2fe167f8a07b4cbedd307f8301c0038",
"content_id": "9923e70e68e794dbc15f4b069831356295ba143c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 939,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 29,
"path": "/编程开发/后端/Wordpress/问题/Wordpress自定义路由不生效.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 自定义路由不生效\n这两天,公司有一个需求需要用到wordpress自定义路由的东西,可是按照网上的教程一步步敲下来,最后却没有用. \n网上比较常见的教程: \n```\nfunction new_rules(){\n add_rewrite_rule(\n 'article/(\\d+)/?$',//正则\n 'index.php?pagename=tag&tag_id=$matches[1]',//实际路由\n 'top' //放在最前端\n );\n}\nadd_action('init','new_rules');\n```\n然后上面的代码并没有什么卵用. \n之后经过一番查找,终于找到了原因: \n在加入新规则之后,需要对原有的规则进行刷新,否则新规则不生效. \n将上面的new_rules函数改成: \n```\nfunction new_rules(){\n global $wp_rewrite;\n add_rewrite_rule(\n 'article/(\\d+)/?$',\n 'index.php?pagename=tag&tag_id=$matches[1]',\n 'top'\n );\n $wp_rewrite->flush_rules(); //加上这句即可\n}\n```\n然后再刷新页面,新的规则就加好了. \n"
},
{
"alpha_fraction": 0.5745007395744324,
"alphanum_fraction": 0.5760368704795837,
"avg_line_length": 31.575000762939453,
"blob_id": "20711ad3f9714a15bf29aa2f436ff6542208647e",
"content_id": "25bc159ff1c59ae303bf4564af12457db8e12db2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1440,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 40,
"path": "/数据结构/树/红黑树/c/term2/rb.h",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#ifndef __RB_H__\n#define __RB_H__\n#include<stdio.h>\n#include<stdlib.h>\n\n#define parent(n) ((n) == NULL ? NULL : (n)->parent) //父节点\n#define isBlack(n) ((n) != NULL && (n)->color == BLACK)\n#define isRed(n) ((n) != NULL && (n)->color == RED)\n#define isLeaf(n) ((n) == NULL)\n#define isLeft(n) (parent(n) != NULL && parent(n)->left == (n)) //左节点\n#define isRight(n) (parent(n) != NULL && parent(n)->right == (n)) //右节点\n#define brother(n) (parent(n) == NULL ? NULL : isLeft(n) ? parent(n)->right : parent(n)->left)\n#define grandparent(n) (parent(parent(n))) //祖父节点\n#define uncle(n) (brother(parent(n))) //叔父节点\n\ntypedef enum{\n BLACK = 0,\n RED = 1,\n}Color;\n\ntypedef struct Node{\n Color color;\n int key;\n struct Node *left,*right,*parent;\n}Node,*nodep;\n\ntypedef struct Tree{\n nodep root;\n int size;\n}Tree,*treep; //红黑树\n\nint visit(nodep root); //遍历指定的树\ntreep new_tree(); //创建一个tree\nnodep new_node(int key); //创建新的node\nnodep rotate_left(nodep node); //左旋指定节点\nnodep rotate_right(nodep node); //右旋指定节点\nint insert(nodep* rootp,int key); //向指定的树中插入节点\nint _insert(nodep* entryp,int key,nodep pa,nodep* rootp); //插入节点底层\nint insert_fix(nodep* rootp,nodep node); //调整指定的节点\n#endif"
},
{
"alpha_fraction": 0.7222222089767456,
"alphanum_fraction": 0.7222222089767456,
"avg_line_length": 8,
"blob_id": "2aacf23e3848c0f5d57717000c033307860810a9",
"content_id": "febf1806dc48cf5cc5e7194c5e1676643af20111",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 44,
"license_type": "no_license",
"max_line_length": 10,
"num_lines": 2,
"path": "/博弈论/博弈的思维看世界/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 博弈的思维看世界\n教师:蒋文华\n"
},
{
"alpha_fraction": 0.5612648129463196,
"alphanum_fraction": 0.5849802494049072,
"avg_line_length": 20.08333396911621,
"blob_id": "5da30ab335f738b76c228d919c17dfe8643c658a",
"content_id": "46b493819a5a2be0845ae536ca8e8ed7e7304e56",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 516,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 24,
"path": "/DailyCodingProblems/386_char_sort_twitter/python/main.py",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#coding=utf-8\n#python3\n\nraw_str=input() # 输入字符串\n\nraw_str_list=list(raw_str)\nres_dict = {}\nfor v in raw_str_list:\n if not v in res_dict:\n res_dict[v] = 0\n res_dict[v]+=1\n\ndef sort_by_val(d):\n items = d.items()\n tmp_items = [[v[1],v[0]] for v in items]\n tmp_items.sort(reverse=True)\n return [(tmp_items[i][0],tmp_items[i][1]) for i in range(0,len(tmp_items))]\n\nres_list=sort_by_val(res_dict)\nret = ''\nfor v in res_list:\n for i in range(0,v[0]):\n print(v[1],end='')\nprint()\n"
},
{
"alpha_fraction": 0.6525821685791016,
"alphanum_fraction": 0.7934272289276123,
"avg_line_length": 27.399999618530273,
"blob_id": "fa7e7c4997449337817d5e69a030aa4e09d333dd",
"content_id": "bc05ed0bcc4350dd781bb0786be207da7a3c921b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 426,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 15,
"path": "/DailyCodingProblems/385_decrpt_apple/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 385_decrpt_apple\nGood morning! Here's your coding interview problem for today.\n\nThis problem was asked by Apple.\n\nYou are given a hexadecimal-encoded string that has been XOR'd against a single char.\n\nDecrypt the message. For example, given the string:\n```\n7a575e5e5d12455d405e561254405d5f1276535b5e4b12715d565b5c551262405d505e575f\n```\nYou should be able to decrypt it and get:\n```\nHello world from Daily Coding Problem\n```\n"
},
{
"alpha_fraction": 0.42522889375686646,
"alphanum_fraction": 0.4598168730735779,
"avg_line_length": 20.39130401611328,
"blob_id": "40bd460f439d937307f2d34303eea15c6a165fc6",
"content_id": "d3b349731cbe6d70f2043281d9476207c3307646",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 993,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 46,
"path": "/数据结构/树/红黑树/c/term3/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#include \"rb.h\"\n#include<sys/time.h>\n#include<stdlib.h>\n#include<time.h>\n#define N 1000000\n\n\nint main(){\n srand(time(NULL));\n treep t = tree();\n struct timeval start;\n struct timeval end;\n gettimeofday(&start,NULL);\n for(int i = 0; i < N; i++){\n insertNode(t,rand());\n }\n gettimeofday(&end,NULL);\n int us = (end.tv_usec - start.tv_usec);\n int s = (end.tv_sec - start.tv_sec) - (us < 0);\n us = (us + 1000000) % 1000000;\n printf(\"time:%d.%06d\\n\",s,us);\n printf(\"total: %d\\n\",t->size);\n\n // visit(t);\n // for(int i = 0; i < N; i++){\n // visit(t);\n // deleteNode(t,i);\n // }\n // visit(t);\n // printf(\"===========\\n\");\n // for(int i = 0; i < 20; i++){\n // if(i % 2)\n // deleteNode(t,i);\n // }\n // deleteNode(t,13);\n // visit(t);\n\n\n // nodep res = search(t,3);\n // if(res){\n // printf(\"%d\\n\",Key(res));\n // }else{\n // printf(\"记录不存在\\n\");\n // }\n return 0;\n}"
},
{
"alpha_fraction": 0.7742782235145569,
"alphanum_fraction": 0.7742782235145569,
"avg_line_length": 37.099998474121094,
"blob_id": "898f0adf330acbea78b47464be6d5814899c339c",
"content_id": "5012e3616ae91b008b62ae035410ef5f80701201",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 517,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 10,
"path": "/编程开发/Linux/常见问题/php多版本源debian无法使用.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 在debian中无法使用ondrej/php ppa源\n在debian中无法使用ondrej/php源,添加源之后显示opengpg无效。 \n遇到此问题时可以切换成另一个源,这里是操作的过程:\n```\napt install apt-transport-https lsb-release ca-certificates\nwget -O /etc/apt/trusted.gpg.d/php.gpg https://packages.sury.org/php/apt.gpg\necho \"deb https://packages.sury.org/php/ $(lsb_release -sc) main\" > /etc/apt/sources.list.d/php.list\napt update\n```\n经过这些操作之后,就可以安装多个php版本了.\n"
},
{
"alpha_fraction": 0.75,
"alphanum_fraction": 0.75,
"avg_line_length": 7,
"blob_id": "918e1af8f5e1c92c40267a70f0e8a59d0cba1a37",
"content_id": "f62ae44e1d96f7e095d4b39c2c2447405791b02c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 30,
"license_type": "no_license",
"max_line_length": 9,
"num_lines": 2,
"path": "/数据结构/表/跳跃表/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 跳跃表\nRedis中有应用\n"
},
{
"alpha_fraction": 0.6259542107582092,
"alphanum_fraction": 0.6793892979621887,
"avg_line_length": 9.916666984558105,
"blob_id": "5ed5c15e66f669f50e08d55a2ce38017bf6c5d89",
"content_id": "d87ead4096ede22704a30a7c91b128cbca49eb2c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 295,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 12,
"path": "/编译器/编译过程.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 程序编译过程 \n将一种高级语言翻译成低级语言的过程,我们称为编译 \n编译的常见流程: \n```\n1. 词法分析\n2. 语法分析\n3. 语义分析\n4. 中间代码生成\n5. 与机器无关的中间代码优化\n6. 与机器有关的中间代码优化\n7. 目标代码\n```\n"
},
{
"alpha_fraction": 0.40963855385780334,
"alphanum_fraction": 0.4440619647502899,
"avg_line_length": 19.75,
"blob_id": "645e7e2ec6358185496784c401a9d3cf73dae4c3",
"content_id": "6b13a145fc70da5eb67d2a127719eab752a163c4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 593,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 28,
"path": "/算法/排序算法/插入排序/c/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#include<stdio.h>\n\nint insertion_sort(int arr[],int size){\n int i,j;\n for(i = 1; i < size; i++){\n int tmp = arr[i];\n for(j = i-1; j >= 0 && arr[j] > tmp; j--) arr[j+1] = arr[j];\n arr[j+1] = tmp;\n }\n return 0;\n}\n\nint main(){\n int i;\n int arr[] = {\n 1,3,4,0,0,5,8,5,9,0\n };\n int size = sizeof(arr)/sizeof(arr[0]);\n\n printf(\"排序前:\");\n for(i = 0; i < size; i++)printf(\"%d \",arr[i]);\n puts(\"\");\n insertion_sort(arr,size);\n printf(\"排序后:\");\n for(i = 0; i < size; i++)printf(\"%d \",arr[i]);\n puts(\"\");\n return 0;\n}\n"
},
{
"alpha_fraction": 0.4472843408584595,
"alphanum_fraction": 0.5239616632461548,
"avg_line_length": 15.473684310913086,
"blob_id": "161417511baf37b56b7137131d7e563e2c9e8012",
"content_id": "443b8d83123129ebedc08eb768f440db3d3e596e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 313,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 19,
"path": "/DailyCodingProblems/403_rand5_get_rand7_Two_Sigma/python/main.py",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "from random import *\n\ndef rand5():\n return int(random()*5)\n\ndef rand7():\n while True:\n res = rand5()*5+rand5()\n if res < 21:\n break\n return res//3\n\nN = 100000\ncount = [0]*7\nfor i in range(0,N):\n idx = rand7()\n count[idx]+=1\nfor i in range(0,7):\n print(count[i]*1.0 / N)\n"
},
{
"alpha_fraction": 0.75,
"alphanum_fraction": 0.75,
"avg_line_length": 10,
"blob_id": "c89d08c1d089fc6ae76b28211a23132abcd04ffe",
"content_id": "ebcbbb76d519761954593a6e299ac8724531185a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 60,
"license_type": "no_license",
"max_line_length": 10,
"num_lines": 2,
"path": "/编译器/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 编译器相关的笔记\n编译器学习的点点滴滴 \n"
},
{
"alpha_fraction": 0.6599326729774475,
"alphanum_fraction": 0.6700336933135986,
"avg_line_length": 11.375,
"blob_id": "53a38551840549a41904f8df2d3ae854be7259ab",
"content_id": "8213446b6596ad56f6ac5c23a47a7040f4a84bc5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 689,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 24,
"path": "/外语/韩语/词汇/副词.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 常用副词 \n```\n1. 表示肯定,确定\n결코 绝对\n과연(果然) 的确,真\n마침 恰恰,正好\n물론 当然\n사실(事实)事实上\n정말 真的\n참 真\n참으로 当然\n사실 저는 이 문제에 대하여 전혀 모르고 있썼습니다. 事实上,我对这个问题完全不了解\n결코 안된다. 万万不可\n2. 表示假定,让步\n가령 即使,加入\n만일 (万一) 如果,假如\n만약 如果,假如\n설사 即使\n비록 即使,尽管\n시간이 급한 만큼 가령 비가 오더라도 일이 끝나는 대로곧 돌아와야 하겠습니다. 时间紧,只要事情一办完,即使是下雨也一定要赶回来.\n3. 表转折\n그런데 但是\n그러나 但是\n```\n"
},
{
"alpha_fraction": 0.695035457611084,
"alphanum_fraction": 0.716312050819397,
"avg_line_length": 18.714284896850586,
"blob_id": "c9ae9106e10f93f0c60178fb6f83b1c79442a622",
"content_id": "8b4226db86a68c7c2dad8e90f279f4eb63bf08c6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 321,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 7,
"path": "/编程开发/设计模式/依赖注入.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 依赖注入(DI) \n依赖注入是一种编程思想. \n将当前对象需要的依赖对象引入到当前对象中,而不直接参与对依赖对象的创建和销毁. \n常见的注入方式有一下几种: \n1. 通过a的接口,把b传入 \n2. 通过a的构造方法,将b传入 \n3. 通过a的属性,将b传入. \n"
},
{
"alpha_fraction": 0.6315789222717285,
"alphanum_fraction": 0.6315789222717285,
"avg_line_length": 12.75,
"blob_id": "55b28932ca01760c4d3a7f7e1e7b91d878614a13",
"content_id": "5c65c3e3003ed9d2176561301c616ace7d693cef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 129,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 4,
"path": "/外语/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 外语学习\n学习各种语言的学习笔记.\n路线依次是:`韩语->日语->德语->意大利语` \n英语不受约束. \n"
},
{
"alpha_fraction": 0.6274510025978088,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 11.25,
"blob_id": "8039afab45c786866609dd811d7d6e292c060b55",
"content_id": "6b7a695515e22f5f179b680c3486b7fede8992fa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 65,
"license_type": "no_license",
"max_line_length": 19,
"num_lines": 4,
"path": "/外语/英语/词汇/20191129.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "1. Recommendation \n推荐,推荐信 \n2. Interaction \n交互 \n"
},
{
"alpha_fraction": 0.4054833948612213,
"alphanum_fraction": 0.43290042877197266,
"avg_line_length": 22.100000381469727,
"blob_id": "c0fcab9754a4784163776d7dda8e6e203cf9483c",
"content_id": "0f533b6199fa65d293126f739a84b94dc47f92de",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 713,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 30,
"path": "/算法/排序算法/希尔排序/c/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#include<stdio.h>\n\nint shell_sort(int arr[],int size){\n int i,j,k;\n int step;\n for(step = size / 2; step > 0; step /= 2){ //更新步长\n for(i = step; i < size; i++){\n int tmp = arr[i];\n for(j = i - step; j >= 0 && arr[j] > tmp; j -= step) arr[j+step] = arr[j];\n arr[j+step] = tmp;\n }\n }\n return 0;\n}\n\nint main(){\n int arr[] = {\n 1,3,4,0,0,5,8,5,9,0\n };\n int size = sizeof(arr) / sizeof(arr[0]);\n int i;\n printf(\"排序前:\");\n for(i = 0; i < size; i++) printf(\"%d \",arr[i]);\n puts(\"\");\n shell_sort(arr,size);\n printf(\"排序后:\");\n for(i = 0; i < size; i++) printf(\"%d \",arr[i]);\n puts(\"\");\n return 0;\n}\n"
},
{
"alpha_fraction": 0.46715328097343445,
"alphanum_fraction": 0.6861313581466675,
"avg_line_length": 136,
"blob_id": "40d184a9cdd9fb03c1258175255908f80a0dcacb",
"content_id": "312f25b778d6cfb6c6f9aa7a2e6cd9850f03949d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 145,
"license_type": "no_license",
"max_line_length": 136,
"num_lines": 1,
"path": "/leetcode/169-求众数/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "[解题思路](https://github.com/ixysoft/notes/tree/master/%E7%AE%97%E6%B3%95/%E4%BD%8D%E8%BF%90%E7%AE%97/%E5%A4%9A%E6%95%B0%E5%85%83%E7%B4%A0)\n"
},
{
"alpha_fraction": 0.8169868588447571,
"alphanum_fraction": 0.8281092047691345,
"avg_line_length": 50.94736862182617,
"blob_id": "feec52e61641f28081924a2e108f94664beb904b",
"content_id": "cf8f0b1ff85c176dd1fa2993225175dcaa3daa33",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1881,
"license_type": "no_license",
"max_line_length": 399,
"num_lines": 19,
"path": "/编程开发/后端/Laravel/问题/Laravel-totem问题.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# Laravel-Totem问题\n1. migrate问题\n在执行totem的migrate时,在mysql5.7中出现\n```\nCHARACTER SET utf8mb4 COLLATE `utf8mb4_unicode_ci`附近出错的问题.\n```\n在mysql中执行提示的语句,也的确有问题. \n经过排查发现是在执行mysql的change时,不能设置编码,故把CHARSET SET utf8mb4这部分去掉,再执行migrate成功. \n确定问题之后,在相应的migrate文件中,更改指定指端的后面加上`->charset('')`. \n操作之后,清空了原先的数据库,然后再执行一遍`php artisan migrate`之后,问题解决. \n2. 无法自动执行任务\n在laravel中,无论执行`php artisan schedule:run`还是`php artisan schedule:list`都没有任何的效果. \n问题原因:laravel中存在配置缓存,在安装好totem之后,我们应该做一次`php artisan cache:clear`操作. \n3. totem创建新任务不成功.....\n这个问题困恼了我两三天.在网上也没查到相关的问题,只能自己排查问题.在报错的地方,我发现是由于aliyunmns这个第三方库导致的问题,但是没有办法.经过逐步排查后,我怀疑是由于项目刚创建,这个地方没有配置好.回想了一下,应该还没有用到这个地方,所以在app.php中,把`App\\Library\\AliyunMns\\LaravelMnsServiceProvider::class`这个provider注释掉了,然后再执行,之前的问题消失,但是出现了一个新问题.报错无法连接mns很容易看出导致这个错误的原因是还存在其他地方在使用mns.经过一番排查,在.env文件中找到`QUEUE_DRIVER=mns`想,将其改成`QUEUE_DRIVER=sync`之后再执行创建操作,之前的问题消失了,此时执行`php artisan schedule:list`显示也是正常的.计划任务顺利执行. \n原因分析: \n由于第三方库导致的问题. \n解决: \n如果对库不熟悉且该库目前阶段有更好的替代方案,关闭库比排查不熟悉的库更有效. \n"
},
{
"alpha_fraction": 0.6720686554908752,
"alphanum_fraction": 0.7063870429992676,
"avg_line_length": 35.17241287231445,
"blob_id": "e1fd7f105e5693ec4b3b1ddc59b9bcc13cecd3d5",
"content_id": "77889da27f7f67e38eccc208fa2c3d14ec230e64",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1235,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 29,
"path": "/编程开发/谷歌问题/谷歌字体.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 谷歌字体 \n谷歌字体在国内经常性的不好使,在多方查询资料后,找到了一个靠谱的代理网站(中科大的) \n中国科学技术大学\n主页 https://lug.ustc.edu.cn/wiki/start\n\nhttps://fonts.proxy.ustclug.org\n\n这里是一些其他google相关的代理网站\n```\nfonts.gstatic.com fonts-gstatic.proxy.ustclug.org\nfonts.googleapis.com fonts.proxy.ustclug.org\najax.googleapis.com ajax.proxy.ustclug.org\n```\n\n除了谷歌代理外,下面是一些其它代理地址 \n```\nregistry-1.docker.io docker.mirrors.ustc.edu.cn\npackages.elastic.co elastic.proxy.ustclug.org\nppa.launchpad.net launchpad.proxy.ustclug.org\narchive.cloudera.com/cdh5/ cloudera.proxy.ustclug.org/cdh5/\ndownloads.lede-project.org lede.proxy.ustclug.org\ndownloads.openwrt.org openwrt.proxy.ustclug.org\nregistry.npmjs.org npmreg.proxy.ustclug.org\nwww.npmjs.com npm.proxy.ustclug.org\nthemes.googleusercontent.com google-themes.proxy.ustclug.org\nsecure.gravatar.com gravatar.proxy.ustclug.org\n```\n\n地址来源:[某网友博客](https://blog.fm618.org/2019/02/14/%E8%B0%B7%E6%AD%8C%E5%AD%97%E4%BD%93%E7%9A%84%E4%BB%A3%E7%90%86/)\n"
},
{
"alpha_fraction": 0.4079602062702179,
"alphanum_fraction": 0.4564676582813263,
"avg_line_length": 21.33333396911621,
"blob_id": "2cf1572ba82e4ec4266d1f9912d55dff29a35b94",
"content_id": "b210075afb1d5d04cdbccc4f80da3709efdf957e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 826,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 36,
"path": "/DailyCodingProblems/386_char_sort_twitter/c/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#include<stdio.h>\n#include<string.h>\n#include<stdlib.h>\n\nint cmp(const void* a,const void* b){\n int pa = ((int*)a)[0];\n int pb = ((int*)b)[0];\n return pa < pb;\n}\n\nint main(){\n int char_cnt[128][2] = {{0,0}}; //字符出现次数统计\n char str[1024]; //字符串\n int len;\n int i,j,k;\n\n fgets(str,1024,stdin);\n len = strlen(str);\n str[len-1] = '\\0';\n for(i = 0; str[i] != '\\0'; i++){\n char_cnt[str[i]][0]++;\n char_cnt[str[i]][1]=str[i];\n }\n qsort(char_cnt,128,sizeof(int)*2,cmp);\n for(i = k = 0; i < 128 && char_cnt[i][0] > 0; i++){\n //printf(\"%c[%d],%d\\n\",char_cnt[i][1],char_cnt[i][1],char_cnt[i][0]);\n //continue;\n for(j = 0; j < char_cnt[i][0]; j++){\n str[k++] = char_cnt[i][1];\n }\n }\n\n puts(str);\n \n return 0;\n}\n"
},
{
"alpha_fraction": 0.6129032373428345,
"alphanum_fraction": 0.6129032373428345,
"avg_line_length": 12.285714149475098,
"blob_id": "9626afcbba55eacbc3905d07e3117a6239b81c3a",
"content_id": "926060a91dce70979f9a442ae124d02c03def1a3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 201,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 7,
"path": "/外语/韩语/语法/(으)ㄹ 턱이 없다.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "-(으)ㄹ 턱이 없다\n用于动词词干后,表示没有理由做某事,不会做某事. \ne.g. \n```\n기대도 하지마!그 사람이 올 턱이 없어.\n别指望了!那个傻x不会回来了.\n```\n"
},
{
"alpha_fraction": 0.6830000281333923,
"alphanum_fraction": 0.7674999833106995,
"avg_line_length": 26.79166603088379,
"blob_id": "08097156381537ba49cd64ff769dcc952699575d",
"content_id": "451a971613406496da2607cc446b16dc9e7faaea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3082,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 72,
"path": "/硬件/树莓派/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 树莓派折腾笔记\n1. 内置chromium无法启动 \n右键菜单中的快捷方式,点击属性,桌面项,在《命令》输入框最後面加上--no-sandbox \n2. 树莓派4执行gpio readall显示找不到主板信息 \n这是由于树莓派原装的wiringPi库没有及时更新导致的,只需要更新wiringPi库即可正常 \n```\ncd /tmp\nwget https://project-downloads.drogon.net/wiringpi-latest.deb\nsudo dpkg -i wiringpi-latest.deb\n```\n安装完成后,可以通过:`gpio -v`检查是否安装成功. \n3. 切换桌面\n在使用树莓派的时候,原装桌面有时候不一定好用,所以有时候就会需要切换桌面,此时我们可以使用: \n```\nupdate-alternatives --config x-session-manager\n```\n该指令执行完之后,会出现一个选择界面,在界面中选择需要的桌面即可. \n4. 树莓派国内源\n```\n# 编辑 `/etc/apt/sources.list` 文件,删除原文件所有内容,用以下内容取代:\ndeb http://mirrors.tuna.tsinghua.edu.cn/raspbian/raspbian/ buster main non-free contrib\ndeb-src http://mirrors.tuna.tsinghua.edu.cn/raspbian/raspbian/ buster main non-free contrib\n\n# 编辑 `/etc/apt/sources.list.d/raspi.list` 文件,删除原文件所有内容,用以下内容取代:\ndeb http://mirrors.tuna.tsinghua.edu.cn/raspberrypi/ buster main ui\n```\n5. 给树莓派添加LCD屏幕驱动 \n```\n1. 从github克隆LCD-show需要的驱动库\ngit clone https://github.com/goodtft/LCD-show.git\n2. 克隆完成后,切换到库目录\ncd LCD-show\n3. 执行屏幕对应的驱动文件,我的是3.5寸的屏幕,所以执行LCD35-show\n./LCD35-show\n```\n6. 添加自定义分辨率 \n有时候树莓派支持的分辨率可能并不能跟自己电脑显示器的分辨率对应. \n这个时候我们可以自己新建一个分辨率 \n```\n1. 计算分辨率参数\ncvt 1366 768\n将会输出\n# 1368x768 59.88 Hz (CVT) hsync: 47.79 kHz; pclk: 85.25 MHz\nModeline \"1368x768_60.00\" 85.25 1368 1440 1576 1784 768 771 781 798 -hsync +vsync\n2. 新建分辨率\nxrandr --newmode \"1368x768_60.00\" 85.25 1368 1440 1576 1784 768 771 781 798 -hsync +vsync\n3. 将分辨率添加到显示输出\nxrandr --addmode HDMI-1 1368x768_60.00\n如果不知道显示输出怎么填可以执行一次xrandr\n4. 设置显示输出使用该分辨率\nxrandr --output HDMI-1 --mode 1368x768_60.00\n5. 持久化\n现在终端中执行这些指令,如果出现什么问题,重启即可,如果没有问题,可以将2,3,4步的命令写入到\n/etc/X11/Xsession.d/30x11-set_resolution中\n```\n6. 安装屏幕键盘 \n```\napt install florence\n执行florence命令将会弹出屏幕键盘.\n如果出现org.florence没有安装的信息,则执行sudo glib-compile-schemas /usr/share/glib-2.0/schemas/\n之后再次执行florence命令即可. \n```\n安装可以在登陆界面显示的屏幕键盘\nsudo apt-get install lightdm-gtk-greeter\n之后编辑配置文件\nvim /etc/lightdm/lightdm-gtk-greeter.conf\n编辑配置文件:\n[greeter]\nkeyboard=florence --no-gnome --focus &\n\n## 链接显示屏无反应的解决 \n从github中下载LCD-Show库,运行其中的LCD-HDMI文件."
},
{
"alpha_fraction": 0.47566717863082886,
"alphanum_fraction": 0.48665618896484375,
"avg_line_length": 20.593219757080078,
"blob_id": "28a7e939d803b43c942cc454b5604e679bb72e24",
"content_id": "6dd4acb00658d4bbc8a55bad8ff757e1f703985d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1324,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 59,
"path": "/DailyCodingProblems/398_deleteKthElemInList_Amazon/python/main.py",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n#coding:utf-8\n# 数据节点类\nclass Node:\n next = None\n val = 0\n def __init__(self,val):\n self.val = val\n\n# 链表类\nclass List:\n first = None\n length = 0\n def __init__(self):\n self.first = None\n self.length = 0\n # 插入新节点\n def insert(self,val):\n node = Node(val)\n node.next = self.first\n self.first = node\n self.length += 1\n return self\n # 删除指定位置的节点\n def delete(self,k):\n if self.length == 0: #list长度为0\n return False\n if k == 0:\n first = self.first.next\n del self.first\n self.first = first\n else:\n first = self.first\n for i in range(1,k):\n first = first.next\n if not first.next is None:\n next = first.next\n first.next = first.next.next\n del next\n return True\n \n def visit(self):\n first = self.first\n while not first is None:\n print(first.val,end = '')\n if not first.next is None:\n print('->',end = '')\n first = first.next\n print('')\n\nli = List()\nli.insert(1)\nli.visit()\nli.insert(3)\nli.visit()\nli.insert(4)\nli.visit()\nli.delete(1)\nli.visit()\n"
},
{
"alpha_fraction": 0.43062201142311096,
"alphanum_fraction": 0.46889951825141907,
"avg_line_length": 10.61111068725586,
"blob_id": "1c8dcf21108514b58c5d7a29eb27311a0b599c51",
"content_id": "c7d042cc248602eae0823ac3e9c9af6b9250a9b1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 209,
"license_type": "no_license",
"max_line_length": 29,
"num_lines": 18,
"path": "/算法/位运算/加一/c/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#include<stdio.h>\n\nint add1(int n){\n int carry = 1;\n while(n&carry){\n n^=carry;\n carry<<=1;\n }\n n^=carry;\n\n return n;\n}\n\nint main(){\n printf(\"%d\\n\",add1(127));\n\n return 0;\n}\n"
},
{
"alpha_fraction": 0.41118124127388,
"alphanum_fraction": 0.42741209268569946,
"avg_line_length": 17.338842391967773,
"blob_id": "0038bace311b28e407425bc8117e0586f04ad8a4",
"content_id": "f9920a509f19c958901927a1e6e75d50ae91da32",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3498,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 121,
"path": "/数据结构/树/红黑树/programiz.com/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# programiz.com中的思路 \n**红黑树的基本属性:** \n1. 每个节点都有配色,非黑即红\n2. 根节点为黑色\n3. 叶子结点(NIL)为黑色\n4. 如果一个红色的节点存在孩子,则孩子总是黑色\n5. 从根节点到每个叶子节点的简单路径上总存在相同数目黑节点\n\n**每个节点存在下面的几个属性:** \n1. color\n2. key\n3. leftChild\n4. rightChild\n5. parent(root节点为parent为NULL) \n\n**红黑树如果维持自平衡?:** \n红黑树中节点中的颜色概念是为了平衡树结构而设定的. \n从根节点到叶子节点的每条简单路径中的黑节点个数相同保证了任意一条路径不会超过另一条路径长度的1倍\n\n## 红黑树的操作: \n**有很多可以在红黑树上执行的操作:** \n### 红黑树中子树的旋转 \n#### 左旋\n算法: \n1. 假设树的初始状态为: \n p\n |\n [x]\n / \\\n α [y]\n / \\\n β γ\n2. 如果`y`存在左子树,将x设定为y左子树的父节点\n p\n |\n [x]\n / \\\n α β [y]\n \\\n γ\n3. 如果`x`的父节点为`NULL`,将`y`设为树的根节点\n4. 否则如果`x`为`p`的左节点,使得`y`成为`p`的左节点\n5. 否则将`y`设定为`p`的右节点\n p\n |\n [x] [y]\n / \\ \\\n α β γ\n6. 将`y`设定为`x`的父节点\n\n#### 右旋\n1. 初始结构为: \n p\n |\n [y]\n / \\\n [x] α\n / \\\n γ β\n2. 如果`x`存在右子树,将`y`设定为`x`右子树的父节点\n p\n |\n [y]\n / \\\n [x] β α\n /\n γ\n3. 如果`y`父节点为`NULL`,将`x`设定为树的根节点\n4. 否则如果`y`为`p`的右孩子,将`x`设为`p`的右孩子\n5. 否则将`x`设为`p`的左孩子\n p\n |\n [x] [y]\n / / \\\n γ β α\n6. 将`x`设为`y`的父节点 \n\n#### 右左,左右旋转(RL,LR)\n**在LR旋转中,我们首先进行左旋,之后执行右旋** \n1. 在`x-y`上执行左旋 \n p p\n | |\n [z] [z]\n / \\ / \\\n [x] δ --> [y] δ\n / \\ / \\\n α [y] [x] γ\n / \\ / \\\n β γ α β\n2. 在`y-z`上执行右旋\n p p\n | |\n [z] [y]\n / \\ / \\\n [y] δ --> [x] [z]\n / \\ / \\ / \\\n [x] γ α β γ δ\n / \\\nα β\n\n**右左旋转与左右旋转相反:**\n先执行右旋,在再执行左旋. \n\n**向红黑树中插入一个节点:** \n被插入的节点总是被设定为红色,在插入新节点后,如果红黑树的性质被破坏的话,我们需要做下面两个操作: \n1. 重新上色\n2. 旋转\n\n### 插入节点算法 \n1. 将`y`设为`nil`,`x`设为树的根节点\n2. 判断树是否为空(`x`==`nil`),如果是空树,将插入节点设为树的根节点\n3. 如果不是,重复下面步奏,直到节点为`nil`: \n a. 比较`newKey`与`rootKey` \n b. 如果`newKey`>`rootKey`,说明结果可能在右边,遍历右子树 \n c. 否则遍历左子树 \n4. 将叶子节点的父节点作为`newNode`的右节点\n5. 如果`leafKey`>`newKey`,将`newNode`设为`rightChild`.\n6. 否则,将`newNode`设为`leftChild`\n7. 将newNode的左右子节点设为NULL\n8. 将newNode设为Red\n9. 调用插入修复函数,是的红黑树保持其基本性质"
},
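A minimal Python sketch of the left rotation described above; the node fields follow the attribute list in this note, NIL handling is simplified, and the function name is my own:

```python
def left_rotate(tree, x):
    y = x.right              # y is x's right child
    x.right = y.left         # step 2: y's left subtree becomes x's right subtree
    if y.left is not None:
        y.left.parent = x
    y.parent = x.parent      # steps 3-5: y takes x's place under p
    if x.parent is None:
        tree.root = y
    elif x is x.parent.left:
        x.parent.left = y
    else:
        x.parent.right = y
    y.left = x               # step 6: x becomes y's left child
    x.parent = y
```

The right rotation is the exact mirror image: swap every left/right in the sketch.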
{
"alpha_fraction": 0.4829467833042145,
"alphanum_fraction": 0.5525238513946533,
"avg_line_length": 18.810810089111328,
"blob_id": "6fa9a4d786b0e7e4f7bd1f494c4de0ef1a010361",
"content_id": "56768625b4d4100d97e2cbfcf6574c879064c84a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 811,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 37,
"path": "/DailyCodingProblems/381_base64_paypal/python/main.py",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#coding=utf-8\nfrom math import *\nimport os\n\n# 获取16进制位数\ndef getBits(a):\n if a == 0:\n return 0\n return floor(log(a,16))+1\n\n# 获取16进制数编码\ndef base64(a):\n tail = 1\n ret = ''\n table = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'\n while a > 0:\n if a % 64 == 0 and tail == 1:\n ret = '=' + ret\n else:\n tail = 0\n ret = table[a%64] + ret\n a = a // 64\n return ret\n\ntry: # python2\n a = int(raw_input('请输入一串16进制数:').strip(),16)\nexcept Exception: # python3\n a = int(input('请输入一串16进制数:'),16)\nexcept: #异常处理\n print('数字格式错误')\n os._exit(1)\n\nbits = getBits(a)\npadBits = int((bits//6+1)*6 - bits)%6\nna = a << (4*padBits)\n\nprint(base64(na))\n"
},
{
"alpha_fraction": 0.7435897588729858,
"alphanum_fraction": 0.7435897588729858,
"avg_line_length": 18.5,
"blob_id": "510d2f97b3e475d6dd56998cc7827556c58f4c8b",
"content_id": "454b741f751ec44eb9a2a67bbeddde6bb7bd70f8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 305,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 10,
"path": "/编程开发/git/commit提交中文乱码.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# commit乱码\n最近在电脑上使用MobaXterm,使用`git commit`提交代码时,中文的注释总是变成数字形式,用户体验极差,经过google查询之后,找到了方案:\n```\ngit config --global core.quotepath false\n```\n或者手动修改~/.gitconfig,添加:\n```\n[core]\nquotepath = false\n```\n"
},
{
"alpha_fraction": 0.426870733499527,
"alphanum_fraction": 0.4897959232330322,
"avg_line_length": 15.333333015441895,
"blob_id": "6f10970b1906587463b9385c26c1218ac48eaf0e",
"content_id": "d38d9b1b8ec560d16fa7b96fde335c2f1ae9ae10",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 696,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 36,
"path": "/DailyCodingProblems/403_rand5_get_rand7_Two_Sigma/c/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#include<stdio.h>\n#include<stdlib.h>\n#include<time.h>\n\nint rand5();\nint rand7();\n\nint main(){\n srand(time(NULL));\n int i;\n int count[7] = {0};\n int N = 10000000;\n for(i = 0; i < N; i++){\n int idx = rand7();\n count[idx]++;\n }\n for(i = 0; i < 7; i++){\n printf(\"%d,%.04f\\n\",i,count[i] * 1.0 / N);\n }\n\n}\n\n//系统给定的函数\nint rand5(){\n return rand() % 5; //此处我们假设等概率\n}\n\n\n//我们需要实现的函数\nint rand7(){\n int res;\n do{\n res = rand5() * 5 + rand5(); //可以等概率生成0~24之间的数\n }while(res >= 21); //等概率生成0~20之间的数\n return res / 3; //等概率生成0~6之间的数\n}\n"
},
{
"alpha_fraction": 0.6239346861839294,
"alphanum_fraction": 0.6335227489471436,
"avg_line_length": 18.55555534362793,
"blob_id": "46e6116650aea30b381b037bc54f5adeb3640dc3",
"content_id": "40c4e49f32726f61f25c527484e1a27487164bd9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3774,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 144,
"path": "/编程开发/后端/Yii2/Html帮助类.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# Yii Html帮助类\n类:yii\\helper\\Html\n1. 生成标签 \n```\nHtml::tag(标签名,内容,属性)\n```\n如: \n```\nHtml::tag('p','Hello,World!',['class'=>'test']);\n将会生成\n<p class=\"test\">Hello,World!</p>\n这样一个元素\n```\n2. 生成CSS类和样式\n```\nHtml::addCssClass(&$options,class属性值); //添加新类属性\nHtml::removeCssClass(&$options,class属性值);//删除类属性值\n```\n示例: \n```\n$options = ['class'=>'btn btn-default'];\n\nif($type === 'success'){\n Html::removeCssClass($options,'btn-default');\n Html::addCssClass($options,'btn-success');\n}\n\necho Html::tag('div','Hello,Test!',$options);\n```\n3. 标签内容的转码和解码 \n为了让内容能够安全的显示,一些HTML特殊字符应该被转码.在PHP中,这个操作由`htmlspecialchars`和`htmlspecialchars_decode`完成\n```\n$userName = Html::encode($user->name);\necho $userName;\n$decodeUserName = Html::decode($userName);\n```\n4. 表单 \n处理表单标签是大量的重复性劳动并且容易出错.因此,Yii提供了一些列的方法来辅助处理表单标签. \n**创建表单:** \n```\nHtml::beginForm(地址ID,请求方法,表单数据方式);\nHtml::endForm(); //关闭表单\n```\n示例: \n```\necho Html::beginForm(['/test','id'=>'testForm'],'post',['enctype'=>'multipart/form']);\necho Html::endFOrm();\n```\n5. 按钮\n```\nHtml::button(按钮值,按钮属性) //普通按钮\nHtml::submitButton(按钮值,按钮属性) //提交按钮\nHtml::resetButton(按钮值,按钮属性) //清空按钮\n```\n6. 输入栏 \ninput相关的方法有两组:以`active`开头的被称为`active inputs`,`active inputs`依据指定的模型和属性获取数据,而普通input则直接指定数据. \n```\nHtml::input(类型,输入框name,输入框值,属性); //普通输入\nHtml::activeInput(类型,模型,模型属性名,属性); //active输入\n```\n还有一些其他的input类型参考手册\n6.1. 单选复选框 \n```\nHtml::radio(名,选择,属性);\nHtml::activeRadio(模型,名,属性)\n\nHtml::checkbox(名,选择,属性)\nHtml::activeCheckbox(模型,名,属性)\n```\n**示例:** \n```\necho Html::radio('agree',true,['label'=>'I agree']);\necho activeRadio($model,'agree',['class'=>'agreement']);\n\necho Html::checkbox('agree',true,['label'=>'I agree']);\necho Html::activeCheckbox($model,'agree',['class'=>'agreement']);\n```\n6.2. 下拉框\n```\nHtml::dropDownList(名称,当前值,键值对)\nHtml::activeDropDownList(模式,名称,键值对)\n\nHtml::listBox(名称,当前值,键值对)\nHtml::activeListBox(模型,名称,键值对)\n```\n6.3. 多选框列表 \n```\nHtml::checkboxList(名称,已选值,键值对)\nHtml::activeCheckboxList(模型,名称,键值对)\n```\n6.4 单选框列表\n```\nHtml::radioList(名称,已选值,键值对)\nHtml::activeRadioList(模型,名称,键值对)\n```\n6.5. 标签和错误\n```\nHtml::label(标签值,名称,属性)\nHtml::activeLabel(模型,名称,属性)\n\nHtml::error(模型,名称,属性) //显示单个错误\nHtml::errorSummary(模型,属性) //显示单个或多个错误\n```\n6.6. Input的名和值 \n```\n此处方法看手册\n```\n7. 样式表和脚本\n```\nHtml::style(样式)\n\n外联外部css文件:\nHtml::cssFile('@web/css/ie5.css',['condition'=>'IE 5'])\n生成\n<!-- [if IE 5]>\n <link href=\"http://example.com/css/ie5.css\" />\n<![endif]-->\n外联js文件\nHtml::jsFile('@web/js/main.js')\n```\n8. 超链接 \n```\nHtml::a(标题,[地址,id],属性)\n\nmailto:\nHtml::mailto(标题,地址)\n```\n9. 图片 \n```\nHtml::img('@web/images/logo.png',['alt'=>'Logo'])\n生成\n<img src=\"http://example.com/image/logo.png\" alt=\"Logo\" />\n```\n10. 列表 \n**无序列表生成:** \n```\necho Html::ul(模型,['item'=>function($item,$index){\n return Html::tag(\n 'li',\n $this->render('post',['item'=>$item]),\n ['class'=>'post']\n );\n});\n```\n"
},
{
"alpha_fraction": 0.8095238208770752,
"alphanum_fraction": 0.8095238208770752,
"avg_line_length": 9.5,
"blob_id": "27304b6244735a4e20aca22a7e037ed9e527913b",
"content_id": "8c93afe9cad77d7f0b5df4115eb0860fb70e0825",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 55,
"license_type": "no_license",
"max_line_length": 13,
"num_lines": 2,
"path": "/编程开发/面试问题/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 面试问题\n与面试相关的问题解答及思考\n"
},
{
"alpha_fraction": 0.6976242065429688,
"alphanum_fraction": 0.6997840404510498,
"avg_line_length": 28.41269874572754,
"blob_id": "9de6a06a2c5ef2ef202fb2753dfe2a9be8ac6cab",
"content_id": "bfeabd2c3207c12b4bb3bcc439b1321104f7fc4c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 2202,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 63,
"path": "/数据结构/树/字典树/term1/trie.h",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#ifndef __TRIE_H__\n#define __TRIE_H__\n#include<stdio.h>\n#include<stdlib.h>\n#include<string.h>\n\n//默认删除方式,不指定则优先软删除\n#ifdef HARD_DELTE_FIRST\n#define deleteNode hardDeleteNode\n#else\n#define deleteNode softDeleteNode\n#endif\n\ntypedef enum{\n FALSE = 0,\n TRUE = 1,\n}Boolean; //布尔类型\n\ntypedef struct Trie Trie,*triep;\ntypedef struct TrieNode TrieNode,*trienodep;\ntypedef unsigned char byte;\ntypedef char* string;\ntypedef struct SearchResult SearchResult,*searchresultp; //查询结果\n\nstruct Trie{\n trienodep root; //字典的根\n int size; //字典大小\n};\n\nstruct TrieNode{\n byte key; //节点字符\n void* data; //节点对应的数据\n int size; //节点数据大小\n trienodep next; //同级下一个字符\n trienodep next_level; //下一层\n};\n\nstruct SearchResult{\n void* data; //查询结果\n Boolean status; //查询状态,0:成功,1:失败\n};\n\ntriep trie(); //初始化一棵trie树\ntrienodep trienode(byte key,void* data,int size); //创建trienode\nBoolean insertNode(triep tree,string key,void* data,int size); //插入节点\ntrienodep searchNode(triep tree,string key); //获取指定key的节点\nsearchresultp search(triep tree,string key); //查询指定key的数据\nBoolean updateNode(triep tree,string key,void* data,int size); //更新节点\nBoolean updateOrSetNode(triep tree,string key,void* data,int size); //存在则更新,不存在则创建\nBoolean _insertNode(triep tree,string key,void* data,int size,Boolean fail_when_exists,Boolean fail_when_not_exists); //底层节点插入\nBoolean softDeleteNode(triep tree,string key); //删除指定的节点\nBoolean hardDeleteNode(triep tree,string key); //硬删除指定的节点\n/**\n * 节点删除底层逻辑\n * @param triep tree 字典树\n * @param string key 关键词\n * @param Boolean recusive_delete 递归删除标志,FALSE表示单纯设定为NULL\n **/\nBoolean _deleteNode(triep tree,string key,Boolean recusive_delete);\n\nBoolean emptyTrie(triep tree); //清空所有的节点\nBoolean destroyTrie(triep* treep); //清空并释放trie树的空间\n#endif"
},
{
"alpha_fraction": 0.6818181872367859,
"alphanum_fraction": 0.6954545378684998,
"avg_line_length": 31,
"blob_id": "4536ff034533d94314d249f2893b390ddd32c240",
"content_id": "cdc66dd1df21afe9687239f10b4b9d9fb6054292",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2884,
"license_type": "no_license",
"max_line_length": 151,
"num_lines": 55,
"path": "/leetcode/1023-驼峰式匹配/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 1023-驼峰式匹配\n## 题目\n如果我们可以将**小写字母**插入模式串 pattern 得到待查询项 query,那么待查询项与给定模式串匹配。(我们可以在任何位置插入每个字符,也可以插入 0 个字符。) \n\n给定待查询列表 queries,和模式串 pattern,返回由布尔值组成的答案列表 answer。只有在待查项 queries[i] 与模式串 pattern 匹配时, answer[i] 才为 true,否则为 false。 \n\n**示例 1:** \n```\n输入:queries = [\"FooBar\",\"FooBarTest\",\"FootBall\",\"FrameBuffer\",\"ForceFeedBack\"], pattern = \"FB\"\n输出:[true,false,true,true,false]\n示例:\n\"FooBar\" 可以这样生成:\"F\" + \"oo\" + \"B\" + \"ar\"。\n\"FootBall\" 可以这样生成:\"F\" + \"oot\" + \"B\" + \"all\".\n\"FrameBuffer\" 可以这样生成:\"F\" + \"rame\" + \"B\" + \"uffer\".\n```\n**示例 2:** \n```\n输入:queries = [\"FooBar\",\"FooBarTest\",\"FootBall\",\"FrameBuffer\",\"ForceFeedBack\"], pattern = \"FoBa\"\n输出:[true,false,true,false,false]\n解释:\n\"FooBar\" 可以这样生成:\"Fo\" + \"o\" + \"Ba\" + \"r\".\n\"FootBall\" 可以这样生成:\"Fo\" + \"ot\" + \"Ba\" + \"ll\".\n```\n**示例 3:** \n```\n输出:queries = [\"FooBar\",\"FooBarTest\",\"FootBall\",\"FrameBuffer\",\"ForceFeedBack\"], pattern = \"FoBaT\"\n输入:[false,true,false,false,false]\n解释:\n\"FooBarTest\" 可以这样生成:\"Fo\" + \"o\" + \"Ba\" + \"r\" + \"T\" + \"est\".\n```\n\n**提示:** \n1. 1 <= queries.length <= 100\n2. 1 <= queries[i].length <= 100\n3. 1 <= pattern.length <= 100\n4. 所有字符串都仅由大写和小写英文字母组成。\n\n## 思路\n题目比较绕,我大致分析了一下,就是以大写字母为一个单元的起点将模式字符串和queries中的各个字符串分成不同的单词(或者称为组).queries中字符串对应的组要以该组对应模式字符串组开头. \n例如: \n```\n假设\nqueries:[\"AppVersion\",\"OldAppVersion\"]\npattern:\"AP\"\n则对于queries的第一个元素,我们可以分成[\"App\",\"Version\"]两组,第二个元素分成[\"Old\",\"App\",\"Version\"];而pattern可以分成[\"A\",\"P\"]两组,我们拿第一个元素生成的组中对应的元素与pattern生成的组对应元素开头,App要以A或Ap或App开头.\n```\n```\n题目比较简单,我们将queries中的元素成为query.那我们只需要考虑某个query的情况. \n对于某个query,遍历其中的字符:\n如果是大写字符: \n 说明此时已经切换到一个新的组上面,此时query上的字符必须要与pattern对应的字符相同(因为此时pattern也必定需要切换到新的组).\n如果不是大写字符:\n 判断此时query与pattern当前位置的字符是否相同,如果相同,pattern当前位置后移一位. \n做完这些操作,我们可以判断出query所有分组与pattern分组对应项是否匹配,但是一种情况没有包含,在query所有字符遍历完之后,pattern还存在字符,说明此时pattern与query是不匹配的.\n```\n"
},
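A minimal Python sketch of the single-pass idea described in the approach above; `camel_match` is my own name:

```python
def camel_match(queries, pattern):
    def matches(query):
        j = 0  # current position in pattern
        for ch in query:
            if j < len(pattern) and ch == pattern[j]:
                j += 1        # consume one pattern character
            elif ch.isupper():
                return False  # an unmatched uppercase letter starts a group the pattern lacks
        return j == len(pattern)  # leftover pattern characters mean no match
    return [matches(q) for q in queries]

qs = ["FooBar", "FooBarTest", "FootBall", "FrameBuffer", "ForceFeedBack"]
print(camel_match(qs, "FB"))  # [True, False, True, True, False]
```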
{
"alpha_fraction": 0.36713287234306335,
"alphanum_fraction": 0.4195804297924042,
"avg_line_length": 14.88888931274414,
"blob_id": "6c6a56ef1ba12608a8a737be891d0e9b4f08f01a",
"content_id": "73c2472f71c247146642e7af7d5ed312696a71bc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 286,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 18,
"path": "/leetcode/338-比特位计数/c/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#include<stdio.h>\n\nint ones(int n){\n int count = 0;\n for(;n>0;n&=n-1,count++);\n return count;\n}\n\nint main(){\n int i;\n int dp[100] = {0};\n printf(\"0 \");\n for(i = 1; i < 100; i++){\n dp[i] = dp[i & (i-1)] + 1;\n printf(\"%d \",dp[i]);\n }\n return 0;\n}\n"
},
{
"alpha_fraction": 0.4016913175582886,
"alphanum_fraction": 0.4482029676437378,
"avg_line_length": 19.565217971801758,
"blob_id": "b6f0226a612bc20ae36d0c261a430a378b607cef",
"content_id": "b64a0e118b2921db0c3e2b1b0823b07f54b8bc65",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 569,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 23,
"path": "/leetcode/260-只出现一次的数字3/c/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#include<stdio.h>\n\nint main(){\n int arr[] = {\n 1,3,4,3,1,4,5,9,5,6\n };\n\n int size = sizeof(arr) / sizeof(arr[0]);\n int i;\n\n int mix = 0; //或者获得的结果\n for(i = 0; i < size; i++){\n mix ^= arr[i];\n }\n int lsb = mix&-mix; //最低有效位,两个唯一的数字混合结果最低位1对应的二进制位必然不同,根据该二进制位进行区分\n int a[2] = {0};\n for(i = 0; i < size; i++){\n int idx = (lsb&arr[i]) ? 1:0;\n a[idx] ^= arr[i];\n }\n printf(\"%d,%d\\n\",a[0],a[1]);\n return 0;\n}\n"
},
{
"alpha_fraction": 0.7961373329162598,
"alphanum_fraction": 0.8433476686477661,
"avg_line_length": 37.66666793823242,
"blob_id": "39bf03a77f7853de40d19ed0ef4c009b2f599ccc",
"content_id": "dc409f0efcb792b83b015c47bd87459ffeb6d1cf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1208,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 12,
"path": "/算法/位运算/只出现一次的元素3/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 只出现一次的元素\n在一个数列中,除了一个数字只出现一次,其他数字皆出现三次.求出这个数字.\n思路: \n这个题目目前我知道的有两种思路.\n1. 计算每一个位上1出现的个数,不为3的倍数的位置必然属于只出现一次的数字. \n原因很简单,每个出现三次的数字,二进制位完全相同,所以有1的地方必然出现三次1. \n比如12的二进制为1100,第2位与第三位是1,所以在第二位和第三位上必然出现三次1. \n同理,即使存在其他出现过三次的数字,对应的二进制位也会同样出现3次,即使与其他出现三次的数字的二进制有重叠依然不会影响.只有数字只出现一次(其实这里可以适应所有出现次数不为3的倍数的情况)会影响到二进制位1出现的次数是否为3的倍数. \n这种方式,代码比较好理解. \n2. 除了上面这种方式外,还有一种比较难懂的方法,利用二进制模拟三进制运算... \n因为只有两种情况:数字出现1次,数字出现3次.我们把数字出现三次的二进制位归0,这样最后运算得到的结果就是我们需要的结果了... \n具体操作我会在代码里面进行解释. \n"
},
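A minimal Python sketch of approach 1, per-bit counting modulo 3, assuming 32-bit signed integers; the function name is my own:

```python
def single_number(nums):
    # Count the 1s at each of the 32 bit positions; positions whose count is
    # not a multiple of 3 belong to the number that appears once.
    result = 0
    for bit in range(32):
        count = sum((n >> bit) & 1 for n in nums)
        if count % 3:
            result |= 1 << bit
    # Re-interpret bit 31 as a sign bit so negative inputs round-trip.
    return result - (1 << 32) if result & (1 << 31) else result

print(single_number([2, 2, 3, 2]))  # 3
```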
{
"alpha_fraction": 0.416304349899292,
"alphanum_fraction": 0.428260862827301,
"avg_line_length": 20.395349502563477,
"blob_id": "fd5c19655b51ec611d0b05e2c1f9e9ac56b463dc",
"content_id": "8fa5c27b2a231db348130f151fc4c9c619efd50c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 920,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 43,
"path": "/DailyCodingProblems/412_look_and_say_Epic/cpp/main.cpp",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#include<iostream>\n#include<vector>\n#include<string>\n\nusing namespace std;\n\n\nclass Solution{\npublic:\n vector<int> say;\n string look_and_say(string look,int steps){\n if(steps <= 1){\n return look;\n }\n int len = look.length();\n char tmp = look[0];\n int cnt = 0;\n say.clear();\n for(int i = 0; i < len; i++){\n if(tmp != look[i]){\n say.push_back(cnt);\n say.push_back(tmp - '0');\n cnt = 0;\n }\n tmp = look[i];\n cnt++;\n //cout << look[i];\n }\n say.push_back(cnt);\n say.push_back(tmp - '0');\n look = \"\";\n for(int i = 0; i < say.size(); i++){\n look+=to_string(say[i]);\n }\n return look_and_say(look,steps-1);\n }\n};\n\nint main(){\n Solution s;\n int N = 4;\n cout << s.look_and_say(\"1\",N) << endl;\n}\n"
},
{
"alpha_fraction": 0.6038035154342651,
"alphanum_fraction": 0.6156893968582153,
"avg_line_length": 23.745098114013672,
"blob_id": "f4c4a68c6f002c1ca64ffecc395587b54866e0e2",
"content_id": "5f3d7618f8ea87935ecb77fba5217b3d6f9451b5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1372,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 51,
"path": "/leetcode/108-将有序数组转换为二叉搜索树/c/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#include<stdio.h>\n#include<stdlib.h>\n\n//TreeNode结构体\nstruct TreeNode {\n int val;\n struct TreeNode *left;\n struct TreeNode *right;\n};\n\n//构建树,思想就是二分查找\nvoid buildTree(struct TreeNode** pRoot,int* nums,int left,int right){\n if(pRoot == NULL) return;\n struct TreeNode* root = *pRoot;\n int mid = (left+right)/2;\n if(root == NULL){\n root = (struct TreeNode*)malloc(sizeof(struct TreeNode));\n root->val = nums[mid];\n root->left = root->right = NULL;\n *pRoot = root;\n if(left < mid)\n buildTree(&(root->left),nums,left,mid-1);\n if(mid < right)\n buildTree(&(root->right),nums,mid+1,right);\n }\n}\n\n//主要函数\nstruct TreeNode* sortedArrayToBST(int* nums, int numsSize){\n if(numsSize == 0) return NULL;\n struct TreeNode* root = NULL;\n buildTree(&root,nums,0,numsSize-1); //这里需要注意参数应该是numsSize-1,而不是numsSize,否则程序会出现数组下标超界.\n return root;\n}\n\n//前序遍历,用来看结果的\nvoid preVisit(struct TreeNode* root){\n if(root == NULL) return;\n printf(\"%d \",root->val);\n preVisit(root->left);\n preVisit(root->right);\n}\n\nint main(){\n int nums[] = {1,2,3,4,5,6};\n struct TreeNode* root = sortedArrayToBST(nums,sizeof(nums)/sizeof(nums[0]));\n preVisit(root);\n printf(\"\\n\");\n\n return 0;\n}\n"
},
{
"alpha_fraction": 0.5376781821250916,
"alphanum_fraction": 0.5498981475830078,
"avg_line_length": 18.639999389648438,
"blob_id": "6dea15a909cfb6d2fcc189664e10e502e0bddd04",
"content_id": "276ba0c0206d8029313b5ec7675751b78ea0067a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 789,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 25,
"path": "/leetcode/136-只出现一次的数字/c/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#if 0\n这个题目整体比较简单.\n可以利用二进制位出现次数统计的方法得出结果,但是不是特别优秀.\n根据这个题目的特殊性,我们可以利用位异或运算的特点解题.\n很容易知道两个相同的数字位异或后为0,即a^a == 0.\n异或运算也存在交换律性质,所以a^b^a == a^a^b == b\n有了这个性质,题目也就不难解决了.\n#endif\n\n#include<stdio.h>\n\nint main(){\n int N;\n while(~scanf(\"%d\",&N)){ //这种写法并不适用于leetcode的解题提交形式,我这里主要是讲解题思路\n int res = 0;\n int i;\n int num;\n for(i = 0; i < N; i++){\n scanf(\"%d\",&num); //获取数列中的某个数字\n res^=num;\n }\n printf(\"%d\\n\",res);\n }\n return 0;\n}\n"
},
{
"alpha_fraction": 0.8608695864677429,
"alphanum_fraction": 0.8782608509063721,
"avg_line_length": 55.5,
"blob_id": "8014aab70fb71a4bd9eac6aeeaefd6711ffdf4af",
"content_id": "33c4d3440b9db065e449dafc5a0a06af64fda324",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 235,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 2,
"path": "/编程开发/Linux/命令/wget替代命令axel&aria2.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# aria2&axel\n在linux中,有时候使用wget命令下载会比较慢,可以选择用aria2或者axel,一般情况下axel比较好使.不排除有些情况下无法使用,比如下载github里的东西,这个时候老老实实用wget. \n"
},
{
"alpha_fraction": 0.5647298693656921,
"alphanum_fraction": 0.5769622921943665,
"avg_line_length": 24.842105865478516,
"blob_id": "2c817f43f5630f4cf37806c0ed839ac18215ba16",
"content_id": "b17660f42f825b75db9a677cca2fbda809e795ab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1109,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 38,
"path": "/数据结构/树/2-3树/term1/tree23.h",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#ifndef __TREE23_H__\n#define __TREE23_H__\n#include<stdlib.h>\n#include<stdio.h>\ntypedef struct Key Key,*keyp; //关键词链\ntypedef struct Node Node,*nodep; //节点\ntypedef struct Tree Tree,*treep; //23树\ntypedef enum{\n FALSE = 0,\n TRUE = 1,\n}Boolean;\n\nstruct Key{\n int key;\n struct Key* next;\n}; //关键词\n\nstruct Node{\n struct Node *parent; //父节点\n keyp keys; //关键词链\n nodep children; //孩子\n int size; //关键词链\n struct Node *next; //后继节点\n};\n\nstruct Tree{\n nodep root; //树根\n int size; //树的大小\n};\n\ntreep tree(); //新的2,3树\nnodep create_node(treep t,keyp first_key,nodep first_child,int size); //创建新的2-3节点\nBoolean insertNode(treep t,int key); //向t中插入节点key\nBoolean _insertNode(treep t,nodep n,int key); //底层节点插入\nBoolean fixNode(treep t,nodep n); //修复节点\nkeyp searchNode(treep t,int key); //查找关键词\nBoolean deleteNode(treep t,int key);\n#endif"
},
{
"alpha_fraction": 0.523862361907959,
"alphanum_fraction": 0.5627080798149109,
"avg_line_length": 22.102563858032227,
"blob_id": "efe32052cc1d264453f7d31013dabe3de0052394",
"content_id": "ebe5618a333ef9e0ef948502d3eb89f51c0fb6a0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1051,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 39,
"path": "/算法/排序算法/睡眠排序/c/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#if 0\n编译: gcc -o main main.c -lpthread\n编程中遇到的一些问题,printf中如果没有达到一定长度,且没有回车输出的情况下,字符并不会刷新到屏幕上,需要用fflush(stdout)刷新一下\n#endif\n#include<stdio.h>\n#include<pthread.h>\n#include<unistd.h>\n\nint out[100000];\nint len;\n\nvoid* routine(void *tm){\n int t = *(int*)tm;\n usleep(t*1000+666); //核心的睡觉部分\n //printf(\"%d \",t); //输出结果\n out[len++] = t;\n //fflush(stdout);\n pthread_exit(NULL);\n}\n\nvoid sleep_sort(int arr[],int size){\n int i;\n pthread_t pids[10000];\n len = 0;\n for(i = 0; i < size; i++){\n pthread_create(&pids[i],NULL,routine,&arr[i]);\n }\n while(len < size); //等待排序完毕\n}\n\nint main(){\n int arr[] = {1,3,4,0,0,5,8,5,9,0};\n int size = sizeof(arr) / sizeof(arr[0]);\n int i;\n puts(\"排序前\");for(i = 0; i < size; i++) printf(\"%d \",arr[i]);puts(\"\");\n sleep_sort(arr,size);\n puts(\"排序后\");for(i = 0; i < size; i++) printf(\"%d \",out[i]);puts(\"\");\n return 0;\n}\n"
},
{
"alpha_fraction": 0.4477040767669678,
"alphanum_fraction": 0.47066327929496765,
"avg_line_length": 27,
"blob_id": "6643873788e14933e8f27feaf08ef6cc7c0378ac",
"content_id": "3db90380ee48b5f419556e67a700d26810d9e4e5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 980,
"license_type": "no_license",
"max_line_length": 143,
"num_lines": 28,
"path": "/算法/背包问题/捡宝石/c/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#include<stdio.h>\n\nint values[1000]; //保存价值的数组\nint weights[1000]; //重量的数组\nint max_values[1000]; //保存最大重量的数组\nint max(int a,int b){\n return a > b ? a : b;\n}\n\nint main(){\n int N; //宝石的数目\n int M; //最大可以容纳的重量\n int i,j;\n while(~scanf(\"%d%d\",&N,&M)){\n for(i = 1; i <= N; i++){\n scanf(\"%d%d\",&weights[i],&values[i]); //输入石头的重量和价值\n }\n for(i = 0; i <= M; i++)\n max_values[i] = 0; //初始化最大价值数组\n for(i = 0; i <= N; i++){ //遍历所有的宝石\n for(j = M; j > 0; j--){ //逆序遍历各个当前石头对最大价值的影响\n max_values[j] = (weights[i] > j ? max_values[j] : max(max_values[j],values[i]+max_values[j-weights[i]])); //更新由于当前石块加入造成的最大价值变化\n }\n }\n printf(\"%d\\n\",max_values[M]);\n }\n return 0;\n}\n"
},
{
"alpha_fraction": 0.49494948983192444,
"alphanum_fraction": 0.5122655034065247,
"avg_line_length": 20,
"blob_id": "3594616f58eead47465ee319090846d669c3822f",
"content_id": "42dddf9f47534813b24f3e8e1daac27fda235dac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 719,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 33,
"path": "/DailyCodingProblems/384_coin_wework/c/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#include<stdio.h>\n#include<stdlib.h>\n\nint count(int target,int coins[],int length){\n int i;\n for(i = 0; i < length; i++){\n if(coins[i] < target){ //可以继续下一步处理\n int cnt = count(target - coins[i],coins,length);\n if(cnt > 0)\n return 1+cnt;\n }else if(coins[i] == target){\n return 1;\n }\n }\n return 0;\n}\n\n//逆序排列\nint cmp(const void* a,const void* b){\n return *(int*)a < *(int*)b;\n}\n\nint main(){\n int target = 15;\n int coins[] = {\n 5,8\n };\n int length = sizeof(coins) / sizeof(coins[0]);\n qsort(coins,length,sizeof(coins[0]),cmp);\n\n printf(\"%d\\n\",count(target,coins,length));\n return 0;\n}\n"
},
{
"alpha_fraction": 0.6805407404899597,
"alphanum_fraction": 0.6929917931556702,
"avg_line_length": 23.215517044067383,
"blob_id": "005db1fb4dd36086ac919bc72e669ecda3a4d4fe",
"content_id": "3a0b24d0912b9ebe445da165701db42c3dd5abc5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 4981,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 116,
"path": "/编程开发/后端/Laravel/Laravel官方教程笔记/5.8/1. 开始/2. 配置.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 配置 \n## 1. 介绍\nlaravel所有的配置文件都放在`config`目录下\n## 2. 环境配置 \n在不同的环境中使用不同的配置是一项非常有意义的功能. \n如果我们需要将同一个项目部署到不同的环境中,我们可能会希望使用不同的配置. \n为了实现这一点,laravel采用了名为`DotEnv`的php库.在默认的情况下,laravel中包含一个`.env.example`文件. \n使用composer安装laravel时,这个文件会被自动命名为`.env`\n需要注意的是我们不应该将`.env`文件提交到我们的版本库中.否则一些关键的信息可能会暴露在版本库中. \n我们也可以创建一个形如`.env.testing`的文件用于设定跟测试有关的配置. \n在进行PHPUnit测试时,这个文件会覆盖`.env`中的配置.我们课可以在artisan命令后加上`--env=testing`指定环境 \n## 4. 环境变量类型 \n所有的都是以字符串的形式被解析出来,但是对于一些特殊的值,`env()`函数会更加灵活的返回一些类型: \n`.env`|env()\n---|---\ntrue|(bool)true\n(true)|(bool)true\nfalse|(bool)false\n(false)|(bool)false\n空|(string)''\n(空)|(string)''\nnull|(null)null\n(null)|(null)null \n\n如果需要设置的配置值中包含空格,我们需要用双引号将配置值括起来: \n```\nAPP_NAME=\"My Application\"\n```\n## 访问环境配置 \n使用env函数访问设定好的配置\n```\n'debug' => env('APP_DEBUG',false),\n``` \n## 5. 检测当前的环境 \n在laravel中我们可以通过App`Facade`中的`environment`方法访问当前的环境. \n```\n$environment = App::environment();\n```\n如果`environment`中指定了环境名称,则表示检测是否处于指定的环境中. \n```\nif ( App::environment('local') ) {\n // 当前环境为local环境\n}\n\nif ( App::environment(['local','staging']) ) {\n // 当前环境为local或者staging环境\n}\n```\n该方法依据`APP_ENV`判断当前环境 \n## 6. 在调试信息中隐藏环境变量 \n这个功能非常重要.在系统发生异常并且`APP_DEBUG`被设置为`true`时,laravel的调试界面会显示出所有的环境变量. \n我们可能会隐藏一些变量,此时我们可以通过修改`config/app.php`配置文件中的`debug_blacklist`选项实现. \n文件格式如下: \n```\nreturn [\n\n // ...\n\n 'debug_blacklist' => [\n '_ENV' => [\n 'APP_KEY',\n 'DB_PASSWORD',\n ],\n\n '_SERVER' => [\n 'APP_KEY',\n 'DB_PASSWORD',\n ],\n\n '_POST' => [\n 'password',\n ],\n ],\n];\n``` \n## 7. 访问配置值 \n如果我们需要访问config目录下指定文件中的某个值,我们可以使用`config`全局帮助函数. \n比如如果我们需要访问`app.php`文件中的timezone,我们可以现成. \n```\n$value = config('app.timezone')\n```\n我们可以通过该函数设定运行时的配置: \n```\nconfig(['app.timezone'=>'Asia/Shanghai'])\n``` \n## 8. 配置缓存 \n如果有提升应用速度的需求,你可以通过artisan的`config:cache`命令将所有的配置文件缓存到一个文件中. \n系统会自动将所有的配置包含到一个文件中以提升框架载入速度. \n在production发布的时候,最好是执行一下`php artisan config:cache`以提升应用的速度. \n如果我们在发布的过程中执行了`config:cache`,我们需要确保所有的env函数只存在于配置文件中,一旦配置被缓存,`.env`将不会被解析,此时执行`env`函数会返回一个null. \n## 9. 维护模式 \n如果我们的网站出现了暂时无法解决的问题或者正在升级时,此时我们可能会需要将网站置于维护模式,以保证所有的请求将会被导到提前定义好的页面中. \n维护模式检测默认被包含再默认的中间件栈中.如果应用处于维护模式,`MaintenanceModeException`异常将会被抛出,并且附带503状态响应. \n开启维护模式: \n```\nphp artisan down\n``` \n我们也可以自定义提示文字及重试时间选项. \n```\n# 正在更新数据库,60s后重试\nphp artisan down --message=\"正在更新数据库\" --retry=60\n``` \n其中`message`可以被我们显示到网站上或者记录在日志中,`retry`的值会以`Retry-After`HTTP头的值的形式返回. \n有时候我们在维护模式时,可能会想要让指定ip或者ip端的用户正常访问网站,此时我们可以通过`allow`选项指定: \n```\nphp artisan down --allow=127.0.0.1 --allow=192.168.0.0/16\n``` \n在一切准备好之后,我们可以退出维护模式: \n```\nphp artisan up\n```\n如果需要自定义默认维护模式的页面,我们可以修改默认的模板:`resources/views/errors/503.blade.php` \n### 维护模式与队列 \n当应用处于维护模式时,`queue jobs`将不会被处理,当我们退出维护模式时,这些没有处理的队列任务才会被继续处理. \n### 维护模式的替代方案 \n由于维护模式需要应用有一段时间处于维护状态,我们可以考虑使用`Envoyer`来实现0维护时间发布. \n"
},
{
"alpha_fraction": 0.4577294588088989,
"alphanum_fraction": 0.4915458858013153,
"avg_line_length": 15.5600004196167,
"blob_id": "bf78e7292e558839548710541dc6d4f70307fd08",
"content_id": "73f1fe5d52aecc58bf00ba101755a9bd09aa6ca5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 862,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 50,
"path": "/数据结构/表/树状数组/c/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#include<stdio.h>\n#include<stdlib.h>\n\n#define SIZE 10000\nint table[SIZE] = {0};\n\nint lowbit(int num); //最低位1\nint update(int i,int val); //更新指定位置的值\nint delete(int i); //删除指定index的值\nint sum(int i);\n\nint main(){\n int arr[] = {\n 1,3,4,0,0,5,8,5,9,0\n };\n int size = sizeof(arr) / sizeof(arr[0]);\n int i;\n for(i = 0; i < size; i++)\n update(i,arr[i]);\n for(i = 0; i < size; i++)\n printf(\"%d\\n\",sum(i));\n return 0;\n}\n\nint sum(int i){\n int s = 0;\n i+=1;\n while(i > 0){\n s+=table[i-1];\n i-=lowbit(i);\n }\n return s;\n}\n\nint lowbit(int num){\n return num & (-num);\n}\n\nint update(int i,int val){\n i+=1;\n while(i < SIZE){\n table[i-1] += val;\n i += lowbit(i);\n }\n return val;\n}\n\nint delete(int i){\n return update(i,sum(i-1) - sum(i));\n}\n"
},
{
"alpha_fraction": 0.600806474685669,
"alphanum_fraction": 0.725806474685669,
"avg_line_length": 23.422534942626953,
"blob_id": "9853c59f0f7ed3b91d46fd8f62cd10923e75dbdb",
"content_id": "e8803bb27388313907d89126920a907e52f77a52",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2688,
"license_type": "no_license",
"max_line_length": 226,
"num_lines": 71,
"path": "/网络/认证/HTTP认证.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# HTTP认证方式 \n**核对的信息通常如下:** \n1. 密码: 只有本人知道的特定字符串信息\n2. 动态令牌: 仅限本人持有的设备内显示的一次性密码\n3. 数字证书: 仅限本人持有的信息\n4. 生物认证: 指纹和虹膜等本人的生理信息\n5. IC卡等: 仅限本人持有的信息 \n\n**HTTP使用的认证方式:** \n- Basic认证(基本认证) \n- Digest认证(摘要认证) \n- SSL客户端认证 \n- FormBase认证(基于表单认证) \n## 1. Basic认证 \nBasic认证是从HTTP/1.0就定义的认证方式.即便现在仍然有部分网站会使用这种认证方式.是Web服务器与通信客户端之间进行的认证方式. \n过程: \n**发送请求**\n```\nGET / HTTP/1.1\nHost: 127.0.0.1 \n```\n**1. 服务端返回401状态码** \n```\nHTTP/1.1 401 unAuthorizied\nWWW-Authenticate: Basic realm=/\n```\nrealm说明了保护的范围 \n**2. 输入用户,密码已base64方式编码后发送**\n```\nGET / HTTP/1.1\nHost: 127.0.0.1\nAuthorization: Basic Zng6MTIzNDU2\n```\n**3. 认证成功后返回200,失败后继续401**\n```\nHTTP/1.1 200 OK\n...\n```\n在PHP中可以通过`$_SERVER['PHP_AUTH_USER']`和`$_SERVER['PHP_AUTH_PW']`获取到传递过来的用户名和密码.可以通过这两个字段对用户进行认证. \n**缺点:** \n- Basic认证,账户密码以Base64方式编码,并没有进行加密处理 \n- 浏览器无法注销认证 \n- 达不到多数web网站期望的安全性等级 \n## 2. Digest认证 \nDigest认证同样采用质询/响应的方式(challenge/response),但不会像BASIC一样直接发送明文密码. \n质询响应方式:开始一方会发送认证要求给另一方,接着使用从另一方接收到的质询码生成响应码.最后将响应码返回给对方进行认证的方式. \n**发送请求** \n```\nGET / HTTP/1.1\nHost: 127.0.0.1\n```\n**1. 发送临时质询码(随机数,nonce)以及告知需要认证的状态码401**\n```\nHTTP/1.1 401 unAuthorized\nWWW-Authenticate: Digest realm=\"DIGEST\",nonce=\"MOSQZ0itBAA=44abb6784cc9cbf605a5b0893d36f23de95fcff\",algorithm=MD5,qop=\"auth\"\n```\n**2. 发送摘要以及由质询码计算出的响应码(response)** \n```\nGET / HTTP/1.1\nHost: 127.0.0.1\nAuthorization: Digest username=\"guest\",realm=\"DIGEST\",nonce=\"MOSQZ0itBAA=44abb6784cc9cbf605a5b0893d36f23de95fcff\",uri=\"/\",algorithm=MD5,response=\"df56389ba3f7c52e9d7551115d67472f\",qop=auth,nc=00000001,cnonce=\"082c875dcb2ca740\"\n```\n**3. 认证成功返回200,失败发送401**\n```\nHTTP/1.1 200 OK\nAuthentication-info: rspauth=\"f218e9ddb407a3d16f2f7d2c4097e900\",cnonce=\"083c875dcb2ca740\",nc=00000001,qop=auth\n```\n## 3. SSL认证\n一般配合其他认证使用. \n## 4. 表单认证\n一般常见的认证实现方式. \n"
},
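A minimal Python sketch of what step 2 of Basic authentication sends; the credentials below are simply the pair encoded in the example header above (`Zng6MTIzNDU2` is Base64 for `fx:123456`):

```python
import base64

user, password = "fx", "123456"
token = base64.b64encode(f"{user}:{password}".encode()).decode()
print(f"Authorization: Basic {token}")   # Authorization: Basic Zng6MTIzNDU2
# Decoding it back shows why Basic is encoding, not encryption:
print(base64.b64decode(token).decode())  # fx:123456
```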
{
"alpha_fraction": 0.44232237339019775,
"alphanum_fraction": 0.4660045802593231,
"avg_line_length": 24.173076629638672,
"blob_id": "ee30089d4c3f779691e10a54701b0be9dc14217d",
"content_id": "daa14ee617a3c2e84ee703840b91fe478039a327",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1333,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 52,
"path": "/数据结构/树/哈夫曼树/term2/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#include\"huffman.h\"\n\nint main(){\n // hufflistp list = hufflist();\n\n // int kvs[][2] = { //key,weight\n // {1,12},\n // {2,18},\n // {3,9},\n // {4,15},\n // {5,11},\n // {6,16},\n // {7,12},\n // };\n \n // int kvl = sizeof(kvs) / sizeof(kvs[0]);\n\n // for(int i = 0; i < kvl; i++){\n // insertHuffNode(list,huffnode(kvs[i][0],kvs[i][1]));\n // }\n\n // hufflistnodep tmp = NULL;\n\n // // for(tmp = list->head; tmp != NULL; tmp = tmp->next){\n // // printf(\"%d{%d}\\n\",tmp->node->key,tmp->node->weight);\n // // }\n\n // hufftreep tree = hufftree(list);\n char* input = \"test1.txt\";\n char* output = \"test2.huff\";\n\n huffresultp result = getHuffCodesByFile(input);\n if(result != NULL){\n int l = strlen(result->code);\n // printf(\"%s\\n\",result->code);\n huffbufp origin = getOriginBuf(result);\n if(origin != NULL){\n // printf(\"%s\\n\",origin->code);\n // printf(\"原文大小:%d\\n\",origin->size);\n // printf(\"压缩大小:%d[%d]\\n\",l,(l/8)+(l%8 > 0));\n }else\n printf(\"解析失败\\n\");\n\n putOriginToFile(result,output);\n }else\n printf(\"Failed\\n\");\n\n huffbufp tmp = readHuffFile(output);\n printf(\"%s\\n\",tmp ? tmp->code : \"Failed\\n\");\n\n return 0;\n}\n"
},
{
"alpha_fraction": 0.8455696105957031,
"alphanum_fraction": 0.8455696105957031,
"avg_line_length": 55.14285659790039,
"blob_id": "1e644b2ab10fd76f083c39a53a428dcc8122f502",
"content_id": "de45e1a8c9c00a47df9d3f979714913e2463dd81",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 979,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 7,
"path": "/编程开发/vim/mac下vim乱码.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# Mac下vim乱码\n这两天mac刚到手,配置了很久的软件,在用vim的时候,发现了一个问题.在vim中输入中文,会显示乱码.顿时一万只草泥马在头上飞过. \n最开始我猜测是编码的问题,所以在网上修改了一下编码,结果还是不行.一通谷歌百度之后,讨论的点也是集中在编码上.但是的确,编码已经没问题了,但是vim还是有问题. \n既然不是编码的问题,那可能就是终端自己的问题,我尝试着改了终端的字体,从头到尾试了一遍以后还是无效.我心想又遇到一个可以以我的名字命名的BUG了. \n路都堵死了之后,我突然想到可能跟我之前修改终端的配置有关系,在终端偏好设置->高级->输入里面,我看到了之前不确定的一个选项\"用Control-V键跳过非ASCII的输入\",将前面的勾选去掉之后,再输入中文,发现这个时候就好使了. \n**问题原因分析:** \n对terminal的配置文件了解不清晰. \n"
},
{
"alpha_fraction": 0.5181924700737,
"alphanum_fraction": 0.5208333134651184,
"avg_line_length": 40.06024169921875,
"blob_id": "19f964fa22ae55f72c88c2232000f354831c1aaf",
"content_id": "fcb851cb69f3bff78d92a090c123141308346bf8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4850,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 83,
"path": "/数据结构/树/红黑树/python/tree.py",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "class RBT:\n # y为删除掉的节点,x为current节点\n def delete(self,tree,n)\n delTree = self.search(tree,n) # 查找需要删除的节点\n if delTree is None: # 节点不存在\n self.missNode += 1\n return\n self.deleteNode +=1 # 删除的节点数+1\n\n y = delTree # y为需要删除的节点\n yorgcolor = y.color # 需要删除节点的颜色\n\n if delTree.left.val is None: # 需要删除的节点左孩子不存在\n x = delTree.right # 假定右孩子为替换节点\n self.transplant(delTree,delTree.right) # 将右子树移动到需要删除的节点位置\n elif delTree.right.val is None:\n x = delTree.left\n self.transplant(delTree,delTree.left) # 将左孩子移动到需要删除的节点位置\n else:\n y = self.findMinimum(delTree.right) # 将y设定为替死鬼节点(右子树中的最小节点)\n y.orgcolor = y.color # 需要删除节点的颜色\n x = y.right # 需要删除节点的右节点可能存在,需要将它移动到y的位置\n\n if y.parent is delTree: # 需要删除的节点为原本需要删除节点的子节点\n x.parent = delTree.right # 把x的父节点设置为y????\n else:\n self.transplant(y,y.right) # y右子节点替换到当前位置\n y.right = delTree.right # y的右节点设置为delTree的右节点\n y.right.parent = y # 架空delTree节点\n \n self.transplant(delTree,y) # 架空delTree\n y.left = delTree.left # 把y的左节点设置为delTree的左节点,说明最后会把delTree删除掉\n y.left.parent = y\n y.color = delTree.color # 将delTree的颜色给y\n if yorgcolor == 'black': # 需要删除节点的颜色为黑色,破坏了RB的规则\n self.RBT_Delete_Fixup(self,x) # 通过current来修复红黑树\n\n # 删除操作修复,x为current\n def RBT_Delete_Fixup(self,tree,x):\n # x非根节点,且x的颜色为黑色\n # transplant会自动判断当前节点是否为根节点\n \"\"\"\n x为root可能的情况:\n [y]\n /\n [x]\n 与其对称的情况同理\n \"\"\"\n # x为红色时,将其转变成黑色即可,这种情况也不考虑,因为我们在delete的时候,已经把删除节点变成了一个最多只有一个孩子的节点了,其没有兄弟节点\n while x is not tree.root and x.color == 'black':\n if x == x.parent.left: # 替换节点在父节点左侧\n w = x.parent.right # 替换节点的兄弟节点\n\n # case1,兄弟是红节点\n if w.color == \"red\": # 兄弟为红色\n # 因为被删除的节点位于父节点左侧,所以左侧黑节点比右侧少1,我们可以利用红色节点来为左侧节点增高\n # 因为w的颜色是红色,所以父节点为黑色.\n w.color = \"black\"\n x.parent.color = \"red\"\n self.left_rotate(tree,x.parent) # 左旋父节点\n w = x.parent.right # 兄弟节点变成了x.parent.right.left\n \n # case2,兄弟节点为黑节点,并且两个孩子也是黑色的\n if w.left.color == \"black\" and w.right.color == 'black':\n w.color = 'red' # 将兄弟节点设置为红色\n x = x.parent # 将x替换成x的父节点\n # 进过case2,x.parent的左右两侧都少了一个节点,此刻一定是不平衡的\n\n # case3,兄弟节点子节点左红右黑\n else:\n if w.right.color == 'black': # 暗示左节点为红色,否则就进之前的if了\n w.left.color = 'black' # 将兄弟节点左节点编程黑色,此刻其左孩子黑高度升高\n w.color = 'red' # 兄弟节点变红色,其黑高度降低\n self.right_rotate(tree,w) # 沿兄弟节点右旋,此时以w左节点为核心的新子树黑高度恢复\n w = x.parent.right # 将兄弟节点设置为x父节点的右节点(更新兄弟节点)(原兄弟节点的左节点)\n \n # case4,兄弟节点右节点为红色,左节点不论\n w.color = x.parent.color # 将兄弟节点颜色设置为父节点颜色,此处原因可以分情况讨论\n x.parent.color = 'black' # 将父节点的颜色设置为黑色\n w.right.color = 'black' # 兄弟节点右节点设置为黑色\n # TODO:\n self.left_rotate(tree,x.parent) # 绕x的父节点左旋\n x = tree.root\n"
},
{
"alpha_fraction": 0.720095694065094,
"alphanum_fraction": 0.7368420958518982,
"avg_line_length": 15.640000343322754,
"blob_id": "d8a6b6ac535744ba6a7edf9499ca555a0c274831",
"content_id": "c8bcc6d1c51f34ceb468438435b89bd19412e36f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 962,
"license_type": "no_license",
"max_line_length": 201,
"num_lines": 25,
"path": "/算法/贪心算法/最多可以参与几项活动/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 最多可以参与几项活动 \n- 来源:geekforgeek \n**题目**\n```\n有一堆活动,现在知道他们的开始时间个结束时间,试着设计一个算法,使求出最多可以参与哪些活动. \n```\n**输入** \n```\nN:活动总数\nsN:活动N的开始时间\neN:活动N的结束时间\nN\ns1 e1\ns2 e2\ns3 e3\n...\nsN eN\n```\n**输出** \n```\n最多可参与的活动数目\n```\n\n**思路** \n按照结束时间升序排列.则第一活动必定能够参与(结束时间最小).根据这个原因,我们用第二个活动的开始时间与第一个元素的结束时间比较,如果开始时间大于结束时间,说明第二个活动开始时,第一个还没有结束,此时,说明第二个活动无法参与,我们向后遍历,直到某个元素的开始时间大于第一个元素的结束时间,此时该活动可以参与,我们将当前活动的结束时间作为标准,并将计数+1,重复上面的过程.最后的计数即为能够参与的活动数 \n"
},
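A minimal Python sketch of the greedy selection described above; the interval data and the function name are my own:

```python
def max_activities(intervals):
    # Sort by end time, then greedily keep every activity that starts
    # no earlier than the end of the last one selected.
    count, last_end = 0, float("-inf")
    for start, end in sorted(intervals, key=lambda it: it[1]):
        if start >= last_end:
            count += 1
            last_end = end
    return count

print(max_activities([(1, 2), (3, 4), (0, 6), (5, 7), (8, 9), (5, 9)]))  # 4
```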
{
"alpha_fraction": 0.47268107533454895,
"alphanum_fraction": 0.505717933177948,
"avg_line_length": 20.86111068725586,
"blob_id": "58b9815c38fe22234ba67c7a265d18f78a7c67c1",
"content_id": "a8f61539fdd69108fb98fcaeea71a6fa6a100832",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 839,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 36,
"path": "/算法/排序算法/猴子排序/c/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#include<stdio.h>\n#include<time.h>\n#include<stdlib.h>\n\n//判断是否排序完毕\nint isSorted(int arr[],int size){\n int i;\n for(i = 1; i < size; i++)\n if(arr[i] < arr[i-1]) return 0;\n return 1;\n}\n\n//排序主体\nvoid monkey_sort(int arr[],int size){\n int rnd1,rnd2;\n while(!isSorted(arr,size)){\n rnd1 = rand() % size;\n rnd2 = rand() % size;\n //任意调换两个元素\n int tmp = arr[rnd1];\n arr[rnd1] = arr[rnd2];\n arr[rnd2] = tmp;\n }\n}\n\nint main(){\n srand(time(NULL));\n int arr[] = {1,3,4,0,0,5,8,5,9,0};\n int size = sizeof(arr) / sizeof(arr[0]);\n int i;\n\n puts(\"排序前\");for(i = 0; i < size; i++) printf(\"%d \",arr[i]);puts(\"\");\n monkey_sort(arr,size);\n puts(\"排序后\");for(i = 0; i < size; i++) printf(\"%d \",arr[i]);puts(\"\");\n return 0;\n}\n"
},
{
"alpha_fraction": 0.43675416707992554,
"alphanum_fraction": 0.6062052249908447,
"avg_line_length": 15.541666984558105,
"blob_id": "340576eb8de6a23698ec8af38d92d3211cd6f5f5",
"content_id": "9dd36567442b156cfcf4b22958adccc9a6913fea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 874,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 24,
"path": "/leetcode/696-计数二进制子串/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "[简单题] \r\n给定一个字符串 s,计算具有相同数量0和1的非空(连续)子字符串的数量,并且这些子字符串中的所有0和所有1都是组合在一起的。\r\n\r\n重复出现的子串要计算它们出现的次数。\r\n\r\n**示例 1 :** \r\n```\r\n输入: \"00110011\"\r\n输出: 6\r\n解释: 有6个子串具有相同数量的连续1和0:“0011”,“01”,“1100”,“10”,“0011” 和 “01”。\r\n请注意,一些重复出现的子串要计算它们出现的次数。\r\n另外,“00110011”不是有效的子串,因为所有的0(和1)没有组合在一起。\r\n```\r\n\r\n**示例 2 :** \r\n```\r\n输入: \"10101\"\r\n输出: 4\r\n解释: 有4个子串:“10”,“01”,“10”,“01”,它们具有相同数量的连续1和0。\r\n```\r\n\r\n**注意:**\r\n- s.length 在1到50,000之间。\r\n- s 只包含“0”或“1”字符。"
},
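The statement above carries no companion code in these notes; a minimal Python sketch of the usual run-length idea (group the string into maximal runs of equal characters, then sum the minimum of adjacent run lengths; the function name is my own):

```python
def count_binary_substrings(s):
    # Lengths of maximal runs of equal characters, e.g. "00110011" -> [2, 2, 2, 2]
    runs, cur = [], 1
    for a, b in zip(s, s[1:]):
        if a == b:
            cur += 1
        else:
            runs.append(cur)
            cur = 1
    runs.append(cur)
    # Each pair of adjacent runs contributes min(len1, len2) valid substrings.
    return sum(min(a, b) for a, b in zip(runs, runs[1:]))

print(count_binary_substrings("00110011"))  # 6
print(count_binary_substrings("10101"))     # 4
```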
{
"alpha_fraction": 0.6224831938743591,
"alphanum_fraction": 0.6426174640655518,
"avg_line_length": 18.866666793823242,
"blob_id": "9d11bb8df6c0e7a40798c39061f3a2dba8e88264",
"content_id": "1f0d2156a5255b692130e018da402c904fcdf39c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 842,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 30,
"path": "/编程开发/前端/css/float元素父元素高度为0解决.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# float父元素高度为0 \n由于float属性的特点,其父元素高度会变成0.很多时候我们需要让父元素的高度与子元素的高度一致,这个时候可以用下面的方案解决: \n1. 方案1:在浮动元素下面放一个清楚浮动的元素:`div{clear:both;}` \nhtml \n```html\n<div class=\"clearfix\">\n <div style=\"float: left;\">Div 1</div>\n <div style=\"float: left;\">Div 2</div>\n</div>\n```\ncss \n```css\n.clearfix:after{\n content: ' ';\n display:block;\n height:0;\n clear: both;\n}\n```\n2. 将父元素设置为浮动 \nhtml \n```html\n<div style=\"float:left\">\n <div style=\"float:left;\">Div 1</div>\n <div style=\"float:left;\">Div 2</div>\n</div>\n```\n3. 为父元素显示的设置高度. \n4. 为父元素加入overflow:hidden或overflow:auto属性. \n上述方案中,方案一最好,兼容性最强.\n"
},
{
"alpha_fraction": 0.7060367465019226,
"alphanum_fraction": 0.7086614370346069,
"avg_line_length": 28.230770111083984,
"blob_id": "bbfedde4e52d9711c92fed6b644917fa3f8391ad",
"content_id": "d8c7beb712ebfce58a9118b586c6aed0ff66ae2e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1352,
"license_type": "no_license",
"max_line_length": 194,
"num_lines": 26,
"path": "/编程开发/后端/随想/unserialize解析出自身.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# unserialize解析出自身\n**描述:** \nunserialize是一个用来解析由serialize函数序列化数据的.如何做到unserialize一个变量,使得解析后的数据与解析前的参数相同呢? \n对于这个问题,我们进行分析,unserialize首先解析的是一个字符串,那么解析前的数据也应该是一个字符串,但是,假设解析之前是一个常规意义上的字符串的话,经过serialize串行话之后,肯定会加上相应的标记,所以此时不会满足要求.我们需要找到一种在不同环境下类型\"会变\"的类型.可以想到,对象是有这样的功能的,PHP对象有一个__tostring魔术方法,可以将对象转为字符串. \n```\n<?php\nfunction Model{\n public $name;\n public $age;\n public $sex;\n\n function __construct(){\n $this->name = 'Francis';\n $this->age = 24;\n $this->sex = '男';\n }\n\n //在这里,我们返回对当前对象序列化之后的字符串\n function __tostring(){\n return serialize($this);\n }\n}\n$model = new Model;\nvar_dump($model == unserialize($model)); //此时会显示true\n```\n当然这并不是严格意义上的相同,反序列化之后会生成一个新的对象,只不过对象的内容与原对象内容一致.对反序列化之后的对象依然可以进行反序列化,得到的内容依然是相同的. \n"
},
{
"alpha_fraction": 0.40145984292030334,
"alphanum_fraction": 0.4197080433368683,
"avg_line_length": 13.421052932739258,
"blob_id": "932e8dd696ef2fff7f1a5e19c63f181cc069d294",
"content_id": "9b6d7e1e763bfa030f5a63413772b7cf0454a449",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 314,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 19,
"path": "/算法/其他/约瑟夫环/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "/*\n * 核心公式:\n * F(N,M) = (F(N-1,M)+M) % N\n **/\n#include<stdio.h>\n\nint main(){\n int N,M;\n printf(\"输入参数人数和剔除间隔:\");\n scanf(\"%d%d\",&N,&M);\n int i;\n int res = 0;\n for(i = 2; i <= N; i++)\n res=(res+M)%i;\n\n printf(\"剩下第%d个人.\\n\",res+1);\n\n return 0;\n}\n"
},
{
"alpha_fraction": 0.7289972901344299,
"alphanum_fraction": 0.7506775259971619,
"avg_line_length": 32.3636360168457,
"blob_id": "391801a73633796ea131ef451863ef71e6af1bff",
"content_id": "322e8ac965843f6691e06ebc7c0d4af36244c3fd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 547,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 11,
"path": "/编程开发/Linux/常见问题/Tar.php问题.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# Tar.php问题\n今天由于升级了一下php-redis插件,导致php7.2redis插件突然就出问题了,无奈使用pecl install redis安装(apt install php-redis安装不报错,但是没有卵用). \n执行之后报错:\n```\nPHP Fatal error: Cannot use result of built-in function in write context in /usr/share/php/Archive/Tar.php on line 639\n```\n从上面这句报错可以看出639行出现错误,打开文件,发现语句如下: \n```\n $v_att_list =& func_get_args();\n```\n我们将func_get_args方法前面的&去掉,这个bug即解决了. \n"
},
{
"alpha_fraction": 0.4378921091556549,
"alphanum_fraction": 0.45859473943710327,
"avg_line_length": 17.74117660522461,
"blob_id": "0c4513965a64feaf23f2180952904fe7bc8d8ec7",
"content_id": "549ee10becf3c5138ba8837a122176f76ba75606",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1756,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 85,
"path": "/算法/贪心算法/Summer-Vacation/c/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#if 0\n这里利用构建的一个大根堆来实现优先队列(其实就是堆的一种形式)的功能,\n#endif\n\n#include<stdio.h>\n#include<stdlib.h>\n\nstruct Node{\n int a;\n int b;\n}node[100007];\n\nint tmp[100007]; //临时空间\nint tmp_length = 0; //临时空间长度\n\n//重新调整堆\nvoid sort(int idx){\n int left = idx*2+1;\n int right = idx*2+2;\n int max = idx;\n if(left < tmp_length && tmp[left] >= tmp[max]) max = left;\n if(right < tmp_length && tmp[right] >= tmp[max]) max = right;\n if(max != idx){\n //交换元素\n tmp[max]^=tmp[idx];\n tmp[idx]^=tmp[max];\n tmp[max]^=tmp[idx];\n sort(max); //重新调整\n }\n}\n\n//压入一个数字\nvoid push(int num){\n tmp[tmp_length++] = num;\n int i,idx;\n for(i = tmp_length-1; i > 0; i = idx){\n idx = (i-1)/2;\n sort(idx); //重新调整\n }\n}\n\n//弹出元素\nint pop(){\n int ret = 0;\n if(tmp_length > 0){\n ret = tmp[0];\n tmp[0] = tmp[--tmp_length];\n sort(0);\n }\n return ret;\n}\n\n//判断是否为空\nint empty(){\n return tmp_length == 0;\n}\n\nint cmp(const void * a,const void * b){\n return ((struct Node*)a)->a > ((struct Node*)b)->a;\n}\n\nint main(){\n int N,M;\n int i,j;\n int sum,max,idx;\n\n while(~scanf(\"%d%d\",&N,&M)){\n for(i = 0; i < N; i++)\n scanf(\"%d%d\",&node[i].a,&node[i].b);\n qsort(node,N,sizeof(struct Node),cmp);\n\n sum = 0;\n tmp_length = 0;\n idx = 0;\n for(i = 1; i <= M; i++){\n while(idx < N && node[idx].a <= i){\n push(node[idx++].b); //压入一个数字\n }\n if(!empty()){\n sum+=pop();\n }\n }\n printf(\"%d\\n\",sum);\n }\n}\n\n"
},
{
"alpha_fraction": 0.4079754650592804,
"alphanum_fraction": 0.4435582756996155,
"avg_line_length": 20.733333587646484,
"blob_id": "1ca7aba1812b65144f0c722153bd204926800f05",
"content_id": "8960d6f654da4e214ad07d528f943654303cece6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1742,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 75,
"path": "/DailyCodingProblems/399_sum_same_value_Facebook/c/fenwick/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#include<stdio.h>\n\n#define SIZE 10000\nint fenwick[SIZE];\nint result[3][SIZE];\n\nint lowbit(int num); //获取该数字最小有效位(1)\nvoid update(int i,int max,int val); //\nint sum(int i); //0 - i区间的数字和\nint getResult(int arr[],int size,int result[3][SIZE]);\n\nint main(){\n int arr[] = {\n 1,1,1,3,4,2,8,1,1,2,2,2,2\n };\n int size = sizeof(arr) / sizeof(arr[0]);\n int i,j;\n if(!getResult(arr,size,result)){\n printf(\"null\\n\");\n }else{\n printf(\"[\\n\");\n for(i = 0; i < 3; i++){\n printf(\" [\");\n for(j = 0; result[i][j] >= 0; j++){\n printf(\"%d%c\",result[i][j],result[i][j+1] == -1 ? '\\0':',');\n }\n printf(\"]%c\\n\",i == 2 ? '\\0' : ',');\n }\n printf(\"]\\n\");\n }\n return 0;\n\n}\n\nint lowbit(int num){\n return num & (-num);\n}\n\nvoid update(int i,int max,int val){\n i+=1; //索引从1开始算\n while(i <= max){\n fenwick[i-1] += val;\n i += lowbit(i); //最低有效位进1\n }\n}\n\nint sum(int i){\n i+=1;\n int s = 0;\n while(i > 0){\n s+=fenwick[i-1];\n i -= lowbit(i);\n }\n return s;\n}\n\nint getResult(int arr[],int size,int result[3][SIZE]){\n int i,j,k;\n for(i = 0; i < size; i++)\n update(i,size,arr[i]);\n\n int s = sum(size - 1); //获取序列的和\n if(s % 3 != 0) return 0;\n s /= 3;\n int start = 0;\n for(i = j = 0; i < 3; i++){\n for(k=0;j < size && (sum(j) - start <= s);j++,k++){\n result[i][k] = arr[j];\n }\n if(sum(j-1) - start != s) return 0; //不能凑出s\n start = sum(j-1);\n result[i][k] = -1;\n }\n return 1; //只要凑出2个等于s的,第三个区间的和必然是s\n}\n"
},
{
"alpha_fraction": 0.875,
"alphanum_fraction": 0.875,
"avg_line_length": 15,
"blob_id": "265533785112a39ea5332ebae89985b898b51292",
"content_id": "4750ede5ff7a8fa6a5c9f11cb3c6d4b9148af48a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 74,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 2,
"path": "/编程开发/后端/Laravel/问题/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 问题\n这里记录我在laravel中遇到的一些问题及解决方法\n"
},
{
"alpha_fraction": 0.560030996799469,
"alphanum_fraction": 0.5615801811218262,
"avg_line_length": 25.367347717285156,
"blob_id": "0733fe049b39299600b5f94c898b9529d6b7fdce",
"content_id": "055ad0490d57bff0205afceb027842e5311aa8e0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1453,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 49,
"path": "/数据结构/树/字典树/term1/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#include\"trie.h\"\n\n#define str(n) (n),(strlen(n)+1)\n\nvoid visit(trienodep root){\n if(root == NULL){\n printf(\"\\n\");\n return;\n }\n printf(\"%c{%d}[%p] \",root->key,root->key,root->data);\n visit(root->next);\n visit(root->next_level);\n}\n\nint main(){\n triep tree = trie();\n insertNode(tree,\"apple\",str(\"苹果\"));\n insertNode(tree,\"app\",str(\"应用\"));\n insertNode(tree,\"boy\",str(\"男孩\"));\n insertNode(tree,\"cat\",str(\"猫\"));\n insertNode(tree,\"唐揚げ\",str(\"炸鸡\"));\n insertNode(tree,\"ねこ\",str(\"猫\"));\n insertNode(tree,\"いぬ\",str(\"狗\"));\n insertNode(tree,\"お風呂\",str(\"澡堂\"));\n insertNode(tree,\"風邪\",str(\"感冒\"));\n insertNode(tree,\"熱\",str(\"发烧\"));\n insertNode(tree,\"산채하다\",str(\"散步\"));\n insertNode(tree,\"미국\",str(\"美国\"));\n insertNode(tree,\"한국\",str(\"韓国\"));\n insertNode(tree,\"풀\",str(\"草\"));\n insertNode(tree,\"저는 중국사람입니다\",str(\"我是中国人\"));\n insertNode(tree,\"アメリカ\",str(\"美国\"));\n\n visit(tree->root);\n // deleteNode(tree,\"アメリカ\");\n\n searchresultp result = search(tree,\"アメリカ\");\n\n string data = (string)result->data;\n printf(\"%s\\n\",(result->status == TRUE && data != NULL ? data : \"查找失败\"));\n\n deleteNode(tree,\"apple\");\n visit(tree->root);\n printf(\"========硬删除=======\\n\");\n hardDeleteNode(tree,\"apple\");\n visit(tree->root);\n\n return 0;\n}"
},
{
"alpha_fraction": 0.7555555701255798,
"alphanum_fraction": 0.7555555701255798,
"avg_line_length": 21.5,
"blob_id": "ad626a2e79e02aac253b28edd573170597d6f892",
"content_id": "594f44f2fdbea491d545c8f18e6e29583eeb1b53",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 226,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 4,
"path": "/博弈论/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 博弈论\n有关博弈论的一些学习笔记.\n- 夫未战而庙算得胜者,得算多也.未战而庙算不得胜者,得算少也.多算胜,少算不胜,何况于不算者乎.吾以此观之,胜负定矣. \n- 谋定而动\n"
},
{
"alpha_fraction": 0.6194770336151123,
"alphanum_fraction": 0.6406672596931458,
"avg_line_length": 23.351648330688477,
"blob_id": "2ce8d8de5a87544f066071a29fc3f36c25b92dfa",
"content_id": "77924b81e2fc8c62600fc804d4028471944547d1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3204,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 91,
"path": "/编程开发/Linux/命令/curl.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# curl命令\n用于向指定的地址发送请求并获取响应信息的命令.类似于postman \n在默认情况下`curl`命令发送`GET`请求 \n```\ncurl https://api.ixysoft.cn/tel/17712345678\n```\n这个命令会向`https://api.ixysoft.cn/tel/17712345678`这个地址发送`GET`请求,服务器返回的内容会在命令行输出. \n**各个选项:** \n**-A** 指定客户端的用户代理表头,即`User-Agent`,默认情况下curl发送`curl/版本信息`\n```\n$ curl -A 'Wechat/hello' https://www.baidu.com\n```\n**-b** 向服务器发送cookie,可以用多个-b发送多个cookie选项,也可以指定cookie的文件发送 \n```\n$ curl -b 'name=Francis' https://www.baidu.com\n```\n**-c** 将从服务端获取的cookie写入文件 \n```\n$ curl -c cookie.txt https://www.baidu.com\n```\n**-d** 向指定地址发送`POST`请求的数据体,与`-b`选项一样,`-d`也可以指定一个文件 \n```\n$ curl -d 'name=Francis&age=24' http://www.baidu.com\n```\n**--data-urlencode** 跟`-d`功能相同,区别在于,该选项会对需要发送的数据进行`url`编码 \n```\n$ curl --data-urlencode 'msg=this is just a test!' https://www.baidu.com\n```\n**-e** 用于设置`HTTP`标头的`Referer`以指定请求源. \n```\n$ curl -e 'https://www.baidu.com/s?wd=apple' https://www.baidu.com\n```\n**-F** 用于向地址上传二进制文件 \n```\n$ curl -F '[email protected]' https://www.baidu.com\n```\n该选项会在请求头中加入`Content-Type: multipart/form-data`,然后将`test.c`作为`file`字段上传.该选项可以指定`MIME`类型 \n```\n$ curl -F '[email protected];type=text/plain' https://www.baidu.com\n```\n**-G** 采用`GET`方式发送数据\n```\n$ curl -G -d 'wd=Francis' http://www.baidu.com/s\n```\n如果没有`-G`选项`-d`默认发送`POST`请求 \n`-G`选项可以结合`--data-urlencode`选项使用 \n**-H** 添加请求头条目 \n```\n$ curl -H 'Content-Type: text/css;charset=UTF8' https://www.baidu.com\n```\n**-i** 输出结果中附带请求头信息 \n```\ncurl -i https://www.baidu.com\n```\n**-I** 向服务器发送`HEAD`请求\n```\n$ curl -I https://www.baidu.com\n```\n等价于`--head` \n**-k** 跳过`SSL`检测 \n```\n$ curl -k https://www.baidu.com\n```\n该选项不会检查`SSL`证书的正确性 \n**-L** 重定向跟随\n```\n$ curl -L 'http://blog.ixysoft.cn'\n```\n**--limit-rate** 设置请求与回应的带宽,模拟各种速度的网络环境 \n```\n$ curl --limit-rate 200k https://www.baidu.com\n```\n经测试好像没有作用,不知道是不是我测试姿势有问题 \n**-o** 输出响应的内容到文件,与`wget`类似 \n```\n$ curl -L -o index.html https://blog.ixysoft.cn\n```\n**-O** 不需要指定文件名,自动将网址最后部分视作文件名 \n**-s** 不输出错误和进度(以安静模式进行) \n**-S** 指定只输出的错误信息,常与`-s`一起使用 \n**-u** 设置服务器认证的用户名和密码\n```\n$ curl -u 'fx:123456' https://www.baidu.com\n命令设置用户名为fx,密码为123456,并将其转为请求头`Authorization: Basic Zng6MTIzNDU2`\n```\n**-v** 输出请求的整个过程,一般用于调试 \n**-x** 指定请求代理 \n```\n$ curl -x sock5://username:[email protected]:8080 https://www.google.com\n```\n**-X** 指定请求方法 \n"
},
{
"alpha_fraction": 0.4921826124191284,
"alphanum_fraction": 0.5109443664550781,
"avg_line_length": 23.600000381469727,
"blob_id": "2eb85b9e24debe09fac27baae682ff597880ec47",
"content_id": "1f15093dec5685366a8a696d09a6416022a508d5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1617,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 65,
"path": "/leetcode/1023-驼峰式匹配/c/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#if 0\n√ Accepted\n √ 36/36 cases passed (0 ms)\n √ Your runtime beats 100 % of c submissions\n √ Your memory usage beats 100 % of c submissions (6.8 MB)\n#endif\n\n#include<stdio.h>\n#include<stdlib.h>\n#include<string.h>\n\ntypedef enum{\n false = 0,\n true = 1\n}bool;\n/*\n * @lc app=leetcode.cn id=1023 lang=c\n *\n * [1023] 驼峰式匹配\n */\n\n#define isupper(c) ((c)>='A'&&(c)<='Z')\n/**\n * Note: The returned array must be malloced, assume caller calls free().\n */\nbool* camelMatch(char ** queries, int queriesSize, char * pattern, int* returnSize){\n int i,j,k;\n bool* ret = (bool*)malloc(sizeof(bool) * queriesSize);\n int pattern_size = strlen(pattern);\n *returnSize = queriesSize;\n for(i = 0; i < queriesSize; i++){\n char *query = queries[i];\n bool tmp = true;\n for(j = k = 0; query[j] != '\\0'; j++){\n if(isupper(query[j])){\n if(query[j] == pattern[k]){\n k++;\n }else{\n tmp = false;\n break;\n }\n }else if(query[j] == pattern[k]){\n k++;\n }\n }\n ret[i] = (tmp && (k == pattern_size));\n }\n return ret;\n}\n\nint main(){\n char *queries[] = {\n \"FooBar\",\"FooBarTest\",\"FootBall\",\"FrameBuffer\",\"ForceFeedBack\"\n };\n int size = sizeof(queries) / sizeof(queries[0]);\n int i;\n char *pattern = \"FB\";\n int retSize;\n bool *ret = camelMatch(queries,size,pattern,&retSize);\n for(i = 0; i < retSize; i++)\n printf(\"%s \",ret[i]?\"true\":\"false\");\n puts(\"\");\n free(ret);\n return 0;\n}\n"
},
{
"alpha_fraction": 0.4882352948188782,
"alphanum_fraction": 0.5764706134796143,
"avg_line_length": 11.142857551574707,
"blob_id": "f3d30e637c403cebfc463eb71f0f92f16080d32c",
"content_id": "3780ecbb38572807711d1527bf47bf7a7ecf98b4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 638,
"license_type": "no_license",
"max_line_length": 167,
"num_lines": 28,
"path": "/算法/背包问题/捡宝石/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 捡宝石 \n一个寻宝者在一个山洞里面找到一堆宝石,宝石的种类多种多样,每种宝石都有自己的价值,寻宝者随身携带了一个可以给宝石称重,估价的设备.但是寻宝者只有一个最多可以装重量为M的宝石.现在设每块宝石的重量分别为wi(i=0,1,2,3,4,5...),价值分别为vi(i=0,1,2,3,4,5,6....).求背包能够容纳宝石的最大价值. \n**输入**:\n```\nN M\nw1 v1\n...\nwn vn \n```\n`N`为石头的总数,`M`为背包最大可以容纳的重量. \n**输出**: \n```\n最大的价值数\n``` \n**示例**: \n**输入**:\n```\n5 10\n1 2\n3 2\n6 4\n5 5\n3 3\n```\n**输出**:\n```\n10\n```\n"
},
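The record above states a classic 0/1 knapsack problem. What follows is a minimal standalone C sketch of the standard dynamic-programming solution, not code from the repo; it assumes M ≤ 10000 and uses the one-dimensional compression with reverse traversal described in the knapsack README further down in this collection:
```
#include<stdio.h>
#include<string.h>

int dp[10001]; /* dp[j]: best value achievable with capacity j (assumes M <= 10000) */

int main(){
    int N,M;
    if(scanf("%d%d",&N,&M) != 2) return 1;
    memset(dp,0,sizeof(dp));
    for(int i = 0; i < N; i++){
        int w,v;
        scanf("%d%d",&w,&v);
        for(int j = M; j >= w; j--)              /* reverse order: each gem used at most once */
            if(dp[j-w] + v > dp[j]) dp[j] = dp[j-w] + v;
    }
    printf("%d\n",dp[M]);                        /* prints 10 for the sample input above */
    return 0;
}
```
Feeding it the sample input (5 10 / 1 2 / 3 2 / 6 4 / 5 5 / 3 3) prints 10, matching the expected output: the best choice is the gems of weight 1, 5 and 3 with values 2, 5 and 3.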
{
"alpha_fraction": 0.5106837749481201,
"alphanum_fraction": 0.5220797657966614,
"avg_line_length": 20.60769271850586,
"blob_id": "9315864bb70bc7cc2c75e1259e2d158b1cdcace0",
"content_id": "1db2adcb573126ef16ef3e778d8d20ca5ea53f74",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 2870,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 130,
"path": "/数据结构/树/哈夫曼树/term1/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#include<stdio.h>\n#include<stdlib.h>\n\ntypedef enum{\n FALSE = 0,\n TRUE = 1,\n}Boolean;\n\ntypedef struct Node{\n int key; //关键字\n int weight; //权重\n struct Node *left;\n struct Node *right;\n}Node,*nodep;\n\ntypedef struct RateNode{\n nodep data; //当前节点\n struct RateNode* next; //后继节点\n}RateNode,*ratenodep;\n\nnodep node(int key,int weight); //插入节点\nBoolean insertNode(ratenodep *rootp,nodep nd);\nvoid visit(ratenodep root);\nnodep popNode(ratenodep* rootp);\nvoid printCode(nodep root); //输出代码\nchar dirs[100];\nint idx = -1;\n\nint main(){\n ratenodep root = NULL;\n\n int kvs[][2] = {\n {1,10},\n {2,5},\n {3,15},\n {5,12},\n {4,32},\n {7,1},\n };\n int kvl = sizeof(kvs) / sizeof(kvs[0]);\n for(int i = 0; i < kvl; i++)\n insertNode(&root,node(kvs[i][0],kvs[i][1]));\n\n visit(root);\n nodep a = NULL;\n nodep b = NULL;\n nodep res = NULL;\n while(root->next != NULL){\n a = popNode(&root);\n b = popNode(&root);\n res = node(0,a->weight + b->weight); //创建新的节点\n res->left = a;\n res->right = b;\n insertNode(&root,res);\n }\n\n printCode(res);\n \n return 0;\n}\n\nnodep node(int key,int weight){ //插入节点\n nodep ret = (nodep)malloc(sizeof(Node));\n ret->key = key;\n ret->weight = weight;\n ret->left = NULL;\n ret->right = NULL;\n}\n\n\nBoolean insertNode(ratenodep *rootp,nodep ins){\n if(rootp == NULL || ins == NULL) return FALSE;\n ratenodep root = *rootp;\n ratenodep* lastp = NULL;\n for(;root != NULL && root->data->weight < ins->weight;root = root->next){\n lastp = rootp;\n rootp = &(root->next);\n }\n\n ratenodep nd = (ratenodep)malloc(sizeof(RateNode));\n nd->next = NULL;\n nd->data = ins;\n\n if(root == NULL){\n *rootp = nd;\n }else if(lastp == NULL){\n nd->next = root;\n *rootp = nd;\n }else{\n root = *lastp;\n nd->next = root->next;\n root->next = nd;\n }\n return TRUE;\n}\n\nnodep popNode(ratenodep* rootp){\n ratenodep root = *rootp;\n if(root == NULL) return NULL;\n nodep ret = root->data;\n *rootp = root->next;\n free(root);\n return ret;\n}\n\nvoid visit(ratenodep root){\n while(root != NULL){\n printf(\"%d{%d}\\n\",root->data->key,root->data->weight);\n root = root->next;\n }\n}\n\nvoid printCode(nodep root){\n if(root == NULL) return;\n if(root->right != NULL || root->left != NULL ){\n idx++;\n dirs[idx] = 1;\n printCode(root->right);\n dirs[idx] = 0;\n printCode(root->left);\n idx--;\n }else if(root->left == NULL && root->right == NULL){\n printf(\"%d:\",root->key);\n for(int i = 0; i <= idx;i++){\n printf(\"%d\",dirs[i]);\n }\n printf(\"\\n\");\n return;\n }\n}"
},
{
"alpha_fraction": 0.503778338432312,
"alphanum_fraction": 0.5264483690261841,
"avg_line_length": 21.05555534362793,
"blob_id": "315f75ee8546b11cfe54d30e5f6ceaca758a37c5",
"content_id": "6ee5b9f862467d9344975258e683f17de4822358",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 794,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 36,
"path": "/编程开发/Linux/自定义/bin2sh/template.sh",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\n__FILE__=`basename $0`\n__EXECUTED__=0\nif [ $# -eq 0 -a $__EXECUTED__ -eq 0 ];then\n # if in template\n echo \"Usage:\"\n echo \"$0 'binary file path' > 'output path'\"\n exit 0\nfi\nif [ $# -eq 1 ];then\n # create a new sh file\n cat $0|sed 's/^__EXECUTED__=0$/__EXECUTED__=1/'\n # depend on gzip\n gzip -c $1\n exit 0\nfi\nPROGRAM_BEGIN_LINE=`cat $0|awk '/^_-_-ZDQC-_-_$/{print NR}'`\nif [ ! -d /tmp ];then\n mkdir /tmp\n RET=$?\n if [ $RET -ne 0 ];then\n echo \"Priviledges not enough\"\n exit $RET\n fi\nfi\n:>/tmp/$__FILE__.x\nawk \"NR>${PROGRAM_BEGIN_LINE}{print \\$0}\" $0|gzip -d 2>/dev/null > /tmp/$__FILE__.x\nchmod +x /tmp/$__FILE__.x\nRET=$?\nif [ $RET -ne 0 ];then\n echo \"Priviledges not enough\"\n exit $RET\nfi\n/tmp/$__FILE__.x\nexit $?\n_-_-ZDQC-_-_\n"
},
{
"alpha_fraction": 0.7446808218955994,
"alphanum_fraction": 0.7446808218955994,
"avg_line_length": 14,
"blob_id": "5ed49beb9f4432561da2f5f91576c304532fdcf5",
"content_id": "3165c0ca3529914bccd5378d41cb889183b01c3b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 107,
"license_type": "no_license",
"max_line_length": 20,
"num_lines": 3,
"path": "/数据结构/表/树状数组/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 树状数组\n利用数组实现类似于树的功能. \n插入及删除的时间复杂度均为O(logN) \n"
},
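A minimal sketch of the structure the note above describes, independent of the repo's own implementations: a 1-based array in which each slot covers a range determined by its lowest set bit, giving O(log N) point updates and prefix-sum queries:
```
#include<stdio.h>

#define N 16
int bit[N + 1];                         /* 1-based Fenwick array */

void update(int i, int delta){          /* add delta at position i, O(log N) */
    for(; i <= N; i += i & -i) bit[i] += delta;
}

int query(int i){                       /* prefix sum of positions 1..i, O(log N) */
    int s = 0;
    for(; i > 0; i -= i & -i) s += bit[i];
    return s;
}

int main(){
    update(3, 5);
    update(7, 2);
    printf("%d\n", query(7));           /* prints 7 */
    return 0;
}
```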
{
"alpha_fraction": 0.7979452013969421,
"alphanum_fraction": 0.8082191944122314,
"avg_line_length": 40.42856979370117,
"blob_id": "fc6487c284e57192f0ab228cdf02a89bd2d56acc",
"content_id": "7634251c5e3b63f6246250c65f170f4b0c990442",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 754,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 7,
"path": "/算法/排序算法/冒泡排序/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 冒泡排序(算法时间复杂度:O(N^2)) \n冒泡排序是一个比较简单的算法,也是大学程序设计中经常会学习到的算法. \n跟它的名字一样,冒泡排序,分成很多趟,每一趟都会把最值浮动到最后面(浮动到前面也是一样的道理). \n这个算法有很多的实现方式,比较常见的有: \n1. 一个数字下标固定,另一个向后方移动,判断下标所在的数字是否符合排序规则,不符合就交换这两个位置的数字,一趟下来就确定了一个值.重复这个过程,整体就变得有序了. \n2. 比较前后两个相邻元素的大小,从左到右遍历一遍,不符合规则的就交换,每趟确定一个值,重复过程整体有序. \n两种方法的本质是相同的. \n"
},
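A short C sketch of the second variant described above (compare adjacent pairs, swap when out of order; one value settles per pass); illustrative only, not code from the repo:
```
#include<stdio.h>

void bubble_sort(int a[], int n){
    for(int pass = 0; pass < n - 1; pass++)          /* each pass fixes one value */
        for(int i = 0; i < n - 1 - pass; i++)
            if(a[i] > a[i + 1]){                     /* out of order: swap neighbors */
                int t = a[i]; a[i] = a[i + 1]; a[i + 1] = t;
            }
}

int main(){
    int a[] = {5, 1, 4, 2, 8};
    int n = sizeof(a) / sizeof(a[0]);
    bubble_sort(a, n);
    for(int i = 0; i < n; i++) printf("%d ", a[i]);  /* 1 2 4 5 8 */
    printf("\n");
    return 0;
}
```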
{
"alpha_fraction": 0.7419354915618896,
"alphanum_fraction": 0.7419354915618896,
"avg_line_length": 13.5,
"blob_id": "a255d24abaf80419c00362c003cb2a431cdfb363",
"content_id": "348659c897088647b38356339063d739c82534c1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 69,
"license_type": "no_license",
"max_line_length": 21,
"num_lines": 2,
"path": "/编程开发/Linux/命令/df替代命令pydf.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# pydf\n显示磁盘使用状态的一个小工具,没啥好说的. \n"
},
{
"alpha_fraction": 0.745945930480957,
"alphanum_fraction": 0.7621621489524841,
"avg_line_length": 19.55555534362793,
"blob_id": "a379618887eabd1801835787c8ae938fe840fc18",
"content_id": "a4cdf8fad2fc6c7df093bbe04e7e05541f931765",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 321,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 9,
"path": "/编程开发/Linux/自定义/bin2sh/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# bin2sh\n二进制文件加上shell外壳的工具. \n用法: \n```\nsh template.sh 查看帮助\nsh template.sh 可执行文件 > 生成加壳shell文件\n```\ntemplate.sh可以改成任意名称,这里建议改成bin2sh. \n为了方便访问,可以给bin2sh文件加上可执行权限,并放置在/usr/local/bin目录中.\n"
},
{
"alpha_fraction": 0.875,
"alphanum_fraction": 0.875,
"avg_line_length": 31,
"blob_id": "b53e29083720376bb3ab3ac58abffa5fb11dec8f",
"content_id": "d76265b26e5f36829e173eff72b0562943d44320",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 88,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 1,
"path": "/算法/其他/最长连续非递减序列/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "有一个序列,其中的元素乱序,求该序列中最长的连续非递减子序列.\n"
},
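The statement above asks for the longest contiguous non-decreasing run, which a single linear scan solves; a small illustrative C sketch (the array contents are made up for the example):
```
#include<stdio.h>

int main(){
    int a[] = {1, 2, 2, 1, 3, 4, 5, 0};
    int n = sizeof(a) / sizeof(a[0]);
    int best = 1, cur = 1;
    for(int i = 1; i < n; i++){
        cur = (a[i] >= a[i - 1]) ? cur + 1 : 1;  /* extend the run or restart it */
        if(cur > best) best = cur;
    }
    printf("%d\n", best);                        /* 4, for the run 1 3 4 5 */
    return 0;
}
```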
{
"alpha_fraction": 0.7560975551605225,
"alphanum_fraction": 0.7560975551605225,
"avg_line_length": 39,
"blob_id": "f904df58f3f4e70472157740c227a6b3e9651194",
"content_id": "6f5f7d0734584d9fc38ec89ae98ca1d0a096c1b7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 110,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 2,
"path": "/外语/英语/句子/2019-09-10.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "- You will leave life even more beautiful than you entered it. \n生命的结局会比开端更加美丽。 \n"
},
{
"alpha_fraction": 0.3779761791229248,
"alphanum_fraction": 0.4449404776096344,
"avg_line_length": 18.200000762939453,
"blob_id": "b95ab05fd686199c19e8f7884b7e0c874e94d372",
"content_id": "90b3171051dcff5d05f926b55bdf89e7037e2d46",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 684,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 35,
"path": "/DailyCodingProblems/402_rotate_number_Twitter/c/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#include<stdio.h>\n#include<string.h>\n\nchar rotate_table[] = {\n //0,1,2, 3, 4,5,6, 7,8,9\n 0,1,2,-1,-1,5,9,-1,8,6\n};\n\nchar isRotateNumber(char * num,int len){\n int i;\n int l = (len+1) / 2;\n char n1,n2;\n for(i = 0; i < l; i++){\n n1 = num[i] - '0';\n n2 = num[len - 1 - i] - '0';\n if(rotate_table[n1] != n2) return 0;\n }\n return 1;\n}\n\nint main(){\n int N;\n printf(\"输入数字位数:\");\n scanf(\"%d\",&N);\n char num[16];\n int res = 1;\n int i;\n for(i = 0; i < N; i++) res*=10;\n for(i =N == 1 ? 0 : res/10; i < res; i++){\n sprintf(num,\"%d\",i);\n if(isRotateNumber(num,N)) printf(\"%d\\n\",i);\n }\n\n return 0;\n}\n"
},
{
"alpha_fraction": 0.7983281016349792,
"alphanum_fraction": 0.8181818127632141,
"avg_line_length": 85.90908813476562,
"blob_id": "d60ddf2a5a89edf846fccb552f0c51b2d148cf47",
"content_id": "d0add34ceb33fe9bdd354c5960009906ea33a57d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1667,
"license_type": "no_license",
"max_line_length": 234,
"num_lines": 11,
"path": "/编程开发/后端/Composer/composer缺少依赖.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "今天公司有一个新需求,需要从公司代码库里面拉取一个laravel项目.因为对laravel不是特别了解.所以在安装第三包依赖包的时候就出来了问题. \n执行composer命令,出现一大堆的依赖缺失.看第一个报错,项目要求的PHP最少要7.1以上的版本,查看了一下,本地的php版本是7.0.需要升级PHP.因为本人的环境安装的是debian,所以下意识的运行:`apt search php`,发现软件源里面只有7.0的php.在google上搜索了一下,最终在[tecadmin.net](https://tecadmin.net/install-php-debian-9-stretch/)上找到了解决方法: \n```\nsudo apt install ca-certificates apt-transport-https\nwget -q https://packages.sury.org/php/apt.gpg -O- | sudo apt-key add -\necho \"deb https://packages.sury.org/php/ stretch main\" | sudo tee /etc/apt/sources.list.d/php.list\nsudo apt update # 刷新软件源\n``` \n执行完上面的代码,过程一切顺利,然后在终端执行`sudo apt search php`,最新的几个PHP版本都在软件源里,这里我下载了php7.2,php7.2-fpm. \n安装完新的PHP之后,因为在nginx中依然是7.0的sock,所以把nginx站点配置中与7.0相关的配置全部改成了7.2 \n上面的问题解决之后,在项目根目录执行`composer install`,依然报错,不过PHP版本的问题已经解决,仔细观察了一下各个问题,都有ext-xxx类似的字段,所以大概知道是PHP扩展缺失导致的问题,ext-后面的xxx表示扩展的名字,在debian里面需要什么扩展直接输入`sudo apt install php7.2-xxx`,不同的扩展对应不同的xxx. 所有的扩展问题解决后,执行`composer install`就完成了远程项目在本地的构建了.\n\n"
},
{
"alpha_fraction": 0.6834645867347717,
"alphanum_fraction": 0.752755880355835,
"avg_line_length": 34.16666793823242,
"blob_id": "1c22085c7385d22d3d8654ba3ec6bc79b50b04a7",
"content_id": "a060c2570b0442dd6dfe99bd9d1f24233473bece",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 853,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 18,
"path": "/编程开发/Linux/常见问题/系统null文件权限问题.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# /dev/null权限问题\n之前因为需要解决一个问题,直接把/etc/apt/下面的相关文件删除了,导致引入了一个很大的bug,余波至今. \n今天安装mysql的时候,这个问题有复现了.正好趁着这个机会研究了一下这个问题,在askubuntu.com上找到了一种可以的方案: \n`/dev/null` should have `0666` permissions and that is what `/etc/udev/rules.d/device-permissions.rules` says:\n```\nKERNEL==\"null|zero|full|random|urandom\", MODE=\"0666\"\n```\nBut unfortunately this is a virtual server. So the kernel has not been updated:\n```\n# uname -a\nLinux memocamp 2.6.32-042stab063.2 #1 SMP Tue Oct 23 16:24:09 MSK 2012 x86_64 x86_64 x86_64 GNU/Linux\n```\nA temporary fix is to manually set the permissions:\n```\nchmod go+rw /dev/null\n```\n\n上面的方案执行之后在执行apt update就不会出现卡死的情况. \n"
},
{
"alpha_fraction": 0.6740087866783142,
"alphanum_fraction": 0.7753304243087769,
"avg_line_length": 36.83333206176758,
"blob_id": "f38f3344e371aa0bcce6c95e2f75ca521a86c81c",
"content_id": "6a5d40a0d667bc2bda5fbfb408261a89311403c7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 529,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 6,
"path": "/算法/排序算法/插入排序/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 插入排序:O(N^2) \n类似整理扑克牌的过程,从左到右扫视,如果每张牌左边有比它大的牌,我们就向前看,直到遇到比当前牌小的数字,中间的所有牌后移一个位置. \n`0,2,3,4,1` \n序列前4个数已经有序,此时1左边有2,3,4比它大,0比1小,所以我们把1插在原来2的位置,中间的2,3,4整体后移,变成了: \n`0,1,2,3,4`\n这个算法的效率不是特别高,但是这种插入的思想还是很有意义的,希尔排序就是基于插入排序的一种改良算法.\n"
},
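A compact C sketch of the card-sorting procedure described above (shift the larger cards right, then drop the current card into the gap); illustrative only, not code from the repo:
```
#include<stdio.h>

void insertion_sort(int a[], int n){
    for(int i = 1; i < n; i++){
        int key = a[i], j = i - 1;
        while(j >= 0 && a[j] > key){
            a[j + 1] = a[j];                         /* shift larger elements right */
            j--;
        }
        a[j + 1] = key;                              /* insert the card into the gap */
    }
}

int main(){
    int a[] = {0, 2, 3, 4, 1};                       /* the example from the note */
    insertion_sort(a, 5);
    for(int i = 0; i < 5; i++) printf("%d ", a[i]);  /* 0 1 2 3 4 */
    printf("\n");
    return 0;
}
```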
{
"alpha_fraction": 0.3028571307659149,
"alphanum_fraction": 0.4399999976158142,
"avg_line_length": 29.34782600402832,
"blob_id": "4598b3ba6205d643f99c80c92b2e3f41cf8acc59",
"content_id": "c7df012efc1fa359044621c2a6c4c0af6a24f4b5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1052,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 23,
"path": "/算法/背包问题/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "**示例**: \n```\n物体: 1 2 3 4 5\n重量: 1 3 6 5 3\n价值: 2 2 4 5 3\n目标背包可以装重量为10的物体. \n``` \n**列表**:(i:行,j:列) \n\n物\\重|0|1 |2 |3 |4 |5 |6 |7 |8 |9 |10\n---|---|---|---|---|---|---|---|---|---|---|---\n1 |0 |2 |2 |2 |2 |2 |2 |2 |2 |2 |2\n2 |0 |2 |2 |2 |4 |4 |4 |4 |4 |4 |4\n3 |0 |2 |2 |2 |4 |4 |4 |6 |6 |6 |8\n4 |0 |2 |2 |2 |4 |5 |7 |7 |7 |9 |9\n5 |0 |2 |2 |3 |5 |5 |7 |7 |8 |9 |10 \n\n`wi`表示第`i`个物体的重量,`vi`表示第`i`个物体的价值. \n`v(i,j)`表示前i个物体在最大重量为j时的最大价值. \n所以最大的可以获取的价值为`10`. \n行首为物体,列首为背包能容纳的重量,表格中的值表示在指定容纳重量时最大可以放入的价值 \n`v(i,j) = max{v(i-1,j),j-wi >= 0 ? vi + v(i-1,j-wi):0}` \n由于所有的当前块的价值只与上一行有关,所以,我们可以将二维数组压缩,然后逆序遍历(因为`j-wi`必定比`j`小,从后面向前遍历不会破坏上一步的价值). \n"
},
{
"alpha_fraction": 0.7814569473266602,
"alphanum_fraction": 0.7947019934654236,
"avg_line_length": 28.799999237060547,
"blob_id": "0fbab6d5171d26296185976f346dfb6026b09a8c",
"content_id": "ce07729d07b74fbe9d43049d8d69174edf3ed9cb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 387,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 5,
"path": "/算法/位运算/反转的位数/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 反转位数\n有两个整型数字(32)位,对于每个数字,可以对其内部的某位进行反转操作,问最少需要反转多少次可以将其中一个数变成另一个数. \n**思路**: \n这个题比较简单,列出所有的二进制位,不相同的即为需要反转的位. \n可以先对两个数进行一次按位异或操作,这样可以确定所有需要反转的位. \n"
},
{
"alpha_fraction": 0.653333306312561,
"alphanum_fraction": 0.6909090876579285,
"avg_line_length": 21.88888931274414,
"blob_id": "8c09b96cc8b18344a1fe249e6f21d4e6a2588c55",
"content_id": "9c87a9db7366185fe7e6ae8c8ab46291f8bb1351",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2148,
"license_type": "no_license",
"max_line_length": 152,
"num_lines": 72,
"path": "/算法/贪心算法/Summer-Vacation/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# Summer Vacation\n```\nTime Limit: 2 sec / Memory Limit: 1024 MB\n```\n**Problem Statement** \nThere are `N` one-off jobs available. If you take the `i`-th job and complete it, you will earn the reward of Bi after Ai days from the day you do it. \nYou can take and complete at most one of these jobs in a day. \nHowever, you cannot retake a job that you have already done. \nFind the maximum total reward that you can earn no later than `M` days from today. \nYou can already start working today. \n\n**Constraints** \n- All values in input are integers. \n- 1 ≤ N ≤ 10^5 \n- 1 ≤ M ≤ 10^5 \n- 1 ≤ Ai ≤ 10^5 \n- 1 ≤ Bi ≤ 10^4 \n**Input** \nInput is given from Standard Input in the following format:\n```\nN M\nA1 B1\nA2 B2\n.\n.\n.\nAN BN\n```\n**Output** \nPrint the maximum total reward that you can earn no later than `M` days from today.\n\n**Sample Input 1** \n```\n3 4\n4 3\n4 1\n2 2\n```\n**Sample Output 1** \n```\n5\n```\nYou can earn the total reward of `5` by taking the jobs as follows: \nTake and complete the first job today. You will earn the reward of `3` after four days from today. \nTake and complete the third job tomorrow. You will earn the reward of `2` after two days from tomorrow, that is, after three days from today. \n**Sample Input 2** \n```\n5 3\n1 2\n1 3\n1 4\n2 1\n2 3\n```\n**Sample Output 2** \n```\n10\n```\n**Sample Input 3** \n```\n1 1\n2 1\n```\n**Sample Output 3** \n```\n0\n```\n\n# 思路\n说实话,这个题目我一开始是当背包问题来写的.因为工作的结算时间虽然不同,但是工作时间是相同的,很多工作都是可以选择的,但是用解背包问题的思想,很多问题会交织在一起,难以解决.结算时间只能看做是一个维度,而在结算时间内还能做其他的事情. \n既然用解背包的思路比较困难,这种问题可以试试贪心算法.以剩余时间作为一个标准,选择在剩余时间内能做的工资最高的事情,将这个工作移除队列(剩最短的时间能做的事,能为其他的事空出更多的时间).然后依次按剩余时间递增的顺序遍历出所有在剩余时间内工资最高的工作. \n这样到最后即可获得最高的工资. \n"
},
{
"alpha_fraction": 0.4099999964237213,
"alphanum_fraction": 0.41999998688697815,
"avg_line_length": 17.18181800842285,
"blob_id": "f06ef156b6ebcd64c6b9b31a6aa797ccaa710dbb",
"content_id": "85e76858fd66a96ad92548cdbfc97d734115fc4e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 210,
"license_type": "no_license",
"max_line_length": 32,
"num_lines": 11,
"path": "/算法/位运算/反转的位数/c/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#include<stdio.h>\n\nint main(){\n int a,b;\n while(~scanf(\"%d%d\",&a,&b)){\n int count = 0; //反转的次数\n int c = a^b;\n for(;c;c&=c-1,count++);\n printf(\"%d\\n\",count);\n }\n}\n"
},
{
"alpha_fraction": 0.6536082625389099,
"alphanum_fraction": 0.6646047830581665,
"avg_line_length": 23.644067764282227,
"blob_id": "028b422b721a555e6d868049e6ec2b332c0ca976",
"content_id": "3a5614508edac1fb91555044da0ebdd8176840d1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2053,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 59,
"path": "/编程开发/后端/Laravel/Laravel官方教程笔记/5.8/1. 开始/4. 发布.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 发布 \n## 1. 介绍 \n在发布应用前,我们需要做进行一些操作以使得应用执行得更有效率. \n## 2. 服务器配置 \n### Nginx \n可以参考`Laravel Forge`的配置: \n```\nserver {\n listen 80;\n server_name example.com;\n root /example.com/public;\n\n add_header X-Frame-Options \"SAMEORIGIN\";\n add_header X-XSS-Protection \"1; mode=block\";\n add_header X-Content-Type-Options \"nosniff\";\n\n index index.html index.htm index.php;\n\n charset utf-8;\n\n location / {\n try_files $uri $uri/ /index.php?$query_string;\n }\n\n location = /favicon.ico { access_log off; log_not_found off; }\n location = /robots.txt { access_log off; log_not_found off; }\n\n error_page 404 /index.php;\n\n location ~ \\.php$ {\n fastcgi_pass unix:/var/run/php/php7.2-fpm.sock;\n fastcgi_index index.php;\n fastcgi_param SCRIPT_FILENAME $realpath_root$fastcgi_script_name;\n include fastcgi_params;\n }\n\n location ~ /\\.(?!well-known).* {\n deny all;\n }\n}\n```\n## 3. 优化 \n### 1. 自定载入优化 \n优化composer autoloader map,以使得composer能够更快的获取到需要载入的文件. \n```\ncomposer install --optimize-autholoader --no-dev\n```\n**在运行该命令前,需要确保项目中存在`composer.lock`文件.** \n### 2. 优化配置载入 \n为了更快的载入配置项,我们可以执行: \n```\nphp artisan config:cache\n```\n此命令会将所有的配置项放到一个缓存文件中. \n### 3. 优化路由载入 \n如果我们构建的项目中包含很多的路由,那么在项目发布前,我们最好执行一下路由缓存.这使得应用在处理上百个路由的时候,能够提升路由注册的性能. \n由于此功能使用了php的序列化功能,我们需要保证使用的路由为基于控制器的路由,基于闭包的路由无法被缓存. \n## 4. Forge发布 \n如果我们不想因为布置一个健壮的laravel需要这么多步奏,我们可以选择`Laravel Force`这种替代方案.其内置了所有laravel相关的工具及配置. "
},
{
"alpha_fraction": 0.6666666865348816,
"alphanum_fraction": 0.6770833134651184,
"avg_line_length": 22.5,
"blob_id": "e3695a222732793d9c5a80e57a36a06d24dd1973",
"content_id": "341d7f8cdbb02647cd06b936e294411bc25ea4dd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 224,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 4,
"path": "/算法/位运算/加一/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 加一 \n不使用+,-,*,/,%等运算符实现一个数字的加一操作 \n思路: \n在逻辑运算中,^(异或)又叫做不进位加法,两个数对应二进制位进行位与操作,可以判断该位为否同时为1. \n"
},
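A small C sketch of the idea above: XOR performs the carry-less addition, AND marks the positions that generate a carry, and shifting the carry left and repeating finishes the addition. Illustrative, not from the repo:
```
#include<stdio.h>

int plus_one(int n){
    int carry = 1;                     /* we are adding 1 */
    while(carry){
        int sum = n ^ carry;           /* carry-less addition */
        carry = (n & carry) << 1;      /* bits where both operands were 1 */
        n = sum;
    }
    return n;
}

int main(){
    printf("%d %d %d\n", plus_one(0), plus_one(7), plus_one(41));  /* 1 8 42 */
    return 0;
}
```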
{
"alpha_fraction": 0.7241379022598267,
"alphanum_fraction": 0.7241379022598267,
"avg_line_length": 12.5,
"blob_id": "c956245c6707a3a5ee126b756d003708fb0be889",
"content_id": "3cc4a163ad601fecf223e8b0e1cbdb508f7f5074",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 63,
"license_type": "no_license",
"max_line_length": 16,
"num_lines": 2,
"path": "/编程开发/Linux/APUE/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# Unix环境高级编程笔记 \n按照章节创建文件夹 \n"
},
{
"alpha_fraction": 0.5625,
"alphanum_fraction": 0.5625,
"avg_line_length": 6,
"blob_id": "add2c2e4865d15c4d3d12feff8414610c4e42c91",
"content_id": "63e76ffadc7fe28d856d8e66433400c69d752717",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 34,
"license_type": "no_license",
"max_line_length": 8,
"num_lines": 2,
"path": "/编程开发/后端/缓存/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 缓存\n缓存相关的笔记. \n"
},
{
"alpha_fraction": 0.6344262361526489,
"alphanum_fraction": 0.7147541046142578,
"avg_line_length": 21.592592239379883,
"blob_id": "774c5e9b0795033f5779a5cf79acb31acb98f404",
"content_id": "21da5b2aac55c630e51c181dd01d9c5635752c42",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1312,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 27,
"path": "/leetcode/338-比特位计数/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "**题目:** \n给定一个非负整数 num。对于 0 ≤ i ≤ num 范围中的每个数字 i ,计算其二进制数中的 1 的数目并将它们作为数组返回。 \n\n**示例 1:** \n```\n输入: 2\n输出: [0,1,1]\n```\n**示例 2:** \n```\n输入: 5\n输出: [0,1,1,2,1,2]\n```\n**进阶:** \n\n给出时间复杂度为O(n*sizeof(integer))的解答非常容易。但你可以在线性时间O(n)内用一趟扫描做到吗?\n要求算法的空间复杂度为O(n)。\n你能进一步完善解法吗?要求在C++或任何其他语言中不使用任何内置函数(如 C++ 中的 __builtin_popcount)来执行此操作。\n\n**思路:** \n看到最后要求的空间复杂度为O(N),下意识的就能想到可能需要用动态规划的思想. \n我们很容易知道一个数字除去最低位的1必然比原数小.又我们在遍历到当前数字之前,必定会先遍历到比当前数字小的数字. \n所以当前数字的数位等于除去最低位1之后的数字位数+1. \n比如: \n14的二进制形式为:1110,剔除最小二进制1之后得到1100即12.所以14的二进制中1的个数等于12的二进制中1的个数+1 \n又0的二进制中1的个数为0,所以1的二进制为0+1,2的二进制为0+2. \n剔除最低有效位的方法也很简单,利用表达式n&=n-1即可剔除最低位的1.\n"
},
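A direct C implementation of the recurrence derived in the note above, bits[i] = bits[i & (i-1)] + 1, since i & (i-1) clears the lowest set bit; a sketch following the note, not the author's submitted solution:
```
#include<stdio.h>
#include<stdlib.h>

int* countBits(int num, int* returnSize){
    int *bits = (int*)malloc(sizeof(int) * (num + 1));
    bits[0] = 0;                                 /* 0 has no 1-bits */
    for(int i = 1; i <= num; i++)
        bits[i] = bits[i & (i - 1)] + 1;         /* drop the lowest 1, add one */
    *returnSize = num + 1;
    return bits;
}

int main(){
    int n;
    int *bits = countBits(5, &n);
    for(int i = 0; i < n; i++) printf("%d ", bits[i]);  /* 0 1 1 2 1 2 */
    printf("\n");
    free(bits);
    return 0;
}
```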
{
"alpha_fraction": 0.41013824939727783,
"alphanum_fraction": 0.44009217619895935,
"avg_line_length": 17.08333396911621,
"blob_id": "c863966bfef19db5f24646c2380b7fc1d751035a",
"content_id": "b4c190c630087641bb294b26081ab6057e5b6c02",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 434,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 24,
"path": "/leetcode/392-判断子序列/c/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#include<stdio.h>\n\ntypedef enum{\n false = 0,\n true = 1\n}bool;\n\nbool isSubsequence(char * s, char * t){\n int i,j;\n for(i = j = 0; s[i] != '\\0'; i++){\n for(;t[j] != '\\0' && s[i] != t[j];j++);\n if(t[j] == '\\0') return false;\n j++;\n }\n return true;\n}\n\nint main(){\n char s[128],t[128];\n while(~scanf(\"%s%s\",s,t)){\n printf(\"%s\\n\",isSubsequence(s,t)?\"true\":\"false\");\n }\n return 0;\n}\n"
},
{
"alpha_fraction": 0.7653061151504517,
"alphanum_fraction": 0.7755101919174194,
"avg_line_length": 72.5,
"blob_id": "0943e28d12613416958a036def404d946a8c2f0f",
"content_id": "3a2c0479594f3e8ccbc321222da6724e94b30832",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 294,
"license_type": "no_license",
"max_line_length": 143,
"num_lines": 4,
"path": "/DailyCodingProblems/386_char_sort_twitter/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 386_char_sort_twitter\nThis problem was asked by Twitter. \nGiven a string, sort it in decreasing order based on the frequency of characters. If there are multiple possible solutions, return any of them.\nFor example, given the string `tweet`, return `tteew`. `eettw` would also be acceptable.\n"
},
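One possible C approach to the problem above: count every byte, then repeatedly emit the most frequent remaining character. A sketch under the assumption of single-byte characters; ties are broken by byte value, so for "tweet" it prints "eettw", which the statement accepts:
```
#include<stdio.h>

void freq_sort(char *s){
    int cnt[256] = {0};
    for(unsigned char *p = (unsigned char*)s; *p; p++) cnt[*p]++;  /* tally bytes */
    int i = 0;
    for(;;){
        int best = -1;
        for(int c = 0; c < 256; c++)              /* pick the most frequent byte left */
            if(cnt[c] > 0 && (best < 0 || cnt[c] > cnt[best])) best = c;
        if(best < 0) break;                       /* all counts exhausted */
        while(cnt[best]-- > 0) s[i++] = (char)best;
    }
    s[i] = '\0';
}

int main(){
    char s[] = "tweet";
    freq_sort(s);
    puts(s);                                      /* prints "eettw" */
    return 0;
}
```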
{
"alpha_fraction": 0.75,
"alphanum_fraction": 0.75,
"avg_line_length": 12,
"blob_id": "60316f2a6c0f22fa27a4443fac384fcba08ec940",
"content_id": "5d0e72422edab55e9605bc73d87246d3a66418c2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 70,
"license_type": "no_license",
"max_line_length": 15,
"num_lines": 2,
"path": "/编程开发/数据库/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 各种数据库笔记\n在使用各种数据库时遇到的问题. \n"
},
{
"alpha_fraction": 0.6896551847457886,
"alphanum_fraction": 0.6896551847457886,
"avg_line_length": 13.5,
"blob_id": "88a55822f5c70b36de227d3ce3f47bb58c6d240d",
"content_id": "e53474a5e73795ba7c29d4b01b83249baa11e96d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 203,
"license_type": "no_license",
"max_line_length": 21,
"num_lines": 6,
"path": "/外语/韩语/语法/2019-10-23.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 后悔有什么用\n후회한들 무슨 소용이 있어요?\nA: 그 사람은 지금 많이 후회한대요.\n听说他现在很后悔.\nB: 후회한들 무슨 소용이 있어요?\n后悔又有什么用.\n"
},
{
"alpha_fraction": 0.6948868632316589,
"alphanum_fraction": 0.7003352642059326,
"avg_line_length": 30,
"blob_id": "eb4e799326d6e5fd691a43855471a3610cbd0f54",
"content_id": "2be1f32fad3effc7faf18a83f06df05ba4c36198",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 2748,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 77,
"path": "/数据结构/树/哈夫曼树/term2/huffman.h",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#ifndef __HUFFMAN_H__\n#define __HUFFMAN_H__\n#include<stdio.h>\n#include<stdlib.h>\n#include<string.h>\n\n#define START printf(\"=====start=====\\n\")\n#define END printf(\"=====end=====\\n\")\n#define toByte(n) ((n) / 8 + ((n) % 8 > 0))\n\ntypedef struct HuffListNode HuffListNode,*hufflistnodep;\ntypedef struct HuffNode HuffNode,*huffnodep;\ntypedef struct HuffTree HuffTree,*hufftreep;\ntypedef struct HuffCode HuffCode,*huffcodep;\ntypedef struct HuffList HuffList,*hufflistp;\ntypedef struct HuffResult HuffResult,*huffresultp;\ntypedef struct HuffCode HuffBuf,*huffbufp; //缓存类型\n\nstruct HuffListNode{\n huffnodep node; //huffman节点\n hufflistnodep next; //后继节点\n}; //huffman频率节点\n\nstruct HuffList{\n hufflistnodep head; //头结点\n int keys[256]; //键值字典\n int size; //链表长度\n};\n\nstruct HuffNode{\n int key; //键\n int weight; //权重\n huffnodep left; //左节点\n huffnodep right; //右节点\n}; //huffman节点\n\nstruct HuffCode{\n char* code; //huffman code\n int size; //huffman code size\n};\n\nstruct HuffTree{\n huffnodep root; //根\n huffcodep codes[256]; //key对应的代码\n int size; //大小\n}; //huffman树\n\nstruct HuffResult{\n char* code; //生成的代码\n hufftreep tree; //对应的哈夫曼树\n};\n\n#ifndef __BOOLEAN__\n#define __BOOLEAN__\ntypedef enum{\n FALSE = 0,\n TRUE = 1,\n}Boolean;\n#endif\n\nhuffnodep huffnode(int key,int weight); //初始化huffman节点\nhufflistp hufflist(); //初始化hufflist\nBoolean insertHuffNode(hufflistp list,huffnodep node); //向指定的节点链表添加一个节点\nhuffnodep shiftHuffNode(hufflistp list); //删除第一个节点\nhufftreep hufftree(hufflistp list); //构建一棵huffman tree\nhuffbufp getFileBuf(const char* filename); //获取文件的buf\nhufftreep genhuffcodes(hufftreep tree,huffnodep node,char codes[],int idx); //获取当前节点之下的节点的huffman编码\nhufflistp getHuffListByFile(const char* filename); //根据文件创建huffman链表\nhufflistp getHuffListByBuf(huffbufp buf); //根据文件buf创建huffman链表\nhuffcodep getHuffCode(hufftreep tree,int key); //获取指定键值的huffcode\nhuffresultp getHuffCodesByFile(const char* filename); //获取文件的huffman code\nhuffbufp getOriginBuf(huffresultp result); //从result中解析出原始的字符串\nhuffbufp str2bin(char* str); //二进制字符串转二进制数组\nint putOriginToFile(huffresultp result,const char* filename); //将result存储到filename中\nchar* bin2str(huffbufp buf); //二进制数组转二进制字符串\nhuffbufp readHuffFile(const char* filename); //解析huff文件\n#endif"
},
{
"alpha_fraction": 0.41025641560554504,
"alphanum_fraction": 0.6114398241043091,
"avg_line_length": 9.346939086914062,
"blob_id": "5684a3329b51c249f6dc8883349a51db7f66efb6",
"content_id": "c07fba08851bd4dc861cd4bd2af02e7b6e28ae12",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 541,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 49,
"path": "/编程开发/软件安装/注册破解/IDEA注册码.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# IDEA注册码\n1. 2019.11.26\n```\nYZVR7WDLV8-eyJsaWNlbnNlSWQiOiJZWlZSN1dETFY4IiwibGljZW5zZWVOYW1lIjoiamV0YnJhaW5zIGpzIiwiYXNzaWduZWVOYW1lIjoiIiwiYXNzaWduZWVFbWFpbCI6IiIsImxpY2Vuc2VSZXN0cmljdGlvbiI6IkZvciBlZHVjYXRpb25hbCB1c2Ugb25seSIsImNoZWNrQ29uY3VycmVudFVzZSI6ZmFsc2UsInByb2R1Y3RzIjpbeyJjb2RlIjoiSUkiLCJwYWlkVXBUbyI6IjIwMTktMTEtMjYifSx7ImNvZGUiOiJBQyIsInBhaWRVcFRvIjoiMjAxOS0xMS0yNiJ9LHsiY29kZSI6IkRQTiIsInBhaWRVcFRvIjoiMjAxOS0xMS0yNiJ9LHsiY29kZSI6IlBTIiwicGFpZFVwVG8iOiIyMDE5LTExLTI2In0seyJjb2RlIjoiR08iLCJwYWlkVXBUbyI6IjIwMTktMTEtMjYifSx7ImNvZGUiOiJETSIsInBhaWRVcFRvIjoiMjAxOS0xMS0yNiJ9LHsiY29kZSI6IkNMIiwicGFpZFVwVG8iOiIyMDE5LTExLTI2In0seyJjb2RlIjoiUlMwIiwicGFpZFVwVG8iOiIyMDE5LTExLTI2In0seyJjb2RlIjoiUkMiLCJwYWlkVXBUbyI6IjIwMTktMTEtMjYifSx7ImNvZGUiOiJSRCIsInBhaWRVcFRvIjoiMjAxOS0xMS0yNiJ9LHsiY29kZSI6IlBDIiwicGFpZFVwVG8iOiIyMDE5LTExLTI2In0seyJjb2RlIjoiUk0iLCJwYWlkVXBUbyI6IjIwMTktMTEtMjYifSx7ImNvZGUiOiJXUyIsInBhaWRVcFRvIjoiMjAxOS0xMS0yNiJ9LHsiY29kZSI6IkRCIiwicGFpZFVwVG8iOiIyMDE5LTExLTI2In0seyJjb2RlIjoiREMiLCJwYWlkVXBUbyI6IjIwMTktMTEtMjYifSx7ImNvZGUiOiJSU1UiLCJwYWlkVXBUbyI6IjIwMTktMTEtMjYifV0sImhhc2giOiIxMTA1NzI3NC8wIiwiZ3JhY2VQZXJpb2REYXlzIjowLCJhdXRvUHJvbG9uZ2F0ZWQiOmZhbHNlLCJpc0F1dG9Qcm9sb25nYXRlZCI6ZmFsc2V9-rsJR5mlJcjibqRu1gQAMUCngMe8i+AOWIi+JZkNFYPET2G1ONcLPcIzoATTRi6ofkDm5l+3Y4HXjBPjVU6bHDdMBAzCnUqpXKsCknwSYyPSU0Y5pzuLvw6O9aPlQ46UBoTEC2BL5W6f11S7NlAq7tTbDuvFUynqSGAmTEfuZtKmzRmp20ejTPuMlSO7UqSkZvkg6YvSTrax1d2K+P9SAmVGZ9iC7AzBs4AwTf84QB9qHvE/Nh0oELSHWGG9hsZZ7sVghI/39/jPQFTp8GLFsl36ZPybPhGDam721zxS9H++/eJk23Jz3nxaRluE4dWmpHrDg1qBHp8qVpSFejg2QYw==-MIIElTCCAn2gAwIBAgIBCTANBgkqhkiG9w0BAQsFADAYMRYwFAYDVQQDDA1KZXRQcm9maWxlIENBMB4XDTE4MTEwMTEyMjk0NloXDTIwMTEwMjEyMjk0NlowaDELMAkGA1UEBhMCQ1oxDjAMBgNVBAgMBU51c2xlMQ8wDQYDVQQHDAZQcmFndWUxGTAXBgNVBAoMEEpldEJyYWlucyBzLnIuby4xHTAbBgNVBAMMFHByb2QzeS1mcm9tLTIwMTgxMTAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxcQkq+zdxlR2mmRYBPzGbUNdMN6OaXiXzxIWtMEkrJMO/5oUfQJbLLuMSMK0QHFmaI37WShyxZcfRCidwXjot4zmNBKnlyHodDij/78TmVqFl8nOeD5+07B8VEaIu7c3E1N+e1doC6wht4I4+IEmtsPAdoaj5WCQVQbrI8KeT8M9VcBIWX7fD0fhexfg3ZRt0xqwMcXGNp3DdJHiO0rCdU+Itv7EmtnSVq9jBG1usMSFvMowR25mju2JcPFp1+I4ZI+FqgR8gyG8oiNDyNEoAbsR3lOpI7grUYSvkB/xVy/VoklPCK2h0f0GJxFjnye8NT1PAywoyl7RmiAVRE/EKwIDAQABo4GZMIGWMAkGA1UdEwQCMAAwHQYDVR0OBBYEFGEpG9oZGcfLMGNBkY7SgHiMGgTcMEgGA1UdIwRBMD+AFKOetkhnQhI2Qb1t4Lm0oFKLl/GzoRykGjAYMRYwFAYDVQQDDA1KZXRQcm9maWxlIENBggkA0myxg7KDeeEwEwYDVR0lBAwwCgYIKwYBBQUHAwEwCwYDVR0PBAQDAgWgMA0GCSqGSIb3DQEBCwUAA4ICAQAF8uc+YJOHHwOFcPzmbjcxNDuGoOUIP+2h1R75Lecswb7ru2LWWSUMtXVKQzChLNPn/72W0k+oI056tgiwuG7M49LXp4zQVlQnFmWU1wwGvVhq5R63Rpjx1zjGUhcXgayu7+9zMUW596Lbomsg8qVve6euqsrFicYkIIuUu4zYPndJwfe0YkS5nY72SHnNdbPhEnN8wcB2Kz+OIG0lih3yz5EqFhld03bGp222ZQCIghCTVL6QBNadGsiN/lWLl4JdR3lJkZzlpFdiHijoVRdWeSWqM4y0t23c92HXKrgppoSV18XMxrWVdoSM3nuMHwxGhFyde05OdDtLpCv+jlWf5REAHHA201pAU6bJSZINyHDUTB+Beo28rRXSwSh3OUIvYwKNVeoBY+KwOJ7WnuTCUq1meE6GkKc4D/cXmgpOyW/1SmBz3XjVIi/zprZ0zf3qH5mkphtg6ksjKgKjmx1cXfZAAX6wcDBNaCL+Ortep1Dh8xDUbqbBVNBL4jbiL3i3xsfNiyJgaZ5sX7i8tmStEpLbPwvHcByuf59qJhV/bZOl8KqJBETCDJcY6O2aqhTUy+9x93ThKs1GKrRPePrWPluud7ttlgtRveit/pcBrnQcXOl1rHq7ByB8CFAxNotRUYL9IF5n3wJOgkPojMy6jetQA5Ogc8Sm7RG6vg1yow==\n```\n2. 
2020.1.22到期\n```\nT440AFY6IP-eyJsaWNlbnNlSWQiOiJUNDQwQUZZNklQIiwibGljZW5zZWVOYW1lIjoid3d3LnB5Y2hhcm0uY24g5Lit5paH572R5o+Q5L6bIiwiYXNzaWduZWVOYW1lIjoiIiwiYXNzaWduZWVFbWFpbCI6IiIsImxpY2Vuc2VSZXN0cmljdGlvbiI6IkZvciBlZHVjYXRpb25hbCB1c2Ugb25seSIsImNoZWNrQ29uY3VycmVudFVzZSI6ZmFsc2UsInByb2R1Y3RzIjpbeyJjb2RlIjoiSUkiLCJwYWlkVXBUbyI6IjIwMjAtMDEtMjIifSx7ImNvZGUiOiJBQyIsInBhaWRVcFRvIjoiMjAyMC0wMS0yMiJ9LHsiY29kZSI6IkRQTiIsInBhaWRVcFRvIjoiMjAyMC0wMS0yMiJ9LHsiY29kZSI6IlBTIiwicGFpZFVwVG8iOiIyMDIwLTAxLTIyIn0seyJjb2RlIjoiR08iLCJwYWlkVXBUbyI6IjIwMjAtMDEtMjIifSx7ImNvZGUiOiJETSIsInBhaWRVcFRvIjoiMjAyMC0wMS0yMiJ9LHsiY29kZSI6IkNMIiwicGFpZFVwVG8iOiIyMDIwLTAxLTIyIn0seyJjb2RlIjoiUlMwIiwicGFpZFVwVG8iOiIyMDIwLTAxLTIyIn0seyJjb2RlIjoiUkMiLCJwYWlkVXBUbyI6IjIwMjAtMDEtMjIifSx7ImNvZGUiOiJSRCIsInBhaWRVcFRvIjoiMjAyMC0wMS0yMiJ9LHsiY29kZSI6IlBDIiwicGFpZFVwVG8iOiIyMDIwLTAxLTIyIn0seyJjb2RlIjoiUk0iLCJwYWlkVXBUbyI6IjIwMjAtMDEtMjIifSx7ImNvZGUiOiJXUyIsInBhaWRVcFRvIjoiMjAyMC0wMS0yMiJ9LHsiY29kZSI6IkRCIiwicGFpZFVwVG8iOiIyMDIwLTAxLTIyIn0seyJjb2RlIjoiREMiLCJwYWlkVXBUbyI6IjIwMjAtMDEtMjIifSx7ImNvZGUiOiJSU1UiLCJwYWlkVXBUbyI6IjIwMjAtMDEtMjIifV0sImhhc2giOiIxMTcxNTc3MS8wIiwiZ3JhY2VQZXJpb2REYXlzIjowLCJhdXRvUHJvbG9uZ2F0ZWQiOmZhbHNlLCJpc0F1dG9Qcm9sb25nYXRlZCI6ZmFsc2V9-GnYipToEcr6iQ2fBKq87oicRzl7/dXaC+OwEN1+hLh6cuwa7950IR8b/9djr9Ram7wg3EvHQ7PB3si7aGyJrGx2zQGgmbeqJ/0FjXBL0jnOFENY8bb++NnOqTRivElO4k0myU70kULdoSxW6JlXUjZNgXpzy+jO1El9axBtGOS+fGNUp7mgk0+EetACqmvQPDBDkIQhpBBDAcrtElufHuo60FhjXv8kyZAgs1y8kkZgSAKblmmedpzNFmu34wBHXvkKZMkYei0KUd3AHE7chmU6MX/b0v15Qm0Pix8yWuxEQubDkATzY6hc2liRj1dPE6/TYamQLhRHxUtreDysVBA==-MIIElTCCAn2gAwIBAgIBCTANBgkqhkiG9w0BAQsFADAYMRYwFAYDVQQDDA1KZXRQcm9maWxlIENBMB4XDTE4MTEwMTEyMjk0NloXDTIwMTEwMjEyMjk0NlowaDELMAkGA1UEBhMCQ1oxDjAMBgNVBAgMBU51c2xlMQ8wDQYDVQQHDAZQcmFndWUxGTAXBgNVBAoMEEpldEJyYWlucyBzLnIuby4xHTAbBgNVBAMMFHByb2QzeS1mcm9tLTIwMTgxMTAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxcQkq+zdxlR2mmRYBPzGbUNdMN6OaXiXzxIWtMEkrJMO/5oUfQJbLLuMSMK0QHFmaI37WShyxZcfRCidwXjot4zmNBKnlyHodDij/78TmVqFl8nOeD5+07B8VEaIu7c3E1N+e1doC6wht4I4+IEmtsPAdoaj5WCQVQbrI8KeT8M9VcBIWX7fD0fhexfg3ZRt0xqwMcXGNp3DdJHiO0rCdU+Itv7EmtnSVq9jBG1usMSFvMowR25mju2JcPFp1+I4ZI+FqgR8gyG8oiNDyNEoAbsR3lOpI7grUYSvkB/xVy/VoklPCK2h0f0GJxFjnye8NT1PAywoyl7RmiAVRE/EKwIDAQABo4GZMIGWMAkGA1UdEwQCMAAwHQYDVR0OBBYEFGEpG9oZGcfLMGNBkY7SgHiMGgTcMEgGA1UdIwRBMD+AFKOetkhnQhI2Qb1t4Lm0oFKLl/GzoRykGjAYMRYwFAYDVQQDDA1KZXRQcm9maWxlIENBggkA0myxg7KDeeEwEwYDVR0lBAwwCgYIKwYBBQUHAwEwCwYDVR0PBAQDAgWgMA0GCSqGSIb3DQEBCwUAA4ICAQAF8uc+YJOHHwOFcPzmbjcxNDuGoOUIP+2h1R75Lecswb7ru2LWWSUMtXVKQzChLNPn/72W0k+oI056tgiwuG7M49LXp4zQVlQnFmWU1wwGvVhq5R63Rpjx1zjGUhcXgayu7+9zMUW596Lbomsg8qVve6euqsrFicYkIIuUu4zYPndJwfe0YkS5nY72SHnNdbPhEnN8wcB2Kz+OIG0lih3yz5EqFhld03bGp222ZQCIghCTVL6QBNadGsiN/lWLl4JdR3lJkZzlpFdiHijoVRdWeSWqM4y0t23c92HXKrgppoSV18XMxrWVdoSM3nuMHwxGhFyde05OdDtLpCv+jlWf5REAHHA201pAU6bJSZINyHDUTB+Beo28rRXSwSh3OUIvYwKNVeoBY+KwOJ7WnuTCUq1meE6GkKc4D/cXmgpOyW/1SmBz3XjVIi/zprZ0zf3qH5mkphtg6ksjKgKjmx1cXfZAAX6wcDBNaCL+Ortep1Dh8xDUbqbBVNBL4jbiL3i3xsfNiyJgaZ5sX7i8tmStEpLbPwvHcByuf59qJhV/bZOl8KqJBETCDJcY6O2aqhTUy+9x93ThKs1GKrRPePrWPluud7ttlgtRveit/pcBrnQcXOl1rHq7ByB8CFAxNotRUYL9IF5n3wJOgkPojMy6jetQA5Ogc8Sm7RG6vg1yow==\n```\n3. 
2020.1.24到期 \n```\n9MWZD5CC4E-eyJsaWNlbnNlSWQiOiI5TVdaRDVDQzRFIiwibGljZW5zZWVOYW1lIjoiMjAxNzY1MDYxQGNxdS5lZHUuY24gLiIsImFzc2lnbmVlTmFtZSI6IiIsImFzc2lnbmVlRW1haWwiOiIiLCJsaWNlbnNlUmVzdHJpY3Rpb24iOiJGb3IgZWR1Y2F0aW9uYWwgdXNlIG9ubHkiLCJjaGVja0NvbmN1cnJlbnRVc2UiOmZhbHNlLCJwcm9kdWN0cyI6W3siY29kZSI6IklJIiwicGFpZFVwVG8iOiIyMDIwLTAxLTI0In0seyJjb2RlIjoiQUMiLCJwYWlkVXBUbyI6IjIwMjAtMDEtMjQifSx7ImNvZGUiOiJEUE4iLCJwYWlkVXBUbyI6IjIwMjAtMDEtMjQifSx7ImNvZGUiOiJQUyIsInBhaWRVcFRvIjoiMjAyMC0wMS0yNCJ9LHsiY29kZSI6IkdPIiwicGFpZFVwVG8iOiIyMDIwLTAxLTI0In0seyJjb2RlIjoiRE0iLCJwYWlkVXBUbyI6IjIwMjAtMDEtMjQifSx7ImNvZGUiOiJDTCIsInBhaWRVcFRvIjoiMjAyMC0wMS0yNCJ9LHsiY29kZSI6IlJTMCIsInBhaWRVcFRvIjoiMjAyMC0wMS0yNCJ9LHsiY29kZSI6IlJDIiwicGFpZFVwVG8iOiIyMDIwLTAxLTI0In0seyJjb2RlIjoiUkQiLCJwYWlkVXBUbyI6IjIwMjAtMDEtMjQifSx7ImNvZGUiOiJQQyIsInBhaWRVcFRvIjoiMjAyMC0wMS0yNCJ9LHsiY29kZSI6IlJNIiwicGFpZFVwVG8iOiIyMDIwLTAxLTI0In0seyJjb2RlIjoiV1MiLCJwYWlkVXBUbyI6IjIwMjAtMDEtMjQifSx7ImNvZGUiOiJEQiIsInBhaWRVcFRvIjoiMjAyMC0wMS0yNCJ9LHsiY29kZSI6IkRDIiwicGFpZFVwVG8iOiIyMDIwLTAxLTI0In0seyJjb2RlIjoiUlNVIiwicGFpZFVwVG8iOiIyMDIwLTAxLTI0In1dLCJoYXNoIjoiMTE3Mzk0NjcvMCIsImdyYWNlUGVyaW9kRGF5cyI6MCwiYXV0b1Byb2xvbmdhdGVkIjpmYWxzZSwiaXNBdXRvUHJvbG9uZ2F0ZWQiOmZhbHNlfQ==-ocfH5r7aj6756M8f6f+7MXdp2K0RxPCG2Yp9/iIydFQxHCSjqjUopo2yAxH8+EtTGm+w4dKPe+6tLqa/HOExPt9g3yOf29PU7oIwmqnIcnvZYGNCgNsCK7fyfxc2CX1whLZTByQ0LKylRshlsYz+HXg002E7q7bY0y3vbwxWLyNR5qY4EPa4WzfWjdnopCZfijgsU0bTRB51jiqVlLHONKjFFUmzPopUDwEHwDTnmNMEUAsluTaiirWIJwkmuFbsgmir+KUAxHvStOUgo68Vyaas7BicAWI1QiCqd1EvFIN/sGaZkRE9hK6FwkDN/hscDX1DCPY62ttgP/k1GvFOVA==-MIIElTCCAn2gAwIBAgIBCTANBgkqhkiG9w0BAQsFADAYMRYwFAYDVQQDDA1KZXRQcm9maWxlIENBMB4XDTE4MTEwMTEyMjk0NloXDTIwMTEwMjEyMjk0NlowaDELMAkGA1UEBhMCQ1oxDjAMBgNVBAgMBU51c2xlMQ8wDQYDVQQHDAZQcmFndWUxGTAXBgNVBAoMEEpldEJyYWlucyBzLnIuby4xHTAbBgNVBAMMFHByb2QzeS1mcm9tLTIwMTgxMTAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxcQkq+zdxlR2mmRYBPzGbUNdMN6OaXiXzxIWtMEkrJMO/5oUfQJbLLuMSMK0QHFmaI37WShyxZcfRCidwXjot4zmNBKnlyHodDij/78TmVqFl8nOeD5+07B8VEaIu7c3E1N+e1doC6wht4I4+IEmtsPAdoaj5WCQVQbrI8KeT8M9VcBIWX7fD0fhexfg3ZRt0xqwMcXGNp3DdJHiO0rCdU+Itv7EmtnSVq9jBG1usMSFvMowR25mju2JcPFp1+I4ZI+FqgR8gyG8oiNDyNEoAbsR3lOpI7grUYSvkB/xVy/VoklPCK2h0f0GJxFjnye8NT1PAywoyl7RmiAVRE/EKwIDAQABo4GZMIGWMAkGA1UdEwQCMAAwHQYDVR0OBBYEFGEpG9oZGcfLMGNBkY7SgHiMGgTcMEgGA1UdIwRBMD+AFKOetkhnQhI2Qb1t4Lm0oFKLl/GzoRykGjAYMRYwFAYDVQQDDA1KZXRQcm9maWxlIENBggkA0myxg7KDeeEwEwYDVR0lBAwwCgYIKwYBBQUHAwEwCwYDVR0PBAQDAgWgMA0GCSqGSIb3DQEBCwUAA4ICAQAF8uc+YJOHHwOFcPzmbjcxNDuGoOUIP+2h1R75Lecswb7ru2LWWSUMtXVKQzChLNPn/72W0k+oI056tgiwuG7M49LXp4zQVlQnFmWU1wwGvVhq5R63Rpjx1zjGUhcXgayu7+9zMUW596Lbomsg8qVve6euqsrFicYkIIuUu4zYPndJwfe0YkS5nY72SHnNdbPhEnN8wcB2Kz+OIG0lih3yz5EqFhld03bGp222ZQCIghCTVL6QBNadGsiN/lWLl4JdR3lJkZzlpFdiHijoVRdWeSWqM4y0t23c92HXKrgppoSV18XMxrWVdoSM3nuMHwxGhFyde05OdDtLpCv+jlWf5REAHHA201pAU6bJSZINyHDUTB+Beo28rRXSwSh3OUIvYwKNVeoBY+KwOJ7WnuTCUq1meE6GkKc4D/cXmgpOyW/1SmBz3XjVIi/zprZ0zf3qH5mkphtg6ksjKgKjmx1cXfZAAX6wcDBNaCL+Ortep1Dh8xDUbqbBVNBL4jbiL3i3xsfNiyJgaZ5sX7i8tmStEpLbPwvHcByuf59qJhV/bZOl8KqJBETCDJcY6O2aqhTUy+9x93ThKs1GKrRPePrWPluud7ttlgtRveit/pcBrnQcXOl1rHq7ByB8CFAxNotRUYL9IF5n3wJOgkPojMy6jetQA5Ogc8Sm7RG6vg1yow==\n```\n4. 
2020.4.4到期\n```\nMNQ043JMTU-eyJsaWNlbnNlSWQiOiJNTlEwNDNKTVRVIiwibGljZW5zZWVOYW1lIjoiR1VPIEJJTiIsImFzc2lnbmVlTmFtZSI6IiIsImFzc2lnbmVlRW1haWwiOiIiLCJsaWNlbnNlUmVzdHJpY3Rpb24iOiIiLCJjaGVja0NvbmN1cnJlbnRVc2UiOmZhbHNlLCJwcm9kdWN0cyI6W3siY29kZSI6IklJIiwiZmFsbGJhY2tEYXRlIjoiMjAxOS0wNC0wNSIsInBhaWRVcFRvIjoiMjAyMC0wNC0wNCJ9XSwiaGFzaCI6IjEyNjIxNDIwLzBwIiwiZ3JhY2VQZXJpb2REYXlzIjo3LCJhdXRvUHJvbG9uZ2F0ZWQiOnRydWUsImlzQXV0b1Byb2xvbmdhdGVkIjp0cnVlfQ==-Zmbxcn7NPlqBNqAURX0uiLzybnruyx6PG+6KYZrpzm/IJJs5nnIogGgdfIJoifO6fbaaJYc5pjds7CHdrt/neIpvF2o/HvIjMEF4/AhNV7HUGsAa9zpMszc6YBIkMmVFh4Y7GPKOStA14/Ld83AC7kGnwL1Fq7eAXKJFljc00GMejPpfE0zDqTN634bC+0ojfklhWXaLqhUt230SiE8onnd3quvEaH5NsW7sIQm2spyONZI+iHvHFtl4EvG7tlRlD1StsfhrbgNNxz61FOEEQ+GtZIzMx+T4sbpfoRyms7lbWQecrbAtE0c2sR98esm4PcDUhrFVBxGorPC1ppOLSQ==-MIIElTCCAn2gAwIBAgIBCTANBgkqhkiG9w0BAQsFADAYMRYwFAYDVQQDDA1KZXRQcm9maWxlIENBMB4XDTE4MTEwMTEyMjk0NloXDTIwMTEwMjEyMjk0NlowaDELMAkGA1UEBhMCQ1oxDjAMBgNVBAgMBU51c2xlMQ8wDQYDVQQHDAZQcmFndWUxGTAXBgNVBAoMEEpldEJyYWlucyBzLnIuby4xHTAbBgNVBAMMFHByb2QzeS1mcm9tLTIwMTgxMTAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxcQkq+zdxlR2mmRYBPzGbUNdMN6OaXiXzxIWtMEkrJMO/5oUfQJbLLuMSMK0QHFmaI37WShyxZcfRCidwXjot4zmNBKnlyHodDij/78TmVqFl8nOeD5+07B8VEaIu7c3E1N+e1doC6wht4I4+IEmtsPAdoaj5WCQVQbrI8KeT8M9VcBIWX7fD0fhexfg3ZRt0xqwMcXGNp3DdJHiO0rCdU+Itv7EmtnSVq9jBG1usMSFvMowR25mju2JcPFp1+I4ZI+FqgR8gyG8oiNDyNEoAbsR3lOpI7grUYSvkB/xVy/VoklPCK2h0f0GJxFjnye8NT1PAywoyl7RmiAVRE/EKwIDAQABo4GZMIGWMAkGA1UdEwQCMAAwHQYDVR0OBBYEFGEpG9oZGcfLMGNBkY7SgHiMGgTcMEgGA1UdIwRBMD+AFKOetkhnQhI2Qb1t4Lm0oFKLl/GzoRykGjAYMRYwFAYDVQQDDA1KZXRQcm9maWxlIENBggkA0myxg7KDeeEwEwYDVR0lBAwwCgYIKwYBBQUHAwEwCwYDVR0PBAQDAgWgMA0GCSqGSIb3DQEBCwUAA4ICAQAF8uc+YJOHHwOFcPzmbjcxNDuGoOUIP+2h1R75Lecswb7ru2LWWSUMtXVKQzChLNPn/72W0k+oI056tgiwuG7M49LXp4zQVlQnFmWU1wwGvVhq5R63Rpjx1zjGUhcXgayu7+9zMUW596Lbomsg8qVve6euqsrFicYkIIuUu4zYPndJwfe0YkS5nY72SHnNdbPhEnN8wcB2Kz+OIG0lih3yz5EqFhld03bGp222ZQCIghCTVL6QBNadGsiN/lWLl4JdR3lJkZzlpFdiHijoVRdWeSWqM4y0t23c92HXKrgppoSV18XMxrWVdoSM3nuMHwxGhFyde05OdDtLpCv+jlWf5REAHHA201pAU6bJSZINyHDUTB+Beo28rRXSwSh3OUIvYwKNVeoBY+KwOJ7WnuTCUq1meE6GkKc4D/cXmgpOyW/1SmBz3XjVIi/zprZ0zf3qH5mkphtg6ksjKgKjmx1cXfZAAX6wcDBNaCL+Ortep1Dh8xDUbqbBVNBL4jbiL3i3xsfNiyJgaZ5sX7i8tmStEpLbPwvHcByuf59qJhV/bZOl8KqJBETCDJcY6O2aqhTUy+9x93ThKs1GKrRPePrWPluud7ttlgtRveit/pcBrnQcXOl1rHq7ByB8CFAxNotRUYL9IF5n3wJOgkPojMy6jetQA5Ogc8Sm7RG6vg1yow==\n```\n5. 
2020.4.20到期 \n```\n812LFWMRSH-eyJsaWNlbnNlSWQiOiI4MTJMRldNUlNIIiwibGljZW5zZWVOYW1lIjoi5q2j54mIIOaOiOadgyIsImFzc2lnbmVlTmFtZSI6IiIsImFzc2lnbmVlRW1haWwiOiIiLCJsaWNlbnNlUmVzdHJpY3Rpb24iOiIiLCJjaGVja0NvbmN1cnJlbnRVc2UiOmZhbHNlLCJwcm9kdWN0cyI6W3siY29kZSI6IklJIiwiZmFsbGJhY2tEYXRlIjoiMjAxOS0wNC0yMSIsInBhaWRVcFRvIjoiMjAyMC0wNC0yMCJ9LHsiY29kZSI6IkFDIiwiZmFsbGJhY2tEYXRlIjoiMjAxOS0wNC0yMSIsInBhaWRVcFRvIjoiMjAyMC0wNC0yMCJ9LHsiY29kZSI6IkRQTiIsImZhbGxiYWNrRGF0ZSI6IjIwMTktMDQtMjEiLCJwYWlkVXBUbyI6IjIwMjAtMDQtMjAifSx7ImNvZGUiOiJQUyIsImZhbGxiYWNrRGF0ZSI6IjIwMTktMDQtMjEiLCJwYWlkVXBUbyI6IjIwMjAtMDQtMjAifSx7ImNvZGUiOiJHTyIsImZhbGxiYWNrRGF0ZSI6IjIwMTktMDQtMjEiLCJwYWlkVXBUbyI6IjIwMjAtMDQtMjAifSx7ImNvZGUiOiJETSIsImZhbGxiYWNrRGF0ZSI6IjIwMTktMDQtMjEiLCJwYWlkVXBUbyI6IjIwMjAtMDQtMjAifSx7ImNvZGUiOiJDTCIsImZhbGxiYWNrRGF0ZSI6IjIwMTktMDQtMjEiLCJwYWlkVXBUbyI6IjIwMjAtMDQtMjAifSx7ImNvZGUiOiJSUzAiLCJmYWxsYmFja0RhdGUiOiIyMDE5LTA0LTIxIiwicGFpZFVwVG8iOiIyMDIwLTA0LTIwIn0seyJjb2RlIjoiUkMiLCJmYWxsYmFja0RhdGUiOiIyMDE5LTA0LTIxIiwicGFpZFVwVG8iOiIyMDIwLTA0LTIwIn0seyJjb2RlIjoiUkQiLCJmYWxsYmFja0RhdGUiOiIyMDE5LTA0LTIxIiwicGFpZFVwVG8iOiIyMDIwLTA0LTIwIn0seyJjb2RlIjoiUEMiLCJmYWxsYmFja0RhdGUiOiIyMDE5LTA0LTIxIiwicGFpZFVwVG8iOiIyMDIwLTA0LTIwIn0seyJjb2RlIjoiUk0iLCJmYWxsYmFja0RhdGUiOiIyMDE5LTA0LTIxIiwicGFpZFVwVG8iOiIyMDIwLTA0LTIwIn0seyJjb2RlIjoiV1MiLCJmYWxsYmFja0RhdGUiOiIyMDE5LTA0LTIxIiwicGFpZFVwVG8iOiIyMDIwLTA0LTIwIn0seyJjb2RlIjoiREIiLCJmYWxsYmFja0RhdGUiOiIyMDE5LTA0LTIxIiwicGFpZFVwVG8iOiIyMDIwLTA0LTIwIn0seyJjb2RlIjoiREMiLCJmYWxsYmFja0RhdGUiOiIyMDE5LTA0LTIxIiwicGFpZFVwVG8iOiIyMDIwLTA0LTIwIn0seyJjb2RlIjoiUlNVIiwiZmFsbGJhY2tEYXRlIjoiMjAxOS0wNC0yMSIsInBhaWRVcFRvIjoiMjAyMC0wNC0yMCJ9XSwiaGFzaCI6IjEyNzk2ODc3LzAiLCJncmFjZVBlcmlvZERheXMiOjcsImF1dG9Qcm9sb25nYXRlZCI6ZmFsc2UsImlzQXV0b1Byb2xvbmdhdGVkIjpmYWxzZX0=-ti4tUsQISyJF/zfWxSHCr+IcYrX2w24JO5bUZCPIGKSi+IrgQ0RT2uum9n96o+Eob9Z1iQ9nUZ6FJdpEW5g0Exe6sw8fLrWMoLFhtCIvVgQxEEt+M7Z2xD0esmjP1kPKXZyc/i+NCxA2EO2Sec9uifqklBGP1L3xoENAw2QsIWBfttIe6EPWhbS8TIMMr2vF/S3HrN8To5Hj5lwD/t1GHgFK1uWrhsuifAiKcVzqogybzGiR1h2+yNYTMbKxP7uPCcdYMsIyrBNVRGA3IuEJgyGQTQlFbnVQoVUTGPW2tQxprmC464wMjKi40JHh27WzjOHPwgzxDaigwn4Z0EbSpA==-MIIElTCCAn2gAwIBAgIBCTANBgkqhkiG9w0BAQsFADAYMRYwFAYDVQQDDA1KZXRQcm9maWxlIENBMB4XDTE4MTEwMTEyMjk0NloXDTIwMTEwMjEyMjk0NlowaDELMAkGA1UEBhMCQ1oxDjAMBgNVBAgMBU51c2xlMQ8wDQYDVQQHDAZQcmFndWUxGTAXBgNVBAoMEEpldEJyYWlucyBzLnIuby4xHTAbBgNVBAMMFHByb2QzeS1mcm9tLTIwMTgxMTAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxcQkq+zdxlR2mmRYBPzGbUNdMN6OaXiXzxIWtMEkrJMO/5oUfQJbLLuMSMK0QHFmaI37WShyxZcfRCidwXjot4zmNBKnlyHodDij/78TmVqFl8nOeD5+07B8VEaIu7c3E1N+e1doC6wht4I4+IEmtsPAdoaj5WCQVQbrI8KeT8M9VcBIWX7fD0fhexfg3ZRt0xqwMcXGNp3DdJHiO0rCdU+Itv7EmtnSVq9jBG1usMSFvMowR25mju2JcPFp1+I4ZI+FqgR8gyG8oiNDyNEoAbsR3lOpI7grUYSvkB/xVy/VoklPCK2h0f0GJxFjnye8NT1PAywoyl7RmiAVRE/EKwIDAQABo4GZMIGWMAkGA1UdEwQCMAAwHQYDVR0OBBYEFGEpG9oZGcfLMGNBkY7SgHiMGgTcMEgGA1UdIwRBMD+AFKOetkhnQhI2Qb1t4Lm0oFKLl/GzoRykGjAYMRYwFAYDVQQDDA1KZXRQcm9maWxlIENBggkA0myxg7KDeeEwEwYDVR0lBAwwCgYIKwYBBQUHAwEwCwYDVR0PBAQDAgWgMA0GCSqGSIb3DQEBCwUAA4ICAQAF8uc+YJOHHwOFcPzmbjcxNDuGoOUIP+2h1R75Lecswb7ru2LWWSUMtXVKQzChLNPn/72W0k+oI056tgiwuG7M49LXp4zQVlQnFmWU1wwGvVhq5R63Rpjx1zjGUhcXgayu7+9zMUW596Lbomsg8qVve6euqsrFicYkIIuUu4zYPndJwfe0YkS5nY72SHnNdbPhEnN8wcB2Kz+OIG0lih3yz5EqFhld03bGp222ZQCIghCTVL6QBNadGsiN/lWLl4JdR3lJkZzlpFdiHijoVRdWeSWqM4y0t23c92HXKrgppoSV18XMxrWVdoSM3nuMHwxGhFyde05OdDtLpCv+jlWf5REAHHA201pAU6bJSZINyHDUTB+Beo28rRXSwSh3OUIvYwKNVeoBY+KwOJ7WnuTCUq1meE6GkKc4D/cXmgpOyW/1SmBz3XjVIi/zprZ0zf3qH5mkphtg6ksjKgKjmx1cXfZAAX6wcDBNaCL+Ortep1Dh8xDUbqbBVNBL4jbiL3i3xsfNiyJgaZ5sX7i8tmStEpLbPwvHcByuf59
qJhV/bZOl8KqJBETCDJcY6O2aqhTUy+9x93ThKs1GKrRPePrWPluud7ttlgtRveit/pcBrnQcXOl1rHq7ByB8CFAxNotRUYL9IF5n3wJOgkPojMy6jetQA5Ogc8Sm7RG6vg1yow==\n```\n6. 2020.6.5到期\n```\nK6IXATEF43-eyJsaWNlbnNlSWQiOiJLNklYQVRFRjQzIiwibGljZW5zZWVOYW1lIjoi5o6I5p2D5Luj55CG5ZWGOiBodHRwOi8vaWRlYS5oay5jbiIsImFzc2lnbmVlTmFtZSI6IiIsImFzc2lnbmVlRW1haWwiOiIiLCJsaWNlbnNlUmVzdHJpY3Rpb24iOiIiLCJjaGVja0NvbmN1cnJlbnRVc2UiOmZhbHNlLCJwcm9kdWN0cyI6W3siY29kZSI6IklJIiwiZmFsbGJhY2tEYXRlIjoiMjAxOS0wNi0wNSIsInBhaWRVcFRvIjoiMjAyMC0wNi0wNCJ9LHsiY29kZSI6IkFDIiwiZmFsbGJhY2tEYXRlIjoiMjAxOS0wNi0wNSIsInBhaWRVcFRvIjoiMjAyMC0wNi0wNCJ9LHsiY29kZSI6IkRQTiIsImZhbGxiYWNrRGF0ZSI6IjIwMTktMDYtMDUiLCJwYWlkVXBUbyI6IjIwMjAtMDYtMDQifSx7ImNvZGUiOiJQUyIsImZhbGxiYWNrRGF0ZSI6IjIwMTktMDYtMDUiLCJwYWlkVXBUbyI6IjIwMjAtMDYtMDQifSx7ImNvZGUiOiJHTyIsImZhbGxiYWNrRGF0ZSI6IjIwMTktMDYtMDUiLCJwYWlkVXBUbyI6IjIwMjAtMDYtMDQifSx7ImNvZGUiOiJETSIsImZhbGxiYWNrRGF0ZSI6IjIwMTktMDYtMDUiLCJwYWlkVXBUbyI6IjIwMjAtMDYtMDQifSx7ImNvZGUiOiJDTCIsImZhbGxiYWNrRGF0ZSI6IjIwMTktMDYtMDUiLCJwYWlkVXBUbyI6IjIwMjAtMDYtMDQifSx7ImNvZGUiOiJSUzAiLCJmYWxsYmFja0RhdGUiOiIyMDE5LTA2LTA1IiwicGFpZFVwVG8iOiIyMDIwLTA2LTA0In0seyJjb2RlIjoiUkMiLCJmYWxsYmFja0RhdGUiOiIyMDE5LTA2LTA1IiwicGFpZFVwVG8iOiIyMDIwLTA2LTA0In0seyJjb2RlIjoiUkQiLCJmYWxsYmFja0RhdGUiOiIyMDE5LTA2LTA1IiwicGFpZFVwVG8iOiIyMDIwLTA2LTA0In0seyJjb2RlIjoiUEMiLCJmYWxsYmFja0RhdGUiOiIyMDE5LTA2LTA1IiwicGFpZFVwVG8iOiIyMDIwLTA2LTA0In0seyJjb2RlIjoiUk0iLCJmYWxsYmFja0RhdGUiOiIyMDE5LTA2LTA1IiwicGFpZFVwVG8iOiIyMDIwLTA2LTA0In0seyJjb2RlIjoiV1MiLCJmYWxsYmFja0RhdGUiOiIyMDE5LTA2LTA1IiwicGFpZFVwVG8iOiIyMDIwLTA2LTA0In0seyJjb2RlIjoiREIiLCJmYWxsYmFja0RhdGUiOiIyMDE5LTA2LTA1IiwicGFpZFVwVG8iOiIyMDIwLTA2LTA0In0seyJjb2RlIjoiREMiLCJmYWxsYmFja0RhdGUiOiIyMDE5LTA2LTA1IiwicGFpZFVwVG8iOiIyMDIwLTA2LTA0In0seyJjb2RlIjoiUlNVIiwiZmFsbGJhY2tEYXRlIjoiMjAxOS0wNi0wNSIsInBhaWRVcFRvIjoiMjAyMC0wNi0wNCJ9XSwiaGFzaCI6IjEzMjkyMzQwLzAiLCJncmFjZVBlcmlvZERheXMiOjcsImF1dG9Qcm9sb25nYXRlZCI6ZmFsc2UsImlzQXV0b1Byb2xvbmdhdGVkIjpmYWxzZX0=-f8GvMiFGxAImRG8KKudyJDmZkDYD5fQiMOSFnBEMuAkeHjkq3rcj19hqQ1OS9nLCO4RvhRMINgYtKi3jVeZADAf6HKMnzDisWECB7ms8EgZoWOzTdKi3vw2pCpck5k6U6RXJmFlebIIbjA/KrzlPCPt9BfMZQ9NN5OdXDYXN9ZCvgG3vt5S0ZShPDNMQllSJt8OSerE1daj+nOP8f6WiUpgrYkHwydzF/NBlejdjvkMZp3iCk+ylKhYW5OgfnChCwWEyEmmIaNj4xYyeL3WMLqHm82Uo3bQnKkUU8eO0WOmJPfO2NGrVIeM5SEl1iu8odKX4fes5u+duTRCKjbDLAg==-MIIElTCCAn2gAwIBAgIBCTANBgkqhkiG9w0BAQsFADAYMRYwFAYDVQQDDA1KZXRQcm9maWxlIENBMB4XDTE4MTEwMTEyMjk0NloXDTIwMTEwMjEyMjk0NlowaDELMAkGA1UEBhMCQ1oxDjAMBgNVBAgMBU51c2xlMQ8wDQYDVQQHDAZQcmFndWUxGTAXBgNVBAoMEEpldEJyYWlucyBzLnIuby4xHTAbBgNVBAMMFHByb2QzeS1mcm9tLTIwMTgxMTAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxcQkq+zdxlR2mmRYBPzGbUNdMN6OaXiXzxIWtMEkrJMO/5oUfQJbLLuMSMK0QHFmaI37WShyxZcfRCidwXjot4zmNBKnlyHodDij/78TmVqFl8nOeD5+07B8VEaIu7c3E1N+e1doC6wht4I4+IEmtsPAdoaj5WCQVQbrI8KeT8M9VcBIWX7fD0fhexfg3ZRt0xqwMcXGNp3DdJHiO0rCdU+Itv7EmtnSVq9jBG1usMSFvMowR25mju2JcPFp1+I4ZI+FqgR8gyG8oiNDyNEoAbsR3lOpI7grUYSvkB/xVy/VoklPCK2h0f0GJxFjnye8NT1PAywoyl7RmiAVRE/EKwIDAQABo4GZMIGWMAkGA1UdEwQCMAAwHQYDVR0OBBYEFGEpG9oZGcfLMGNBkY7SgHiMGgTcMEgGA1UdIwRBMD+AFKOetkhnQhI2Qb1t4Lm0oFKLl/GzoRykGjAYMRYwFAYDVQQDDA1KZXRQcm9maWxlIENBggkA0myxg7KDeeEwEwYDVR0lBAwwCgYIKwYBBQUHAwEwCwYDVR0PBAQDAgWgMA0GCSqGSIb3DQEBCwUAA4ICAQAF8uc+YJOHHwOFcPzmbjcxNDuGoOUIP+2h1R75Lecswb7ru2LWWSUMtXVKQzChLNPn/72W0k+oI056tgiwuG7M49LXp4zQVlQnFmWU1wwGvVhq5R63Rpjx1zjGUhcXgayu7+9zMUW596Lbomsg8qVve6euqsrFicYkIIuUu4zYPndJwfe0YkS5nY72SHnNdbPhEnN8wcB2Kz+OIG0lih3yz5EqFhld03bGp222ZQCIghCTVL6QBNadGsiN/lWLl4JdR3lJkZzlpFdiHijoVRdWeSWqM4y0t23c92HXKrgppoSV18XMxrWVdoSM3nuMHwxGhFyde05OdDtLpCv+jlWf5REAHHA201pAU6bJSZINyHDUTB+Beo28rR
XSwSh3OUIvYwKNVeoBY+KwOJ7WnuTCUq1meE6GkKc4D/cXmgpOyW/1SmBz3XjVIi/zprZ0zf3qH5mkphtg6ksjKgKjmx1cXfZAAX6wcDBNaCL+Ortep1Dh8xDUbqbBVNBL4jbiL3i3xsfNiyJgaZ5sX7i8tmStEpLbPwvHcByuf59qJhV/bZOl8KqJBETCDJcY6O2aqhTUy+9x93ThKs1GKrRPePrWPluud7ttlgtRveit/pcBrnQcXOl1rHq7ByB8CFAxNotRUYL9IF5n3wJOgkPojMy6jetQA5Ogc8Sm7RG6vg1yow==\n```\n7. 2020.6.12到期\n```\nT3ACKYHDVF-eyJsaWNlbnNlSWQiOiJUM0FDS1lIRFZGIiwibGljZW5zZWVOYW1lIjoi5bCP6bifIOeoi+W6j+WRmCIsImFzc2lnbmVlTmFtZSI6IiIsImFzc2lnbmVlRW1haWwiOiIiLCJsaWNlbnNlUmVzdHJpY3Rpb24iOiIiLCJjaGVja0NvbmN1cnJlbnRVc2UiOmZhbHNlLCJwcm9kdWN0cyI6W3siY29kZSI6IklJIiwiZmFsbGJhY2tEYXRlIjoiMjAxOS0wNi0xMyIsInBhaWRVcFRvIjoiMjAyMC0wNi0xMiJ9LHsiY29kZSI6IkFDIiwiZmFsbGJhY2tEYXRlIjoiMjAxOS0wNi0xMyIsInBhaWRVcFRvIjoiMjAyMC0wNi0xMiJ9LHsiY29kZSI6IkRQTiIsImZhbGxiYWNrRGF0ZSI6IjIwMTktMDYtMTMiLCJwYWlkVXBUbyI6IjIwMjAtMDYtMTIifSx7ImNvZGUiOiJQUyIsImZhbGxiYWNrRGF0ZSI6IjIwMTktMDYtMTMiLCJwYWlkVXBUbyI6IjIwMjAtMDYtMTIifSx7ImNvZGUiOiJHTyIsImZhbGxiYWNrRGF0ZSI6IjIwMTktMDYtMTMiLCJwYWlkVXBUbyI6IjIwMjAtMDYtMTIifSx7ImNvZGUiOiJETSIsImZhbGxiYWNrRGF0ZSI6IjIwMTktMDYtMTMiLCJwYWlkVXBUbyI6IjIwMjAtMDYtMTIifSx7ImNvZGUiOiJDTCIsImZhbGxiYWNrRGF0ZSI6IjIwMTktMDYtMTMiLCJwYWlkVXBUbyI6IjIwMjAtMDYtMTIifSx7ImNvZGUiOiJSUzAiLCJmYWxsYmFja0RhdGUiOiIyMDE5LTA2LTEzIiwicGFpZFVwVG8iOiIyMDIwLTA2LTEyIn0seyJjb2RlIjoiUkMiLCJmYWxsYmFja0RhdGUiOiIyMDE5LTA2LTEzIiwicGFpZFVwVG8iOiIyMDIwLTA2LTEyIn0seyJjb2RlIjoiUkQiLCJmYWxsYmFja0RhdGUiOiIyMDE5LTA2LTEzIiwicGFpZFVwVG8iOiIyMDIwLTA2LTEyIn0seyJjb2RlIjoiUEMiLCJmYWxsYmFja0RhdGUiOiIyMDE5LTA2LTEzIiwicGFpZFVwVG8iOiIyMDIwLTA2LTEyIn0seyJjb2RlIjoiUk0iLCJmYWxsYmFja0RhdGUiOiIyMDE5LTA2LTEzIiwicGFpZFVwVG8iOiIyMDIwLTA2LTEyIn0seyJjb2RlIjoiV1MiLCJmYWxsYmFja0RhdGUiOiIyMDE5LTA2LTEzIiwicGFpZFVwVG8iOiIyMDIwLTA2LTEyIn0seyJjb2RlIjoiREIiLCJmYWxsYmFja0RhdGUiOiIyMDE5LTA2LTEzIiwicGFpZFVwVG8iOiIyMDIwLTA2LTEyIn0seyJjb2RlIjoiREMiLCJmYWxsYmFja0RhdGUiOiIyMDE5LTA2LTEzIiwicGFpZFVwVG8iOiIyMDIwLTA2LTEyIn0seyJjb2RlIjoiUlNVIiwiZmFsbGJhY2tEYXRlIjoiMjAxOS0wNi0xMyIsInBhaWRVcFRvIjoiMjAyMC0wNi0xMiJ9XSwiaGFzaCI6IjEzMzgwMDA0LzAiLCJncmFjZVBlcmlvZERheXMiOjcsImF1dG9Qcm9sb25nYXRlZCI6ZmFsc2UsImlzQXV0b1Byb2xvbmdhdGVkIjpmYWxzZX0=-nTBuZDiAOuM4IHXNkS7GbCvZVZFo4EcHf9hHzfhaPYsaCGQjuCVJFEboopbPuEHn16yT9Zvf7yRuM5WGlGmpcOJnWLpCmGm65S6wHtZdX0kfSNIqnqdS1MhIHpftsAGxSswuQksrm09tltbO4nATeavGs1BIMafsCJVen+BvDFvYL7+3crkRI7AwdyMb2miLLYJcEVPhiVKZnzJUzT9uA8/4Q02BqsvX5oSJg8cLw3w7Cd0ISrn1i8uENe/1z3T/Ede0STM7eOekFaVEdO9cgzYME3iIFzi2TZXMSqIuBpJqF4NFb6M0039tEGy6EHqcksMyDTdCAASquqcDcHrUUA==-MIIElTCCAn2gAwIBAgIBCTANBgkqhkiG9w0BAQsFADAYMRYwFAYDVQQDDA1KZXRQcm9maWxlIENBMB4XDTE4MTEwMTEyMjk0NloXDTIwMTEwMjEyMjk0NlowaDELMAkGA1UEBhMCQ1oxDjAMBgNVBAgMBU51c2xlMQ8wDQYDVQQHDAZQcmFndWUxGTAXBgNVBAoMEEpldEJyYWlucyBzLnIuby4xHTAbBgNVBAMMFHByb2QzeS1mcm9tLTIwMTgxMTAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxcQkq+zdxlR2mmRYBPzGbUNdMN6OaXiXzxIWtMEkrJMO/5oUfQJbLLuMSMK0QHFmaI37WShyxZcfRCidwXjot4zmNBKnlyHodDij/78TmVqFl8nOeD5+07B8VEaIu7c3E1N+e1doC6wht4I4+IEmtsPAdoaj5WCQVQbrI8KeT8M9VcBIWX7fD0fhexfg3ZRt0xqwMcXGNp3DdJHiO0rCdU+Itv7EmtnSVq9jBG1usMSFvMowR25mju2JcPFp1+I4ZI+FqgR8gyG8oiNDyNEoAbsR3lOpI7grUYSvkB/xVy/VoklPCK2h0f0GJxFjnye8NT1PAywoyl7RmiAVRE/EKwIDAQABo4GZMIGWMAkGA1UdEwQCMAAwHQYDVR0OBBYEFGEpG9oZGcfLMGNBkY7SgHiMGgTcMEgGA1UdIwRBMD+AFKOetkhnQhI2Qb1t4Lm0oFKLl/GzoRykGjAYMRYwFAYDVQQDDA1KZXRQcm9maWxlIENBggkA0myxg7KDeeEwEwYDVR0lBAwwCgYIKwYBBQUHAwEwCwYDVR0PBAQDAgWgMA0GCSqGSIb3DQEBCwUAA4ICAQAF8uc+YJOHHwOFcPzmbjcxNDuGoOUIP+2h1R75Lecswb7ru2LWWSUMtXVKQzChLNPn/72W0k+oI056tgiwuG7M49LXp4zQVlQnFmWU1wwGvVhq5R63Rpjx1zjGUhcXgayu7+9zMUW596Lbomsg8qVve6euqsrFicYkIIuUu4zYPndJwfe0YkS5nY72SHnNdbPhEnN8wcB2Kz+OIG0lih3yz5EqFhld03bG
p222ZQCIghCTVL6QBNadGsiN/lWLl4JdR3lJkZzlpFdiHijoVRdWeSWqM4y0t23c92HXKrgppoSV18XMxrWVdoSM3nuMHwxGhFyde05OdDtLpCv+jlWf5REAHHA201pAU6bJSZINyHDUTB+Beo28rRXSwSh3OUIvYwKNVeoBY+KwOJ7WnuTCUq1meE6GkKc4D/cXmgpOyW/1SmBz3XjVIi/zprZ0zf3qH5mkphtg6ksjKgKjmx1cXfZAAX6wcDBNaCL+Ortep1Dh8xDUbqbBVNBL4jbiL3i3xsfNiyJgaZ5sX7i8tmStEpLbPwvHcByuf59qJhV/bZOl8KqJBETCDJcY6O2aqhTUy+9x93ThKs1GKrRPePrWPluud7ttlgtRveit/pcBrnQcXOl1rHq7ByB8CFAxNotRUYL9IF5n3wJOgkPojMy6jetQA5Ogc8Sm7RG6vg1yow==\n```\n8. 2020.7.25到期\n```\nY9MXSIF79G-eyJsaWNlbnNlSWQiOiJZOU1YU0lGNzlHIiwibGljZW5zZWVOYW1lIjoiSkJGYW1pbHkgQ2hpbmEiLCJhc3NpZ25lZU5hbWUiOiIiLCJhc3NpZ25lZUVtYWlsIjoiIiwibGljZW5zZVJlc3RyaWN0aW9uIjoiIiwiY2hlY2tDb25jdXJyZW50VXNlIjpmYWxzZSwicHJvZHVjdHMiOlt7ImNvZGUiOiJJSSIsImZhbGxiYWNrRGF0ZSI6IjIwMTktMDctMjYiLCJwYWlkVXBUbyI6IjIwMjAtMDctMjUifSx7ImNvZGUiOiJBQyIsImZhbGxiYWNrRGF0ZSI6IjIwMTktMDctMjYiLCJwYWlkVXBUbyI6IjIwMjAtMDctMjUifSx7ImNvZGUiOiJEUE4iLCJmYWxsYmFja0RhdGUiOiIyMDE5LTA3LTI2IiwicGFpZFVwVG8iOiIyMDIwLTA3LTI1In0seyJjb2RlIjoiUFMiLCJmYWxsYmFja0RhdGUiOiIyMDE5LTA3LTI2IiwicGFpZFVwVG8iOiIyMDIwLTA3LTI1In0seyJjb2RlIjoiR08iLCJmYWxsYmFja0RhdGUiOiIyMDE5LTA3LTI2IiwicGFpZFVwVG8iOiIyMDIwLTA3LTI1In0seyJjb2RlIjoiRE0iLCJmYWxsYmFja0RhdGUiOiIyMDE5LTA3LTI2IiwicGFpZFVwVG8iOiIyMDIwLTA3LTI1In0seyJjb2RlIjoiQ0wiLCJmYWxsYmFja0RhdGUiOiIyMDE5LTA3LTI2IiwicGFpZFVwVG8iOiIyMDIwLTA3LTI1In0seyJjb2RlIjoiUlMwIiwiZmFsbGJhY2tEYXRlIjoiMjAxOS0wNy0yNiIsInBhaWRVcFRvIjoiMjAyMC0wNy0yNSJ9LHsiY29kZSI6IlJDIiwiZmFsbGJhY2tEYXRlIjoiMjAxOS0wNy0yNiIsInBhaWRVcFRvIjoiMjAyMC0wNy0yNSJ9LHsiY29kZSI6IlJEIiwiZmFsbGJhY2tEYXRlIjoiMjAxOS0wNy0yNiIsInBhaWRVcFRvIjoiMjAyMC0wNy0yNSJ9LHsiY29kZSI6IlBDIiwiZmFsbGJhY2tEYXRlIjoiMjAxOS0wNy0yNiIsInBhaWRVcFRvIjoiMjAyMC0wNy0yNSJ9LHsiY29kZSI6IlJNIiwiZmFsbGJhY2tEYXRlIjoiMjAxOS0wNy0yNiIsInBhaWRVcFRvIjoiMjAyMC0wNy0yNSJ9LHsiY29kZSI6IldTIiwiZmFsbGJhY2tEYXRlIjoiMjAxOS0wNy0yNiIsInBhaWRVcFRvIjoiMjAyMC0wNy0yNSJ9LHsiY29kZSI6IkRCIiwiZmFsbGJhY2tEYXRlIjoiMjAxOS0wNy0yNiIsInBhaWRVcFRvIjoiMjAyMC0wNy0yNSJ9LHsiY29kZSI6IkRDIiwiZmFsbGJhY2tEYXRlIjoiMjAxOS0wNy0yNiIsInBhaWRVcFRvIjoiMjAyMC0wNy0yNSJ9LHsiY29kZSI6IlJTVSIsImZhbGxiYWNrRGF0ZSI6IjIwMTktMDctMjYiLCJwYWlkVXBUbyI6IjIwMjAtMDctMjUifV0sImhhc2giOiIxMzgzODYyOS8wIiwiZ3JhY2VQZXJpb2REYXlzIjo3LCJhdXRvUHJvbG9uZ2F0ZWQiOmZhbHNlLCJpc0F1dG9Qcm9sb25nYXRlZCI6ZmFsc2V9-rI4et6OSKLA4gvOzxtyp48SCWtjwsOSQBJittaw6BOVJOwVBz0p31wBWDFSdIogdRPKquk2BAou7N694entEn4/Db3Ol5uotDtUd2MHuo+BBu9QcwIoX3RTrnYLwJfTlEJfRH/3TF3WtkPGQZQQcw/23hsZzdC/WJY6tmvyTijIBScUsvIOxZ+8REbWbkTQx1KliliFyrMua7hit8LThzfffZloHciaHwUP9BjxEjU0qQi+yFacSXjxEZERJT25hZrMN+bqBxcn59/4UJBrITt8YpLIlydt0+6vMSWAMawMzKpeDEDInKy0XomauTIUfxS4sbw/dSyVdSrh+IuOc7g==-MIIElTCCAn2gAwIBAgIBCTANBgkqhkiG9w0BAQsFADAYMRYwFAYDVQQDDA1KZXRQcm9maWxlIENBMB4XDTE4MTEwMTEyMjk0NloXDTIwMTEwMjEyMjk0NlowaDELMAkGA1UEBhMCQ1oxDjAMBgNVBAgMBU51c2xlMQ8wDQYDVQQHDAZQcmFndWUxGTAXBgNVBAoMEEpldEJyYWlucyBzLnIuby4xHTAbBgNVBAMMFHByb2QzeS1mcm9tLTIwMTgxMTAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxcQkq+zdxlR2mmRYBPzGbUNdMN6OaXiXzxIWtMEkrJMO/5oUfQJbLLuMSMK0QHFmaI37WShyxZcfRCidwXjot4zmNBKnlyHodDij/78TmVqFl8nOeD5+07B8VEaIu7c3E1N+e1doC6wht4I4+IEmtsPAdoaj5WCQVQbrI8KeT8M9VcBIWX7fD0fhexfg3ZRt0xqwMcXGNp3DdJHiO0rCdU+Itv7EmtnSVq9jBG1usMSFvMowR25mju2JcPFp1+I4ZI+FqgR8gyG8oiNDyNEoAbsR3lOpI7grUYSvkB/xVy/VoklPCK2h0f0GJxFjnye8NT1PAywoyl7RmiAVRE/EKwIDAQABo4GZMIGWMAkGA1UdEwQCMAAwHQYDVR0OBBYEFGEpG9oZGcfLMGNBkY7SgHiMGgTcMEgGA1UdIwRBMD+AFKOetkhnQhI2Qb1t4Lm0oFKLl/GzoRykGjAYMRYwFAYDVQQDDA1KZXRQcm9maWxlIENBggkA0myxg7KDeeEwEwYDVR0lBAwwCgYIKwYBBQUHAwEwCwYDVR0PBAQDAgWgMA0GCSqGSIb3DQEBCwUAA4ICAQAF8uc+YJOHHwOFcPzmbjcxNDuGoOUIP+2h1R75Lecswb7ru2LWWSUMtXVKQzChLNPn/72W0k+oI056tg
iwuG7M49LXp4zQVlQnFmWU1wwGvVhq5R63Rpjx1zjGUhcXgayu7+9zMUW596Lbomsg8qVve6euqsrFicYkIIuUu4zYPndJwfe0YkS5nY72SHnNdbPhEnN8wcB2Kz+OIG0lih3yz5EqFhld03bGp222ZQCIghCTVL6QBNadGsiN/lWLl4JdR3lJkZzlpFdiHijoVRdWeSWqM4y0t23c92HXKrgppoSV18XMxrWVdoSM3nuMHwxGhFyde05OdDtLpCv+jlWf5REAHHA201pAU6bJSZINyHDUTB+Beo28rRXSwSh3OUIvYwKNVeoBY+KwOJ7WnuTCUq1meE6GkKc4D/cXmgpOyW/1SmBz3XjVIi/zprZ0zf3qH5mkphtg6ksjKgKjmx1cXfZAAX6wcDBNaCL+Ortep1Dh8xDUbqbBVNBL4jbiL3i3xsfNiyJgaZ5sX7i8tmStEpLbPwvHcByuf59qJhV/bZOl8KqJBETCDJcY6O2aqhTUy+9x93ThKs1GKrRPePrWPluud7ttlgtRveit/pcBrnQcXOl1rHq7ByB8CFAxNotRUYL9IF5n3wJOgkPojMy6jetQA5Ogc8Sm7RG6vg1yow==\n```\n9. 2020.9.19\n```\nK6IXATEF43-eyJsaWNlbnNlSWQiOiJLNklYQVRFRjQzIiwibGljZW5zZWVOYW1lIjoi5o6I5p2D5Luj55CG5ZWGOiBodHRwOi8vaWRlYS5oay5jbiIsImFzc2lnbmVlTmFtZSI6IiIsImFzc2lnbmVlRW1haWwiOiIiLCJsaWNlbnNlUmVzdHJpY3Rpb24iOiIiLCJjaGVja0NvbmN1cnJlbnRVc2UiOmZhbHNlLCJwcm9kdWN0cyI6W3siY29kZSI6IklJIiwiZmFsbGJhY2tEYXRlIjoiMjAxOS0wNi0wNSIsInBhaWRVcFRvIjoiMjAyMC0wNi0wNCJ9LHsiY29kZSI6IkFDIiwiZmFsbGJhY2tEYXRlIjoiMjAxOS0wNi0wNSIsInBhaWRVcFRvIjoiMjAyMC0wNi0wNCJ9LHsiY29kZSI6IkRQTiIsImZhbGxiYWNrRGF0ZSI6IjIwMTktMDYtMDUiLCJwYWlkVXBUbyI6IjIwMjAtMDYtMDQifSx7ImNvZGUiOiJQUyIsImZhbGxiYWNrRGF0ZSI6IjIwMTktMDYtMDUiLCJwYWlkVXBUbyI6IjIwMjAtMDYtMDQifSx7ImNvZGUiOiJHTyIsImZhbGxiYWNrRGF0ZSI6IjIwMTktMDYtMDUiLCJwYWlkVXBUbyI6IjIwMjAtMDYtMDQifSx7ImNvZGUiOiJETSIsImZhbGxiYWNrRGF0ZSI6IjIwMTktMDYtMDUiLCJwYWlkVXBUbyI6IjIwMjAtMDYtMDQifSx7ImNvZGUiOiJDTCIsImZhbGxiYWNrRGF0ZSI6IjIwMTktMDYtMDUiLCJwYWlkVXBUbyI6IjIwMjAtMDYtMDQifSx7ImNvZGUiOiJSUzAiLCJmYWxsYmFja0RhdGUiOiIyMDE5LTA2LTA1IiwicGFpZFVwVG8iOiIyMDIwLTA2LTA0In0seyJjb2RlIjoiUkMiLCJmYWxsYmFja0RhdGUiOiIyMDE5LTA2LTA1IiwicGFpZFVwVG8iOiIyMDIwLTA2LTA0In0seyJjb2RlIjoiUkQiLCJmYWxsYmFja0RhdGUiOiIyMDE5LTA2LTA1IiwicGFpZFVwVG8iOiIyMDIwLTA2LTA0In0seyJjb2RlIjoiUEMiLCJmYWxsYmFja0RhdGUiOiIyMDE5LTA2LTA1IiwicGFpZFVwVG8iOiIyMDIwLTA2LTA0In0seyJjb2RlIjoiUk0iLCJmYWxsYmFja0RhdGUiOiIyMDE5LTA2LTA1IiwicGFpZFVwVG8iOiIyMDIwLTA2LTA0In0seyJjb2RlIjoiV1MiLCJmYWxsYmFja0RhdGUiOiIyMDE5LTA2LTA1IiwicGFpZFVwVG8iOiIyMDIwLTA2LTA0In0seyJjb2RlIjoiREIiLCJmYWxsYmFja0RhdGUiOiIyMDE5LTA2LTA1IiwicGFpZFVwVG8iOiIyMDIwLTA2LTA0In0seyJjb2RlIjoiREMiLCJmYWxsYmFja0RhdGUiOiIyMDE5LTA2LTA1IiwicGFpZFVwVG8iOiIyMDIwLTA2LTA0In0seyJjb2RlIjoiUlNVIiwiZmFsbGJhY2tEYXRlIjoiMjAxOS0wNi0wNSIsInBhaWRVcFRvIjoiMjAyMC0wNi0wNCJ9XSwiaGFzaCI6IjEzMjkyMzI4LzAiLCJncmFjZVBlcmlvZERheXMiOjcsImF1dG9Qcm9sb25nYXRlZCI6ZmFsc2UsImlzQXV0b1Byb2xvbmdhdGVkIjpmYWxzZX0=-KUaQi549fH96M/qU7jTvuMeq2GuedA+WppV3irI0JHlfDuhJlidK2m3yoRxitGNmimPFVUA8Dk38OzXnP29I39QDXH5VAF8VjOP0XrqdfrpaZUKpdhRaYz8r1NAwID75U4LqYCvFbazka1dCMJBFqJ2wum1+CSQhJ1O7CSchAJAbjcCRQjbU2sXOofAA2sPLi7nlJw2wrjOHzH9cOczUn11n24PE9BQ/oYGITHkzsu94i4Q90Z1jQysMtXLgM/HoLSHY2T9rKULLoh+tdMwBp9+m0VLF/R5gdkVDV/dlorrA9OEZIsSOaG+oWSen/AulKH6OXllZJoR+b/T6YYfGWg==-MIIElTCCAn2gAwIBAgIBCTANBgkqhkiG9w0BAQsFADAYMRYwFAYDVQQDDA1KZXRQcm9maWxlIENBMB4XDTE4MTEwMTEyMjk0NloXDTIwMTEwMjEyMjk0NlowaDELMAkGA1UEBhMCQ1oxDjAMBgNVBAgMBU51c2xlMQ8wDQYDVQQHDAZQcmFndWUxGTAXBgNVBAoMEEpldEJyYWlucyBzLnIuby4xHTAbBgNVBAMMFHByb2QzeS1mcm9tLTIwMTgxMTAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxcQkq+zdxlR2mmRYBPzGbUNdMN6OaXiXzxIWtMEkrJMO/5oUfQJbLLuMSMK0QHFmaI37WShyxZcfRCidwXjot4zmNBKnlyHodDij/78TmVqFl8nOeD5+07B8VEaIu7c3E1N+e1doC6wht4I4+IEmtsPAdoaj5WCQVQbrI8KeT8M9VcBIWX7fD0fhexfg3ZRt0xqwMcXGNp3DdJHiO0rCdU+Itv7EmtnSVq9jBG1usMSFvMowR25mju2JcPFp1+I4ZI+FqgR8gyG8oiNDyNEoAbsR3lOpI7grUYSvkB/xVy/VoklPCK2h0f0GJxFjnye8NT1PAywoyl7RmiAVRE/EKwIDAQABo4GZMIGWMAkGA1UdEwQCMAAwHQYDVR0OBBYEFGEpG9oZGcfLMGNBkY7SgHiMGgTcMEgGA1UdIwRBMD+AFKOetkhnQhI2Qb1t4Lm0oFKLl/GzoRykGjAYMRYwFAYDVQQDDA1KZXRQcm9maW
xlIENBggkA0myxg7KDeeEwEwYDVR0lBAwwCgYIKwYBBQUHAwEwCwYDVR0PBAQDAgWgMA0GCSqGSIb3DQEBCwUAA4ICAQAF8uc+YJOHHwOFcPzmbjcxNDuGoOUIP+2h1R75Lecswb7ru2LWWSUMtXVKQzChLNPn/72W0k+oI056tgiwuG7M49LXp4zQVlQnFmWU1wwGvVhq5R63Rpjx1zjGUhcXgayu7+9zMUW596Lbomsg8qVve6euqsrFicYkIIuUu4zYPndJwfe0YkS5nY72SHnNdbPhEnN8wcB2Kz+OIG0lih3yz5EqFhld03bGp222ZQCIghCTVL6QBNadGsiN/lWLl4JdR3lJkZzlpFdiHijoVRdWeSWqM4y0t23c92HXKrgppoSV18XMxrWVdoSM3nuMHwxGhFyde05OdDtLpCv+jlWf5REAHHA201pAU6bJSZINyHDUTB+Beo28rRXSwSh3OUIvYwKNVeoBY+KwOJ7WnuTCUq1meE6GkKc4D/cXmgpOyW/1SmBz3XjVIi/zprZ0zf3qH5mkphtg6ksjKgKjmx1cXfZAAX6wcDBNaCL+Ortep1Dh8xDUbqbBVNBL4jbiL3i3xsfNiyJgaZ5sX7i8tmStEpLbPwvHcByuf59qJhV/bZOl8KqJBETCDJcY6O2aqhTUy+9x93ThKs1GKrRPePrWPluud7ttlgtRveit/pcBrnQcXOl1rHq7ByB8CFAxNotRUYL9IF5n3wJOgkPojMy6jetQA5Ogc8Sm7RG6vg1yow==\n```\n```\[email protected]\nJb012345678\n```\n10.\n```\nMTW881U3Z5-eyJsaWNlbnNlSWQiOiJNVFc4ODFVM1o1IiwibGljZW5zZWVOYW1lIjoiTnNzIEltIiwiYXNzaWduZWVOYW1lIjoiIiwiYXNzaWduZWVFbWFpbCI6IiIsImxpY2Vuc2VSZXN0cmljdGlvbiI6IkZvciBlZHVjYXRpb25hbCB1c2Ugb25seSIsImNoZWNrQ29uY3VycmVudFVzZSI6ZmFsc2UsInByb2R1Y3RzIjpbeyJjb2RlIjoiSUkiLCJwYWlkVXBUbyI6IjIwMTktMTEtMDYifSx7ImNvZGUiOiJBQyIsInBhaWRVcFRvIjoiMjAxOS0xMS0wNiJ9LHsiY29kZSI6IkRQTiIsInBhaWRVcFRvIjoiMjAxOS0xMS0wNiJ9LHsiY29kZSI6IlBTIiwicGFpZFVwVG8iOiIyMDE5LTExLTA2In0seyJjb2RlIjoiR08iLCJwYWlkVXBUbyI6IjIwMTktMTEtMDYifSx7ImNvZGUiOiJETSIsInBhaWRVcFRvIjoiMjAxOS0xMS0wNiJ9LHsiY29kZSI6IkNMIiwicGFpZFVwVG8iOiIyMDE5LTExLTA2In0seyJjb2RlIjoiUlMwIiwicGFpZFVwVG8iOiIyMDE5LTExLTA2In0seyJjb2RlIjoiUkMiLCJwYWlkVXBUbyI6IjIwMTktMTEtMDYifSx7ImNvZGUiOiJSRCIsInBhaWRVcFRvIjoiMjAxOS0xMS0wNiJ9LHsiY29kZSI6IlBDIiwicGFpZFVwVG8iOiIyMDE5LTExLTA2In0seyJjb2RlIjoiUk0iLCJwYWlkVXBUbyI6IjIwMTktMTEtMDYifSx7ImNvZGUiOiJXUyIsInBhaWRVcFRvIjoiMjAxOS0xMS0wNiJ9LHsiY29kZSI6IkRCIiwicGFpZFVwVG8iOiIyMDE5LTExLTA2In0seyJjb2RlIjoiREMiLCJwYWlkVXBUbyI6IjIwMTktMTEtMDYifSx7ImNvZGUiOiJSU1UiLCJwYWlkVXBUbyI6IjIwMTktMTEtMDYifV0sImhhc2giOiIxMDgyODE0Ni8wIiwiZ3JhY2VQZXJpb2REYXlzIjowLCJhdXRvUHJvbG9uZ2F0ZWQiOmZhbHNlLCJpc0F1dG9Qcm9sb25nYXRlZCI6ZmFsc2V9-aKyalfjUfiV5UXfhaMGgOqrMzTYy2rnsmobL47k8tTpR/jvG6HeL3FxxleetI+W+Anw3ZSe8QAMsSxqVS4podwlQgIe7f+3w7zyAT1j8HMVlfl2h96KzygdGpDSbwTbwOkJ6/5TQOPgAP86mkaSiM97KgvkZV/2nXQHRz1yhm+MT+OsioTwxDhd/22sSGq6KuIztZ03UvSciEmyrPdl2ueJw1WuT9YmFjdtTm9G7LuXvCM6eav+BgCRm+wwtUeDfoQqigbp0t6FQgkdQrcjoWvLSB0IUgp/f4qGf254fA7lXskT2VCFdDvi0jgxLyMVct1cKnPdM6fkHnbdSXKYDWw==-MIIElTCCAn2gAwIBAgIBCTANBgkqhkiG9w0BAQsFADAYMRYwFAYDVQQDDA1KZXRQcm9maWxlIENBMB4XDTE4MTEwMTEyMjk0NloXDTIwMTEwMjEyMjk0NlowaDELMAkGA1UEBhMCQ1oxDjAMBgNVBAgMBU51c2xlMQ8wDQYDVQQHDAZQcmFndWUxGTAXBgNVBAoMEEpldEJyYWlucyBzLnIuby4xHTAbBgNVBAMMFHByb2QzeS1mcm9tLTIwMTgxMTAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxcQkq+zdxlR2mmRYBPzGbUNdMN6OaXiXzxIWtMEkrJMO/5oUfQJbLLuMSMK0QHFmaI37WShyxZcfRCidwXjot4zmNBKnlyHodDij/78TmVqFl8nOeD5+07B8VEaIu7c3E1N+e1doC6wht4I4+IEmtsPAdoaj5WCQVQbrI8KeT8M9VcBIWX7fD0fhexfg3ZRt0xqwMcXGNp3DdJHiO0rCdU+Itv7EmtnSVq9jBG1usMSFvMowR25mju2JcPFp1+I4ZI+FqgR8gyG8oiNDyNEoAbsR3lOpI7grUYSvkB/xVy/VoklPCK2h0f0GJxFjnye8NT1PAywoyl7RmiAVRE/EKwIDAQABo4GZMIGWMAkGA1UdEwQCMAAwHQYDVR0OBBYEFGEpG9oZGcfLMGNBkY7SgHiMGgTcMEgGA1UdIwRBMD+AFKOetkhnQhI2Qb1t4Lm0oFKLl/GzoRykGjAYMRYwFAYDVQQDDA1KZXRQcm9maWxlIENBggkA0myxg7KDeeEwEwYDVR0lBAwwCgYIKwYBBQUHAwEwCwYDVR0PBAQDAgWgMA0GCSqGSIb3DQEBCwUAA4ICAQAF8uc+YJOHHwOFcPzmbjcxNDuGoOUIP+2h1R75Lecswb7ru2LWWSUMtXVKQzChLNPn/72W0k+oI056tgiwuG7M49LXp4zQVlQnFmWU1wwGvVhq5R63Rpjx1zjGUhcXgayu7+9zMUW596Lbomsg8qVve6euqsrFicYkIIuUu4zYPndJwfe0YkS5nY72SHnNdbPhEnN8wcB2Kz+OIG0lih3yz5EqFhld03bGp222ZQCIghCTVL6QBNadGsiN/lWLl4JdR3lJkZzlpFdiHijoVRdWeSWqM4y0t23c92HXKrgppoSV18XMxr
WVdoSM3nuMHwxGhFyde05OdDtLpCv+jlWf5REAHHA201pAU6bJSZINyHDUTB+Beo28rRXSwSh3OUIvYwKNVeoBY+KwOJ7WnuTCUq1meE6GkKc4D/cXmgpOyW/1SmBz3XjVIi/zprZ0zf3qH5mkphtg6ksjKgKjmx1cXfZAAX6wcDBNaCL+Ortep1Dh8xDUbqbBVNBL4jbiL3i3xsfNiyJgaZ5sX7i8tmStEpLbPwvHcByuf59qJhV/bZOl8KqJBETCDJcY6O2aqhTUy+9x93ThKs1GKrRPePrWPluud7ttlgtRveit/pcBrnQcXOl1rHq7ByB8CFAxNotRUYL9IF5n3wJOgkPojMy6jetQA5Ogc8Sm7RG6vg1yow==\n```\n11. \n```\nN757JE0KCT-eyJsaWNlbnNlSWQiOiJONzU3SkUwS0NUIiwibGljZW5zZWVOYW1lIjoid3UgYW5qdW4iLCJhc3NpZ25lZU5hbWUiOiIiLCJhc3NpZ25lZUVtYWlsIjoiIiwibGljZW5zZVJlc3RyaWN0aW9uIjoiRm9yIGVkdWNhdGlvbmFsIHVzZSBvbmx5IiwiY2hlY2tDb25jdXJyZW50VXNlIjpmYWxzZSwicHJvZHVjdHMiOlt7ImNvZGUiOiJJSSIsInBhaWRVcFRvIjoiMjAyMC0wMS0wNyJ9LHsiY29kZSI6IkFDIiwicGFpZFVwVG8iOiIyMDIwLTAxLTA3In0seyJjb2RlIjoiRFBOIiwicGFpZFVwVG8iOiIyMDIwLTAxLTA3In0seyJjb2RlIjoiUFMiLCJwYWlkVXBUbyI6IjIwMjAtMDEtMDcifSx7ImNvZGUiOiJHTyIsInBhaWRVcFRvIjoiMjAyMC0wMS0wNyJ9LHsiY29kZSI6IkRNIiwicGFpZFVwVG8iOiIyMDIwLTAxLTA3In0seyJjb2RlIjoiQ0wiLCJwYWlkVXBUbyI6IjIwMjAtMDEtMDcifSx7ImNvZGUiOiJSUzAiLCJwYWlkVXBUbyI6IjIwMjAtMDEtMDcifSx7ImNvZGUiOiJSQyIsInBhaWRVcFRvIjoiMjAyMC0wMS0wNyJ9LHsiY29kZSI6IlJEIiwicGFpZFVwVG8iOiIyMDIwLTAxLTA3In0seyJjb2RlIjoiUEMiLCJwYWlkVXBUbyI6IjIwMjAtMDEtMDcifSx7ImNvZGUiOiJSTSIsInBhaWRVcFRvIjoiMjAyMC0wMS0wNyJ9LHsiY29kZSI6IldTIiwicGFpZFVwVG8iOiIyMDIwLTAxLTA3In0seyJjb2RlIjoiREIiLCJwYWlkVXBUbyI6IjIwMjAtMDEtMDcifSx7ImNvZGUiOiJEQyIsInBhaWRVcFRvIjoiMjAyMC0wMS0wNyJ9LHsiY29kZSI6IlJTVSIsInBhaWRVcFRvIjoiMjAyMC0wMS0wNyJ9XSwiaGFzaCI6IjExNTE5OTc4LzAiLCJncmFjZVBlcmlvZERheXMiOjAsImF1dG9Qcm9sb25nYXRlZCI6ZmFsc2UsImlzQXV0b1Byb2xvbmdhdGVkIjpmYWxzZX0=-AE3x5sRpDellY4SmQVy2Pfc2IT7y1JjZFmDA5JtOv4K5gwVdJOLw5YGiOskZTuGu6JhOi50nnd0WaaNZIuVVVx3T5MlXrAuO3kb2qPtLtQ6/n3lp4fIv+6384D4ciEyRWijG7NA9exQx39Tjk7/xqaGk7ooKgq5yquIfIA+r4jlbW8j9gas1qy3uTGUuZQiPB4lv3P5OIpZzIoWXnFwWhy7s//mjOWRZdf/Du3RP518tMk74wizbTeDn84qxbM+giNAn+ovKQRMYHtLyxntBiP5ByzfAA9Baa5TUGW5wDiZrxFuvBAWTbLrRI0Kd7Nb/tB9n1V9uluB2WWIm7iMxDg==-MIIElTCCAn2gAwIBAgIBCTANBgkqhkiG9w0BAQsFADAYMRYwFAYDVQQDDA1KZXRQcm9maWxlIENBMB4XDTE4MTEwMTEyMjk0NloXDTIwMTEwMjEyMjk0NlowaDELMAkGA1UEBhMCQ1oxDjAMBgNVBAgMBU51c2xlMQ8wDQYDVQQHDAZQcmFndWUxGTAXBgNVBAoMEEpldEJyYWlucyBzLnIuby4xHTAbBgNVBAMMFHByb2QzeS1mcm9tLTIwMTgxMTAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxcQkq+zdxlR2mmRYBPzGbUNdMN6OaXiXzxIWtMEkrJMO/5oUfQJbLLuMSMK0QHFmaI37WShyxZcfRCidwXjot4zmNBKnlyHodDij/78TmVqFl8nOeD5+07B8VEaIu7c3E1N+e1doC6wht4I4+IEmtsPAdoaj5WCQVQbrI8KeT8M9VcBIWX7fD0fhexfg3ZRt0xqwMcXGNp3DdJHiO0rCdU+Itv7EmtnSVq9jBG1usMSFvMowR25mju2JcPFp1+I4ZI+FqgR8gyG8oiNDyNEoAbsR3lOpI7grUYSvkB/xVy/VoklPCK2h0f0GJxFjnye8NT1PAywoyl7RmiAVRE/EKwIDAQABo4GZMIGWMAkGA1UdEwQCMAAwHQYDVR0OBBYEFGEpG9oZGcfLMGNBkY7SgHiMGgTcMEgGA1UdIwRBMD+AFKOetkhnQhI2Qb1t4Lm0oFKLl/GzoRykGjAYMRYwFAYDVQQDDA1KZXRQcm9maWxlIENBggkA0myxg7KDeeEwEwYDVR0lBAwwCgYIKwYBBQUHAwEwCwYDVR0PBAQDAgWgMA0GCSqGSIb3DQEBCwUAA4ICAQAF8uc+YJOHHwOFcPzmbjcxNDuGoOUIP+2h1R75Lecswb7ru2LWWSUMtXVKQzChLNPn/72W0k+oI056tgiwuG7M49LXp4zQVlQnFmWU1wwGvVhq5R63Rpjx1zjGUhcXgayu7+9zMUW596Lbomsg8qVve6euqsrFicYkIIuUu4zYPndJwfe0YkS5nY72SHnNdbPhEnN8wcB2Kz+OIG0lih3yz5EqFhld03bGp222ZQCIghCTVL6QBNadGsiN/lWLl4JdR3lJkZzlpFdiHijoVRdWeSWqM4y0t23c92HXKrgppoSV18XMxrWVdoSM3nuMHwxGhFyde05OdDtLpCv+jlWf5REAHHA201pAU6bJSZINyHDUTB+Beo28rRXSwSh3OUIvYwKNVeoBY+KwOJ7WnuTCUq1meE6GkKc4D/cXmgpOyW/1SmBz3XjVIi/zprZ0zf3qH5mkphtg6ksjKgKjmx1cXfZAAX6wcDBNaCL+Ortep1Dh8xDUbqbBVNBL4jbiL3i3xsfNiyJgaZ5sX7i8tmStEpLbPwvHcByuf59qJhV/bZOl8KqJBETCDJcY6O2aqhTUy+9x93ThKs1GKrRPePrWPluud7ttlgtRveit/pcBrnQcXOl1rHq7ByB8CFAxNotRUYL9IF5n3wJOgkPojMy6jetQA5Ogc8Sm7RG6vg1yow==\n```\n"
},
{
"alpha_fraction": 0.47361963987350464,
"alphanum_fraction": 0.4981594979763031,
"avg_line_length": 22.285715103149414,
"blob_id": "1ec40e7973ed7ffeaa5be1277db4e73820db9de4",
"content_id": "554a2627529b7866636a30ee5bf7f29c33adce5c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 897,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 35,
"path": "/DailyCodingProblems/400_sublist_sum_Goldman_Sachs/python/main.py",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#coding:utf-8\nclass Main:\n table = []\n # 初始化Main对象\n def __init__(self,size):\n self.table = [0] * size\n # 获得最低有效位\n def lowbit(self,v):\n return v & (-v)\n # 更新指定下标的值\n def update(self,i,size,val):\n i+=1\n while i <= size:\n self.table[i-1] += val\n i += self.lowbit(i)\n # 获取0~idx区间的和\n def getSum(self,idx):\n idx+=1\n s = 0\n while idx > 0:\n s += self.table[idx-1]\n idx -= self.lowbit(idx)\n return s\n # 获取[i,j)区间的和\n def sum(self,i,j):\n if i >= j or j < 1: # 给定的下标需要合法\n return 0\n return self.getSum(j-1) - self.getSum(i-1)\n\ndatas = [1,2,3,4,5]\nsize = len(datas)\nsolution = Main(size)\nfor i in range(size):\n solution.update(i,size,datas[i])\nprint(solution.sum(0,3))\n"
},
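The `main.py` record above implements a Fenwick (binary indexed) tree: `lowbit(i) = i & (-i)` walks the implicit tree links, `update` adds a value along one chain of indices, and `getSum` accumulates a prefix along the other. As a cross-check of the same technique, here is a minimal, self-contained Python sketch (independent of the class above; all names here are illustrative) that builds the tree directly and verifies its prefix sums against naive summation:

```python
def lowbit(i):
    return i & (-i)

def build(data):
    """O(n log n) build of a 1-indexed Fenwick tree over data."""
    n = len(data)
    tree = [0] * (n + 1)
    for i, v in enumerate(data, start=1):
        while i <= n:          # add v along the update chain for index i
            tree[i] += v
            i += lowbit(i)
    return tree

def prefix_sum(tree, idx):
    """Sum of data[0:idx]."""
    s = 0
    while idx > 0:
        s += tree[idx]
        idx -= lowbit(idx)
    return s

data = [1, 2, 3, 4, 5]
tree = build(data)
# every prefix matches the naive sum
assert all(prefix_sum(tree, k) == sum(data[:k]) for k in range(len(data) + 1))
print(prefix_sum(tree, 3))  # 6, matching solution.sum(0, 3) in the record above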
{
"alpha_fraction": 0.642276406288147,
"alphanum_fraction": 0.642276406288147,
"avg_line_length": 7.482758522033691,
"blob_id": "b6c626232ae9788fc2402af7f506ba18e50efdb0",
"content_id": "52fe27cbc1bc8cfaf75fdeca57107d13f0d3a7e9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 534,
"license_type": "no_license",
"max_line_length": 25,
"num_lines": 29,
"path": "/外语/韩语/2019-09-02.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "안녕【名词】:你好(打招呼),安宁 ,安定 ,平安\nannyeong\n\n나/저【代名词】:我,(저:我的谦称)\nna/cheo\n\n제【代名词】:我的,“저의”的略语\nche\n\n중국【名词】:中国\nchungguk\n\n취미【名词】:嗜好 ,爱好 ,习好 ,趣味 ,兴趣\nch'wimi\n\n드라마【名词】:电视剧\nteurama\n\n요즘【名词,副词】:最近,进来,这几天\nyojeum\n\n한국【名词】:韩国\nhan'guk\n\n여행【名词】:旅行\nyeohaeng\n\n사람【名词】:人\nsaram\n"
},
{
"alpha_fraction": 0.4444444477558136,
"alphanum_fraction": 0.4444444477558136,
"avg_line_length": 6,
"blob_id": "795a8bb5326ad7be1259f48a3cbd49d00b544cb2",
"content_id": "dd44182e66ea88811f0a1b8bfc55f1d1070748da",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 17,
"license_type": "no_license",
"max_line_length": 6,
"num_lines": 1,
"path": "/网络/认证/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 网络认证 \n"
},
{
"alpha_fraction": 0.6288209557533264,
"alphanum_fraction": 0.6331877708435059,
"avg_line_length": 16.615385055541992,
"blob_id": "54bb7c949390b182da982bf06b4c8c05297cc77f",
"content_id": "06f6de9922a0ecb742d940565d620efcbc97ba89",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 730,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 26,
"path": "/编程开发/后端/Wordpress/问题/检测某个文章是否存在.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 判断文章是否存在 \n1. 通过`get_permalink()`函数 \n```\nif(get_permalink($post_id) === false){\n //文章不存在\n}\n```\n2. 通过`get_post_status()`函数 \n`get_post_status()`函数可以检测当前文章或指定文章的状态,如果检测不到状态,则说明该文章不存在,返回false.\n```\nif(get_post_status($post_id) === false){\n //文章不存在\n}\n```\n该函数不仅能检测文章是否存在,还能判断文章处于哪种状态. \n常见的状态: \n```\npublish 公开发布的post\npendding 待审文章\ndraft 草稿\nauto-draft 最新自动保存的草稿,无文章内容\nfuture 定时发布文章\nprivate 私有文章\ninherit 修订版\ntrash 在回收站\n```\n"
},
{
"alpha_fraction": 0.42300379276275635,
"alphanum_fraction": 0.463878333568573,
"avg_line_length": 20.46938705444336,
"blob_id": "a4ceaa314e2f6b6fc00edb0428c951260d6481a4",
"content_id": "ab368a5e05133879b2fdc42fb2c423da0830e367",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1072,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 49,
"path": "/DailyCodingProblems/413_climb_staircase_Amazon/c/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#include<stdio.h>\nint steps[1008611];\n\nvoid path(int n,int step){\n int i;\n if(n == 0){\n for(i = step-1; i >= 0; i--){\n if(i == step-1) printf(\"%d\",steps[i]);\n else printf(\",%d\",steps[i]);\n }\n puts(\"\");\n return;\n }\n\n for(i = 1; i <= 2 && i <= n; i++){\n steps[step] = i;\n path(n - i,step+1);\n }\n}\n\n//根据给定的步长求路径\nvoid path2(int step_lengths[],int step_size,int n,int step){\n int i;\n if(n == 0){\n for(i = step-1; i >= 0; i--){\n if(i == step-1) printf(\"%d\",steps[i]);\n else printf(\",%d\",steps[i]);\n }\n puts(\"\");\n return;\n }\n\n for(i = 0; i < step_size && step_lengths[i] <= n; i++){\n steps[step] = step_lengths[i];\n path2(step_lengths,step_size,n - steps[step],step+1);\n }\n}\n\n\nint main(){\n path(4,0);\n\n int step_lengths[] = {2,3,5,10,20,50,100};\n int step_size = sizeof(step_lengths) / sizeof(step_lengths[0]);\n\n printf(\"case 2:\\n\");\n path2(step_lengths,step_size,10,0);\n return 0;\n}\n"
},
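The C program above enumerates staircase climbs recursively, recording steps in a global array and printing a path whenever `n` reaches 0; `path2` generalizes to arbitrary sorted step sizes. A compact Python rendering of the same idea, offered as a sketch with hypothetical names rather than code from the repo, uses a generator instead of a global buffer:

```python
def climb_paths(n, steps=(1, 2)):
    """Yield every ordered sequence of allowed steps summing to n."""
    if n == 0:
        yield ()
        return
    for s in steps:             # steps assumed sorted ascending
        if s > n:
            break               # same early cut as `step_lengths[i] <= n`
        for rest in climb_paths(n - s, steps):
            yield (s,) + rest

for p in climb_paths(4):
    print(p)                    # 5 paths, mirroring path(4, 0) above
print(sum(1 for _ in climb_paths(10, (2, 3, 5, 10))))  # 15 compositions
```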
{
"alpha_fraction": 0.33157894015312195,
"alphanum_fraction": 0.3526315689086914,
"avg_line_length": 19,
"blob_id": "d6f27ee2fb44ce80317d3583b1d3eb46f20b4806",
"content_id": "6948213e25457f776a2de2f824de2055cb65f2be",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 396,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 19,
"path": "/leetcode/201-数字范围按位与/c/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#include<stdio.h>\n\nint main(){\n int a,b;\n while(~scanf(\"%d%d\",&a,&b)){\n int mask = 1 << 30;\n int prod = a ^ b;\n int res = 0;\n for(;mask>0;mask>>=1){\n if((prod&mask) == 0){ //ab对应二进制位相同\n res|=(a & mask);\n }else{\n break;\n }\n }\n printf(\"%d\\n\",res);\n }\n return 0;\n}\n"
},
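The C solution keeps only the bits where `a` and `b` agree by scanning `a ^ b` from the top bit down: once a differing bit appears, every lower bit takes both values somewhere in `[a, b]`, so only the common binary prefix survives the AND. The same idea is often written as a shift loop; a small Python sketch (illustrative, not from the record):

```python
def range_and(a, b):
    """Bitwise AND of all integers in [a, b]: the common binary prefix."""
    shift = 0
    while a != b:        # strip differing low bits until the prefixes meet
        a >>= 1
        b >>= 1
        shift += 1
    return a << shift

assert range_and(5, 7) == 4
assert range_and(0, 1) == 0
print(range_and(26, 30))   # 24, i.e. 0b11000
```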
{
"alpha_fraction": 0.4004913866519928,
"alphanum_fraction": 0.45945945382118225,
"avg_line_length": 19.350000381469727,
"blob_id": "0df0db7adf09e9bc83598dc72d8fddb050033c3b",
"content_id": "646d73635ead440fe713957e819ca7a54b8ae9c2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 509,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 20,
"path": "/算法/排序算法/计数排序/c/main.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#include<stdio.h>\n\nint main(){\n int arr[] = {\n 1,3,4,0,0,5,8,5,9,0\n };\n int size = sizeof(arr)/sizeof(arr[0]);\n\n int tmp[10] = {0}; //临时空间,这里我们观察到所有需要排序的数字都在0~9之间,所以申请了长度为10的数组\n int i,j;\n for(i = 0; i < size; i++)\n tmp[arr[i]]++;\n\n //输出排序结果\n for(i = 0;i < 10; i++){\n for(j = 0; j < tmp[i]; j++)\n printf(\"%d \",i);//这里可以把值一次赋给arr\n }\n return 0;\n}\n"
},
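The counting-sort record above tallies occurrences into a fixed-size `tmp` array and then replays the counts in index order, which is what makes the sort O(n + k) for keys in [0, k). A minimal Python version of the same technique (hypothetical names, not from the repo):

```python
def counting_sort(arr, k=10):
    """Counting sort for integers known to lie in [0, k)."""
    counts = [0] * k
    for v in arr:
        counts[v] += 1                  # tally each key
    out = []
    for value, count in enumerate(counts):
        out.extend([value] * count)     # replay counts in key order
    return out

print(counting_sort([1, 3, 4, 0, 0, 5, 8, 5, 9, 0]))
# [0, 0, 0, 1, 3, 4, 5, 5, 8, 9]
```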
{
"alpha_fraction": 0.4564618468284607,
"alphanum_fraction": 0.45909690856933594,
"avg_line_length": 24.931676864624023,
"blob_id": "794dcf48dfc076925f255a46a44938d41be0f883",
"content_id": "8503f53d6bfa6a51326d0b0a0ea39d36cd8e98f1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 9153,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 322,
"path": "/数据结构/树/红黑树/c/term3/rb.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#include \"rb.h\"\n\nnodep node(int key){\n nodep ret = (nodep)ralloc(sizeof(Node));\n\n ret->color = RED; //设置默认颜色\n ret->parent = nullnode(NULL);\n ret->left = nullnode(ret);\n ret->right = nullnode(ret);\n ret->key = (int*)ralloc(sizeof(int));\n *(ret->key) = key;\n\n return ret;\n}\n\nnodep nullnode(nodep p){\n nodep ret = (nodep)ralloc(sizeof(Node));\n\n ret->color = BLACK; //设置默认颜色\n ret->parent = p;\n ret->left = ret->right = NULL;\n ret->key = NULL;\n\n return ret;\n}\n\ntreep tree(){\n treep t = (treep)ralloc(sizeof(Tree));\n t->root = NULL;\n t->size = 0;\n\n return t;\n}\n\nBoolean insertNode(treep t,int key){\n return _insertNode(t,&(t->root),key);\n}\n\n//以某个节点为根节点插入\nBoolean _insertNode(treep t,nodep* rootp,int key){\n if(rootp == NULL) return FALSE;\n nodep root = *rootp;\n if(root == NULL){ //根节点\n t->root = node(key);\n }else{\n while(root->key != NULL){\n if(Key(root) == key) return FALSE; //已经存在元素,返回插入失败\n \n if(Key(root) > key){\n root = root->left;\n }else{\n root = root->right;\n }\n }\n nodep leaf = root;\n root = node(key);\n transplant(t,leaf,root); //替换nill节点\n if(isRoot(root)) t->root = root; //切换根节点\n free(leaf); // 删除叶子节点\n \n\n //当前处于nil节点\n while(isRed(root) && isRed(parent(root))){ //父节点为红色\n nodep pa = parent(root); //父节点\n nodep ga = grandpa(root); //祖父节点\n nodep un = uncle(root); //叔父节点\n if(isNull(ga) || isLeaf(ga)){ //祖父节点不存在\n break;\n }\n\n if(isRed(un)){ //叔父节点为红节点\n pa->color = un->color = BLACK; //父节点与叔父节点颜色反转\n ga->color = RED; //祖父节点的颜色反转\n root = ga; //此时祖父节点视作新加入的节点\n }else{\n //叔父节点为黑色,父节点红色,祖父节点黑色\n //我们需要将当前节点与父节点两个双红节点其中一个变黑\n ga->color = RED; //祖父节点变红色\n if(key_cmp(pa,ga) != key_cmp(root,pa)){ //非一个方向\n if(isRight(root)){ //pa > root,LR结构\n pa = rotate_left(t,pa); //左旋,转成LL\n root = pa->left;\n }else{\n pa = rotate_right(t,pa); //右旋,转成RR\n root = pa->right;\n }\n }\n pa->color = BLACK; //将中间元素转为黑色\n\n if(isRight(pa)){ //pa > root\n root = rotate_left(t,ga); //左旋\n }else{\n root = rotate_right(t,ga); //右旋\n }\n }\n }\n }\n t->size++;\n t->root->color = BLACK; //根节点为黑色\n return TRUE;\n}\n\nBoolean transplant(treep t,nodep dest,nodep src){\n if(isNull(dest) || isNull(src)) return FALSE;\n if(isLeaf(src->parent)){ //一个新节点\n free(src->parent);\n }\n src->parent = dest->parent;\n if(!isRoot(dest)){\n if(isRight(dest)){ //右孩子\n dest->parent->right = src;\n }else{\n dest->parent->left = src;\n }\n }\n\n if(isRoot(src)){\n t->root = src;\n if(isLeaf(src)){ //根节点不允许为NIL\n free(t->root->parent);\n t->root = NULL;\n }\n }\n return TRUE;\n}\n\nnodep rotate_left(treep t,nodep nd){\n if(isNull(t) || isNull(nd) || isNull(nd->right)) return NULL;\n nodep right = nd->right;\n nd->right = right->left;\n if(nd->right){\n nd->right->parent = nd;\n }\n right->parent = nd->parent;\n if(!isRoot(nd)){ //nd为根节点\n if(isLeft(nd)){ //左侧\n right->parent->left = right;\n }else{\n right->parent->right = right;\n }\n }\n nd->parent = right;\n right->left = nd;\n\n if(isRoot(right)){\n t->root = right;\n if(isLeaf(right)){ //根节点不允许为NIL\n free(t->root);\n t->root = NULL;\n }\n }\n\n return right;\n}\n\nnodep rotate_right(treep t,nodep nd){\n if(isNull(t) || isNull(nd) || isNull(nd->left)) return NULL;\n nodep left = nd->left;\n nd->left = left->right;\n if(nd->left){\n nd->left->parent = nd;\n }\n left->parent = nd->parent;\n if(!isRoot(nd)){ //nd为根节点\n if(isLeft(nd)){ //左侧\n left->parent->left = left;\n }else{\n left->parent->right = left;\n }\n }\n nd->parent = left;\n left->right = nd;\n\n if(isRoot(left)){\n t->root = left;\n if(isLeaf(left)){ //根节点不允许为NIL\n 
free(t->root);\n t->root = NULL;\n }\n }\n return left;\n}\n\nvoid visit(treep t){\n return _visit(t->root);\n}\n\n//层序遍历\nvoid _visit(nodep root){\n nodep stack[1024];\n int len = 0;\n stack[len++] = root;\n stack[len++] = NULL;\n nodep tmp = NULL;\n int i;\n for(i = 0; i < len; i++){\n tmp = stack[i];\n if(tmp == NULL){\n if(len > 0 && stack[len-1] != NULL)\n stack[len++] = NULL;\n printf(\"\\n\");\n }else{\n printf(\"%d[%s]{%d} \",Key(tmp),isRed(tmp) ? \"R\" : \"B\",isRoot(tmp) ? -1 : Key(tmp->parent));\n // printf(\"%p\\n\",tmp);\n if(!isLeaf(tmp->left))\n stack[len++ % 1024] = tmp->left;\n\n if(!isLeaf(tmp->right))\n stack[len++ % 1024] = tmp->right;\n }\n }\n}\n\nnodep search(treep t,int key){\n if(t == NULL || t->root == NULL) return NULL;\n nodep root = t->root;\n while(!isLeaf(root)){\n if(Key(root) == key){\n return root;\n }else if(Key(root) > key){\n root = root->left;\n }else{\n root = root->right;\n }\n }\n return NULL;\n}\n\nBoolean deleteNode(treep t,int key){\n nodep root = search(t,key);\n nodep current = NULL;\n if(root == NULL) return FALSE; //结果不存在\n t->size--;\n // printf(\"删除:%d\\n\",key);\n // printf(\"%p\\n\",sibling(root)->left);\n if(isLeaf(root->left)){ //左节点不存在\n free(root->left); //释放nil节点\n current = root->right;\n transplant(t,root,root->right); // 将root->right移动到root\n }else if(isLeaf(root->right)){\n free(root->right); //释放nil节点\n current = root->left;\n transplant(t,root,root->left); // 将root->left移动到root\n }else{ //左右节点都存在\n nodep min = minimum(root->right); //root右子树上的最小值\n int* tmp = root->key;\n root->key = min->key;\n min->key = tmp;\n current = min->right;\n free(root->left); //释放nil节点\n transplant(t,min,min->right); // 转移\n root = min;\n }\n\n Color del_color = root->color;\n free(root); //释放root\n\n if(del_color == BLACK){\n deleteFix(t,current);\n }\n if(!isLeaf(current))\n current->color = BLACK;\n\n return TRUE;\n}\n\n//删除\nBoolean deleteFix(treep t,nodep current){\n while(!isRoot(current) && isBlack(current)){\n nodep pa = parent(current);\n nodep si = sibling(current);\n if(si == NULL) break; \n // case1,si为红色\n if(isRed(si)){\n si->color = BLACK; //si设定为黑色\n if(isRight(si)){\n rotate_left(t,pa);\n si->left->color = RED; //si的左孩子设定为红色\n }else{\n rotate_right(t,pa);\n si->right->color = RED; //si的右孩子设定为红色\n }\n }else{\n // case2,un为黑,其孩子也是黑色\n if(isBlack(si->left) && isBlack(si->right)){\n current = parent(current);\n si->color = RED; //叔父节点设置为红色\n }else if(isBlack(si->right)){ //case3,左孩子为红色,右孩子为黑色\n current->color = BLACK;\n pa->color = RED;\n if(isLeft(current)){\n rotate_right(t,pa);\n }else{\n rotate_left(t,pa);\n }\n }else{ //右节点为红色\n si->color = pa->color; //叔父节点颜色设置为父节点颜色\n si->right->color = BLACK; //右节点设为黑色\n pa->color = BLACK;\n if(isRight(si)){\n rotate_left(t,pa);\n }else{\n rotate_right(t,pa);\n }\n\n break; //case4调整后必然平衡\n }\n }\n }\n current->color = BLACK;\n\n return TRUE;\n}\n\nnodep minimum(nodep root){\n nodep m = NULL;\n while(!isLeaf(root)){\n m = root;\n root = root->left;\n }\n return m;\n}"
},
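Much of the rb.c text above is pointer bookkeeping: `rotate_left`/`rotate_right` must re-home the pivot's inner subtree, the parent link, and possibly the tree root. Stripped of colors and NIL sentinels, that rotation skeleton looks like the following Python sketch (illustrative names, not the C API):

```python
class Node:
    def __init__(self, key):
        self.key = key
        self.left = self.right = self.parent = None

def rotate_left(root, x):
    """Left-rotate around x; returns the (possibly new) tree root."""
    y = x.right
    x.right = y.left                 # y's inner subtree moves under x
    if y.left:
        y.left.parent = x
    y.parent = x.parent              # y replaces x under x's parent
    if x.parent is None:
        root = y                     # x was the root, so y becomes it
    elif x is x.parent.left:
        x.parent.left = y
    else:
        x.parent.right = y
    y.left = x
    x.parent = y
    return root

a, b, c = Node(1), Node(2), Node(3)
a.right, b.parent = b, a
b.right, c.parent = c, b
root = rotate_left(a, a)
print(root.key, root.left.key, root.right.key)   # 2 1 3
```

Running it on the degenerate chain 1→2→3 re-roots the tree at 2, which is exactly the rebalancing step the insert fix-up in the C code relies on.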
{
"alpha_fraction": 0.7136659622192383,
"alphanum_fraction": 0.7136659622192383,
"avg_line_length": 19.863636016845703,
"blob_id": "ed531177ddb2c56d3b433abedf48c7519a1d1d2b",
"content_id": "484615a963fcbcc7eaf75741c2ce7adec9423270",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 611,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 22,
"path": "/编程开发/git/push出现错误.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# git push错误\n在git push时出现\n```\n fatal: remote error:\n You can't push to git://github.com/xxx/xxx.git\n Use https://github.com/xxx/xxx.git\n```\n错误,此时执行\n```\ngit remote remove origin\ngit remote add origin https://github.com/xxx/xxx.git\n```\n再执行git push时会要求输入用户名,密码. \n在网上找到的方案: \n```\n执行:\ngit remote remove origin\ngit remote add origin [email protected]:/xxx/xxx.git\n```\n此时再执行git push可以正常执行. \n**原因分析:** \ngithub中默认的方式是[email protected]:/xxx/xxx.git的方式,而在clone时错误的使用了git协议的方式. \n"
},
{
"alpha_fraction": 0.7428571581840515,
"alphanum_fraction": 0.7428571581840515,
"avg_line_length": 15.5,
"blob_id": "6fe87fd83ed416a95c452e9af49670e8ba242fdc",
"content_id": "58ca8a85818c36acc06bc2f6f88f3bdb17239e7a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 87,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 2,
"path": "/DailyCodingProblems/399_sum_same_value_Facebook/c/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# 解决思路 \n这里主要给出暴力解法和利用树状数组解决的思路. \n"
},
{
"alpha_fraction": 0.625,
"alphanum_fraction": 0.6859756112098694,
"avg_line_length": 24.230770111083984,
"blob_id": "329ee6f8e1080b26b2e6651ae758a6896edc88f2",
"content_id": "fa0902a0f32935c1eafc31d1288c89682df347be",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 460,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 13,
"path": "/DailyCodingProblems/390_num_not_exists_Two_Sigma/python/main.py",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#coding=utf-8\n# 题目大意是求一个固定边界的数列中未出现的数字\nimport random\n\ndef getSet(size,cut): # 获取打乱并剔除指定数目的数据的集合\n li = list(range(1,size+1))\n random.shuffle(li)\n return set(li[cut:])\nscale = 1000000 # 数据规模\ncuted = 1000 # 数据剔除数\nli = getSet(scale,cuted) #获取数据剔除后的集合\nli_raw = set(range(1,scale+1)) # 获取1~100的集合\nprint(li_raw - li) # 取差集\n"
},
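The script above finds the removed numbers by plain set difference, which is O(n) once both sets exist. In miniature (toy values, not the script's million-element scale):

```python
full = set(range(1, 11))          # the known bounded sequence 1..10
observed = full - {3, 6}          # pretend 3 and 6 went missing
print(sorted(full - observed))    # [3, 6]
```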
{
"alpha_fraction": 0.40560293197631836,
"alphanum_fraction": 0.42996346950531006,
"avg_line_length": 18.5238094329834,
"blob_id": "7b2c55cac8b12c008559c8ca352da909ed6c73e7",
"content_id": "8a900858b26ae78a88997fda05bbe193b65bba21",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 829,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 42,
"path": "/算法/贪心算法/Summer-Vacation/cpp/main.cpp",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#if 0\n思路来自: https://blog.csdn.net/Hpuer_Random/article/details/99128977\n#endif\n#include<iostream>\n#include<queue>\n#include<algorithm>\n\nusing namespace std;\n\nstruct Node{\n int a;\n int b;\n} node[100007];\n\nbool cmp(Node a,Node b){\n return a.a < b.a;\n}\n\nint main(){\n int N,M;\n while(cin >> N >> M){\n for(int i = 0; i < N; i++){\n cin >> node[i].a >> node[i].b;\n }\n sort(node,node+N,cmp);\n int idx = 0;\n int sum = 0;\n priority_queue<int> q;\n for(int i = 1; i <= M; i++){\n while(node[idx].a <= i && idx < N){\n q.push(node[idx].b);\n idx++;\n }\n if(!q.empty()){\n sum+=q.top();\n q.pop();\n }\n }\n cout << sum << endl;\n }\n return 0;\n}\n\n"
},
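The greedy in main.cpp sorts jobs by how soon they become doable, then on each of the M days pushes every newly available reward into a max-heap and takes the largest. Python's `heapq` is a min-heap, so the usual trick is to negate rewards; a self-contained sketch of the same strategy (names and the tiny test case are illustrative):

```python
import heapq

def max_reward(jobs, m):
    """On each day d in 1..m, among jobs needing <= d days,
    take the unscheduled job with the largest reward."""
    jobs = sorted(jobs)                 # (days_needed, reward), ascending days
    heap, idx, total = [], 0, 0
    for day in range(1, m + 1):
        while idx < len(jobs) and jobs[idx][0] <= day:
            heapq.heappush(heap, -jobs[idx][1])   # negate for a max-heap
            idx += 1
        if heap:
            total -= heapq.heappop(heap)          # undo the negation
    return total

print(max_reward([(4, 3), (4, 1), (2, 2)], 4))    # 5: reward 2 on day 2, 3 on day 4
```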
{
"alpha_fraction": 0.7272727489471436,
"alphanum_fraction": 0.7272727489471436,
"avg_line_length": 9,
"blob_id": "e0fdee24bb82127e42c7e33fa1b9eb9f1c759d10",
"content_id": "ef892bf12e963f176a9d29adade4afa56062ae48",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 26,
"license_type": "no_license",
"max_line_length": 9,
"num_lines": 2,
"path": "/编程开发/后端/Laravel/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# Laravel\nLaravel笔记 \n"
},
{
"alpha_fraction": 0.6290463805198669,
"alphanum_fraction": 0.6307961344718933,
"avg_line_length": 23.319149017333984,
"blob_id": "8d40183690751a6504d0a2cfe642e3060fe88b3c",
"content_id": "f17ef488eb9df939c5f860552c87fce6da35c797",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1351,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 47,
"path": "/数据结构/树/红黑树/c/term1/rb.h",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#ifndef __RB_H__\n#define __RB_H__\n/**\n * 红黑树头文件\n **/\n#include<stdio.h>\n#include<stdlib.h>\n\n//颜色\ntypedef enum{\n RED = 0,\n BLACK = 1\n}Color;\n\n//红黑树节点\ntypedef struct Node{\n int key; //键\n int val; //值\n Color color; //颜色\n struct Node *left; //左子节点\n struct Node *right; //右子节点\n struct Node *parent;//父节点\n}Node,*node;\ntypedef node* Tree;\n//判断是否是红节点\n#define isRed(p) ((p) != NULL && (p)->color == RED)\n//判断是否为黑节点\n#define isBlack(p) ((p) != NULL && (p)->color == BLACK)\n//父节点\n#define parent(p) ((p)->parent)\n//祖父节点\n#define grandpa(p) (parent(parent(p)))\n//叔父节点\n#define uncle(p) (grandpa(p)->left == parent(p) ? grandpa(p)->right : grandpa(p)->left)\n\nnode rotate_left(node root); //左旋\nnode rotate_right(node root); //右旋\nint search(Tree tree,int key); //查找key\nvoid insert(Tree tree,int key,int val);\nvoid adjust(Tree tree,node root); //调整指定节点\nvoid delete(Tree tree,int key); //删除键\nvoid deleteMin(Tree tree,int *key,int *val); //删除最小子节点并返回键值\nvoid deleteMax(Tree tree,int *key,int *val); //删除最大子节点并返回键值\nvoid getNodeInfo(node root,char* prefix); //获取节点信息\nvoid preOrderVisit(node root); //先序遍历\n\n#endif\n"
},
{
"alpha_fraction": 0.7235772609710693,
"alphanum_fraction": 0.7235772609710693,
"avg_line_length": 23.600000381469727,
"blob_id": "6512c1f6a7a86b87c0be21932d749cae7322c1eb",
"content_id": "723ce13e7e45f510e6147e4b1ef4a47c84dd19a8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 131,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 5,
"path": "/编程开发/数据库/mysql/mysql修改密码.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# mysql修改密码\n```\nalter user 'root'@'localhost' identified with mysql_native_password by '123456';\nflush privileges;\n```\n"
},
{
"alpha_fraction": 0.5341880321502686,
"alphanum_fraction": 0.5341880321502686,
"avg_line_length": 18.5,
"blob_id": "00aa32b051b1b3fb29ef718c7cba3d29e0d516a9",
"content_id": "5a2838ed7bdda6bb9fdf66ad5f15593c4169e7ba",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 584,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 24,
"path": "/编程开发/后端/Wordpress/函数/get_tags.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# get_tags \n**原型**: \n`get_tags(string|array $args = '')` \n检索遍历所有文章标签. \n**参数**: \n```\n$args (string|array) (Optional) 遍历标签时用到的参数,默认为''\n```\n**返回值**: \n(WP_Term[]|int) 关于'post_tag'条目对象的$tags数组或其中的一个计数(这半边不是特别理解) \n**实现**: \n```php\nfunction get_tags($tags = '') {\n $tags = get_terms('post_tag',$args);\n\n if( empty($tags) ) {\n $return = array();\n return $return;\n }\n\n $tags = apply_filters( 'get_tags', $tags, $args );\n return $tags;\n}\n```\n"
},
{
"alpha_fraction": 0.5939741730690002,
"alphanum_fraction": 0.6097561120986938,
"avg_line_length": 19.47058868408203,
"blob_id": "d16a7ee639dd6ea77ac3d67a04fcf6597c3f7c4b",
"content_id": "1b404c9a03ded54f6defa5cd1c0544ff39eff168",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1525,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 34,
"path": "/外语/韩语/2019-09-03.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "**--(으)ㄹ까봐 (걱정이다)**: \n接在动词上,形容词词干上表示担心发生什么事. \n翻译成\"怕-----\" \n(걱정이다)---担心 \n例: \n1. 오늘 늦을 까봐 택시를 타고 왔어요. \n因为怕迟到,坐出租车来的. \n2. 지영씨 생일을 잊어버릴 까봐 핸드폰에 메모해 놓았어요. \n因为怕忘记志永的生日,所以记在手机上了. \n3. 어머님께서 걱정하실 까봐 일찍 졸아왔어요. \n怕妈妈担心,所以早点回来了. \n4. 내일 눈이 내릴 까봐 걱정이에요. \n担心明天下雪. \n5. 감기에 걸릴 까봐 코트까지 입었어요. \n因为害怕感冒,连外套也穿了. \n6. 교통사고를 칠 까봐 술을 안 마셨어요. \n怕出交通事故所以没喝酒. \n7. 오늘 손님이 올 까봐 집을 지키고 있었어요. \n今天怕来客人,所以一直守在家里. \n\n**면서(요)**: \n接在动词,形容词,名次+이다后边表示自己已知的内容向别人确认 \n翻译成:\"据说是....\",\"听说是....\",接法是接在简体终结词尾上. \n动词:ㄴ 다 면서요/는다 면서요. \n形容词:다 면서요. \n名词:(이)라 면서요. \n例句: \n1. 지영씨가 내일 한국에 간다 면서요. \n据说志永明天去韩国. \n2. 한국은 이혼율이 높다 면서 한국에 시집가요? \n3. 문수씨,저녁에 술 한잔 해요.와이프도 집에 없다 면서요. \n文洙,听说你老婆晚上不在家,晚上喝酒. \n4. 내일 눈이 내릴거라 면서요. \n据说明天要下雨. "
},
{
"alpha_fraction": 0.8181818127632141,
"alphanum_fraction": 0.8181818127632141,
"avg_line_length": 10,
"blob_id": "6710fefa3ba4ee39169000bcc568a65cf6826673",
"content_id": "bd9d061d4bda56d3475d596ff0e351d9af06ea03",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 26,
"license_type": "no_license",
"max_line_length": 10,
"num_lines": 2,
"path": "/编程开发/后端/thinkphp/README.md",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "# thinkphp\nThinkphp笔记\n"
},
{
"alpha_fraction": 0.5026286244392395,
"alphanum_fraction": 0.5035673975944519,
"avg_line_length": 24.854368209838867,
"blob_id": "f5bf2dbb72ab408e994b1d9dbaeb0772d0114f3b",
"content_id": "7480c324179c623ba6001323f6d6d304e3da8979",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 5566,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 206,
"path": "/数据结构/树/红黑树/c/term1/rb.c",
"repo_name": "ixysoft/notes",
"src_encoding": "UTF-8",
"text": "#include \"rb.h\"\nstatic void _insert(Tree tree,node* proot,int key,int val);\n\nnode rotate_left(node root){\n if(root == NULL) return NULL;\n if(root->right == NULL) return root; //右节点为空\n node right = root->right;\n root->right = right->left;\n right->left = root;\n\n if(root->parent != NULL){ //存在父节点\n if(root->parent->left == root){\n root->parent->left = right;\n }else{\n root->parent->right = right;\n }\n }\n\n if(root->right) root->right->parent = root;\n node tmp = root->parent;\n root->parent = right;\n right->parent = tmp;\n \n return right;\n}\n\nnode rotate_right(node root){\n if(root == NULL) return NULL;\n if(root->left == NULL) return root; //左节点为空\n node left = root->left;\n root->left = left->right;\n left->right = root;\n\n if(root->parent != NULL){\n if(root->parent->left == root){\n root->parent->left = left;\n }else{\n root->parent->right = left;\n }\n }\n\n if(root->left) root->left->parent = root;\n node tmp = root->parent;\n root->parent = left; //这里root的父节点应该是left节点而不是left的父节点,rotate_left同理\n left->parent = tmp;\n \n return left;\n}\n\nvoid adjust(Tree tree,node root){\n if(root == NULL) return;\n if(parent(root) == NULL){ //父节点为空\n root->color = BLACK; //世界没有你想想的那么好,记得加上黑色\n *tree = root;\n return;\n }\n if(isBlack(root) || isBlack(parent(root))) return; //父节点为黑色\n\n if(isRed(uncle(root))){\n parent(root)->color = uncle(root)->color = BLACK;\n grandpa(root)->color = RED;\n //getNodeInfo(grandpa(root),\"DEBUG \");\n adjust(tree,grandpa(root));\n return;\n }else{\n node pa = parent(root);\n node ga = grandpa(root);\n pa->color = BLACK;\n ga->color = RED;\n if(ga->left == pa){ //左子树\n if(pa->right == root){\n rotate_left(pa);\n }\n ga = rotate_right(ga);\n adjust(tree,ga);\n }else{\n if(pa->left == root){\n rotate_right(pa);\n }\n ga = rotate_left(ga);\n adjust(tree,ga);\n }\n }\n}\n\nint search(Tree tree,int key){\n node root = *tree;\n if(root == NULL) return -1;\n if(root->key == key) return root->val;\n else if(root->key > key) return search(&(root->left),key);\n else return search(&(root->right),key);\n}\n\nstatic void _insert(Tree tree,node *proot,int key,int val){\n node root = *proot;\n if(root == NULL){\n root = (node)malloc(sizeof(Node));\n root->left = root->right = root->parent = NULL;\n root->key = key;\n root->val = val;\n root->color = BLACK;\n if(*tree == NULL) *tree = root;\n *proot = root;\n }else if(root->key == key){\n root->val = val;\n }else if(root->key > key){\n _insert(tree,&(root->left),key,val);\n if(root->left != NULL && root->left->parent == NULL){\n root->left->color = RED;\n root->left->parent = root;\n adjust(tree,root->left);\n }\n }else{\n _insert(tree,&(root->right),key,val);\n if(root->right != NULL && root->right->parent == NULL){\n root->right->color = RED;\n root->right->parent = root;\n adjust(tree,root->right);\n }\n }\n}\n\n//插入键值对\nvoid insert(Tree tree,int key,int val){\n _insert(tree,tree,key,val);\n}\n\nvoid getNodeInfo(node root,char* prefix){\n if(prefix == NULL) prefix = \"\";\n if(root == NULL){\n printf(\"%s%p\\n\",prefix,root);\n }else{\n printf(\"%s[%d]=>%d[%s]{%p=>%p}\\n\",prefix,root->key,root->val,isRed(root)?\"red\":\"black\",root,root->parent);\n }\n}\n\nvoid preOrderVisit(node root){\n if(root == NULL) return;\n getNodeInfo(root,NULL);\n preOrderVisit(root->left);\n preOrderVisit(root->right);\n}\n\n\n//删除最小节点并返回其属性值\nvoid deleteMin(Tree tree,int *key,int *val){\n node root = *tree;\n *key = 0;\n *val = 0;\n if(root == NULL) return;\n if(root->left == NULL){\n *key = root->key;\n *val = 
root->val;\n free(root);\n *tree = NULL;\n }else{\n deleteMin(&(root->left),key,val); //递归删除左节点\n }\n}\n\n//删除最大节点并返回其属性值\nvoid deleteMax(Tree tree,int *key,int *val){\n node root = *tree;\n *key = 0;\n *val = 0;\n if(root == NULL) return;\n if(root->right == NULL){\n *key = root->key;\n *val = root->val;\n free(root);\n *tree = NULL;\n }else{\n deleteMax(&(root->right),key,val);\n }\n}\n\nvoid delete(Tree tree,int key){\n node root = *tree;\n if(root == NULL) return;\n if(root->key == key){\n int key,val;\n if(root->left != NULL && root->right != NULL){ //选取左节点\n deleteMax(&(root->left),&key,&val);\n root->key = key;\n root->val = val;\n }else{\n *tree = NULL;\n if(root->left){ //左节点存在\n *tree = root->left;\n }else if(root->right){ //右节点存在\n *tree = root->right;\n }\n free(root);\n adjust(tree,*tree);\n }\n if(*tree != NULL){\n root->key = key;\n root->val = val;\n *tree = root;\n }\n }else if(root->key > key){\n delete(&(root->left),key);\n }else{\n delete(&(root->right),key);\n }\n}\n"
}
] | 251 |
rotoro-cloud/webapp
|
https://github.com/rotoro-cloud/webapp
|
036ff6317b31a5dce3a0ee1b0697fa4dbbb76ae1
|
a58f24cb9cf8785b2b60f896e67ef326c8375ad6
|
c6d041a76935d1656e3450ffef80378207cf6b9c
|
refs/heads/master
| 2023-06-04T15:03:28.513466 | 2021-06-30T04:45:46 | 2021-06-30T04:45:46 | 317,531,633 | 3 | 4 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5983935594558716,
"alphanum_fraction": 0.6144578456878662,
"avg_line_length": 16.172412872314453,
"blob_id": "62ed6bfd8e12a32dfeec8f96c65b11c5a434e9ed",
"content_id": "a89bc62b8474ddc585c7ec4e8718e32c09100a3b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 498,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 29,
"path": "/script/build.sh",
"repo_name": "rotoro-cloud/webapp",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env sh\n\nset -ev\n\nexport BUILD_VERSION=\"0.0.1\"\nexport BUILD_DATE=`date +%Y-%m-%dT%T%z`\n\nSCRIPT_DIR=$(dirname \"$0\")\n\necho ${SCRIPT_DIR}\n\nif [ -z \"$COMMIT\" ] ; then\n echo \"Cannot find COMMIT env var\"\n exit 1\nfi\n\n$(docker -v >/dev/null 2>&1)\nif [ $? -eq 0 ]; then\n DOCKER_CMD=docker\nelse\n DOCKER_CMD=`sudo docker`\nfi\n\n\n$DOCKER_CMD build \\\n --build-arg BUILD_VERSION=$BUILD_VERSION \\\n --build-arg BUILD_DATE=$BUILD_DATE \\\n --build-arg COMMIT=$COMMIT \\\n -t ${REPO}:${COMMIT} .\n"
},
{
"alpha_fraction": 0.5775076150894165,
"alphanum_fraction": 0.6018236875534058,
"avg_line_length": 19.5625,
"blob_id": "6691ff4d85e0177c1fedf46de816293767cd39d5",
"content_id": "6e20f9600a2995f1a9684d544a5b8258d5ad5697",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 329,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 16,
"path": "/app.py",
"repo_name": "rotoro-cloud/webapp",
"src_encoding": "UTF-8",
"text": "import os\nimport random\nfrom flask import Flask\napp = Flask(__name__)\n\[email protected](\"/\")\ndef main():\n return \"Hello!\"\n\[email protected]('/hello there')\ndef hello():\n phrase = random.choice([\"Good day\",\"Glad to see you\",\"Hello\"])\n return phrase + ', my friend.'\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=5000)\n"
},
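The Flask app above can be exercised without running a server, via the framework's built-in test client. The sketch below assumes the file is importable as module `app` (the module name is an assumption from the listing's path, not stated in the repo):

```python
from app import app            # module name assumed from the record's path

client = app.test_client()
assert client.get("/").data == b"Hello!"

# The second route is registered with a literal space, which is why the
# README in the next record reaches it as /hello%20there; the test client
# decodes the percent-encoding back into the matching path.
resp = client.get("/hello%20there")
print(resp.status_code, resp.data.decode())   # 200 and '<greeting>, my friend.'
```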
{
"alpha_fraction": 0.6177130341529846,
"alphanum_fraction": 0.6468609571456909,
"avg_line_length": 20.238094329833984,
"blob_id": "961267552bbd37346039bd791db3b0f402e1a14d",
"content_id": "6ad2139afe521c95e36421246d350c1fd482ac60",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 987,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 42,
"path": "/README.md",
"repo_name": "rotoro-cloud/webapp",
"src_encoding": "UTF-8",
"text": "# Simple Web Application\n\nThis simple web application using [Python Flask](http://flask.pocoo.org/) use for education purposes.\n \n There are the steps required to get it working on linux.\n \n - Установи зависимости ОС\n - Установи зависимости приложения\n - Разверни исходный код приложения\n - Запусти веб-сервер\n \n## 1. Install OS dependencies\n \n Python and its dependencies\n\n apt-get install -y python3 python3-pip\n\n \n## 2. Install app dependencies\n \n Install Flask framework\n \n pip3 install flask\n\n## 3. Deploy the app source code\n\n Copy source to /opt dir\n\n cp app.py /opt\n \n## 4. Start the webserver\n\nStart web command\n\n FLASK_APP=/opt/app.py flask run --host=0.0.0.0 --port=5000\n \n## 6. Test\n\nOpen a browser and try URL\n\n http://<IP>:5000 => Hello!\n http://<IP>:5000/hello%20there => Hello, my friend.\n"
}
] | 3 |
stroke6463/DKTracker
|
https://github.com/stroke6463/DKTracker
|
70d1f3a2c2c7e19e479506864a9f1d2ee85fa753
|
56e07ce4475ad0e807e70605709ae42bb71fd74b
|
aa5ed4dc90c2a1a099330c1d73e0951c44b40671
|
refs/heads/master
| 2020-06-12T02:22:22.902430 | 2019-07-07T21:46:17 | 2019-07-07T21:46:17 | 194,165,995 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.75,
"alphanum_fraction": 0.7884615659713745,
"avg_line_length": 51,
"blob_id": "8c1f7a8f175edb896bd4f613001dd746e7678167",
"content_id": "105eb1801006a4d6f104c1404f5d5159acf32f53",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 52,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 1,
"path": "/README.md",
"repo_name": "stroke6463/DKTracker",
"src_encoding": "UTF-8",
"text": "Run the script to start the application on port 80.\n"
},
{
"alpha_fraction": 0.5862069129943848,
"alphanum_fraction": 0.7241379022598267,
"avg_line_length": 28,
"blob_id": "22baa46b68df3392b16b872c485fccfc103c67cf",
"content_id": "b6fcbd8a88c99c2c04991d1d356986b48215f131",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 58,
"license_type": "no_license",
"max_line_length": 28,
"num_lines": 2,
"path": "/Docker.sh",
"repo_name": "stroke6463/DKTracker",
"src_encoding": "UTF-8",
"text": "export TZ='America/New_York'\ngunicorn -b 0.0.0.0:8000 app\n"
},
{
"alpha_fraction": 0.6987740993499756,
"alphanum_fraction": 0.7267950773239136,
"avg_line_length": 34.6875,
"blob_id": "d3619acb4e9fb064e0449a5e86d31e517e7deba4",
"content_id": "3154337799f2b461f34a80a6e5ea455f9320f1f6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 571,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 16,
"path": "/dockerMake.sh",
"repo_name": "stroke6463/DKTracker",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n#Clear old containers\ndocker kill $(docker ps -q)\ndocker rm $(docker ps --filter=status=exited --filter=status=created -q)\ndocker rmi $(docker images -a -q)\n\n# Quick build and run. Change the port mapping below for a change.\ndocker build --tag donkey_dock . \n# remove the detach arg if you'd like it to run interactive.\ndocker run --detach -p 127.0.0.1:8000:8000 donkey_dock\n# list running docker containers--should see it listed.\ndocker ps\n# allow 2s for the server to start. This needs to be fixed.\nsleep 2\ncurl -s -o /dev/null -w \"%{http_code}\" localhost\n"
},
{
"alpha_fraction": 0.6666666865348816,
"alphanum_fraction": 0.719298243522644,
"avg_line_length": 18,
"blob_id": "34b1d6d409c59a683927c45c45ea652b6172c99c",
"content_id": "462f3bc2ff3d33278ac7acb0c757c9efb1791ac3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 114,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 6,
"path": "/Dockerfile",
"repo_name": "stroke6463/DKTracker",
"src_encoding": "UTF-8",
"text": "FROM python:3.6\nADD . /app\nWORKDIR /app\nRUN pip install -r requirements.txt\nEXPOSE 8000\nCMD [\"bash\", \"Docker.sh\"]\n"
},
{
"alpha_fraction": 0.6175161004066467,
"alphanum_fraction": 0.6214745044708252,
"avg_line_length": 31.59677505493164,
"blob_id": "471a0ab281c2fbfd239834154f0cfa2c508fbea8",
"content_id": "dbc51c6a62caeffc74c7e44f6c7fba59599db65b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2021,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 62,
"path": "/app.py",
"repo_name": "stroke6463/DKTracker",
"src_encoding": "UTF-8",
"text": "\"\"\" main flask appliation routes \"\"\"\nfrom flask import Flask, render_template, request, redirect, url_for, jsonify\nimport db, time, json\napplication = Flask(__name__)\n\[email protected]('/')\ndef donkey():\n \"\"\" return the route route \"\"\"\n return render_template('index.html')\n\n\[email protected]('/drinks')\ndef show_drinks():\n \"\"\" Pass the results of the database query to the template \"\"\"\n drinks = db.get_drinks()\n for drink in drinks:\n drink['drinkTime'] = drink['drinkTime'].strftime('%Y-%m-%d %H:%M')\n return render_template('drinks.html', drinks=drinks)\n\n\[email protected]('/new', methods=['GET', 'POST'])\ndef new_drink():\n \"\"\" Insert a new DK \"\"\"\n if request.method == 'GET':\n # add constraints to possible input options\n drink_list = [\"Big DK\", \"Miller\", \"Clam\", \"Mixed Drink\", \"normal DK\", \"Some Beer\"]\n return render_template('new.html', drink_list=drink_list)\n elif request.headers['Content-Type'] == 'application/json':\n try:\n result = request.json\n data = json.loads(result)\n except ValueError:\n return json.dumps({\"status\": \"Incorrect data format.\"})\n try:\n db.insert_drink(data['drinkType'])\n return json.dumps({\"status\": \"ok\"})\n except (KeyError, ValueError):\n return json.dumps({\"status\": \"failed\"})\n else:\n user_input = request.form.copy()\n db.insert_drink(user_input['drinkType'])\n time.sleep(2)\n return redirect(\"/new\", code=302)\n\n\[email protected](\"/donkeydumps\")\ndef donkey_dumps():\n \"\"\" Return a JSON dumps from the DB's contents \"\"\"\n drinks = db.get_drinks()\n response = jsonify(drinks)\n response.headers.add('Access-Control-Allow-Origin', '*')\n return response\n\n\[email protected](Exception)\ndef exception_handler(error):\n \"\"\" route to catch server errors and redirect to home \"\"\"\n return redirect(url_for('donkey'))\n\n\nif __name__ == '__main__':\n application.run(host='0.0.0.0')\n"
},
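The `/new` route above accepts either an HTML form or a JSON body whose `drinkType` key is inserted into the database. A quick way to poke the JSON branch, assuming a local instance on Flask's default development port (host and port are assumptions, not from the repo):

```python
import json
import urllib.request

req = urllib.request.Request(
    "http://127.0.0.1:5000/new",                        # assumed dev address
    data=json.dumps({"drinkType": "normal DK"}).encode(),
    headers={"Content-Type": "application/json"},        # the branch keys on this
)
with urllib.request.urlopen(req) as resp:
    print(resp.read().decode())   # {"status": "ok"} on success
```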
{
"alpha_fraction": 0.6225582957267761,
"alphanum_fraction": 0.6307498216629028,
"avg_line_length": 26.36206817626953,
"blob_id": "c244013f9415d43dda24fd864528f57d23d5ed0d",
"content_id": "e906d47d76c24b456822e3387c684708667cdf4e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1587,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 58,
"path": "/db.py",
"repo_name": "stroke6463/DKTracker",
"src_encoding": "UTF-8",
"text": "\"\"\" Handles database insertions \"\"\"\nfrom datetime import datetime\nimport psycopg2\n\nconn_str = \"postgres://eriwbisk:[email protected]:5432/eriwbisk\"\n\n\ndef connect_to_db(conn_str):\n conn = psycopg2.connect(conn_str)\n # conn.closed returns 0 if connection established\n if conn.closed == 0:\n print(\"Connection established.\")\n return conn\n\nconn = connect_to_db(conn_str)\n\n\ndef test_connection():\n \"\"\" Test if connection is still alive. If no, reestablish connection \"\"\"\n global conn_str, conn\n try:\n with conn.cursor() as cursor:\n cursor.execute(\"SELECT 1;\")\n except psycopg2.OperationalError:\n print(\"Connection was closed.\")\n conn = connect_to_db(conn_str)\n\n\ndef initialize():\n \"\"\" initializaiton function \"\"\"\n test_connection()\n with conn.cursor() as cursor:\n cursor.execute(\"CREATE TABLE IF NOT EXISTS donkey(id SERIAL UNIQUE, \\\n drinkType TEXT, drinkTime TIMESTAMP)\")\n conn.commit()\n\n\ndef insert_drink(drinkType):\n \"\"\" Insert drink into DB \"\"\"\n test_connection()\n with conn.cursor() as cursor:\n cursor.execute(\"INSERT INTO donkey(drinkType, drinkTime) VALUES (%s, %s);\",\n (drinkType, datetime.now()))\n conn.commit()\n\n\ndef get_drinks():\n \"\"\" get list of drinks fromm DB \"\"\"\n test_connection()\n with conn.cursor() as cursor:\n cursor.execute(\"SELECT * FROM donkey;\")\n drinks = list()\n for result in cursor:\n drinks.append({'id' : result[0], 'drinkType' : result[1], 'drinkTime' : result[2]})\n return drinks\n\n\ninitialize()\n"
}
] | 6 |
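Note: db.py above keeps a single module-level psycopg2 connection alive by probing it with "SELECT 1;" before every query and reconnecting when the probe fails. A minimal sketch of that keepalive pattern, assuming psycopg2 is installed and `dsn` is a placeholder connection string:

import psycopg2

def ensure_connection(conn, dsn):
    # Probe the connection with a trivial query; reconnect if it was dropped.
    try:
        with conn.cursor() as cur:
            cur.execute("SELECT 1;")
        return conn
    except (psycopg2.OperationalError, psycopg2.InterfaceError):
        return psycopg2.connect(dsn)

Returning the (possibly new) connection avoids the `global` statement the original module relies on.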
UPKIRAT/106-Project
|
https://github.com/UPKIRAT/106-Project
|
0b9e2554cbc05e8a29c476b22ac0257792e2434a
|
43753eb4403cedb6514e92be646cbff1d69b20d1
|
3180f7417dae3e119f8c4b48aa85fe93fde08937
|
refs/heads/main
| 2022-12-25T10:28:47.766381 | 2020-10-06T10:20:00 | 2020-10-06T10:20:00 | 301,688,302 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6081229448318481,
"alphanum_fraction": 0.6103183031082153,
"avg_line_length": 26.46875,
"blob_id": "760e91938f719a7378e2e3801f8a9af7582308b0",
"content_id": "669c53c5d81b12338c967b0556a252e05724cf5d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 911,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 32,
"path": "/sg_cupsofcoffee.py",
"repo_name": "UPKIRAT/106-Project",
"src_encoding": "UTF-8",
"text": "import pandas as pd\r\nimport plotly.express as px\r\nimport numpy as np\r\nimport csv\r\n\r\ndef get_data_source(data_path):\r\n coffee_cups = []\r\n sleep = []\r\n with open(data_path) as csv_file:\r\n csv_reader = csv.DictReader(csv_file)\r\n for row in csv_reader:\r\n coffee_cups.append(float(row['coffee']))\r\n sleep.append(float(row['sleep']))\r\n \r\n return{'x': coffee_cups, 'y': sleep}\r\n\r\ndef find_corelation(data_source):\r\n corelation = np.corrcoef(data_source['x'], data_source['y'])\r\n print(corelation[0,1])\r\n\r\ndef plotGraph(data_source):\r\n df = pd.read_csv('cups of coffee vs hours of sleep.csv')\r\n fig = px.scatter(df, x = 'coffee', y = 'sleep')\r\n fig.show()\r\n\r\ndef setup():\r\n data_path = 'cups of coffee vs hours of sleep.csv'\r\n data_source = get_data_source(data_path)\r\n find_corelation(data_source)\r\n plotGraph(data_source)\r\n\r\nsetup()\r\n"
},
{
"alpha_fraction": 0.614130437374115,
"alphanum_fraction": 0.616304337978363,
"avg_line_length": 26.75,
"blob_id": "4e5d64ebeae0a8361c1b360bd7d509901916d563",
"content_id": "914db6166d45b35302429d8c0ba49851de5e916c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 920,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 32,
"path": "/sg_students_present.py",
"repo_name": "UPKIRAT/106-Project",
"src_encoding": "UTF-8",
"text": "import pandas as pd\r\nimport plotly.express as px\r\nimport numpy as np\r\nimport csv\r\n\r\ndef get_data_source(data_path):\r\n marks = []\r\n days_present = []\r\n with open(data_path) as csv_file:\r\n csv_reader = csv.DictReader(csv_file)\r\n for row in csv_reader:\r\n marks.append(float(row['marks']))\r\n days_present.append(float(row['days_present']))\r\n \r\n return{'x': marks, 'y': days_present}\r\n\r\ndef find_corelation(data_source):\r\n corelation = np.corrcoef(data_source['x'], data_source['y'])\r\n print(corelation[0,1])\r\n\r\ndef plotGraph(data_source):\r\n df = pd.read_csv('Student Marks vs Days Present.csv')\r\n fig = px.scatter(df, x = 'marks', y = 'days_present')\r\n fig.show()\r\n\r\ndef setup():\r\n data_path = 'Student Marks vs Days Present.csv'\r\n data_source = get_data_source(data_path)\r\n find_corelation(data_source)\r\n plotGraph(data_source)\r\n\r\nsetup()\r\n"
}
] | 2 |
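Note: both scripts above measure the strength of a linear relationship with np.corrcoef, which returns a 2x2 correlation matrix whose off-diagonal entry [0, 1] is the Pearson coefficient. A self-contained sketch with made-up sample values:

import numpy as np

coffee = [1.0, 2.0, 3.0, 4.0, 5.0]
sleep = [8.0, 7.5, 6.5, 6.0, 5.0]
# np.corrcoef returns the full correlation matrix; [0, 1] is r(coffee, sleep)
r = np.corrcoef(coffee, sleep)[0, 1]
print(round(r, 3))  # close to -1.0 for this strongly decreasing sample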
henokyemam/trains
|
https://github.com/henokyemam/trains
|
8189348ba7f0936168123589a28dfe1fe4bace17
|
8acb236b3344de7f5cbd65cb2e01287c1b74cbad
|
98f54c378cbc5084d4c80951401ed15842edce92
|
refs/heads/master
| 2021-03-02T07:35:54.656688 | 2020-03-05T17:56:51 | 2020-03-05T17:56:51 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6485677361488342,
"alphanum_fraction": 0.6532706022262573,
"avg_line_length": 31.041095733642578,
"blob_id": "4dc8395941988b6493fe896b036394bdb92fb9f8",
"content_id": "3b5cfce3caebd8bca15871a48be017ee293e002a",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2339,
"license_type": "permissive",
"max_line_length": 113,
"num_lines": 73,
"path": "/examples/execute_jupyter_notebook_server.py",
"repo_name": "henokyemam/trains",
"src_encoding": "UTF-8",
"text": "import os\nimport sys\nimport subprocess\nfrom copy import deepcopy\nimport socket\nfrom tempfile import mkstemp\n# make sure we have jupter in the auto requirements\nimport jupyter\nfrom trains import Task\n\n\n# set default docker image, with network configuration\nos.environ['TRAINS_DOCKER_IMAGE'] = 'nvidia/cuda --network host'\n\n# initialize TRAINS\ntask = Task.init(project_name='examples', task_name='Remote Jupyter NoteBook')\n\n# get rid of all the runtime TRAINS\npreserve = ('TRAINS_API_HOST', 'TRAINS_WEB_HOST', 'TRAINS_FILES_HOST', 'TRAINS_CONFIG_FILE',\n 'TRAINS_API_ACCESS_KEY', 'TRAINS_API_SECRET_KEY', 'TRAINS_API_HOST_VERIFY_CERT')\n\n# setup os environment\nenv = deepcopy(os.environ)\nfor key in os.environ:\n if key.startswith('TRAINS') and key not in preserve:\n env.pop(key, None)\n\n# Add jupyter server base folder\nparam = {\n 'jupyter_server_base_directory': '',\n}\ntask.connect(param)\n\n# execute jupyter notebook\nfd, local_filename = mkstemp()\ncwd = os.path.expandvars(os.path.expanduser(param['jupyter_server_base_directory'])) \\\n if param['jupyter_server_base_directory'] else os.getcwd()\nprint('Running Jupyter Notebook Server on {} [{}] at {}'.format(socket.gethostname(),\n socket.gethostbyname(socket.gethostname()), cwd))\nprocess = subprocess.Popen([sys.executable, '-m', 'jupyter', 'notebook', '--no-browser', '--allow-root'],\n env=env, stdout=fd, stderr=fd, cwd=cwd)\n\n# print stdout/stderr\nprev_line_count = 0\nprocess_running = True\nwhile process_running:\n process_running = False\n try:\n process.wait(timeout=2.0 if prev_line_count == 0 else 15.0)\n except subprocess.TimeoutExpired:\n process_running = True\n\n with open(local_filename, \"rt\") as f:\n # read new lines\n new_lines = f.readlines()\n if not new_lines:\n continue\n output = ''.join(new_lines)\n print(output)\n # update task comment with jupyter notebook server links\n if prev_line_count == 0:\n task.comment += '\\n' + ''.join(line for line in new_lines if 'http://' in line or 'https://' in line)\n prev_line_count += len(new_lines)\n\n os.lseek(fd, 0, 0)\n os.ftruncate(fd, 0)\n\n# cleanup\nos.close(fd)\ntry:\n os.unlink(local_filename)\nexcept:\n pass\n"
}
] | 1 |
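Note: the example above tails a child process by pointing its stdout/stderr at a temp file, polling wait() with a timeout, printing any new lines, then rewinding and truncating the file with os.lseek/os.ftruncate. A minimal sketch of the redirect-and-read step, using a stand-in child command:

import os
import subprocess
import sys
from tempfile import mkstemp

fd, path = mkstemp()
# The child writes into the temp file descriptor instead of our terminal
proc = subprocess.Popen([sys.executable, '-c', "print('hello')"],
                        stdout=fd, stderr=fd)
proc.wait()
with open(path, 'rt') as f:
    print(f.read(), end='')
os.close(fd)
os.unlink(path)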
xALg8Fv982cJHU5d/TID
|
https://github.com/xALg8Fv982cJHU5d/TID
|
e06f5e5cf63efb54c1e2d0f93271339677ee1e91
|
b43fde4d10fcccb001a79478c2184881ac88657a
|
ae99e021602ef267f86f717f5e27b7e1e1a51c18
|
refs/heads/master
| 2023-07-11T01:28:30.725071 | 2021-08-08T05:42:03 | 2021-08-08T05:42:03 | 393,862,733 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.46041277050971985,
"alphanum_fraction": 0.4919324517250061,
"avg_line_length": 32.73417663574219,
"blob_id": "1bac71d41178f69338b3a26ccdd56524a09e375e",
"content_id": "919da11e8b7642f9edd3454d9a33d33d5e75c48b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5330,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 158,
"path": "/TID.py",
"repo_name": "xALg8Fv982cJHU5d/TID",
"src_encoding": "UTF-8",
"text": "from PIL import Image, ImageDraw, ImageFont\nimport tkinter as tk\nfrom os import system\nfrom sys import platform \nfrom math import ceil\nfrom pynput import keyboard\nfrom io import BytesIO\nimport win32clipboard\n\n\ndim = (1920, 1080)\npadx = 100\npady = 100\nfonts = {'C64 Pro' : 'C64_Pro_Mono-STYLE.ttf', 'TT2020 Base' : 'TT2020Base-Regular.ttf', 'Average Mono' : 'AverageMono.ttf',\n 'Consolas' : 'Consolas.ttf', 'Adobe Source Code Scans' : 'SourceCodePro-Regular.ttf',\n 'Liberation Mono' : 'LiberationMono-Regular.ttf', 'Times New Roman' : 'times.ttf', \n 'Comic Sans MS' : 'ComicSansMS3.ttf', 'Papyrus' : 'PAPYRUS.TTF'}\n\nv_anchor = {'center' : 'mm', 'left' : 'mm'}\ndfont = 'Liberation Mono'\ndfsize = 40\ndwrap = 30\n\ndef mkI():\n with open('text.txt', 'r') as file:\n try: \n ptwrap = int(twrap.get())\n if ptwrap < 0:\n print(\"wrap length too small\")\n ptwrap = dwrap\n except: \n ptwrap = dwrap\n print('invalid wrap length')\n t = ''\n for line in file:\n t = t+line\n t = t.strip()\n if ptwrap:\n nt = ''\n i = 0\n for w in t.split(' '):\n if w.find('\\n') == -1:\n i += len(w) + 1\n else: \n i = 0\n if i >= ptwrap:\n nt = nt + '\\n' + w + ' '\n i = len(w)+1\n else: \n nt = nt + w + ' '\n \n t = nt.strip()\n i = Image.new(\"RGB\", dim, (0, 0, 0))\n i_d = ImageDraw.Draw(i)\n \n try: \n pfsize = int(fsize.get())\n if pfsize < 1:\n print('font size too small')\n pfsize = dfsize\n except:\n pfsize = dfsize\n print('font size invalid')\n f = ImageFont.truetype('fonts/'+fonts[s_font.get()], pfsize)\n t_dim = i_d.textbbox((0, 0), t, font = f, anchor = v_anchor[v_align.get()], align =\n v_align.get())\n t_dim = (int(t_dim[2] - t_dim[0]), int(t_dim[3] - t_dim[1]))\n print(t_dim)\n t_dim = (t_dim[0]+padx, t_dim[1]+pady)\n \n i = Image.new(\"RGB\", (t_dim[0], t_dim[1]), (0, 0, 0))\n i_d = ImageDraw.Draw(i)\n i_d.text((t_dim[0]//2, t_dim[1]//2), t, font = f, fill = (255, 255, 255), anchor =\n v_anchor[v_align.get()], align = v_align.get())\n\n i.save('code.png')\n if platform == 'linux':\n try: \n system('xclip -selection clipboard -target image/png -i code.png')\n except: \n print('.. -. ... - .- .-.. .-.. / -..- -.-. .-.. .. .--. / ... - ..- .--. .. -.. .-.-.-')\n elif platform == 'win32':\n output = BytesIO()\n i.convert(\"RGB\").save(output, \"BMP\")\n data = output.getvalue()[14:]\n output.close()\n\t\t\n win32clipboard.OpenClipboard()\n win32clipboard.EmptyClipboard()\n win32clipboard.SetClipboardData(win32clipboard.CF_DIB, data)\n win32clipboard.CloseClipboard()\n else: \n print(\"-.-- --- ..- .----. .-. . / -. --- - / .-. ..- -. -. .. -. --. / .-- .. -. -..\\\n --- .-- ... / --- .-. / .-.. .. -. ..- -..- / .-.. --- .-.. 
.-.-.-\")\n\n if i_show.get():\n i.show()\npk = {keyboard.Key.shift : False}\ndef on_press(key):\n pk[key] = True\n sp = pk[keyboard.Key.shift]\n if key == keyboard.Key.f12 and sp:\n mkI()\ndef on_release(key):\n pk[key] = False\n\n\nroot = tk.Tk()\nframe1 = tk.Frame(root)\ns_font = tk.StringVar(value = dfont)\nv_align = tk.StringVar(value = 'center')\ni_show = tk.BooleanVar(value = False)\nfsize = tk.StringVar(value = f\"{dfsize}\")\ntwrap = tk.StringVar(value = f\"{dwrap}\")\n\nb1 = tk.Button(frame1, text = 'generate', command = mkI)\nr1 = []\ni = 0\nfor x, y in fonts.items():\n i += 1\n r1.append(tk.Radiobutton(frame1, text = x, variable = s_font, value = x))\n\nr2 = []\ni = 0\nfor x, y in v_anchor.items():\n i += 1\n r2.append(tk.Radiobutton(frame1, text = x, variable = v_align, value = x))\nc1 = tk.Checkbutton(frame1, text = 'print', variable = i_show, offvalue = False, onvalue = True)\nl1 = tk.Label(frame1, text = 'font size')\ne1 = tk.Entry(frame1, textvariable = fsize, exportselection = 0, width = 5)\nl2 = tk.Label(frame1, text = 'text wrap')\ne2 = tk.Entry(frame1, textvariable = twrap, exportselection = 0, width = 5)\n\ndef main():\n root.title(\"- . -..- - / - --- / .. -- .- --. .\")\n root.geometry(\"300x350\")\n frame1.place(anchor = 'center', relx = 0.5, rely = 0.5)\n i = 0\n for rb in r1:\n rb.grid(row = i, column = 0, sticky = 'w', padx = 10, pady = 2)\n i += 1\n i = 0\n for rb in r2:\n rb.grid(row = i, column = 1, sticky = 'w', padx = 10, pady = 2)\n i += 1\n c1.grid(row = len(r2)+1, column = 1, sticky = 'w', padx = 10, pady = 2)\n l1.grid(row = len(r2)+2, column = 1, sticky = 's')\n e1.grid(row = len(r2)+3, column = 1, sticky = 'n')\n l2.grid(row = len(r2)+4, column = 1, sticky = 's')\n e2.grid(row = len(r2)+5, column = 1, sticky = 'n')\n b1.grid(row = 100, column = 0, columnspan = 100)\n\n listener = keyboard.Listener(on_press = on_press, on_release = on_release)\n listener.start()\n\n root.mainloop()\n\nmain()\n"
}
] | 1 |
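Note: on win32, TID.py copies the rendered image to the clipboard by saving it as BMP in memory and stripping the 14-byte BITMAPFILEHEADER, which leaves exactly the CF_DIB payload the Windows clipboard expects. A minimal sketch of that step (Windows-only; assumes Pillow and pywin32 are installed):

from io import BytesIO
from PIL import Image
import win32clipboard  # pywin32, Windows-only

img = Image.new('RGB', (64, 64), (0, 0, 0))
buf = BytesIO()
img.convert('RGB').save(buf, 'BMP')
dib = buf.getvalue()[14:]  # drop the 14-byte BMP file header -> raw DIB
buf.close()
win32clipboard.OpenClipboard()
win32clipboard.EmptyClipboard()
win32clipboard.SetClipboardData(win32clipboard.CF_DIB, dib)
win32clipboard.CloseClipboard()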
ayassbgc/nessmado_bot
|
https://github.com/ayassbgc/nessmado_bot
|
7b8727f79da4df31c139492cd455ea31e9bf7c0e
|
ca9a8085a92951036083b5fda7038a0af9e6c7e4
|
3f1e7ec94116b8ed19a7612082a9162eb3a73e2d
|
refs/heads/master
| 2021-03-21T01:14:31.638417 | 2020-08-10T15:44:26 | 2020-08-10T15:44:26 | 247,250,521 | 3 | 1 | null | 2020-03-14T09:50:15 | 2020-03-14T09:52:30 | 2020-03-14T10:00:51 | null |
[
{
"alpha_fraction": 0.5179818868637085,
"alphanum_fraction": 0.5234024524688721,
"avg_line_length": 39.47679138183594,
"blob_id": "68670220a1ac4a3ca7f01fef0875441ba20c0b03",
"content_id": "e5d751700fbfc083b181f629d2da2225fc4505cb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12385,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 237,
"path": "/nessmado_discord_manager.py",
"repo_name": "ayassbgc/nessmado_bot",
"src_encoding": "UTF-8",
"text": "# スクリプト名:nessmado_function.py\n# バージョン:5.01\n# 作成日:2019/03/xx\n# 最終更新日:2019/10/14\n# 作成者:(へっへ)\n# スクリプト概要:\n# |キャラ対策チャンネル(大元)に「質問」から始まるメッセージを投稿すると、\n# |各キャラ別の対策チャンネルに文言をコピーした上で、\n# |大元のキャラ対策チャンネルと雑談チャンネルに周知メッセージを送る。\n\"\"\"更新履歴\n 2019/03/xx ver 3.0?覚えてない。\n オブジェクト指向に沿ってクラス化。\n 2019/07/31 Ver 5.0\n 勇者追加。\n 2019/10/14 Ver 5.1\n バンカズ追加。\n NESS_SKILLクラス考慮。\n\"\"\"\n\n\n# discordAPIモジュール\nfrom discord import message\nfrom discord import client\nfrom discord import channel\n\n# 自作モジュール\nfrom NMconfig import NMConfig\n\n\nclass ChannelManager:\n def __init__(self):\n self.nmconfig = NMConfig()\n self.TOKEN = \"\"\n self.ZATSUDAN_CHANNEL_ID = \"\"\n self.CHARACTER_TAISAKU_ID = \"\" # 「対策」は英語で\"counterplan\"って言うらしいが分かりにくいので\n self.MATCH_CHANNEL_ID = \"\"\n self.TAISAKU_STAMP = \"\"\n self.NESS_SKILL_CHANNEL_ID = \"\"\n self.STARVED_MATCHING = \"\"\n self.MYCHARACTER = \"\"\n\n self.inputConfig()\n\n def inputConfig(self):\n self.TOKEN = self.nmconfig.TOKEN\n self.ZATSUDAN_CHANNEL_ID = self.nmconfig.ZATSUDAN_CHANNEL_ID\n self.CHARACTER_TAISAKU_ID = self.nmconfig.CHARACTER_TAISAKU_ID\n self.MATCH_CHANNEL_ID = self.nmconfig.MATCH_CHANNEL_ID\n self.TAISAKU_STAMP = self.nmconfig.TAISAKU_STAMP\n self.NESS_SKILL_CHANNEL_ID = self.nmconfig.NESS_SKILL_CHANNEL_ID\n self.STARVED_MATCHING = self.nmconfig.STARVED_MATCHING\n self.MYCHARACTER = self.nmconfig.MYCHARACTER\n\n def judgeNameContained(self, client, ch_name, content) -> bool:\n \"\"\"\n キャラクター名について、包括してしまっている名前はいい感じに振り分けしてくれる処理。\n TO:DO本当はさ、もっとスッキリ書けることなんてわかってるんだよ。でもさ、メンドかったんだよ。許してくれな。\n \"\"\"\n if ch_name == 'マリオ':\n if ('ドクター' in content) or ('Dr' in content) or ('dr' in content):\n return False\n elif ch_name == 'ファルコ':\n if 'ファルコン' in content:\n return False\n elif ch_name == 'クッパ':\n if ('ジュニア' in content) or ('Jr' in content) or ('jr' in content):\n return False\n elif ch_name == 'ピット':\n if ('ブラック' in content):\n return False\n elif ch_name == self.MYCHARACTER:\n if self._judgeMyCharacterNameContained(client, ch_name, content):\n return False\n return True\n\n def _judgeMyCharacterNameContained(self, client, ch_name, content) -> bool:\n all_channels = client.get_all_channels()\n for channel in all_channels:\n if channel.name == ch_name:\n continue\n elif (channel.name in content):\n return True\n return False\n\n def judgeFuzzyCharacterName(self, ch_name: str, content: str):\n \"\"\"\n 質問対象のキャラに対して、質問が投下されるべきチャンネルがどれなのかを\n メッセージのキャラ名とキャラクター毎の対策チャンネル名を見比べることで判別している。\n ただ、窓民が質問メッセージを書く際に、キャラクターの名前が微妙にチャンネル名と違っちゃう場合が\n 出てくることが予測される。その名前の差分を力ずくで補完してくれる関数がこいつである。\n TO:DO 本当はさ、もっとスッキリ書けることなんてわかってるんだよ。でもさ、メンドかったんだよ。許してくれな。\n \"\"\"\n\n # ★各キャラ窓へ③\n # |ch_nameがチャンネル名称からキャラ名を抽出したものです。\n # |各キャラ窓のサーバーに適用させる場合、\n # |1. if ch_name == 〜の行のキャラ名をチャンネル名称のキャラ名に合わせる\n # |2. ネス窓ではポケトレは1つのチャンネルで対応しているので、これを分ける\n # | (分けるに当たり、他の関数も変えるといったことは不要なはずです)\n # |3. ネス窓ではMiiファイター用の対策チャンネルを作成していないので、これを作る\n # (作るに当たり、他の関数も変えるといったことは不要なはずです)\n if ch_name in content:\n return True\n if ch_name == \"ドクマリ\":\n if ('ドクター' in content) or ('Dr' in content) or ('dr' in content) or ('医者' in content):\n return True\n if ch_name == \"ロゼッタ&チコ\":\n if ('ロゼチコ' in content) or ('ロゼッタ' in content):\n return True\n if ch_name == \"クッパjr\":\n if ('ジュニア' in content) or ('Jr' in content) or ('jr' in content):\n return True\n if ch_name == \"パックンフラワー\":\n if ('パックン' in content) or ('花' in content):\n return True\n if ch_name == \"ドンキーコング\":\n if ('DK' in content) or ('D.K.' 
in content) or ('D.K' in content) or ('ドンキー' in content) or ('ゴリラ' in content):\n return True\n if ch_name == \"ディディーコング\":\n if ('DD' in content) or ('D.D.' in content) or ('D.D' in content) or ('ディディー' in content) or ('猿' in content):\n return True\n if ch_name == \"キングクルール\":\n if ('クルール' in content) or ('鰐' in content) or ('ワニ' in content):\n return True\n if ch_name == \"ガノンドロフ\":\n if ('ガノン' in content) or ('おじさん' in content):\n return True\n if ch_name == \"ヤングリンク\":\n if ('ヤンリン' in content) or ('こどもリンク' in content) or ('子どもリンク' in content) or ('子供リンク' in content):\n return True\n if ch_name == \"トゥーンリンク\":\n if ('トリン' in content):\n return True\n if ch_name == \"ダークサムス\":\n if ('ダムス' in content):\n return True\n if ch_name == \"ゼロスーツサムス\":\n if ('ダムス' in content) or ('ゼロサム' in content) or ('ZSS' in content) or ('ゼロスーツ・サムス' in content):\n return True\n if ch_name == \"ピチュー\":\n if ('ピチュカス' in content):\n return True\n if ch_name == \"ミュウツー\":\n if ('M2' in content) or ('m2' in content):\n return True\n if ch_name == \"ポケモントレーナー\":\n if ('ポケモン・トレーナー' in content) or ('ポケトレ' in content) or ('ゼニガメ' in content) \\\n or ('フシギソウ' in content) or ('リザードン' in content) or ('リザ' in content):\n return True\n if ch_name == \"ゲッコウガ\":\n if ('蛙' in content):\n return True\n if ch_name == \"メタナイト\":\n if ('メタ' in content):\n return True\n if ch_name == \"デデデ\":\n if ('デデデ大王' in content):\n return True\n if ch_name == \"フォックス\":\n if ('狐' in content):\n return True\n if ch_name == \"ブラックピット\":\n if ('ブラック・ピット' in content) or ('ブラピ' in content):\n return True\n if ch_name == \"むらびと\":\n if ('ムラビト' in content) or ('村人' in content):\n return True\n if ch_name == \"アイスクライマー\":\n if ('アイス・クライマー' in content) or ('アイクラ' in content):\n return True\n if ch_name == \"インクリング\":\n if ('スプラゥーン' in content) or ('インリン' in content) or ('イカちゃん' in content) \\\n or ('いかちゃん' in content) or ('烏賊' in content) or ('イカ' in content):\n return True\n if ch_name == \"キャプテン・ファルコン\":\n if ('ファルコン' in content) or ('キャプテンファルコン' in content) or ('CF' in content) \\\n or ('C.F' in content) or ('cf' in content) or ('c.f' in content):\n return True\n if ch_name == \"ダックハント\":\n if ('ダック・ハント' in content) or ('犬' in content):\n return True\n if ch_name == \"ピクミン&オリマー\":\n if ('ピクミン&オリマー' in content) or ('ピクオリ' in content) or ('ピクミン' in content) or ('オリマー' in content):\n return True\n if ch_name == \"リトル・マック\":\n if ('リトルマック' in content) or ('マック' in content) or ('トルマク' in content):\n return True\n if ch_name == \"ロボット\":\n if ('ロボ' in content):\n return True\n if ch_name == \"mrゲーム&ウォッチ\":\n if ('ゲムヲ' in content) or ('ゲムオ' in content) or ('ミスター' in content) \\\n or ('ゲーム&ウォッチ' in content) or ('ゲーム&ウォッチ' in content):\n return True\n if ch_name == \"wii-fitトレーナー\":\n if ('フィットレ' in content) or ('Wii Fit' in content) or ('wii fit' in content) \\\n or ('Wii fit' in content) or ('wii Fit' in content) or ('Wii-Fit' in content) or ('wii-fit' in content) \\\n or ('Wii-fit' in content) or ('wii-Fit' in content)or ('wii-Fit' in content) \\\n or ('tトレーナー' in content)or ('Tトレーナー' in content) or ('t トレーナー' in content)or ('T トレーナー' in content):\n return True\n if ch_name == \"パックマン\":\n if ('金玉' in content):\n return True\n if ch_name == \"ベヨネッタ\":\n if ('ベヨ' in content):\n return True\n if ch_name == \"ロックマン\":\n if ('ロック' in content) or ('岩男' in content):\n return True\n if ch_name == \"ジョーカー\":\n if ('ペルソナ' in content):\n return True\n if ch_name == \"格闘mii\":\n if ('格闘Mii' in content) or ('格闘MII' in content):\n return True\n if ch_name == 
\"剣術mii\":\n if ('剣術Mii' in content) or ('剣術MII' in content):\n return True\n if ch_name == \"射撃mii\":\n if ('射撃Mii' in content) or ('射撃MII' in content) or ('シャゲミ' in content):\n return True\n if ch_name == \"勇者\":\n if ('HERO' in content) or ('hero' in content) or ('Hero' in content) \\\n or ('HELO' in content) or ('helo' in content) or ('Helo' in content) \\\n or ('ゆうしゃ' in content) or ('ユウシャ' in content) or ('ゆーしゃ' in content) \\\n or ('ユーシャ' in content) or ('ひーろー' in content) or ('ヒーロー' in content) \\\n or ('よしひこ' in content) or ('ヨシヒコ' in content):\n return True\n if ch_name == \"バンジョー&カズーイ\":\n if ('バンジョー&カズーイ' in content) or ('バンジョーとカズーイ' in content) or ('バンカズ' in content) \\\n or ('バンジョー' in content) or ('カズーイ' in content):\n return True\n if ch_name == \"ベレスト\":\n if ('ベレス' in content) or ('ベレト' in content):\n return True\n\n return False\n"
},
{
"alpha_fraction": 0.6351071000099182,
"alphanum_fraction": 0.6451278328895569,
"avg_line_length": 38.10810852050781,
"blob_id": "87aa11c3e548d941dffe19389336e7377c1dfa69",
"content_id": "54b976849a3e2b0e9a624f138cf7a3069102eb44",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8532,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 148,
"path": "/MyMessageClass/question.py",
"repo_name": "ayassbgc/nessmado_bot",
"src_encoding": "UTF-8",
"text": "# スクリプト名:nessmado_function.py\n# バージョン:5.0\n# 作成日:2019/03/xx\n# 最終更新日:2019/07/19\n# 作成者:(へっへ)\n# スクリプト概要:\n# |キャラ対策チャンネル(大元)に「質問」から始まるメッセージを投稿すると、\n# |各キャラ別の対策チャンネルに文言をコピーした上で、\n# |大元のキャラ対策チャンネルと雑談チャンネルに周知メッセージを送る。\n# |\n# |★キャラ窓の方へ\n# |これを使う場合、nessmado_discord_manager.pyをちゃんとイジる必要があります。\n# |具体的には、各チャンネル名を完全一致で取得してるので、例えば\n# |・「No.1_マリオ」のようにキャラ名以外にもチャンネル名にNo.1_とか付けてる場合は\n# | 上手いことをそれを外す処理を加える必要があります。正規表現を使うと良いと思います。\n# |・そうじゃなく、キャラ名だけのチャンネル名になっている場合は\n# | 例えばMr.Game&Watchなど(アルファベット?カタカナ?半角?全角?)と微妙に異なる場合が\n# | あるはずなので、そのへんの埋め合わせが必要です。\n# | 窓側のチャンネル名を変更してもらうか、スクリプト記載内容を修正してもらうか。\n# |・そこまでしてもらえれば、あとはNMconfig.iniにキャラ対策チャンネルのIDを書き込み、\n# | nessmado_basicで本クラスのimportとFunctionGeneratorクラスのgenerateFunctionInstanceメソッドの\n# | コメントアウトを外せば使えます。\n\n\"\"\"更新履歴\n 2019/03/xx ver 3.1\n 新規作成\n 2019/07/19 ver 5.0\n makeMessageをasync/awaitに仕様変更\n\"\"\"\n\n\n# discordAPIモジュール\nfrom discord import message\nfrom discord import client\nfrom discord import channel\n\n# 自作モジュール\nfrom MyMessageClass.message_maker import MessageMaker\nfrom nessmado_discord_manager import ChannelManager\n\n\nclass QuestionMessageMaker(MessageMaker):\n def __init__(self):\n super(QuestionMessageMaker, self).__init__()\n self.keyword = '質問'\n self.l_reply = []\n self.output_replies = []\n self.message_pattern = 0\n self.be_character_name = False\n self.ch_manager = ChannelManager()\n self.keychannel = self.ch_manager.CHARACTER_TAISAKU_ID\n\n async def _makeMessage(self, message, client, channel=None) -> str:\n # 長い関数なので構成を文章で説明\n # |_makeMessageでは全部で3パターンに分けてメッセージを作成する。\n # |パターン1:正常な場合。スクリプト概要に記載された処理が走る。\n # |パターン2:メッセージが「質問」から始まっていない場合。ちゃんと書いてほしい旨のメッセージを作成する。\n # |パターン3:質問から始められているが、キャラ名がキチンとかけていない場合。\n # | キャラ名ちゃんと書いてねって旨のメッセージが作成される。\n asyncio_result = None\n\n # パターン1:質問が適切な場合\n if self.message_pattern == 0:\n # 1-1:キャラ対策チャンネルと雑談チャンネルに質問が着たことを周知するためのメッセージの作成\n self.l_reply.append(f'{channel.mention} で質問がきたよ')\n for i in range(len(self.l_reply)):\n self.reply += self.l_reply[i]\n self.output_replies.append(\n [client.get_channel(self.ch_manager.CHARACTER_TAISAKU_ID), self.reply])\n self.output_replies.append(\n [client.get_channel(self.ch_manager.ZATSUDAN_CHANNEL_ID), self.reply])\n\n # 1-2:対象キャラのキャラ対策ページ用のメッセージを作成するにあたり、変数をクリア。\n self.l_reply.clear()\n self.reply = \"\"\n\n # 1-3:質問が着たキャラのキャラ対策チャンネルに質問内容をコピペするためのメッセージを作成。\n self.l_reply.append(f'{message.author.mention} からの質問\\n')\n self.l_reply.append(message.content)\n for tmp_reply in self.l_reply:\n self.reply += tmp_reply\n self.output_replies.append(\n [client.get_channel(channel.id), self.reply])\n\n # パターン2:メッセージが「質問」から始まっていない場合。\n if self.message_pattern == -1:\n self.l_reply.append(f'{message.author.mention} エラー!\\n')\n self.l_reply.append('「質問です。○○(キャラ名)について〜」の形で書いてね。')\n for i in range(len(self.l_reply)):\n self.reply += self.l_reply[i]\n self.output_replies.append(\n [client.get_channel(self.ch_manager.CHARACTER_TAISAKU_ID), self.reply])\n\n # パターン3:質問から始められているが、キャラ名がキチンとかけていない場合\n if self.message_pattern == -2:\n self.l_reply.append(f'{message.author.mention} エラー!\\n')\n self.l_reply.append('何のキャラの質問か分からないよ\\n')\n self.l_reply.append('「質問です。○○(キャラ名)について〜」の形で書いてね。')\n for i in range(len(self.l_reply)):\n self.reply += self.l_reply[i]\n self.output_replies.append(\n [client.get_channel(self.ch_manager.CHARACTER_TAISAKU_ID), self.reply])\n\n for reply_channel, reply_content in self.output_replies:\n #print(\"message.channel:\" + str(message.channel))\n asyncio_result = await reply_channel.send(reply_content)\n return asyncio_result\n\n async def 
executeFunction(self, message, client) -> str:\n asyncio_result = None\n # 「質問」から始まってなかったら -1 パターンのメッセージを作成\n if not message.content.startswith(self.keyword):\n self.message_pattern = -1\n asyncio_result = await self._makeMessage(message, client)\n return asyncio_result\n\n # TO:DO ここ関数化すべき。何のためにこれを書いてるのかがぱっと見でわからん。\n if not message.channel.id == self.keychannel:\n return None\n\n # 全てのチャンネルについてfor文を回し、質問の内容に該当するキャラがいないか調べる。\n for character_ch in client.get_all_channels():\n # 以下if文について:\n # ひたすらにメッセージに記載されているキャラ名とチャンネル名の一致の有無を確認する。\n # チャンネル名称(=キャラ名)がメッセージ内に含まれてたら、該当するキャラでの質問があった場合のメッセージ作成を実施する。\n\n # ★各キャラ窓へ①\n # 以下if文の条件にあるcharacter_ch.nameがチャンネル名称です。\n # client.get_all_channels()を使用していることからキャラ対策以外のチャンネル名も\n # 取得してきてしまうので、キャラ対策以外のチャンネルで「キャラクター名称を含むチャンネル名」が\n # 存在する場合『もし「_対」の文言が含まれていたら』といったような条件付けも必要だと思います。\n # print(character_ch.name)\n if self.ch_manager.judgeNameContained(client, character_ch.name, message.content) \\\n and self.ch_manager.judgeFuzzyCharacterName(character_ch.name, message.content):\n self.be_character_name = True\n asyncio_result = await self._makeMessage(message, client, character_ch)\n return asyncio_result\n\n # 対策したいキャラが見つからなかったら -2 パターンのメッセージを作成\n if not self.be_character_name:\n self.message_pattern = -2\n asyncio_result = await self._makeMessage(message, client)\n return asyncio_result\n\n def checkTriggers(self, message) -> bool:\n if self._checkKeyword(message) or self._checkChannelMessageWritten(message):\n return True\n return False\n"
},
{
"alpha_fraction": 0.8727272748947144,
"alphanum_fraction": 0.8727272748947144,
"avg_line_length": 17.33333396911621,
"blob_id": "62e09ee68e21825151c5abeb925f1aae9f86fccf",
"content_id": "6b590008a5ad3ecd0cc86f0a0ba2f107adc914cc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 127,
"license_type": "no_license",
"max_line_length": 25,
"num_lines": 3,
"path": "/README.md",
"repo_name": "ayassbgc/nessmado_bot",
"src_encoding": "UTF-8",
"text": "# nessmadobot\nネス窓用に作ったbotです。\n使い方とか説明はそれぞれのスクリプトに書いてます。\n"
},
{
"alpha_fraction": 0.6615853905677795,
"alphanum_fraction": 0.6755226254463196,
"avg_line_length": 32.764705657958984,
"blob_id": "9074ae54c8ec76aaf1677146134d640e118693b7",
"content_id": "33b733d077edb8f95d6d3f6e6c3143ac2a48b08d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2892,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 68,
"path": "/MyMessageClass/ness_skill.py",
"repo_name": "ayassbgc/nessmado_bot",
"src_encoding": "UTF-8",
"text": "# スクリプト名:ness_skill.py\n# バージョン:5.0\n# 作成日:2019/10/14\n# 最終更新日:2019/10/14\n# 作成者:(へっへ)\n# スクリプト概要:\n# |Nessskillクラス\n# |「/脱初心者」「/初心者」「/中級者」「/上級者」と任意のページで打つと\n# |議題・相談1(仮)内のメッセージでその文字で始まっているメッセージのログをまとめて出力する\n# |\n# |★キャラ窓の方へ\n# |一時的に作っただけのクラスなので使えなくて良いです。参考までに載せます。\n\"\"\"更新履歴\n 2019/10/14 ver 5.0\n 新規作成。\n\"\"\"\n\nimport asyncio\n\n# discordAPIモジュール\nfrom discord import message\nfrom discord import client\nfrom discord import channel\n\n# 自作モジュール\nfrom MyMessageClass.message_maker import MessageMaker\nfrom nessmado_discord_manager import ChannelManager\n\n\nclass NessSkill(MessageMaker):\n def __init__(self):\n self.keyword = '/ネススキル'\n self.HIS_MSG_LMT = 500 # ログのヒストリカル取得index数。今後使う機能用。\n self.ch_manager = ChannelManager()\n self.skill_ch_id = self.ch_manager.NESS_SKILL_CHANNEL_ID\n\n async def _makeMessage(self, client, message) -> str:\n asyncit_result = None\n nessskill_messages = [] # ここに実力毎のメッセージの内容が格納される。\n skill_ch = client.get_channel(self.skill_ch_id)\n historical_messages = await skill_ch.history(limit=self.HIS_MSG_LMT).flatten()\n\n # 各メッセージについて、リアクション情報を取得\n for history_message in historical_messages:\n if history_message.content.startswith('脱初心者'):\n nessskill_messages.append(history_message.content)\n for history_message in historical_messages:\n if history_message.content.startswith('初心者'):\n nessskill_messages.append(history_message.content)\n for history_message in historical_messages:\n if history_message.content.startswith('中級者'):\n nessskill_messages.append(history_message.content)\n for history_message in historical_messages:\n if history_message.content.startswith('上級者'):\n nessskill_messages.append(history_message.content)\n\n str_ness_skills = \"\"\n str_ness_skills = '\\n'.join(nessskill_messages)\n\n # fav_messagesをスタンプ多い順に並べ替える\n self.reply = str(\"【ネス使い実力別スキル表】\")\n await message.channel.send(self.reply)\n asyncit_result = await message.channel.send(str_ness_skills)\n return asyncit_result\n\n async def executeFunction(self, message, client) -> str:\n asyncit_result = await self._makeMessage(client, message)\n return asyncit_result\n"
},
{
"alpha_fraction": 0.6617646813392639,
"alphanum_fraction": 0.679330050945282,
"avg_line_length": 33.01388931274414,
"blob_id": "3c121dd067a4633e7b89037862dd7cb58c0d9a4d",
"content_id": "b07b3256a9e8e122a8de52c8464d986718fe21ac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3044,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 72,
"path": "/MyMessageClass/match.py",
"repo_name": "ayassbgc/nessmado_bot",
"src_encoding": "UTF-8",
"text": "# スクリプト名:match.py\n# バージョン:5.0\n# 作成日:2019/07/21\n# 最終更新日:2019/10/14\n# 作成者:(へっへ)\n# スクリプト概要:\n# |AnnounceMatchクラス\n# |対戦募集チャンネルで「対戦募集」から始まるメッセージを入力すると\n# |雑談チャンネルに対戦募集の周知をする\n# |\n# |★キャラ窓の方へ\n# |NMconfig.iniに対戦募集用のチャンネルIDと、対戦募集用の役職IDを書き込めば使えます。\n# |使う場合はnessmado_basicで本クラスのimportとFunctionGeneratorクラスのgenerateFunctionInstanceメソッドの\n# |コメントアウトを外してください。\n\"\"\"更新履歴\n 2019/07/21 ver 5.0\n 新規作成。\n 2019/10/14 ver 5.1\n sterved_matchingを最新のロールIDに修正。\n\"\"\"\n\n\n# discordAPIモジュール\nfrom discord import message\nfrom discord import client\nfrom discord import channel\n\n# 自作モジュール\nfrom MyMessageClass.message_maker import MessageMaker\nfrom nessmado_discord_manager import ChannelManager\n\n\nclass AnnounceMatchMessageMaker(MessageMaker):\n def __init__(self):\n super(AnnounceMatchMessageMaker, self).__init__()\n self.keyword = '対戦募集'\n self.output_replies = []\n self.message_pattern = 0\n self.ch_manager = ChannelManager()\n self.keychannel = self.ch_manager.MATCH_CHANNEL_ID\n print(type(self.keychannel))\n self.starved_matching = self.ch_manager.STARVED_MATCHING\n\n async def _makeMessage(self, message, client, channel=None) -> str:\n asyncio_result = None\n if self.message_pattern == -1:\n return asyncio_result\n if self.message_pattern == 0:\n self.reply = f'{message.author.mention} さんが対戦募集を開始しました。 {self.starved_matching}\\n \\\n 参加したい方はこちらから→{message.channel.mention} \\n'\n self.output_replies.append(\n [client.get_channel(self.ch_manager.ZATSUDAN_CHANNEL_ID), self.reply])\n for reply_channel, reply_content in self.output_replies:\n asyncio_result = await reply_channel.send(reply_content)\n return asyncio_result\n\n async def executeFunction(self, message, client) -> str:\n asyncio_result = None\n # 「対戦募集」から始まってなかったら -1 パターンのメッセージを作成\n if not message.content.startswith(self.keyword):\n self.message_pattern = -1\n asyncio_result = await self._makeMessage(message, client)\n return asyncio_result\n # 対戦募集チャンネル「以外」でのメッセージはスルーする。\n if message.channel.id == self.keychannel:\n asyncio_result = await self._makeMessage(message, client)\n return asyncio_result\n\n def checkTriggers(self, message) -> bool:\n if self._checkKeyword(message) or self._checkChannelMessageWritten(message):\n return True\n return False"
},
{
"alpha_fraction": 0.6859903335571289,
"alphanum_fraction": 0.6962205171585083,
"avg_line_length": 39.91860580444336,
"blob_id": "2a36d28be03f69409eeb75012d5e2b410110d29b",
"content_id": "543d19c2dbd815028886e9a6fc58fd7a43085c20",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5423,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 86,
"path": "/MyMessageClass/taisaku.py",
"repo_name": "ayassbgc/nessmado_bot",
"src_encoding": "UTF-8",
"text": "# スクリプト名:nessmado_function.py\n# バージョン:5.0\n# 作成日:2019/07/19\n# 最終更新日:2019/07/19\n# 作成者:(へっへ)\n# スクリプト概要:\n# |キャラ対策チャンネル(大元)に「/対策 [キャラ名]」から始まるメッセージを投稿すると、\n# |各キャラ別の対策チャンネルで対策スタンプが押されたメッセージを取得し(過去500件分のメッセージから抽出)、\n# |/対策 が書かれたチャンネルに票数でソートした順にメッセージを記載する。\n# |\n# |★キャラ窓の方へ\n# |これを使う場合、nessmado_discord_manager.pyをちゃんとイジる必要があります。\n# |具体的には、各チャンネル名を完全一致で取得してるので、例えば\n# |・「No.1_マリオ」のようにキャラ名以外にもチャンネル名にNo.1_とか付けてる場合は\n# | 上手いことをそれを外す処理を加える必要があります。正規表現を使うと良いと思います。\n# |・そうじゃなく、キャラ名だけのチャンネル名になっている場合は\n# | 例えばMr.Game&Watchなど(アルファベット?カタカナ?半角?全角?)と微妙に異なる場合が\n# | あるはずなので、そのへんの埋め合わせが必要です。\n# | 窓側のチャンネル名を変更してもらうか、スクリプト記載内容を修正してもらうか。\n# |・そこまでしてもらえれば、あとはNMconfig.iniにキャラ対策チャンネルのIDおよび作成した対策スタンプのIDを書き込み、\n# | nessmado_basicで本クラスのimportとFunctionGeneratorクラスのgenerateFunctionInstanceメソッドの\n# | コメントアウトを外せば使えます。\n\"\"\"更新履歴\n 2019/07/19 ver 5.0\n 新規作成。\n\"\"\"\n\nimport asyncio\n\n# discordAPIモジュール\nfrom discord import message\nfrom discord import client\nfrom discord import channel\n\n# 自作モジュール\nfrom MyMessageClass.message_maker import MessageMaker\nfrom nessmado_discord_manager import ChannelManager\n\n\nclass TaisakuMessageMaker(MessageMaker):\n def __init__(self):\n super(TaisakuMessageMaker, self).__init__()\n self.keyword = '/対策'\n self.HIS_MSG_LMT = 500 # ログのヒストリカル取得index数。今後使う機能用。\n self.be_character_name = False\n self.ch_manager = ChannelManager()\n\n async def _makeMessage(self, client, message, character_ch) -> str:\n asyncit_result = None\n fav_messages_and_counts = [] # ここに対策スタンプついてるメッセージの対策スタンプの数, メッセージの内容が格納される。\n historical_messages = await character_ch.history(limit=self.HIS_MSG_LMT).flatten()\n\n # 各メッセージについて、リアクション情報を取得\n for history_message in historical_messages:\n for reaction in history_message.reactions:\n # リアクションが対象の絵文字だったら、fav_messagesリストにメッセージの内容とスタンプの数を格納\n if str(reaction.emoji) == self.ch_manager.TAISAKU_STAMP:\n fav_messages_and_counts.append(\n [reaction.count, history_message.content])\n # fav_messagesをスタンプ多い順に並べ替える\n fav_messages_and_counts.sort(reverse=True)\n self.reply = str(\"【\" + character_ch.name + \"対策】\")\n await message.channel.send(self.reply)\n for fav_count, fav_content in fav_messages_and_counts:\n asyncit_result = await message.channel.send(\"・\" + fav_content + \"、\" + str(fav_count)+\"票\")\n return asyncit_result\n\n async def executeFunction(self, message, client) -> str:\n # await history()をbot.pyに直書きしている現在は、チャンネルと特定し、そのチャンネルを_makeMessageに渡すだけの関数。\n character_ch = self.specifyCharacter(message, client)\n if character_ch == None:\n return await message.channel.send(\"キャラ名が正しく入力されてません。\\n「/対策 ○○(キャラ名)」で入力してください。\")\n asyncit_result = await self._makeMessage(client, message, character_ch)\n return asyncit_result\n\n def specifyCharacter(self, message, client):\n # 全てのチャンネルについてfor文を回し、対策情報を取得したいキャラを特定する。\n # 処理として対象キャラのチャンネルでログを取得するためcharacter_chって変数名にしている。\n for character_ch in client.get_all_channels():\n # 以下if文について:\n # ひたすらにメッセージに記載されているキャラ名とチャンネル名の一致の有無を確認する。\n # チャンネル名称(=キャラ名)がメッセージ内に含まれてたら、該当するキャラでの質問があった場合のメッセージ作成を実施する。\n if self.ch_manager.judgeNameContained(client, character_ch.name, message.content) \\\n and self.ch_manager.judgeFuzzyCharacterName(character_ch.name, message.content):\n self.be_character_name = True\n return character_ch\n"
},
{
"alpha_fraction": 0.7956469058990479,
"alphanum_fraction": 0.7980653047561646,
"avg_line_length": 34.956520080566406,
"blob_id": "798d75afb85f40732c290f4c40122259be1411bc",
"content_id": "809be995704f012d399b7d56cbe586d315f3cc89",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 1597,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 23,
"path": "/NMconfig.ini",
"repo_name": "ayassbgc/nessmado_bot",
"src_encoding": "UTF-8",
"text": "[DEFAULT]\n#BE_TEST: Trueの場合、[TEST]配下の値が使われる。Falseの場合[MASTER]配下の値が使われる。\n# ネス窓ではお試し用のサーバーの時にTrue、ネス窓で実際に使用する時はFalseにして使い分けている。\nBE_TEST = True\nMYCHARACTER = キャラ窓のキャラ名を入れる。例えばネス窓の場合「ネス」と入れる。\n\n[TEST]\nTOKEN = トークンを入れる。\nZATSUDAN_CHANNEL_ID = 雑談チャンネルのIDを入れる。\nCHARACTER_TAISAKU_ID = キャラ対策チャンネルのIDを入れる。\nMATCH_CHANNEL_ID = 対戦募集チャンネルのIDを入れる。\nTAISAKU_STAMP = 対策スタンプのIDを入れる。右の例のように記載する→ <:対策スタンプの名前:対策スタンプのID>\nNESS_SKILL_CHANNEL_ID = 議論・相談1チャンネルのIDを入れる。\nSTARVED_MATCHING = 対戦募集を通知したい役職のIDを入れる。右の例のように記載する→ <@&役職のID>\n\n[MASTER]\nTOKEN = トークンを入れる。\nZATSUDAN_CHANNEL_ID = 雑談チャンネルのIDを入れる。\nCHARACTER_TAISAKU_ID = キャラ対策チャンネルのIDを入れる。\nMATCH_CHANNEL_ID = 対戦募集チャンネルのIDを入れる。\nTAISAKU_STAMP = 対策スタンプのIDを入れる。右の例のように記載する→ <:対策スタンプの名前:対策スタンプのID>\nNESS_SKILL_CHANNEL_ID = 議論・相談1チャンネルのIDを入れる。\nSTARVED_MATCHING = 対戦募集を通知したい役職のIDを入れる。右の例のように記載する→ <@&役職のID>\n"
},
{
"alpha_fraction": 0.47438231110572815,
"alphanum_fraction": 0.4855656623840332,
"avg_line_length": 37.838382720947266,
"blob_id": "a3fbf2b8a3e38fe074b0bb568daca81479292a43",
"content_id": "5dd5baa6127973f17d5cdaff7b968892361c644c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5613,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 99,
"path": "/MyMessageClass/message_maker.py",
"repo_name": "ayassbgc/nessmado_bot",
"src_encoding": "UTF-8",
"text": "# スクリプト名:message_maker.py\n# バージョン:5.0\n# 作成日:2019/03/xx\n# 最終更新日:2019/07/31\n# 作成者:(へっへ)\n# スクリプト概要:\n# |新機能を開発するにあたり、基底とするクラス。\n# |基本的にここに記載されているメンバ関数をいじる。\n# |ただし、メッセージを出力させるだけといった簡単な機能の場合は__init__くらいしかいじらなくてもよい。\n\"\"\"更新履歴\n 2019/03/xx ver 3.1?覚えてない。\n オブジェクト指向に沿ってクラス化。\n 2019/07/31 Ver 5.0\n asyncioに完全に沿わせるためにサブジェネレータをasync文に書き直してAwaitableにした\n (言葉の使い方あってるのか?これ)\n 2020/03/10 ABCモジュール実装\n\"\"\"\n\nfrom discord import message\nfrom discord import client\nfrom MyMessageClass.abs_message_maker import AbsMessageMaker\n\n\nclass MessageMaker(AbsMessageMaker):\n \"\"\"メッセージを作成する基底クラス\n absMessageMakerから直接いろんなMessageMakerに飛ばないのは\n オーバーライドさせたかったから。\n ness_mado_message_maker.pyを見てもらうとわかるが、\n このクラスを継承すれば変数に値を入れるだけでbotが反応するようにしてある。\n \"\"\"\n\n def __init__(self):\n # -----------------------------------------------------------\n # __init()__\n # -----------------------------------------------------------\n # |役割\n # |・変数の設定、代入をする。botの動作を試すだけなら、作成したクラスの__init()__の値をいじれば動きます。\n # | ness_mado_message_maker.pyを参考に作ってもらえれば良いと思います。\n # |\n # -----------------------------------------------------------\n # 変数について\n # -----------------------------------------------------------\n # |keyword : string型。ここに入れた文字から始まるメッセージが投稿されたらreplyに入る文字が返される。\n # | 基本的には文字の頭に「/」とか「|」とか付けて普通の会話で反応しないようにするのが良い。\n # |keychannel: int型。オプションみたいなもの。keywordから始まるメッセージについて、\n # | 「このチャンネルで投稿されたら」という条件を加えたい場合にkeychannelに値を代入する。\n # | keychannelには対象としたいチャンネルのチャンネルIDを入れる。\n # | keychannelの値はNMconfig.pyとNMconfig.iniをいじって入れた方が後々楽だけどどっちでもよい。\n # |reply : string型。botが返すメッセージを入れる。\n # -----------------------------------------------------------\n self.keyword = ''\n self.keychannel = 0\n self.reply = ''\n\n async def executeFunction(self, message, clinet) -> str:\n # -----------------------------------------------------------\n # executeFunction()\n # -----------------------------------------------------------\n # |役割\n # |・この次に書かれてる_makeMessage()メソッドを実行すること\n # |・その他諸々の処理(追加条件など)を加えたい場合にここに記述する。\n # | question.pyやtaisaku.pyなどを見てもらえばと思います。\n # -----------------------------------------------------------\n asyncio_result = await self._makeMessage(message)\n return asyncio_result\n\n async def _makeMessage(self, message) -> str:\n # -----------------------------------------------------------\n # _makeMessage()\n # -----------------------------------------------------------\n # |役割\n # |・この次に書かれてる_makeMessage()メソッドを実行すること\n # |・その他諸々の処理(追加条件など)を加えたい場合にここに記述する。\n # | question.pyやtaisaku.pyなどを見てもらえばと思います。\n # -----------------------------------------------------------\n asyncio_result = await message.channel.send(self.reply)\n return asyncio_result\n\n def checkTriggers(self, message) -> bool:\n # -----------------------------------------------------------\n # checkTriggers()\n # -----------------------------------------------------------\n # |役割\n # |・メッセージがkeywordから始まっているかをチェックする_checkKeyword()メソッドを返します。\n # |・それ以外にも特定のチャンネルからのメッセージかを判断する_checkChannelMessageWritten()メソッドを\n # | 条件に加えたい場合は、継承先の小クラス側でオーバーライドさせます。これについては\n # | question.pyやtaisaku.pyなどを見てもらえばと思います。\n # -----------------------------------------------------------\n return self._checkKeyword(message)\n\n def _checkKeyword(self, message) -> bool:\n if message.content.startswith(self.keyword):\n return True\n return False\n\n def _checkChannelMessageWritten(self, message) -> bool:\n if message.channel.id == self.keychannel:\n return True\n return False\n"
},
{
"alpha_fraction": 0.5856621861457825,
"alphanum_fraction": 0.6093559861183167,
"avg_line_length": 28.13274383544922,
"blob_id": "f56145c6b9c93bd9c59f24b8e930425de1d23042",
"content_id": "183fb48fbdc3e0e83c181bcce45b4e640420eb0b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5126,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 113,
"path": "/bot.py",
"repo_name": "ayassbgc/nessmado_bot",
"src_encoding": "UTF-8",
"text": "# スクリプト名:bot.py\n# バージョン:5.0\n# 作成日:2019/03/xx\n# 最終更新日:2019/07/19\n# 作成者:(へっへ)\n# はじめに:\n# |ネス窓をdiscort運用するにあたり、窓の運用が快適になるような機能群を用意する。\n# |機能群を用意するに当たり、可読性、新規機能追加の容易さの観点から\n# |オブジェクト指向をベースにプログラムを組むこととする。\n# |ただし、私も勉強の途中であることから、変な書き方になってるところがいっぱいあります。すまん。\n# |\n#\n# スクリプト概要:\n# |メイン関数があるスクリプト。サーバーで起動する際は本スクリプトを起動させる。\n\"\"\"更新履歴(目を通す必要はない)\n 2019/03/11 ver 3.0\n キャラ対策用チャンネルの管理bot開発\n - キャラ対策チャンネルへの質問の振り分け機能\n - mother2ネタのメッセージ送信機能\n 2019/05/01 Ver 2.0\n オブジェクト指向に沿ってリファクタリング実施。\n - 今後の課題:質問クラスが死んでるので気が向いたらきれいにする。\n 2019/05/13 Ver 3.0, 3.1\n 質問チャンネルおよびそれに関わる部分の修正\n - メッセージの文言だけをトリガーにしていたが、メッセージがどのチャンネルに投稿されたのかについてもトリガーに追加。\n これにより質問クラスのフローかなりマシになった。\n 2019/07/06 Ver 4.0\n discord.py のバージョンを0.16.12 → 1.2.3へ変更。\n これにあたり以下を修正。\n ・send_message周りを更新\n ・チャンネルIDの型がstrからintへ変更。\n 2019/07/18 ver 5.0\n 対策クラスの作成にて、コルーチンを使用するAPIの使用方法を学習し、その結果\n 自作クラスにてasync/await使用する方法がわかった。\n そのために、\n ・自作クラスからbot.pyに作成したメッセージとそれを記載するチャンネルを送り、\n ・それをbot.pyにてsendにて記載する\n という処理をわざわざbot.pyでする必要がなくなる(=自作クラス内ですればよい)。\n なので、上記2点を実行するための処理を削除。自作クラスに処理を記載。\n\"\"\"\nimport discord # インストールした discord.py\n\n# 自作モジュール\nimport nessmado_basic\nimport NMconfig\n\n\nclient = discord.Client() # 接続に使用するオブジェクト\nnmconfig = NMconfig.NMConfig()\n\n# ------------------------------------------------------\n# ここからdiscordAPIの実行\n# ------------------------------------------------------\n# 起動時に通知してくれる処理\[email protected]\nasync def on_ready():\n print('ログインしました')\n\n\[email protected]\nasync def on_message(message):\n\n func_executer = nessmado_basic.FunctionExecuter(message, client)\n await func_executer.startFunction()\n print(\"処理終了\")\n\n# botの接続と起動\n# (tokenにはbotアカウントのアクセストークンを入れてください)\nclient.run(nmconfig.TOKEN)\n\n# -------------------ここでメイン終わり---------------------\n\n\n# ------------------------------------------------------\n# ここから落書き\n# ------------------------------------------------------\n'''\n # ユーザーリスト表示\n # リプライする場合message.contentにユーザーIDを含む\n if message.content.startswith('<@hogehoge> ユーザーリスト'):\n reply = f'{message.author.mention} ユーザーリストね〜' # 返信文の作成\n await client.send_message(message.channel, reply) # 返信を送る\n\n member_list = [\n member.display_name for member in client.get_all_members()]\n reply = member_list\n '''\n\n'''\n # チャンネルリスト表示\n if message.content.startswith('<@hogehoge> チャンネルリスト'):\n reply = f'{message.author.mention} チャンネルリストね〜' # 返信文の作成\n await client.send_message(message.channel, reply) # 返信を送る\n\n channel_list = [str(channel)\n for channel in client.get_all_channels()]\n reply = channel_list\n else :\n reply = f'{message.author.mention} はあい' # 返信文の作成\n await client.send_message(message.channel, reply) # 返信を送る\n '''\n\n\"\"\"チャンネルをバーっと作るプログラム。今はいらない。\nif message.content.startswith('!!!!!!!!!!mkch'):\n csv_file = open('all_chara.csv', 'r', newline='')\n reader = csv.reader(csv_file)\n for row in reader:\n for cell in row:\n channel_name = cell\n await client.create_channel(message.server, channel_name, type=discord.ChannelType.text)\n await client.send_message(message.channel, f'{channel_name} チャンネルを作成しました')\n\n\"\"\"\n"
},
{
"alpha_fraction": 0.5038029551506042,
"alphanum_fraction": 0.521187961101532,
"avg_line_length": 32.67073059082031,
"blob_id": "1559c8f77f37686932315bb29964e1e088937592",
"content_id": "7a615cc2b278f280e0244a7900ed271d8ff4562e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3617,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 82,
"path": "/NMconfig.py",
"repo_name": "ayassbgc/nessmado_bot",
"src_encoding": "UTF-8",
"text": "# スクリプト名:NMconfig.py\n# バージョン:5.0\n# 作成日:2019/03/xx\n# 最終更新日:2019/07/19\n# 作成者:(へっへ)\n# スクリプト概要:\n# |classの説明に記載したため省略。\n\"\"\"更新履歴\n 2019/03/xx ver 3.1\n 新規作成\n 2019/07/19 ver 5.0\n 対策スタンプメンバーを追加。\n 2020/03/10 ver 6.0\n 設定ファイル(NMconfig.ini)からもろもろ読み取ることにした。\n 設定ファイルをいじればコードを知らなくても楽に値を変更できる。\n https://docs.python.org/ja/3/library/configparser.html\n\"\"\"\n\nimport configparser\n\n\nclass NMConfig:\n \"\"\"NMConfigクラス\n NessMadoConfigの略。\n Singleton(分からない場合はググる)のつもりで書いたクラス。\n Tokenとかその他諸々の環境による差分はここで吸収する。\n \"\"\"\n\n def __init__(self):\n self.CONFIG_FILE_NAME = 'NMconfig.ini'\n self.S_DEFAULT = 'DEFAULT'\n self.S_MASTER_MODE = 'MASTER'\n self.S_TEST_MODE = 'TEST'\n\n self.config = configparser.ConfigParser()\n self.config.read(self.CONFIG_FILE_NAME)\n # -----------------------------------------------------------\n # 設定1:本番環境と試験環境の設定\n # -----------------------------------------------------------\n # |普通はどうかわからないがテストの環境と本番の環境を用意している場合は、\n # |このbe_testmodeの値を変更することでconfig的な設定を変えることができる。\n # |これにより変更する値はこのクラスで一括管理すると楽。\n # -----------------------------------------------------------\n # True:検証用、False:本番用\n self.be_testmode = self.config[self.S_DEFAULT].getboolean('BE_TEST')\n\n # -----------------------------------------------------------\n # 設定2:変数の設定\n # -----------------------------------------------------------\n # |サーバーごとにプライベートに変わってくるID等は一括で管理。\n # |ここの値を外部に公開するとサーバーを乗っ取られてしまうので\n # |そんなことはしてはいけない。\n # -----------------------------------------------------------\n self.TOKEN = \"\"\n self.ZATSUDAN_CHANNEL_ID = \"\"\n self.CHARACTER_TAISAKU_ID = \"\" # 対策が英語だと\"counterplan\"って言うらしいが分かりにくいので\n self.MATCH_CHANNEL_ID = \"\"\n self.TAISAKU_STAMP = \"\"\n self.NESS_SKILL_CHANNEL_ID = \"\"\n self.STARVED_MATCHING = \"\"\n self.MYCHARACTER = \"\"\n\n if self.be_testmode:\n # テストサーバー用\n self._setConfig(self.S_TEST_MODE)\n else:\n # ネス窓本番\n self._setConfig(self.S_MASTER_MODE)\n\n def _setConfig(self, mode: str):\n self.TOKEN = self.config[mode].get('TOKEN')\n self.ZATSUDAN_CHANNEL_ID = self.config[mode].getint(\n 'ZATSUDAN_CHANNEL_ID')\n self.CHARACTER_TAISAKU_ID = self.config[mode].getint(\n 'CHARACTER_TAISAKU_ID')\n self.MATCH_CHANNEL_ID = self.config[mode].getint(\n 'MATCH_CHANNEL_ID')\n self.TAISAKU_STAMP = self.config[mode].get('TAISAKU_STAMP')\n self.NESS_SKILL_CHANNEL_ID = self.config[mode].getint(\n 'NESS_SKILL_CHANNEL_ID') # 議論・相談1のチャンネル\n self.STARVED_MATCHING = self.config[mode].get(\n 'STARVED_MATCHING')\n"
},
{
"alpha_fraction": 0.5651870965957642,
"alphanum_fraction": 0.574018120765686,
"avg_line_length": 33.42399978637695,
"blob_id": "388c3d576147b00e7c4343bcfb17bbe63573dac1",
"content_id": "2cff54c5bc7e29bf89e0282b61a593b6777c2daf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6858,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 125,
"path": "/MyMessageClass/ness_mado_message_maker.py",
"repo_name": "ayassbgc/nessmado_bot",
"src_encoding": "UTF-8",
"text": "# スクリプト名:ness_mado_message_maker.py\n# バージョン:3.1\n# 作成日:2019/03/xx\n# 最終更新日:2019/05/13\n# 作成者:(へっへ)\n# スクリプト概要:\n# |任意のメッセージに対して、何かしらの返信をするお遊び用の機能群。\n# |motherシリーズやネス窓にゆかりのある名言(迷言)の返事がくる。\n#\n# 使い方\n# |ここにある内容を見て、それに習って書く。パパとママを見ればそれで良い。\n# |作ったら、nessmado_function.pyに使い方を見て、必要な情報を入れる。\n# |そこまでできたら使えるようになってる。以上。\n\nfrom MyMessageClass.message_maker import MessageMaker\nimport random\n\n\n\"\"\"更新履歴\n 2019/03/xx ver 3.1?覚えてない。\n オブジェクト指向に沿ってクラス化。\n\"\"\"\n\n\nclass NessMadoMessageMaker(MessageMaker):\n pass\n\n\nclass PapaMessageMaker(MessageMaker):\n \"\"\"パパのメッセージを作成するクラス\"\"\"\n\n def __init__(self):\n super(PapaMessageMaker, self).__init__()\n self.keyword = '/パパ'\n self.reply = 'ママに似てがんばり屋だなあ。無理するなよ。'\n\n\nclass MamaMessageMaker(MessageMaker):\n \"\"\"ママのメッセージを作成するクラス\"\"\"\n\n # ★__init__でごちゃごちゃ書いている理由\n # 関数の名前の観点からmakeMessageメンバ関数をオーバーライドすべきでは?と思うが、\n # 本クラス群では現状で__init__で一通り作ってしまった方が運用しやすい\n # (=ここのクラス群は__init__さえいじれば他は変えなくて良い)ので、この方針で進める。\n def __init__(self):\n super(MamaMessageMaker, self).__init__()\n self.keyword = '/ママ'\n self.l_reply = []\n self.l_reply.append('「おかえり、ネス。\\n')\n self.l_reply.append(' 何も言わなくてもいいの。ママはわかってるつもりよ。\\n')\n self.l_reply.append(' ずいぶん疲れてるようだし、ハンバーグを食べておやすみ。チュッ!」')\n for i in range(len(self.l_reply)):\n self.reply += self.l_reply[i]\n\n\nclass ObasanMessageMaker(MessageMaker):\n \"\"\"おばさんのメッセージを作成するクラス\"\"\"\n\n def __init__(self):\n super(ObasanMessageMaker, self).__init__()\n self.keyword = '/おばさん'\n self.reply = ' ◆キイイー! こうるさい ハエだよ! しんで じごくへいけ!!'\n\n\nclass FlyingmanMessageMaker(MessageMaker):\n \"\"\"フライングマンのメッセージを作成するクラス\"\"\"\n\n def __init__(self):\n super(FlyingmanMessageMaker, self).__init__()\n self.keyword = '/フライングマン'\n self.reply = ' わたしは あなたの ゆうき。あなたに ついてゆきます。…なまえ? フライングマンとでも いっておきましょうか。'\n\n\nclass DoseisanMessageMaker(MessageMaker):\n \"\"\"どせいさんのメッセージを作成するクラス\"\"\"\n\n def __init__(self):\n super(DoseisanMessageMaker, self).__init__()\n self.keyword = '/どせいさん'\n self.reply = ' なにかむずかしいことをかんがえよう。これからのぼくは。'\n\n\nclass EscargotMessageMaker(MessageMaker):\n \"\"\"エスカルゴ運送のメッセージを作成するクラス\"\"\"\n\n def __init__(self):\n super(EscargotMessageMaker, self).__init__()\n self.keyword = '/エスカルゴ'\n self.reply = ' エスカルゴ運送でーす!お預かり料金は18ドルです。お金持ってますよね?'\n\n\nclass FsannMessageMaker(MessageMaker):\n \"\"\"Fsannのメッセージを作成するクラス\"\"\"\n\n def __init__(self):\n super(FsannMessageMaker, self).__init__()\n self.keyword = '/Fsann'\n self.reply = ' 神に感謝'\n\n\nclass NessOugiMessageMaker(MessageMaker):\n def __init__(self):\n super(NessOugiMessageMaker, self).__init__()\n self.keyword = '/ネス奥義'\n # 参考:ネス奥義公式サイトおよびTwitterネス奥義アカウント\n # 2020/3/12時点で83種類。最近のは取りこぼしあるかも。\n self.l_nessougi = ['殺', '朧', '命', '雫', '薊',\n '芥', '閃', '雷雲', '不知火', '早咲きの薔薇',\n 'クロスPKファイアー', 'チェイスPKファイアー', 'エイリアンPKファイアー(旧名 アブソードPKファイアー、くつした)', 'PKファイアージャンプ', '裏PKファイアージャンプ',\n '焔・ホームラン', '食い逃げ', '完全に停止したハニワは吸収できる。知らなかったのか?', 'スタッカート', '背中で語る',\n '雀の涙', 'たびゴマフラッシュ', '阿修羅飯綱落ち', 'Infinity ∞ Sign', 'Uroboros Exhale',\n 'Final Dive', 'Ishtar Drive', 'Eclipse End', 'Fate Reload', 'Orion Imagine',\n '銀河鉄道YAMANOTE', 'とうごうアタック', 'PKサヨナラ', 'パイク・オブ・アブソリュート', '椿',\n 'おもてなし', '少年院ヘッドバッド(旧名 βストライク)', '河童の川流れ', 'オイルパニック', '無拍子',\n 'ヘル・アンド・ヘヴン', 'ノアの方舟', 'リリパットステップ', '裏シャトル', '崖の上のポニョ',\n 'お天道様は見ておるぞ', 'だるまさんがころんだ', 'かなまる', 'かなまる改', 'かなまる隼',\n 'かなまるファントム', 'Final Time', 'EarthBound', 'クロノス・サンダー', 'エフランエンディカルバースト(略称 FEB)',\n 'ファイナルうんちバズーカ', 'うめきレインボー', '逆さ富士描き', 'ディヴァイン・ブラスト', 'とどかぬ翼',\n 'はんげきのサイコシールド', '朱赤の盾', '太陽の牢獄', '切断(旧名 トロコンクエスト)', 'PKカケヒキ',\n '闇', '場外ファウル', 'チェックザアンサー', '一本釣り', 'フォーリングPKフラッシュ',\n 
'ハッケヨ~イ・ノコッタノコッタ', '今日も陽は落ちる', '鯖折り', 'ボンバーマン参戦', '太陽心酔',\n 'アヤメ返し', '滝登り', '忍', '108マシンガン', 'ネス使い',\n 'タイタニック','野原しんのすけ','PKジェットコースター衝撃映像',\n ]\n self.reply = random.choice(self.l_nessougi)\n"
},
{
"alpha_fraction": 0.7160859704017639,
"alphanum_fraction": 0.7254756689071655,
"avg_line_length": 29.659090042114258,
"blob_id": "a661b4f499104bf9ee210f6e3b5827f68fec0735",
"content_id": "ca254e4cc61d72ea3d142e862c35eedd1418a296",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5643,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 132,
"path": "/nessmado_basic.py",
"repo_name": "ayassbgc/nessmado_bot",
"src_encoding": "UTF-8",
"text": "# スクリプト名:nessmado_function.py\n# バージョン:5.0\n# 作成日:2019/03/xx\n# 最終更新日:2019/07/31\n# 作成者:(へっへ)\n# スクリプト概要:\n# |ネス窓の機能群を使用するにあたり、それらのインターフェースとなるクラス群が記載されているスクリプト。\n# |メッセージを受信したら、メッセージの内容を読み取り、必要に応じた機能クラスを使用する。\n# |\n# |備考\n# |現在は「受信したメッセージに対して何かしらのリアクションを返す」という機能のみだが、\n# |今後はトリガーをスタンプとした機能なども作成していく予定。\n#\n# 以下開発者用\n# 新機能実装方法:\n# |新機能を開発したら\n# | ①新しいクラスを本スクリプトファイルにimportする\n# | ②新しいクラスを本スクリプトファイルに記載の\n# | FunctionGeneratorクラスのgenerateFunctionInstanceメソッドに追加する。\n# |①②のやり方はここにある既存のものを参考にして追加すること。\n#\n# 注意点:\n# |メッセージを受信し、それが該当するメッセージかどうかの判別にMessageMakerクラスを使用している。\n# |新機能を開発する場合は、インターフェースとしてMessageMakerを継承したクラスを作成し、\n# |その継承したクラスから新機能を実行させること。\n\"\"\"更新履歴\n 2019/03/xx ver 3.0?覚えてない。\n オブジェクト指向に沿ってクラス化。\n 2019/07/31 Ver 5.0\n asyncioに完全に沿わせるためにサブジェネレータをasync文に書き直してAwaitableにした\n (使い方あってるのか?これ)\n\"\"\"\n\n# discortAPI\nfrom discord import message\nfrom discord import client\n\n# ネス窓の共通クラス\nfrom nessmado_discord_manager import ChannelManager\n\n# 機能クラス\n# 新しいクラスを作ったらここでimportする\nfrom MyMessageClass.ness_mado_message_maker import PapaMessageMaker\nfrom MyMessageClass.ness_mado_message_maker import MamaMessageMaker\nfrom MyMessageClass.ness_mado_message_maker import ObasanMessageMaker\nfrom MyMessageClass.ness_mado_message_maker import FlyingmanMessageMaker\nfrom MyMessageClass.ness_mado_message_maker import DoseisanMessageMaker\nfrom MyMessageClass.ness_mado_message_maker import EscargotMessageMaker\nfrom MyMessageClass.ness_mado_message_maker import FsannMessageMaker\nfrom MyMessageClass.ness_mado_message_maker import NessOugiMessageMaker\n#from MyMessageClass.question import QuestionMessageMaker\n#from MyMessageClass.taisaku import TaisakuMessageMaker\n#from MyMessageClass.match import AnnounceMatchMessageMaker\n#from MyMessageClass.ness_skill import NessSkill\n\n\nclass FunctionExecuter:\n \"\"\"FunctionExecuter\n discordで使用する機能クラスを実行するクラス\n FunctionSelecter関数を内包し、これを使って使用する機能を選択する。\n ▼本クラスの使い方\n ・bot.pyにインスタンスを生成\n ・startFunctionで実行。\n \"\"\"\n\n def __init__(self, message, client):\n self.message = message\n self.client = client\n self.ch_manager = ChannelManager()\n self.func_selecter = FunctionSelecter(message, client)\n self.function_instance = None\n\n async def startFunction(self) -> str:\n if self.checkMessageFromRobot():\n return None\n self.function_instance = self.func_selecter.selectFunctionClass()\n if self.function_instance:\n asynciot_result = await self.function_instance.executeFunction(\n self.message, self.client)\n return asynciot_result\n return None\n\n def checkMessageFromRobot(self):\n if self.message.author.id == self.client.user.id:\n return True\n return False\n\n\nclass FunctionSelecter:\n \"\"\"FunctionSelecter\n messageを読み取り、実行すべき機能を選択する。\n FunctionExecuterクラスのメンバオブジェクトとして使用する。\n \"\"\"\n\n def __init__(self, message, client):\n self.message = message\n self.client = client\n\n def selectFunctionClass(self) -> 'class':\n func_generator = FunctionGenerator()\n for f_instance in func_generator.generateFunctionInstance():\n if f_instance.checkTriggers(self.message):\n return f_instance\n return None\n\n\nclass FunctionGenerator:\n \"\"\"FunctionGenerator\n 使用する機能クラス群を生成するクラス。\n 使用する候補のクラスを全て保持するとメモリ負荷がやばくなるかもしれないので、\n クラスの生成手法にはジェネレーターを採用。\n 使用したいクラスをFunctionSelecterに逐一生成し、渡すことでメモリ負担を減らす。\n (この考慮が必要かどうかを測ったりはしていない)\n \"\"\"\n\n def __init__(self):\n pass\n\n # 新しいクラスを作ったらここでインスタンスを渡す。\n def generateFunctionInstance(self):\n yield PapaMessageMaker()\n yield ObasanMessageMaker()\n yield MamaMessageMaker()\n yield FlyingmanMessageMaker()\n yield 
DoseisanMessageMaker()\n yield EscargotMessageMaker()\n yield FsannMessageMaker()\n yield NessOugiMessageMaker()\n# yield QuestionMessageMaker()\n# yield TaisakuMessageMaker()\n# yield AnnounceMatchMessageMaker()\n# yield NessSkill()\n"
},
{
"alpha_fraction": 0.6597744226455688,
"alphanum_fraction": 0.6973684430122375,
"avg_line_length": 17.161291122436523,
"blob_id": "01864cbdc54131fac8dbe1b0b8bcf9dfc7420213",
"content_id": "e08b50b3dcebacb96604df1e938c73b6ced6dd43",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 740,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 31,
"path": "/MyMessageClass/abs_message_maker.py",
"repo_name": "ayassbgc/nessmado_bot",
"src_encoding": "UTF-8",
"text": "# Script name: abs_message_maker.py\n# Version: 6.0\n# Created: 2020/03/10\n# Last updated: -\n# Author: (へっへ)\n# Script overview:\n# |Implemented an abstract class because I wanted to try using an interface.\n# |MessageMaker is the child class.\n\"\"\"Change log\n 2020/03/10 ver 6.0\n Newly created\n\"\"\"\n\nfrom abc import ABCMeta, abstractmethod\n\nfrom discord import message\nfrom discord import client\n\n\nclass AbsMessageMaker(metaclass=ABCMeta):\n    \"\"\"Abstract class for the MessageMaker classes that build messages\"\"\"\n\n    @abstractmethod\n    def executeFunction(self):\n        pass\n\n    @abstractmethod\n    def _makeMessage(self):\n        pass\n"
}
] | 13 |
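abs_message_maker.py above relies on ABCMeta and @abstractmethod so that concrete MessageMaker subclasses are forced to implement both methods. A tiny sketch of that enforcement, with hypothetical class names:

```python
from abc import ABCMeta, abstractmethod


class AbsMaker(metaclass=ABCMeta):
    @abstractmethod
    def make(self) -> str:
        ...


class HelloMaker(AbsMaker):
    def make(self) -> str:
        return "hello"


print(HelloMaker().make())  # hello
try:
    AbsMaker()  # abstract classes cannot be instantiated
except TypeError as err:
    print(err)
```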
PranaliJadav/DataScienceProject
|
https://github.com/PranaliJadav/DataScienceProject
|
e7f0ed41a6c2b378c0fc5627006cc1f5fc33a17e
|
bd2f12db46cfccb6c2bdcc664a1963f37983bb7c
|
55da67cf3b96cd612288848c00a5934e2b3dbed0
|
refs/heads/master
| 2020-05-15T00:32:25.552125 | 2019-04-18T16:56:37 | 2019-04-18T16:56:37 | 182,013,549 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7180576920509338,
"alphanum_fraction": 0.7300360798835754,
"avg_line_length": 30.302419662475586,
"blob_id": "d6fcfd5545b7e07f89a0d168929ee4ffb302503e",
"content_id": "1e48de35283bf7297d961a9688a802b8069cdd5e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7764,
"license_type": "no_license",
"max_line_length": 528,
"num_lines": 248,
"path": "/Project/Final Project.py",
"repo_name": "PranaliJadav/DataScienceProject",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding: utf-8\n\n# Name: Pranali Jadav\n# \n# Jupyter notebook for the project.\n\n# As part of a mobile app development class, I am creating a travel/language learning app that helps users select a destination they are travelling to and learn the language of that destination to help them during their visit. Depending on the purpose of the visit, the app is capable of suggesting words to learn, catered to the user's particular situation. The app also provides helpful tips, tourist locations, and restaurants to eat at. The language learning section is done using crossword puzzles and flashcards. \n# \n# Thinking of the future, if I ever want to actually market the app, I would like to know what would be the most beneficial business model in terms of Android app marketing. For that purpose, I want to analyze the current apps on the Android market to see what apps do the best in their respective category. \n# \n# The dataset for the Google Play store apps was found on Kaggle and will be used for this project. \n# \n# \n\n# In[1]:\n\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport itertools\nimport time\ntest=pd.read_csv('googleplaystore.csv')\n\n\n# In[2]:\n\n\n#test[test['Category'].isin(['COMMUNICATION'])]\n\n\n# The application considered can fall under the following categories:\n# Books and references\n# communication\n# education\n# Entertainment\n# food and drink\n# Game \n# travel and local\n# \n# Thus we will filter our data to only analyze those categories, as we cannot put our app in other categories. \n# These categories can be customized for any application.\n# \n\n# In[3]:\n\n\nourCategories = ['BOOKS_AND_REFERENCE', 'COMMUNICATION', 'EDUCATION', 'ENTERTAINMENT', 'FOOD_AND_DRINK', 'GAME', 'TRAVEL_AND_LOCAL']\nourContentRating = ['Everyone']\n\nourData = test[test['Category'].isin(ourCategories)]\nourData = ourData[ourData['Content Rating'].isin(ourContentRating)]\n\n\n# In[4]:\n\n\n#ourData['Rating'].fillna(ourData.groupby('Category')['Rating'].transform('mean'), inplace = True)\n\n\n# In[5]:\n\n\nstep1 = ourData[['Category', 'Rating']].groupby('Category').median()\nstep1\nplt.plot(step1, '-o')\nplt.xticks(rotation=90)\nplt.savefig(\"figure1.png\") # save as png\n\n\n# By simply plotting the median rating in these categories, we can see that most apps in these categories have a rating above 4, leading me to believe that my app can survive in these categories, though the data can be biased, seeing as we don't yet know how many ratings were received and if there was a bias. \n# \n# We will now try to take into account the number of installs and ratings to see if there is merit to the rating. \n\n# In[6]:\n\n\nstep2 = ourData.copy()\nstep2['Relative Rating'] = step2['Rating']/step2['Reviews']\n\n\n# In[7]:\n\n\nstep3 = step2[['Category', 'Relative Rating']].groupby('Category').median()\nplt.plot(step3, '-o')\nplt.xticks(rotation=90)\nplt.savefig(\"figure2.png\") # save as png\n\n\n# The lower the number, the more reviews the category received while still holding a higher rating. Thus we can see the Books and Reference category is misrepresented, as it has fewer reviews for its apps and thus its rating does not hold much weight. \n# \n# Also, two of the lowest numbers are Entertainment and Game.\n\n# Next, let's check the installs in each category to see which of these have the highest number of installs.\n\n# In[8]:\n\n\nourData.dtypes\nourData['Price'] = ourData['Price'].astype(float)\nourData['Installs'] = ourData['Installs'].astype(int)\nourData['Rating'] = ourData['Rating'].astype(float)\nourData['Content Rating'] = ourData['Content Rating'].astype('|S')\n\nourData['Reviews'] = ourData['Reviews'].astype(int)\nourData['Type'] = ourData['Type'].astype('|S')\n\n\n\n# In[9]:\n\n\nstep4 = ourData[['Category', 'Installs']].groupby('Category').median()\nplt.plot(step4, '-o')\nplt.xticks(rotation=90)\nplt.savefig(\"figure3.png\") # save as png\n\n\n# The number of installs is plotted in units of 10,000,000; thus we see that education, entertainment, and games have over 10,000,000 installs, with games and entertainment having the highest installs.\n\n# This was just a cursory look at the data to get a rough estimate and form a preliminary hypothesis for the correlation between categories and the success of the app.\n\n# Next, we will run an ANOVA test to see if we can prove a statistically significant correlation. \n\n# In[10]:\n\n\nfrom scipy import stats\nF, p = stats.f_oneway(ourData[ourData.Category=='EDUCATION'].Installs,\n ourData[ourData.Category=='GAME'].Installs,\n ourData[ourData.Category=='ENTERTAINMENT'].Installs)\n\n\n# In[11]:\n\n\np\n\n\n# Looking at the F-statistic and p-value from the ANOVA test for category vs Installs, we can see that there is no statistically significant relationship between these variables.\n\n# In[12]:\n\n\n\nourData['TypeInt']=(ourData['Type']=='Free').astype(int)\nourData.corr()\n\n\n# In[13]:\n\n\nourData['Category'] = ourData['Category'].astype('|S')\n\n\n# Similarly, the correlation scores for the numerical values in the dataset do not meaningfully explain any correlation. Thus we can expect a linear model to not be an ideal fit for this dataset. \n\n# In[14]:\n\n\nfrom sklearn.metrics import mean_squared_error # needed by fit_linear_reg\n\ndef fit_linear_reg(X,Y):\n    #Fit linear regression model and return RSS and R squared values\n    model_k = linear_model.LinearRegression(fit_intercept = True)\n    model_k.fit(X,Y)\n    RSS = mean_squared_error(Y,model_k.predict(X)) * len(Y)\n    R_squared = model_k.score(X,Y)\n    return RSS, R_squared\n\n\n# In[15]:\n\n\nX = ourData[['Category','Type','Content Rating']]\nY = ourData['Installs']\n\n\nmydata = ourData[['Category','Type','Content Rating', 'Installs']]\n\n\n# In[16]:\n\n\nX = pd.get_dummies(data=X, drop_first=True)\n\n\n# In[17]:\n\n\nfrom sklearn import linear_model\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.naive_bayes import BernoulliNB\n\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = .20, random_state = 40)\nregr = BernoulliNB() #MultinomialNB() #linear_model.LinearRegression()\nregr.fit(X_train, Y_train)\npredicted = regr.predict(X_test)\n\n\n# In[18]:\n\n\nprint(\"Train accuracy is %.2f %%\" % (regr.score(X_train, Y_train)*100))\nprint(\"Test accuracy is %.2f %%\" % (regr.score(X_test, Y_test)*100))\n\n\n# Trying different models for categorical data classification, we can see that Bernoulli Naive-Bayes offers the most accuracy, though it is not proven to be anywhere close to a good fit. \n# \n# From these tests, finding a model was extremely difficult and the original hypothesis of using the data to predict could not be tested at a statistical level.\n# \n# Though the empirical data still gives us some inkling about some trends in human thinking. \n\n# In[19]:\n\n\nimport seaborn as sns\n\nsns.barplot(x='Type', y='Installs', data=mydata)\nplt.savefig(\"figure4.png\") # save as png\n\n\n# In[20]:\n\n\nourData['Profits'] = ourData['Price']*ourData['Installs']\n\nstep4 = ourData[['Category', 'Profits']].groupby('Category').mean().reset_index()\ng=sns.catplot(x='Category', y='Profits',kind='bar', data=step4)\ng.set_xticklabels(rotation=90)\ng.savefig(\"figure5.png\") # save as png\n\n\n# We can see from the bar chart above that if the app is to be made paid, education and game both prove to be good categories in terms of profits. \n\n# In[21]:\n\n\ng=sns.catplot(x=\"Category\", y=\"Installs\", hue=\"Type\", kind=\"bar\", data=ourData);\ng.set_xticklabels(rotation=90)\ng.savefig(\"figure6.png\") # save as png\n\n\n# While for free apps, Game and communication prove to be good categories. Travel and local is close behind as well, making it more appropriate for our app.\n\n# In conclusion, we can see that empirical data provides a good indication of what categories are a good start, but there is no statistical significance attached to these results. \n"
},
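Final Project.py runs a one-way ANOVA across three app categories. A self-contained version of the same test on synthetic install counts (all numbers invented for illustration), showing how the F-statistic and p-value are read:

```python
import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
education = rng.normal(1.0e6, 2e5, 50)      # fake install counts per category
game = rng.normal(1.1e6, 2e5, 50)
entertainment = rng.normal(0.9e6, 2e5, 50)

F, p = stats.f_oneway(education, game, entertainment)
print(f"F={F:.2f}, p={p:.4f}")  # a small p (e.g. < 0.05) suggests the group means differ
```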
{
"alpha_fraction": 0.636781632900238,
"alphanum_fraction": 0.7103448510169983,
"avg_line_length": 18.809524536132812,
"blob_id": "df2907ff7b02f38e8c9981fa1ae144636b30b292",
"content_id": "bb8d03c43e22ec0ae6a2441125d41bd7d4628d67",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 435,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 21,
"path": "/Project/leaps.R",
"repo_name": "PranaliJadav/DataScienceProject",
"src_encoding": "UTF-8",
"text": "test<-read.csv('googleplaystore.csv')\r\nattach(test)\r\n\r\nlibrary(leaps)\r\n\r\n\r\ntest2<-na.omit(test)\r\nattach(test2)\r\n\r\n\r\nfull.data=cbind(Category, Rating, Reviews, Type, Price, Content.Rating)\r\n\r\n\r\n\r\ndata1<-leaps(full.data, Installs, method = \"adjr2\")\r\nsort(data1$adjr2)\r\ndata2 = cbind(data1$which,data1$adjr2)\r\n\r\n#Category, rating, Reviews, Price, Content.rating - 0.0151862151\r\n\r\n#just cateogry, type, price, content.rating - 0.0132111202"
}
] | 2 |
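leaps.R above does best-subset selection ranked by adjusted R-squared. A rough Python analog of the same search (brute force over column subsets; the data here is synthetic, not the Play Store CSV):

```python
import itertools

import numpy as np
from sklearn.linear_model import LinearRegression


def adjusted_r2(r2: float, n: int, k: int) -> float:
    # Penalize R^2 by the number of predictors k, as leaps does with adjr2.
    return 1 - (1 - r2) * (n - 1) / (n - k - 1)


def best_subset(X: np.ndarray, y: np.ndarray):
    n, p = X.shape
    best_score, best_cols = -np.inf, None
    for k in range(1, p + 1):
        for cols in itertools.combinations(range(p), k):
            r2 = LinearRegression().fit(X[:, cols], y).score(X[:, cols], y)
            score = adjusted_r2(r2, n, k)
            if score > best_score:
                best_score, best_cols = score, cols
    return best_score, best_cols


rng = np.random.default_rng(1)
X = rng.normal(size=(100, 4))
y = 2 * X[:, 0] - X[:, 2] + rng.normal(size=100)
print(best_subset(X, y))  # expect columns (0, 2) to score best
```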
josmontes/flask-advanced-course-api
|
https://github.com/josmontes/flask-advanced-course-api
|
963b9823b7c3eddf5f4871b3ab74694338bedd3a
|
3e846bd360e4a64e5d222a3c4d032899e2f91741
|
c230111b654387d89c6ac4e89c657e4754645d8c
|
refs/heads/master
| 2022-12-28T22:54:48.514930 | 2020-10-12T20:28:42 | 2020-10-12T20:28:42 | 302,974,343 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6280120611190796,
"alphanum_fraction": 0.6581325531005859,
"avg_line_length": 22.714284896850586,
"blob_id": "c5267c105f8a59bff3a24f420438b270151abfce",
"content_id": "93e299c3342a4dffa348e289fdb030a0b7ba971a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 664,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 28,
"path": "/migrations/versions/386161cb6aef_.py",
"repo_name": "josmontes/flask-advanced-course-api",
"src_encoding": "UTF-8",
"text": "\"\"\"empty message\n\nRevision ID: 386161cb6aef\nRevises: 620a5757a9e0\nCreate Date: 2020-10-11 12:23:01.816262\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '386161cb6aef'\ndown_revision = '620a5757a9e0'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_unique_constraint(op.f('uq_users_username'), 'users', ['username'])\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint(op.f('uq_users_username'), 'users', type_='unique')\n # ### end Alembic commands ###\n"
},
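The migration above names its constraint uq_users_username via op.f(...), which implies a SQLAlchemy naming convention configured on the model metadata. A sketch of how such a convention is typically declared; the exact dictionary used by this repo is an assumption:

```python
from sqlalchemy import MetaData
from flask_sqlalchemy import SQLAlchemy

# Assumed convention; it yields names like uq_users_username, matching
# the constraint created in the migration above.
convention = {
    "ix": "ix_%(column_0_label)s",
    "uq": "uq_%(table_name)s_%(column_0_name)s",
    "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
    "pk": "pk_%(table_name)s",
}

db = SQLAlchemy(metadata=MetaData(naming_convention=convention))
```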
{
"alpha_fraction": 0.5925925970077515,
"alphanum_fraction": 0.6057612895965576,
"avg_line_length": 26,
"blob_id": "7561b7324393ccb91ff43563074f39398bf4ebdb",
"content_id": "44d0fedf7278248ce16d2fa97490538fc4192a7b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1215,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 45,
"path": "/jwt_callbacks.py",
"repo_name": "josmontes/flask-advanced-course-api",
"src_encoding": "UTF-8",
"text": "# @jwt.user_claims_loader\n# def add_claims_to_jwt(identity):\n#     if identity == 1:  # instead of hard coding you should read from a config file or a db\n#         return {\"is_admin\": True}\n#     return {\"is_admin\": False}\n\n\n# @jwt.expired_token_loader\n# def expired_token_callback():\n#     return jsonify({\n#         \"description\": \"The token has expired.\",\n#         \"error\": \"token_expired\"\n#     }), 401\n\n\n# @jwt.invalid_token_loader\n# def invalid_token_callback(error):\n#     return jsonify({\n#         \"description\": \"Signature verification failed\",\n#         \"error\": \"invalid_token\"\n#     }), 401\n\n\n# @jwt.unauthorized_loader\n# def unauthorized_callback(error):\n#     return jsonify({\n#         \"description\": \"Request does not contain an access token\",\n#         \"error\": \"authorization_required\"\n#     }), 401\n\n\n# @jwt.needs_fresh_token_loader\n# def needs_fresh_token_callback():\n#     return jsonify({\n#         \"description\": \"The token is not a fresh one\",\n#         \"error\": \"fresh_token_required\"\n#     }), 401\n\n\n# @jwt.revoked_token_loader\n# def revoked_token_callback():\n#     return jsonify({\n#         \"description\": \"The token has been revoked\",\n#         \"error\": \"token_revoked\"\n#     }), 401\n"
},
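The callbacks in jwt_callbacks.py are commented out; when enabled, they attach to the JWTManager created in app.py. A minimal sketch of wiring one of them up, using the flask-jwt-extended 3.x decorator style that the record itself uses:

```python
from flask import Flask, jsonify
from flask_jwt_extended import JWTManager

app = Flask(__name__)
app.config["JWT_SECRET_KEY"] = "change-me"  # placeholder secret
jwt = JWTManager(app)


@jwt.expired_token_loader
def expired_token_callback():
    # Returned to the client whenever an expired token is presented.
    return jsonify({
        "description": "The token has expired.",
        "error": "token_expired"
    }), 401
```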
{
"alpha_fraction": 0.811965823173523,
"alphanum_fraction": 0.811965823173523,
"avg_line_length": 18.66666603088379,
"blob_id": "a755f35a78b8c5cad791a63c2cd11b0cdbfc34b0",
"content_id": "80ba840c57480d0953423000b5f1c5ab13a77aa6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 117,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 6,
"path": "/.env.example",
"repo_name": "josmontes/flask-advanced-course-api",
"src_encoding": "UTF-8",
"text": "MAILGUN_DOMAIN=\nMAILGUN_API_KEY=\nDATABASE_URL=\nJWT_SECRET_KEY=\nAPPLICATION_SETTINGS=default_config.py\nAPP_SECRET_KEY="
},
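.env.example lists the variables the app expects at startup. A small sketch of consuming them with python-dotenv, mirroring the load_dotenv call in app.py:

```python
import os

from dotenv import load_dotenv

load_dotenv(".env", verbose=True)  # reads KEY=value pairs into the process env
db_url = os.environ.get("DATABASE_URL")  # None if the variable is unset
if db_url is None:
    raise RuntimeError("DATABASE_URL must be set; see .env.example")
```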
{
"alpha_fraction": 0.7555859684944153,
"alphanum_fraction": 0.7633379101753235,
"avg_line_length": 28.635135650634766,
"blob_id": "a4bb8adca5d508b7d15a8995db7af4db421fd5b2",
"content_id": "c8f2d5bbe48a7298b736d23dc0645e1f5f54ce89",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2193,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 74,
"path": "/app.py",
"repo_name": "josmontes/flask-advanced-course-api",
"src_encoding": "UTF-8",
"text": "import os\n\nfrom flask import Flask, jsonify\nfrom flask_restful import Api\nfrom flask_jwt_extended import JWTManager\nfrom flask_migrate import Migrate\nfrom marshmallow import ValidationError\nfrom flask_uploads import configure_uploads, patch_request_class\nfrom dotenv import load_dotenv\n\nfrom ma import ma\nfrom db import db\nfrom resources.user import UserRegister, UserLogin, UserLogout, User, TokenRefresh\nfrom resources.confirmation import Confirmation, ConfirmationByUser\nfrom resources.item import Item, ItemList\nfrom resources.store import Store, StoreList\nfrom resources.image import Avatar, ImageUpload, Image, AvatarUpload\nfrom blacklist import BLACKLIST\nfrom libs.images import IMAGE_SET\n\napp = Flask(__name__)\nload_dotenv(\".env\", verbose=True)\napp.config.from_object(\"default_config\")\napp.config.from_envvar(\"APPLICATION_SETTINGS\")\npatch_request_class(app, size=10 * 1024 * 1024)\nconfigure_uploads(app, IMAGE_SET)\napi = Api(app)\njwt = JWTManager(app)\ndb.init_app(app)\nma.init_app(app)\nmigrate = Migrate(app, db)\n\n\[email protected]_first_request\ndef create_tables():\n db.create_all()\n\n\[email protected](ValidationError)\ndef marshmallow_validation_handler(err):\n return jsonify(err.messages), 400\n\n\[email protected]_in_blacklist_loader\ndef token_in_blacklist_callback(decrypted_token):\n return decrypted_token[\"jti\"] in BLACKLIST\n # if true it will go to the revoked loader\n\n\n# Users\napi.add_resource(UserLogin, \"/login\")\napi.add_resource(UserLogout, \"/logout\")\napi.add_resource(UserRegister, \"/register\")\napi.add_resource(User, \"/user/<int:user_id>\")\napi.add_resource(TokenRefresh, \"/refresh\")\napi.add_resource(Confirmation, \"/confirm/<string:confirmation_id>\")\napi.add_resource(ConfirmationByUser, \"/confirmation/user/<int:user_id>\")\n\n# Items\napi.add_resource(Item, \"/item/<string:name>\")\napi.add_resource(ItemList, \"/items\")\n\n# Stores\napi.add_resource(Store, \"/store/<string:name>\")\napi.add_resource(StoreList, \"/stores\")\n\n# Images\napi.add_resource(ImageUpload, \"/upload/image\")\napi.add_resource(Image, \"/image/<string:filename>\")\napi.add_resource(AvatarUpload, \"/upload/avatar\")\napi.add_resource(Avatar, \"/avatar/<int:user_id>\")\n\nif __name__ == \"__main__\":\n app.run(port=5000)\n"
},
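app.py registers a token_in_blacklist_loader against an in-memory BLACKLIST set. A compact sketch of the revocation flow that loader enables (flask-jwt-extended 3.x API; the in-memory set is for illustration, a real deployment would persist revoked jti values):

```python
from flask import Flask, jsonify
from flask_jwt_extended import JWTManager, jwt_required, get_raw_jwt

app = Flask(__name__)
app.config["JWT_SECRET_KEY"] = "change-me"
app.config["JWT_BLACKLIST_ENABLED"] = True
jwt = JWTManager(app)

BLACKLIST = set()  # in-memory for illustration only


@jwt.token_in_blacklist_loader
def token_is_revoked(decrypted_token):
    # Runs on every protected request; returning True routes the caller
    # to the revoked-token handler.
    return decrypted_token["jti"] in BLACKLIST


@app.route("/logout", methods=["POST"])
@jwt_required
def logout():
    BLACKLIST.add(get_raw_jwt()["jti"])  # revoke exactly this token
    return jsonify(message="logged out"), 200
```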
{
"alpha_fraction": 0.6131108403205872,
"alphanum_fraction": 0.6205006241798401,
"avg_line_length": 35.4782600402832,
"blob_id": "ef930e324500a898407edcb84dd257cc4d231f2a",
"content_id": "c7d7ff940e76e509418ec2fb7becf4d36c4d4bd4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4195,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 115,
"path": "/resources/image.py",
"repo_name": "josmontes/flask-advanced-course-api",
"src_encoding": "UTF-8",
"text": "from inspect import trace\nimport traceback\nimport os\n\nfrom flask_restful import Resource\nfrom flask_uploads import UploadNotAllowed\nfrom flask import request, send_file\nfrom flask_jwt_extended import jwt_required, get_jwt_identity\n\nfrom libs import images\nfrom libs.strings import gettext\nfrom schemas.image import ImageSchema\n\nimage_schema = ImageSchema()\n\n\nclass ImageUpload(Resource):\n @classmethod\n @jwt_required\n def post(cls):\n \"\"\"\n This endpoint is used to upload an image file. It uses the\n JWT to retrieve user information and save the image in the user's folder.\n If a file with the same name exists in the user's folder, name conflicts\n will be automatically resolved by appending a underscore and a smallest\n unused integer. (eg. filename.png to filename_1.png).\n \"\"\"\n data = image_schema.load(request.files)\n user_id = get_jwt_identity()\n folder = f\"user_{user_id}\"\n try:\n # save(self, storage, folder=None, name=None)\n image_path = images.save_image(data[\"image\"], folder=folder)\n # here we only return the basename of the image and hide the internal folder structure from our user\n basename = images.get_basename(image_path)\n return {\"message\": gettext(\"image_uploaded\").format(basename)}, 201\n except UploadNotAllowed: # forbidden file type\n extension = images.get_extension(data[\"image\"])\n return {\"message\": gettext(\"image_illegal_extension\").format(extension)}, 400\n\n\nclass Image(Resource):\n @classmethod\n @jwt_required\n def get(cls, filename: str):\n \"\"\"\n Returns requested image if it exists inside logged in user's folder\n \"\"\"\n user_id = get_jwt_identity()\n folder = f\"user_{user_id}\"\n if not images.is_filename_safe(filename):\n return {\"message\": gettext(\"image_illegal_filename\").format(filename)}, 400\n\n try:\n return send_file(images.get_path(filename, folder=folder))\n except FileNotFoundError:\n return {\"message\": gettext(\"image_not_found\")}, 404\n\n @classmethod\n @jwt_required\n def delete(cls, filename: str):\n user_id = get_jwt_identity()\n folder = f\"user_{user_id}\"\n if not images.is_filename_safe(filename):\n return {\"message\": gettext(\"image_illegal_filename\").format(filename)}, 400\n\n try:\n print(images.get_path(filename, folder=folder))\n os.remove(images.get_path(filename, folder=folder))\n return {\"message\": gettext(\"image_deleted\")}\n except FileNotFoundError:\n return {\"message\": gettext(\"image_not_found\")}, 404\n except:\n traceback.print_exc()\n return {\"message\": gettext(\"image_delete_error\")}, 500\n\n\nclass AvatarUpload(Resource):\n @classmethod\n @jwt_required\n def put(cls):\n \"\"\"\n Used to upload user avatars\n \"\"\"\n data = image_schema.load(request.files)\n filename = f\"user_{get_jwt_identity()}\"\n folder = \"avatars\"\n avatar_path = images.find_image_any_format(filename, folder)\n if avatar_path:\n try:\n os.remove(avatar_path)\n except:\n return {\"message\": gettext(\"avatar_delete_error\")}, 500\n\n try:\n ext = images.get_extension(data[\"image\"].filename)\n avatar = filename + ext\n avatar_path = images.save_image(\n data[\"image\"], folder=folder, name=avatar)\n basename = images.get_basename(avatar_path)\n return {\"message\": gettext(\"avatar_uploaded\").format(basename)}\n except UploadNotAllowed:\n extension = images.get_extension(data[\"image\"])\n return {\"message\": gettext(\"image_illegal_extension\").format(extension)}, 400\n\n\nclass Avatar(Resource):\n @classmethod\n def get(cls, user_id: int):\n folder = \"avatars\"\n 
filename = f\"user_{user_id}\"\n avatar = images.find_image_any_format(filename, folder)\n if avatar:\n return send_file(avatar)\n return {\"message\": gettext(\"avatar_not_found\")}, 404\n"
},
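Both Image endpoints call images.is_filename_safe before touching the filesystem. That helper is not shown in this section; a plausible allow-list implementation is sketched below as an assumption, not the repo's actual code:

```python
import re

# Assumed allow-list: a filename must start with an alphanumeric character,
# contain only safe characters, and end in an accepted image extension.
# Path separators never match, which blocks traversal attempts.
ALLOWED = re.compile(r"^[A-Za-z0-9][A-Za-z0-9_.-]*\.(jpg|jpeg|png|gif)$")


def is_filename_safe(filename: str) -> bool:
    return ALLOWED.match(filename) is not None


assert is_filename_safe("cat.png")
assert not is_filename_safe("../../etc/passwd")
```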
{
"alpha_fraction": 0.6197225451469421,
"alphanum_fraction": 0.6314837336540222,
"avg_line_length": 31.831684112548828,
"blob_id": "615e4af135af0a6d9821ca515ab1a1714444f43e",
"content_id": "dd85cbc57b216f610be35b236446fc9d96926fe9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3316,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 101,
"path": "/resources/user.py",
"repo_name": "josmontes/flask-advanced-course-api",
"src_encoding": "UTF-8",
"text": "import traceback\nfrom flask_restful import Resource\nfrom flask import request\nfrom werkzeug.security import safe_str_cmp\nfrom flask_jwt_extended import (\n    jwt_required,\n    create_access_token,\n    create_refresh_token,\n    jwt_refresh_token_required,\n    get_jwt_identity,\n    get_raw_jwt,\n)\n\nfrom libs.mailgun import MailgunException\nfrom libs.strings import gettext\nfrom models.user import UserModel\nfrom models.confirmation import ConfirmationModel\nfrom schemas.user import UserSchema\nfrom blacklist import BLACKLIST\n\n\nuser_schema = UserSchema()\n\n\nclass UserRegister(Resource):\n    @classmethod\n    def post(cls):\n        user = user_schema.load(request.get_json())\n\n        if UserModel.find_by_username(user.username):\n            return {\"message\": gettext(\"user_already_exists\").format(user.username)}, 400\n        if UserModel.find_by_email(user.email):\n            return {\"message\": gettext(\"user_already_exists\").format(user.email)}, 400\n\n        try:\n            user.save_to_db()\n            confirmation = ConfirmationModel(user.id)\n            confirmation.save_to_db()\n            user.send_confirmation_email()\n            return {\"message\": gettext(\"user_register_success\")}, 201\n        except MailgunException as e:\n            user.delete_from_db()  # rollback\n            return {\"message\": str(e)}, 500\n        except:\n            traceback.print_exc()\n            user.delete_from_db()\n            return {\"message\": gettext(\"user_db_saving_error\")}, 500\n\n\nclass User(Resource):\n    @classmethod\n    def get(cls, user_id: int):\n        user = UserModel.find_by_id(user_id)\n        if not user:\n            return {\"message\": gettext(\"user_not_found\")}, 404\n        return user_schema.dump(user)\n\n    @classmethod\n    def delete(cls, user_id: int):\n        user = UserModel.find_by_id(user_id)\n        if not user:\n            return {\"message\": gettext(\"user_not_found\")}, 404\n        user.delete_from_db()\n        return {\"message\": gettext(\"user_deleted\")}, 200\n\n\nclass UserLogin(Resource):\n    @classmethod\n    def post(cls):\n        login_user = user_schema.load(request.get_json(), partial=(\"email\",))\n\n        user = UserModel.find_by_username(login_user.username)\n        if user and safe_str_cmp(user.password, login_user.password):\n            confirmation = user.most_recent_confirmation\n            if confirmation and confirmation.confirmed:\n                access_token = create_access_token(\n                    identity=user.id, fresh=True)\n                refresh_token = create_refresh_token(user.id)\n                return {\"access_token\": access_token, \"refresh_token\": refresh_token}, 200\n            else:\n                return {\"message\": gettext(\"user_not_confirmed\")}, 400\n        return {\"message\": gettext(\"user_invalid_credentials\")}, 401\n\n\nclass UserLogout(Resource):\n    @classmethod\n    @jwt_required\n    def post(cls):\n        # jti is \"JWT ID\" unique identifier for a JWT\n        jti = get_raw_jwt()[\"jti\"]\n        BLACKLIST.add(jti)\n        # message key assumed here; the original referenced an undefined USER_LOGGED_OUT constant\n        return {\"message\": gettext(\"user_logged_out\")}, 200\n\n\nclass TokenRefresh(Resource):\n    @classmethod\n    @jwt_refresh_token_required\n    def post(cls):\n        current_user_id = get_jwt_identity()\n        new_token = create_access_token(identity=current_user_id, fresh=False)\n        return {\"access_token\": new_token}, 200\n"
}
] | 6 |
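UserLogin above compares stored and submitted passwords directly with safe_str_cmp, which implies plaintext storage. A hedged improvement sketch using werkzeug's hash helpers; this is a suggestion, not what the record's UserModel does:

```python
from werkzeug.security import generate_password_hash, check_password_hash

stored = generate_password_hash("s3cret")     # salted hash, safe to persist
print(check_password_hash(stored, "s3cret"))  # True
print(check_password_hash(stored, "wrong"))   # False
```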
oohx/bert_sequence_label
|
https://github.com/oohx/bert_sequence_label
|
f17610b30a6d07bdbff118912906b43063528a22
|
fe14650bd249e51bc99bd8221d66d65f6ae9ad87
|
3c9366a3a72ddb546fb27312fb2150789cd686d7
|
refs/heads/master
| 2022-12-04T08:25:19.110928 | 2020-08-17T09:32:07 | 2020-08-17T09:32:07 | 288,135,302 | 3 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6636363863945007,
"alphanum_fraction": 0.6652892827987671,
"avg_line_length": 30.8157901763916,
"blob_id": "03d08e79558e37639b4137233c5495495df18e79",
"content_id": "49b7efb482a0e0eac4e1244baef30e8540245063",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1234,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 38,
"path": "/inference.py",
"repo_name": "oohx/bert_sequence_label",
"src_encoding": "UTF-8",
"text": "# !/user/bin/env python\n# -*- encoding: utf-8 -*-\n# @Author : Seven\n# @Function: 模型推理脚本\nimport os\nimport pickle\nimport tensorflow as tf\nfrom model import Model\nfrom tools import create_model\nfrom utils.loader import input_from_line\nfrom train import FLAGS, load_config\nfrom utils.utils import get_logger\n\n\ndef main(_):\n config_file = os.path.join(FLAGS.output, 'config.json')\n log_file = os.path.join(FLAGS.output, 'model.log')\n\n config = load_config(config_file)\n config['init_checkpoint'] = FLAGS.init_checkpoint\n logger = get_logger(log_file)\n # limit GPU memory\n tf_config = tf.ConfigProto()\n tf_config.gpu_options.allow_growth = True\n map_file = os.path.join(FLAGS.output, 'maps.pkl')\n with open(map_file, \"rb\") as f:\n tag_to_id, id_to_tag = pickle.load(f)\n\n with tf.Session(config=tf_config) as sess:\n model = create_model(sess, Model, os.path.join(FLAGS.output, 'checkpoint'), config, logger)\n text = \"中国你好成都\"\n result = model.evaluate_line(sess, input_from_line(text, FLAGS.max_seq_len, tag_to_id), id_to_tag, export=True)\n print(result)\n\n\nif __name__ == '__main__':\n os.environ['CUDA_VISIBLE_DEVICES'] = '0'\n tf.app.run(main)\n\n"
},
{
"alpha_fraction": 0.6413792967796326,
"alphanum_fraction": 0.6965517401695251,
"avg_line_length": 35.25,
"blob_id": "f8a0bb0b7390a4d2496f5971c6f188078206ab96",
"content_id": "566b555ad06ca6bf6c94af42b40c522efdab8241",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 145,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 4,
"path": "/client/docker.sh",
"repo_name": "oohx/bert_sequence_label",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env bash\ndocker run -t --rm -p 8501:8501 \\\n-v \"$(pwd)/saved_model:/models/docker_test\" \\\n-e MODEL_NAME=docker_test tensorflow/serving\n"
},
{
"alpha_fraction": 0.577464759349823,
"alphanum_fraction": 0.6478873491287231,
"avg_line_length": 27.600000381469727,
"blob_id": "661f2e4708ebb47bbfb60fdecfefa14f2ff6ba97",
"content_id": "3c8ceebfb4008871332845432af64f2916ff9ed5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 142,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 5,
"path": "/inference.sh",
"repo_name": "oohx/bert_sequence_label",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env bash\npython inference.py \\\n --init_checkpoint chinese_L-12_H-768_A-12 \\\n --max_seq_len 128 \\\n --output output/word_cut"
},
{
"alpha_fraction": 0.5085632801055908,
"alphanum_fraction": 0.5159372091293335,
"avg_line_length": 34.03333282470703,
"blob_id": "619d5198d3176af91797101286383fe57acc6079",
"content_id": "44cf709eb0f9c077d11ba31b77ab76d02b36b231",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4298,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 120,
"path": "/utils/loader.py",
"repo_name": "oohx/bert_sequence_label",
"src_encoding": "UTF-8",
"text": "# !/user/bin/env python\n# -*- encoding: utf-8 -*-\n# @Author : Seven\n# @Function: 数据预处理脚本\nimport codecs\nimport numpy as np\nfrom .utils import zero_digits, create_dico, create_mapping\nfrom bert import tokenization\nfrom .utils import convert_single_example\n\ntokenizer = tokenization.FullTokenizer(vocab_file='chinese_L-12_H-768_A-12/vocab.txt',\n do_lower_case=True)\n\n\ndef load_sentences(path, zeros):\n \"\"\"\n Load sentences. A line must contain at least a word and its tag.\n Sentences are separated by empty lines.\n \"\"\"\n sentences = []\n sentence = []\n num = 0\n for line in codecs.open(path, 'r', 'utf8'):\n num += 1\n line = zero_digits(line.rstrip()) if zeros else line.rstrip()\n # print(list(line))\n if not line:\n if len(sentence) > 0:\n if 'DOCSTART' not in sentence[0][0]:\n sentences.append(sentence)\n sentence = []\n else:\n if line[0] == \" \":\n line = \"$\" + line[1:]\n word = line.split()\n else:\n word = line.split()\n \n assert len(word) >= 2, print([word[0]])\n sentence.append(word)\n if len(sentence) > 0:\n if 'DOCSTART' not in sentence[0][0]:\n sentences.append(sentence)\n return sentences\n\n\ndef prepare_dataset(sentences, max_seq_length, tag_to_id, train=True):\n \"\"\"\n Prepare the dataset. Return a list of lists of dictionaries containing:\n - word indexes\n - word char indexes\n - tag indexes\n \"\"\"\n data = []\n for s in sentences:\n string = [w[0].strip() for w in s]\n\n char_line = ' '.join(string) # 使用空格把汉字拼起来\n text = tokenization.convert_to_unicode(char_line)\n\n if train:\n tags = [w[-1] for w in s]\n else:\n tags = ['O' for _ in string]\n\n labels = ' '.join(tags) # 使用空格把标签拼起来\n labels = tokenization.convert_to_unicode(labels)\n\n ids, mask, segment_ids, label_ids = convert_single_example(char_line=text,\n tag_to_id=tag_to_id,\n max_seq_length=max_seq_length,\n tokenizer=tokenizer,\n label_line=labels)\n data.append([string, segment_ids, ids, mask, label_ids])\n\n return data\n\n\ndef tag_mapping(sentences):\n \"\"\"\n Create a dictionary and a mapping of tags, sorted by frequency.\n \"\"\"\n tags = [[char[-1] for char in s] for s in sentences]\n\n dico = create_dico(tags)\n dico['[SEP]'] = len(dico) + 1\n dico['[CLS]'] = len(dico) + 2\n\n tag_to_id, id_to_tag = create_mapping(dico)\n print(dico)\n print(\"Found %i unique named entity tags\" % len(dico))\n return dico, tag_to_id, id_to_tag\n\n\ndef input_from_line(line, max_seq_length, tag_to_id):\n \"\"\"\n Take sentence data and return an input for\n the training or the evaluation function.\n \"\"\"\n string = [w[0].strip() for w in line]\n # chars = [char_to_id[f(w) if f(w) in char_to_id else '<UNK>']\n # for w in string]\n char_line = ' '.join(string) # 使用空格把汉字拼起来\n text = tokenization.convert_to_unicode(char_line)\n\n tags = ['[CLS]' for _ in string]\n\n labels = ' '.join(tags) # 使用空格把标签拼起来\n labels = tokenization.convert_to_unicode(labels)\n\n ids, mask, segment_ids, label_ids = convert_single_example(char_line=text,\n tag_to_id=tag_to_id,\n max_seq_length=max_seq_length,\n tokenizer=tokenizer,\n label_line=labels)\n segment_ids = np.reshape(segment_ids, (1, max_seq_length))\n ids = np.reshape(ids, (1, max_seq_length))\n mask = np.reshape(mask, (1, max_seq_length))\n label_ids = np.reshape(label_ids, (1, max_seq_length))\n return [string, segment_ids, ids, mask, label_ids]\n"
},
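loader.py builds BERT inputs by wrapping each sentence in [CLS]/[SEP], masking, and padding to max_seq_length. The packing logic in isolation, with token ids 101/102 assumed for [CLS]/[SEP] as in the standard BERT vocab:

```python
def pack_inputs(token_ids, max_len, cls_id=101, sep_id=102, pad_id=0):
    """Pack a single sequence into BERT inputs: [CLS] tokens [SEP] + padding."""
    ids = [cls_id] + token_ids[: max_len - 2] + [sep_id]
    mask = [1] * len(ids)                     # 1 marks real tokens
    ids += [pad_id] * (max_len - len(ids))    # pad to the fixed length
    mask += [0] * (max_len - len(mask))
    segment_ids = [0] * max_len               # single-sentence task: one segment
    return ids, mask, segment_ids


print(pack_inputs([7, 8, 9], max_len=8))
# -> ([101, 7, 8, 9, 102, 0, 0, 0], [1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0])
```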
{
"alpha_fraction": 0.5393431782722473,
"alphanum_fraction": 0.5461686849594116,
"avg_line_length": 27.130434036254883,
"blob_id": "7592931590018ba542c20a53d7fe4e098c6ef3c1",
"content_id": "5185be5935b49f7dbd55886c78a2aa784216f0b0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7863,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 276,
"path": "/utils/utils.py",
"repo_name": "oohx/bert_sequence_label",
"src_encoding": "UTF-8",
"text": "# !/user/bin/env python\n# -*- encoding: utf-8 -*-\n# @Author : Seven\n# @Function: 数据处理相关工具脚本\nimport codecs\nimport json\nimport logging\nimport math\nimport os\nimport random\nimport re\nfrom .conlleval import return_report\n\n\ndef bio_to_json(string, tags_list):\n tags = []\n for _ in tags_list:\n if _ != \"O\":\n tags.append(_+\"-cut\")\n else:\n tags.append(_)\n item = {\"string\": string, \"entities\": []}\n entity_name = \"\"\n entity_start = 0\n iCount = 0\n entity_tag = \"\"\n\n for c_idx in range(len(tags)):\n c, tag = string[c_idx], tags[c_idx]\n if c_idx < len(tags) - 1:\n tag_next = tags[c_idx + 1]\n else:\n tag_next = ''\n\n if tag[0] == 'B':\n entity_tag = tag[2:]\n entity_name = c\n entity_start = iCount\n if tag_next[2:] != entity_tag:\n item[\"entities\"].append({\"word\": c, \"start\": iCount, \"end\": iCount + 1, \"type\": tag[2:]})\n elif tag[0] == \"I\":\n if tag[2:] != tags[c_idx - 1][2:] or tags[c_idx - 1][2:] == 'O':\n tags[c_idx] = 'O'\n pass\n else:\n entity_name = entity_name + c\n if tag_next[2:] != entity_tag:\n item[\"entities\"].append(\n {\"word\": entity_name, \"start\": entity_start, \"end\": iCount + 1, \"type\": entity_tag})\n entity_name = ''\n iCount += 1\n return item\n\n\ndef bmes_to_json(string, tags):\n \"\"\"\n 中文分词\n \"\"\"\n item = {\"string\": string, \"entities\": []}\n entity_name = \"\"\n\n for c_idx in range(len(tags)):\n c, tag = string[c_idx], tags[c_idx]\n if tag == 'B':\n entity_name = c\n elif tag == 'M' or tag == 'E':\n entity_name += c\n else:\n item['entities'].append(c)\n if tag == 'E':\n item['entities'].append(entity_name)\n return item\n\n\nclass BatchManager(object):\n\n def __init__(self, data, batch_size):\n self.batch_data = self.sort_and_pad(data, batch_size)\n self.len_data = len(self.batch_data)\n\n def sort_and_pad(self, data, batch_size):\n num_batch = int(math.ceil(len(data) / batch_size))\n sorted_data = sorted(data, key=lambda x: len(x[0]))\n batch_data = list()\n for i in range(num_batch):\n batch_data.append(self.arrange_batch(sorted_data[int(i * batch_size): int((i + 1) * batch_size)]))\n return batch_data\n\n @staticmethod\n def arrange_batch(batch):\n \"\"\"\n 把batch整理为一个[5, ]的数组\n :param batch:\n :return:\n \"\"\"\n strings = []\n segment_ids = []\n chars = []\n mask = []\n targets = []\n for string, seg_ids, char, msk, target in batch:\n strings.append(string)\n segment_ids.append(seg_ids)\n chars.append(char)\n mask.append(msk)\n targets.append(target)\n return [strings, segment_ids, chars, mask, targets]\n\n @staticmethod\n def pad_data(data):\n strings = []\n chars = []\n segs = []\n targets = []\n max_length = max([len(sentence[0]) for sentence in data])\n for line in data:\n string, segment_ids, char, seg, target = line\n padding = [0] * (max_length - len(string))\n strings.append(string + padding)\n chars.append(char + padding)\n segs.append(seg + padding)\n targets.append(target + padding)\n return [strings, chars, segs, targets]\n\n def iter_batch(self, shuffle=False):\n if shuffle:\n random.shuffle(self.batch_data)\n for idx in range(self.len_data):\n yield self.batch_data[idx]\n\n\ndef convert_single_example(char_line, tag_to_id, max_seq_length, tokenizer, label_line):\n \"\"\"\n 将一个样本进行分析,然后将字转化为id, 标签转化为lb\n \"\"\"\n text_list = char_line.split(' ')\n label_list = label_line.split(' ')\n\n tokens = []\n labels = []\n for i, word in enumerate(text_list):\n token = tokenizer.tokenize(word)\n\n tokens.extend(token)\n label_1 = label_list[i]\n for m in range(len(token)):\n if m == 0:\n 
labels.append(label_1)\n else:\n labels.append(\"X\")\n\n # 序列截断\n if len(tokens) >= max_seq_length - 1:\n tokens = tokens[0:(max_seq_length - 2)]\n labels = labels[0:(max_seq_length - 2)]\n ntokens = []\n segment_ids = []\n label_ids = []\n ntokens.append(\"[CLS]\")\n segment_ids.append(0)\n # append(\"O\") or append(\"[CLS]\") not sure!\n label_ids.append(tag_to_id[\"[CLS]\"])\n for i, token in enumerate(tokens):\n ntokens.append(token)\n segment_ids.append(0)\n label_ids.append(tag_to_id[labels[i]])\n ntokens.append(\"[SEP]\")\n segment_ids.append(0)\n # append(\"O\") or append(\"[SEP]\") not sure!\n label_ids.append(tag_to_id[\"[SEP]\"])\n input_ids = tokenizer.convert_tokens_to_ids(ntokens)\n input_mask = [1] * len(input_ids)\n\n # padding\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n # we don't concerned about it!\n label_ids.append(0)\n ntokens.append(\"**NULL**\")\n\n return input_ids, input_mask, segment_ids, label_ids\n\n\ndef get_logger(log_file):\n logger = logging.getLogger(log_file)\n logger.setLevel(logging.DEBUG)\n fh = logging.FileHandler(log_file)\n fh.setLevel(logging.DEBUG)\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n formatter = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\n ch.setFormatter(formatter)\n fh.setFormatter(formatter)\n logger.addHandler(ch)\n logger.addHandler(fh)\n logger.removeHandler(ch)\n logger.removeHandler(fh)\n return logger\n\n\ndef test_ner(results, path):\n \"\"\"\n Run perl script to evaluate model\n \"\"\"\n output_file = os.path.join(path, \"ner_predict.utf8\")\n with codecs.open(output_file, \"w\", 'utf8') as f:\n to_write = []\n for block in results:\n for line in block:\n to_write.append(line + \"\\n\")\n to_write.append(\"\\n\")\n\n f.writelines(to_write)\n eval_lines = return_report(output_file)\n return eval_lines\n\n\ndef print_config(config, logger):\n \"\"\"\n Print configuration of the model\n \"\"\"\n for k, v in config.items():\n logger.info(\"{}:\\t{}\".format(k.ljust(15), v))\n\n\ndef save_config(config, config_file):\n \"\"\"\n Save configuration of the model\n parameters are stored in json format\n \"\"\"\n with open(config_file, \"w\", encoding=\"utf8\") as f:\n json.dump(config, f, ensure_ascii=False, indent=4)\n\n\ndef load_config(config_file):\n \"\"\"\n Load configuration of the model\n parameters are stored in json format\n \"\"\"\n with open(config_file, encoding=\"utf8\") as f:\n return json.load(f)\n\n\ndef zero_digits(s):\n \"\"\"\n Replace every digit in a string by a zero.\n \"\"\"\n return re.sub(r'\\d', '0', s)\n\n\ndef create_dico(item_list):\n \"\"\"\n Create a dictionary of items from a list of list of items.\n \"\"\"\n assert type(item_list) is list\n dico = {}\n for items in item_list:\n for item in items:\n if item not in dico:\n dico[item] = 1\n else:\n dico[item] += 1\n return dico\n\n\ndef create_mapping(dico):\n \"\"\"\n Create a mapping (item to ID / ID to item) from a dictionary.\n Items are ordered by decreasing frequency.\n \"\"\"\n sorted_items = sorted(dico.items(), key=lambda x: (-x[1], x[0]))\n id_to_item = {i: v[0] for i, v in enumerate(sorted_items)}\n item_to_id = {v: k for k, v in id_to_item.items()}\n return item_to_id, id_to_item\n\n"
},
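utils.py's bio_to_json walks a BIO tag sequence and collapses B/I runs into entity spans. The core idea in a few lines, simplified to bare B/I/O tags as used by the word-segmentation task:

```python
def bio_spans(chars, tags):
    """Collapse B/I tag runs over characters into (word, start, end) spans."""
    spans, start = [], None
    for i, tag in enumerate(tags + ["O"]):          # sentinel flushes the last span
        if tag.startswith("B"):
            if start is not None:
                spans.append(("".join(chars[start:i]), start, i))
            start = i
        elif not tag.startswith("I") and start is not None:
            spans.append(("".join(chars[start:i]), start, i))
            start = None
    return spans


print(bio_spans(list("中国你好成都"), ["B", "I", "O", "O", "B", "I"]))
# -> [('中国', 0, 2), ('成都', 4, 6)]
```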
{
"alpha_fraction": 0.654587984085083,
"alphanum_fraction": 0.6797194480895996,
"avg_line_length": 44.02631759643555,
"blob_id": "c9c05e529975af3411b60a2a72c3614948f87bfe",
"content_id": "a4d20b8192fbe6418030a1376c3e7541c9e140a6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1721,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 38,
"path": "/config.py",
"repo_name": "oohx/bert_sequence_label",
"src_encoding": "UTF-8",
"text": "# !/user/bin/env python\n# -*- encoding: utf-8 -*-\n# @Author : Seven\n# @Function: 超参数设置\nimport tensorflow as tf\n\n\ndef get_flags():\n flags = tf.flags\n flags.DEFINE_boolean(\"train\", False, \"Wither train the model\")\n # configurations for the model\n flags.DEFINE_integer(\"batch_size\", 64, \"batch size\")\n flags.DEFINE_integer(\"seg_dim\", 200, \"Embedding size for segmentation, 0 if not used\")\n flags.DEFINE_integer(\"char_dim\", 100, \"Embedding size for characters\")\n flags.DEFINE_integer(\"lstm_dim\", 256, \"Num of hidden units in LSTM\")\n flags.DEFINE_string(\"tag_schema\", \"iob\", \"tagging schema iobes or iob\")\n\n # configurations for training\n flags.DEFINE_float(\"clip\", 5, \"Gradient clip\")\n flags.DEFINE_float(\"dropout\", 0.5, \"Dropout rate\")\n flags.DEFINE_float(\"lr\", 0.001, \"Initial learning rate\")\n flags.DEFINE_string(\"optimizer\", \"adam\", \"Optimizer for training\")\n flags.DEFINE_boolean(\"zeros\", False, \"Wither replace digits with zero\")\n\n flags.DEFINE_integer(\"max_seq_len\", 256, \"max sequence length for bert\")\n flags.DEFINE_integer(\"max_epoch\", 100, \"maximum training epochs\")\n flags.DEFINE_integer(\"steps_check\", 100, \"steps per checkpoint\")\n\n flags.DEFINE_string(\"output\", \"output\", \"Path to save model\")\n flags.DEFINE_string(\"data\", \"data\", \"Path for train data\")\n flags.DEFINE_string(\"init_checkpoint\", \"chinese_L-12_H-768_A-12\", \"Path to save model\")\n\n FLAGS = tf.flags.FLAGS\n assert FLAGS.clip < 5.1, \"gradient clip should't be too much\"\n assert 0 <= FLAGS.dropout < 1, \"dropout rate between 0 and 1\"\n assert FLAGS.lr > 0, \"learning rate must larger than zero\"\n assert FLAGS.optimizer in [\"adam\", \"sgd\", \"adagrad\"]\n return flags\n"
},
{
"alpha_fraction": 0.5546961426734924,
"alphanum_fraction": 0.5674822330474854,
"avg_line_length": 30.361385345458984,
"blob_id": "237ce562ebbff3b3290597e93b71fccde789bd59",
"content_id": "4dd26377ab46926f92afd9d4cc3523c1e0d3a5eb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6481,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 202,
"path": "/client/client.py",
"repo_name": "oohx/bert_sequence_label",
"src_encoding": "UTF-8",
"text": "# !/user/bin/env python\n# -*- encoding: utf-8 -*-\n# @Author : Seven\n# @Function: 序列标注客户端\nimport json\nimport pickle\nimport requests\nimport numpy as np\nimport tokenization\n\n\ndef convert_single_example(char_line, max_seq_length, tokenizer):\n \"\"\"\n 将一个样本进行分析,然后将字转化为id, 标签转化为lb\n \"\"\"\n text_list = char_line.split(' ')\n\n tokens = []\n for i, word in enumerate(text_list):\n token = tokenizer.tokenize(word)\n\n tokens.extend(token)\n\n # 序列截断\n if len(tokens) >= max_seq_length - 1:\n tokens = tokens[0:(max_seq_length - 2)]\n ntokens = []\n segment_ids = []\n ntokens.append(\"[CLS]\")\n segment_ids.append(0)\n for i, token in enumerate(tokens):\n ntokens.append(token)\n segment_ids.append(0)\n ntokens.append(\"[SEP]\")\n segment_ids.append(0)\n input_ids = tokenizer.convert_tokens_to_ids(ntokens)\n input_mask = [1] * len(input_ids)\n\n # padding\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n ntokens.append(\"**NULL**\")\n\n return input_ids, input_mask, segment_ids\n\n\ndef input_from_line(line, max_seq_length, tag_to_id):\n \"\"\"\n Take sentence data and return an input for\n the training or the evaluation function.\n \"\"\"\n string = [w[0].strip() for w in line]\n char_line = ' '.join(string) # 使用空格把汉字拼起来\n text = tokenization.convert_to_unicode(char_line)\n\n tags = ['[CLS]' for _ in string]\n\n labels = ' '.join(tags) # 使用空格把标签拼起来\n labels = tokenization.convert_to_unicode(labels)\n tokenizer = tokenization.FullTokenizer(vocab_file='./vocab/vocab.txt',\n do_lower_case=True)\n ids, mask, segment_ids = convert_single_example(char_line=text,\n max_seq_length=max_seq_length,\n tokenizer=tokenizer)\n segment_ids = np.reshape(segment_ids, (1, max_seq_length))\n ids = np.reshape(ids, (1, max_seq_length))\n mask = np.reshape(mask, (1, max_seq_length))\n return [string, segment_ids, ids, mask]\n\n\ndef viterbi_decode(score, transition_params):\n \"\"\"Decode the highest scoring sequence of tags outside of TensorFlow.\n\n This should only be used at test time.\n\n Args:\n score: A [seq_len, num_tags] matrix of unary potentials.\n transition_params: A [num_tags, num_tags] matrix of binary potentials.\n\n Returns:\n viterbi: A [seq_len] list of integers containing the highest scoring tag\n indices.\n viterbi_score: A float containing the score for the Viterbi sequence.\n \"\"\"\n trellis = np.zeros_like(score)\n backpointers = np.zeros_like(score, dtype=np.int32)\n trellis[0] = score[0]\n\n for t in range(1, score.shape[0]):\n v = np.expand_dims(trellis[t - 1], 1) + transition_params\n trellis[t] = score[t] + np.max(v, 0)\n backpointers[t] = np.argmax(v, 0)\n\n viterbi = [np.argmax(trellis[-1])]\n for bp in reversed(backpointers[1:]):\n viterbi.append(bp[viterbi[-1]])\n viterbi.reverse()\n\n viterbi_score = np.max(trellis[-1])\n return viterbi, viterbi_score\n\n\ndef decode(logits, lengths, matrix, tag_to_id):\n \"\"\"\n :param logits: [batch_size, num_steps, num_tags]float32, logits\n :param lengths: [batch_size]int32, real length of each sequence\n :param matrix: transaction matrix for inference\n :return:\n \"\"\"\n # inference final labels usa viterbi Algorithm\n paths = []\n small = -1000.0\n start = np.asarray([[small] * len(tag_to_id) + [0]])\n for score, length in zip(logits, lengths):\n score = score[:length]\n pad = small * np.ones([length, 1])\n logits = np.concatenate([score, pad], axis=1)\n logits = np.concatenate([start, logits], axis=0)\n path, _ = viterbi_decode(logits, matrix)\n 
paths.append(path[1:])\n return paths\n\n\ndef up_data(features, tag_to_id):\n \"\"\"\n 模型预测数据并返回预测结果\n :param features:\n :return:\n \"\"\"\n string, segment_ids, chars, mask = features\n payload = {\n \"instances\": [{'input_ids': chars.tolist()[0],\n \"input_mask\": mask.tolist()[0],\n \"segment_ids\": segment_ids.tolist()[0],\n \"dropout\": 1.0}]\n }\n r = requests.post('http://localhost:8501/v1/models/docker_test:predict', json=payload)\n # print(r.content.decode('utf-8'))\n pred_text = json.loads(r.content.decode('utf-8'))['predictions']\n scores = np.array(pred_text)\n length = len(string) + 1\n trans = np.load(\"vocab/trans.npy\")\n batch_paths = decode(scores, [length], trans, tag_to_id)\n return batch_paths\n\n\ndef bio_to_json(string, tags_list):\n tags = []\n for _ in tags_list:\n if _ != \"O\":\n tags.append(_+\"-cut\")\n else:\n tags.append(_)\n item = {\"string\": string, \"entities\": []}\n entity_name = \"\"\n entity_start = 0\n iCount = 0\n entity_tag = \"\"\n\n for c_idx in range(len(tags)):\n c, tag = string[c_idx], tags[c_idx]\n if c_idx < len(tags) - 1:\n tag_next = tags[c_idx + 1]\n else:\n tag_next = ''\n\n if tag[0] == 'B':\n entity_tag = tag[2:]\n entity_name = c\n entity_start = iCount\n if tag_next[2:] != entity_tag:\n item[\"entities\"].append({\"word\": c, \"start\": iCount, \"end\": iCount + 1, \"type\": tag[2:]})\n elif tag[0] == \"I\":\n if tag[2:] != tags[c_idx - 1][2:] or tags[c_idx - 1][2:] == 'O':\n tags[c_idx] = 'O'\n pass\n else:\n entity_name = entity_name + c\n if tag_next[2:] != entity_tag:\n item[\"entities\"].append(\n {\"word\": entity_name, \"start\": entity_start, \"end\": iCount + 1, \"type\": entity_tag})\n entity_name = ''\n iCount += 1\n return item\n\n\ndef get_result(msg: str):\n with open(\"vocab/maps.pkl\", \"rb\") as f:\n tag_to_id, id_to_tag = pickle.load(f)\n\n data = input_from_line(msg, max_seq_length=128, tag_to_id=tag_to_id)\n result = up_data(data, tag_to_id)\n tags = [id_to_tag[idx] for idx in result[0]]\n return bio_to_json(data[0], tags[1:-1])\n\n\nif __name__ == '__main__':\n text = \"中国你好成都。\"\n res = get_result(text)\n print(res)\n"
},
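client.py posts feature tensors to the TensorFlow Serving container started by docker.sh (REST port 8501). A stripped-down request showing just the REST contract; the feature values are zero-filled dummies of the right shape, and the call assumes the container is already running:

```python
import json

import requests

MAX_LEN = 128
payload = {
    "instances": [{
        "input_ids":   [0] * MAX_LEN,   # dummy token ids
        "input_mask":  [0] * MAX_LEN,
        "segment_ids": [0] * MAX_LEN,
        "dropout": 1.0,                 # inference: keep all activations
    }]
}
r = requests.post(
    "http://localhost:8501/v1/models/docker_test:predict", json=payload)
scores = json.loads(r.content.decode("utf-8"))["predictions"]
print(len(scores))  # one score matrix per instance
```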
{
"alpha_fraction": 0.6079968810081482,
"alphanum_fraction": 0.6150529384613037,
"avg_line_length": 37.07462692260742,
"blob_id": "6d176573ab789e73fe8bf4c21e708dc36ddb013d",
"content_id": "4e7fd9b000f4c0cf2fa1e25aded6bd2afdb4843e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5124,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 134,
"path": "/train.py",
"repo_name": "oohx/bert_sequence_label",
"src_encoding": "UTF-8",
"text": "# !/user/bin/env python\n# -*- encoding: utf-8 -*-\n# @Author : Seven\n# @Function: 模型训练和测试模型脚本\nimport os\nimport pickle\nfrom collections import OrderedDict\nimport numpy as np\nimport tensorflow as tf\nfrom utils.utils import BatchManager\nfrom utils.loader import load_sentences, prepare_dataset, tag_mapping\nfrom model import Model\nfrom utils.utils import get_logger\nfrom tools import create_model, save_model\nfrom utils.utils import print_config, save_config, load_config, test_ner\nfrom config import get_flags\nFLAGS = get_flags().FLAGS\n\n\n# config for the model\ndef config_model(tag_to_id):\n config = OrderedDict()\n config[\"num_tags\"] = len(tag_to_id)\n config[\"lstm_dim\"] = FLAGS.lstm_dim\n config[\"batch_size\"] = FLAGS.batch_size\n config['max_seq_len'] = FLAGS.max_seq_len\n config[\"clip\"] = FLAGS.clip\n config[\"dropout_keep\"] = 1.0 - FLAGS.dropout\n config[\"optimizer\"] = FLAGS.optimizer\n config[\"lr\"] = FLAGS.lr\n config[\"tag_schema\"] = FLAGS.tag_schema\n config[\"zeros\"] = FLAGS.zeros\n config[\"init_checkpoint\"] = FLAGS.init_checkpoint\n return config\n\n\ndef evaluate(sess, model, name, data, id_to_tag, logger):\n logger.info(\"evaluate:{}\".format(name))\n ner_results = model.evaluate(sess, data, id_to_tag)\n eval_lines = test_ner(ner_results, FLAGS.output)\n for line in eval_lines:\n logger.info(line)\n f1 = float(eval_lines[1].strip().split()[-1])\n if name == \"dev\":\n best_test_f1 = model.best_dev_f1.eval()\n if f1 > best_test_f1:\n tf.assign(model.best_dev_f1, f1).eval()\n logger.info(\"new best dev f1 score:{:>.3f}\".format(f1))\n return f1 > best_test_f1\n elif name == \"test\":\n best_test_f1 = model.best_test_f1.eval()\n if f1 > best_test_f1:\n tf.assign(model.best_test_f1, f1).eval()\n logger.info(\"new best test f1 score:{:>.3f}\".format(f1))\n return f1 > best_test_f1\n\n\ndef train():\n tf.io.gfile.mkdir(FLAGS.output)\n log_path = os.path.join(FLAGS.output, 'model.log')\n logger = get_logger(log_path)\n # load data sets\n train_sentences = load_sentences(os.path.join(FLAGS.data, \"train.txt\"), FLAGS.zeros)\n dev_sentences = load_sentences(os.path.join(FLAGS.data, \"dev.txt\"), FLAGS.zeros)\n test_sentences = load_sentences(os.path.join(FLAGS.data, \"test.txt\"), FLAGS.zeros)\n # create maps if not exist\n map_file = os.path.join(FLAGS.output, 'maps.pkl')\n if not os.path.isfile(map_file):\n # Create a dictionary and a mapping for tags\n _t, tag_to_id, id_to_tag = tag_mapping(train_sentences)\n with open(map_file, \"wb\") as f:\n pickle.dump([tag_to_id, id_to_tag], f)\n else:\n with open(map_file, \"rb\") as f:\n tag_to_id, id_to_tag = pickle.load(f)\n\n # prepare data, get a collection of list containing index\n train_data = prepare_dataset(\n train_sentences, FLAGS.max_seq_len, tag_to_id\n )\n dev_data = prepare_dataset(\n dev_sentences, FLAGS.max_seq_len, tag_to_id\n )\n test_data = prepare_dataset(\n test_sentences, FLAGS.max_seq_len, tag_to_id\n )\n logger.info(\"%i / %i / %i sentences in train / dev / test.\" % (\n len(train_data), len(dev_data), len(test_data)))\n train_manager = BatchManager(train_data, FLAGS.batch_size)\n dev_manager = BatchManager(dev_data, FLAGS.batch_size)\n test_manager = BatchManager(test_data, FLAGS.batch_size)\n # make path for store log and model if not exist\n config_file = os.path.join(FLAGS.output, 'config.json')\n if os.path.isfile(config_file):\n config = load_config(config_file)\n else:\n config = config_model(tag_to_id)\n save_config(config, config_file)\n print_config(config, logger)\n # limit 
GPU memory\n tf_config = tf.ConfigProto()\n tf_config.gpu_options.allow_growth = True\n steps_per_epoch = train_manager.len_data\n with tf.Session(config=tf_config) as sess:\n model = create_model(sess, Model, os.path.join(FLAGS.output, 'checkpoint'), config, logger)\n\n logger.info(\"start training\")\n loss = []\n for i in range(100):\n for batch in train_manager.iter_batch(shuffle=True):\n step, batch_loss = model.run_step(sess, True, batch)\n\n loss.append(batch_loss)\n if step % FLAGS.steps_check == 0:\n iteration = step // steps_per_epoch + 1\n logger.info(\"iteration:{} step:{}/{}, \"\n \"NER loss:{:>9.6f}\".format(iteration, step % steps_per_epoch,\n steps_per_epoch, np.mean(loss)))\n loss = []\n\n best = evaluate(sess, model, \"dev\", dev_manager, id_to_tag, logger)\n if best:\n save_model(sess, model, os.path.join(FLAGS.output, 'checkpoint'), logger, global_steps=step)\n evaluate(sess, model, \"test\", test_manager, id_to_tag, logger)\n\n\ndef main(_):\n FLAGS.train = True\n train()\n\n\nif __name__ == \"__main__\":\n os.environ['CUDA_VISIBLE_DEVICES'] = '0'\n tf.app.run(main)\n"
},
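train.py feeds the model through the BatchManager from utils.py, which sorts samples by length before slicing batches so that sequences of similar length land together and padding stays minimal. The same idea in miniature:

```python
import math
import random


def make_batches(samples, batch_size):
    """Sort by length, then slice into fixed-size batches."""
    ordered = sorted(samples, key=len)
    n_batches = math.ceil(len(ordered) / batch_size)
    return [ordered[i * batch_size:(i + 1) * batch_size] for i in range(n_batches)]


data = ["ab", "a", "abcd", "abc", "abcde"]
batches = make_batches(data, batch_size=2)
random.shuffle(batches)  # shuffle batch order per epoch, as iter_batch does
print(batches)
```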
{
"alpha_fraction": 0.4766214191913605,
"alphanum_fraction": 0.5985419750213623,
"avg_line_length": 15.784810066223145,
"blob_id": "93cb1b64479e802dce6e037257c8081a8bfcda00",
"content_id": "bd9f0f10f5ac3ad3a578294b0682dd97d5e51fb1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 4982,
"license_type": "no_license",
"max_line_length": 162,
"num_lines": 237,
"path": "/README.md",
"repo_name": "oohx/bert_sequence_label",
"src_encoding": "UTF-8",
"text": "## 基于BERT-BLSTM-CRF 序列标注模型\n> 本项目基于谷歌官方的BERT:https://github.com/google-research/bert \n> 对BERT进行迁移学习,扩展BLSTM-CRF使模型支持序列标注任务 \n> 1. 中文分词\n> 2. 词性标注\n> 3. 命名实体识别\n> 4. 语义角色标注\n\n### 环境配置\n- #### miniconda安装\n\n ```shell\n $ wget -c http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh\n $ chmod +x Miniconda-latest-Linux-x86_64.sh\n $ ./Miniconda-latest-Linux-x86_64.sh\n ```\n\n > 关闭终端,重新打开终端\n\n- #### 项目运行环境配置\n\n ```shell\n $ conda create -n BERT python=3.6 cudatoolkit==10.0.130 cudnn==7.6.4\n $ source activate BERT\n $ pip install -r requirements.txt\n ```\n\n### 数据准备\n> 1. 数据按照如下格式进行整理\n> 2. 句子间用换行分开 \n> 3. 分割训练数据、测试数据和开发数据(一般为7:2:1)\n#### 中文分词数据\n```text\n义\tB\n诊\tI\n当\tB\n天\tI\n共\tB\n有\tI\n3\tB\n0\tI\n0\tI\n多\tO\n名\tO\n群\tB\n众\tI\n接\tB\n受\tI\n义\tB\n诊\tI\n,\tO\n并\tO\n直\tB\n接\tI\n受\tB\n益\tI\n。\tO\n```\n\n#### 词性标注数据\n```text\n一 B-m\n、 B-w\n给 B-v\n予 I-v\n辽 B-ns\n宁 I-ns\n队 B-n\n和 B-c\n山 B-ns\n东 I-ns\n队 B-n\n严 B-a\n重 I-a\n警 B-v\n告 I-v\n。 B-w\n```\n\n#### 命名实体识别数据\n```text\n千 O\n鹤 O\n金 O\n是 O\n在 O\n国 O\n家 O\n推 O\n动 O\n下 O\n。 O\n\n有 O\n大 B-NAME\n象 I-NAME\n联 I-NAME\n盟 I-NAME\n发 O\n行 O\n的 O\n我 O\n们 O\n中 O\n国 O\n首 O\n个 O\n承 O\n认 O\n的 O\n数 B-DESC\n字 I-DESC\n货 I-DESC\n币 I-DESC\n。 O\n```\n\n#### 语义角色标注数据\n```text\n奥 B-A0\n巴 I-A0\n马 I-A0\n昨 B-TMP\n晚 I-TMP\n在 O\n白 B-LOC\n宫 I-LOC\n发 O\n表 O\n了 O\n演 B-A1\n说 I-A1\n```\n\n### 训练模型\n#### 下载预训练模型\n[BERT-Base, Chinese](https://storage.googleapis.com/bert_models/2018_11_03/chinese_L-12_H-768_A-12.zip)\n> 解压到根目录\n#### 参数设置\n```bash\n#!/usr/bin/env bash\npython train.py \\\n --train true \\\n --data data \\ # 训练数据保存路径 \n --init_checkpoint chinese_L-12_H-768_A-12 \\ # 预训练模型保存路径\n --max_seq_len 128 \\ # 句子最长长度\n --max_epoch 1 \\ # 模型训练轮数\n --batch_size 64 \\ # 模型迭代一次训练的句子数\n --dropout 0.5 \\ # 防止过拟合的神经元随机失活率\n --lr 0.001 \\ # 学习率\n --optimizer adam \\ # 模型优化器\n --output output # 训练模型及日志保存路径 \n```\n#### 开始训练\n```bash\n./train.sh\n```\n#### 训练日志\n```text\nI0117 11:15:14.439068 139934521526016 train.py:38] evaluate:dev\nI0117 11:15:15.194412 139934521526016 train.py:42] processed 6788 tokens with 3818 phrases; found: 3798 phrases; correct: 3384.\n\nI0117 11:15:15.194531 139934521526016 train.py:42] accuracy: 91.28%; precision: 89.10%; recall: 88.63%; FB1: 88.87\n\nI0117 11:15:15.194565 139934521526016 train.py:42] : precision: 89.10%; recall: 88.63%; FB1: 88.87 3798\n\nI0117 11:15:15.195159 139934521526016 train.py:38] evaluate:test\nI0117 11:15:15.936123 139934521526016 train.py:42] processed 6661 tokens with 3682 phrases; found: 3649 phrases; correct: 3442.\n\nI0117 11:15:15.936207 139934521526016 train.py:42] accuracy: 95.48%; precision: 94.33%; recall: 93.48%; FB1: 93.90\n\nI0117 11:15:15.936244 139934521526016 train.py:42] : precision: 94.33%; recall: 93.48%; FB1: 93.90 3649\n\n```\n\n### 模型推理\n#### 参数设置\n```bash\n#!/usr/bin/env bash\npython inference.py \\\n --init_checkpoint chinese_L-12_H-768_A-12 \\ # 获取vocab.txt\n --max_seq_len 128 \\ # 句子最长长度\n --output output # 模型路径\n```\n#### 开始推理\n```bash\n./inference.sh\n```\n#### 推理日志\n> 以分词为例\n```text\n中国你好成都\n{'string': ['中', '国', '你', '好', '成', '都'], 'entities': [{'word': '中国', 'start': 0, 'end': 2, 'type': 'cut'}, {'word': '成都', 'start': 4, 'end': 6, 'type': 'cut'}]}\n```\n#### saved_model格式模型导出\n> 推理结束后,会自动导出saved_model格式模型,用于部署。\n### 模型部署\n#### 模型文件\n```text\nsaved_model\n└── 000000\n ├── saved_model.pb\n └── variables\n ├── variables.data-00000-of-00001\n └── variables.index\n\n2 directories, 3 files\n```\n#### docker部署\n```bash\n#!/usr/bin/env bash\ndocker run -t --rm -p 8501:8501 \\\n-v 
\"$(pwd)/saved_model:/models/docker_test\" \\\n-e MODEL_NAME=docker_test tensorflow/serving\n```\n#### 客户端依赖文件\n```text\nvocab\n├── maps.pkl # 标签对应表\n├── trans.npy # 转移概率矩阵\n└── vocab.txt # 词对应表\n\n0 directories, 3 files\n\n```\n#### 客户端测试\n```python\nfrom client import get_result\ntext = \"中国你好成都。\"\nres = get_result(text)\nprint(res)\n```\n\n#### 测试效果\n```text\n{'string': ['中', '国', '你', '好', '成', '都'], 'entities': [{'word': '中国', 'start': 0, 'end': 2, 'type': 'cut'}, {'word': '成都', 'start': 4, 'end': 6, 'type': 'cut'}]}\n```\n"
},
{
"alpha_fraction": 0.5636363625526428,
"alphanum_fraction": 0.7090908885002136,
"avg_line_length": 21,
"blob_id": "9e374900ca99b47b0164c1ff790a5001815b507c",
"content_id": "556d3cc3b0f7fe3dffb27523807b9f558434208f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 110,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 5,
"path": "/requirements.txt",
"repo_name": "oohx/bert_sequence_label",
"src_encoding": "UTF-8",
"text": "-i https://pypi.tuna.tsinghua.edu.cn/simple\nnumpy==1.16.4\ntensorflow-gpu==1.14.0\nsix==1.12.0\nrequests==2.22.0\n"
},
{
"alpha_fraction": 0.6996663212776184,
"alphanum_fraction": 0.7007786631584167,
"avg_line_length": 33.57692337036133,
"blob_id": "f1fbc04a3d8956168e2d09c22c9be21d9ecc63bc",
"content_id": "0f436c6701baca1050251ef15dce4dcfd84aa82e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 915,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 26,
"path": "/tools.py",
"repo_name": "oohx/bert_sequence_label",
"src_encoding": "UTF-8",
"text": "# !/user/bin/env python\n# -*- encoding: utf-8 -*-\n# @Author : Seven\n# @Function: 模型保存恢复脚本\nimport tensorflow as tf\nimport os\n\n\ndef create_model(session, Model_class, path, config, logger):\n # create model, reuse parameters if exists\n model = Model_class(config)\n\n ckpt = tf.train.get_checkpoint_state(path)\n if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):\n logger.info(\"Reading model parameters from %s\" % ckpt.model_checkpoint_path)\n model.saver.restore(session, ckpt.model_checkpoint_path)\n else:\n logger.info(\"Created model with fresh parameters.\")\n session.run(tf.global_variables_initializer())\n return model\n\n\ndef save_model(sess, model, path, logger, global_steps):\n checkpoint_path = os.path.join(path, \"ner.checkpoint\")\n model.saver.save(sess, checkpoint_path, global_step=global_steps)\n logger.info(\"model saved\")\n"
},
{
"alpha_fraction": 0.5308641791343689,
"alphanum_fraction": 0.5895061492919922,
"avg_line_length": 25.91666603088379,
"blob_id": "c062665854e61935a329c8434a74da91d4291f6c",
"content_id": "9cc07370f51efe833ce437d0ac29af38203c3896",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 356,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 12,
"path": "/train.sh",
"repo_name": "oohx/bert_sequence_label",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env bash\npython train.py \\\n --train true \\\n --data /media/seven/data/datasets/自然语言处理数据集/中文分词数据集/data \\\n --init_checkpoint chinese_L-12_H-768_A-12 \\\n --max_seq_len 128 \\\n --max_epoch 1 \\\n --batch_size 64 \\\n --dropout 0.5 \\\n --lr 0.001 \\\n --optimizer adam \\\n --output output/word_cut\n\n"
}
] | 12 |
Lukeskyward/Secao4
|
https://github.com/Lukeskyward/Secao4
|
067bbd8569d4bfbf4580fb92526ef46c30b7de22
|
ee4b345095b2078fb28dda852d27587a4b18aeb7
|
dd36e524b2bd7ed9fc1d930c0b07dc1cf7161434
|
refs/heads/main
| 2023-03-13T18:11:40.610524 | 2021-03-08T06:08:38 | 2021-03-08T06:08:38 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5626911520957947,
"alphanum_fraction": 0.6483180522918701,
"avg_line_length": 28.727272033691406,
"blob_id": "09a22687c2b0dd725aa977303440c113ba5dbb0a",
"content_id": "76ed54ecbb0be468878cfee0d289d6a780b8ed2b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 331,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 11,
"path": "/exercicio47.py",
"repo_name": "Lukeskyward/Secao4",
"src_encoding": "UTF-8",
"text": "print(\"Digite um numero de 1000 a 9999\")\nnum = int(input())\n\nwhile num < 1000 or num > 9999:\n print(\"Digite um numero de 1000 a 9999\")\n num = int(input())\n\nprint(f\"primeiro digito é {str(num)[0]}\")\nprint(f\"segundo digito é {str(num)[1]}\")\nprint(f\"terceiro digito é {str(num)[2]}\")\nprint(f\"quarto digito é {str(num)[3]}\")\n"
},
{
"alpha_fraction": 0.6546762585639954,
"alphanum_fraction": 0.6618704795837402,
"avg_line_length": 26.799999237060547,
"blob_id": "ee8c6807d5dc16153b25a85ccf0ada833c9c488c",
"content_id": "14c74dfd68c022bf528256237a30acfec71b7258",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 141,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 5,
"path": "/exercicio34.py",
"repo_name": "Lukeskyward/Secao4",
"src_encoding": "UTF-8",
"text": "import math\nprint(\"Digite o valor do raio de um circulo\")\nrai = int(input())\narea = math.pi * rai ** 2\nprint(f\"O valor da área é: {area}\")\n"
},
{
"alpha_fraction": 0.6762589812278748,
"alphanum_fraction": 0.6978417038917542,
"avg_line_length": 33.75,
"blob_id": "a1d797f76465841113b56b65c00455918b264c05",
"content_id": "d1414777e22f8a9f3840594b6e4f97a44d3a1e56",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 144,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 4,
"path": "/exercicio38.py",
"repo_name": "Lukeskyward/Secao4",
"src_encoding": "UTF-8",
"text": "print(\"Digite o salário do funcionário\")\nsal = float(input())\nnov_sal = sal * 1.25\nprint(f\"O novo salário do funcionário é de: {nov_sal}\")\n"
},
{
"alpha_fraction": 0.5620915293693542,
"alphanum_fraction": 0.6405228972434998,
"avg_line_length": 37.25,
"blob_id": "2afd312335394482039e4f9685b721131895d8a7",
"content_id": "21beecc85e57c63b13b7acffffab82e390cbe64f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 159,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 4,
"path": "/exercicio24.py",
"repo_name": "Lukeskyward/Secao4",
"src_encoding": "UTF-8",
"text": "print(\"Digite o valor da área em m2\")\nm2 = float(input())\nac = m2 * 0.000247\nprint(f\"O valor da área em m² é: {m2}, e o valor da área em acres é: {ac}\")\n"
},
{
"alpha_fraction": 0.6074073910713196,
"alphanum_fraction": 0.6518518328666687,
"avg_line_length": 32.75,
"blob_id": "62a56eca94a7322cd6c358419f34facea994e871",
"content_id": "601e4f4ee0eafdcfe5caa78f88ebe6bcbf6523d1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 137,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 4,
"path": "/exercicio19.py",
"repo_name": "Lukeskyward/Secao4",
"src_encoding": "UTF-8",
"text": "print(\"Digite um valor em litros\")\nlt = float(input())\nm3 = lt / 1000\nprint(f\"O valor em metros cubicos é: {m3}, e em litros é: {lt}\")\n"
},
{
"alpha_fraction": 0.692307710647583,
"alphanum_fraction": 0.692307710647583,
"avg_line_length": 20.66666603088379,
"blob_id": "6b9f2dd34ffd47a2a1b962176c26349b7114dcb2",
"content_id": "e7a9d5cb0ab386864e767cab57a666fadfdbd327",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 65,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 3,
"path": "/exercicio1.py",
"repo_name": "Lukeskyward/Secao4",
"src_encoding": "UTF-8",
"text": "print('Digite um numero inteiro:')\nnum = int(input())\nprint(num)\n"
},
{
"alpha_fraction": 0.5620437860488892,
"alphanum_fraction": 0.6350364685058594,
"avg_line_length": 21.83333396911621,
"blob_id": "70f8299dbcb229ccbcba35551183c63db5d13982",
"content_id": "485f9a59187ce8ba735b7ea60a418bf385a3a767",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 275,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 12,
"path": "/exercicio28.py",
"repo_name": "Lukeskyward/Secao4",
"src_encoding": "UTF-8",
"text": "print(\"Digite abaixo 3 valores\")\nprint(\"Valor 1\")\nval1 = float(input())\nprint(\"Valor 2\")\nval2 = float(input())\nprint(\"Valor 3\")\nval3 = float(input())\nqv1 = val1 ** 2\nqv2 = val2 ** 2\nqv3 = val3 ** 2\nsoma = qv1 + qv2 + qv3\nprint(f\"A soma do quadrado dos 3 valores é: {soma}\")\n"
},
{
"alpha_fraction": 0.6141732335090637,
"alphanum_fraction": 0.6377952694892883,
"avg_line_length": 30.75,
"blob_id": "f22862bfdb6487f5efea0a7834ad73f672c7ac56",
"content_id": "34fc555cd5db703f9ea63193aa100398ea64d2d9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 131,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 4,
"path": "/exercicio13.py",
"repo_name": "Lukeskyward/Secao4",
"src_encoding": "UTF-8",
"text": "print(\"Digite uma distância em KM\")\nkm = int(input())\nmil = km / 1.61\nprint(f\"A distância em milhas é: {mil} e em km é: {km}\")\n"
},
{
"alpha_fraction": 0.7033638954162598,
"alphanum_fraction": 0.7033638954162598,
"avg_line_length": 26.25,
"blob_id": "1f25fbff121e3eedcc21689cece6144989dd4130",
"content_id": "ee7fb1fa403d52282ef6bdbdb43158ff75eb1305",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 329,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 12,
"path": "/exercicio53.py",
"repo_name": "Lukeskyward/Secao4",
"src_encoding": "UTF-8",
"text": "print(\"Digite o comprimento do terreno em metros\")\ncomp = float(input())\nprint(\"Digite a largura do terreno em metros\")\nlarg = float(input())\nprint(\"Digite o valor do preço do metro da tela para cercar o terreno\")\nprec = float(input())\n\ntTela = comp * larg\n\ntPrec = tTela * prec\n\nprint(f\"O valor a ser gasto é de R$: {tPrec}\")\n"
},
{
"alpha_fraction": 0.6338028311729431,
"alphanum_fraction": 0.6549295783042908,
"avg_line_length": 34.5,
"blob_id": "f83211c1007a4f8f9055f340095d8683be71db2f",
"content_id": "a1b8d45a3ffd8f8936f4c3846499996876461875",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 144,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 4,
"path": "/exercicio21.py",
"repo_name": "Lukeskyward/Secao4",
"src_encoding": "UTF-8",
"text": "print(\"Digite o valor em libras\")\nlib = float(input())\nkg = lib * 0.45\nprint(f\"O Valor em libras é: {lib}, e o valor em quilogramas é: {kg}\")\n"
},
{
"alpha_fraction": 0.6438356041908264,
"alphanum_fraction": 0.664383590221405,
"avg_line_length": 35.5,
"blob_id": "0eb54d521429f9b16601c04e50819f96c9c4682b",
"content_id": "798afe66fba97a0ceb8fb62e9b71393011bd8d57",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 148,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 4,
"path": "/exercicio20.py",
"repo_name": "Lukeskyward/Secao4",
"src_encoding": "UTF-8",
"text": "print(\"Digite o valor em quilogramas\")\nkg = float(input())\nlib = kg / 0.45\nprint(f\"O Valor em libras é: {lib}, e o valor em quilogramas é: {kg}\")\n"
},
{
"alpha_fraction": 0.6477987170219421,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 30.799999237060547,
"blob_id": "a022f85efebd3539e8c2641c1c34d6b3335090a8",
"content_id": "25795f14accdcd85cf6c068d57ceabe69fbf2ef8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 162,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 5,
"path": "/exercicio15.py",
"repo_name": "Lukeskyward/Secao4",
"src_encoding": "UTF-8",
"text": "import math\nprint(\"Digite um ângulo em radianos\")\nrad = float(input())\ngra = rad * 180 / math.pi\nprint(f\"O Angulo em graus é: {gra} , e em radianos é: {rad}\")\n"
},
{
"alpha_fraction": 0.7037037014961243,
"alphanum_fraction": 0.7037037014961243,
"avg_line_length": 20.600000381469727,
"blob_id": "ddb1043b1fd52220cec9d06c41589a5ea74f5f6d",
"content_id": "62a6a4590f17dfc30984a0fba749bb159151249b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 217,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 10,
"path": "/exercicio44.py",
"repo_name": "Lukeskyward/Secao4",
"src_encoding": "UTF-8",
"text": "import math\n\nprint(\"Digite a altura desejada do degrau\")\ndegrau = int(input())\nprint(\"Digite a altura desejada da escada\")\nalt = int(input())\n\naltd = math.ceil(alt / degrau)\n\nprint(f\"A altura desejada é de: {altd}\")\n"
},
{
"alpha_fraction": 0.6410256624221802,
"alphanum_fraction": 0.6602563858032227,
"avg_line_length": 30.200000762939453,
"blob_id": "d6b9dbb87eabcd2ff9871e133420a4084a5f0b03",
"content_id": "7e2aad81e1e12ffc4279c92dce4c653a6ed8f5a1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 159,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 5,
"path": "/exercicio14.py",
"repo_name": "Lukeskyward/Secao4",
"src_encoding": "UTF-8",
"text": "import math\nprint(\"Digite um ângulo em graus\")\ngra = float(input())\nrad = gra * math.pi / 180\nprint(f\"O Angulo em graus é: {gra} , e em radianos é: {rad}\")\n"
},
{
"alpha_fraction": 0.6326530575752258,
"alphanum_fraction": 0.646258533000946,
"avg_line_length": 35.75,
"blob_id": "0ec94b4e0845f2ef36be765d409eef0b1b025db8",
"content_id": "67e1ff3e3508a35e43f3d9d37bc0b85dc3880bce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 149,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 4,
"path": "/exercicio10.py",
"repo_name": "Lukeskyward/Secao4",
"src_encoding": "UTF-8",
"text": "print(\"Digite a velocidade atual em km/h\")\nkmh = int(input())\nms = kmh / 3.6\nprint(f\"A velocidade em KM/H é: {kmh} e a velocidade em m/s é: {ms}\")\n"
},
{
"alpha_fraction": 0.6524063944816589,
"alphanum_fraction": 0.6577540040016174,
"avg_line_length": 19.77777862548828,
"blob_id": "4f96e4eb6902011650af48133464695129f86823",
"content_id": "976c94d85f1322785456f4fb6cb097abb0cbd579",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 188,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 9,
"path": "/exercicio36.py",
"repo_name": "Lukeskyward/Secao4",
"src_encoding": "UTF-8",
"text": "import math\n\nprint(\"Digite a altura do cilindro\")\nalt = float(input())\nprint(\"Digite o raio do cilindro\")\nrai = float(input())\nvol = math.pi * rai ** 2 * alt\n\nprint(f\"O volume é: {vol}\")\n"
},
{
"alpha_fraction": 0.645714282989502,
"alphanum_fraction": 0.6857143044471741,
"avg_line_length": 42.75,
"blob_id": "a6dab68b2282d7aff3e7d1a96dd71441aa07c6b7",
"content_id": "60f38e02913d5857cae20eb2c8f466b9a34c667d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 177,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 4,
"path": "/exercicio6.py",
"repo_name": "Lukeskyward/Secao4",
"src_encoding": "UTF-8",
"text": "print(\"Digite uma temperatura em Celsius\")\ncel = float(input())\nfah = cel * (9.0/5.0)+32.0\nprint(f\"A temperatura em Celsius é: {cel}, e a temperatura em Fahrenheit é: {fah}\")\n"
},
{
"alpha_fraction": 0.6691176295280457,
"alphanum_fraction": 0.6948529481887817,
"avg_line_length": 29.22222137451172,
"blob_id": "d2ebab31081cf2842a5ec96092b348ba2864f31d",
"content_id": "bf67efa154032ba132909726ea5def0cc0ae679f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 274,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 9,
"path": "/exercicio3.py",
"repo_name": "Lukeskyward/Secao4",
"src_encoding": "UTF-8",
"text": "print(\"Digite três valores inteiros abaixo.\")\nprint(\"Digite o primeiro valor\")\nnum1 = int(input())\nprint(\"Digite o segundo valor\")\nnum2 = int(input())\nprint(\"Digite o terceiro valor\")\nnum3 = int(input())\nsoma = num1 + num2 + num3\nprint(f\" A soma dos 3 valores é: {soma}\")\n"
},
{
"alpha_fraction": 0.6687116622924805,
"alphanum_fraction": 0.699386477470398,
"avg_line_length": 39.75,
"blob_id": "37e3f8b3dfbe1ddb670a8ee4462c6c877369a197",
"content_id": "22b023d04491b651763542b288fb62181fb81563",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 165,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 4,
"path": "/exercicio9.py",
"repo_name": "Lukeskyward/Secao4",
"src_encoding": "UTF-8",
"text": "print(\"Digite uma temperatura em Celsius\")\ncel = float(input())\nkel = cel + 273.15\nprint(f\"A temperatura em Celsius é: {cel}, e a temperatura em kelvin é: {kel}\")\n"
},
{
"alpha_fraction": 0.6666666865348816,
"alphanum_fraction": 0.6975308656692505,
"avg_line_length": 39.5,
"blob_id": "7ee1e43a2ebd1392b5401489379114f0f8e458c4",
"content_id": "0af0b16718cae7764e21effe6144e69a37cac200",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 164,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 4,
"path": "/exercicio8.py",
"repo_name": "Lukeskyward/Secao4",
"src_encoding": "UTF-8",
"text": "print(\"Digite uma temperatura em Kelvin\")\nkel = float(input())\ncel = kel - 273.15\nprint(f\"A temperatura em Celsius é: {cel}, e a temperatura em kelvin é: {kel}\")\n"
},
{
"alpha_fraction": 0.5309734344482422,
"alphanum_fraction": 0.6548672318458557,
"avg_line_length": 24.11111068725586,
"blob_id": "eb0b66e3cdf77057710f39f1b1450790688c4c38",
"content_id": "724d433ff12aef33c7c14e123ef72e989f25e68c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 229,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 9,
"path": "/exercicio39.py",
"repo_name": "Lukeskyward/Secao4",
"src_encoding": "UTF-8",
"text": "premio = 780000.00\n\ngan1 = premio * 0.46\ngan2 = premio * 0.32\ngan3 = premio - gan1 - gan2\n\nprint(f\"O Ganhador 1 receberá: {gan1:.2f}.\")\nprint(f\"O Ganhador 2 receberá: {gan2:.2f}.\")\nprint(f\"O Ganhador 3 receberá: {gan3:.2f}.\")\n"
},
{
"alpha_fraction": 0.6478873491287231,
"alphanum_fraction": 0.6690140962600708,
"avg_line_length": 34.5,
"blob_id": "0ed45f7372898e26aba09435ad9dc0e8c5777104",
"content_id": "52696c251c20a8ab5d68dba2800e5d77f66fdcd2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 144,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 4,
"path": "/exercicio17.py",
"repo_name": "Lukeskyward/Secao4",
"src_encoding": "UTF-8",
"text": "print(\"Digite um valor em centimetros\")\ncm = float(input())\npol = cm / 2.54\nprint(f\"O valor em polegadas é: {pol}, e em centimetros é: {cm}\")\n"
},
{
"alpha_fraction": 0.5870967507362366,
"alphanum_fraction": 0.6451612710952759,
"avg_line_length": 37.75,
"blob_id": "e8651dfb6a4c9bbfba59aa0442ce78375aeda00b",
"content_id": "dece6b03e9c4db64ebbe89ad32b99d8a23b793d3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 161,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 4,
"path": "/exercicio25.py",
"repo_name": "Lukeskyward/Secao4",
"src_encoding": "UTF-8",
"text": "print(\"Digite o valor da área em Acres\")\nac = float(input())\nm2 = ac * 4048.58\nprint(f\"O valor da área em m² é: {m2}, e o valor da área em acres é: {ac}\")\n"
},
{
"alpha_fraction": 0.6666666865348816,
"alphanum_fraction": 0.6837607026100159,
"avg_line_length": 38,
"blob_id": "308d2ca5439d44b3fbc742031f067e0145a56f6a",
"content_id": "e4f24950aafc2372d2567ff0568f013f4318d935",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 119,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 3,
"path": "/exercicio31.py",
"repo_name": "Lukeskyward/Secao4",
"src_encoding": "UTF-8",
"text": "print(\"Digite um numero inteiro\")\nnum = int(input())\nprint(f\"O seu antecessor é {num-1} e o seu sucessor é {num+1}\")\n"
},
{
"alpha_fraction": 0.5587044358253479,
"alphanum_fraction": 0.6356275081634521,
"avg_line_length": 21.454545974731445,
"blob_id": "2611281c3a2c3e0576a4f06cd25f7aec591722c5",
"content_id": "cc62805028574b4f978952dbb18a4d153c1722e8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 249,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 11,
"path": "/exercicio46.py",
"repo_name": "Lukeskyward/Secao4",
"src_encoding": "UTF-8",
"text": "print(\"Digite um numero de 100 a 999\")\nnum = int(input())\n\nwhile num < 100 or num > 999:\n print(\"Digite um numero de 100 a 999\")\n num = int(input())\n\nnum = str(num)\nreverse = num[::-1]\n\nprint(f\" O numero digitado ao contrário é: {reverse}\")\n"
},
{
"alpha_fraction": 0.6428571343421936,
"alphanum_fraction": 0.668067216873169,
"avg_line_length": 25.44444465637207,
"blob_id": "324fd71e776306892555919a1fbb2c166ac4debb",
"content_id": "8061af9ac22727a703ab78667dad85e53ba1fe2c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 241,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 9,
"path": "/exercicio48.py",
"repo_name": "Lukeskyward/Secao4",
"src_encoding": "UTF-8",
"text": "print(\"Digite o valor desejado em segundos\")\nsec = int(input())\n\nmini = sec / 60\nhor = sec / 3600\n\nprint(f\"O valor em segundos é: {sec} segundos \")\nprint(f\"O valor em minutos é: {min} minutos \")\nprint(f\"O valor em horas é: {hor} horas \")\n"
},
{
"alpha_fraction": 0.6196318864822388,
"alphanum_fraction": 0.6687116622924805,
"avg_line_length": 39.75,
"blob_id": "53592999ef4421c38fe65251d3d22525832032da",
"content_id": "fab8c774c6907e5791b872ec5b8df7b6920b25f9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 169,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 4,
"path": "/exercicio27.py",
"repo_name": "Lukeskyward/Secao4",
"src_encoding": "UTF-8",
"text": "print(\"Digite um valor de área em hectares\")\nhec = float(input())\nm2 = hec * 10000\nprint(f\"O valor da área em m² é: {m2}, e o valor da área em hectares é: {hec}\")\n"
},
{
"alpha_fraction": 0.6453900933265686,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 34.25,
"blob_id": "632e8f4a4007f2f7553c9f46fd0ca6ecda5d7136",
"content_id": "7f66fab4ad587e5734372320713fa0a8ef31bc97",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 143,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 4,
"path": "/exercicio16.py",
"repo_name": "Lukeskyward/Secao4",
"src_encoding": "UTF-8",
"text": "print(\"Digite um valor em polegadas\")\npol = float(input())\ncm = pol * 2.54\nprint(f\"O valor em polegadas é: {pol}, e em centimetros é: {cm}\")\n"
},
{
"alpha_fraction": 0.6403326392173767,
"alphanum_fraction": 0.6964656710624695,
"avg_line_length": 42.727272033691406,
"blob_id": "a2077300a2d1a2c7236b2f0e06c55858bca422ec",
"content_id": "6cf26dea0049dfd6912710fc8ab190e360e1995e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 487,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 11,
"path": "/exercicio43.py",
"repo_name": "Lukeskyward/Secao4",
"src_encoding": "UTF-8",
"text": "print(\"Digite o valor da mercadoria\")\nmerc = float(input())\nmerc10 = merc * 0.90\nmerc3x = merc / 3\ncomiv = merc10 * 0.05\ncomip = merc * 0.05\n\nprint(f\"Com 10% de desconto o valor da mercadoria será de: {merc10:.2f}\")\nprint(f\"O valor de cada parcela em 3x sem juros é de: {merc3x:.2f}\")\nprint(f\"O valor da comissão do vendedor sobre a venda a vista no valor de 5% é de: {comiv:.2f}\")\nprint(f\"O valor da comissão do vendedor sobre a venda parcelada no valor de 5% é de: {comip:.2f}\")\n"
},
{
"alpha_fraction": 0.694915235042572,
"alphanum_fraction": 0.7062146663665771,
"avg_line_length": 21.125,
"blob_id": "781ddfe8c836185cfad8e3ab12f2f5212c529375",
"content_id": "46108b017799addbe9ad95755e45c939c2aa49b7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 178,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 8,
"path": "/exercicio45.py",
"repo_name": "Lukeskyward/Secao4",
"src_encoding": "UTF-8",
"text": "print(\"Digite uma letra em maisculo:\")\nletMai = input()\nordAsciiMa = ord(letMai)\n\nordAsciiMi = ord(letMai) + 32\nLetMi = chr(ordAsciiMi)\n\nprint(f\"A letra em misculo é: {LetMi}\")\n"
},
{
"alpha_fraction": 0.6287878751754761,
"alphanum_fraction": 0.6515151262283325,
"avg_line_length": 32,
"blob_id": "5503742cc12be3c0bc261db3f65651f33288eb08",
"content_id": "0be5d5eebfafbb789278dc90de564fae13dbcd90",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 136,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 4,
"path": "/exercicio12.py",
"repo_name": "Lukeskyward/Secao4",
"src_encoding": "UTF-8",
"text": "print(\"Digite uma distância em milhas\")\nmil = int(input())\nkm = 1.61 * mil\nprint(f\"A distância em milhas é: {mil} e em km é: {km}\")\n"
},
{
"alpha_fraction": 0.5968992114067078,
"alphanum_fraction": 0.6589147448539734,
"avg_line_length": 24.799999237060547,
"blob_id": "25116bb4d31d9a094a8ab11b75435890f1b86356",
"content_id": "35e01ce38a7c93bc2185cc6a9e278c0275ceb8d7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 131,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 5,
"path": "/exercicio40.py",
"repo_name": "Lukeskyward/Secao4",
"src_encoding": "UTF-8",
"text": "print(\"Numeros de dias trabalhados\")\ndias = int(input())\nliq = (30.00 * dias)*0.92\n\nprint(f\"O Salário recebido é de: {liq:.2f}\")\n"
},
{
"alpha_fraction": 0.5932203531265259,
"alphanum_fraction": 0.6553672552108765,
"avg_line_length": 24.285715103149414,
"blob_id": "103a721f8d33e4631c12588e32424f5f19133525",
"content_id": "6a6d5fa5fe835b6f69417f03faf76e1d4a5feed9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 181,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 7,
"path": "/exercicio42.py",
"repo_name": "Lukeskyward/Secao4",
"src_encoding": "UTF-8",
"text": "print(\"Digite abaixo o salário base do funcionário\")\nsbase = float(input())\ns1 = (sbase * 0.05)\ns2 = (sbase * 0.93)\nsfinal = s1 + s2\n\nprint(f\"Salário final é de: {sfinal:.2f}\")\n"
},
{
"alpha_fraction": 0.48239436745643616,
"alphanum_fraction": 0.5457746386528015,
"avg_line_length": 30.55555534362793,
"blob_id": "b49219f41f049ee78151cad196a761ab5f57e0d6",
"content_id": "1588b8079aa8decbf3e80698f4d4978bd4cf4abd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 575,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 18,
"path": "/exercicio51.py",
"repo_name": "Lukeskyward/Secao4",
"src_encoding": "UTF-8",
"text": "x1 = int(input(f'Informe x1 => '))\ny1 = int(input(f'Informe y1 => '))\n\n# elevar à uma fracão corresponde a tirar a raiz tendo como com indice\n# número indicado no denominador\ndistancia = int((((0 - x1) ** 2) + ((0 - y1) ** 2)) ** (1 / 2))\n\nprint(f'A distância entre a origem (0,0) e o ponto de'\n f' coordenadas ({x1},{y1})é: {distancia}')\n#\n# Demonstrando o cálculo\n\nprint(f'x2 - x1 = {0 - x1}, \\n'\n f'y2 - y1 = {0 - y1}')\nprint(f'(x2 - x1) ** 2 = {(0 - x1) ** 2}, \\n'\n f'(y2 - y1) ** 2 = {(0 - y1) ** 2}')\n\nprint(f'Distância da origem = {distancia}')\n"
},
{
"alpha_fraction": 0.6388888955116272,
"alphanum_fraction": 0.6597222089767456,
"avg_line_length": 35,
"blob_id": "423647ad10550f4521b89948728d5844a54d8b24",
"content_id": "d6fa763128f7cd616aca0aa8afffb9453e18bec6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 146,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 4,
"path": "/exercicio22.py",
"repo_name": "Lukeskyward/Secao4",
"src_encoding": "UTF-8",
"text": "print(\"Digite o valor de comprimento em jardas\")\njar = float(input())\nmt = 0.91 * jar\nprint(f\"O valor em jardas é: {jar}, e em metros é: {mt}\")\n"
},
{
"alpha_fraction": 0.6666666865348816,
"alphanum_fraction": 0.6811594367027283,
"avg_line_length": 33.5,
"blob_id": "abc2d21308abd8b00bb54d947668f3e4e6043b30",
"content_id": "b140527a86f8b7cf4ddf31c731280d1ff415635c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 140,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 4,
"path": "/exercicio33.py",
"repo_name": "Lukeskyward/Secao4",
"src_encoding": "UTF-8",
"text": "print(\"Digite o tamanho de um lado de um quadrado em metros\")\nlad = int(input())\narea = lad ** 2\nprint(f\"a area do quadrado é: {area}m²\")\n"
},
{
"alpha_fraction": 0.6178107857704163,
"alphanum_fraction": 0.6734693646430969,
"avg_line_length": 23.5,
"blob_id": "4e593e32d9c5714424ef3eb39a69ba15d278550d",
"content_id": "69196d7d4a005fa3fa671d027ed960aff07edbae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 542,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 22,
"path": "/exercicio52.py",
"repo_name": "Lukeskyward/Secao4",
"src_encoding": "UTF-8",
"text": "print(\"Informe o valor do premio:\")\npremio = float(input())\nprint(\"Informe o valor do apostador 1\")\nap1 = float(input())\nprint(\"Informe o valor do apostador 2\")\nap2 = float(input())\nprint(\"Informe o valor do apostador 3\")\nap3 = float(input())\n\naTotal = ap1 + ap2 + ap3\n\npap1 = ap1 / aTotal\npap2 = ap2 / aTotal\npap3 = ap3 / aTotal\n\nval1 = pap1 * premio\nval2 = pap2 * premio\nval3 = pap3 * premio\n\nprint(f\"O premio do apostador 1 é: {val1:.2f}\")\nprint(f\"O premio do apostador 2 é: {val2:.2f}\")\nprint(f\"O premio do apostador 3 é: {val3:.2f}\")\n"
},
{
"alpha_fraction": 0.625,
"alphanum_fraction": 0.65625,
"avg_line_length": 23,
"blob_id": "cf05f60f04f78d0cc49345902dde698b96b3d92d",
"content_id": "0c2c453974d7382454d268d0dce8cefd078c54da",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 194,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 8,
"path": "/exercicio32.py",
"repo_name": "Lukeskyward/Secao4",
"src_encoding": "UTF-8",
"text": "print(\"Digite um numero inteiro\")\nnum = int(input())\n\nresult = (num * 3) + 1\nresult1 = (num * 2) - 1\n\nprint(f\"Sucessor do seu triplo é {result}\")\nprint(f\"Atencessor do seu dobro é {result1}\")\n"
},
{
"alpha_fraction": 0.6577777862548828,
"alphanum_fraction": 0.6755555272102356,
"avg_line_length": 27.125,
"blob_id": "74b75dc288df9c5991aab7ae2b966a8f3857e3eb",
"content_id": "1a9df92e7a44f5b5c20d64a388883adb40f373d3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 227,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 8,
"path": "/exercicio41.py",
"repo_name": "Lukeskyward/Secao4",
"src_encoding": "UTF-8",
"text": "print(\"Digite o valor da hora de trabalho em reais, incluindo os centavos\")\nht = float(input())\nprint(\"Digite o numero de horas trabalhadas no mês\")\nhm = int(input())\n\nsal = (ht * hm) * 1.10\n\nprint(f\"O valor é de {sal:.2f}\")\n"
},
{
"alpha_fraction": 0.6153846383094788,
"alphanum_fraction": 0.6643356680870056,
"avg_line_length": 34.75,
"blob_id": "8cc25040e94cb363da35a526e92f26766a290c07",
"content_id": "9d1471698cd52f4663515333ce92fe0e6e44fd32",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 145,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 4,
"path": "/exercicio18.py",
"repo_name": "Lukeskyward/Secao4",
"src_encoding": "UTF-8",
"text": "print(\"Digite um valor em metros cubicos\")\nm3 = float(input())\nlt = 1000 * m3\nprint(f\"O valor em metros cubicos é: {m3}, e em litros é: {lt}\")\n"
},
{
"alpha_fraction": 0.6363636255264282,
"alphanum_fraction": 0.6573426723480225,
"avg_line_length": 34.75,
"blob_id": "592486e18f55f720623776ca8bde06bc48379b22",
"content_id": "839ab39fd31244804e5bf90e21649ad019817991",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 145,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 4,
"path": "/exercicio23.py",
"repo_name": "Lukeskyward/Secao4",
"src_encoding": "UTF-8",
"text": "print(\"Digite o valor de comprimento em metros\")\nmt = float(input())\njar = mt / 0.91\nprint(f\"O valor em jardas é: {jar}, e em metros é: {mt}\")\n"
},
{
"alpha_fraction": 0.5977859497070312,
"alphanum_fraction": 0.6457564830780029,
"avg_line_length": 17.066667556762695,
"blob_id": "41e188c157661d4144a92da1b3492c73fd659ab9",
"content_id": "14de31427db0af0470fad5ebebc0b0ffc8467994",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 274,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 15,
"path": "/exercicio29.py",
"repo_name": "Lukeskyward/Secao4",
"src_encoding": "UTF-8",
"text": "print(\"Digite abaixo quadro notas\")\nprint(\"Nota1\")\nnt1 = float(input())\nprint(\"Nota2\")\nnt2 = float(input())\nprint(\"Nota3\")\nnt3 = float(input())\nprint(\"Nota4\")\nnt4 = float(input())\n\n\nmedia = (nt1 + nt2 + nt3 + nt4) / 4\n\n\nprint(f\"A média aritimética das notas é: {media}\")\n"
},
{
"alpha_fraction": 0.6275861859321594,
"alphanum_fraction": 0.6413792967796326,
"avg_line_length": 35.25,
"blob_id": "60d71a324a66f08bcb83d9d78135f9d3ed95f01c",
"content_id": "e539d6d1470efb90c4a5a1872cd7bc2738e05cac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 147,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 4,
"path": "/exercicio11.py",
"repo_name": "Lukeskyward/Secao4",
"src_encoding": "UTF-8",
"text": "print(\"Digite a velocidade atual em m/s\")\nms = int(input())\nkmh = ms * 3.6\nprint(f\"A velocidade em KM/H é: {kmh} e a velocidade em m/s é: {ms}\")\n"
},
{
"alpha_fraction": 0.7175925970077515,
"alphanum_fraction": 0.7175925970077515,
"avg_line_length": 35,
"blob_id": "877bdadaef29ea33f13a7926fcd56b00939eb36b",
"content_id": "9f73abfaf38c6d946309130e16a43196c3720770",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 218,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 6,
"path": "/exercicio30.py",
"repo_name": "Lukeskyward/Secao4",
"src_encoding": "UTF-8",
"text": "print(\"Digite o valor a ser depositado em reais\")\nreal = float(input())\nprint(\"Digite a atual cotação em dolar\")\ndol = float(input())\ncot = real / dol\nprint(f\"O valor digitado em reais, corresponde a {cot} dolares\")\n"
},
{
"alpha_fraction": 0.6013513803482056,
"alphanum_fraction": 0.6441441178321838,
"avg_line_length": 21.200000762939453,
"blob_id": "7bad879345939cf35547a4c8e8c4d019b2ea6cc6",
"content_id": "03ef42493ee36673fe1dc745971300431bbb963f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 448,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 20,
"path": "/exercicio49.py",
"repo_name": "Lukeskyward/Secao4",
"src_encoding": "UTF-8",
"text": "print(\"Digite a hora os minutos e os segundos abaixo\")\nprint(\"hora(s)\")\nhor = int(input())\nprint(\"minuto(s)\")\nmi = int(input())\nprint(\"segundos(s)\")\nseg = int(input())\nprint(\"Informe a duração em segundos: \")\ndur = int(input())\n\nhor = hor * 3600\nmi = mi * 600\ntSec = hor + mi + seg + dur\n\nhFinal = int(tSec / 3600)\nrest = tSec % 3600\nmiFinal = int(rest / 60)\nrest = rest % 60\n\nprint(f\"A experiência irá terminar as: {hFinal}:{miFinal}:{rest}\")\n"
},
{
"alpha_fraction": 0.6239316463470459,
"alphanum_fraction": 0.632478654384613,
"avg_line_length": 22.399999618530273,
"blob_id": "3bbccf60d7efc9b893bf5a4a0721294e558d5d64",
"content_id": "4490a73f7c35b9c084c30bba3369f8af3d42babe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 235,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 10,
"path": "/exercicio35.py",
"repo_name": "Lukeskyward/Secao4",
"src_encoding": "UTF-8",
"text": "import math\n\nprint(\"Considerando 'a' e 'b' os catetos de um triangulo\")\nprint(\"Digite o cateto 'A'\")\na = float(input())\nprint(\"Digite o cateto 'B'\")\nb = float(input())\nhip = math.sqrt(a ** 2 + b ** 2)\n\nprint(f\"a hipotenusa é: {hip}\")\n"
},
{
"alpha_fraction": 0.6804123520851135,
"alphanum_fraction": 0.6804123520851135,
"avg_line_length": 23.25,
"blob_id": "f65f1717b4dd64ec138614ce1fcf186fc9202427",
"content_id": "7e3f2e7955e0311df01551fa02531e83accc66be",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 197,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 8,
"path": "/exercicio50.py",
"repo_name": "Lukeskyward/Secao4",
"src_encoding": "UTF-8",
"text": "print(\"Digite a idade que você terá no ultimo dia do ano atual\")\nidade = int(input())\nprint(\"Digite o ano atual\")\nano = int(input())\n\nnasc = ano - idade\n\nprint(f\"O ano de nascimento é: {nasc}\")\n"
},
{
"alpha_fraction": 0.5859872698783875,
"alphanum_fraction": 0.6496815085411072,
"avg_line_length": 38.25,
"blob_id": "0e88f11f34fecabd69caecef2832dd9c74c5862d",
"content_id": "3e7c5c9e5fd1f499628c0efaa2d055a78fd57d32",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 164,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 4,
"path": "/exercicio26.py",
"repo_name": "Lukeskyward/Secao4",
"src_encoding": "UTF-8",
"text": "print(\"Digite um valor de área em m²\")\nm2 = float(input())\nhec = m2 * 0.0001\nprint(f\"O valor da área em m² é: {m2}, e o valor da área em hectares é: {hec}\")\n"
},
{
"alpha_fraction": 0.6516854166984558,
"alphanum_fraction": 0.6853932738304138,
"avg_line_length": 43.5,
"blob_id": "c081330a5b46bf622fd90e1569a6d93ca4f726f0",
"content_id": "90df36b1aa0b83722f9a8277384c239deb47d84e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 180,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 4,
"path": "/exercicio7.py",
"repo_name": "Lukeskyward/Secao4",
"src_encoding": "UTF-8",
"text": "print(\"Digite uma temperatura em Fahrenheit\")\nfah = float(input())\ncel = 5 * (fah - 32.0)/9.0\nprint(f\"A temperatura em Celsius é: {cel}, e a temperatura em Fahrenheit é: {fah}\")\n"
},
{
"alpha_fraction": 0.6875,
"alphanum_fraction": 0.6875,
"avg_line_length": 20.33333396911621,
"blob_id": "03169f358a81a292c802586c87a36a794ac27f09",
"content_id": "e925857dc6c6263aa800521774603ea42176b32f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 64,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 3,
"path": "/exercicio2.py",
"repo_name": "Lukeskyward/Secao4",
"src_encoding": "UTF-8",
"text": "print('Digite um numero real:')\nnum = float(input())\nprint(num)\n"
}
] | 50 |
smartmark-pro/leetcode_record
|
https://github.com/smartmark-pro/leetcode_record
|
653f4c63367d90c6cb6c2c0994080355b280c2bb
|
6504b733d892a705571eb4eac836fb10e94e56db
|
e04d091935b52dbe28cfcc3a6ec1b1557253c1e4
|
refs/heads/master
| 2022-12-15T15:11:58.219867 | 2020-09-12T10:54:29 | 2020-09-12T10:54:29 | 294,687,276 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5795454382896423,
"alphanum_fraction": 0.6590909361839294,
"avg_line_length": 11.714285850524902,
"blob_id": "37d1f143857ecf4eb36f5f436c69cef306e093f0",
"content_id": "34162b842c5fdc810f756d9db655b5520dffb86c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 88,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/190.reverse-bits.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=190 lang=python3\n#\n# [190] reverse-bits\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5959596037864685,
"alphanum_fraction": 0.6868686676025391,
"avg_line_length": 13.285714149475098,
"blob_id": "b568687e967fa6c7a30fe6401de23ca86e6d8d83",
"content_id": "fe6ca5828f5efcbacf958c75c29e38a6f20ec162",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 99,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1063.best-sightseeing-pair.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1063 lang=python3\n#\n# [1063] best-sightseeing-pair\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6190476417541504,
"alphanum_fraction": 0.6857143044471741,
"avg_line_length": 14.142857551574707,
"blob_id": "b2b0e67ab17fd34d49defe5f402ca039c4a682fe",
"content_id": "67f5791c98a677c6f6d066fc46a8e981b2582082",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 105,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/138.copy-list-with-random-pointer.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=138 lang=python3\n#\n# [138] copy-list-with-random-pointer\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6206896305084229,
"alphanum_fraction": 0.6551724076271057,
"avg_line_length": 11.571428298950195,
"blob_id": "18c694e6d1a7da641d59af739a3cd24ecc6bd74a",
"content_id": "34f333b494eb68bd4071e96d1620767e9ab7f980",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 87,
"license_type": "permissive",
"max_line_length": 39,
"num_lines": 7,
"path": "/codes_auto/7.reverse-integer.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=7 lang=python3\n#\n# [7] reverse-integer\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6460176706314087,
"alphanum_fraction": 0.7079645991325378,
"avg_line_length": 15.285714149475098,
"blob_id": "20ecde7a164f1e75ece229bc5b6fdf39b02385ea",
"content_id": "4f616c7bebddba30d05ee6d76ae266dd50a58171",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 113,
"license_type": "permissive",
"max_line_length": 45,
"num_lines": 7,
"path": "/codes_auto/297.serialize-and-deserialize-binary-tree.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=297 lang=python3\n#\n# [297] serialize-and-deserialize-binary-tree\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6057692170143127,
"alphanum_fraction": 0.6730769276618958,
"avg_line_length": 14,
"blob_id": "c4c57279a445363f684e447c95f4fa94212a825c",
"content_id": "95a5f06e226598e5f727a4c440bb0d58c085250b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 104,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/237.delete-node-in-a-linked-list.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=237 lang=python3\n#\n# [237] delete-node-in-a-linked-list\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6041666865348816,
"alphanum_fraction": 0.6770833134651184,
"avg_line_length": 12.857142448425293,
"blob_id": "af3d30e57b9dad1bd7f6d65e545ccc62717d99a3",
"content_id": "87baadfb6186c9fe3bddfff42737354eeb464772",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 96,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/110.balanced-binary-tree.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=110 lang=python3\n#\n# [110] balanced-binary-tree\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5955055952072144,
"alphanum_fraction": 0.6516854166984558,
"avg_line_length": 11.857142448425293,
"blob_id": "22b483c7b00dc51efde25b308ccc1f9e8c999516",
"content_id": "09ce93f286d0c88a349b1131edbed7230267db19",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 89,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/63.unique-paths-ii.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=63 lang=python3\n#\n# [63] unique-paths-ii\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6086956262588501,
"alphanum_fraction": 0.686956524848938,
"avg_line_length": 15.571428298950195,
"blob_id": "2e91ecdce1a87ad61cc04cfd7df1c14466a183ff",
"content_id": "4dc0dc364693952516be47dbf492337d7531ba00",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 115,
"license_type": "permissive",
"max_line_length": 46,
"num_lines": 7,
"path": "/codes_auto/1632.number-of-good-ways-to-split-a-string.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1632 lang=python3\n#\n# [1632] number-of-good-ways-to-split-a-string\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6640625,
"alphanum_fraction": 0.71875,
"avg_line_length": 17.428571701049805,
"blob_id": "8b6b6e57724b06a971f4954731d36f0f7812e0fe",
"content_id": "cf778eb7bde97627df9271bd878cb521a463f584",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 128,
"license_type": "permissive",
"max_line_length": 60,
"num_lines": 7,
"path": "/codes_auto/340.longest-substring-with-at-most-k-distinct-characters.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=340 lang=python3\n#\n# [340] longest-substring-with-at-most-k-distinct-characters\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6363636255264282,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 13.285714149475098,
"blob_id": "cfe9892f6d16d8a1afb653734be295ddba5193cb",
"content_id": "aa152bbb43953c324141ca9b53cddd1923e97862",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 99,
"license_type": "permissive",
"max_line_length": 39,
"num_lines": 7,
"path": "/codes_auto/4.median-of-two-sorted-arrays.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=4 lang=python3\n#\n# [4] median-of-two-sorted-arrays\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5806451439857483,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 12.428571701049805,
"blob_id": "58d93d113597bd29d5386d26d0a1a61fc2632c2b",
"content_id": "36b92bb1526f3a9da7363ca52f98832014813eaf",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 93,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/251.flatten-2d-vector.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=251 lang=python3\n#\n# [251] flatten-2d-vector\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5696202516555786,
"alphanum_fraction": 0.6329113841056824,
"avg_line_length": 10.428571701049805,
"blob_id": "05e47cce4cfb12135cf54e3abf0cb1f2212c2914",
"content_id": "6351aaafa01d08b8b31d754d8e0c35a9149eaf71",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 79,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/69.sqrtx.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=69 lang=python3\n#\n# [69] sqrtx\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5813953280448914,
"alphanum_fraction": 0.6511628031730652,
"avg_line_length": 11.428571701049805,
"blob_id": "1420ed1c46f5a54b48be159124c8daad15083332",
"content_id": "2006e6d44e801aa01dc8e103a8f208d743415a0c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 86,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/16.3sum-closest.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=16 lang=python3\n#\n# [16] 3sum-closest\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5977011322975159,
"alphanum_fraction": 0.6551724076271057,
"avg_line_length": 11.571428298950195,
"blob_id": "bbd36a10adbc438da73b046f86b0d8446cdf9948",
"content_id": "ff1a535999ebb8db69ca149c1aaef81ed0189270",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 87,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/72.edit-distance.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=72 lang=python3\n#\n# [72] edit-distance\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5888888835906982,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 12,
"blob_id": "9d28aac3a1ccc838dc225a6ea0f110ee82d81558",
"content_id": "6cd4f31b7574cede19190de2942715e3ea6349a0",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 90,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/221.maximal-square.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=221 lang=python3\n#\n# [221] maximal-square\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6470588445663452,
"alphanum_fraction": 0.7226890921592712,
"avg_line_length": 16.14285659790039,
"blob_id": "e15451986d3144e991c4b4f611a5904c645127a8",
"content_id": "4a578eb4f2e0dd9c445e421aa52f16e3ef7adacc",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 119,
"license_type": "permissive",
"max_line_length": 50,
"num_lines": 7,
"path": "/codes_auto/1007.numbers-with-same-consecutive-differences.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1007 lang=python3\n#\n# [1007] numbers-with-same-consecutive-differences\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6203703880310059,
"alphanum_fraction": 0.7037037014961243,
"avg_line_length": 14.571428298950195,
"blob_id": "3bad6e58cdd6493564bf8ee7a98fc9f8147b9931",
"content_id": "e5cbc25216cd9d9b2c00b7d95a8448d99ad35abe",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 108,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1668.find-longest-awesome-substring.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1668 lang=python3\n#\n# [1668] find-longest-awesome-substring\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5595238208770752,
"alphanum_fraction": 0.6428571343421936,
"avg_line_length": 11.142857551574707,
"blob_id": "4eeeb635226a5ebac640aec26ace58245a31f871",
"content_id": "6a2543edcf1bb9bb6e5c7a0ca8b61f5841ddb281",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 84,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/112.path-sum.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=112 lang=python3\n#\n# [112] path-sum\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6000000238418579,
"alphanum_fraction": 0.6555555462837219,
"avg_line_length": 12,
"blob_id": "1f255ed0bf53d17b39fae1da6d4b6ef18a9d6c0a",
"content_id": "c17185572c61ca89e25ea96a34e54b4c8d1d5717",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 90,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/13.roman-to-integer.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=13 lang=python3\n#\n# [13] roman-to-integer\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6145833134651184,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 12.857142448425293,
"blob_id": "96d0917b6893e8b63d5ac44501635c956bcb61fc",
"content_id": "c1203e30ece270782e6f56084d5b42bc8895d85b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 96,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/21.merge-two-sorted-lists.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=21 lang=python3\n#\n# [21] merge-two-sorted-lists\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6320754885673523,
"alphanum_fraction": 0.698113203048706,
"avg_line_length": 14.285714149475098,
"blob_id": "de31e9d446577b6b6e68546fbc27af28edb24442",
"content_id": "0bb24569f631f25bef50976dcb1547cae5575dd3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 106,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/406.queue-reconstruction-by-height.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=406 lang=python3\n#\n# [406] queue-reconstruction-by-height\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5760869383811951,
"alphanum_fraction": 0.6630434989929199,
"avg_line_length": 12.285714149475098,
"blob_id": "93895c80105eb9ea7962988ca8689518dd8db848",
"content_id": "1fbd742e26eb9c37f15ec2a3f0378bc5336cbf36",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 92,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/393.utf-8-validation.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=393 lang=python3\n#\n# [393] utf-8-validation\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6492537260055542,
"alphanum_fraction": 0.7164179086685181,
"avg_line_length": 18.285715103149414,
"blob_id": "14b9c7958466abc839dfa8f495d8ef66db12e6a4",
"content_id": "37a8649ebd054a6a30b5bf955660c17c609765d9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 134,
"license_type": "permissive",
"max_line_length": 65,
"num_lines": 7,
"path": "/codes_auto/1573.find-two-non-overlapping-sub-arrays-each-with-target-sum.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1573 lang=python3\n#\n# [1573] find-two-non-overlapping-sub-arrays-each-with-target-sum\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6507936716079712,
"alphanum_fraction": 0.7222222089767456,
"avg_line_length": 17.14285659790039,
"blob_id": "fc0593b428599e3965eda9daac4cd7b76bfe5db6",
"content_id": "fc388f80e3f5dcff2e5d7f48c78d26a0af0326c7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 126,
"license_type": "permissive",
"max_line_length": 57,
"num_lines": 7,
"path": "/codes_auto/1700.minimum-deletion-cost-to-avoid-repeating-letters.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1700 lang=python3\n#\n# [1700] minimum-deletion-cost-to-avoid-repeating-letters\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5555555820465088,
"alphanum_fraction": 0.6555555462837219,
"avg_line_length": 12,
"blob_id": "c5e9c3e02776a7eb1a301996d6b5a10b42cb5c98",
"content_id": "94ee2367fbfcf88b14ec3c81fcb2756e511cbe3b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 90,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1685.stone-game-v.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1685 lang=python3\n#\n# [1685] stone-game-v\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5822784900665283,
"alphanum_fraction": 0.6202531456947327,
"avg_line_length": 10.428571701049805,
"blob_id": "5f2452d5e9656da366c9fac2eb14c5bb18335b5d",
"content_id": "1d1ee94a319ca9ea944a9f30c48feef9f787d2c4",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 79,
"license_type": "permissive",
"max_line_length": 39,
"num_lines": 7,
"path": "/codes_auto/1.two-sum.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1 lang=python3\n#\n# [1] two-sum\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5824176073074341,
"alphanum_fraction": 0.6593406796455383,
"avg_line_length": 12.142857551574707,
"blob_id": "2f2058f7ca95e24f120c1217411f9f261cc47f9c",
"content_id": "eadac97be55fc6f1b7d1fb3435f6035df306b61b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 91,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/213.house-robber-ii.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=213 lang=python3\n#\n# [213] house-robber-ii\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6095238327980042,
"alphanum_fraction": 0.6761904954910278,
"avg_line_length": 14.142857551574707,
"blob_id": "1005d6405aa2973fe6c67aadd1d965b5123bee0c",
"content_id": "8fd16978f6c9d0678c67f51e04d309f50e9d609f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 105,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/438.find-all-anagrams-in-a-string.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=438 lang=python3\n#\n# [438] find-all-anagrams-in-a-string\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.59375,
"alphanum_fraction": 0.6875,
"avg_line_length": 12.857142448425293,
"blob_id": "d170c634fea745d0235d59d54dbb1b09cfcf2a5a",
"content_id": "ac7823f57c57b4da939e5d97374577d32de5119c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 96,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1571.allocate-mailboxes.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1571 lang=python3\n#\n# [1571] allocate-mailboxes\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6271186470985413,
"alphanum_fraction": 0.7033898234367371,
"avg_line_length": 16,
"blob_id": "7e8bc000b1eae30f845f56c6ac2703cdf1cb6210",
"content_id": "7db74970a4168eb801cca55c477889571c50d892",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 118,
"license_type": "permissive",
"max_line_length": 49,
"num_lines": 7,
"path": "/codes_auto/1528.kids-with-the-greatest-number-of-candies.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1528 lang=python3\n#\n# [1528] kids-with-the-greatest-number-of-candies\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6336633563041687,
"alphanum_fraction": 0.6831682920455933,
"avg_line_length": 13.571428298950195,
"blob_id": "1319e2d4ea8bc9681b862f78ff9a0e6736805870",
"content_id": "e9ab1d73bef0ca4e2cebe9713ebd6e5b14b33738",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 101,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/98.validate-binary-search-tree.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=98 lang=python3\n#\n# [98] validate-binary-search-tree\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5980392098426819,
"alphanum_fraction": 0.686274528503418,
"avg_line_length": 13.714285850524902,
"blob_id": "6a6287340ccc0eed5e4eb670392a4a2092177114",
"content_id": "81c05707e84387b8e5353b6d7eeaad114009dab3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 102,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1025.minimum-cost-for-tickets.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1025 lang=python3\n#\n# [1025] minimum-cost-for-tickets\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5888888835906982,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 12,
"blob_id": "d257f212315324ba62a514c859b7d230681ba44f",
"content_id": "ab11b6d6ce2c9e471f352f6da141bd869049ac52",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 90,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/312.burst-balloons.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=312 lang=python3\n#\n# [312] burst-balloons\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.630630612373352,
"alphanum_fraction": 0.6936936974525452,
"avg_line_length": 15,
"blob_id": "04feb74c90bc923244736a0d13496f99aec4eba2",
"content_id": "0b3890dcb008097184f4d96ab69690c6181512c9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 111,
"license_type": "permissive",
"max_line_length": 43,
"num_lines": 7,
"path": "/codes_auto/315.count-of-smaller-numbers-after-self.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=315 lang=python3\n#\n# [315] count-of-smaller-numbers-after-self\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.569767415523529,
"alphanum_fraction": 0.6511628031730652,
"avg_line_length": 11.428571701049805,
"blob_id": "ddf5b4ce0f73f144529a5ec24efe17e2d410f785",
"content_id": "91746a4fe78e2ce0cf5da9729af1ef16c288b991",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 86,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/909.stone-game.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=909 lang=python3\n#\n# [909] stone-game\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6153846383094788,
"alphanum_fraction": 0.682692289352417,
"avg_line_length": 14,
"blob_id": "f2ba3a3537472b9d947efb4a218e3d4414a673bc",
"content_id": "920a84e84640f902977353234a7b722a922b79bd",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 104,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/104.maximum-depth-of-binary-tree.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=104 lang=python3\n#\n# [104] maximum-depth-of-binary-tree\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.621052622795105,
"alphanum_fraction": 0.6736842393875122,
"avg_line_length": 12.714285850524902,
"blob_id": "10c721121360258089845ba4a0559ad0f9aa6a5d",
"content_id": "0ed5c76b621378329656586a124321e9a4cbad79",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 95,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/14.longest-common-prefix.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=14 lang=python3\n#\n# [14] longest-common-prefix\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.584269642829895,
"alphanum_fraction": 0.6629213690757751,
"avg_line_length": 11.857142448425293,
"blob_id": "475ad2d4a9e65accf6412afecc421c2ac60c5257",
"content_id": "c192b6ee564095148599031d7b4dff09fa37611d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 89,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/136.single-number.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=136 lang=python3\n#\n# [136] single-number\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6161616444587708,
"alphanum_fraction": 0.6868686676025391,
"avg_line_length": 13.285714149475098,
"blob_id": "e4d47e9f0c32173e3289ec2efe6f2d665f3c585b",
"content_id": "22a0541987f4a44b18f7850a4b8126bb1c4d7259",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 99,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/696.count-binary-substrings.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=696 lang=python3\n#\n# [696] count-binary-substrings\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6105263233184814,
"alphanum_fraction": 0.6842105388641357,
"avg_line_length": 12.714285850524902,
"blob_id": "da3cbbb550222ecb464f28b7cb963260a7ff22e8",
"content_id": "ce6b276a6d982be6f962346f8f6bc5c7ded54f28",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 95,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/214.shortest-palindrome.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=214 lang=python3\n#\n# [214] shortest-palindrome\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6384615302085876,
"alphanum_fraction": 0.7076923251152039,
"avg_line_length": 17.714284896850586,
"blob_id": "86381a6b59ec73160f8115757475fba1e840f9eb",
"content_id": "108838b868da4f64aefef15fd585c6d5a8015bf2",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 130,
"license_type": "permissive",
"max_line_length": 61,
"num_lines": 7,
"path": "/codes_auto/1389.minimum-moves-to-move-a-box-to-their-target-location.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1389 lang=python3\n#\n# [1389] minimum-moves-to-move-a-box-to-their-target-location\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6549295783042908,
"alphanum_fraction": 0.7183098793029785,
"avg_line_length": 19.428571701049805,
"blob_id": "e7130f9dff44d03f127da6ef5b97ec68a0da0c48",
"content_id": "9584952759e8a5982d58ea5ce8498c421bcbde83",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 142,
"license_type": "permissive",
"max_line_length": 73,
"num_lines": 7,
"path": "/codes_auto/1516.the-k-th-lexicographical-string-of-all-happy-strings-of-length-n.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1516 lang=python3\n#\n# [1516] the-k-th-lexicographical-string-of-all-happy-strings-of-length-n\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6296296119689941,
"alphanum_fraction": 0.6944444179534912,
"avg_line_length": 14.571428298950195,
"blob_id": "0f61417ff70708b5fd1a1582d963ea0da93fdd48",
"content_id": "0eb47ecd577d73698e9e727c607cde0e84c6b029",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 108,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/877.shortest-path-visiting-all-nodes.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=877 lang=python3\n#\n# [877] shortest-path-visiting-all-nodes\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6346153616905212,
"alphanum_fraction": 0.7019230723381042,
"avg_line_length": 14,
"blob_id": "e66a27728272d7149484b3528b765b08464ecf13",
"content_id": "00d95fedf103713d25a34f06532b0a73644a5f4c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 104,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/128.longest-consecutive-sequence.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=128 lang=python3\n#\n# [128] longest-consecutive-sequence\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6616541147232056,
"alphanum_fraction": 0.7293233275413513,
"avg_line_length": 18.14285659790039,
"blob_id": "b9ca162628ea4c3fdc6875b08dd2d352de1b6115",
"content_id": "0f54813253d923a1142bff24b4f50d22171ff1b1",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 133,
"license_type": "permissive",
"max_line_length": 64,
"num_lines": 7,
"path": "/codes_auto/1584.average-salary-excluding-the-minimum-and-maximum-salary.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1584 lang=python3\n#\n# [1584] average-salary-excluding-the-minimum-and-maximum-salary\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6486486196517944,
"alphanum_fraction": 0.6936936974525452,
"avg_line_length": 15,
"blob_id": "d0b9511b23b6a26f311750418745f94a627da86c",
"content_id": "bae2c904d2b47000c2950892d95583eff40987c8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 111,
"license_type": "permissive",
"max_line_length": 44,
"num_lines": 7,
"path": "/codes_auto/82.remove-duplicates-from-sorted-list-ii.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=82 lang=python3\n#\n# [82] remove-duplicates-from-sorted-list-ii\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6153846383094788,
"alphanum_fraction": 0.6703296899795532,
"avg_line_length": 12.142857551574707,
"blob_id": "69f8705ab672355ee09d29a0f9dafb1282f7e891",
"content_id": "49a2734db3383bc3d36a6163b562841b6a78cae0",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 91,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/20.valid-parentheses.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=20 lang=python3\n#\n# [20] valid-parentheses\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5913978219032288,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 12.428571701049805,
"blob_id": "48064c5ed17a1e2c96262f6c8654cb9a33df8a60",
"content_id": "e0d078dd55e6375f341172b1ee013dd8e34cc7d3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 93,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/257.binary-tree-paths.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=257 lang=python3\n#\n# [257] binary-tree-paths\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6100000143051147,
"alphanum_fraction": 0.6800000071525574,
"avg_line_length": 13.428571701049805,
"blob_id": "498ab0c9246dcd551eaa5cbc89d09f3148351081",
"content_id": "2e548137ae0c5e32284b0377fafcccebfe2942b8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 100,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/967.minimum-falling-path-sum.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=967 lang=python3\n#\n# [967] minimum-falling-path-sum\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6203703880310059,
"alphanum_fraction": 0.6851851940155029,
"avg_line_length": 14.571428298950195,
"blob_id": "67d55302bf41e7b7d64643921b4b9023e338cec9",
"content_id": "68d82adc20badb4889e8c63c131448afc49e4373",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 108,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/784.insert-into-a-binary-search-tree.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=784 lang=python3\n#\n# [784] insert-into-a-binary-search-tree\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5681818127632141,
"alphanum_fraction": 0.6477272510528564,
"avg_line_length": 11.714285850524902,
"blob_id": "059ce4c953f5975f09a95623590d45235dccced0",
"content_id": "e176728f3d9431bf884bfff7d9e6277a619940cf",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 88,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/437.path-sum-iii.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=437 lang=python3\n#\n# [437] path-sum-iii\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5714285969734192,
"alphanum_fraction": 0.6703296899795532,
"avg_line_length": 12.142857551574707,
"blob_id": "7677a95dc14e0b5ccb2c65492e793446453ec395",
"content_id": "acea5b7c886d6936ff5272d9458ef6af26fa5003",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 91,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1642.water-bottles.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1642 lang=python3\n#\n# [1642] water-bottles\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.646616518497467,
"alphanum_fraction": 0.7142857313156128,
"avg_line_length": 18.14285659790039,
"blob_id": "461b4e8b7a7617578895a2e1f74e0b8f17e79285",
"content_id": "11aaede1fdb10e3b2f5707fb4f2b55eb249836b0",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 133,
"license_type": "permissive",
"max_line_length": 64,
"num_lines": 7,
"path": "/codes_auto/1645.find-a-value-of-a-mysterious-function-closest-to-target.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1645 lang=python3\n#\n# [1645] find-a-value-of-a-mysterious-function-closest-to-target\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5858585834503174,
"alphanum_fraction": 0.6767676472663879,
"avg_line_length": 13.285714149475098,
"blob_id": "ec1f438eface459dd3af1af5e36fe801af38aea1",
"content_id": "af4b13a324ede5ca3d4a733f28f4c16adfe28731",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 99,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1659.get-the-maximum-score.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1659 lang=python3\n#\n# [1659] get-the-maximum-score\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5301204919815063,
"alphanum_fraction": 0.6385542154312134,
"avg_line_length": 11,
"blob_id": "549d11a69d70a51cfa7608e7ac656a0e4f39c235",
"content_id": "61d0b1581ab0894975de246fd0c2852571198aee",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 83,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/679.24-game.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=679 lang=python3\n#\n# [679] 24-game\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6194690465927124,
"alphanum_fraction": 0.6991150379180908,
"avg_line_length": 15.285714149475098,
"blob_id": "e1ad43063626766d34919e23532ceef18e594959",
"content_id": "703336944ce0c6581b290a817b676a62cca12140",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 113,
"license_type": "permissive",
"max_line_length": 44,
"num_lines": 7,
"path": "/codes_auto/1194.path-in-zigzag-labelled-binary-tree.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1194 lang=python3\n#\n# [1194] path-in-zigzag-labelled-binary-tree\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6138613820075989,
"alphanum_fraction": 0.6831682920455933,
"avg_line_length": 13.571428298950195,
"blob_id": "e890a1f46d06e0c51f23e0b221a8a7a5a67a4f0d",
"content_id": "ce7c1c980dbf9ef9b79f6e9a1f24857814ad696c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 101,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/508.most-frequent-subtree-sum.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=508 lang=python3\n#\n# [508] most-frequent-subtree-sum\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6642335653305054,
"alphanum_fraction": 0.7299270033836365,
"avg_line_length": 18.714284896850586,
"blob_id": "1ca3b69dcac430f88580c8ba34ab2b5e6cfc7683",
"content_id": "09fcc3354735373b74519fe215024b68b66ad54f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 137,
"license_type": "permissive",
"max_line_length": 68,
"num_lines": 7,
"path": "/codes_auto/1621.number-of-subsequences-that-satisfy-the-given-sum-condition.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1621 lang=python3\n#\n# [1621] number-of-subsequences-that-satisfy-the-given-sum-condition\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5858585834503174,
"alphanum_fraction": 0.6767676472663879,
"avg_line_length": 13.285714149475098,
"blob_id": "e2c77bf1e1ddb1c0f42b7c372b7f45b72c4f82cc",
"content_id": "f52a1fe3198244c55c9cd06564534489cbb4fec4",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 99,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1622.max-value-of-equation.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1622 lang=python3\n#\n# [1622] max-value-of-equation\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5957446694374084,
"alphanum_fraction": 0.6702127456665039,
"avg_line_length": 12.571428298950195,
"blob_id": "251b92f33908f588e46ecddf00b939d2dfb26f3c",
"content_id": "e5173562abca1f6d6d65edc4090d9aefcf0191af",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 94,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/486.predict-the-winner.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=486 lang=python3\n#\n# [486] predict-the-winner\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6168224215507507,
"alphanum_fraction": 0.6822429895401001,
"avg_line_length": 14.428571701049805,
"blob_id": "6a19e0279bf7b73c889d4d068838b4fd7008d52e",
"content_id": "99438552b03f39e2f30f6cb56067543fd14f5469",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 107,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/215.kth-largest-element-in-an-array.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=215 lang=python3\n#\n# [215] kth-largest-element-in-an-array\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6240000128746033,
"alphanum_fraction": 0.6959999799728394,
"avg_line_length": 17,
"blob_id": "116e2733fcd953add10f50ac08bf0acd17a8995a",
"content_id": "0165a9dbc242c5db7a648cb6faa5592ed7317b3f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 125,
"license_type": "permissive",
"max_line_length": 56,
"num_lines": 7,
"path": "/codes_auto/1692.number-of-ways-to-reorder-array-to-get-same-bst.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1692 lang=python3\n#\n# [1692] number-of-ways-to-reorder-array-to-get-same-bst\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6272727251052856,
"alphanum_fraction": 0.6909090876579285,
"avg_line_length": 14.857142448425293,
"blob_id": "810376f85f9b84ea25323e00461e4dad08808230",
"content_id": "528d0dfa546e65411f38ec68be73b74cf256c1e6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 110,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/114.flatten-binary-tree-to-linked-list.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=114 lang=python3\n#\n# [114] flatten-binary-tree-to-linked-list\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.593406617641449,
"alphanum_fraction": 0.6703296899795532,
"avg_line_length": 12.142857551574707,
"blob_id": "388ad2264d489a2f0e78406b156d35cf801a40ea",
"content_id": "80e59e248c4d413659089414b10f17111ce5107f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 91,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/207.course-schedule.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=207 lang=python3\n#\n# [207] course-schedule\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6111111044883728,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 12,
"blob_id": "920bc14a65e693a3685f3dc0f05658e17cd26c7e",
"content_id": "eea05fa8152bf05f5265d916c1630d97c2a0cae0",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 90,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/31.next-permutation.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=31 lang=python3\n#\n# [31] next-permutation\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5744680762290955,
"alphanum_fraction": 0.6702127456665039,
"avg_line_length": 12.571428298950195,
"blob_id": "06c2172b442b83ab8a692c94e9c703dc8e67622b",
"content_id": "b7c8a962e9db776c0ea7a4a8b06392382ea2f070",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 94,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1242.matrix-block-sum.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1242 lang=python3\n#\n# [1242] matrix-block-sum\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5978260636329651,
"alphanum_fraction": 0.6739130616188049,
"avg_line_length": 12.285714149475098,
"blob_id": "78fe59a06007cc434a5d618a85ce3dde38d64be2",
"content_id": "f7bccc776fa12ed31c0e5c7a3d0075c7885d8d5d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 92,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/269.alien-dictionary.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=269 lang=python3\n#\n# [269] alien-dictionary\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.584269642829895,
"alphanum_fraction": 0.6629213690757751,
"avg_line_length": 11.857142448425293,
"blob_id": "d08a548c0a5bdc8fcfc7ef5f959711835e4e9c3f",
"content_id": "bf59a443b094f95eec0e1d821c685ec56fed270f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 89,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/343.integer-break.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=343 lang=python3\n#\n# [343] integer-break\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5647059082984924,
"alphanum_fraction": 0.6470588445663452,
"avg_line_length": 11.285714149475098,
"blob_id": "56a93875e7ccd7a9768c38a93fa0121df1fb23a8",
"content_id": "5c40147933d97a22432e657fc055b771a88edafe",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 85,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/155.min-stack.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=155 lang=python3\n#\n# [155] min-stack\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6000000238418579,
"alphanum_fraction": 0.6555555462837219,
"avg_line_length": 12,
"blob_id": "eaae00baeeb79d963e4f76ff0c0bd8cf6238b631",
"content_id": "100d08be9b1a612835bf00fd8b5b61e1e244f7ee",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 90,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/64.minimum-path-sum.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=64 lang=python3\n#\n# [64] minimum-path-sum\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6116504669189453,
"alphanum_fraction": 0.6796116232872009,
"avg_line_length": 13.857142448425293,
"blob_id": "6fbb04cb453a808244757952be31e042e0658aa1",
"content_id": "947cbbbc2042fb42db030a85a536e9ab7c3bae05",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 103,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/199.binary-tree-right-side-view.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=199 lang=python3\n#\n# [199] binary-tree-right-side-view\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6206896305084229,
"alphanum_fraction": 0.6982758641242981,
"avg_line_length": 15.714285850524902,
"blob_id": "d710ae62bda80663e0e602052a6be673feff69ab",
"content_id": "1d75d47c40ced98e190ded357b5c0d1ea225f1b7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 116,
"license_type": "permissive",
"max_line_length": 47,
"num_lines": 7,
"path": "/codes_auto/1630.count-odd-numbers-in-an-interval-range.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1630 lang=python3\n#\n# [1630] count-odd-numbers-in-an-interval-range\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6363636255264282,
"alphanum_fraction": 0.7045454382896423,
"avg_line_length": 18,
"blob_id": "feb67ca85f3e20ce146e046d4551d0ea29aaeff2",
"content_id": "5386edabcd46c2b09202bbfcd68394119b6a86e4",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 132,
"license_type": "permissive",
"max_line_length": 63,
"num_lines": 7,
"path": "/codes_auto/1576.reorder-routes-to-make-all-paths-lead-to-the-city-zero.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1576 lang=python3\n#\n# [1576] reorder-routes-to-make-all-paths-lead-to-the-city-zero\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6036036014556885,
"alphanum_fraction": 0.6936936974525452,
"avg_line_length": 15,
"blob_id": "5aa847ca2490037804909c92ef16e37414663217",
"content_id": "6cb5b12fa6a1af94311c9c4dd04a618f372ba520",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 111,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1636.number-of-substrings-with-only-1s.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1636 lang=python3\n#\n# [1636] number-of-substrings-with-only-1s\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6106194853782654,
"alphanum_fraction": 0.6902654767036438,
"avg_line_length": 15.285714149475098,
"blob_id": "046b56d13cf56932469d579684497f0514a1bbf9",
"content_id": "22765301e1063b056cacfa4d6669637fa90b6054",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 113,
"license_type": "permissive",
"max_line_length": 44,
"num_lines": 7,
"path": "/codes_auto/1683.maximum-number-of-coins-you-can-get.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1683 lang=python3\n#\n# [1683] maximum-number-of-coins-you-can-get\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6074766516685486,
"alphanum_fraction": 0.6915887594223022,
"avg_line_length": 14.428571701049805,
"blob_id": "704995638ee1696073331c98b08f50aa0b6242c7",
"content_id": "30087d3578add0efaad7cb20cc2c5ac9f5ed368f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 107,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1499.maximum-performance-of-a-team.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1499 lang=python3\n#\n# [1499] maximum-performance-of-a-team\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5760869383811951,
"alphanum_fraction": 0.6739130616188049,
"avg_line_length": 12.285714149475098,
"blob_id": "acd613f434fcc50eff00713e4a4160d98ae33d1a",
"content_id": "f2160188fe9fa1b0efcf4cfefc800603acd2769f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 92,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1651.shuffle-string.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1651 lang=python3\n#\n# [1651] shuffle-string\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5747126340866089,
"alphanum_fraction": 0.6551724076271057,
"avg_line_length": 11.571428298950195,
"blob_id": "5e5ff77d8e9e476bdbdd403a4ab3f9719d2db587",
"content_id": "7472bfe33c4182bea0d9b1ee78aa2e21d8693b6b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 87,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/256.paint-house.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=256 lang=python3\n#\n# [256] paint-house\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.585106372833252,
"alphanum_fraction": 0.6595744490623474,
"avg_line_length": 12.571428298950195,
"blob_id": "c702a5d824a15218da118b26ef0ae58ab8589013",
"content_id": "d0fa2fc6c38617a62a43a75a827ef1391fb70024",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 94,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/348.design-tic-tac-toe.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=348 lang=python3\n#\n# [348] design-tic-tac-toe\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6515151262283325,
"alphanum_fraction": 0.7196969985961914,
"avg_line_length": 18,
"blob_id": "21c7143c438a9e5d516ff567db00a6a920065b5d",
"content_id": "e9bb771a2f1a6aa3ab3223ceefad39b130c84560",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 132,
"license_type": "permissive",
"max_line_length": 63,
"num_lines": 7,
"path": "/codes_auto/1662.minimum-numbers-of-function-calls-to-make-target-array.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1662 lang=python3\n#\n# [1662] minimum-numbers-of-function-calls-to-make-target-array\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5742574334144592,
"alphanum_fraction": 0.6732673048973083,
"avg_line_length": 13.571428298950195,
"blob_id": "f7a53816eedf9b7889b2e3f3c9b876c6aec7e4f7",
"content_id": "f720066904260a4d87d72e6f5790ddf77d44d217",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 101,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1603.running-sum-of-1d-array.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1603 lang=python3\n#\n# [1603] running-sum-of-1d-array\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5802469253540039,
"alphanum_fraction": 0.6419752836227417,
"avg_line_length": 10.714285850524902,
"blob_id": "8872d5aa0503c1cf927caf751c0876b321167581",
"content_id": "5d6de21ee5f9b48bc7a4ad5b60cae2f302314827",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 81,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/78.subsets.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=78 lang=python3\n#\n# [78] subsets\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6585366129875183,
"alphanum_fraction": 0.7317073345184326,
"avg_line_length": 16.714284896850586,
"blob_id": "25935784f1837868b52618f38e79ced31665388a",
"content_id": "7940c9b8c0fc7a3e1e67b6fb4618a0a6ee7bc85d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 123,
"license_type": "permissive",
"max_line_length": 54,
"num_lines": 7,
"path": "/codes_auto/1359.circular-permutation-in-binary-representation.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1359 lang=python3\n#\n# [1359] circular-permutation-in-binary-representation\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.654411792755127,
"alphanum_fraction": 0.720588207244873,
"avg_line_length": 18.571428298950195,
"blob_id": "4a640c4c6261855db65f3d1a93842fc0ecad2315",
"content_id": "72d5866e953551731844461ef28504a23689569b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 136,
"license_type": "permissive",
"max_line_length": 67,
"num_lines": 7,
"path": "/codes_auto/1701.remove-max-number-of-edges-to-keep-graph-fully-traversable.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1701 lang=python3\n#\n# [1701] remove-max-number-of-edges-to-keep-graph-fully-traversable\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5604395866394043,
"alphanum_fraction": 0.6593406796455383,
"avg_line_length": 12.142857551574707,
"blob_id": "8425c487759164d27263b564fa62e5bc6260d2a4",
"content_id": "cc746912de2c3faa3cf7bc9a10bf74cc38df47c9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 91,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1617.stone-game-iv.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1617 lang=python3\n#\n# [1617] stone-game-iv\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.59375,
"alphanum_fraction": 0.6875,
"avg_line_length": 12.857142448425293,
"blob_id": "3e7381cd3d348f4d330760ef8aa95ff3ee788b78",
"content_id": "d70c33e9cf92527235921556b8b97c615080e878",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 96,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1660.thousand-separator.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1660 lang=python3\n#\n# [1660] thousand-separator\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5714285969734192,
"alphanum_fraction": 0.6593406796455383,
"avg_line_length": 12.142857551574707,
"blob_id": "c8b252361b7072d67cb1e0c697da4cfb8b2232bb",
"content_id": "00be1931395778a49ffc887ca266f913f3cdcc9a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 91,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/650.2-keys-keyboard.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=650 lang=python3\n#\n# [650] 2-keys-keyboard\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6160714030265808,
"alphanum_fraction": 0.6964285969734192,
"avg_line_length": 15.142857551574707,
"blob_id": "118c0ab8408f7f2fbf3c09d67616a9346ce8b1e8",
"content_id": "c890f59f6afc9179686302e647590cc3bc4521e9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 112,
"license_type": "permissive",
"max_line_length": 43,
"num_lines": 7,
"path": "/codes_auto/1228.minimum-cost-tree-from-leaf-values.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1228 lang=python3\n#\n# [1228] minimum-cost-tree-from-leaf-values\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6000000238418579,
"alphanum_fraction": 0.6899999976158142,
"avg_line_length": 13.428571701049805,
"blob_id": "e454ff71f374c72b943ba8db54195a99d37981ad",
"content_id": "73a44bcaa3fba472871157ca5614cca4a5c8adec",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 100,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1293.three-consecutive-odds.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1293 lang=python3\n#\n# [1293] three-consecutive-odds\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5888888835906982,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 12,
"blob_id": "dbd6cbdb0ce28b2dc2e5e5b76fe09b88896de144",
"content_id": "391e06a231d65035f0b792581bd7c11fadc8f391",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 90,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/344.reverse-string.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=344 lang=python3\n#\n# [344] reverse-string\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6060606241226196,
"alphanum_fraction": 0.6767676472663879,
"avg_line_length": 13.285714149475098,
"blob_id": "5c78a4e7efbac4fa5360f422ca48ce7153daedbd",
"content_id": "d968a52b0aaf8e145dd834920e12b4491c79d785",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 99,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/410.split-array-largest-sum.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=410 lang=python3\n#\n# [410] split-array-largest-sum\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.694915235042572,
"alphanum_fraction": 0.7203390002250671,
"avg_line_length": 16,
"blob_id": "663cbde022f7b0ff379bc3715b2c80019fb66c93",
"content_id": "8d2bc061061380cd3c2de72885ed4e61172ed188",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 118,
"license_type": "permissive",
"max_line_length": 52,
"num_lines": 7,
"path": "/codes_auto/3.longest-substring-without-repeating-characters.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=3 lang=python3\n#\n# [3] longest-substring-without-repeating-characters\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.584269642829895,
"alphanum_fraction": 0.6629213690757751,
"avg_line_length": 11.857142448425293,
"blob_id": "ad422da69b3ce41fa7a9b5ca31767e750f9fd05f",
"content_id": "4c7f034485c45ec4e1c85da9f5dd1cab1cdad75f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 89,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/394.decode-string.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=394 lang=python3\n#\n# [394] decode-string\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.584269642829895,
"alphanum_fraction": 0.6629213690757751,
"avg_line_length": 11.857142448425293,
"blob_id": "97275e05de1d0da4241bd4218197da63ca3613d7",
"content_id": "e306d32d324dec5dbe87991195d34bf04a7a0c6a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 89,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/338.counting-bits.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=338 lang=python3\n#\n# [338] counting-bits\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.7478190064430237,
"alphanum_fraction": 0.7798030376434326,
"avg_line_length": 192.30616760253906,
"blob_id": "7c5d8b9d75eb96222ba99face10525296410dcb9",
"content_id": "3308eec304dd4bca21e04cbd7bc8690750c95f81",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 90081,
"license_type": "permissive",
"max_line_length": 303,
"num_lines": 405,
"path": "/README.md",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "# Stay hangury ,Stay foolish \n\n<p> \n<img src=\"https://img.shields.io/badge/User-mei-shi-shua-shua-ti-purple.svg?\" alt=\"\">\n<img src=\"https://img.shields.io/badge/Solved-403/1786-blue.svg?\" alt=\"\">\n<img src=\"https://img.shields.io/badge/Easy-121-yellow.svg?\" alt=\"\">\n<img src=\"https://img.shields.io/badge/Medium-209-green.svg?\" alt=\"\">\n<img src=\"https://img.shields.io/badge/Hard-73-red.svg?\" alt=\"\">\n</p> \n\n:heart: 最近一次更新: 2020-09-12 18:42:08 \n\n:heart: 题目后带有 :lock: 表示该题尚未解锁,需要购买力扣经典会员。\n\n:heart: 本README文件与源码文件均为自动生成,详情见爬虫项目[Leetcode-Helper](https://github.com/Liuyang0001/Leetcode-Helper)。\n\n\n\n<hr>\n\n| 题号 | 题目 | 难度 | 标签 | 源码 | \n| :----: | :--------: | :----: | :------: | :------: | \n|1|[两数之和](https://leetcode-cn.com/problems/two-sum)|简单|数组、哈希表|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1.two-sum.py)|\n|2|[两数相加](https://leetcode-cn.com/problems/add-two-numbers)|中等|链表、数学|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/2.add-two-numbers.py)|\n|3|[无重复字符的最长子串](https://leetcode-cn.com/problems/longest-substring-without-repeating-characters)|中等|哈希表、双指针、字符串|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/3.longest-substring-without-repeating-characters.py)|\n|4|[寻找两个正序数组的中位数](https://leetcode-cn.com/problems/median-of-two-sorted-arrays)|困难|数组、二分查找、分治算法|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/4.median-of-two-sorted-arrays.py)|\n|5|[最长回文子串](https://leetcode-cn.com/problems/longest-palindromic-substring)|中等|字符串、动态规划|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/5.longest-palindromic-substring.py)|\n|6|[Z 字形变换](https://leetcode-cn.com/problems/zigzag-conversion)|中等|字符串|[Python3](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/6.zigzag-conversion)|\n|7|[整数反转](https://leetcode-cn.com/problems/reverse-integer)|简单|数学|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/7.reverse-integer.py)|\n|9|[回文数](https://leetcode-cn.com/problems/palindrome-number)|简单|数学|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/9.palindrome-number.py)|\n|10|[正则表达式匹配](https://leetcode-cn.com/problems/regular-expression-matching)|困难|字符串、动态规划、回溯算法|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/10.regular-expression-matching.py)|\n|11|[盛最多水的容器](https://leetcode-cn.com/problems/container-with-most-water)|中等|数组、双指针|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/11.container-with-most-water.py)|\n|13|[罗马数字转整数](https://leetcode-cn.com/problems/roman-to-integer)|简单|数学、字符串|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/13.roman-to-integer.py)|\n|14|[最长公共前缀](https://leetcode-cn.com/problems/longest-common-prefix)|简单|字符串|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/14.longest-common-prefix.py)|\n|15|[三数之和](https://leetcode-cn.com/problems/3sum)|中等|数组、双指针|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/15.3sum.py)|\n|16|[最接近的三数之和](https://leetcode-cn.com/problems/3sum-closest)|中等|数组、双指针|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/16.3sum-closest.py)|\n|17|[电话号码的字母组合](https://leetcode-cn.com/problems/letter-combinations-of-a-phone-number)|中等|字符串、回溯算法|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/17.let
ter-combinations-of-a-phone-number.py)|\n|19|[删除链表的倒数第N个节点](https://leetcode-cn.com/problems/remove-nth-node-from-end-of-list)|中等|链表、双指针|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/19.remove-nth-node-from-end-of-list.py)|\n|20|[有效的括号](https://leetcode-cn.com/problems/valid-parentheses)|简单|栈、字符串|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/20.valid-parentheses.py)|\n|21|[合并两个有序链表](https://leetcode-cn.com/problems/merge-two-sorted-lists)|简单|链表|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/21.merge-two-sorted-lists.py)|\n|22|[括号生成](https://leetcode-cn.com/problems/generate-parentheses)|中等|字符串、回溯算法|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/22.generate-parentheses.py)|\n|23|[合并K个升序链表](https://leetcode-cn.com/problems/merge-k-sorted-lists)|困难|堆、链表、分治算法|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/23.merge-k-sorted-lists.py)|\n|25|[K 个一组翻转链表](https://leetcode-cn.com/problems/reverse-nodes-in-k-group)|困难|链表|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/25.reverse-nodes-in-k-group.py)|\n|26|[删除排序数组中的重复项](https://leetcode-cn.com/problems/remove-duplicates-from-sorted-array)|简单|数组、双指针|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/26.remove-duplicates-from-sorted-array.py)|\n|28|[实现 strStr()](https://leetcode-cn.com/problems/implement-strstr)|简单|双指针、字符串|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/28.implement-strstr.py)|\n|31|[下一个排列](https://leetcode-cn.com/problems/next-permutation)|中等|数组|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/31.next-permutation.py)|\n|32|[最长有效括号](https://leetcode-cn.com/problems/longest-valid-parentheses)|困难|字符串、动态规划|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/32.longest-valid-parentheses.py)|\n|33|[搜索旋转排序数组](https://leetcode-cn.com/problems/search-in-rotated-sorted-array)|中等|数组、二分查找|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/33.search-in-rotated-sorted-array.py)|\n|34|[在排序数组中查找元素的第一个和最后一个位置](https://leetcode-cn.com/problems/find-first-and-last-position-of-element-in-sorted-array)|中等|数组、二分查找|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/34.find-first-and-last-position-of-element-in-sorted-array.py)|\n|35|[搜索插入位置](https://leetcode-cn.com/problems/search-insert-position)|简单|数组、二分查找|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/35.search-insert-position.py)|\n|38|[外观数列](https://leetcode-cn.com/problems/count-and-say)|简单|字符串|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/38.count-and-say.py)|\n|39|[组合总和](https://leetcode-cn.com/problems/combination-sum)|中等|数组、回溯算法|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/39.combination-sum.py)|\n|40|[组合总和 
II](https://leetcode-cn.com/problems/combination-sum-ii)|Medium|Array, Backtracking|[Python3](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/40.combination-sum-ii)|
|41|[First Missing Positive](https://leetcode-cn.com/problems/first-missing-positive)|Hard|Array|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/41.first-missing-positive.py)|
|42|[Trapping Rain Water](https://leetcode-cn.com/problems/trapping-rain-water)|Hard|Stack, Array, Two Pointers|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/42.trapping-rain-water.py)|
|43|[Multiply Strings](https://leetcode-cn.com/problems/multiply-strings)|Medium|Math, String|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/43.multiply-strings.py)|
|44|[Wildcard Matching](https://leetcode-cn.com/problems/wildcard-matching)|Hard|Greedy, String, Dynamic Programming, Backtracking|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/44.wildcard-matching.py)|
|45|[Jump Game II](https://leetcode-cn.com/problems/jump-game-ii)|Hard|Greedy, Array|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/45.jump-game-ii.py)|
|46|[Permutations](https://leetcode-cn.com/problems/permutations)|Medium|Backtracking|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/46.permutations.py)|
|48|[Rotate Image](https://leetcode-cn.com/problems/rotate-image)|Medium|Array|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/48.rotate-image.py)|
|49|[Group Anagrams](https://leetcode-cn.com/problems/group-anagrams)|Medium|Hash Table, String|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/49.group-anagrams.py)|
|50|[Pow(x, n)](https://leetcode-cn.com/problems/powx-n)|Medium|Math, Binary Search|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/50.powx-n.py)|
|51|[N-Queens](https://leetcode-cn.com/problems/n-queens)|Hard|Backtracking|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/51.n-queens.py)|
|53|[Maximum Subarray](https://leetcode-cn.com/problems/maximum-subarray)|Easy|Array, Divide and Conquer, Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/53.maximum-subarray.py)|
|55|[Jump Game](https://leetcode-cn.com/problems/jump-game)|Medium|Greedy, Array|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/55.jump-game.py)|
|56|[Merge Intervals](https://leetcode-cn.com/problems/merge-intervals)|Medium|Sort, Array|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/56.merge-intervals.py)|
|60|[Permutation Sequence](https://leetcode-cn.com/problems/permutation-sequence)|Medium|Math, Backtracking|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/60.permutation-sequence.py)|
|62|[Unique Paths](https://leetcode-cn.com/problems/unique-paths)|Medium|Array, Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/62.unique-paths.py)|
|63|[Unique Paths II](https://leetcode-cn.com/problems/unique-paths-ii)|Medium|Array, Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/63.unique-paths-ii.py)|
|64|[Minimum Path Sum](https://leetcode-cn.com/problems/minimum-path-sum)|Medium|Array, Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/64.minimum-path-sum.py)|
|66|[Plus One](https://leetcode-cn.com/problems/plus-one)|Easy|Array|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/66.plus-one.py)|
|67|[Add Binary](https://leetcode-cn.com/problems/add-binary)|Easy|Math, String|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/67.add-binary.py)|
|69|[Sqrt(x)](https://leetcode-cn.com/problems/sqrtx)|Easy|Math, Binary Search|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/69.sqrtx.py)|
|70|[Climbing Stairs](https://leetcode-cn.com/problems/climbing-stairs)|Easy|Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/70.climbing-stairs.py)|
|71|[Simplify Path](https://leetcode-cn.com/problems/simplify-path)|Medium|Stack, String|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/71.simplify-path.py)|
|72|[Edit Distance](https://leetcode-cn.com/problems/edit-distance)|Hard|String, Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/72.edit-distance.py)|
|75|[Sort Colors](https://leetcode-cn.com/problems/sort-colors)|Medium|Sort, Array, Two Pointers|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/75.sort-colors.py)|
|76|[Minimum Window Substring](https://leetcode-cn.com/problems/minimum-window-substring)|Hard|Hash Table, Two Pointers, String|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/76.minimum-window-substring.py)|
|77|[Combinations](https://leetcode-cn.com/problems/combinations)|Medium|Backtracking|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/77.combinations.py)|
|78|[Subsets](https://leetcode-cn.com/problems/subsets)|Medium|Bit Manipulation, Array, Backtracking|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/78.subsets.py)|
|79|[Word Search](https://leetcode-cn.com/problems/word-search)|Medium|Array, Backtracking|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/79.word-search.py)|
|81|[Search in Rotated Sorted Array II](https://leetcode-cn.com/problems/search-in-rotated-sorted-array-ii)|Medium|Array, Binary Search|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/81.search-in-rotated-sorted-array-ii.py)|
|82|[Remove Duplicates from Sorted List II](https://leetcode-cn.com/problems/remove-duplicates-from-sorted-list-ii)|Medium|Linked List|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/82.remove-duplicates-from-sorted-list-ii.py)|
|83|[Remove Duplicates from Sorted List](https://leetcode-cn.com/problems/remove-duplicates-from-sorted-list)|Easy|Linked List|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/83.remove-duplicates-from-sorted-list.py)|
|84|[Largest Rectangle in Histogram](https://leetcode-cn.com/problems/largest-rectangle-in-histogram)|Hard|Stack, Array|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/84.largest-rectangle-in-histogram.py)|
|85|[Maximal Rectangle](https://leetcode-cn.com/problems/maximal-rectangle)|Hard|Stack, Array, Hash Table, Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/85.maximal-rectangle.py)|
|86|[Partition List](https://leetcode-cn.com/problems/partition-list)|Medium|Linked List, Two Pointers|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/86.partition-list.py)|
|91|[Decode Ways](https://leetcode-cn.com/problems/decode-ways)|Medium|String, Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/91.decode-ways.py)|
|92|[Reverse Linked List II](https://leetcode-cn.com/problems/reverse-linked-list-ii)|Medium|Linked List|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/92.reverse-linked-list-ii.py)|
|93|[Restore IP Addresses](https://leetcode-cn.com/problems/restore-ip-addresses)|Medium|String, Backtracking|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/93.restore-ip-addresses.py)|
|94|[Binary Tree Inorder Traversal](https://leetcode-cn.com/problems/binary-tree-inorder-traversal)|Medium|Stack, Tree, Hash Table|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/94.binary-tree-inorder-traversal.py)|
|95|[Unique Binary Search Trees II](https://leetcode-cn.com/problems/unique-binary-search-trees-ii)|Medium|Tree, Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/95.unique-binary-search-trees-ii.py)|
|96|[Unique Binary Search Trees](https://leetcode-cn.com/problems/unique-binary-search-trees)|Medium|Tree, Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/96.unique-binary-search-trees.py)|
|97|[Interleaving String](https://leetcode-cn.com/problems/interleaving-string)|Hard|String, Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/97.interleaving-string.py)|
|98|[Validate Binary Search Tree](https://leetcode-cn.com/problems/validate-binary-search-tree)|Medium|Tree, Depth-First Search|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/98.validate-binary-search-tree.py)|
|99|[Recover Binary Search Tree](https://leetcode-cn.com/problems/recover-binary-search-tree)|Hard|Tree, Depth-First Search|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/99.recover-binary-search-tree.py)|
|100|[Same Tree](https://leetcode-cn.com/problems/same-tree)|Easy|Tree, Depth-First Search|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/100.same-tree.py)|
|101|[Symmetric Tree](https://leetcode-cn.com/problems/symmetric-tree)|Easy|Tree, Depth-First Search, Breadth-First Search|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/101.symmetric-tree.py)|
|102|[Binary Tree Level Order Traversal](https://leetcode-cn.com/problems/binary-tree-level-order-traversal)|Medium|Tree, Breadth-First Search|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/102.binary-tree-level-order-traversal.py)|
|103|[Binary Tree Zigzag Level Order Traversal](https://leetcode-cn.com/problems/binary-tree-zigzag-level-order-traversal)|Medium|Stack, Tree, Breadth-First Search|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/103.binary-tree-zigzag-level-order-traversal.py)|
|104|[Maximum Depth of Binary Tree](https://leetcode-cn.com/problems/maximum-depth-of-binary-tree)|Easy|Tree, Depth-First Search|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/104.maximum-depth-of-binary-tree.py)|
|105|[Construct Binary Tree from Preorder and Inorder Traversal](https://leetcode-cn.com/problems/construct-binary-tree-from-preorder-and-inorder-traversal)|Medium|Tree, Depth-First Search, Array|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/105.construct-binary-tree-from-preorder-and-inorder-traversal.py)|
|107|[Binary Tree Level Order Traversal II](https://leetcode-cn.com/problems/binary-tree-level-order-traversal-ii)|Easy|Tree, Breadth-First Search|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/107.binary-tree-level-order-traversal-ii.py)|
|108|[Convert Sorted Array to Binary Search Tree](https://leetcode-cn.com/problems/convert-sorted-array-to-binary-search-tree)|Easy|Tree, Depth-First Search|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/108.convert-sorted-array-to-binary-search-tree.py)|
|109|[Convert Sorted List to Binary Search Tree](https://leetcode-cn.com/problems/convert-sorted-list-to-binary-search-tree)|Medium|Depth-First Search, Linked List|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/109.convert-sorted-list-to-binary-search-tree.py)|
|110|[Balanced Binary Tree](https://leetcode-cn.com/problems/balanced-binary-tree)|Easy|Tree, Depth-First Search|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/110.balanced-binary-tree.py)|
|111|[Minimum Depth of Binary Tree](https://leetcode-cn.com/problems/minimum-depth-of-binary-tree)|Easy|Tree, Depth-First Search, Breadth-First Search|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/111.minimum-depth-of-binary-tree.py)|
|112|[Path Sum](https://leetcode-cn.com/problems/path-sum)|Easy|Tree, Depth-First Search|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/112.path-sum.py)|
|114|[Flatten Binary Tree to Linked List](https://leetcode-cn.com/problems/flatten-binary-tree-to-linked-list)|Medium|Tree, Depth-First Search|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/114.flatten-binary-tree-to-linked-list.py)|
|120|[Triangle](https://leetcode-cn.com/problems/triangle)|Medium|Array, Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/120.triangle.py)|
|121|[Best Time to Buy and Sell Stock](https://leetcode-cn.com/problems/best-time-to-buy-and-sell-stock)|Easy|Array, Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/121.best-time-to-buy-and-sell-stock.py)|
|122|[Best Time to Buy and Sell Stock II](https://leetcode-cn.com/problems/best-time-to-buy-and-sell-stock-ii)|Easy|Greedy, Array|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/122.best-time-to-buy-and-sell-stock-ii.py)|
|124|[Binary Tree Maximum Path Sum](https://leetcode-cn.com/problems/binary-tree-maximum-path-sum)|Hard|Tree, Depth-First Search|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/124.binary-tree-maximum-path-sum.py)|
|125|[Valid Palindrome](https://leetcode-cn.com/problems/valid-palindrome)|Easy|Two Pointers, String|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/125.valid-palindrome.py)|
|126|[Word Ladder II](https://leetcode-cn.com/problems/word-ladder-ii)|Hard|Breadth-First Search, Array, String, Backtracking|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/126.word-ladder-ii.py)|
|128|[Longest Consecutive Sequence](https://leetcode-cn.com/problems/longest-consecutive-sequence)|Hard|Union Find, Array|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/128.longest-consecutive-sequence.py)|
|130|[Surrounded Regions](https://leetcode-cn.com/problems/surrounded-regions)|Medium|Depth-First Search, Breadth-First Search, Union Find|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/130.surrounded-regions.py)|
|133|[Clone Graph](https://leetcode-cn.com/problems/clone-graph)|Medium|Depth-First Search, Breadth-First Search, Graph|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/133.clone-graph.py)|
|136|[Single Number](https://leetcode-cn.com/problems/single-number)|Easy|Bit Manipulation, Hash Table|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/136.single-number.py)|
|138|[Copy List with Random Pointer](https://leetcode-cn.com/problems/copy-list-with-random-pointer)|Medium|Hash Table, Linked List|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/138.copy-list-with-random-pointer.py)|
|139|[Word Break](https://leetcode-cn.com/problems/word-break)|Medium|Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/139.word-break.py)|
|140|[Word Break II](https://leetcode-cn.com/problems/word-break-ii)|Hard|Dynamic Programming, Backtracking|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/140.word-break-ii.py)|
|141|[Linked List Cycle](https://leetcode-cn.com/problems/linked-list-cycle)|Easy|Linked List, Two Pointers|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/141.linked-list-cycle.py)|
|142|[Linked List Cycle II](https://leetcode-cn.com/problems/linked-list-cycle-ii)|Medium|Linked List, Two Pointers|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/142.linked-list-cycle-ii.py)|
|143|[Reorder List](https://leetcode-cn.com/problems/reorder-list)|Medium|Linked List|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/143.reorder-list.py)|
|144|[Binary Tree Preorder Traversal](https://leetcode-cn.com/problems/binary-tree-preorder-traversal)|Medium|Stack, Tree|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/144.binary-tree-preorder-traversal.py)|
|145|[Binary Tree Postorder Traversal](https://leetcode-cn.com/problems/binary-tree-postorder-traversal)|Medium|Stack, Tree|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/145.binary-tree-postorder-traversal.py)|
|146|[LRU Cache](https://leetcode-cn.com/problems/lru-cache)|Medium|Design|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/146.lru-cache.py)|
|148|[Sort List](https://leetcode-cn.com/problems/sort-list)|Medium|Sort, Linked List|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/148.sort-list.py)|
|150|[Evaluate Reverse Polish Notation](https://leetcode-cn.com/problems/evaluate-reverse-polish-notation)|Medium|Stack|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/150.evaluate-reverse-polish-notation.py)|
|151|[Reverse Words in a String](https://leetcode-cn.com/problems/reverse-words-in-a-string)|Medium|String|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/151.reverse-words-in-a-string.py)|
|152|[Maximum Product Subarray](https://leetcode-cn.com/problems/maximum-product-subarray)|Medium|Array, Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/152.maximum-product-subarray.py)|
|153|[Find Minimum in Rotated Sorted Array](https://leetcode-cn.com/problems/find-minimum-in-rotated-sorted-array)|Medium|Array, Binary Search|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/153.find-minimum-in-rotated-sorted-array.py)|
|154|[Find Minimum in Rotated Sorted Array II](https://leetcode-cn.com/problems/find-minimum-in-rotated-sorted-array-ii)|Hard|Array, Binary Search|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/154.find-minimum-in-rotated-sorted-array-ii.py)|
|155|[Min Stack](https://leetcode-cn.com/problems/min-stack)|Easy|Stack, Design|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/155.min-stack.py)|
|160|[Intersection of Two Linked Lists](https://leetcode-cn.com/problems/intersection-of-two-linked-lists)|Easy|Linked List|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/160.intersection-of-two-linked-lists.py)|
|163|Missing Ranges :lock:|Medium|Array|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/163.missing-ranges.py)|
|167|[Two Sum II - Input Array Is Sorted](https://leetcode-cn.com/problems/two-sum-ii-input-array-is-sorted)|Easy|Array, Two Pointers, Binary Search|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/167.two-sum-ii-input-array-is-sorted.py)|
|169|[Majority Element](https://leetcode-cn.com/problems/majority-element)|Easy|Bit Manipulation, Array, Divide and Conquer|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/169.majority-element.py)|
|174|[Dungeon Game](https://leetcode-cn.com/problems/dungeon-game)|Hard|Binary Search, Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/174.dungeon-game.py)|
|176|[Second Highest Salary](https://leetcode-cn.com/problems/second-highest-salary)|Easy|No tags|[Mysql](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/176.second-highest-salary)|
|190|[Reverse Bits](https://leetcode-cn.com/problems/reverse-bits)|Easy|Bit Manipulation|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/190.reverse-bits.py)|
|191|[Number of 1 Bits](https://leetcode-cn.com/problems/number-of-1-bits)|Easy|Bit Manipulation|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/191.number-of-1-bits.py)|
|198|[House Robber](https://leetcode-cn.com/problems/house-robber)|Easy|Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/198.house-robber.py)|
|199|[Binary Tree Right Side View](https://leetcode-cn.com/problems/binary-tree-right-side-view)|Medium|Tree, Depth-First Search, Breadth-First Search|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/199.binary-tree-right-side-view.py)|
|200|[Number of Islands](https://leetcode-cn.com/problems/number-of-islands)|Medium|Depth-First Search, Breadth-First Search, Union Find|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/200.number-of-islands.py)|
|201|[Bitwise AND of Numbers Range](https://leetcode-cn.com/problems/bitwise-and-of-numbers-range)|Medium|Bit Manipulation|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/201.bitwise-and-of-numbers-range.py)|
|202|[Happy Number](https://leetcode-cn.com/problems/happy-number)|Easy|Hash Table, Math|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/202.happy-number.py)|
|206|[Reverse Linked List](https://leetcode-cn.com/problems/reverse-linked-list)|Easy|Linked List|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/206.reverse-linked-list.py)|
|207|[Course Schedule](https://leetcode-cn.com/problems/course-schedule)|Medium|Depth-First Search, Breadth-First Search, Graph, Topological Sort|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/207.course-schedule.py)|
|208|[Implement Trie (Prefix Tree)](https://leetcode-cn.com/problems/implement-trie-prefix-tree)|Medium|Design, Trie|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/208.implement-trie-prefix-tree.py)|
|209|[Minimum Size Subarray Sum](https://leetcode-cn.com/problems/minimum-size-subarray-sum)|Medium|Array, Two Pointers, Binary Search|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/209.minimum-size-subarray-sum.py)|
|210|[Course Schedule II](https://leetcode-cn.com/problems/course-schedule-ii)|Medium|Depth-First Search, Breadth-First Search, Graph, Topological Sort|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/210.course-schedule-ii.py)|
|213|[House Robber II](https://leetcode-cn.com/problems/house-robber-ii)|Medium|Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/213.house-robber-ii.py)|
|214|[Shortest Palindrome](https://leetcode-cn.com/problems/shortest-palindrome)|Hard|String|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/214.shortest-palindrome.py)|
|215|[Kth Largest Element in an Array](https://leetcode-cn.com/problems/kth-largest-element-in-an-array)|Medium|Heap, Divide and Conquer|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/215.kth-largest-element-in-an-array.py)|
|216|[Combination Sum III](https://leetcode-cn.com/problems/combination-sum-iii)|Medium|Array, Backtracking|[Python3](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/216.combination-sum-iii)|
|221|[Maximal Square](https://leetcode-cn.com/problems/maximal-square)|Medium|Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/221.maximal-square.py)|
|226|[Invert Binary Tree](https://leetcode-cn.com/problems/invert-binary-tree)|Easy|Tree|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/226.invert-binary-tree.py)|
|232|[Implement Queue using Stacks](https://leetcode-cn.com/problems/implement-queue-using-stacks)|Easy|Stack, Design|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/232.implement-queue-using-stacks.py)|
|234|[Palindrome Linked List](https://leetcode-cn.com/problems/palindrome-linked-list)|Easy|Linked List, Two Pointers|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/234.palindrome-linked-list.py)|
|236|[Lowest Common Ancestor of a Binary Tree](https://leetcode-cn.com/problems/lowest-common-ancestor-of-a-binary-tree)|Medium|Tree|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/236.lowest-common-ancestor-of-a-binary-tree.py)|
|237|[Delete Node in a Linked List](https://leetcode-cn.com/problems/delete-node-in-a-linked-list)|Easy|Linked List|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/237.delete-node-in-a-linked-list.py)|
|238|[Product of Array Except Self](https://leetcode-cn.com/problems/product-of-array-except-self)|Medium|Array|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/238.product-of-array-except-self.py)|
|239|[Sliding Window Maximum](https://leetcode-cn.com/problems/sliding-window-maximum)|Hard|Heap|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/239.sliding-window-maximum.py)|
|240|[Search a 2D Matrix II](https://leetcode-cn.com/problems/search-a-2d-matrix-ii)|Medium|Binary Search, Divide and Conquer|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/240.search-a-2d-matrix-ii.py)|
|251|Flatten 2D Vector :lock:|Medium|Design|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/251.flatten-2d-vector.py)|
|253|Meeting Rooms II :lock:|Medium|Heap, Greedy, Sort|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/253.meeting-rooms-ii.py)|
|256|Paint House :lock:|Easy|Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/256.paint-house.py)|
|257|[Binary Tree Paths](https://leetcode-cn.com/problems/binary-tree-paths)|Easy|Tree, Depth-First Search|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/257.binary-tree-paths.py)|
|260|[Single Number III](https://leetcode-cn.com/problems/single-number-iii)|Medium|Bit Manipulation|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/260.single-number-iii.py)|
|269|Alien Dictionary :lock:|Hard|Graph, Topological Sort|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/269.alien-dictionary.py)|
|276|Paint Fence :lock:|Easy|Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/276.paint-fence.py)|
|277|Find the Celebrity :lock:|Medium|Array|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/277.find-the-celebrity.py)|
|279|[Perfect Squares](https://leetcode-cn.com/problems/perfect-squares)|Medium|Breadth-First Search, Math, Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/279.perfect-squares.py)|
|283|[Move Zeroes](https://leetcode-cn.com/problems/move-zeroes)|Easy|Array, Two Pointers|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/283.move-zeroes.py)|
|285|Inorder Successor in BST :lock:|Medium|Tree|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/285.inorder-successor-in-bst.py)|
|287|[Find the Duplicate Number](https://leetcode-cn.com/problems/find-the-duplicate-number)|Medium|Array, Two Pointers, Binary Search|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/287.find-the-duplicate-number.py)|
|297|[Serialize and Deserialize Binary Tree](https://leetcode-cn.com/problems/serialize-and-deserialize-binary-tree)|Hard|Tree, Design|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/297.serialize-and-deserialize-binary-tree.py)|
|299|[Bulls and Cows](https://leetcode-cn.com/problems/bulls-and-cows)|Easy|Hash Table|[Python3](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/299.bulls-and-cows)|
|300|[Longest Increasing Subsequence](https://leetcode-cn.com/problems/longest-increasing-subsequence)|Medium|Binary Search, Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/300.longest-increasing-subsequence.py)|
|301|[Remove Invalid Parentheses](https://leetcode-cn.com/problems/remove-invalid-parentheses)|Hard|Depth-First Search, Breadth-First Search|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/301.remove-invalid-parentheses.py)|
|303|[Range Sum Query - Immutable](https://leetcode-cn.com/problems/range-sum-query-immutable)|Easy|Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/303.range-sum-query-immutable.py)|
|308|Range Sum Query 2D - Mutable :lock:|Hard|Binary Indexed Tree, Segment Tree|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/308.range-sum-query-2d-mutable.py)|
|309|[Best Time to Buy and Sell Stock with Cooldown](https://leetcode-cn.com/problems/best-time-to-buy-and-sell-stock-with-cooldown)|Medium|Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/309.best-time-to-buy-and-sell-stock-with-cooldown.py)|
|312|[Burst Balloons](https://leetcode-cn.com/problems/burst-balloons)|Hard|Divide and Conquer, Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/312.burst-balloons.py)|
|315|[Count of Smaller Numbers After Self](https://leetcode-cn.com/problems/count-of-smaller-numbers-after-self)|Hard|Sort, Binary Indexed Tree, Segment Tree, Binary Search, Divide and Conquer|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/315.count-of-smaller-numbers-after-self.py)|
|322|[Coin Change](https://leetcode-cn.com/problems/coin-change)|Medium|Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/322.coin-change.py)|
|329|[Longest Increasing Path in a Matrix](https://leetcode-cn.com/problems/longest-increasing-path-in-a-matrix)|Hard|Depth-First Search, Topological Sort, Memoization|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/329.longest-increasing-path-in-a-matrix.py)|
|332|[Reconstruct Itinerary](https://leetcode-cn.com/problems/reconstruct-itinerary)|Medium|Depth-First Search, Graph|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/332.reconstruct-itinerary.py)|
|336|[Palindrome Pairs](https://leetcode-cn.com/problems/palindrome-pairs)|Hard|Trie, Hash Table, String|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/336.palindrome-pairs.py)|
|337|[House Robber III](https://leetcode-cn.com/problems/house-robber-iii)|Medium|Tree, Depth-First Search|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/337.house-robber-iii.py)|
|338|[Counting Bits](https://leetcode-cn.com/problems/counting-bits)|Medium|Bit Manipulation, Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/338.counting-bits.py)|
|340|Longest Substring with At Most K Distinct Characters :lock:|Hard|Hash Table, String|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/340.longest-substring-with-at-most-k-distinct-characters.py)|
|343|[Integer Break](https://leetcode-cn.com/problems/integer-break)|Medium|Math, Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/343.integer-break.py)|
|344|[Reverse String](https://leetcode-cn.com/problems/reverse-string)|Easy|Two Pointers, String|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/344.reverse-string.py)|
|347|[Top K Frequent Elements](https://leetcode-cn.com/problems/top-k-frequent-elements)|Medium|Heap, Hash Table|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/347.top-k-frequent-elements.py)|
|348|Design Tic-Tac-Toe :lock:|Medium|Design|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/348.design-tic-tac-toe.py)|
|350|[Intersection of Two Arrays II](https://leetcode-cn.com/problems/intersection-of-two-arrays-ii)|Easy|Sort, Hash Table, Two Pointers, Binary Search|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/350.intersection-of-two-arrays-ii.py)|
|354|[Russian Doll Envelopes](https://leetcode-cn.com/problems/russian-doll-envelopes)|Hard|Binary Search, Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/354.russian-doll-envelopes.py)|
|378|[Kth Smallest Element in a Sorted Matrix](https://leetcode-cn.com/problems/kth-smallest-element-in-a-sorted-matrix)|Medium|Heap, Binary Search|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/378.kth-smallest-element-in-a-sorted-matrix.py)|
|392|[Is Subsequence](https://leetcode-cn.com/problems/is-subsequence)|Easy|Greedy, Binary Search, Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/392.is-subsequence.py)|
|393|[UTF-8 Validation](https://leetcode-cn.com/problems/utf-8-validation)|Medium|Bit Manipulation|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/393.utf-8-validation.py)|
|394|[Decode String](https://leetcode-cn.com/problems/decode-string)|Medium|Stack, Depth-First Search|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/394.decode-string.py)|
|399|[Evaluate Division](https://leetcode-cn.com/problems/evaluate-division)|Medium|Union Find, Graph|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/399.evaluate-division.py)|
|406|[Queue Reconstruction by Height](https://leetcode-cn.com/problems/queue-reconstruction-by-height)|Medium|Greedy|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/406.queue-reconstruction-by-height.py)|
|410|[Split Array Largest Sum](https://leetcode-cn.com/problems/split-array-largest-sum)|Hard|Binary Search, Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/410.split-array-largest-sum.py)|
|413|[Arithmetic Slices](https://leetcode-cn.com/problems/arithmetic-slices)|Medium|Math, Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/413.arithmetic-slices.py)|
|415|[Add Strings](https://leetcode-cn.com/problems/add-strings)|Easy|String|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/415.add-strings.py)|
|416|[Partition Equal Subset Sum](https://leetcode-cn.com/problems/partition-equal-subset-sum)|Medium|Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/416.partition-equal-subset-sum.py)|
|432|[All O(1) Data Structure](https://leetcode-cn.com/problems/all-oone-data-structure)|Hard|Design|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/432.all-oone-data-structure.py)|
|437|[Path Sum III](https://leetcode-cn.com/problems/path-sum-iii)|Medium|Tree|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/437.path-sum-iii.py)|
|438|[Find All Anagrams in a String](https://leetcode-cn.com/problems/find-all-anagrams-in-a-string)|Medium|Hash Table|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/438.find-all-anagrams-in-a-string.py)|
|443|[String Compression](https://leetcode-cn.com/problems/string-compression)|Easy|String|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/443.string-compression.py)|
|448|[Find All Numbers Disappeared in an Array](https://leetcode-cn.com/problems/find-all-numbers-disappeared-in-an-array)|Easy|Array|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/448.find-all-numbers-disappeared-in-an-array.py)|
|459|[Repeated Substring Pattern](https://leetcode-cn.com/problems/repeated-substring-pattern)|Easy|String|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/459.repeated-substring-pattern.py)|
|461|[Hamming Distance](https://leetcode-cn.com/problems/hamming-distance)|Easy|Bit Manipulation|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/461.hamming-distance.py)|
|466|[Count The Repetitions](https://leetcode-cn.com/problems/count-the-repetitions)|Hard|Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/466.count-the-repetitions.py)|
|486|[Predict the Winner](https://leetcode-cn.com/problems/predict-the-winner)|Medium|Minimax, Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/486.predict-the-winner.py)|
|491|[Increasing Subsequences](https://leetcode-cn.com/problems/increasing-subsequences)|Medium|Depth-First Search|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/491.increasing-subsequences.py)|
|494|[Target Sum](https://leetcode-cn.com/problems/target-sum)|Medium|Depth-First Search, Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/494.target-sum.py)|
|508|[Most Frequent Subtree Sum](https://leetcode-cn.com/problems/most-frequent-subtree-sum)|Medium|Tree, Hash Table|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/508.most-frequent-subtree-sum.py)|
|529|[Minesweeper](https://leetcode-cn.com/problems/minesweeper)|Medium|Depth-First Search, Breadth-First Search|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/529.minesweeper.py)|
|538|[Convert BST to Greater Tree](https://leetcode-cn.com/problems/convert-bst-to-greater-tree)|Easy|Tree|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/538.convert-bst-to-greater-tree.py)|
|542|[01 Matrix](https://leetcode-cn.com/problems/01-matrix)|Medium|Depth-First Search, Breadth-First Search|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/542.01-matrix.py)|
|543|[Diameter of Binary Tree](https://leetcode-cn.com/problems/diameter-of-binary-tree)|Easy|Tree|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/543.diameter-of-binary-tree.py)|
|546|[Remove Boxes](https://leetcode-cn.com/problems/remove-boxes)|Hard|Depth-First Search, Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/546.remove-boxes.py)|
|547|[Friend Circles](https://leetcode-cn.com/problems/friend-circles)|Medium|Depth-First Search, Union Find|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/547.friend-circles.py)|
|557|[Reverse Words in a String III](https://leetcode-cn.com/problems/reverse-words-in-a-string-iii)|Easy|String|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/557.reverse-words-in-a-string-iii.py)|
|560|[Subarray Sum Equals K](https://leetcode-cn.com/problems/subarray-sum-equals-k)|Medium|Array, Hash Table|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/560.subarray-sum-equals-k.py)|
|567|[Permutation in String](https://leetcode-cn.com/problems/permutation-in-string)|Medium|Two Pointers|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/567.permutation-in-string.py)|
|572|[Subtree of Another Tree](https://leetcode-cn.com/problems/subtree-of-another-tree)|Easy|Tree|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/572.subtree-of-another-tree.py)|
|581|[Shortest Unsorted Continuous Subarray](https://leetcode-cn.com/problems/shortest-unsorted-continuous-subarray)|Easy|Array|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/581.shortest-unsorted-continuous-subarray.py)|
|617|[Merge Two Binary Trees](https://leetcode-cn.com/problems/merge-two-binary-trees)|Easy|Tree|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/617.merge-two-binary-trees.py)|
|621|[Task Scheduler](https://leetcode-cn.com/problems/task-scheduler)|Medium|Greedy, Queue, Array|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/621.task-scheduler.py)|
|632|[Smallest Range Covering Elements from K Lists](https://leetcode-cn.com/problems/smallest-range-covering-elements-from-k-lists)|Hard|Hash Table, Two Pointers, String|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/632.smallest-range-covering-elements-from-k-lists.py)|
|637|[Average of Levels in Binary Tree](https://leetcode-cn.com/problems/average-of-levels-in-binary-tree)|Easy|Tree|[Python3](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/637.average-of-levels-in-binary-tree)|
|647|[Palindromic Substrings](https://leetcode-cn.com/problems/palindromic-substrings)|Medium|String, Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/647.palindromic-substrings.py)|
|650|[2 Keys Keyboard](https://leetcode-cn.com/problems/2-keys-keyboard)|Medium|Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/650.2-keys-keyboard.py)|
|657|[Robot Return to Origin](https://leetcode-cn.com/problems/robot-return-to-origin)|Easy|String|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/657.robot-return-to-origin.py)|
|674|[Longest Continuous Increasing Subsequence](https://leetcode-cn.com/problems/longest-continuous-increasing-subsequence)|Easy|Array|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/674.longest-continuous-increasing-subsequence.py)|
|679|[24 Game](https://leetcode-cn.com/problems/24-game)|Hard|Depth-First Search|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/679.24-game.py)|
|680|[Valid Palindrome II](https://leetcode-cn.com/problems/valid-palindrome-ii)|Easy|String|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/680.valid-palindrome-ii.py)|
|695|[Max Area of Island](https://leetcode-cn.com/problems/max-area-of-island)|Medium|Depth-First Search, Array|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/695.max-area-of-island.py)|
|696|[Count Binary Substrings](https://leetcode-cn.com/problems/count-binary-substrings)|Easy|String|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/696.count-binary-substrings.py)|
|712|[Minimum ASCII Delete Sum for Two Strings](https://leetcode-cn.com/problems/minimum-ascii-delete-sum-for-two-strings)|Medium|Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/712.minimum-ascii-delete-sum-for-two-strings.py)|
|714|[Best Time to Buy and Sell Stock with Transaction Fee](https://leetcode-cn.com/problems/best-time-to-buy-and-sell-stock-with-transaction-fee)|Medium|Greedy, Array, Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/714.best-time-to-buy-and-sell-stock-with-transaction-fee.py)|
|718|[Maximum Length of Repeated Subarray](https://leetcode-cn.com/problems/maximum-length-of-repeated-subarray)|Medium|Array, Hash Table, Binary Search, Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/718.maximum-length-of-repeated-subarray.py)|
|733|[Flood Fill](https://leetcode-cn.com/problems/flood-fill)|Easy|Depth-First Search|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/733.flood-fill.py)|
|739|[Daily Temperatures](https://leetcode-cn.com/problems/daily-temperatures)|Medium|Stack, Hash Table|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/739.daily-temperatures.py)|
|747|[Min Cost Climbing Stairs](https://leetcode-cn.com/problems/min-cost-climbing-stairs)|Easy|Array, Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/747.min-cost-climbing-stairs.py)|
|784|[Insert into a Binary Search Tree](https://leetcode-cn.com/problems/insert-into-a-binary-search-tree)|Medium|Tree|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/784.insert-into-a-binary-search-tree.py)|
|801|[Is Graph Bipartite](https://leetcode-cn.com/problems/is-graph-bipartite)|Medium|Depth-First Search, Breadth-First Search, Graph|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/801.is-graph-bipartite.py)|
|861|[Flipping an Image](https://leetcode-cn.com/problems/flipping-an-image)|Easy|Array|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/861.flipping-an-image.py)|
|862|[Find and Replace in String](https://leetcode-cn.com/problems/find-and-replace-in-string)|Medium|String|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/862.find-and-replace-in-string.py)|
|864|[Image Overlap](https://leetcode-cn.com/problems/image-overlap)|Medium|Array|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/864.image-overlap.py)|
|867|[New 21 Game](https://leetcode-cn.com/problems/new-21-game)|Medium|Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/867.new-21-game.py)|
|871|[Keys and Rooms](https://leetcode-cn.com/problems/keys-and-rooms)|Medium|Depth-First Search, Graph|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/871.keys-and-rooms.py)|
|874|[Backspace String Compare](https://leetcode-cn.com/problems/backspace-string-compare)|Easy|Stack, Two Pointers|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/874.backspace-string-compare.py)|
|875|[Longest Mountain in Array](https://leetcode-cn.com/problems/longest-mountain-in-array)|Medium|Two Pointers|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/875.longest-mountain-in-array.py)|
|876|[Hand of Straights](https://leetcode-cn.com/problems/hand-of-straights)|Medium|No tags|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/876.hand-of-straights.py)|
|877|[Shortest Path Visiting All Nodes](https://leetcode-cn.com/problems/shortest-path-visiting-all-nodes)|Hard|Breadth-First Search, Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/877.shortest-path-visiting-all-nodes.py)|
|899|[Binary Gap](https://leetcode-cn.com/problems/binary-gap)|Easy|Math|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/899.binary-gap.py)|
|900|[Reordered Power of 2](https://leetcode-cn.com/problems/reordered-power-of-2)|Medium|Math|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/900.reordered-power-of-2.py)|
|901|[Advantage Shuffle](https://leetcode-cn.com/problems/advantage-shuffle)|Medium|Greedy, Array|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/901.advantage-shuffle.py)|
|902|[Minimum Number of Refueling Stops](https://leetcode-cn.com/problems/minimum-number-of-refueling-stops)|Hard|Heap, Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/902.minimum-number-of-refueling-stops.py)|
|909|[Stone Game](https://leetcode-cn.com/problems/stone-game)|Medium|Minimax, Math, Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/909.stone-game.py)|
|958|[Sort Array By Parity II](https://leetcode-cn.com/problems/sort-array-by-parity-ii)|Easy|Sort, Array|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/958.sort-array-by-parity-ii.py)|
|967|[Minimum Falling Path Sum](https://leetcode-cn.com/problems/minimum-falling-path-sum)|Medium|Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/967.minimum-falling-path-sum.py)|
|1005|[Univalued Binary Tree](https://leetcode-cn.com/problems/univalued-binary-tree)|Easy|Tree|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1005.univalued-binary-tree.py)|
|1006|[Vowel Spellchecker](https://leetcode-cn.com/problems/vowel-spellchecker)|Medium|Hash Table, String|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1006.vowel-spellchecker.py)|
|1007|[Numbers With Same Consecutive Differences](https://leetcode-cn.com/problems/numbers-with-same-consecutive-differences)|Medium|Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1007.numbers-with-same-consecutive-differences.py)|
|1008|[Binary Tree Cameras](https://leetcode-cn.com/problems/binary-tree-cameras)|Hard|Tree, Depth-First Search, Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1008.binary-tree-cameras.py)|
|1016|[Subarray Sums Divisible by K](https://leetcode-cn.com/problems/subarray-sums-divisible-by-k)|Medium|Array, Hash Table|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1016.subarray-sums-divisible-by-k.py)|
|1025|[Minimum Cost For Tickets](https://leetcode-cn.com/problems/minimum-cost-for-tickets)|Medium|Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1025.minimum-cost-for-tickets.py)|
|1032|[Satisfiability of Equality Equations](https://leetcode-cn.com/problems/satisfiability-of-equality-equations)|Medium|Union Find, Graph|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1032.satisfiability-of-equality-equations.py)|
|1033|[Broken Calculator](https://leetcode-cn.com/problems/broken-calculator)|Medium|Greedy, Math|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1033.broken-calculator.py)|
|1034|[Subarrays with K Different Integers](https://leetcode-cn.com/problems/subarrays-with-k-different-integers)|Hard|Hash Table, Two Pointers|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1034.subarrays-with-k-different-integers.py)|
|1063|[Best Sightseeing Pair](https://leetcode-cn.com/problems/best-sightseeing-pair)|Medium|Array|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1063.best-sightseeing-pair.py)|
|1086|[Divisor Game](https://leetcode-cn.com/problems/divisor-game)|Easy|Math, Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1086.divisor-game.py)|
|1093|[Recover a Tree From Preorder Traversal](https://leetcode-cn.com/problems/recover-a-tree-from-preorder-traversal)|Hard|Tree, Depth-First Search|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1093.recover-a-tree-from-preorder-traversal.py)|
|1185|[Find in Mountain Array](https://leetcode-cn.com/problems/find-in-mountain-array)|Hard|Binary Search|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1185.find-in-mountain-array.py)|
|1194|[Path In Zigzag Labelled Binary Tree](https://leetcode-cn.com/problems/path-in-zigzag-labelled-binary-tree)|Medium|Tree, Math|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1194.path-in-zigzag-labelled-binary-tree.py)|
|1195|[Distribute Candies to People](https://leetcode-cn.com/problems/distribute-candies-to-people)|Easy|Math|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1195.distribute-candies-to-people.py)|
|1196|[Filling Bookcase Shelves](https://leetcode-cn.com/problems/filling-bookcase-shelves)|Medium|Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1196.filling-bookcase-shelves.py)|
|1197|[Parsing A Boolean Expression](https://leetcode-cn.com/problems/parsing-a-boolean-expression)|Hard|String|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1197.parsing-a-boolean-expression.py)|
|1203|[Print in Order](https://leetcode-cn.com/problems/print-in-order)|Easy|No tags|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1203.print-in-order.py)|
|1209|Design Bounded Blocking Queue :lock:|Medium|No tags|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1209.design-bounded-blocking-queue.py)|
|1217|[Relative Sort Array](https://leetcode-cn.com/problems/relative-sort-array)|Easy|Sort, Array|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1217.relative-sort-array.py)|
|1228|[Minimum Cost Tree From Leaf Values](https://leetcode-cn.com/problems/minimum-cost-tree-from-leaf-values)|Medium|Stack, Tree, Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1228.minimum-cost-tree-from-leaf-values.py)|
|1232|[Sum of Mutated Array Closest to Target](https://leetcode-cn.com/problems/sum-of-mutated-array-closest-to-target)|Medium|Array, Binary Search|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1232.sum-of-mutated-array-closest-to-target.py)|
|1240|[Stone Game II](https://leetcode-cn.com/problems/stone-game-ii)|Medium|Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1240.stone-game-ii.py)|
|1242|[Matrix Block Sum](https://leetcode-cn.com/problems/matrix-block-sum)|Medium|Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1242.matrix-block-sum.py)|
|1250|[Longest Common Subsequence](https://leetcode-cn.com/problems/longest-common-subsequence)|Medium|Dynamic Programming|[Python3](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1250.longest-common-subsequence)|
|1283|[Reformat Date](https://leetcode-cn.com/problems/reformat-date)|Easy|String|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1283.reformat-date.py)|
|1285|[Balance a Binary Search Tree](https://leetcode-cn.com/problems/balance-a-binary-search-tree)|Medium|Binary Search Tree|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1285.balance-a-binary-search-tree.py)|
|1293|[Three Consecutive Odds](https://leetcode-cn.com/problems/three-consecutive-odds)|Easy|Array|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1293.three-consecutive-odds.py)|
|1325|[Path with Maximum Probability](https://leetcode-cn.com/problems/path-with-maximum-probability)|Medium|Graph|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1325.path-with-maximum-probability.py)|
|1358|[Find Positive Integer Solution for a Given Equation](https://leetcode-cn.com/problems/find-positive-integer-solution-for-a-given-equation)|Easy|Math, Binary Search|[Python3](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1358.find-positive-integer-solution-for-a-given-equation)|
|1359|[Circular Permutation in Binary Representation](https://leetcode-cn.com/problems/circular-permutation-in-binary-representation)|Medium|Math|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1359.circular-permutation-in-binary-representation.py)|
|1360|[Maximum Length of a Concatenated String with Unique Characters](https://leetcode-cn.com/problems/maximum-length-of-a-concatenated-string-with-unique-characters)|Medium|Bit Manipulation, Backtracking|[Python3](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1360.maximum-length-of-a-concatenated-string-with-unique-characters)|
|1361|[Tiling a Rectangle with the Fewest Squares](https://leetcode-cn.com/problems/tiling-a-rectangle-with-the-fewest-squares)|Hard|Dynamic Programming, Backtracking|[Java](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1361.tiling-a-rectangle-with-the-fewest-squares)|
|1362|[Airplane Seat Assignment Probability](https://leetcode-cn.com/problems/airplane-seat-assignment-probability)|Medium|Brainteaser, Math, Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1362.airplane-seat-assignment-probability.py)|
|1368|Web Crawler Multithreaded :lock:|Medium|Depth-First Search, Breadth-First Search|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1368.web-crawler-multithreaded.py)|
|1370|[Count Number of Nice Subarrays](https://leetcode-cn.com/problems/count-number-of-nice-subarrays)|Medium|Two Pointers|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1370.count-number-of-nice-subarrays.py)|
|1386|[Shift 2D Grid](https://leetcode-cn.com/problems/shift-2d-grid)|Easy|Array|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1386.shift-2d-grid.py)|
|1387|[Find Elements in a Contaminated Binary Tree](https://leetcode-cn.com/problems/find-elements-in-a-contaminated-binary-tree)|Medium|Tree, Hash Table|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1387.find-elements-in-a-contaminated-binary-tree.py)|
|1388|[Greatest Sum Divisible by Three](https://leetcode-cn.com/problems/greatest-sum-divisible-by-three)|Medium|Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1388.greatest-sum-divisible-by-three.py)|
|1389|[Minimum Moves to Move a Box to Their Target Location](https://leetcode-cn.com/problems/minimum-moves-to-move-a-box-to-their-target-location)|Hard|Breadth-First Search|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1389.minimum-moves-to-move-a-box-to-their-target-location.py)|
|1402|[Count Square Submatrices with All Ones](https://leetcode-cn.com/problems/count-square-submatrices-with-all-ones)|Medium|Array, Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1402.count-square-submatrices-with-all-ones.py)|
|1410|Traffic Light Controlled Intersection :lock:|Easy|No tags|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1410.traffic-light-controlled-intersection.py)|
|1421|[Find Numbers with Even Number of Digits](https://leetcode-cn.com/problems/find-numbers-with-even-number-of-digits)|Easy|Array|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1421.find-numbers-with-even-number-of-digits.py)|
|1422|[Divide Array in Sets of K Consecutive Numbers](https://leetcode-cn.com/problems/divide-array-in-sets-of-k-consecutive-numbers)|Medium|Greedy, Array|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1422.divide-array-in-sets-of-k-consecutive-numbers.py)|
|1423|[Maximum Number of Occurrences of a Substring](https://leetcode-cn.com/problems/maximum-number-of-occurrences-of-a-substring)|Medium|Bit Manipulation, String|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1423.maximum-number-of-occurrences-of-a-substring.py)|
|1424|[Maximum Candies You Can Get from Boxes](https://leetcode-cn.com/problems/maximum-candies-you-can-get-from-boxes)|Hard|Breadth-First Search|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1424.maximum-candies-you-can-get-from-boxes.py)|
|1434|[Decrypt String from Alphabet to Integer Mapping](https://leetcode-cn.com/problems/decrypt-string-from-alphabet-to-integer-mapping)|Easy|String|[Python3](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1434.decrypt-string-from-alphabet-to-integer-mapping)|
|1435|[XOR Queries of a Subarray](https://leetcode-cn.com/problems/xor-queries-of-a-subarray)|Medium|Bit Manipulation|[Python3](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1435.xor-queries-of-a-subarray)|
|1436|[Get Watched Videos by Your Friends](https://leetcode-cn.com/problems/get-watched-videos-by-your-friends)|Medium|Breadth-First Search, Hash Table, String|[Python3](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1436.get-watched-videos-by-your-friends)|
|1473|[Find the Longest Substring Containing Vowels in Even Counts](https://leetcode-cn.com/problems/find-the-longest-substring-containing-vowels-in-even-counts)|Medium|String|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1473.find-the-longest-substring-containing-vowels-in-even-counts.py)|
|1496|[Lucky Numbers in a Matrix](https://leetcode-cn.com/problems/lucky-numbers-in-a-matrix)|Easy|Array|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1496.lucky-numbers-in-a-matrix.py)|
|1497|[Design a Stack With Increment Operation](https://leetcode-cn.com/problems/design-a-stack-with-increment-operation)|Medium|Stack, Design|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1497.design-a-stack-with-increment-operation.py)|
|1499|[Maximum Performance of a Team](https://leetcode-cn.com/problems/maximum-performance-of-a-team)|Hard|Greedy, Sort|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1499.maximum-performance-of-a-team.py)|
|1514|[Minimum Value to Get Positive Step by Step Sum](https://leetcode-cn.com/problems/minimum-value-to-get-positive-step-by-step-sum)|Easy|Array|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1514.minimum-value-to-get-positive-step-by-step-sum.py)|
|1515|[Find the Minimum Number of Fibonacci Numbers Whose Sum Is K](https://leetcode-cn.com/problems/find-the-minimum-number-of-fibonacci-numbers-whose-sum-is-k)|Medium|Greedy, Array|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1515.find-the-minimum-number-of-fibonacci-numbers-whose-sum-is-k.py)|
|1516|[The k-th Lexicographical String of All Happy Strings of Length n](https://leetcode-cn.com/problems/the-k-th-lexicographical-string-of-all-happy-strings-of-length-n)|Medium|Backtracking|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1516.the-k-th-lexicographical-string-of-all-happy-strings-of-length-n.py)|
|1517|[Restore The Array](https://leetcode-cn.com/problems/restore-the-array)|Hard|Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1517.restore-the-array.py)|
|1528|[Kids With the Greatest Number of Candies](https://leetcode-cn.com/problems/kids-with-the-greatest-number-of-candies)|Easy|Array|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1528.kids-with-the-greatest-number-of-candies.py)|
|1570|[Final Prices With a Special Discount in a Shop](https://leetcode-cn.com/problems/final-prices-with-a-special-discount-in-a-shop)|Easy|Array|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1570.final-prices-with-a-special-discount-in-a-shop.py)|
|1571|[Allocate Mailboxes](https://leetcode-cn.com/problems/allocate-mailboxes)|Hard|Math, Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1571.allocate-mailboxes.py)|
|1572|[Subrectangle Queries](https://leetcode-cn.com/problems/subrectangle-queries)|Medium|Array|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1572.subrectangle-queries.py)|
|1573|[Find Two Non-overlapping Sub-arrays Each With Target Sum](https://leetcode-cn.com/problems/find-two-non-overlapping-sub-arrays-each-with-target-sum)|Medium|Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1573.find-two-non-overlapping-sub-arrays-each-with-target-sum.py)|
|1574|[Maximum Product of Two Elements in an Array](https://leetcode-cn.com/problems/maximum-product-of-two-elements-in-an-array)|Easy|Array|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1574.maximum-product-of-two-elements-in-an-array.py)|
|1575|[Maximum Area of a Piece of Cake After Horizontal and Vertical Cuts](https://leetcode-cn.com/problems/maximum-area-of-a-piece-of-cake-after-horizontal-and-vertical-cuts)|Medium|Array|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1575.maximum-area-of-a-piece-of-cake-after-horizontal-and-vertical-cuts.py)|
|1576|[Reorder Routes to Make All Paths Lead to the City Zero](https://leetcode-cn.com/problems/reorder-routes-to-make-all-paths-lead-to-the-city-zero)|Medium|Tree, Depth-First Search|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1576.reorder-routes-to-make-all-paths-lead-to-the-city-zero.py)|
|1577|[Probability of a Two Boxes Having The Same Number of Distinct Balls](https://leetcode-cn.com/problems/probability-of-a-two-boxes-having-the-same-number-of-distinct-balls)|Hard|Math, Backtracking|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1577.probability-of-a-two-boxes-having-the-same-number-of-distinct-balls.py)|
|1580|[Shuffle the Array](https://leetcode-cn.com/problems/shuffle-the-array)|Easy|Array|[Python3](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1580.shuffle-the-array)|
|1584|[Average Salary Excluding the Minimum and Maximum Salary](https://leetcode-cn.com/problems/average-salary-excluding-the-minimum-and-maximum-salary)|Easy|Sort, Array|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1584.average-salary-excluding-the-minimum-and-maximum-salary.py)|
|1585|[The kth Factor of n](https://leetcode-cn.com/problems/the-kth-factor-of-n)|Medium|Math|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1585.the-kth-factor-of-n.py)|
|1586|[Longest Subarray of 1s After Deleting One Element](https://leetcode-cn.com/problems/longest-subarray-of-1s-after-deleting-one-element)|Medium|Array|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1586.longest-subarray-of-1s-after-deleting-one-element.py)|
|1587|[Parallel Courses II](https://leetcode-cn.com/problems/parallel-courses-ii)|Hard|Graph|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1587.parallel-courses-ii.py)|
|1603|[Running Sum of 1d Array](https://leetcode-cn.com/problems/running-sum-of-1d-array)|Easy|Array|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1603.running-sum-of-1d-array.py)|
|1610|[XOR Operation in an Array](https://leetcode-cn.com/problems/xor-operation-in-an-array)|Easy|Bit Manipulation, Array|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1610.xor-operation-in-an-array.py)|
|1611|[Making File Names Unique](https://leetcode-cn.com/problems/making-file-names-unique)|Medium|Hash Table, String|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1611.making-file-names-unique.py)|
|1612|[Avoid Flood in The City](https://leetcode-cn.com/problems/avoid-flood-in-the-city)|Medium|Array, Hash Table|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1612.avoid-flood-in-the-city.py)|
|1615|[Range Sum of Sorted Subarray Sums](https://leetcode-cn.com/problems/range-sum-of-sorted-subarray-sums)|Medium|Sort, Array|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1615.range-sum-of-sorted-subarray-sums.py)|
|1616|[Minimum Difference Between Largest and Smallest Value in Three Moves](https://leetcode-cn.com/problems/minimum-difference-between-largest-and-smallest-value-in-three-moves)|Medium|Sort, Array|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1616.minimum-difference-between-largest-and-smallest-value-in-three-moves.py)|
|1617|[Stone Game IV](https://leetcode-cn.com/problems/stone-game-iv)|Hard|Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1617.stone-game-iv.py)|
|1619|[Path Crossing](https://leetcode-cn.com/problems/path-crossing)|Easy|String|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1619.path-crossing.py)|
|1620|[Check If Array Pairs Are Divisible by k](https://leetcode-cn.com/problems/check-if-array-pairs-are-divisible-by-k)|Medium|Greedy, Array, Math|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1620.check-if-array-pairs-are-divisible-by-k.py)|
|1621|[Number of Subsequences That Satisfy the Given Sum Condition](https://leetcode-cn.com/problems/number-of-subsequences-that-satisfy-the-given-sum-condition)|Medium|Sort|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1621.number-of-subsequences-that-satisfy-the-given-sum-condition.py)|
|1622|[Max Value of Equation](https://leetcode-cn.com/problems/max-value-of-equation)|Hard|Array|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1622.max-value-of-equation.py)|
|1626|[Can Make Arithmetic Progression From Sequence](https://leetcode-cn.com/problems/can-make-arithmetic-progression-from-sequence)|Easy|Sort, Array|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1626.can-make-arithmetic-progression-from-sequence.py)|
|1627|[Last Moment Before All Ants Fall Out of a Plank](https://leetcode-cn.com/problems/last-moment-before-all-ants-fall-out-of-a-plank)|Medium|Brainteaser, Array|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1627.last-moment-before-all-ants-fall-out-of-a-plank.py)|
|1628|[Count Submatrices With All Ones](https://leetcode-cn.com/problems/count-submatrices-with-all-ones)|Medium|Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1628.count-submatrices-with-all-ones.py)|
|1629|[Minimum Possible Integer After at Most K Adjacent Swaps On Digits](https://leetcode-cn.com/problems/minimum-possible-integer-after-at-most-k-adjacent-swaps-on-digits)|Hard|Greedy|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1629.minimum-possible-integer-after-at-most-k-adjacent-swaps-on-digits.py)|
|1630|[Count Odd Numbers in an Interval Range](https://leetcode-cn.com/problems/count-odd-numbers-in-an-interval-range)|Easy|Math|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1630.count-odd-numbers-in-an-interval-range.py)|
|1631|[Number of Sub-arrays With Odd Sum](https://leetcode-cn.com/problems/number-of-sub-arrays-with-odd-sum)|Medium|Array, Math|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1631.number-of-sub-arrays-with-odd-sum.py)|
|1632|[Number of Good Ways to Split a String](https://leetcode-cn.com/problems/number-of-good-ways-to-split-a-string)|Medium|Bit Manipulation, String|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1632.number-of-good-ways-to-split-a-string.py)|
|1633|[Minimum Number of Increments on Subarrays to Form a Target Array](https://leetcode-cn.com/problems/minimum-number-of-increments-on-subarrays-to-form-a-target-array)|Hard|Segment Tree|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1633.minimum-number-of-increments-on-subarrays-to-form-a-target-array.py)|
|1635|[Number of Good Pairs](https://leetcode-cn.com/problems/number-of-good-pairs)|Easy|Array, Hash Table, Math|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1635.number-of-good-pairs.py)|
|1636|[Number of Substrings With Only 1s](https://leetcode-cn.com/problems/number-of-substrings-with-only-1s)|Medium|Math, String|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1636.number-of-substrings-with-only-1s.py)|
|1637|[String Compression II](https://leetcode-cn.com/problems/string-compression-ii)|Hard|String, Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1637.string-compression-ii.py)|
|1638|[Best Position for a Service Centre](https://leetcode-cn.com/problems/best-position-for-a-service-centre)|Hard|Geometry|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1638.best-position-for-a-service-centre.py)|
|1642|[Water Bottles](https://leetcode-cn.com/problems/water-bottles)|Easy|Greedy|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1642.water-bottles.py)|
|1643|[Number of Nodes in the Sub-Tree With the Same Label](https://leetcode-cn.com/problems/number-of-nodes-in-the-sub-tree-with-the-same-label)|Medium|Depth-First Search, Breadth-First Search|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1643.number-of-nodes-in-the-sub-tree-with-the-same-label.py)|
|1644|[Maximum Number of Non-Overlapping Substrings](https://leetcode-cn.com/problems/maximum-number-of-non-overlapping-substrings)|Medium|Greedy|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1644.maximum-number-of-non-overlapping-substrings.py)|
|1645|[Find a Value of a Mysterious Function Closest to Target](https://leetcode-cn.com/problems/find-a-value-of-a-mysterious-function-closest-to-target)|Hard|Bit Manipulation, Segment Tree, Binary Search|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1645.find-a-value-of-a-mysterious-function-closest-to-target.py)|
|1646|[Kth Missing Positive Number](https://leetcode-cn.com/problems/kth-missing-positive-number)|Easy|Array, Hash Table|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1646.kth-missing-positive-number.py)|
|1647|[Can Convert String in K Moves](https://leetcode-cn.com/problems/can-convert-string-in-k-moves)|Medium|Greedy, String|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1647.can-convert-string-in-k-moves.py)|
|1648|[Minimum Insertions to Balance a Parentheses String](https://leetcode-cn.com/problems/minimum-insertions-to-balance-a-parentheses-string)|Medium|Stack, String|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1648.minimum-insertions-to-balance-a-parentheses-string.py)|
|1649|[Maximum Number of Non-Overlapping Subarrays With Sum Equals Target](https://leetcode-cn.com/problems/maximum-number-of-non-overlapping-subarrays-with-sum-equals-target)|Medium|Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1649.maximum-number-of-non-overlapping-subarrays-with-sum-equals-target.py)|
|1651|[Shuffle String](https://leetcode-cn.com/problems/shuffle-string)|Easy|Sort|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1651.shuffle-string.py)|
|1652|[Bulb Switcher IV](https://leetcode-cn.com/problems/bulb-switcher-iv)|Medium|String|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1652.bulb-switcher-iv.py)|
|1653|[Number of Good Leaf Nodes Pairs](https://leetcode-cn.com/problems/number-of-good-leaf-nodes-pairs)|Medium|Tree, Depth-First Search|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1653.number-of-good-leaf-nodes-pairs.py)|
|1656|[Count Good Triplets](https://leetcode-cn.com/problems/count-good-triplets)|Easy|Array|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1656.count-good-triplets.py)|
|1657|[Find the Winner of an Array Game](https://leetcode-cn.com/problems/find-the-winner-of-an-array-game)|Medium|Array|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1657.find-the-winner-of-an-array-game.py)|
|1658|[Minimum Swaps to Arrange a Binary Grid](https://leetcode-cn.com/problems/minimum-swaps-to-arrange-a-binary-grid)|Medium|Greedy|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1658.minimum-swaps-to-arrange-a-binary-grid.py)|
|1659|[Get the Maximum Score](https://leetcode-cn.com/problems/get-the-maximum-score)|Hard|Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1659.get-the-maximum-score.py)|
|1660|[Thousand Separator](https://leetcode-cn.com/problems/thousand-separator)|Easy|String|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1660.thousand-separator.py)|
|1661|[Minimum Number of Vertices to Reach All Nodes](https://leetcode-cn.com/problems/minimum-number-of-vertices-to-reach-all-nodes)|Medium|Graph|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1661.minimum-number-of-vertices-to-reach-all-nodes.py)|
|1662|[Minimum Numbers of Function Calls to Make Target Array](https://leetcode-cn.com/problems/minimum-numbers-of-function-calls-to-make-target-array)|Medium|Greedy|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1662.minimum-numbers-of-function-calls-to-make-target-array.py)|
|1663|[Detect Cycles in 2D Grid](https://leetcode-cn.com/problems/detect-cycles-in-2d-grid)|Hard|Depth-First Search|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1663.detect-cycles-in-2d-grid.py)|
|1666|[Make The String Great](https://leetcode-cn.com/problems/make-the-string-great)|Easy|Stack, String|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1666.make-the-string-great.py)|
|1667|[Find Kth Bit in Nth Binary String](https://leetcode-cn.com/problems/find-kth-bit-in-nth-binary-string)|Medium|String|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1667.find-kth-bit-in-nth-binary-string.py)|
|1668|[Find Longest Awesome Substring](https://leetcode-cn.com/problems/find-longest-awesome-substring)|Hard|Bit Manipulation, String|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1668.find-longest-awesome-substring.py)|
|1669|[Minimum Cost to Cut a Stick](https://leetcode-cn.com/problems/minimum-cost-to-cut-a-stick)|Hard|Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1669.minimum-cost-to-cut-a-stick.py)|
|1674|[Minimum Operations to Make Array Equal](https://leetcode-cn.com/problems/minimum-operations-to-make-array-equal)|Medium|Math|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1674.minimum-operations-to-make-array-equal.py)|
|1675|[Magnetic Force Between Two Balls](https://leetcode-cn.com/problems/magnetic-force-between-two-balls)|Medium|Array, Binary Search|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1675.magnetic-force-between-two-balls.py)|
|1676|[Minimum Number of Days to Eat N Oranges](https://leetcode-cn.com/problems/minimum-number-of-days-to-eat-n-oranges)|Hard|Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1676.minimum-number-of-days-to-eat-n-oranges.py)|
|1677|[Matrix Diagonal Sum](https://leetcode-cn.com/problems/matrix-diagonal-sum)|Easy|Array|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1677.matrix-diagonal-sum.py)|
|1678|[Number of Ways to Split a String](https://leetcode-cn.com/problems/number-of-ways-to-split-a-string)|Medium|String|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1678.number-of-ways-to-split-a-string.py)|
|1679|[Shortest Subarray to be Removed to Make Array Sorted](https://leetcode-cn.com/problems/shortest-subarray-to-be-removed-to-make-array-sorted)|Medium|Array, Binary Search|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1679.shortest-subarray-to-be-removed-to-make-array-sorted.py)|
|1680|[Count All Possible Routes](https://leetcode-cn.com/problems/count-all-possible-routes)|Hard|Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1680.count-all-possible-routes.py)|
|1682|[Most Visited Sector in a Circular Track](https://leetcode-cn.com/problems/most-visited-sector-in-a-circular-track)|Easy|Array|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1682.most-visited-sector-in-a-circular-track.py)|
|1683|[Maximum Number of Coins You Can Get](https://leetcode-cn.com/problems/maximum-number-of-coins-you-can-get)|Medium|Sort|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1683.maximum-number-of-coins-you-can-get.py)|
|1684|[Find Latest Group of Size M](https://leetcode-cn.com/problems/find-latest-group-of-size-m)|Medium|Binary Search|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1684.find-latest-group-of-size-m.py)|
|1685|[Stone Game V](https://leetcode-cn.com/problems/stone-game-v)|Hard|Dynamic Programming|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1685.stone-game-v.py)|
|1689|[重复至少 K 次且长度为 M 
的模式](https://leetcode-cn.com/problems/detect-pattern-of-length-m-repeated-k-or-more-times)|简单|数组|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1689.detect-pattern-of-length-m-repeated-k-or-more-times.py)|\n|1690|[乘积为正数的最长子数组长度](https://leetcode-cn.com/problems/maximum-length-of-subarray-with-positive-product)|中等|贪心算法|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1690.maximum-length-of-subarray-with-positive-product.py)|\n|1691|[使陆地分离的最少天数](https://leetcode-cn.com/problems/minimum-number-of-days-to-disconnect-island)|困难|贪心算法|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1691.minimum-number-of-days-to-disconnect-island.py)|\n|1692|[将子数组重新排序得到同一个二叉查找树的方案数](https://leetcode-cn.com/problems/number-of-ways-to-reorder-array-to-get-same-bst)|困难|动态规划|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1692.number-of-ways-to-reorder-array-to-get-same-bst.py)|\n|1698|[替换所有的问号](https://leetcode-cn.com/problems/replace-all-s-to-avoid-consecutive-repeating-characters)|简单|字符串|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1698.replace-all-s-to-avoid-consecutive-repeating-characters.py)|\n|1699|[数的平方等于两数乘积的方法数](https://leetcode-cn.com/problems/number-of-ways-where-square-of-number-is-equal-to-product-of-two-numbers)|中等|哈希表、数学|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1699.number-of-ways-where-square-of-number-is-equal-to-product-of-two-numbers.py)|\n|1700|[避免重复字母的最小删除成本](https://leetcode-cn.com/problems/minimum-deletion-cost-to-avoid-repeating-letters)|中等|贪心算法|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1700.minimum-deletion-cost-to-avoid-repeating-letters.py)|\n|1701|[保证图可完全遍历](https://leetcode-cn.com/problems/remove-max-number-of-edges-to-keep-graph-fully-traversable)|困难|并查集|[Python](https://github.com/smartmark-pro/leetcode_record/tree/master/codes_auto/1701.remove-max-number-of-edges-to-keep-graph-fully-traversable.py)|\n"
},
{
"alpha_fraction": 0.6346153616905212,
"alphanum_fraction": 0.682692289352417,
"avg_line_length": 14,
"blob_id": "1d6ec9a5b21560cc00517691f1827280c2a87c42",
"content_id": "c64734ea798a143f9b7adb85650ab01af5f4c74b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 104,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/33.search-in-rotated-sorted-array.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=33 lang=python3\n#\n# [33] search-in-rotated-sorted-array\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6020408272743225,
"alphanum_fraction": 0.6734693646430969,
"avg_line_length": 13.142857551574707,
"blob_id": "77315cfbe8e7aad600b8b0ecb5255546dcbbce69",
"content_id": "fa31b2a35a8549eab2688305b8dd559442c6f734",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 98,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/657.robot-return-to-origin.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=657 lang=python3\n#\n# [657] robot-return-to-origin\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5888888835906982,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 12,
"blob_id": "8f2cc39b3845db0193208955aa13ea4bf4069a40",
"content_id": "901655ae2b528847d7a4dc3ec75fd04e72b10fd7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 90,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/392.is-subsequence.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=392 lang=python3\n#\n# [392] is-subsequence\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6379310488700867,
"alphanum_fraction": 0.6982758641242981,
"avg_line_length": 15.714285850524902,
"blob_id": "42f868701b8cad4017c997ae5a3418f215a6a528",
"content_id": "4acb9109023fb177b5f14703d82575db6de62fc6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 116,
"license_type": "permissive",
"max_line_length": 48,
"num_lines": 7,
"path": "/codes_auto/712.minimum-ascii-delete-sum-for-two-strings.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=712 lang=python3\n#\n# [712] minimum-ascii-delete-sum-for-two-strings\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5747126340866089,
"alphanum_fraction": 0.6551724076271057,
"avg_line_length": 11.571428298950195,
"blob_id": "6c3408ee404f03d226943723617d3f0256fee7aa",
"content_id": "ce7c77228259a93d152b9e41b929cff77aa3752e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 87,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/133.clone-graph.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=133 lang=python3\n#\n# [133] clone-graph\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5862069129943848,
"alphanum_fraction": 0.6436781883239746,
"avg_line_length": 11.571428298950195,
"blob_id": "28e7e900786c45488f1dac81362487eba6cee841",
"content_id": "3077ce8308c6a8cdd3c047196631ba94c97fa191",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 87,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/38.count-and-say.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=38 lang=python3\n#\n# [38] count-and-say\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6299999952316284,
"alphanum_fraction": 0.6800000071525574,
"avg_line_length": 13.428571701049805,
"blob_id": "08e272200e280e8db460b27aef84563733492475",
"content_id": "6353ff05d1ecde6387779179cd4eb97e20b4c7a9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 100,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/96.unique-binary-search-trees.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=96 lang=python3\n#\n# [96] unique-binary-search-trees\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5652173757553101,
"alphanum_fraction": 0.6521739363670349,
"avg_line_length": 12.285714149475098,
"blob_id": "c15d1b74b86914e40954c54c04960cbd96cead0e",
"content_id": "599de97247e52f8341bc9ceb01033734c13a12d3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 92,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/191.number-of-1-bits.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=191 lang=python3\n#\n# [191] number-of-1-bits\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6132075190544128,
"alphanum_fraction": 0.698113203048706,
"avg_line_length": 14.285714149475098,
"blob_id": "047ed6064b3d72c5cb2b435aa550c23469069933",
"content_id": "aa27cef4e8aeec6a5307a1a84d5cddeaeda64441",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 106,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1195.distribute-candies-to-people.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1195 lang=python3\n#\n# [1195] distribute-candies-to-people\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6037735939025879,
"alphanum_fraction": 0.6886792182922363,
"avg_line_length": 14.285714149475098,
"blob_id": "0f0449de33a0ba85d44fb71d5106a5931a388137",
"content_id": "142ab92f2dbb51ecfcc262d9d5c23176ffc5f36c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 106,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1285.balance-a-binary-search-tree.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1285 lang=python3\n#\n# [1285] balance-a-binary-search-tree\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6067415475845337,
"alphanum_fraction": 0.6629213690757751,
"avg_line_length": 11.857142448425293,
"blob_id": "1eabed102beec6d04be13c36d38a650f0ab4c1ff",
"content_id": "589e7753fa29d3695aa4527c385827e223cdeb79",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 89,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/70.climbing-stairs.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=70 lang=python3\n#\n# [70] climbing-stairs\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6122449040412903,
"alphanum_fraction": 0.6836734414100647,
"avg_line_length": 13.142857551574707,
"blob_id": "9bdda1bf6bdabca2ffa599727ff0d6817879b351",
"content_id": "d19f66ada01507cf2ae9caf6d551f1614fb54fa0",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 98,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/234.palindrome-linked-list.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=234 lang=python3\n#\n# [234] palindrome-linked-list\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6000000238418579,
"alphanum_fraction": 0.6736842393875122,
"avg_line_length": 12.714285850524902,
"blob_id": "4094ebef25620d89dd31f01ef145729a72700f03",
"content_id": "781eaa954d5a1c0ee735a97d14431be2748d936f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 95,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/680.valid-palindrome-ii.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=680 lang=python3\n#\n# [680] valid-palindrome-ii\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5930232405662537,
"alphanum_fraction": 0.6511628031730652,
"avg_line_length": 11.428571701049805,
"blob_id": "fc61909b1b3adfc11121e477eb9f40a744578f86",
"content_id": "2b34b29681e8c180a5358d1368b8a8807858d3e4",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 86,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/48.rotate-image.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=48 lang=python3\n#\n# [48] rotate-image\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5876288414001465,
"alphanum_fraction": 0.6804123520851135,
"avg_line_length": 13,
"blob_id": "1c1026a866e66f11e4914b9f661c105685c5c8f5",
"content_id": "59241a7f50c0dce304485f52a9cb6ec043ec5374",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 97,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1656.count-good-triplets.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1656 lang=python3\n#\n# [1656] count-good-triplets\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.569767415523529,
"alphanum_fraction": 0.6511628031730652,
"avg_line_length": 11.428571701049805,
"blob_id": "5c3dc8d1b822950183f0dbab1d33bcecc46ecd5b",
"content_id": "9fb36915133ee9f8b86bab1fd5aab6d1f77ce665",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 86,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/733.flood-fill.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=733 lang=python3\n#\n# [733] flood-fill\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6355140209197998,
"alphanum_fraction": 0.7009345889091492,
"avg_line_length": 14.428571701049805,
"blob_id": "f902669444a93675c2ce017188e3b0765c3ebc30",
"content_id": "475a64b3366d4c8f2f01e6e148c56bb80cf920a7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 107,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/145.binary-tree-postorder-traversal.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=145 lang=python3\n#\n# [145] binary-tree-postorder-traversal\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6339285969734192,
"alphanum_fraction": 0.6964285969734192,
"avg_line_length": 15.142857551574707,
"blob_id": "7188cd2086fc481aa0abff2121cde613d7f8da91",
"content_id": "6620c4446982e8e42cc63ff08d1fa45686c1fa12",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 112,
"license_type": "permissive",
"max_line_length": 44,
"num_lines": 7,
"path": "/codes_auto/107.binary-tree-level-order-traversal-ii.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=107 lang=python3\n#\n# [107] binary-tree-level-order-traversal-ii\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5904762148857117,
"alphanum_fraction": 0.6761904954910278,
"avg_line_length": 14.142857551574707,
"blob_id": "232cc7e443bac495e8c850dba7c40065ef42eed6",
"content_id": "4bf4c9751d2ad653483f54301341f5333ab2bf3c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 105,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1669.minimum-cost-to-cut-a-stick.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1669 lang=python3\n#\n# [1669] minimum-cost-to-cut-a-stick\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6484375,
"alphanum_fraction": 0.703125,
"avg_line_length": 17.428571701049805,
"blob_id": "a1db71106380b2f9638857c60272949fcc5b8c8e",
"content_id": "ec00ffc46378039adc6466bfcab9bfb69d25c263",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 128,
"license_type": "permissive",
"max_line_length": 60,
"num_lines": 7,
"path": "/codes_auto/714.best-time-to-buy-and-sell-stock-with-transaction-fee.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=714 lang=python3\n#\n# [714] best-time-to-buy-and-sell-stock-with-transaction-fee\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5882353186607361,
"alphanum_fraction": 0.6470588445663452,
"avg_line_length": 11.285714149475098,
"blob_id": "bc65091822315b339ecfecc167cf06bdca40e6f7",
"content_id": "fcdda89c308d59afbed16f6490a87f82a6f26582",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 85,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/79.word-search.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=79 lang=python3\n#\n# [79] word-search\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5744680762290955,
"alphanum_fraction": 0.6702127456665039,
"avg_line_length": 12.571428298950195,
"blob_id": "573d4c817ce94f3eb68b1733905abd97378dd5a1",
"content_id": "0f6233520e5597363075715b29ba47e7f9240609",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 94,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1652.bulb-switcher-iv.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1652 lang=python3\n#\n# [1652] bulb-switcher-iv\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6736111044883728,
"alphanum_fraction": 0.7361111044883728,
"avg_line_length": 19.714284896850586,
"blob_id": "71f895eb8a964c7d2897082d90704b6e934aaf39",
"content_id": "aa816c1c086bb248202757d3f1fe3259fa15454c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 144,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 7,
"path": "/codes_auto/1649.maximum-number-of-non-overlapping-subarrays-with-sum-equals-target.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1649 lang=python3\n#\n# [1649] maximum-number-of-non-overlapping-subarrays-with-sum-equals-target\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6078431606292725,
"alphanum_fraction": 0.6960784196853638,
"avg_line_length": 13.714285850524902,
"blob_id": "5af6367e074e15adb1ea22c3b9a79ec8ae9c0ceb",
"content_id": "a2541309da593ad0cf6691bde1aa38c532f6b76d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 102,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1196.filling-bookcase-shelves.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1196 lang=python3\n#\n# [1196] filling-bookcase-shelves\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6022727489471436,
"alphanum_fraction": 0.6590909361839294,
"avg_line_length": 11.714285850524902,
"blob_id": "e6f8a94c668c71c7daeb715738cf7dfb7e28f236",
"content_id": "50a7d5c31b3e8f3f7cae32055202d926deee83f1",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 88,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/49.group-anagrams.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=49 lang=python3\n#\n# [49] group-anagrams\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5899999737739563,
"alphanum_fraction": 0.6800000071525574,
"avg_line_length": 13.428571701049805,
"blob_id": "419e01bd77aba2edb37d59f3329c12d5c24799fc",
"content_id": "2f4e6a60341cb11bb1d53899610490305303cd8e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 100,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1185.find-in-mountain-array.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1185 lang=python3\n#\n# [1185] find-in-mountain-array\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6296296119689941,
"alphanum_fraction": 0.6944444179534912,
"avg_line_length": 14.571428298950195,
"blob_id": "5fb37fc7055ab4d401ca88ce6cce4d00af3d8feb",
"content_id": "809723283babab5cb0d6a08acb0783ec9a3688ad",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 108,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/160.intersection-of-two-linked-lists.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=160 lang=python3\n#\n# [160] intersection-of-two-linked-lists\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5957446694374084,
"alphanum_fraction": 0.6702127456665039,
"avg_line_length": 12.571428298950195,
"blob_id": "634f5878b107ad38e595a5ee7c5a1351524ca642",
"content_id": "8198641d39ad66b90f4a88788716ab2ed2e605f0",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 94,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/277.find-the-celebrity.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=277 lang=python3\n#\n# [277] find-the-celebrity\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.59375,
"alphanum_fraction": 0.6875,
"avg_line_length": 12.857142448425293,
"blob_id": "7580c0fea22decb73783a3d6b66e02a040b11dfc",
"content_id": "af87c0754ddd7a2acef86be306ca34ab5c817c62",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 96,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1006.vowel-spellchecker.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1006 lang=python3\n#\n# [1006] vowel-spellchecker\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6461538672447205,
"alphanum_fraction": 0.7153846025466919,
"avg_line_length": 17.714284896850586,
"blob_id": "63c81228ce2e473a938b3f0c4ad586bbc14018a3",
"content_id": "978ae5bf94c503ce1e0b0d29918e480dc692c28d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 130,
"license_type": "permissive",
"max_line_length": 61,
"num_lines": 7,
"path": "/codes_auto/1679.shortest-subarray-to-be-removed-to-make-array-sorted.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1679 lang=python3\n#\n# [1679] shortest-subarray-to-be-removed-to-make-array-sorted\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5795454382896423,
"alphanum_fraction": 0.6590909361839294,
"avg_line_length": 11.714285850524902,
"blob_id": "14d9335e8a3772da29d0e04054107e312a5ce930",
"content_id": "a4f33c08ad60bd2c9d5c292f99bf9ff61c04f147",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 88,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/202.happy-number.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=202 lang=python3\n#\n# [202] happy-number\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6240000128746033,
"alphanum_fraction": 0.6959999799728394,
"avg_line_length": 17,
"blob_id": "622a9c583f4bd8229e3eaf59906fe8cbadd820b7",
"content_id": "f481d51b6a488b1e6f86a201b719cbc29172173c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 125,
"license_type": "permissive",
"max_line_length": 56,
"num_lines": 7,
"path": "/codes_auto/1627.last-moment-before-all-ants-fall-out-of-a-plank.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1627 lang=python3\n#\n# [1627] last-moment-before-all-ants-fall-out-of-a-plank\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6310679316520691,
"alphanum_fraction": 0.6796116232872009,
"avg_line_length": 13.857142448425293,
"blob_id": "94eb00057ece36f0a40578e12a4d22b3903b963c",
"content_id": "43310a68b99473ef7f252bf6a42a1a8878c740d9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 103,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/95.unique-binary-search-trees-ii.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=95 lang=python3\n#\n# [95] unique-binary-search-trees-ii\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5957446694374084,
"alphanum_fraction": 0.6702127456665039,
"avg_line_length": 12.571428298950195,
"blob_id": "c9e95fed7009309015853ea14bf01b2a835020c2",
"content_id": "186f9aa497ef51291a43ca8d790a88c2e5ebd51f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 94,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/801.is-graph-bipartite.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=801 lang=python3\n#\n# [801] is-graph-bipartite\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6341463327407837,
"alphanum_fraction": 0.707317054271698,
"avg_line_length": 16.714284896850586,
"blob_id": "d54b550cb935ea6c50247a8314fb21e6f1f7e091",
"content_id": "52a73f6c1f098cf04c77856952cee42202ba3ac0",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 123,
"license_type": "permissive",
"max_line_length": 54,
"num_lines": 7,
"path": "/codes_auto/1661.minimum-number-of-vertices-to-reach-all-nodes.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1661 lang=python3\n#\n# [1661] minimum-number-of-vertices-to-reach-all-nodes\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6262626051902771,
"alphanum_fraction": 0.6969696879386902,
"avg_line_length": 13.285714149475098,
"blob_id": "50811eb321c39f81afeba3506295771a0bc5a97e",
"content_id": "867cd9f85f430ca4e6d15d28508f85afbd1f9adf",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 99,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/491.increasing-subsequences.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=491 lang=python3\n#\n# [491] increasing-subsequences\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5795454382896423,
"alphanum_fraction": 0.6590909361839294,
"avg_line_length": 11.714285850524902,
"blob_id": "bb6160bc8313e81e378b8bc90f8f88b6a7f75db8",
"content_id": "34a3d25bdede4122867dffee7a122390408c5b4c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 88,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/143.reorder-list.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=143 lang=python3\n#\n# [143] reorder-list\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6262626051902771,
"alphanum_fraction": 0.6767676472663879,
"avg_line_length": 13.285714149475098,
"blob_id": "53853a1766eff8717d4bf358248ebe3448623148",
"content_id": "8ff634f4e8b902834cee003a0abb940224449acf",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 99,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/11.container-with-most-water.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=11 lang=python3\n#\n# [11] container-with-most-water\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6122449040412903,
"alphanum_fraction": 0.6836734414100647,
"avg_line_length": 13.142857551574707,
"blob_id": "13ada56de5ca99b7c3e7165f2d52a72e75029b63",
"content_id": "a11d367329286d73694dff92b73dad3c4acf895a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 98,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/354.russian-doll-envelopes.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=354 lang=python3\n#\n# [354] russian-doll-envelopes\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5979381203651428,
"alphanum_fraction": 0.6701030731201172,
"avg_line_length": 13,
"blob_id": "f11aea60aaa205b4c2e00f8a67544e576b9762f5",
"content_id": "1044f97fe717635af4c461656424e86e1433da2e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 97,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/560.subarray-sum-equals-k.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=560 lang=python3\n#\n# [560] subarray-sum-equals-k\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.65625,
"alphanum_fraction": 0.7265625,
"avg_line_length": 17.428571701049805,
"blob_id": "806a8ec14e3c1ead98f7e90b162722d14d69e38c",
"content_id": "2301eda6d6ff90bc2cb837a2da6263d7960e29de",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 128,
"license_type": "permissive",
"max_line_length": 59,
"num_lines": 7,
"path": "/codes_auto/1648.minimum-insertions-to-balance-a-parentheses-string.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1648 lang=python3\n#\n# [1648] minimum-insertions-to-balance-a-parentheses-string\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6132075190544128,
"alphanum_fraction": 0.698113203048706,
"avg_line_length": 14.285714149475098,
"blob_id": "0c3aafb87284f3e4edfb176a6c652492d9cf7225",
"content_id": "3e29ff5a7b6d862328536510c22af12acea0de56",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 106,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1197.parsing-a-boolean-expression.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1197 lang=python3\n#\n# [1197] parsing-a-boolean-expression\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.581632673740387,
"alphanum_fraction": 0.6734693646430969,
"avg_line_length": 13.142857551574707,
"blob_id": "3e43cda555f659b6b3cc648697ebcd689644b99b",
"content_id": "943c8ed1cd17178b2e7dd6ef67854da8a007f148",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 98,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1635.number-of-good-pairs.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1635 lang=python3\n#\n# [1635] number-of-good-pairs\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5977011322975159,
"alphanum_fraction": 0.6551724076271057,
"avg_line_length": 11.571428298950195,
"blob_id": "a47ff4911180a7a6e7f34588d99a2c4e84cd5aa9",
"content_id": "bab4386c3d3f087c25110cb0bc8b063612c34336",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 87,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/71.simplify-path.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=71 lang=python3\n#\n# [71] simplify-path\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6078431606292725,
"alphanum_fraction": 0.6764705777168274,
"avg_line_length": 13.714285850524902,
"blob_id": "a77ab3ef33a5e1cec9fd87d960c3c661de6fa4d5",
"content_id": "bee6df11710281af24122d29c1d52b6c380afc6f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 102,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/862.find-and-replace-in-string.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=862 lang=python3\n#\n# [862] find-and-replace-in-string\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6507936716079712,
"alphanum_fraction": 0.7222222089767456,
"avg_line_length": 17.14285659790039,
"blob_id": "10346a63c6ffd9b8dcdbc4b72a70852b060d8c29",
"content_id": "540b7acd18877e8e655d0ea28655fe9917e3e1a0",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 126,
"license_type": "permissive",
"max_line_length": 57,
"num_lines": 7,
"path": "/codes_auto/1690.maximum-length-of-subarray-with-positive-product.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1690 lang=python3\n#\n# [1690] maximum-length-of-subarray-with-positive-product\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5747126340866089,
"alphanum_fraction": 0.6551724076271057,
"avg_line_length": 11.571428298950195,
"blob_id": "11ac4b25b7ba535b56d19d21dc98b82bfddc1f46",
"content_id": "ac0a063b53892dd6b9a1dff79c51d0a7df459e59",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 87,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/283.move-zeroes.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=283 lang=python3\n#\n# [283] move-zeroes\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5888888835906982,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 12,
"blob_id": "76b2b2fb643b7f866b9d65db90fe15c4ad118600",
"content_id": "525137fcb87c264c2e008ac31b18ca912ebefdb3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 90,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/547.friend-circles.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=547 lang=python3\n#\n# [547] friend-circles\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5913978219032288,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 12.428571701049805,
"blob_id": "58e486d6ff3e22e50af8feae089da49630cc20a7",
"content_id": "d505e5e8535f8edd0261e6393459ab725bc31855",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 93,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/861.flipping-an-image.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=861 lang=python3\n#\n# [861] flipping-an-image\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.60550457239151,
"alphanum_fraction": 0.6880733966827393,
"avg_line_length": 14.714285850524902,
"blob_id": "49a6fcbdf1dae6fbc2853f5ecd8c1d4ff83022cd",
"content_id": "c5433a10b42341bbf99ceb0e3a32a991e0309212",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 109,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1653.number-of-good-leaf-nodes-pairs.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1653 lang=python3\n#\n# [1653] number-of-good-leaf-nodes-pairs\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5876288414001465,
"alphanum_fraction": 0.6804123520851135,
"avg_line_length": 13,
"blob_id": "1de44b31ad3c9976f48318c28608627d13c0193e",
"content_id": "64ebfee497508be2257b66b2cccbad1b36864dc1",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 97,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1587.parallel-courses-ii.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1587 lang=python3\n#\n# [1587] parallel-courses-ii\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5869565010070801,
"alphanum_fraction": 0.6630434989929199,
"avg_line_length": 12.285714149475098,
"blob_id": "9fbb05fb712963167665b6a125694a7d24d88cf7",
"content_id": "d1c010acca335b57149b8fcf21cd70e0cdab7e59",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 92,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/253.meeting-rooms-ii.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=253 lang=python3\n#\n# [253] meeting-rooms-ii\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6206896305084229,
"alphanum_fraction": 0.6982758641242981,
"avg_line_length": 15.714285850524902,
"blob_id": "e07df9b58ead1ec41c9e3dc5f5f0b6ea05a1f31a",
"content_id": "6c15af2d056b30147dadcf19d226ac19b48d4818",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 116,
"license_type": "permissive",
"max_line_length": 47,
"num_lines": 7,
"path": "/codes_auto/1658.minimum-swaps-to-arrange-a-binary-grid.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1658 lang=python3\n#\n# [1658] minimum-swaps-to-arrange-a-binary-grid\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5411764979362488,
"alphanum_fraction": 0.6470588445663452,
"avg_line_length": 11.285714149475098,
"blob_id": "eaa13987cceef8bb4db61d245b9105e4e9c94d58",
"content_id": "fa12e9a58668cf2577dc5f10fc134db509a92085",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 85,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/542.01-matrix.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=542 lang=python3\n#\n# [542] 01-matrix\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6780821681022644,
"alphanum_fraction": 0.7397260069847107,
"avg_line_length": 20,
"blob_id": "e7b7eef3d951cbc1bf3d9125d43f364e9742f4b7",
"content_id": "bce9aba1d86a225b99e94ffa8b993247d53a443c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 146,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 7,
"path": "/codes_auto/1616.minimum-difference-between-largest-and-smallest-value-in-three-moves.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1616 lang=python3\n#\n# [1616] minimum-difference-between-largest-and-smallest-value-in-three-moves\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5858585834503174,
"alphanum_fraction": 0.6767676472663879,
"avg_line_length": 13.285714149475098,
"blob_id": "63561ec9bef94dd35a4a1f5923079821c6c89a9c",
"content_id": "fa88d837798a0a41eeeb3a20b78351e124c67e46",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 99,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1666.make-the-string-great.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1666 lang=python3\n#\n# [1666] make-the-string-great\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6299999952316284,
"alphanum_fraction": 0.6800000071525574,
"avg_line_length": 13.428571701049805,
"blob_id": "0d9587e85f6392080e43c86c4a77732018c2ee1b",
"content_id": "fb3725a5d47d6ed0144c5198f812cd994e5d3d13",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 100,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/99.recover-binary-search-tree.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=99 lang=python3\n#\n# [99] recover-binary-search-tree\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6116504669189453,
"alphanum_fraction": 0.6990291476249695,
"avg_line_length": 13.857142448425293,
"blob_id": "738d25aca4449f422b80a531a03c1b93caef0a85",
"content_id": "e875e7fd85d38bebdbf225b0eae6d8f17c019a19",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 103,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1368.web-crawler-multithreaded.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1368 lang=python3\n#\n# [1368] web-crawler-multithreaded\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6100000143051147,
"alphanum_fraction": 0.6800000071525574,
"avg_line_length": 13.428571701049805,
"blob_id": "b0aa02da301a6cc479d02ef5d97a0a80ba3d244e",
"content_id": "ce017424861ff25d1ecf934485d8054bdf9c0c9c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 100,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/747.min-cost-climbing-stairs.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=747 lang=python3\n#\n# [747] min-cost-climbing-stairs\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5978260636329651,
"alphanum_fraction": 0.6739130616188049,
"avg_line_length": 12.285714149475098,
"blob_id": "c41e2158e2c255a205c8bbc5ec69431a919e209d",
"content_id": "5c52e90c7825129ec7a62bb5f7cae11fdc65ed30",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 92,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/336.palindrome-pairs.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=336 lang=python3\n#\n# [336] palindrome-pairs\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6126126050949097,
"alphanum_fraction": 0.6936936974525452,
"avg_line_length": 15,
"blob_id": "51454d65d0faf6069caf0d296e4c33cfa8579843",
"content_id": "d64cd1a00a0cc6d229588d6d7b70f9f11b94a424",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 111,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1615.range-sum-of-sorted-subarray-sums.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1615 lang=python3\n#\n# [1615] range-sum-of-sorted-subarray-sums\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6206896305084229,
"alphanum_fraction": 0.6982758641242981,
"avg_line_length": 15.714285850524902,
"blob_id": "12d6cdd6bd9d12c87b2f12666df627d80eece565",
"content_id": "f038e5b28b0c44f7e939c07824a7ddfcafe7e215",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 116,
"license_type": "permissive",
"max_line_length": 47,
"num_lines": 7,
"path": "/codes_auto/1232.sum-of-mutated-array-closest-to-target.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1232 lang=python3\n#\n# [1232] sum-of-mutated-array-closest-to-target\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6153846383094788,
"alphanum_fraction": 0.682692289352417,
"avg_line_length": 14,
"blob_id": "cbbf3878acf8371772a4ccc44344b958e14d92fa",
"content_id": "57fa178fe5d41d7df54e376c9b5601e80dd59c56",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 104,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/201.bitwise-and-of-numbers-range.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=201 lang=python3\n#\n# [201] bitwise-and-of-numbers-range\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5888888835906982,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 12,
"blob_id": "7bb7cd9695323b062892bcabfe3b0006d691b20b",
"content_id": "98b5e77f005051e644721cc7391fd97557d69c0a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 90,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/621.task-scheduler.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=621 lang=python3\n#\n# [621] task-scheduler\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6377952694892883,
"alphanum_fraction": 0.7165354490280151,
"avg_line_length": 17.285715103149414,
"blob_id": "7d06988011e58316311d830f99722c45985bbc04",
"content_id": "4a9de91ebc2f5c5c49ecb34c4d4ed44e831e98c3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 127,
"license_type": "permissive",
"max_line_length": 58,
"num_lines": 7,
"path": "/codes_auto/1586.longest-subarray-of-1s-after-deleting-one-element.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1586 lang=python3\n#\n# [1586] longest-subarray-of-1s-after-deleting-one-element\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5813953280448914,
"alphanum_fraction": 0.6395348906517029,
"avg_line_length": 11.428571701049805,
"blob_id": "7a25ce299dca47fac1262ef88dd9e3ff0a7cca50",
"content_id": "3ef195d5f96b9f049a22b6720dba7d26d4a1776a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 86,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/45.jump-game-ii.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=45 lang=python3\n#\n# [45] jump-game-ii\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6111111044883728,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 12,
"blob_id": "baad031e05008d3a22b07a82825fa2c835c9960c",
"content_id": "0028f3bec1a6d88dd3c284161540ddbcb9be8eac",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 90,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/53.maximum-subarray.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=53 lang=python3\n#\n# [53] maximum-subarray\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5959596037864685,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 13.285714149475098,
"blob_id": "87f4c1f298c8a2b7dc3dc91fb27e9d2613e48670",
"content_id": "af105d1351a2ad43442b359a199613f990380dad",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 99,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/958.sort-array-by-parity-ii.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=958 lang=python3\n#\n# [958] sort-array-by-parity-ii\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6095238327980042,
"alphanum_fraction": 0.6952381134033203,
"avg_line_length": 14.142857551574707,
"blob_id": "75a1e5dd2a1fc93caaa8a438356a09c68232da40",
"content_id": "c20e6f5f5b005b1d969e41f0b8aa3511800be229",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 105,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1646.kth-missing-positive-number.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1646 lang=python3\n#\n# [1646] kth-missing-positive-number\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5795454382896423,
"alphanum_fraction": 0.6590909361839294,
"avg_line_length": 11.714285850524902,
"blob_id": "b39750ce5e315f695ebfe417d98511a4f8999e58",
"content_id": "593dd80c86eaa599d16db81a2dca7135e77026e6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 88,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/174.dungeon-game.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=174 lang=python3\n#\n# [174] dungeon-game\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6435643434524536,
"alphanum_fraction": 0.6930692791938782,
"avg_line_length": 13.571428298950195,
"blob_id": "22fdcd90d0639e4675bbfe483276aa6e76e8b65f",
"content_id": "6cf4cdd7e3a8b32a0e7d82a4b0d01b976591e78e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 101,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/10.regular-expression-matching.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=10 lang=python3\n#\n# [10] regular-expression-matching\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6020408272743225,
"alphanum_fraction": 0.6938775777816772,
"avg_line_length": 13.142857551574707,
"blob_id": "fa2d2bea27a12e5e7768b3f1f74507cfd7860059",
"content_id": "6321010626686bd1686fd62c7ef84832d464a746",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 98,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1572.subrectangle-queries.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1572 lang=python3\n#\n# [1572] subrectangle-queries\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6379310488700867,
"alphanum_fraction": 0.6982758641242981,
"avg_line_length": 15.714285850524902,
"blob_id": "36a03b9ec803dda1fa1f939565ed090a31466ae1",
"content_id": "9e9ff4f2ee2c299b6c15afa84210c89387694209",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 116,
"license_type": "permissive",
"max_line_length": 48,
"num_lines": 7,
"path": "/codes_auto/448.find-all-numbers-disappeared-in-an-array.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=448 lang=python3\n#\n# [448] find-all-numbers-disappeared-in-an-array\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.625,
"alphanum_fraction": 0.692307710647583,
"avg_line_length": 14,
"blob_id": "902b02220d87c29a5b1a4c1a17a4b5e8f796f568",
"content_id": "6e3ce08d1adce94659d1bf30730b2811fc874469",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 104,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/232.implement-queue-using-stacks.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=232 lang=python3\n#\n# [232] implement-queue-using-stacks\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5904762148857117,
"alphanum_fraction": 0.6761904954910278,
"avg_line_length": 14.142857551574707,
"blob_id": "62b16af551ad270c7abbe901b058538cfb54286c",
"content_id": "2564e9c912ef6a0e2037fc0f62c7830f540e039d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 105,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1684.find-latest-group-of-size-m.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1684 lang=python3\n#\n# [1684] find-latest-group-of-size-m\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6082473993301392,
"alphanum_fraction": 0.6804123520851135,
"avg_line_length": 13,
"blob_id": "a455aa4d77fb5a855df7452aca45eafad1eb7aff",
"content_id": "3aac7b988e86d715920d4082afaef8384da0d250",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 97,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/466.count-the-repetitions.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=466 lang=python3\n#\n# [466] count-the-repetitions\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.569767415523529,
"alphanum_fraction": 0.6511628031730652,
"avg_line_length": 11.428571701049805,
"blob_id": "33e4e52b5038e8f448fc9c43cec678602f5419a2",
"content_id": "5de55c6c7524fedc53ecef73ec99ea6a8d9a8c6c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 86,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/139.word-break.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=139 lang=python3\n#\n# [139] word-break\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5981308221817017,
"alphanum_fraction": 0.6822429895401001,
"avg_line_length": 14.428571701049805,
"blob_id": "2fe9f147f83c44be224b1093cdee9d6cca264b99",
"content_id": "33208f3fd0051d50635b04c61cb2c73d545b76ed",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 107,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1647.can-convert-string-in-k-moves.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1647 lang=python3\n#\n# [1647] can-convert-string-in-k-moves\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6037735939025879,
"alphanum_fraction": 0.6886792182922363,
"avg_line_length": 14.285714149475098,
"blob_id": "d4986ce1bad3103809126e605f58e79e0277c369",
"content_id": "0c4916411c26b36b38b287fa328b11dd66db15a6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 106,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1016.subarray-sums-divisible-by-k.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1016 lang=python3\n#\n# [1016] subarray-sums-divisible-by-k\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6122449040412903,
"alphanum_fraction": 0.6836734414100647,
"avg_line_length": 13.142857551574707,
"blob_id": "50d0c25dac5a548f4c6ecffcf77d30de20bb7372",
"content_id": "65df887ae83b288f99cfdde56e473ba87ed8f106",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 98,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/239.sliding-window-maximum.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=239 lang=python3\n#\n# [239] sliding-window-maximum\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6274510025978088,
"alphanum_fraction": 0.6960784196853638,
"avg_line_length": 13.714285850524902,
"blob_id": "1492e472cf95db666de45dfcc04406007f3c6e24",
"content_id": "ab2b82a7d6f59b4569c5ba3193649b935d8f90f6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 102,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/301.remove-invalid-parentheses.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=301 lang=python3\n#\n# [301] remove-invalid-parentheses\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5957446694374084,
"alphanum_fraction": 0.6702127456665039,
"avg_line_length": 12.571428298950195,
"blob_id": "2cb78af6ef20d85a90b5d784d0ed9b7eba69aabf",
"content_id": "414717cde00370e85bc79886abb94af91370e2cf",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 94,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/226.invert-binary-tree.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=226 lang=python3\n#\n# [226] invert-binary-tree\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6153846383094788,
"alphanum_fraction": 0.682692289352417,
"avg_line_length": 14,
"blob_id": "73dbe295ee2f7df5e84ec9babd3d09cdb0944283",
"content_id": "d1f5a5306a8ceef120388c6465856a7667856307",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 104,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/124.binary-tree-maximum-path-sum.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=124 lang=python3\n#\n# [124] binary-tree-maximum-path-sum\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5783132314682007,
"alphanum_fraction": 0.6385542154312134,
"avg_line_length": 11,
"blob_id": "19d5880927872476df3d1f1ce5511cf25ba31577",
"content_id": "00fefaf3a0fa1c368e09b67019e0bc89ee961435",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 83,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/55.jump-game.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=55 lang=python3\n#\n# [55] jump-game\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6292135119438171,
"alphanum_fraction": 0.6629213690757751,
"avg_line_length": 11.857142448425293,
"blob_id": "f96724ac15a95bef21d3fb48855f9e0f9a1fb1e5",
"content_id": "19fd8ad51de5ba45c981c96018141b8b789fb239",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 89,
"license_type": "permissive",
"max_line_length": 39,
"num_lines": 7,
"path": "/codes_auto/9.palindrome-number.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=9 lang=python3\n#\n# [9] palindrome-number\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5841584205627441,
"alphanum_fraction": 0.6732673048973083,
"avg_line_length": 13.571428298950195,
"blob_id": "8d1f954b1371054aa34a62c2e7db32e7ff5ea6ab",
"content_id": "91a5ec528ea93bec735b040c2eac5cd678d4ab38",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 101,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1612.avoid-flood-in-the-city.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1612 lang=python3\n#\n# [1612] avoid-flood-in-the-city\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5714285969734192,
"alphanum_fraction": 0.6703296899795532,
"avg_line_length": 12.142857551574707,
"blob_id": "8f3ef40d8f54b4787cf61d8bd209b344dd61bc43",
"content_id": "a44f764614005336ddb75e537065785983ac4c3b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 91,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1283.reformat-date.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1283 lang=python3\n#\n# [1283] reformat-date\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6082473993301392,
"alphanum_fraction": 0.6804123520851135,
"avg_line_length": 13,
"blob_id": "e73aeadb5de3475625641e94b0baef29e3cb029c",
"content_id": "a2b17047a1a32d982e9d33f0b956e36dd5306ea1",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 97,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/567.permutation-in-string.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=567 lang=python3\n#\n# [567] permutation-in-string\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6293103694915771,
"alphanum_fraction": 0.7068965435028076,
"avg_line_length": 15.714285850524902,
"blob_id": "11a7f31849a0d5e5475c684def9cd6134d4ed128",
"content_id": "cbf6a0d06e8d8e1e9d9f43d8b960ac0c9b7ca998",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 116,
"license_type": "permissive",
"max_line_length": 47,
"num_lines": 7,
"path": "/codes_auto/1674.minimum-operations-to-make-array-equal.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1674 lang=python3\n#\n# [1674] minimum-operations-to-make-array-equal\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6063829660415649,
"alphanum_fraction": 0.6595744490623474,
"avg_line_length": 12.571428298950195,
"blob_id": "eb3c1a80933496333862399f65b6947d4f48ae53",
"content_id": "0462f61e664b1cba98702bd1ee9f57bdd1878ddc",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 94,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/23.merge-k-sorted-lists.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=23 lang=python3\n#\n# [23] merge-k-sorted-lists\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5930232405662537,
"alphanum_fraction": 0.6511628031730652,
"avg_line_length": 11.428571701049805,
"blob_id": "9e46595868d54f45a57f382605902c565d8644a0",
"content_id": "23260c2d775025e2b8d38e366d44d6b0c8e9e667",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 86,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/62.unique-paths.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=62 lang=python3\n#\n# [62] unique-paths\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5913978219032288,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 12.428571701049805,
"blob_id": "5270d9b57c0ff9d34ac7f4908e7446a6ca181367",
"content_id": "41e5ccdb52e9aff7e36de22869bc7d8b20060a41",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 93,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/141.linked-list-cycle.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=141 lang=python3\n#\n# [141] linked-list-cycle\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6170212626457214,
"alphanum_fraction": 0.6702127456665039,
"avg_line_length": 12.571428298950195,
"blob_id": "f5d4c0edd49afc9a49e286ded0eae239aefd8418",
"content_id": "898cc2bb543f1b6e53c980c2f920b949e174d8a6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 94,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/93.restore-ip-addresses.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=93 lang=python3\n#\n# [93] restore-ip-addresses\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6060606241226196,
"alphanum_fraction": 0.6767676472663879,
"avg_line_length": 13.285714149475098,
"blob_id": "e8b315e3e040881a129143dc7239343ba7318256",
"content_id": "d77b327834a1eec1dd50b29680645095d4396be1",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 99,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/572.subtree-of-another-tree.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=572 lang=python3\n#\n# [572] subtree-of-another-tree\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6129032373428345,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 12.428571701049805,
"blob_id": "e333a9bbe4efc43b7ab59bba1df9f4b8004827cf",
"content_id": "7652d0e63dc9720914e9ef8a1fc22a58a44666c6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 93,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/42.trapping-rain-water.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=42 lang=python3\n#\n# [42] trapping-rain-water\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5666666626930237,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 12,
"blob_id": "4e9e38e459d756b94ce63b2e7a7c82d9ab2ded90",
"content_id": "8009468af5593e58c3909038a2fe84b469ed1aa7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 90,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1086.divisor-game.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1086 lang=python3\n#\n# [1086] divisor-game\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5862069129943848,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 11.571428298950195,
"blob_id": "e8d03a51b8427aac70a61e8571024b20690ab1ac",
"content_id": "dd7ec59ce0eb8be89b8076fb8b8f8fd5724a060c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 87,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/529.minesweeper.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=529 lang=python3\n#\n# [529] minesweeper\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5625,
"alphanum_fraction": 0.625,
"avg_line_length": 10.571428298950195,
"blob_id": "90f75d5a1b622d2055f116cebc13265902c7e2a0",
"content_id": "692ebc68a9569e943658fbb2718b38644aa88817",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 80,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/50.powx-n.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=50 lang=python3\n#\n# [50] powx-n\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6176470518112183,
"alphanum_fraction": 0.686274528503418,
"avg_line_length": 13.714285850524902,
"blob_id": "1f58eb30e0893ce78698c25f06b29c1ae3374fa4",
"content_id": "eec697b696ee43eda914ae855405a8cfa5c96c13",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 102,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/208.implement-trie-prefix-tree.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=208 lang=python3\n#\n# [208] implement-trie-prefix-tree\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6239316463470459,
"alphanum_fraction": 0.7008547186851501,
"avg_line_length": 15.857142448425293,
"blob_id": "82880118cb087ad293c3365ad2880b9a1a9d7b4d",
"content_id": "2542c4227508d86cfa2b66cdd24ea368bb909b5c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 117,
"license_type": "permissive",
"max_line_length": 48,
"num_lines": 7,
"path": "/codes_auto/1682.most-visited-sector-in-a-circular-track.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1682 lang=python3\n#\n# [1682] most-visited-sector-in-a-circular-track\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5876288414001465,
"alphanum_fraction": 0.6804123520851135,
"avg_line_length": 13,
"blob_id": "9f31496d73df7af7cef970751dcb316a734e1edb",
"content_id": "3bc7201b54d9e83af5f1b95fee879a07e1ca956b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 97,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1008.binary-tree-cameras.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1008 lang=python3\n#\n# [1008] binary-tree-cameras\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6666666865348816,
"alphanum_fraction": 0.7264957427978516,
"avg_line_length": 15.857142448425293,
"blob_id": "163a0499bf7632f89fc755d8988076585bd1fba9",
"content_id": "f1a304c486a76b42e0ab04d806b855671aeb53bb",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 117,
"license_type": "permissive",
"max_line_length": 49,
"num_lines": 7,
"path": "/codes_auto/674.longest-continuous-increasing-subsequence.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=674 lang=python3\n#\n# [674] longest-continuous-increasing-subsequence\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6396396160125732,
"alphanum_fraction": 0.7027027010917664,
"avg_line_length": 15,
"blob_id": "05c34f87a82c4fd4da987ffa60dde2e5ddf30bf9",
"content_id": "55debae295c1593ab8ff54ef36a2908b716c66dc",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 111,
"license_type": "permissive",
"max_line_length": 43,
"num_lines": 7,
"path": "/codes_auto/718.maximum-length-of-repeated-subarray.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=718 lang=python3\n#\n# [718] maximum-length-of-repeated-subarray\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5777778029441833,
"alphanum_fraction": 0.6555555462837219,
"avg_line_length": 12,
"blob_id": "cec0a3a5b02e18dc09c7d649364e1992aa0afe14",
"content_id": "8b0edc510597d8cd8aaec5d7a1644da17f74d592",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 90,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/126.word-ladder-ii.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=126 lang=python3\n#\n# [126] word-ladder-ii\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6280992031097412,
"alphanum_fraction": 0.702479362487793,
"avg_line_length": 16.428571701049805,
"blob_id": "4d3a99c6bb7d9ea49f2c35846a5eb05a218f6089",
"content_id": "0cc84524719aaa160677b2585270be660476b89f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 121,
"license_type": "permissive",
"max_line_length": 52,
"num_lines": 7,
"path": "/codes_auto/1574.maximum-product-of-two-elements-in-an-array.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1574 lang=python3\n#\n# [1574] maximum-product-of-two-elements-in-an-array\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6138613820075989,
"alphanum_fraction": 0.6831682920455933,
"avg_line_length": 13.571428298950195,
"blob_id": "59e08977f34d35af66e5eb2ad2a7b51e6e61c9a8",
"content_id": "f3f584ed1f802bb937157bb79f608fca9b8fc766",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 101,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/287.find-the-duplicate-number.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=287 lang=python3\n#\n# [287] find-the-duplicate-number\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6236559152603149,
"alphanum_fraction": 0.6774193644523621,
"avg_line_length": 12.428571701049805,
"blob_id": "f05961415afdb35d52bf75e5e3a24fe916e4a60d",
"content_id": "476563e444a7a3203059bf4fea85147da7083bbf",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 93,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/97.interleaving-string.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=97 lang=python3\n#\n# [97] interleaving-string\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5714285969734192,
"alphanum_fraction": 0.6703296899795532,
"avg_line_length": 12.142857551574707,
"blob_id": "c571d64f66973763769dd3e3ee17963c9960ea3a",
"content_id": "bfa8aa3df7d781f21e24ac5bd726be866c6571f1",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 91,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1619.path-crossing.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1619 lang=python3\n#\n# [1619] path-crossing\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5980392098426819,
"alphanum_fraction": 0.686274528503418,
"avg_line_length": 13.714285850524902,
"blob_id": "04cadc80761ee9df221c9da46adb20c6969cac12",
"content_id": "e7d952de58781e0b75af02b1f0a269bb4bd4d9ec",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 102,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1611.making-file-names-unique.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1611 lang=python3\n#\n# [1611] making-file-names-unique\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5957446694374084,
"alphanum_fraction": 0.6702127456665039,
"avg_line_length": 12.571428298950195,
"blob_id": "01e542be8047c0b858b41a29bc95c6cb9eebc644",
"content_id": "f0c67012f8346d433b831d5bfc2cceda92a19b3f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 94,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/210.course-schedule-ii.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=210 lang=python3\n#\n# [210] course-schedule-ii\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5833333134651184,
"alphanum_fraction": 0.6428571343421936,
"avg_line_length": 11.142857551574707,
"blob_id": "9d0e985ddf4322e1127463404eaeae783d3a3db3",
"content_id": "404724fc545b8faebbe1baf89ba444d0494b1caf",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 84,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/67.add-binary.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=67 lang=python3\n#\n# [67] add-binary\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6206896305084229,
"alphanum_fraction": 0.6982758641242981,
"avg_line_length": 15.714285850524902,
"blob_id": "ef370a6c62c7e6cb595c272845c53995b1b40c4a",
"content_id": "bcbc730d6595f10205953ecdb4eea2f1695fc995",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 116,
"license_type": "permissive",
"max_line_length": 47,
"num_lines": 7,
"path": "/codes_auto/1424.maximum-candies-you-can-get-from-boxes.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1424 lang=python3\n#\n# [1424] maximum-candies-you-can-get-from-boxes\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6000000238418579,
"alphanum_fraction": 0.6818181872367859,
"avg_line_length": 14.857142448425293,
"blob_id": "c4c0c8fe45f100ea784107cc2a2bb1c8e4b19fae",
"content_id": "734371f1dd3c6e4218b2a3638ce8c99ad481ff6e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 110,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1657.find-the-winner-of-an-array-game.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1657 lang=python3\n#\n# [1657] find-the-winner-of-an-array-game\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6063829660415649,
"alphanum_fraction": 0.6808510422706604,
"avg_line_length": 12.571428298950195,
"blob_id": "e84f5cbe44b8ad4de133033f9ccb8c4adc47ac23",
"content_id": "09a3486618f60a1e0ebde60345cc1e23b1a141ba",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 94,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/130.surrounded-regions.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=130 lang=python3\n#\n# [130] surrounded-regions\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6616541147232056,
"alphanum_fraction": 0.7293233275413513,
"avg_line_length": 18.14285659790039,
"blob_id": "63aec41296905f4c8315a0c2ad4c8bdfd1497f6c",
"content_id": "1ee64172db3395d3b50202091ec385bad0b6a6fb",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 133,
"license_type": "permissive",
"max_line_length": 64,
"num_lines": 7,
"path": "/codes_auto/1698.replace-all-s-to-avoid-consecutive-repeating-characters.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1698 lang=python3\n#\n# [1698] replace-all-s-to-avoid-consecutive-repeating-characters\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.625,
"alphanum_fraction": 0.6770833134651184,
"avg_line_length": 12.857142448425293,
"blob_id": "a04886d3d77c4849509f8cbcafbd7b34704a943d",
"content_id": "791804d145c64b82a0e3baa12288458cf7ad927d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 96,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/35.search-insert-position.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=35 lang=python3\n#\n# [35] search-insert-position\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6067415475845337,
"alphanum_fraction": 0.6629213690757751,
"avg_line_length": 11.857142448425293,
"blob_id": "cbb23e5a845457c49d760426d4054db2359a4fa6",
"content_id": "b4cc816d84e105d675e91009412bb783008fce0b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 89,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/56.merge-intervals.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=56 lang=python3\n#\n# [56] merge-intervals\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.602150559425354,
"alphanum_fraction": 0.6774193644523621,
"avg_line_length": 12.428571701049805,
"blob_id": "e009a06ec3b47041d234480376881ba57b298e7a",
"content_id": "1a02485dd803d75ab9e989cef1851e41845a66af",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 93,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/399.evaluate-division.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=399 lang=python3\n#\n# [399] evaluate-division\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6279069781303406,
"alphanum_fraction": 0.6976743936538696,
"avg_line_length": 17.571428298950195,
"blob_id": "d50c6976a5da699fc7f9b4b3ec27aeb12ca3f6c8",
"content_id": "1c113b6960097cde0f977bb058780b2db744a25d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 129,
"license_type": "permissive",
"max_line_length": 60,
"num_lines": 7,
"path": "/codes_auto/1643.number-of-nodes-in-the-sub-tree-with-the-same-label.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1643 lang=python3\n#\n# [1643] number-of-nodes-in-the-sub-tree-with-the-same-label\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5876288414001465,
"alphanum_fraction": 0.6804123520851135,
"avg_line_length": 13,
"blob_id": "c4e0886562e83092d103c33659adfe1209f815d4",
"content_id": "a3b9f501aa89864edb44583024447098e56ae689",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 97,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1217.relative-sort-array.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1217 lang=python3\n#\n# [1217] relative-sort-array\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5913978219032288,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 12.428571701049805,
"blob_id": "f10089c93a450bbc4ca3dfdcf1c94d14302f20e4",
"content_id": "6c6bbf68b153aaca5cddb7df69ceee0cf2d1774d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 93,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/876.hand-of-straights.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=876 lang=python3\n#\n# [876] hand-of-straights\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5647059082984924,
"alphanum_fraction": 0.6470588445663452,
"avg_line_length": 11.285714149475098,
"blob_id": "a932793ce76945aad9abfd0f53fae8b61da2adf1",
"content_id": "5dfe2fdc172ed3d50e0d63da5022e581211f7aee",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 85,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/100.same-tree.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=100 lang=python3\n#\n# [100] same-tree\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6415094137191772,
"alphanum_fraction": 0.7075471878051758,
"avg_line_length": 14.285714149475098,
"blob_id": "b742c4e454b5a2200bbb95877b019d97ae03399b",
"content_id": "80e58f0eb9c2458eb01da678ca161b30f79928d3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 106,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/300.longest-increasing-subsequence.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=300 lang=python3\n#\n# [300] longest-increasing-subsequence\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5652173757553101,
"alphanum_fraction": 0.6630434989929199,
"avg_line_length": 12.285714149475098,
"blob_id": "d2ae4477604b1628b22ecac9456d7d8aeabae7b7",
"content_id": "ff2cc3e4fa0010425f995d6994abc80dcad5bb40",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 92,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1203.print-in-order.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1203 lang=python3\n#\n# [1203] print-in-order\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5959596037864685,
"alphanum_fraction": 0.6868686676025391,
"avg_line_length": 13.285714149475098,
"blob_id": "38b343c2f1455cfc7e37203c253e1904b44bc40d",
"content_id": "3985807169191970bf259661450cd07cd087964b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 99,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1637.string-compression-ii.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1637 lang=python3\n#\n# [1637] string-compression-ii\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6407766938209534,
"alphanum_fraction": 0.6893203854560852,
"avg_line_length": 13.857142448425293,
"blob_id": "70d124cef99e165ef1f3eb6adbfe06f01ba2cccd",
"content_id": "9df29a04cc824140f6a41438a9bf175aa6b5d49d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 103,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/94.binary-tree-inorder-traversal.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=94 lang=python3\n#\n# [94] binary-tree-inorder-traversal\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6513761281967163,
"alphanum_fraction": 0.6972476840019226,
"avg_line_length": 14.714285850524902,
"blob_id": "4466cacd8c32f71b6807baca14e2d395f526997c",
"content_id": "8ea6d157b28d171e2cf83c0004f329005ef5bb30",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 109,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/26.remove-duplicates-from-sorted-array.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=26 lang=python3\n#\n# [26] remove-duplicates-from-sorted-array\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.632478654384613,
"alphanum_fraction": 0.7094017267227173,
"avg_line_length": 15.857142448425293,
"blob_id": "5799461d7a0fda022fa1b3cf903e70d05d1edc3c",
"content_id": "f046991fa1ceea594acc8320c3f3c63954ff98b8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 117,
"license_type": "permissive",
"max_line_length": 48,
"num_lines": 7,
"path": "/codes_auto/1497.design-a-stack-with-increment-operation.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1497 lang=python3\n#\n# [1497] design-a-stack-with-increment-operation\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5714285969734192,
"alphanum_fraction": 0.6547619104385376,
"avg_line_length": 11.142857551574707,
"blob_id": "bfdf4597f1d127f5ea33e7136fe2c5c0b4e6e19d",
"content_id": "a5f0526129453f1aacb67b9aab4e4ed88424a8d9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 84,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/120.triangle.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=120 lang=python3\n#\n# [120] triangle\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6528925895690918,
"alphanum_fraction": 0.71074378490448,
"avg_line_length": 16.428571701049805,
"blob_id": "31c9c2bdfa76af7fcd8ace59f60a359d80d352a0",
"content_id": "e1f9f354e9291a4426265d34411566a985131500",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 121,
"license_type": "permissive",
"max_line_length": 53,
"num_lines": 7,
"path": "/codes_auto/632.smallest-range-covering-elements-from-k-lists.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=632 lang=python3\n#\n# [632] smallest-range-covering-elements-from-k-lists\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5913978219032288,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 12.428571701049805,
"blob_id": "91ff045e4a45e03bb2dcc3bd34f900c51466b9f6",
"content_id": "b232b14d88fa535f67da9227d18b9a905df10c56",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 93,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/260.single-number-iii.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=260 lang=python3\n#\n# [260] single-number-iii\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5882353186607361,
"alphanum_fraction": 0.6470588445663452,
"avg_line_length": 11.285714149475098,
"blob_id": "595a91c322e5ed262ac6adaf1afd2a28eee8dae8",
"content_id": "b98884c69005d14c3bc113b48110cdab9ddcb0d9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 85,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/91.decode-ways.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=91 lang=python3\n#\n# [91] decode-ways\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.604651153087616,
"alphanum_fraction": 0.6627907156944275,
"avg_line_length": 11.428571701049805,
"blob_id": "860a57648b5659870009d67c09e81a6a1fc9a035",
"content_id": "98201c84e6970ec1efefc4dffe382618d29c7db6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 86,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/46.permutations.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=46 lang=python3\n#\n# [46] permutations\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6363636255264282,
"alphanum_fraction": 0.6868686676025391,
"avg_line_length": 13.285714149475098,
"blob_id": "e15b7d145009b68c471cf95817853a64258e8fe3",
"content_id": "14377c86438ccf30304d4a15b6c4ed501d2950bd",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 99,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/32.longest-valid-parentheses.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=32 lang=python3\n#\n# [32] longest-valid-parentheses\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5894736647605896,
"alphanum_fraction": 0.6842105388641357,
"avg_line_length": 12.714285850524902,
"blob_id": "2ea97ebd9518f59eb3e7936ac827ec26216571d8",
"content_id": "3614efcecd42303166d3bd6219b3657a498fdb14",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 95,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1033.broken-calculator.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1033 lang=python3\n#\n# [1033] broken-calculator\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5833333134651184,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 12.857142448425293,
"blob_id": "e8ff0514bbfea4db2e6b0ecff5c1ccad3963faa0",
"content_id": "0d76c5392b3a071056df7b8cf7375e72fa2178b8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 96,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/900.reordered-power-of-2.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=900 lang=python3\n#\n# [900] reordered-power-of-2\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6440678238868713,
"alphanum_fraction": 0.7033898234367371,
"avg_line_length": 16,
"blob_id": "f729aad97a53504d34e894433e96848ede5c3f02",
"content_id": "5e2c75a241e2d2f6bba151cef0dce02da66a0d67",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 118,
"license_type": "permissive",
"max_line_length": 50,
"num_lines": 7,
"path": "/codes_auto/108.convert-sorted-array-to-binary-search-tree.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=108 lang=python3\n#\n# [108] convert-sorted-array-to-binary-search-tree\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6388888955116272,
"alphanum_fraction": 0.7037037014961243,
"avg_line_length": 14.571428298950195,
"blob_id": "3e826d4dea0353ecd9b5d8c134fb6a7d53f33e9d",
"content_id": "7c354b9080df570dda091c1bdd7625722295e056",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 108,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/150.evaluate-reverse-polish-notation.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=150 lang=python3\n#\n# [150] evaluate-reverse-polish-notation\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.59375,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 12.857142448425293,
"blob_id": "c95de9b9b078f4488a7396a05cca170ef989c881",
"content_id": "108226fa62ae320c27b7ef4ede1ea483b74246a5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 96,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/142.linked-list-cycle-ii.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=142 lang=python3\n#\n# [142] linked-list-cycle-ii\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6067415475845337,
"alphanum_fraction": 0.6629213690757751,
"avg_line_length": 11.857142448425293,
"blob_id": "c024adc3740f7a59ca60fb59970385f22ac95642",
"content_id": "897d324ff5bcdba550fdedfc33a1ec717053488f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 89,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/39.combination-sum.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=39 lang=python3\n#\n# [39] combination-sum\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6162790656089783,
"alphanum_fraction": 0.6395348906517029,
"avg_line_length": 11.428571701049805,
"blob_id": "f7278f60bd038747f591c821c90755ceed36657c",
"content_id": "dbbb16e66427ea80fc7e5fcade85bea300e5e15e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 86,
"license_type": "permissive",
"max_line_length": 38,
"num_lines": 7,
"path": "/codes_auto/2.add-two-numbers.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=2 lang=python\n#\n# [2] add-two-numbers\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6330274939537048,
"alphanum_fraction": 0.6972476840019226,
"avg_line_length": 14.714285850524902,
"blob_id": "ab1dec97871b119c52f8b53fc90ee114ec30b880",
"content_id": "82ee0f295607be392ee40c940f5009751c0cf27b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 109,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/902.minimum-number-of-refueling-stops.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=902 lang=python3\n#\n# [902] minimum-number-of-refueling-stops\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5789473652839661,
"alphanum_fraction": 0.6736842393875122,
"avg_line_length": 12.714285850524902,
"blob_id": "ba4e6d0c113ddb24d060dc78bf50a89d5381bd30",
"content_id": "bf965ddbfa4111f520dbba8c593d627e5847fbd3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 95,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1517.restore-the-array.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1517 lang=python3\n#\n# [1517] restore-the-array\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6363636255264282,
"alphanum_fraction": 0.71074378490448,
"avg_line_length": 16.428571701049805,
"blob_id": "82688ec02a707dccfc5e6e882cc497b237b9ded8",
"content_id": "de506f7055fc36a82cda60daaabaf90341edeade",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 121,
"license_type": "permissive",
"max_line_length": 52,
"num_lines": 7,
"path": "/codes_auto/1387.find-elements-in-a-contaminated-binary-tree.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1387 lang=python3\n#\n# [1387] find-elements-in-a-contaminated-binary-tree\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6274510025978088,
"alphanum_fraction": 0.6960784196853638,
"avg_line_length": 13.714285850524902,
"blob_id": "dc149c5a1708841fe8244fdc13183ea24d0c7ecb",
"content_id": "7799a093826a7779a9805c9e2081daf7f7027f93",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 102,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/459.repeated-substring-pattern.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=459 lang=python3\n#\n# [459] repeated-substring-pattern\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6063829660415649,
"alphanum_fraction": 0.6808510422706604,
"avg_line_length": 12.571428298950195,
"blob_id": "b060f1ee09e96e80598000a0f196f6b6cda5f036",
"content_id": "fb160c5443d22a5bc1f25ac6960e8d524a09d659",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 94,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/443.string-compression.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=443 lang=python3\n#\n# [443] string-compression\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6200000047683716,
"alphanum_fraction": 0.6899999976158142,
"avg_line_length": 13.428571701049805,
"blob_id": "62cc2ebaca5d62c4e693d5d43e27a5e78f6ebc2f",
"content_id": "3b78a5504118bf4d5420957281e366bc7bb4c30a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 100,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/152.maximum-product-subarray.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=152 lang=python3\n#\n# [152] maximum-product-subarray\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6111111044883728,
"alphanum_fraction": 0.6944444179534912,
"avg_line_length": 14.571428298950195,
"blob_id": "b05fa7ec633e58313cd7d3cec11723f16eb9f25e",
"content_id": "aa2c557e543412266be728fcab8222e294b74204",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 108,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1370.count-number-of-nice-subarrays.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1370 lang=python3\n#\n# [1370] count-number-of-nice-subarrays\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5747126340866089,
"alphanum_fraction": 0.6551724076271057,
"avg_line_length": 11.571428298950195,
"blob_id": "2a4e850fb7f27ab90724a4cd9bdd0e8921bbef79",
"content_id": "9821904be707e71b776959c34cad8e2a3bf0c292",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 87,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/322.coin-change.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=322 lang=python3\n#\n# [322] coin-change\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.634782612323761,
"alphanum_fraction": 0.695652186870575,
"avg_line_length": 15.571428298950195,
"blob_id": "4d60247594ecf9d96460b7ed423ff2938808a49b",
"content_id": "1609688a059214449baf1c6d58a7f1bebf9308dc",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 115,
"license_type": "permissive",
"max_line_length": 47,
"num_lines": 7,
"path": "/codes_auto/236.lowest-common-ancestor-of-a-binary-tree.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=236 lang=python3\n#\n# [236] lowest-common-ancestor-of-a-binary-tree\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6355140209197998,
"alphanum_fraction": 0.6822429895401001,
"avg_line_length": 14.428571701049805,
"blob_id": "2809a7aa08450131f37789f2683fbf53478cb655",
"content_id": "68065accbe6d02da1724f78eb7b1f8ed596e6520",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 107,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/81.search-in-rotated-sorted-array-ii.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=81 lang=python3\n#\n# [81] search-in-rotated-sorted-array-ii\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5978260636329651,
"alphanum_fraction": 0.6739130616188049,
"avg_line_length": 12.285714149475098,
"blob_id": "2132d81b36cb587050bf3d79c01e311c975a983e",
"content_id": "e5cbbf22c613e79bf6b078a4da5eb956d22f033e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 92,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/169.majority-element.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=169 lang=python3\n#\n# [169] majority-element\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6000000238418579,
"alphanum_fraction": 0.6818181872367859,
"avg_line_length": 14.857142448425293,
"blob_id": "a92d1cd6bb1410de4faeecd5d4b6611f16d9d8e5",
"content_id": "6935267c18820936e933fc70db07c007702839da",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 110,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1678.number-of-ways-to-split-a-string.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1678 lang=python3\n#\n# [1678] number-of-ways-to-split-a-string\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6090909242630005,
"alphanum_fraction": 0.6727272868156433,
"avg_line_length": 14.857142448425293,
"blob_id": "dc209ee27fe5fafefa377b8fc6deaa2359fb8571",
"content_id": "a3272a72a7902278e84468a40b6c1f1b0529a853",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 110,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/122.best-time-to-buy-and-sell-stock-ii.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=122 lang=python3\n#\n# [122] best-time-to-buy-and-sell-stock-ii\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6060606241226196,
"alphanum_fraction": 0.6767676472663879,
"avg_line_length": 13.285714149475098,
"blob_id": "9b4e64ea2d4d6bbdabff066fa09bfd31ce139c6c",
"content_id": "52dfa51b04be92322427e75824808790fa6cfc55",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 99,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/347.top-k-frequent-elements.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=347 lang=python3\n#\n# [347] top-k-frequent-elements\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6036036014556885,
"alphanum_fraction": 0.684684693813324,
"avg_line_length": 15,
"blob_id": "d5f523feb420761044e274dc36b65a27e1cd866d",
"content_id": "21fee6b9ac26fee4e244088a48cd7104633d43e2",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 111,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1667.find-kth-bit-in-nth-binary-string.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1667 lang=python3\n#\n# [1667] find-kth-bit-in-nth-binary-string\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6475409865379333,
"alphanum_fraction": 0.7213114500045776,
"avg_line_length": 16.571428298950195,
"blob_id": "7115a5f908651e285c291d02df2f860ddf3d7640",
"content_id": "b2f81f89c01ded3991e0fb8d01ae1b235748b642",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 122,
"license_type": "permissive",
"max_line_length": 53,
"num_lines": 7,
"path": "/codes_auto/1644.maximum-number-of-non-overlapping-substrings.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1644 lang=python3\n#\n# [1644] maximum-number-of-non-overlapping-substrings\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5731707215309143,
"alphanum_fraction": 0.6341463327407837,
"avg_line_length": 10.857142448425293,
"blob_id": "b4b08432ec0587dc718dfdebf1a615c1893a931b",
"content_id": "0b3f51ab21cb1f40b6039482994c7ea65f1b5824",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 82,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/51.n-queens.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=51 lang=python3\n#\n# [51] n-queens\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6176470518112183,
"alphanum_fraction": 0.686274528503418,
"avg_line_length": 13.714285850524902,
"blob_id": "2c9563c5243fd372f6e8a77a18bbe170307cb590",
"content_id": "13889e3a62ec811175f369457a0d3d79e44fd078",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 102,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/416.partition-equal-subset-sum.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=416 lang=python3\n#\n# [416] partition-equal-subset-sum\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5980392098426819,
"alphanum_fraction": 0.6764705777168274,
"avg_line_length": 13.714285850524902,
"blob_id": "2e9825be9fad14ae316bb4a085275f4804830c6d",
"content_id": "4c4a3aa2ab5eca6f47b705efc923b5e9836e3985",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 102,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/308.range-sum-query-2d-mutable.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=308 lang=python3\n#\n# [308] range-sum-query-2d-mutable\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5959596037864685,
"alphanum_fraction": 0.6868686676025391,
"avg_line_length": 13.285714149475098,
"blob_id": "3c715eca8d6717a44c5d52b4439b7a459d2bd1ee",
"content_id": "838e83267f58a996295a81daf6be7a060d303785",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 99,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1005.univalued-binary-tree.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1005 lang=python3\n#\n# [1005] univalued-binary-tree\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5494505763053894,
"alphanum_fraction": 0.6593406796455383,
"avg_line_length": 12.142857551574707,
"blob_id": "dbf11446c8518ed5e8669ecc6723f6dba9fb7457",
"content_id": "ad3c467796c62612571e177137810e9aaa49cab5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 91,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1386.shift-2d-grid.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1386 lang=python3\n#\n# [1386] shift-2d-grid\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6320754885673523,
"alphanum_fraction": 0.698113203048706,
"avg_line_length": 14.285714149475098,
"blob_id": "5d67dacdc6b647d9e1c80e48324b64738223eefe",
"content_id": "48b173309c1423da250cb20915f70978b6ef2fcf",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 106,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/144.binary-tree-preorder-traversal.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=144 lang=python3\n#\n# [144] binary-tree-preorder-traversal\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6633663177490234,
"alphanum_fraction": 0.6930692791938782,
"avg_line_length": 13.571428298950195,
"blob_id": "9e4df97b58a987babd681d9764f26eb5de4e678c",
"content_id": "3ce374b0195d466c0c59c642d7790def4bdc3fc3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 101,
"license_type": "permissive",
"max_line_length": 39,
"num_lines": 7,
"path": "/codes_auto/5.longest-palindromic-substring.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=5 lang=python3\n#\n# [5] longest-palindromic-substring\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.625,
"alphanum_fraction": 0.6770833134651184,
"avg_line_length": 12.857142448425293,
"blob_id": "5672584c44b8bf059e9b2cc93a2a7a815254a569",
"content_id": "921dae7a9a3c8ff61ec5cec920e52cad2f6b89ec",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 96,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/41.first-missing-positive.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=41 lang=python3\n#\n# [41] first-missing-positive\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5795454382896423,
"alphanum_fraction": 0.6590909361839294,
"avg_line_length": 11.714285850524902,
"blob_id": "3637ae6e2f67308d3c82f91f9b0f2659e602816c",
"content_id": "b2577201b26e2dbc179da1d0b5a5cb60aa1a9d5d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 88,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/546.remove-boxes.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=546 lang=python3\n#\n# [546] remove-boxes\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6153846383094788,
"alphanum_fraction": 0.6703296899795532,
"avg_line_length": 12.142857551574707,
"blob_id": "43965cbc15f5a4f024c452a15078cb8d972b15dc",
"content_id": "bce28f4d14d68a3c5d33c928b95d66ee78ec360a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 91,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/85.maximal-rectangle.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=85 lang=python3\n#\n# [85] maximal-rectangle\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.569767415523529,
"alphanum_fraction": 0.6511628031730652,
"avg_line_length": 11.428571701049805,
"blob_id": "e9130269ca9e55773e5ad638cde7725f57a48cce",
"content_id": "64561d59b5c0c362957915f20cb5dc785957e7d5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 86,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/899.binary-gap.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=899 lang=python3\n#\n# [899] binary-gap\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5922330021858215,
"alphanum_fraction": 0.6796116232872009,
"avg_line_length": 13.857142448425293,
"blob_id": "f3a919781c3e51a33be50f7e972f084d87d062a2",
"content_id": "c4c404349ca8a80d00d614434e4b5ae2a645a4d7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 103,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1496.lucky-numbers-in-a-matrix.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1496 lang=python3\n#\n# [1496] lucky-numbers-in-a-matrix\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.643478274345398,
"alphanum_fraction": 0.7217391133308411,
"avg_line_length": 15.571428298950195,
"blob_id": "cc8442f08f6bdee6deb5e91d06ba767cb740d8ea",
"content_id": "10470505898e9782a812c946d3ce196a2d58077d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 115,
"license_type": "permissive",
"max_line_length": 46,
"num_lines": 7,
"path": "/codes_auto/1410.traffic-light-controlled-intersection.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1410 lang=python3\n#\n# [1410] traffic-light-controlled-intersection\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6200000047683716,
"alphanum_fraction": 0.6899999976158142,
"avg_line_length": 13.428571701049805,
"blob_id": "6f0ac8f14df3bb06d85b8da7294c18b901989be0",
"content_id": "3b3147fac8895951cac6017838c710f25d5b972c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 100,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/874.backspace-string-compare.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=874 lang=python3\n#\n# [874] backspace-string-compare\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6330274939537048,
"alphanum_fraction": 0.6972476840019226,
"avg_line_length": 14.714285850524902,
"blob_id": "eb1ac1bbc4fdd8d684c5be8946cfe7b293d8ed1c",
"content_id": "d03218960d9f175b4af603a95223ff298fc2693d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 109,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/102.binary-tree-level-order-traversal.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=102 lang=python3\n#\n# [102] binary-tree-level-order-traversal\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6022727489471436,
"alphanum_fraction": 0.6590909361839294,
"avg_line_length": 11.714285850524902,
"blob_id": "f366832c7b9f7b97bfb7b86047958411eb426bf6",
"content_id": "6b197b999341280c6eb735e97733c45a70d9ac4c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 88,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/86.partition-list.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=86 lang=python3\n#\n# [86] partition-list\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6181818246841431,
"alphanum_fraction": 0.699999988079071,
"avg_line_length": 14.857142448425293,
"blob_id": "621bf88eebc4c54f96d19c81ea56b13369a7880e",
"content_id": "1d394d73f64e52a5025fd8d41dab1654d7058d2d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 110,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1675.magnetic-force-between-two-balls.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1675 lang=python3\n#\n# [1675] magnetic-force-between-two-balls\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6496350169181824,
"alphanum_fraction": 0.7153284549713135,
"avg_line_length": 18.714284896850586,
"blob_id": "8c769b34f01be8805aa6c109300bf66946dcb39a",
"content_id": "fcac7f6a543e8561fb8415d7ada65fd2418e3e53",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 137,
"license_type": "permissive",
"max_line_length": 68,
"num_lines": 7,
"path": "/codes_auto/1515.find-the-minimum-number-of-fibonacci-numbers-whose-sum-is-k.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1515 lang=python3\n#\n# [1515] find-the-minimum-number-of-fibonacci-numbers-whose-sum-is-k\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6168224215507507,
"alphanum_fraction": 0.7009345889091492,
"avg_line_length": 14.428571701049805,
"blob_id": "05e4954d9797699ce13b4fe5d0495432bf835c63",
"content_id": "b924f3f382cc6cc5c40afc51e72324b6641ae4d3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 107,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1325.path-with-maximum-probability.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1325 lang=python3\n#\n# [1325] path-with-maximum-probability\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6153846383094788,
"alphanum_fraction": 0.682692289352417,
"avg_line_length": 14,
"blob_id": "cef0a4f718e846346adc0c4583267232d8fae76f",
"content_id": "823356301514cb895fdf5d6a6e3dc9e5d756ec80",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 104,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/238.product-of-array-except-self.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=238 lang=python3\n#\n# [238] product-of-array-except-self\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6619718074798584,
"alphanum_fraction": 0.7253521084785461,
"avg_line_length": 19.428571701049805,
"blob_id": "1ebf4d15ac040f44a2b06e0d3b8270d8a06d23c6",
"content_id": "88edef9bc409124fbd18c514a05e5f45b19b8e53",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 142,
"license_type": "permissive",
"max_line_length": 73,
"num_lines": 7,
"path": "/codes_auto/1633.minimum-number-of-increments-on-subarrays-to-form-a-target-array.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1633 lang=python3\n#\n# [1633] minimum-number-of-increments-on-subarrays-to-form-a-target-array\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5512820482254028,
"alphanum_fraction": 0.6282051205635071,
"avg_line_length": 10.285714149475098,
"blob_id": "6b31065adb9e568715e2cc596e50f2f8a8f38746",
"content_id": "6bcf4d194e18b74e3ded45fc23580a805e33aed6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 78,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/15.3sum.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=15 lang=python3\n#\n# [15] 3sum\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6620689630508423,
"alphanum_fraction": 0.7241379022598267,
"avg_line_length": 19.85714340209961,
"blob_id": "5d85321d39cacef49acb0855275e0b97745765f8",
"content_id": "0347a44a57135b25338e9552c2233af7cee2ebf7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 145,
"license_type": "permissive",
"max_line_length": 76,
"num_lines": 7,
"path": "/codes_auto/1577.probability-of-a-two-boxes-having-the-same-number-of-distinct-balls.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1577 lang=python3\n#\n# [1577] probability-of-a-two-boxes-having-the-same-number-of-distinct-balls\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6185566782951355,
"alphanum_fraction": 0.6907216310501099,
"avg_line_length": 13,
"blob_id": "ecb84236bf37fea8ca01eff5eb6107d724103880",
"content_id": "8ed4bdaedd376dc3f016f6848da704f263c42a5e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 97,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/332.reconstruct-itinerary.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=332 lang=python3\n#\n# [332] reconstruct-itinerary\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6363636255264282,
"alphanum_fraction": 0.71074378490448,
"avg_line_length": 16.428571701049805,
"blob_id": "9758697ef625a6ed4318b119f1e4b538dd994cac",
"content_id": "9ea996d8e3a5da8d386df775fcaf7e0f5dd6e51d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 121,
"license_type": "permissive",
"max_line_length": 52,
"num_lines": 7,
"path": "/codes_auto/1691.minimum-number-of-days-to-disconnect-island.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1691 lang=python3\n#\n# [1691] minimum-number-of-days-to-disconnect-island\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6481481194496155,
"alphanum_fraction": 0.6944444179534912,
"avg_line_length": 14.571428298950195,
"blob_id": "0f432d075571e628101d46a6f94a3e34c37cad2f",
"content_id": "38110048d21ae5eabe383a85eb776f6c665c0980",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 108,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/83.remove-duplicates-from-sorted-list.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=83 lang=python3\n#\n# [83] remove-duplicates-from-sorted-list\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.593406617641449,
"alphanum_fraction": 0.6703296899795532,
"avg_line_length": 12.142857551574707,
"blob_id": "9bf81d8c590fd799c71fd972934fa52521721511",
"content_id": "c43eec02edb67b3a5097418b5a803823899b0afb",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 91,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/279.perfect-squares.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=279 lang=python3\n#\n# [279] perfect-squares\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.630630612373352,
"alphanum_fraction": 0.6936936974525452,
"avg_line_length": 15,
"blob_id": "0cf7a84973dc5cc66ba8d5ae67372cbd478525b3",
"content_id": "81476afe0b3c261967454da8d31aaf61c24c99d9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 111,
"license_type": "permissive",
"max_line_length": 43,
"num_lines": 7,
"path": "/codes_auto/329.longest-increasing-path-in-a-matrix.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=329 lang=python3\n#\n# [329] longest-increasing-path-in-a-matrix\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5978260636329651,
"alphanum_fraction": 0.6739130616188049,
"avg_line_length": 12.285714149475098,
"blob_id": "e8e9e4680cedecea02c20dec0398430161962bd7",
"content_id": "ae42366e2efba580825aeaa88e13dbda5dfa02da",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 92,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/125.valid-palindrome.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=125 lang=python3\n#\n# [125] valid-palindrome\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6293103694915771,
"alphanum_fraction": 0.7068965435028076,
"avg_line_length": 15.714285850524902,
"blob_id": "629f6ddb986b75113b4008c0f7e4b87f877eb936",
"content_id": "0522aa0216f855cac20391e81bfa80e240c2402f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 116,
"license_type": "permissive",
"max_line_length": 47,
"num_lines": 7,
"path": "/codes_auto/1402.count-square-submatrices-with-all-ones.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1402 lang=python3\n#\n# [1402] count-square-submatrices-with-all-ones\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.540229856967926,
"alphanum_fraction": 0.6436781883239746,
"avg_line_length": 11.571428298950195,
"blob_id": "33305186bb2f27bdac6301d0d0532b0082c38361",
"content_id": "b06f003590d380635ab51f6ffc8551d9b7f31406",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 87,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/867.new-21-game.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=867 lang=python3\n#\n# [867] new-21-game\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5882353186607361,
"alphanum_fraction": 0.6470588445663452,
"avg_line_length": 11.285714149475098,
"blob_id": "70c83120ecba42fdf7fa24e6820039deed53f9b3",
"content_id": "ff85dd871a87fce196617e5452005e7aada6fe91",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 85,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/75.sort-colors.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=75 lang=python3\n#\n# [75] sort-colors\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6019417643547058,
"alphanum_fraction": 0.6893203854560852,
"avg_line_length": 13.857142448425293,
"blob_id": "ad1a6517ddfbc21eb2c8360e1dc334219b7ddce6",
"content_id": "74e2452b03c1f883fa032be569c1f875bfc9515a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 103,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1680.count-all-possible-routes.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1680 lang=python3\n#\n# [1680] count-all-possible-routes\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6504064798355103,
"alphanum_fraction": 0.7235772609710693,
"avg_line_length": 16.714284896850586,
"blob_id": "72f42fbf27d531fd9d13a5da5a4e9e7427e9b037",
"content_id": "67ce2637989e1535282deacb627384204f54d218",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 123,
"license_type": "permissive",
"max_line_length": 54,
"num_lines": 7,
"path": "/codes_auto/1626.can-make-arithmetic-progression-from-sequence.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1626 lang=python3\n#\n# [1626] can-make-arithmetic-progression-from-sequence\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6642335653305054,
"alphanum_fraction": 0.7299270033836365,
"avg_line_length": 18.714284896850586,
"blob_id": "8b55c1bed278632859f27dee19e0582067cb0888",
"content_id": "043b7374238ca3b1e8d3c27aa0301bd33c2d95dd",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 137,
"license_type": "permissive",
"max_line_length": 68,
"num_lines": 7,
"path": "/codes_auto/1473.find-the-longest-substring-containing-vowels-in-even-counts.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1473 lang=python3\n#\n# [1473] find-the-longest-substring-containing-vowels-in-even-counts\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5647059082984924,
"alphanum_fraction": 0.6470588445663452,
"avg_line_length": 11.285714149475098,
"blob_id": "d949a8ffdd3578962581306419e13be807445fe0",
"content_id": "fd6ff2b08037384929cbe5376b82cac5560bba16",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 85,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/148.sort-list.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=148 lang=python3\n#\n# [148] sort-list\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.640350878238678,
"alphanum_fraction": 0.719298243522644,
"avg_line_length": 15.428571701049805,
"blob_id": "cb01043bcddd143380e083bba3b854a8f91bd148",
"content_id": "f9ae46fdd1dd8b7f57027d23b9416354c71bff43",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 114,
"license_type": "permissive",
"max_line_length": 45,
"num_lines": 7,
"path": "/codes_auto/1362.airplane-seat-assignment-probability.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1362 lang=python3\n#\n# [1362] airplane-seat-assignment-probability\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6363636255264282,
"alphanum_fraction": 0.6942148804664612,
"avg_line_length": 16.428571701049805,
"blob_id": "6513e3111b2edae9132f363341f0e1ea10bea086",
"content_id": "986e1237091f6e54289486f8e895132b2c523a3d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 121,
"license_type": "permissive",
"max_line_length": 53,
"num_lines": 7,
"path": "/codes_auto/309.best-time-to-buy-and-sell-stock-with-cooldown.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=309 lang=python3\n#\n# [309] best-time-to-buy-and-sell-stock-with-cooldown\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5876288414001465,
"alphanum_fraction": 0.6804123520851135,
"avg_line_length": 13,
"blob_id": "09a375d583c781701fe0787d58ed4cb89838846e",
"content_id": "91813599d8afd78806aac01bc712421b848a29b3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 97,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1677.matrix-diagonal-sum.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1677 lang=python3\n#\n# [1677] matrix-diagonal-sum\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6138613820075989,
"alphanum_fraction": 0.6831682920455933,
"avg_line_length": 13.571428298950195,
"blob_id": "79e8b36652a33de360b5022f77a5f98c39a9da76",
"content_id": "037642d5b4697f3b0349b895dbd76248cb7629d9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 101,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/303.range-sum-query-immutable.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=303 lang=python3\n#\n# [303] range-sum-query-immutable\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5731707215309143,
"alphanum_fraction": 0.6341463327407837,
"avg_line_length": 10.857142448425293,
"blob_id": "fd542cded86bd4304e92a38817c39a37d9d7a5cb",
"content_id": "07c1af72beed94407e1f61ab6bf62c4716507458",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 82,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/66.plus-one.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=66 lang=python3\n#\n# [66] plus-one\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6486486196517944,
"alphanum_fraction": 0.6936936974525452,
"avg_line_length": 15,
"blob_id": "e24612275267870e8ec1652ff5d6de37cf47ad3b",
"content_id": "346157063ece0dad7a2661a5fd963c991307de0d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 111,
"license_type": "permissive",
"max_line_length": 44,
"num_lines": 7,
"path": "/codes_auto/17.letter-combinations-of-a-phone-number.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=17 lang=python3\n#\n# [17] letter-combinations-of-a-phone-number\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6153846383094788,
"alphanum_fraction": 0.682692289352417,
"avg_line_length": 14,
"blob_id": "f0bfc3860097cebc92ecbb0078a4a1ff07c8eba2",
"content_id": "94f742791db5e57639b418060107f4ad5b722ef8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 104,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/111.minimum-depth-of-binary-tree.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=111 lang=python3\n#\n# [111] minimum-depth-of-binary-tree\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.585106372833252,
"alphanum_fraction": 0.6595744490623474,
"avg_line_length": 12.571428298950195,
"blob_id": "58b225093de3b9c1785e8929a713827fe2f04d91",
"content_id": "f510df8ad76e65f4dd8aa61589c824532d0a5158",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 94,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/695.max-area-of-island.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=695 lang=python3\n#\n# [695] max-area-of-island\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6283186078071594,
"alphanum_fraction": 0.7079645991325378,
"avg_line_length": 15.285714149475098,
"blob_id": "5895ef900d196bd1e8d12c08ded54470ee6da1d5",
"content_id": "eec8d6d222bdef3f1448086a0f1cf9c44d216c70",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 113,
"license_type": "permissive",
"max_line_length": 44,
"num_lines": 7,
"path": "/codes_auto/1034.subarrays-with-k-different-integers.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1034 lang=python3\n#\n# [1034] subarrays-with-k-different-integers\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6224489808082581,
"alphanum_fraction": 0.6938775777816772,
"avg_line_length": 13.142857551574707,
"blob_id": "2be91eae501ce2c1cb675d259ec8b34c4909844e",
"content_id": "38bf3213d21b9be3443fbac99bcb2e91debd5a2b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 98,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/647.palindromic-substrings.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=647 lang=python3\n#\n# [647] palindromic-substrings\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6146789193153381,
"alphanum_fraction": 0.6972476840019226,
"avg_line_length": 14.714285850524902,
"blob_id": "8e24eb09695eb273c82f9302dd21a1c538efe0b3",
"content_id": "3499bbfe6f8a8a0f24a0d0e1345d17d9aaec95ed",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 109,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1628.count-submatrices-with-all-ones.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1628 lang=python3\n#\n# [1628] count-submatrices-with-all-ones\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5888888835906982,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 12,
"blob_id": "bbb7bfda89e0eb2ccdb6075aae177a212ab9ac9f",
"content_id": "9a9e5941f423d622a5b9f8c6b78b282591517bf5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 90,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/101.symmetric-tree.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=101 lang=python3\n#\n# [101] symmetric-tree\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6410256624221802,
"alphanum_fraction": 0.7008547186851501,
"avg_line_length": 15.857142448425293,
"blob_id": "e9904e9f50324ad0f8f2b5bac98bfdf3ab58e707",
"content_id": "6f3efa77b2a4831cbc094a26eb6961dcfaa7aa1f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 117,
"license_type": "permissive",
"max_line_length": 49,
"num_lines": 7,
"path": "/codes_auto/109.convert-sorted-list-to-binary-search-tree.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=109 lang=python3\n#\n# [109] convert-sorted-list-to-binary-search-tree\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5604395866394043,
"alphanum_fraction": 0.6593406796455383,
"avg_line_length": 12.142857551574707,
"blob_id": "4a03b5066a958af83feabbfd263019d51dc6f1e9",
"content_id": "007af9543c1a3c0f69e69b70f1b89dae8da1a333",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 91,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1240.stone-game-ii.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1240 lang=python3\n#\n# [1240] stone-game-ii\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5747126340866089,
"alphanum_fraction": 0.6551724076271057,
"avg_line_length": 11.571428298950195,
"blob_id": "d4fefd5d31c8458ca3eb4ffdc25d844476f5280b",
"content_id": "c2f4cc9c77fdfc3f48ac667978be7aa30fa46765",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 87,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/276.paint-fence.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=276 lang=python3\n#\n# [276] paint-fence\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6341463327407837,
"alphanum_fraction": 0.707317054271698,
"avg_line_length": 16.714284896850586,
"blob_id": "7513f7cf6e06a6d667eafabfddc1b55cd38d4d88",
"content_id": "7a0e94299d8bd8abe18fc953ba8e635b0e2fdcdc",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 123,
"license_type": "permissive",
"max_line_length": 54,
"num_lines": 7,
"path": "/codes_auto/1422.divide-array-in-sets-of-k-consecutive-numbers.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1422 lang=python3\n#\n# [1422] divide-array-in-sets-of-k-consecutive-numbers\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6020408272743225,
"alphanum_fraction": 0.6734693646430969,
"avg_line_length": 13.142857551574707,
"blob_id": "17456a65478a9e7f57e52119c1d677c2f9896da4",
"content_id": "0f032521e184f1fa8a0a86a13faacc211610b88e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 98,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/617.merge-two-binary-trees.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=617 lang=python3\n#\n# [617] merge-two-binary-trees\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5888888835906982,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 12,
"blob_id": "a3d77e50c9c38877d8ba4ea86ae11392a445abc5",
"content_id": "830a1ffa73d0ad34174317dc03d61dfff92f81be",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 90,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/163.missing-ranges.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=163 lang=python3\n#\n# [163] missing-ranges\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6190476417541504,
"alphanum_fraction": 0.6857143044471741,
"avg_line_length": 14.142857551574707,
"blob_id": "9e3f9fd2d3549fc4db3faf13b80ad5a1b87ca37a",
"content_id": "e57f0193423a20670976c85c0f99175ba2d35058",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 105,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/350.intersection-of-two-arrays-ii.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=350 lang=python3\n#\n# [350] intersection-of-two-arrays-ii\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6000000238418579,
"alphanum_fraction": 0.6736842393875122,
"avg_line_length": 12.714285850524902,
"blob_id": "9d69ca434592a3bcca17fae89d543699287fa982",
"content_id": "5bd942d553d111205362bf14a11c2cd10a3de7e2",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 95,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/206.reverse-linked-list.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=206 lang=python3\n#\n# [206] reverse-linked-list\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5747126340866089,
"alphanum_fraction": 0.6551724076271057,
"avg_line_length": 11.571428298950195,
"blob_id": "12529070ea730b9633aee5fefff1f8a8456b721b",
"content_id": "c5bab174927b72eb672e1255696ecb7ca10be1fb",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 87,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/415.add-strings.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=415 lang=python3\n#\n# [415] add-strings\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.604651153087616,
"alphanum_fraction": 0.6627907156944275,
"avg_line_length": 11.428571701049805,
"blob_id": "ab6fc3bac36047921d029559e8f7806d914e83e1",
"content_id": "bced289cd14e6660de95f0ead4e37f5882ff7af3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 86,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/77.combinations.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=77 lang=python3\n#\n# [77] combinations\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6138613820075989,
"alphanum_fraction": 0.6831682920455933,
"avg_line_length": 13.571428298950195,
"blob_id": "8eb1d26c5816f6edf692d0bebb2433d47f5d6a3d",
"content_id": "93b15ed75c8ac8c447aa40c55c04dfb8b8f38f14",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 101,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/875.longest-mountain-in-array.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=875 lang=python3\n#\n# [875] longest-mountain-in-array\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6153846383094788,
"alphanum_fraction": 0.6703296899795532,
"avg_line_length": 12.142857551574707,
"blob_id": "5cfb75ba0ee1df71b6e81cf682eb21238b902619",
"content_id": "8da749e33f922b3eabfc7617313c2a525d834cb3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 91,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/44.wildcard-matching.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=44 lang=python3\n#\n# [44] wildcard-matching\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5647059082984924,
"alphanum_fraction": 0.6470588445663452,
"avg_line_length": 11.285714149475098,
"blob_id": "9d4cdbdf70befe132e6010e0b08f70f7cec6d4cb",
"content_id": "0ec952b44a3b40a06f6ce8b4296673e9f1bd1177",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 85,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/146.lru-cache.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=146 lang=python3\n#\n# [146] lru-cache\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6146789193153381,
"alphanum_fraction": 0.6972476840019226,
"avg_line_length": 14.714285850524902,
"blob_id": "78fbd6352c6d915e500f6f29de4ce5f7ced167bb",
"content_id": "57f705b2904261d341692a2d7afba06f8f9d26b9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 109,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1388.greatest-sum-divisible-by-three.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1388 lang=python3\n#\n# [1388] greatest-sum-divisible-by-three\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6036036014556885,
"alphanum_fraction": 0.684684693813324,
"avg_line_length": 15,
"blob_id": "be3f71ca7b8304cff4ed8da2b0d2f92b9e71a1f8",
"content_id": "ff35be5d911e26a3e819f9da65e8a4be282f7256",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 111,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1631.number-of-sub-arrays-with-odd-sum.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1631 lang=python3\n#\n# [1631] number-of-sub-arrays-with-odd-sum\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6116504669189453,
"alphanum_fraction": 0.6796116232872009,
"avg_line_length": 13.857142448425293,
"blob_id": "44f0c20d150cae3e58483500a5c14c903666dd9b",
"content_id": "446399ff2962482379a81d851aec22fcd55e4dd7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 103,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/538.convert-bst-to-greater-tree.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=538 lang=python3\n#\n# [538] convert-bst-to-greater-tree\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5670102834701538,
"alphanum_fraction": 0.6597937941551208,
"avg_line_length": 13,
"blob_id": "0817c5af5755cc10be0b2ce255e78d4fdfb3ca05",
"content_id": "510c2d2b4021c3a1b015debdccc3c59697e5a321",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 97,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1585.the-kth-factor-of-n.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1585 lang=python3\n#\n# [1585] the-kth-factor-of-n\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5913978219032288,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 12.428571701049805,
"blob_id": "bf4367aad7afb19c7527882922b459d27e687efd",
"content_id": "2c1f668f910b09d53c7538b9d8f9d03f7204f573",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 93,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/200.number-of-islands.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=200 lang=python3\n#\n# [200] number-of-islands\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6293103694915771,
"alphanum_fraction": 0.7068965435028076,
"avg_line_length": 15.714285850524902,
"blob_id": "190ef01bc723653bd4ecdddbc9b45e132dceb854",
"content_id": "1b75369b48f77752b77ea7131973d2283451f83c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 116,
"license_type": "permissive",
"max_line_length": 47,
"num_lines": 7,
"path": "/codes_auto/1093.recover-a-tree-from-preorder-traversal.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1093 lang=python3\n#\n# [1093] recover-a-tree-from-preorder-traversal\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6095238327980042,
"alphanum_fraction": 0.6761904954910278,
"avg_line_length": 14.142857551574707,
"blob_id": "1cafb4aba85641d90560b2b4c7992976417bf9c1",
"content_id": "0567eaaaa653b3cb45cf754c2704e073dbfdd9b7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 105,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/557.reverse-words-in-a-string-iii.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=557 lang=python3\n#\n# [557] reverse-words-in-a-string-iii\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.634782612323761,
"alphanum_fraction": 0.695652186870575,
"avg_line_length": 15.571428298950195,
"blob_id": "c2b4f68cc290eeb9afe74d4ffe4bff81c719128b",
"content_id": "8b5ca6a380a01e2ea0d7165e994d86a158126d1e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 115,
"license_type": "permissive",
"max_line_length": 47,
"num_lines": 7,
"path": "/codes_auto/154.find-minimum-in-rotated-sorted-array-ii.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=154 lang=python3\n#\n# [154] find-minimum-in-rotated-sorted-array-ii\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6122449040412903,
"alphanum_fraction": 0.6632652878761292,
"avg_line_length": 13.142857551574707,
"blob_id": "02e359e5d23d0022fe3240ec3fc14500ad6bd82b",
"content_id": "fb5112f9032feadae431ff3b8b487548c2585c6b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 98,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/25.reverse-nodes-in-k-group.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=25 lang=python3\n#\n# [25] reverse-nodes-in-k-group\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6276595592498779,
"alphanum_fraction": 0.6808510422706604,
"avg_line_length": 12.571428298950195,
"blob_id": "9cfb83144c97976d00ad6e8d6e6a8ddc45cc599b",
"content_id": "2e171355243f9769fef4106c90ecd7f0e33cdc6f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 94,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/22.generate-parentheses.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=22 lang=python3\n#\n# [22] generate-parentheses\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5730336904525757,
"alphanum_fraction": 0.6516854166984558,
"avg_line_length": 11.857142448425293,
"blob_id": "dfeb381f90aabe2767aa9f2c803a8911535211f0",
"content_id": "1543fee4db60391a748c647252b99ba67fe01f25",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 89,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/140.word-break-ii.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=140 lang=python3\n#\n# [140] word-break-ii\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6643356680870056,
"alphanum_fraction": 0.7272727489471436,
"avg_line_length": 19.571428298950195,
"blob_id": "a53ec7b48009cb7ad507a62f93fa2c5ebeda6495",
"content_id": "58436eba1d379a84179d0ad21874ed61a0aaae8a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 143,
"license_type": "permissive",
"max_line_length": 74,
"num_lines": 7,
"path": "/codes_auto/1629.minimum-possible-integer-after-at-most-k-adjacent-swaps-on-digits.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1629 lang=python3\n#\n# [1629] minimum-possible-integer-after-at-most-k-adjacent-swaps-on-digits\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6356589198112488,
"alphanum_fraction": 0.7054263353347778,
"avg_line_length": 17.571428298950195,
"blob_id": "6ecaf0d48d122c86750ffd49874783543a394452",
"content_id": "53cc78b91ef65627a63577fdb7360bd7d29f2211",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 129,
"license_type": "permissive",
"max_line_length": 60,
"num_lines": 7,
"path": "/codes_auto/1689.detect-pattern-of-length-m-repeated-k-or-more-times.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1689 lang=python3\n#\n# [1689] detect-pattern-of-length-m-repeated-k-or-more-times\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.584269642829895,
"alphanum_fraction": 0.6629213690757751,
"avg_line_length": 11.857142448425293,
"blob_id": "8f5bc01af762558f28325405f2c6a792f770d0c1",
"content_id": "7c5fc4cc207438f062dd60c8412ff0e76d5ad9f3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 89,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/864.image-overlap.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=864 lang=python3\n#\n# [864] image-overlap\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6074766516685486,
"alphanum_fraction": 0.672897219657898,
"avg_line_length": 14.428571701049805,
"blob_id": "92bc940312b7d3742cbadfa75a0a9869d52197e8",
"content_id": "2301dc87c86a80a0be1a6c3fef37c5dfe5d19109",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 107,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/121.best-time-to-buy-and-sell-stock.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=121 lang=python3\n#\n# [121] best-time-to-buy-and-sell-stock\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6153846383094788,
"alphanum_fraction": 0.692307710647583,
"avg_line_length": 15.857142448425293,
"blob_id": "96f88326bebc4726a1e3e333f8949c15d7b4a5cb",
"content_id": "df382aea4ca65b7499bfe01c1e023c7453118403",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 117,
"license_type": "permissive",
"max_line_length": 48,
"num_lines": 7,
"path": "/codes_auto/1620.check-if-array-pairs-are-divisible-by-k.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1620 lang=python3\n#\n# [1620] check-if-array-pairs-are-divisible-by-k\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5795454382896423,
"alphanum_fraction": 0.6590909361839294,
"avg_line_length": 11.714285850524902,
"blob_id": "706abe53a4e23b0cd7b96cabe1884037c0d5445a",
"content_id": "2fc00c26a120a51a29955dc0efcf39d03c6ca97b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 88,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/198.house-robber.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=198 lang=python3\n#\n# [198] house-robber\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6111111044883728,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 12,
"blob_id": "daf57c6681effa6a2ada01110dbcc99da6f9573c",
"content_id": "d68be518c9c05167abe5dadaf91a06e33ee30a20",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 90,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/43.multiply-strings.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=43 lang=python3\n#\n# [43] multiply-strings\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6226415038108826,
"alphanum_fraction": 0.6698113083839417,
"avg_line_length": 14.285714149475098,
"blob_id": "2ee0ca5d0ed64a5d52c4eb78c397085b54570a41",
"content_id": "e1cc557f2906f8c2c6d735563ee8554646ebb770",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 106,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/19.remove-nth-node-from-end-of-list.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=19 lang=python3\n#\n# [19] remove-nth-node-from-end-of-list\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6766917109489441,
"alphanum_fraction": 0.7293233275413513,
"avg_line_length": 18.14285659790039,
"blob_id": "a73b9f1a1ceb784af26506446a63879bcf9f1880",
"content_id": "811ef637851da5a3c7ea73122813c65dbeffef4b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 133,
"license_type": "permissive",
"max_line_length": 65,
"num_lines": 7,
"path": "/codes_auto/105.construct-binary-tree-from-preorder-and-inorder-traversal.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=105 lang=python3\n#\n# [105] construct-binary-tree-from-preorder-and-inorder-traversal\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6600000262260437,
"alphanum_fraction": 0.7200000286102295,
"avg_line_length": 20.571428298950195,
"blob_id": "eb19534802e7bba65ec6e469f516f7b2f983b190",
"content_id": "477427279c3db44ed501354b56277c65d54c1772",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 150,
"license_type": "permissive",
"max_line_length": 81,
"num_lines": 7,
"path": "/codes_auto/1699.number-of-ways-where-square-of-number-is-equal-to-product-of-two-numbers.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1699 lang=python3\n#\n# [1699] number-of-ways-where-square-of-number-is-equal-to-product-of-two-numbers\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6393442749977112,
"alphanum_fraction": 0.7131147384643555,
"avg_line_length": 16.571428298950195,
"blob_id": "b0b1f9831adc2ee77b470401505043407b47ece8",
"content_id": "d6d07e6f683d5b2f7b9d15f53127e5f7e6f36168",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 122,
"license_type": "permissive",
"max_line_length": 53,
"num_lines": 7,
"path": "/codes_auto/1423.maximum-number-of-occurrences-of-a-substring.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1423 lang=python3\n#\n# [1423] maximum-number-of-occurrences-of-a-substring\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.602150559425354,
"alphanum_fraction": 0.6774193644523621,
"avg_line_length": 12.428571701049805,
"blob_id": "76a4da54bcedba4632fcd342110324e7d1a422de",
"content_id": "a572ac017e6eb917b33ce5b162e88f26d0750d70",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 93,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/413.arithmetic-slices.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=413 lang=python3\n#\n# [413] arithmetic-slices\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.640350878238678,
"alphanum_fraction": 0.719298243522644,
"avg_line_length": 15.428571701049805,
"blob_id": "542d011560ca1cae0a3abcb9b52d682299b3e554",
"content_id": "a26d30ce5069522d54c50eb3df6c4a237f805c34",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 114,
"license_type": "permissive",
"max_line_length": 45,
"num_lines": 7,
"path": "/codes_auto/1032.satisfiability-of-equality-equations.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1032 lang=python3\n#\n# [1032] satisfiability-of-equality-equations\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6111111044883728,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 12,
"blob_id": "b90127c9c6e3100dc164add7542e553e37db359b",
"content_id": "f421050ff213cd0c566d8e8293a14e7f0e8565d4",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 90,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/28.implement-strstr.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=28 lang=python3\n#\n# [28] implement-strstr\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6290322542190552,
"alphanum_fraction": 0.7016128897666931,
"avg_line_length": 16.85714340209961,
"blob_id": "3e9dd87d22e81b05df19e61ad13196bc9ad4878d",
"content_id": "e0f7f7c6b2449b5b73fa5cf16b018fd54da6def2",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 124,
"license_type": "permissive",
"max_line_length": 55,
"num_lines": 7,
"path": "/codes_auto/1514.minimum-value-to-get-positive-step-by-step-sum.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1514 lang=python3\n#\n# [1514] minimum-value-to-get-positive-step-by-step-sum\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5777778029441833,
"alphanum_fraction": 0.6555555462837219,
"avg_line_length": 12,
"blob_id": "6fece476898eeb36bbc26899ae44cfa8b5c8a42e",
"content_id": "2996eaccf050df133af1fd89c41de4c562fcd9f0",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 90,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/871.keys-and-rooms.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=871 lang=python3\n#\n# [871] keys-and-rooms\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6666666865348816,
"alphanum_fraction": 0.7054263353347778,
"avg_line_length": 17.571428298950195,
"blob_id": "a5e0dc6ae4318a429f900f9da810ff814945923b",
"content_id": "0834a93be17aaf457a1efbd47210c18326ace614",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 129,
"license_type": "permissive",
"max_line_length": 62,
"num_lines": 7,
"path": "/codes_auto/34.find-first-and-last-position-of-element-in-sorted-array.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=34 lang=python3\n#\n# [34] find-first-and-last-position-of-element-in-sorted-array\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6465517282485962,
"alphanum_fraction": 0.7068965435028076,
"avg_line_length": 15.714285850524902,
"blob_id": "dee8d7db2b25f593dd360ac43ba953e3516e7805",
"content_id": "adffdcf09663e5fdd88c84af98851e76c2d3d5ed",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 116,
"license_type": "permissive",
"max_line_length": 48,
"num_lines": 7,
"path": "/codes_auto/103.binary-tree-zigzag-level-order-traversal.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=103 lang=python3\n#\n# [103] binary-tree-zigzag-level-order-traversal\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6063829660415649,
"alphanum_fraction": 0.6808510422706604,
"avg_line_length": 12.571428298950195,
"blob_id": "767fd1bd19c22b5c4b4c588231c22061ef38cff7",
"content_id": "d998f9835178ed7a12c19a5b265fb378e463eb91",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 94,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/739.daily-temperatures.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=739 lang=python3\n#\n# [739] daily-temperatures\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.634782612323761,
"alphanum_fraction": 0.695652186870575,
"avg_line_length": 15.571428298950195,
"blob_id": "12576fb472672ffd22d3260ad0b96c03c2ade7ae",
"content_id": "0675b985858300f13cf422bfb009585a1c1a5a20",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 115,
"license_type": "permissive",
"max_line_length": 47,
"num_lines": 7,
"path": "/codes_auto/378.kth-smallest-element-in-a-sorted-matrix.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=378 lang=python3\n#\n# [378] kth-smallest-element-in-a-sorted-matrix\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.602150559425354,
"alphanum_fraction": 0.6774193644523621,
"avg_line_length": 12.428571701049805,
"blob_id": "ab734ed533112ee782f26fba242bc4a38141d2d5",
"content_id": "6b8369a3cb897d85e8ffabb71b367779dd25d310",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 93,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/901.advantage-shuffle.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=901 lang=python3\n#\n# [901] advantage-shuffle\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5978260636329651,
"alphanum_fraction": 0.6739130616188049,
"avg_line_length": 12.285714149475098,
"blob_id": "9112e7a27cb3d5bcc649a16e7944de158fad7b40",
"content_id": "e649b8a2e7af8313f6816ceb2e80b7de9ad0f7df",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 92,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/461.hamming-distance.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=461 lang=python3\n#\n# [461] hamming-distance\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6138613820075989,
"alphanum_fraction": 0.6831682920455933,
"avg_line_length": 13.571428298950195,
"blob_id": "3b36da0f05317499f9e0b62c52c7af3644ee789d",
"content_id": "869effe54c36bbafc85c8c3ef7a7f07b6865efd3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 101,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/209.minimum-size-subarray-sum.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=209 lang=python3\n#\n# [209] minimum-size-subarray-sum\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5869565010070801,
"alphanum_fraction": 0.6630434989929199,
"avg_line_length": 12.285714149475098,
"blob_id": "4b70ea4c2c6e0a680008667d5897204f000f0cb4",
"content_id": "aff642bc72ed24a03d8f2634e5a1c7d821d69607",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 92,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/337.house-robber-iii.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=337 lang=python3\n#\n# [337] house-robber-iii\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6160714030265808,
"alphanum_fraction": 0.6964285969734192,
"avg_line_length": 15.142857551574707,
"blob_id": "be420a18b120ecdeca1bbb3ae90f4152da07b751",
"content_id": "9a1d87f7beb4ff12a8418c48afff81839c503df9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 112,
"license_type": "permissive",
"max_line_length": 43,
"num_lines": 7,
"path": "/codes_auto/1638.best-position-for-a-service-centre.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1638 lang=python3\n#\n# [1638] best-position-for-a-service-centre\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6111111044883728,
"alphanum_fraction": 0.6759259104728699,
"avg_line_length": 14.571428298950195,
"blob_id": "d6de5282c8034fbf7d43a683b2d99d891c48a92b",
"content_id": "d715de666751c81e554fd602b1ec05109bc2b234",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 108,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/167.two-sum-ii-input-array-is-sorted.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=167 lang=python3\n#\n# [167] two-sum-ii-input-array-is-sorted\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6168224215507507,
"alphanum_fraction": 0.7009345889091492,
"avg_line_length": 14.428571701049805,
"blob_id": "28a2c1c8cb2819a13e2c2696221617ae90a784a5",
"content_id": "aaf78da5aeab1a8226ae009326f651a07359f25a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 107,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1209.design-bounded-blocking-queue.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1209 lang=python3\n#\n# [1209] design-bounded-blocking-queue\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6597222089767456,
"alphanum_fraction": 0.7222222089767456,
"avg_line_length": 19.714284896850586,
"blob_id": "af9e799521da3edf4cc81ba4fd2e4b95e314bdd6",
"content_id": "52801cfaa0828d34dc49f0a866e08bd04aa20623",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 144,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 7,
"path": "/codes_auto/1575.maximum-area-of-a-piece-of-cake-after-horizontal-and-vertical-cuts.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1575 lang=python3\n#\n# [1575] maximum-area-of-a-piece-of-cake-after-horizontal-and-vertical-cuts\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6290322542190552,
"alphanum_fraction": 0.7016128897666931,
"avg_line_length": 16.85714340209961,
"blob_id": "a03eed5a9d2f9a5690297f7cdfdb331edf3f9f55",
"content_id": "864514fe59e67859b4ca3142742951d6ee51fbe1",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 124,
"license_type": "permissive",
"max_line_length": 55,
"num_lines": 7,
"path": "/codes_auto/1570.final-prices-with-a-special-discount-in-a-shop.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1570 lang=python3\n#\n# [1570] final-prices-with-a-special-discount-in-a-shop\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6153846383094788,
"alphanum_fraction": 0.692307710647583,
"avg_line_length": 15.857142448425293,
"blob_id": "1d459ff72633e0e307ffd08ec9e21bed158b3040",
"content_id": "c40924f31b97fc6acdde6fc7f7b9ac4fb01900ff",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 117,
"license_type": "permissive",
"max_line_length": 48,
"num_lines": 7,
"path": "/codes_auto/1676.minimum-number-of-days-to-eat-n-oranges.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1676 lang=python3\n#\n# [1676] minimum-number-of-days-to-eat-n-oranges\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6548672318458557,
"alphanum_fraction": 0.7168141603469849,
"avg_line_length": 15.285714149475098,
"blob_id": "5a3830a3aaca9e3da2ad34118e357c5c1e9c936c",
"content_id": "0a8b883bf0124b557c486bc0a0065b7d7d3d5eaa",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 113,
"license_type": "permissive",
"max_line_length": 45,
"num_lines": 7,
"path": "/codes_auto/581.shortest-unsorted-continuous-subarray.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=581 lang=python3\n#\n# [581] shortest-unsorted-continuous-subarray\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6145833134651184,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 12.857142448425293,
"blob_id": "58309fd13e46d8e3f7d07c6ddd1efa7cad1fbd3a",
"content_id": "4b5efcd1bd362e89166a967234603b0f3b05a067",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 96,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/92.reverse-linked-list-ii.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=92 lang=python3\n#\n# [92] reverse-linked-list-ii\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5922330021858215,
"alphanum_fraction": 0.6796116232872009,
"avg_line_length": 13.857142448425293,
"blob_id": "ec41de588faf1fde46e4e0c1a761b375eae03292",
"content_id": "5598174c7ac1fff18deeb8a6b60e7de4e2fa33fa",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 103,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1610.xor-operation-in-an-array.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1610 lang=python3\n#\n# [1610] xor-operation-in-an-array\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.603960394859314,
"alphanum_fraction": 0.6732673048973083,
"avg_line_length": 13.571428298950195,
"blob_id": "715e84f46c3d5da32e274effcbfd6c4c5bf5c3d7",
"content_id": "8e7c73e033a4c645d000dc31c28366bc336086dd",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 101,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/151.reverse-words-in-a-string.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=151 lang=python3\n#\n# [151] reverse-words-in-a-string\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5784313678741455,
"alphanum_fraction": 0.6764705777168274,
"avg_line_length": 13.714285850524902,
"blob_id": "c486ccc8e7513d101d6d34ef77d5a42128ae0ef5",
"content_id": "9afc14180d94d9b374076c870a7a0c3c2e583bff",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 102,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 7,
"path": "/codes_auto/1663.detect-cycles-in-2d-grid.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1663 lang=python3\n#\n# [1663] detect-cycles-in-2d-grid\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6276595592498779,
"alphanum_fraction": 0.6808510422706604,
"avg_line_length": 12.571428298950195,
"blob_id": "6769d621b5421dca2491e38a60df488528b20e45",
"content_id": "0fd792dbce6199300636c9e4ca327f7659885137",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 94,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/60.permutation-sequence.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=60 lang=python3\n#\n# [60] permutation-sequence\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.569767415523529,
"alphanum_fraction": 0.6511628031730652,
"avg_line_length": 11.428571701049805,
"blob_id": "5a516881aa2f7ef0c6c0d387f34b95b4dc27d6ea",
"content_id": "cfe60db78c9f082d3342a74d604719e4b5cbc506",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 86,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/494.target-sum.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=494 lang=python3\n#\n# [494] target-sum\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6060606241226196,
"alphanum_fraction": 0.6767676472663879,
"avg_line_length": 13.285714149475098,
"blob_id": "51253403e9b0bf28ebf64ed3aba7301a1a7cc7a2",
"content_id": "589670c167e10a9c175fd81774436f2bb3bdd7f2",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 99,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/432.all-oone-data-structure.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=432 lang=python3\n#\n# [432] all-oone-data-structure\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6339285969734192,
"alphanum_fraction": 0.6964285969734192,
"avg_line_length": 15.142857551574707,
"blob_id": "0daab74e72176f15422399a24d4bfe1e03e77060",
"content_id": "3f928a92a159bb56b92af405fd8fd3a9e8a01a2f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 112,
"license_type": "permissive",
"max_line_length": 44,
"num_lines": 7,
"path": "/codes_auto/153.find-minimum-in-rotated-sorted-array.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=153 lang=python3\n#\n# [153] find-minimum-in-rotated-sorted-array\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6239316463470459,
"alphanum_fraction": 0.7008547186851501,
"avg_line_length": 15.857142448425293,
"blob_id": "2d9ff9b297e7320bdb69ffdd36618812be4925cc",
"content_id": "191276c67546f22fb3d08dafc9c9bbc1a0bbbac7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 117,
"license_type": "permissive",
"max_line_length": 48,
"num_lines": 7,
"path": "/codes_auto/1421.find-numbers-with-even-number-of-digits.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=1421 lang=python3\n#\n# [1421] find-numbers-with-even-number-of-digits\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.5773195624351501,
"alphanum_fraction": 0.6597937941551208,
"avg_line_length": 13,
"blob_id": "534555e9504e6fb1860cf413e22cd01035ef266f",
"content_id": "4e088cbbb9251b48fbd0373f0ecbdc1e1f0a4a9e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 97,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/240.search-a-2d-matrix-ii.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=240 lang=python3\n#\n# [240] search-a-2d-matrix-ii\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6326530575752258,
"alphanum_fraction": 0.6836734414100647,
"avg_line_length": 13.142857551574707,
"blob_id": "cd1367fc67943918240726fb5f011f2c8def2fbd",
"content_id": "80bf363a60d5d422b18c5451462777d96b060d4b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 98,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/76.minimum-window-substring.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=76 lang=python3\n#\n# [76] minimum-window-substring\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6442307829856873,
"alphanum_fraction": 0.692307710647583,
"avg_line_length": 14,
"blob_id": "b5720bd47626039a06bc9e9763685dc238896970",
"content_id": "46fe7d777bb1cb733b428adcf04f35789cfb12a1",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 104,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 7,
"path": "/codes_auto/84.largest-rectangle-in-histogram.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=84 lang=python3\n#\n# [84] largest-rectangle-in-histogram\n#\nNone\n# @lc code=end"
},
{
"alpha_fraction": 0.6100000143051147,
"alphanum_fraction": 0.6800000071525574,
"avg_line_length": 13.428571701049805,
"blob_id": "63502253c9219ced63c925dd23647ccdb8f81777",
"content_id": "9a44455c2c38f5c773f7b9ee4bc4165d71c82b63",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 100,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 7,
"path": "/codes_auto/285.inorder-successor-in-bst.py",
"repo_name": "smartmark-pro/leetcode_record",
"src_encoding": "UTF-8",
"text": "#\n# @lc app=leetcode.cn id=285 lang=python3\n#\n# [285] inorder-successor-in-bst\n#\nNone\n# @lc code=end"
}
] | 369 |
kklw/data-modelling-with-postgres
|
https://github.com/kklw/data-modelling-with-postgres
|
17e6a99397c5bab92b506bc980967c993597c772
|
b976cc80ace82b472955948d7fab13894b601eb1
|
c98c90f098e011c8a6d721be86dcb194b127a548
|
refs/heads/master
| 2020-11-26T19:00:47.348156 | 2019-12-23T17:43:38 | 2019-12-23T17:43:38 | 229,179,798 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.4732585549354553,
"alphanum_fraction": 0.4732585549354553,
"avg_line_length": 32.920352935791016,
"blob_id": "68c123e97f840167688ef923a1e290caafe35827",
"content_id": "006ee7c9725a3f83ea0abab7bb1d741170aba707",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3833,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 113,
"path": "/sql_queries.py",
"repo_name": "kklw/data-modelling-with-postgres",
"src_encoding": "UTF-8",
"text": "# DROP TABLES\n\nsongplay_table_drop = \"DROP TABLE IF EXISTS songplay\"\nuser_table_drop = \"DROP TABLE IF EXISTS app_user\"\nsong_table_drop = \"DROP TABLE IF EXISTS song\"\nartist_table_drop = \"DROP TABLE IF EXISTS artist\"\ntime_table_drop = \"DROP TABLE IF EXISTS time\"\n\n# CREATE TABLES\n\nsongplay_table_create = (\"\"\"\nCREATE TABLE IF NOT EXISTS songplay(\n songplay_id SERIAL PRIMARY KEY,\n user_id int NOT NULL,\n song_id varchar,\n artist_id varchar,\n start_time timestamp,\n level varchar,\n session_id int,\n location text,\n user_agent text)\n\"\"\")\n\nuser_table_create = (\"\"\"\nCREATE TABLE IF NOT EXISTS app_user(\n user_id int NOT NULL,\n first_name varchar,\n last_name varchar,\n gender char,\n level varchar,\n PRIMARY KEY (user_id))\n\"\"\")\n\nsong_table_create = (\"\"\"\nCREATE TABLE IF NOT EXISTS song(\n song_id varchar NOT NULL,\n title varchar,\n artist_id varchar,\n year int,\n duration float,\n PRIMARY KEY (song_id))\n\"\"\")\n\nartist_table_create = (\"\"\"\nCREATE TABLE IF NOT EXISTS artist(\n artist_id varchar NOT NULL,\n name varchar,\n location varchar,\n latitude numeric,\n longitude numeric,\n PRIMARY KEY (artist_id))\n\"\"\")\n\ntime_table_create = (\"\"\"\nCREATE TABLE IF NOT EXISTS time(\n start_time timestamp NOT NULL,\n hour int,\n day int,\n week int,\n month int,\n year int,\n weekday varchar,\n PRIMARY KEY (start_time))\n\"\"\")\n\n# INSERT RECORDS\n\nsongplay_table_insert = (\"\"\"\nINSERT INTO songplay(start_time, user_id, level, artist_id, song_id, session_id, location, user_agent)\n VALUES(%s, %s, %s, %s, %s, %s, %s, %s)\n\"\"\")\n\nuser_table_insert = (\"\"\"\nINSERT INTO app_user(user_id, first_name, last_name, gender, level)\n VALUES(%s, %s, %s, %s, %s)\n ON CONFLICT (user_id) \n DO UPDATE SET level = excluded.level\n\"\"\")\n\nsong_table_insert = (\"\"\"\nINSERT INTO song(song_id, title, artist_id, year, duration)\n VALUES(%s, %s, %s, %s, %s)\n ON CONFLICT (song_id) \n DO NOTHING\n\"\"\")\n\nartist_table_insert = (\"\"\"\nINSERT INTO artist(artist_id, name, location, latitude, longitude)\n VALUES(%s, %s, %s, %s, %s)\n ON CONFLICT (artist_id) \n DO NOTHING\n\"\"\")\n\ntime_table_insert = (\"\"\"\nINSERT INTO time(start_time, hour, day, week, month, year, weekday)\n VALUES(%s, %s, %s, %s, %s, %s, %s)\n ON CONFLICT (start_time) \n DO NOTHING\n\"\"\")\n\n# FIND SONGS\n\nsong_select = (\"\"\"\nSELECT song.song_id, artist.artist_id FROM song \n JOIN artist ON song.artist_id=artist.artist_id\n WHERE song.title=%s AND artist.name=%s AND song.duration=%s;\n\"\"\")\n\n# QUERY LISTS\n\ncreate_table_queries = [songplay_table_create, user_table_create, song_table_create, artist_table_create,\n time_table_create]\ndrop_table_queries = [songplay_table_drop, user_table_drop, song_table_drop, artist_table_drop, time_table_drop]\n"
},
{
"alpha_fraction": 0.6396946310997009,
"alphanum_fraction": 0.6447837352752686,
"avg_line_length": 38.31999969482422,
"blob_id": "8bd1e1520014139d049f540e0dbfba548a2b71ca",
"content_id": "71d9ad1ef6f4cd43ae57281c727394bd627f85c3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1965,
"license_type": "no_license",
"max_line_length": 321,
"num_lines": 50,
"path": "/README.md",
"repo_name": "kklw/data-modelling-with-postgres",
"src_encoding": "UTF-8",
"text": "# Data Modeling with Postgres\nProject from Udacity's Data Engineer Nano Degree. ETL pipeline to ingest songs and user activity data.\n\n# Setup\nProject is using python 3. Install dependencies:\n```\npip install -r requirements.txt\n```\n\nPostgres database is assumed to connect with these configs:\n```\nhost=127.0.0.1\ndbname=studentdb\nuser=student\npassword=student\n```\n\n# Run Project\n```\npython3 create_tables.py\npython3 etl.py\n```\n\n# Project Structure\n\n| Folder / File | Description |\n|------------------|-------------------------------------------------------------------|\n| data folder | Contains songs and user activity data. |\n| sql_queries.py | Sql commands. |\n| create_tables.py | Creates songplay, app_user, song, artist and time tables. |\n| etl.py | Process the files in data folder and stores the data in database. |\n\n\n# Implementation Details\nFirst, we create all the database tables. Next, we will perform ETL on the first dataset, `data/song_data`, to create the `song` and `artist` dimensional tables. Also, we will perform ETL on the second dataset, `data/log_data`, to create the `time` and `app_user` dimensional tables, as well as the `songplay` fact table.\n\n## Database Design\nThe denormalised star schema was choose to enable simplified queries. The fact table is songplay, and the 4 other tables are dimension tables.\n\n\n## ETL Process\n- Create all tables\n- Read song and data files\n- Song data processing\n - Extract song id, title, etc. Loads as song.\n - Extract artist idm artist name, etc. Loads as artist.\n- Log data processing\n - Extract start time and transform it to fields such as hour and day. Loads as time.\n - Extract user id, first name, etc. Loads as user.\n - Retrieve song id and artist id. Extract user id, level, etc. Loads as songplay."
}
] | 2 |
oleg45202/redis-rate-limiting-python-master-test-all
|
https://github.com/oleg45202/redis-rate-limiting-python-master-test-all
|
c814da2f29cc682fbd305fbbb3876407bf0f649a
|
c47ab6ab89f2685e0ebc22a1661ed5a21de18131
|
259b5186d4ccfeb9fe4b1453f705a0f9b48ae5af
|
refs/heads/master
| 2023-02-13T04:18:41.097763 | 2021-01-13T08:54:19 | 2021-01-13T08:54:19 | 329,241,336 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7892976403236389,
"alphanum_fraction": 0.7892976403236389,
"avg_line_length": 29,
"blob_id": "8701e80e0c2a1d6167f9a4b9964f21a94aa1982e",
"content_id": "cd956b73c2ec373ff9ed793e1724c1d06c931ae9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 299,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 10,
"path": "/index.py",
"repo_name": "oleg45202/redis-rate-limiting-python-master-test-all",
"src_encoding": "UTF-8",
"text": "import os\n\nfrom django.core.wsgi import get_wsgi_application\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django-backend.configuration.settings')\n\napp = get_wsgi_application()\n\n# os.system(\"python django-backend/manage.py collectstatic\")\n# os.system(\"python django-backend/manage.py runserver\")"
}
] | 1 |
dysomni/parking_camera_demo
|
https://github.com/dysomni/parking_camera_demo
|
d67a289a3792433b2f2e999ffd9e71cff44ee25c
|
b50b8b673527bc0a9020a1dc46d252f6e9b4fc74
|
20f1c100d81bd591a714472a7c401fecbaa806a7
|
refs/heads/master
| 2020-09-03T01:02:22.421114 | 2019-11-03T18:29:11 | 2019-11-03T18:29:11 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6067561507225037,
"alphanum_fraction": 0.6500649452209473,
"avg_line_length": 21.871286392211914,
"blob_id": "bc466a4c8055089859566b7da7b724b57f879c65",
"content_id": "6c55d8d2c7ad1761aa157315c5a4047dfc87f0b6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2309,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 101,
"path": "/src/__main__.py",
"repo_name": "dysomni/parking_camera_demo",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport cv2\n\n# np.set_printoptions(threshold=298359238592385)\n\ncv2.namedWindow(\"frame\")\n\nhue_low = 21\nsat_low = 68\nval_low = 59\nhue_high = 56\nsat_high = 237\nval_high = 222\n\ndef color_low_action(x):\n global hue_low\n hue_low = x\n\ndef color_high_action(x):\n global hue_high\n hue_high = x\n\n\ndef sat_low_action(x):\n global sat_low\n sat_low = x\n\n\ndef sat_high_action(x):\n global sat_high\n sat_high = x\n\n\ndef val_low_action(x):\n global val_low\n val_low = x\n\n\ndef val_high_action(x):\n global val_high\n val_high = x\n\ncap = cv2.VideoCapture(0)\n\n\n\ncv2.createTrackbar('color low', 'frame', 0, 255, color_low_action)\ncv2.setTrackbarPos('color low', 'frame', hue_low)\ncv2.createTrackbar('color high', 'frame', 0, 255, color_high_action)\ncv2.setTrackbarPos('color high', 'frame', hue_high)\n\ncv2.createTrackbar('sat low', 'frame', 0, 255, sat_low_action)\ncv2.setTrackbarPos('sat low', 'frame', sat_low)\ncv2.createTrackbar('sat high', 'frame', 0, 255, sat_high_action)\ncv2.setTrackbarPos('sat high', 'frame', sat_high)\n\ncv2.createTrackbar('val low', 'frame', 0, 255, val_low_action)\ncv2.setTrackbarPos('val low', 'frame', val_low)\ncv2.createTrackbar('val high', 'frame', 0, 255, val_high_action)\ncv2.setTrackbarPos('val high', 'frame', val_high)\n\n\ndef get_mask(img):\n img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n lower_color = np.array([hue_low, sat_low, val_low])\n upper_color = np.array([hue_high, sat_high, val_high])\n ret = cv2.inRange(img, lower_color, upper_color)\n ret = cv2.cvtColor(ret, cv2.COLOR_GRAY2BGR)\n # print(ret)\n return ret\n\ndef white_pixel_count(img):\n count = 0\n # pixel_value = list(Image.open(img).s)\n for p in img:\n for pp in p:\n if pp[0] == 255:\n count += 1\n return count\n\nwhile(True):\n # Capture frame-by-frame\n ret, frame = cap.read()\n\n # Our operations on the frame come here\n # frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n frame = cv2.resize(frame, (500, 500))\n\n mask = get_mask(frame)\n print(white_pixel_count(mask))\n\n # Display the resulting frame\n cv2.imshow('frame1',frame)\n cv2.imshow('frame2', mask)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n# When everything done, release the capture\ncap.release()\ncv2.destroyAllWindows()"
}
] | 1 |
dcos-labs/dcos-hivemq-service
|
https://github.com/dcos-labs/dcos-hivemq-service
|
46dd7670bfb345714830970e7adce8f6831d6fb6
|
37a412ad9d0b3755f4decae9bb0787ac48258e0e
|
c937dba5256aa2c6709f5d158acb6918a0dfbeb2
|
refs/heads/master
| 2020-03-17T18:34:38.556478 | 2018-06-18T19:47:47 | 2018-06-18T19:47:47 | 133,827,115 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6693877577781677,
"alphanum_fraction": 0.6751020550727844,
"avg_line_length": 31.236841201782227,
"blob_id": "77c6ab8556319447a9ddb9fefcdf65522f11b69d",
"content_id": "9289461042c4569474ddd0a4aef8260dcf0b740f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1225,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 38,
"path": "/hivemq/tools/universe/test_package_builder.py",
"repo_name": "dcos-labs/dcos-hivemq-service",
"src_encoding": "UTF-8",
"text": "import pytest\nfrom .package_builder import UniversePackageBuilder\nfrom .package import Package\n\n\ndef test_non_existent_input_dir_raises_exception():\n with pytest.raises(Exception) as e:\n UniversePackageBuilder(None, None, '__SHOULD_NOT_EXIST__', '.', [])\n\n assert \"Provided package path is not a directory: __SHOULD_NOT_EXIST__\" in str(\n e.value)\n\n\ndef test_empty_input_dir_raises_exception():\n with pytest.raises(Exception) as e:\n UniversePackageBuilder(None, None, 'resources/empty', '.', [])\n\n assert \"Provided package path does not contain the expected package files: resources/empty\" in str(\n e.value)\n\n\ndef test_hivemq_service_(mocker):\n\n package_json = {\n 'name': 'hivemq',\n 'version': '1.2.3',\n 'releaseVersion': 0\n }\n package = Package(\"hivemq\", \"stub-universe\")\n package_manager = mocker.Mock()\n\n package_manager.get_latest = mocker.MagicMock(return_value=Package.from_json(package_json))\n\n upb = UniversePackageBuilder(package, package_manager, 'resources/hivemq', ',', [])\n\n hivemq_mapping = upb._get_hivemq_mapping_for_content(\"\")\n assert 'upgrades-from' in hivemq_mapping\n assert hivemq_mapping['upgrades-from'] == \"1.2.3\"\n"
},
{
"alpha_fraction": 0.6944444179534912,
"alphanum_fraction": 0.7083333134651184,
"avg_line_length": 22.66666603088379,
"blob_id": "32ee854281737c721f11a48725bc782a48458ac9",
"content_id": "2fee9b7828e1eb62c86f476d4df5a88677ad4f13",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 72,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 3,
"path": "/hivemq/tests/config.py",
"repo_name": "dcos-labs/dcos-hivemq-service",
"src_encoding": "UTF-8",
"text": "PACKAGE_NAME = 'hivemq'\nSERVICE_NAME = 'hivemq'\nDEFAULT_TASK_COUNT = 1\n\n"
},
{
"alpha_fraction": 0.6934046149253845,
"alphanum_fraction": 0.6951871514320374,
"avg_line_length": 32,
"blob_id": "e856b13af9d5f49723322226c1be471d6d4b83f2",
"content_id": "3d9d2f6792888f10b55a4f068b3e547b4614eab2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 561,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 17,
"path": "/hivemq/build.sh",
"repo_name": "dcos-labs/dcos-hivemq-service",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env bash\nset -e\n\nFRAMEWORK_DIR=\"$( cd \"$( dirname \"${BASH_SOURCE[0]}\" )\" && pwd )\"\nREPO_ROOT_DIR=.\n\n# Build/test our scheduler.zip\n${REPO_ROOT_DIR}/gradlew -p ${FRAMEWORK_DIR} check distZip\n\n# Build package with our scheduler.zip and the local SDK artifacts we built:\nHIVEMQ_DOCUMENTATION_PATH=\"http://YOURNAMEHERE.COM/DOCS\" \\\nHIVEMQ_ISSUES_PATH=\"http://YOURNAMEHERE.COM/SUPPORT\" \\\n$REPO_ROOT_DIR/tools/build_package.sh \\\n hivemq \\\n $FRAMEWORK_DIR \\\n -a \"$FRAMEWORK_DIR/build/distributions/$(basename $FRAMEWORK_DIR)-scheduler.zip\" \\\n $@\n"
},
{
"alpha_fraction": 0.75,
"alphanum_fraction": 0.75,
"avg_line_length": 27,
"blob_id": "e57caea183253b852f2dabac9d88c3c07aa84dbe",
"content_id": "73cf8794a789e85ceb1be586daaea51115e5325b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Gradle",
"length_bytes": 28,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 1,
"path": "/hivemq/settings.gradle",
"repo_name": "dcos-labs/dcos-hivemq-service",
"src_encoding": "UTF-8",
"text": "rootProject.name = 'hivemq'\n"
}
] | 4 |
marron120/KeepKalm
|
https://github.com/marron120/KeepKalm
|
a7919455ad23db554cfe12f832f50de0e87e6063
|
4bd0310b081a466bcee51f48e06dbf069f7c9892
|
85f2fb9c2ed51034ee93b177b9c8417c7b3e5218
|
refs/heads/master
| 2020-07-03T15:31:07.352817 | 2019-08-16T15:29:03 | 2019-08-16T15:29:03 | 201,953,086 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6306461095809937,
"alphanum_fraction": 0.6455117464065552,
"avg_line_length": 30.23214340209961,
"blob_id": "b2e1114ec70eaf0b81e02ef51a470e16c1f72467",
"content_id": "a0fd93c585742dfd8485ee15998214b8303b59a8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1749,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 56,
"path": "/keepkalm/main.py",
"repo_name": "marron120/KeepKalm",
"src_encoding": "UTF-8",
"text": "import webapp2\nimport jinja2\nimport os\n\njinja_env = jinja2.Environment(\n loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),\n extensions=['jinja2.ext.autoescape'],\n autoescape=True)\n\n\nclass KeepKalm(webapp2.RequestHandler):\n def get(self):\n t = jinja_env.get_template('/templates/index.html')\n ()\n self.response.write(t.render())\n\n def post(self):\n t = jinja_env.get_template('/templates/index.html')\n self.response.write(t.render())\n\nclass Response(webapp2.RequestHandler):\n def get(self):\n t = jinja_env.get_template('/templates/index.html')\n seed_data()\n self.response.write(t.render())\n\n def post(self):\n option=self.request.get('option')\n option2=self.request.get('option2')\n option3=self.request.get('option3')\n option4=self.request.get('option4')\n option5=self.request.get('option5')\n\n\n d = {'option':option,'option2':option2,'option3':option3,'option4':option4,'option5':option5}\n t = jinja_env.get_template('/templates/result.html')\n self.response.write(t.render(d))\n\n\nclass MainPage(webapp2.RequestHandler):\n def get(self):\n welcome_template = jinja_env.get_template('templates/front.html')\n self.response.write(welcome_template.render({'animate': True})) # the response\n\nclass WelcomePage(webapp2.RequestHandler):\n def get(self):\n welcome_template = jinja_env.get_template('templates/front.html')\n self.response.write(welcome_template.render({'animate': False})) # the response\n\n\napp = webapp2.WSGIApplication([\n ('/', MainPage),\n ('/welcome', WelcomePage),\n ('/keepkalm', KeepKalm),\n ('/response', Response),\n ], debug=True)\n"
},
{
"alpha_fraction": 0.6122449040412903,
"alphanum_fraction": 0.6122449040412903,
"avg_line_length": 28.5,
"blob_id": "e4c9d8ccb33e081cab0a8831d105c2112f31532b",
"content_id": "bb0fba8f6f76f1150ca6a1d49f44f7999cf86a69",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 294,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 10,
"path": "/KeepKalm/keepkalm/scripts/script.js",
"repo_name": "marron120/KeepKalm",
"src_encoding": "UTF-8",
"text": "function Respond()\n{\n text = document.forms['suggest']['text'];\n\n if(text.value != \"Add Suggestion Here\" && text.value != \"\")\n {\n element = document.querySelector('#response');\n element.innerHTML = \"Your suggestion has been received. Thank you for your time.\" \n }\n}"
}
] | 2 |
Mercer01/py-speedtest
|
https://github.com/Mercer01/py-speedtest
|
92799d724c3c7ea2f65cf40781c28016a9e530ec
|
17daf6e04b54ff1c83c217562a59e376fa079213
|
91b85a02fa65bf8cd418de933f8773e888286910
|
refs/heads/master
| 2020-03-26T18:57:36.955129 | 2018-08-18T17:56:07 | 2018-08-18T17:56:07 | 145,241,310 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5448818802833557,
"alphanum_fraction": 0.5480315089225769,
"avg_line_length": 23.461538314819336,
"blob_id": "702a7e9045b01e07498d2d9f006786f9d7532ff6",
"content_id": "9b7208c0b524b4fefb5dd20836ed82a1ecc79aec",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 635,
"license_type": "no_license",
"max_line_length": 140,
"num_lines": 26,
"path": "/test.py",
"repo_name": "Mercer01/py-speedtest",
"src_encoding": "UTF-8",
"text": "import csv\nimport speedtest\nimport time\n# import twitter\n \ndef test():\n while True:\n s = speedtest.Speedtest()\n s.get_best_server()\n s.download()\n s.upload()\n s.results.share()\n\n results_dict = s.results.dict()\n print(results_dict)\n\n results_dict[\"download\"]\n with open(\"data.csv\",\"a\",newline='') as out_file:\n out_file = csv.writer(out_file)\n out_file.writerow((time.strftime(\"%a, %d %b %Y %H:%M:%S\"),results_dict[\"download\"],results_dict[\"upload\"],results_dict[\"ping\"]))\n\n time.sleep(10)\n\n \nif __name__ == '__main__':\n test()"
}
] | 1 |
tgavs/python-challenge2
|
https://github.com/tgavs/python-challenge2
|
88f40d21beedc08a3f8fe930b627506d3f64608f
|
62ce57e681994a9ef1cd23416419e76d41916f86
|
d614d6fd370ce342546c90dd1ad4cb9c705fb3b8
|
refs/heads/master
| 2020-04-09T21:39:23.565185 | 2018-12-06T02:49:44 | 2018-12-06T02:49:44 | 160,608,603 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.577151358127594,
"alphanum_fraction": 0.5825914740562439,
"avg_line_length": 19.92708396911621,
"blob_id": "619c6044e70012b90ccd7e4d717371a980a3f5ca",
"content_id": "2e7746c3d30335ade727afc099ec34e4fd7aee5a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2022,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 96,
"path": "/PyPoll/main.py",
"repo_name": "tgavs/python-challenge2",
"src_encoding": "UTF-8",
"text": "import os\nimport csv\n\n\nvoterid=[]\ncounty=[]\ncandidates=[]\nunique=[]\n\n\n#set the path where the raw data is stored\n\npath=os.path.join('Resources','election_data.csv')\n\n#read the raw data\n\nwith open(path) as raw_data:\n\n reader=csv.reader(raw_data)\n\n header=next(reader)\n\n #set the dataframe\n\n for row in reader:\n\n voterid.append(row[0])\n county.append(row[1])\n candidates.append(row[2])\n\n# unique candidates list whith list comprenhensions\n[unique.append(i) for i in candidates if not unique.count(i)]\n\n#calculate the total votes\ntotalvotes=len(voterid)\n\n#calculate the total votes and the percent of votes for each candidate\n\nvotes=[candidates.count(candidate) for candidate in unique]\npct_votes=[candidates.count(candidate)/totalvotes for candidate in unique]\n\n#Print the results in terminal\n\nprint('-----------------Electoral Results------------------------------')\n\nprint('\\n')\n\nprint(f'Total Voters: {totalvotes:,}')\n\nprint('\\n')\n\nprint('------------------Votes by Candidate----------------------------')\n\nprint('\\n')\n\nfor i in range(len(unique)):\n\n print(f'Candidate:{unique[i]}, votes: {pct_votes[i]*100:.2f}% ({votes[i]:,})')\n\nprint('\\n')\n\nprint('---------------------Final result--------------------------------')\n\nprint(f'Winner:{unique[pct_votes.index(max(pct_votes))]}')\n\nprint('\\n')\n\n# Print the results in .txt\n\nreport=open(\"Elections_Report.txt\",\"w\")\n\nreport.write('-----------------Electoral Results------------------------------\\n')\n\nreport.write('\\n')\n\nreport.write(f'Total Voters: {totalvotes:,}\\n')\n\nreport.write('\\n')\n\nreport.write('------------------Votes by Candidate----------------------------\\n')\n\nreport.write('\\n')\n\nfor i in range(len(unique)):\n\n report.write(f'Candidate:{unique[i]}, votes: {pct_votes[i]*100:.2f}% ({votes[i]:,})\\n')\n\nreport.write('\\n')\n\nreport.write('---------------------Final result--------------------------------\\n')\n\nreport.write(f'Winner:{unique[pct_votes.index(max(pct_votes))]}\\n')\n\nreport.write('\\n')\n\nreport.close()\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.6393577456474304,
"alphanum_fraction": 0.6475915908813477,
"avg_line_length": 23.969072341918945,
"blob_id": "98588d05f9669aeecd995341a55951e220b6ce30",
"content_id": "8e9b43dd698774618ff86115a10d18b96b989f4a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2429,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 97,
"path": "/PyBank/main.py",
"repo_name": "tgavs/python-challenge2",
"src_encoding": "UTF-8",
"text": "# PyBank Solution\n\n#import libraries\n\nimport os\nimport csv\n\n# define lists to build\n\nmonths=[]\nprofit_losses=[]\nindex=[]\nshift=[] # moves the profit_losses data from the position n to the n+1\nchange=[] # to calculate the change between profit_losses and shifht\n\n#control variable\n\ni=0\n\n# data path to read\nrd_path= os.path.join('Resources','budget_data.csv')\n\n#read data\n\nwith open(rd_path) as raw_data:\n\n reader=csv.reader(raw_data)\n\n header=next(reader)\n\n\n# build data frame\n for row in reader:\n\n profit_losses.append(float(row[1]))\n months.append(row[0])\n index.append(i) \n i+=1\n\n\n# build the shifted profit column to calculate changes\n# it will pass the element n to the possition n+1\n# thats why the shift list starts in cero\n shift.append(0)\n\n for j in range(0,len(profit_losses)-1):\n\n shift.append(profit_losses[j]) \n\n# calculates the change for each profit row vs the previous one\n for i in index:\n\n change.append(profit_losses[i]-shift[i])\n\n#the first row is set to cero because the first element has no previous element to compare\n change[0]=0\n\n \n#calculate statistics\n\n totalmonths= len(months)\n totalp_l = sum(profit_losses)\n av_change= sum(change)/(len(change)-1) #the average should not take into account the first element\n maxlist_value= max(change)\n maxlist_date=months[change.index(max(change))]\n minlist_value=min(change)\n minlist_date=months[change.index(min(change))]\n\n#Print in terminal\n\n\n print('Financial Analysis')\n\n print('-------------------------------------')\n\n print(f'Total Months: {totalmonths}')\n print(f'Total: ${totalp_l:,.2f}')\n print(f'Average Change: ${av_change:,.2f}')\n print(f'Greatest Increase in Profits: {maxlist_date } ${maxlist_value:,.2f}')\n print(f'Greatest Decrease in Profits: {minlist_date } ${minlist_value:,.2f}')\n\n\n#Print the report in .txt\n\n report=open('PyBankReport.txt','w')\n\n report.write('Financial Analysis\\n')\n\n report.write('-------------------------------------\\n')\n\n report.write(f'Total Months: {totalmonths}\\n')\n report.write(f'Total: ${totalp_l:,.2f}\\n')\n report.write(f'Average Change: ${av_change:,.2f}\\n')\n report.write(f'Greatest Increase in Profits: {maxlist_date } ${maxlist_value:,.2f}\\n')\n report.write(f'Greatest Decrease in Profits: {minlist_date } ${minlist_value:,.2f}\\n')\n \n report.close()\n\n\n\n\n\n\n\n"
}
] | 2 |
olivia-char/promMate
|
https://github.com/olivia-char/promMate
|
05e74f2accd4efca1ae685261ad727a212fd4246
|
f1bc7a1cd714e9d7e89eab3e570a436da5d8cfb8
|
06791761d854380a691e657f283b963cc5844d0e
|
refs/heads/master
| 2020-05-22T10:00:05.101602 | 2017-03-12T00:14:24 | 2017-03-12T00:14:24 | 84,688,583 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6149584650993347,
"alphanum_fraction": 0.6149584650993347,
"avg_line_length": 26.846153259277344,
"blob_id": "635f1b2231df8e0e382b65c1485324fab4d7f0dc",
"content_id": "ce4c5dbffbea5ed634e11ccf0350a76f0da0360f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 361,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 13,
"path": "/apps/myApp/urls.py",
"repo_name": "olivia-char/promMate",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import url\nfrom . import views \n\nurlpatterns = [\n url(r'^$', views.index),\n url(r'^register$', views.register),\n url(r'^login$', views.login),\n url(r'^homePage$', views.homePage),\n url(r'^cart$', views.cart),\n url(r'^userPage$', views.userPage),\n url(r'^match$', views.match),\n url(r'^addDress$', views.addDress),\n]"
},
{
"alpha_fraction": 0.7002328038215637,
"alphanum_fraction": 0.7054714560508728,
"avg_line_length": 25.030303955078125,
"blob_id": "d7b45d6a44ad4ab135a45ffeafa690039d0d3852",
"content_id": "8325c9e1b0292432c2c803ebffef4385f100f371",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1718,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 66,
"path": "/apps/myApp/views.py",
"repo_name": "olivia-char/promMate",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render, redirect, HttpResponse\nfrom .models import User\nfrom django.contrib import messages\n\n# Create your views here.\n\ndef index(request):\n\tprint \"at index\"\n\treturn render(request, \"myApp/index.html\")\n\ndef register(request):\n\tprint \"data sent to register\"\n\tif request.method == 'POST':\n\t\tfname = request.POST['first_name']\n\t\tlname = request.POST['last_name']\n\t\temail = request.POST['email']\n\t\tpw = request.POST['password']\n\t\tcpw = request.POST['c_password']\n\n\tnewUser = User.userManager.register(fname, lname, email, pw, cpw) \n\tprint \"sent to db\"\n\n\tif newUser[0] == False:\n\t\tprint newUser[1]['errorMessage']\n\t\terrorMessage = newUser[1]['errorMessage']\n\t\tfor i in errorMessage:\n\t\t\tmessages.error(request, i)\n\t\treturn redirect('/')\n\telse:\n\t\trequest.session['id'] = newUser[1].id\n\t\tprint newUser[1].id\t\n\t\treturn redirect(\"/homePage\")\n\ndef login(request):\n\tprint \"login page\"\n\tif request.method == 'POST':\n \t\temail = request.POST['email']\n\t\tpassword = request.POST['password']\n\t\tloginUser = User.userManager.login(email, password)\n\n\t\tif loginUser[0] == False:\n\t\t\terrorMessage = loginUser[1]['errorMessage']\n\t\t\tfor i in errorMessage:\n\t\t\t\tmessages.error(request, i)\n\t\t\treturn redirect('/') \n\t\telse:\n\t\t\tprint loginUser[1]\n\t\t\trequest.session['id'] = loginUser[1].id \n\t\t\tprint \"What's in session\", request.session['id']\n\t\t\n\t\treturn redirect('/homePage')\n\ndef homePage(request):\n\treturn render(request, \"myApp/homepage.html\")\n\ndef cart(request):\n\treturn render(request, \"myApp/cart.html\")\n\ndef userPage(request):\n\treturn render(request, \"myApp/userpage.html\")\n\ndef match(request):\n\treturn render(request, \"myApp/match.html\")\n\ndef addDress(request):\n\treturn render(request, \"myApp/addDress.html\")\n"
},
{
"alpha_fraction": 0.6646923422813416,
"alphanum_fraction": 0.674234926700592,
"avg_line_length": 24.982906341552734,
"blob_id": "49f0c93b952b2f760f1f8a2aba4d4752be75a212",
"content_id": "87feeba0945f42bc2e9fbee806b8bcd4e16a4ae7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3039,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 117,
"path": "/apps/myApp/models.py",
"repo_name": "olivia-char/promMate",
"src_encoding": "UTF-8",
"text": "from __future__ import unicode_literals\nfrom django.db import models \nimport re\nimport time\n\n# Create your models here.\n\nNAME_REGEX = re.compile(r'^[a-zA-Z]+$')\nEMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\\.[a-zA-Z]+$')\n\n\nclass UserManager(models.Manager):\n\t\n\t# creates a new user\n\t\n\tdef register(self, first_name, last_name, email, password, c_password):\n\t\t\n\t\t# validations\n\n\t\tstatus = True \n\t\terror = []\n\n\t\tif len(first_name) < 1:\n\t\t\terror.append(\"First Name Required\")\n\t\t\tstatus = False\n\t\telif len(first_name) <= 2:\n\t\t\terror.append(\"First Name must be more than 2 characters\")\n\t\t\tstatus = False\n\t\telif not NAME_REGEX.match(first_name):\n\t\t\terror.append(\"First Name cannot contain numbers\")\n\t\t\tstatus = False\n\t\t\n\t\tif len(last_name) < 1:\n\t\t\terror.append(\"Last Name Required\")\n\t\t\tstatus = False\n\t\telif len(last_name) <= 2:\n\t\t\terror.append(\"Last Name must be more than 2 characters\")\n\t\t\tstatus = False\n\t\telif not NAME_REGEX.match(last_name):\n\t\t\terror.append(\"Last name cannot contain numbers\")\n\t\t\tstatus = False\n\t\t\n\t\tif len(email) < 1:\n\t\t\terror.append(\"Email Required\")\n\t\t\tstatus = False\n\t\telif not EMAIL_REGEX.match(email):\n\t\t\terror.append(\"Enter a vaild email\")\n\t\t\tstatus = False\n\t\telif len(User.userManager.filter(email=email)) > 0:\n\t\t\terror.append(\"Email already taken\")\n\t\t\tstatus = False\n\t\n\t\tif len(password) < 1:\n\t\t\terror.append(\"Password Required\")\n\t\t\tstatus = False\n\t\telif len(password) < 8:\n\t\t\terror.append(\"Password must be longer than 8 characters\")\n\t\t\tstatus = False\n\t\tif password != c_password:\n\t\t\terror.append(\"Passwords do not match\")\n\t\t\tstatus = False\n\t\t\n\t\tif status == False:\n\t\t\treturn (False, {'errorMessage': error}) \n\n\t\t# if user passes validations creates a new user id and sends to database\n\t\t# user is equal to an array\n\t\tif status == True:\n\t\t\treturn (True, self.create(first_name=first_name, last_name=last_name, email=email, password=password))\n\n\t#logs in existing user\n\n\tdef login(self, email, password):\n\t\t\n\t\tstatus = True \n\t\terror = []\n\n\t\t# validations for login \n\n\t\tif len(email) < 1:\n\t\t\terror.append(\"Email Required\")\n\t\t\tstatus = False\n\n\t\tif len(password) < 1:\n\t\t\terror.append(\"Password Required\")\n\t\t\tstatus = False\n\n\t\tif status == False:\n\t\t\treturn (False, {'errorMessage': error})\n\n\t\t# takes in login email and password looks for existing email and password together\n\t\t# user is equal to an array \t\n\t\t\n\t\tuser = User.userManager.filter(email=email, password=password)\n\t\t\n\t\tif len(user) > 0:\n\t\t\tprint \"in the models, got back user\", user \n\t\t\treturn (True, user[0]) \n\t\telse:\n\t\t\terror.append(\"cannot login\")\n\t\t\tstatus = False\n\t\t\treturn (False, {'errorMessage': error})\n\n\t\tprint email, password \n\t\t\n\t\tif status == True:\n\t\t\treturn (True, id)\n\n\t\t\t\nclass User(models.Model):\n\tfirst_name = models.CharField(max_length = 45)\n\tlast_name = models.CharField(max_length = 45)\n\temail = models.EmailField(max_length = 100)\n\tpassword = models.CharField(max_length = 100)\n\tcreated_at = models.DateTimeField(auto_now_add = True)\n\tupdated_at = models.DateTimeField(auto_now = True)\n\tuserManager = UserManager()"
}
] | 3 |
s183922/MooseJuice
|
https://github.com/s183922/MooseJuice
|
35f16ae4a1780bd6041333a1eca4a0fdf5f42d5a
|
3e613895246f97a56f8cefc9f0db9c4ef8a9a068
|
eda12d6d7596de85546a39a80fb41605c848928c
|
refs/heads/main
| 2023-02-24T15:15:52.209423 | 2021-02-01T17:32:50 | 2021-02-01T17:32:50 | 334,149,508 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.572638213634491,
"alphanum_fraction": 0.5798959732055664,
"avg_line_length": 36.411766052246094,
"blob_id": "694f316630abc5a6066ec14fb4a7256d06e47e30",
"content_id": "11b0e362ca5118cb93dac28d087cbd48ff2ec49b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8267,
"license_type": "no_license",
"max_line_length": 175,
"num_lines": 221,
"path": "/MooseJuice/utils.py",
"repo_name": "s183922/MooseJuice",
"src_encoding": "UTF-8",
"text": "from MooseJuice.models import User, Post, Prices, Goals\nimport pandas as pd\nimport numpy as np\nfrom MooseJuice import db\n\ndef getPrices():\n if Prices.query.all():\n beer_price = Prices.query.all()[-1].beer_price\n soda_price = Prices.query.all()[-1].soda_price\n else:\n beer_price, soda_price = 5, 5\n \n return beer_price, soda_price\ndef getNumber(string):\n try:\n return eval(string)\n except:\n return 0\n\ndef getQuery(user):\n try:\n user_post = user.posts[-1]\n except:\n user_post = None\n return user_post\n\ndef NewPost( form, id):\n beer_price, soda_price = getPrices()\n beers, sodas = None, None\n user = User.query.filter_by(id=id).first()\n user_post = getQuery(user)\n \n\n\n if form.formType == 'Purchase':\n beers = getNumber(form.amount_beer.data)\n sodas = getNumber(form.amount_soda.data)\n amount = beers * beer_price + sodas * soda_price\n user_balance = user_post.user_balance - amount if user_post else - amount\n moose_balance = Post.query.all()[-1].moose_balance\n\n if (beers and beers != 0) and (sodas and sodas != 0):\n message = f\"You bought {beers} {'beers' if beers > 1 else 'beer'} and {sodas} {'sodas' if sodas > 1 else 'soda'}!\"\n\n elif (beers and beers !=0):\n message = f\"You bought {beers} {'beers' if beers > 1 else 'beer'}!\"\n\n elif (sodas and sodas !=0):\n message = f\"You bought {sodas} {'sodas' if sodas > 1 else 'soda'}!\"\n\n elif form.formType == 'Transfer':\n amount = form.transfer.data\n user_balance = user_post.user_balance + amount if user_post else + amount\n\n if user_balance > 1000:\n return False, \"You cannot have more than 1000 kr in your Moose Account\"\n moose_balance = Post.query.all()[-1].moose_balance + amount\n message = \"Your account balance has been updated!\"\n\n\n \n\n return Post(post_type = form.formType, amount = amount, beers = beers, sodas = sodas,\n user_balance = user_balance, moose_balance = moose_balance, user_id = id), message\n\n\ndef getRoom(user):\n return f\"Room {user.room}\" if \"Guest\" not in user.room else f\"Guest {user.room.split('t')[-1]}\"\n\ndef getUserbalance(user):\n user = User.query.filter_by(id=user.id).first()\n latest_post = getQuery(user)\n if latest_post:\n category = (\"danger\" if latest_post.user_balance < 0 else \"success\") if user.status != 'admin' else (\"danger\" if Post.query.all()[-1].moose_balance < 0 else \"success\")\n user_balance = latest_post.user_balance if user.status != 'admin' else Post.query.all()[-1].moose_balance\n else:\n category = \"success\"\n user_balance = 0\n\n return category, user_balance\n\n\nclass activeTab:\n balance = [\"active\",\"show active\"]\n post = [\"\", \"\"]\n update = [\"\", \"\"]\n date = [\"active\", \"show active\"]\n week = [\"\", \"\"]\n month = [\"\", \"\"]\n\n active = [\"active\", \"show active\"]\n inactive = [\"\", \"\"]\n\n def updateTab(self, method):\n if method == \"balance\":\n self.balance = self.active\n self.update = self.inactive\n self.post = self.inactive\n elif method == \"update\":\n self.balance = self.inactive\n self.update = self.active\n self.post = self.inactive\n elif method == \"post\":\n self.post = self.active\n self.balance = self.inactive\n self.update = self.inactive\n elif method == \"date\":\n self.date = self.active\n self.week = self.inactive\n self.month = self.inactive\n elif method == \"week\":\n self.date = self.inactive\n self.week = self.active\n self.month = self.inactive\n elif method == \"month\":\n self.date = self.inactive\n self.week = self.inactive\n self.month = self.active\n\nTab = 
activeTab()\n\n\ndef getDF(user):\n Posts_order_asc = Post.query.filter_by(user_id = user.id).all()\n postHeadings = [\"Date\", \"Week\", \"Month\", \"Type\", \"Beers\", \"Sodas\", \"Amount\", \"Balance\"]\n if Posts_order_asc:\n PostsTable = [(p.date.strftime('%d-%m-%Y'), p.date.strftime(\"%V (%Y)\"), p.date.strftime(\"%B (%Y)\"),\n p.post_type, p.beers, p.sodas, p.amount, p.user_balance) for p in Posts_order_asc]\n\n else:\n PostsTable = [tuple([\"\" for _ in postHeadings]) for i in range(3)]\n\n df = pd.DataFrame(PostsTable, columns = postHeadings)\n return df\n \ndef groupBy(df, what):\n df_transfer = df[df[\"Type\"] == 'Transfer'].groupby([what]).sum()\n df_purchase = df[df[\"Type\"] == 'Purchase'].groupby([what]).sum()\n df_balance = df.groupby([what]).last()\n\n df_purchase[\"Transfer\"] = df_transfer[\"Amount\"]\n df_purchase[\"Balance\"] = df_balance[\"Balance\"]\n\n postHeadings = [what, \"Beers\", \"Sodas\", \"Total Price\", \"Transfer\", \"End of the day Balance\"]\n df = df_purchase.reset_index()[[what, \"Beers\",\"Sodas\", \"Amount\", \"Transfer\", \"Balance\"]].fillna(\"0\").applymap(str)\n\n \n\n return df.iloc[::-1]\n\n\ndef getDFPost():\n post = Post.query.all()\n postHeadings = [\"Date\", \"Username\", \"Status\", \"Type\", \"Amount\", \"Beers\", \"Sodas\", \"User Balance\", \"Moose Balance\"]\n\n post = [(p.date.strftime('%d-%m-%Y'), p.author.username, p.author.status, p.post_type, p.amount, p.beers, p.sodas, p.user_balance, p.moose_balance) for p in post]\n df = pd.DataFrame(post, columns = postHeadings)\n\n return df\n\nclass MooseStats:\n def __init__(self, df):\n\n post = Post.query.all()\n self.active = df[df[\"Status\"] == 'active']\n self.admin = df[df[\"Status\"] == 'admin']\n self.active_grouped = self.active.groupby([\"Username\"]).agg({'Beers': 'sum', 'Sodas': 'sum', 'User Balance': 'last'}).reset_index()\n self.in_da_bank = int(np.ceil(float(post[-1].moose_balance)))\n self.no_outstanding = int(np.ceil(float(self.in_da_bank - self.active_grouped[\"User Balance\"].sum())))\n\n self.category1 = \"danger\" if self.in_da_bank < 0 else \"info\"\n self.category2 = \"danger\" if self.no_outstanding < 0 else \"info\"\n self.beer_score = [(i + 1 , U[0], U[1]) for i, U in enumerate(self.active_grouped.sort_values([\"Beers\"], ascending = False)[[\"Beers\", \"Username\"]].values)]\n self.soda_score = [(U[0], U[1]) for U in self.active_grouped.sort_values([\"Sodas\"], ascending = False)[[\"Sodas\", \"Username\"]].values]\n\n self.score_table = [self.beer_score[i] + (\" \",) + self.soda_score[i] for i in range(len(self.beer_score))]\n\ndef updateProgress():\n mb = MooseStats(getDFPost()).no_outstanding\n goals = Goals.query.all()\n \n\n for goal in goals:\n \n goal.progress = max(0, min(100, (mb-1000)/goal.price *100 ))\n \n \n db.session.commit()\ndef updatePosts(stats = False):\n \n post_table = getDFPost()\n admin_post = post_table[post_table[\"Status\"] == 'admin'][1:]\n other_post = post_table[post_table[\"Status\"] != 'admin']\n\n posts = Post.query.all()\n edits = list(filter(None, [(i, P) if (P.comment != None and \"CHECKED\" not in P.comment) else None for i,P in enumerate(posts)]))\n if not stats:\n for i, p in edits:\n if p.author.status != 'admin':\n latest = list(filter(None, [user_p if user_p.date < p.date else None for user_p in p.author.posts]))[-1]\n if p.post_type == 'Transfer':\n p.user_balance = latest.user_balance + p.amount\n p.moose_balance = posts[i-1].moose_balance + p.amount\n else:\n p.user_balance = latest.user_balance - p.amount\n 
p.moose_balance = posts[i-1].moose_balance\n else:\n if p.post_type == 'Transfer':\n p.moose_balance = posts[i-1].moose_balance + p.amount\n else:\n p.moose_balance = posts[i-1].moose_balance - p.amount\n\n p.comment += \" CHECKED\"\n\n db.session.commit()\n\n else: \n beers = admin_post[[\"Beers\", \"Sodas\"]].sum()[\"Beers\"] - other_post[[\"Beers\", \"Sodas\"]].sum()[\"Beers\"]\n sodas = admin_post[[\"Beers\", \"Sodas\"]].sum()[\"Sodas\"] - other_post[[\"Beers\", \"Sodas\"]].sum()[\"Sodas\"]\n\n return int(beers), int(sodas)"
},
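The `groupBy` helper in utils.py above leans on pandas' split-apply-combine; a minimal, self-contained sketch of that pattern (the sample rows below are hypothetical, not the app's real data):

```python
import pandas as pd

# Hypothetical rows mirroring the (Date, Type, Amount) shape used in utils.py.
df = pd.DataFrame({
    "Date":   ["01-01", "01-01", "02-01"],
    "Type":   ["Purchase", "Transfer", "Purchase"],
    "Amount": [10.0, 50.0, 15.0],
})

# Sum purchases and transfers separately per day, as utils.groupBy does.
purchases = df[df["Type"] == "Purchase"].groupby("Date")["Amount"].sum()
transfers = df[df["Type"] == "Transfer"].groupby("Date")["Amount"].sum()
print(purchases.to_dict())  # {'01-01': 10.0, '02-01': 15.0}
print(transfers.to_dict())  # {'01-01': 50.0}
```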
{
"alpha_fraction": 0.6527672410011292,
"alphanum_fraction": 0.6667930483818054,
"avg_line_length": 38.98484802246094,
"blob_id": "6d082224f8e71f62de37fd44bf091a80acc9ba2e",
"content_id": "67fff2ca76df29803dbed0664d5cc6d1052a2e4b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2638,
"license_type": "no_license",
"max_line_length": 135,
"num_lines": 66,
"path": "/MooseJuice/models.py",
"repo_name": "s183922/MooseJuice",
"src_encoding": "UTF-8",
"text": "from datetime import datetime\nfrom MooseJuice import db, login_manager, admin\nfrom flask_login import UserMixin, current_user\nfrom flask_table import Table, Col\nfrom flask_admin.contrib.sqla import ModelView\n\n@login_manager.user_loader\ndef load_user(user_id):\n return User.query.get(int(user_id))\n\nclass User(db.Model, UserMixin):\n id = db.Column(db.Integer, primary_key=True)\n username = db.Column(db.String(20), unique=False, nullable=False)\n password = db.Column(db.String(60), unique=False, nullable=False)\n room = db.Column(db.String(10), unique=False, nullable=False)\n status = db.Column(db.String(10), unique=False, nullable=False, default = 'active')\n movein_date = db.Column(db.DateTime, nullable=False, default = datetime.utcnow)\n \n posts = db.relationship('Post', backref='author', lazy = True)\n\n def __repr__(self):\n return f\"User {self.id} {self.username} {self.room}\"\n \nclass Post(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n post_type = db.Column(db.String(20), nullable=False)\n date = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)\n amount = db.Column(db.Float, nullable=False, default = 0)\n\n beers = db.Column(db.Integer, default = 0)\n sodas = db.Column(db.Integer, default = 0)\n\n\n user_balance = db.Column(db.Float, default = 0)\n\n moose_balance = db.Column(db.Float, nullable=False, default = 0)\n\n\n comment = db.Column(db.String(100))\n\n\n user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)\n\n def __repr__(self):\n return f\"Post {self.id}: type = {self.post_type}, amount = {self.amount}, author = {self.author.username}, date = {self.date})\"\n\n\nclass Prices(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n date = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)\n beer_price = db.Column(db.Float, nullable=False)\n soda_price = db.Column(db.Float, nullable=False)\n account = db.Column(db.String(50), nullable=True, default = \"5396-560516\")\n\n\nclass Goals(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n item = db.Column(db.String(100), nullable=False)\n price = db.Column(db.Float, nullable=False)\n progress = db.Column(db.Integer, nullable=False, default = 0.0)\n image_file = db.Column(db.String(20), nullable=False, default='default.jpg')\n\nadmin.add_view(ModelView(User, db.session))\nadmin.add_view(ModelView(Post, db.session))\nadmin.add_view(ModelView(Prices, db.session))\nadmin.add_view(ModelView(Goals, db.session))"
},
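models.py wires User to Post through a `relationship`/`ForeignKey` pair with a `backref`; a minimal sketch of that one-to-many pattern (hypothetical model names, in-memory SQLite):

```python
from flask import Flask
from flask_sqlalchemy import SQLAlchemy

app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///:memory:"
db = SQLAlchemy(app)

class Author(db.Model):  # hypothetical stand-in for User
    id = db.Column(db.Integer, primary_key=True)
    items = db.relationship("Item", backref="author", lazy=True)

class Item(db.Model):    # hypothetical stand-in for Post
    id = db.Column(db.Integer, primary_key=True)
    author_id = db.Column(db.Integer, db.ForeignKey("author.id"), nullable=False)

with app.app_context():
    db.create_all()
    a = Author()
    db.session.add(a)
    db.session.add(Item(author=a))  # the backref makes Item.author assignable
    db.session.commit()
    print(len(a.items))  # 1 -- and Author.items is queryable the other way
```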
{
"alpha_fraction": 0.4888392984867096,
"alphanum_fraction": 0.6875,
"avg_line_length": 15.629630088806152,
"blob_id": "cf45f54823ec9fc49a6d336b40d3b1412e5c8834",
"content_id": "dc64a9185a6d78a64ee615f8616daad337f1ff27",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 448,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 27,
"path": "/requirements.txt",
"repo_name": "s183922/MooseJuice",
"src_encoding": "UTF-8",
"text": "Babel==2.9.0\nbcrypt==3.2.0\ncffi==1.14.4\nclick==7.1.2\ndnspython==2.1.0\nemail-validator==1.1.2\nFlask==1.1.2\nFlask-Admin==1.5.7\nFlask-Babel==2.0.0\nFlask-Bcrypt==0.7.1\nFlask-Login==0.5.0\nFlask-SQLAlchemy==2.4.4\nFlask-Table==0.5.0\nFlask-WTF==0.14.3\nidna==3.1\nitsdangerous==1.1.0\nJinja2==2.11.2\nMarkupSafe==1.1.1\nnumpy==1.19.5\npandas==1.1.5\npycparser==2.20\npython-dateutil==2.8.1\npytz==2020.5\nsix==1.15.0\nSQLAlchemy==1.3.22\nWerkzeug==1.0.1\nWTForms==2.3.3"
},
{
"alpha_fraction": 0.6148558855056763,
"alphanum_fraction": 0.6202384829521179,
"avg_line_length": 36.50621032714844,
"blob_id": "6a8d43e208245aad1b1769044a4c143969f4bd5c",
"content_id": "67f4306323f41b11017bf8d9a2ad56d8470f22ce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12076,
"license_type": "no_license",
"max_line_length": 217,
"num_lines": 322,
"path": "/MooseJuice/routes.py",
"repo_name": "s183922/MooseJuice",
"src_encoding": "UTF-8",
"text": "from MooseJuice.forms import RegistrationForm, LoginForm, UpdateUsernameForm, UpdatePasswordForm, PurchaseForm, TransferMoneyForm, MoveOutForm, TableForm, GoalForm\nfrom flask import render_template, url_for, flash, redirect, request\nfrom MooseJuice.models import User, Post, Prices, Goals\nfrom MooseJuice import app, bcrypt, db\nfrom flask_login import login_user, current_user, logout_user, login_required, logout_user\nfrom MooseJuice.utils import NewPost, getQuery, getRoom, getUserbalance, Tab, getDF, groupBy, MooseStats, getDFPost, getPrices, updateProgress, updatePosts\nfrom werkzeug.utils import secure_filename\nimport secrets\nimport os\[email protected]_first_request\ndef before_first_request():\n if not User.query.filter_by(status = 'admin').first():\n password = '$2b$12$rFvr/ona8l3EzGpuk2EkQOIv/1GH8GrUpxLGaDBmhg6FC7JmSwAZi'\n admin_user = User(username = 'admin', room = 'admin', status = 'admin', password=password)\n \n db.session.add(admin_user)\n db.session.commit()\n\n first_post = Post(post_type = \"First commit\", moose_balance=0, user_id=admin_user.id)\n db.session.add(first_post)\n db.session.commit()\n\n prices = Prices(beer_price = 5.0, soda_price = 5.0)\n db.session.add(prices)\n db.session.commit()\n \n\n\[email protected](\"/\", methods = ['GET', 'POST'])\[email protected](\"/login\", methods = ['GET', 'POST'])\ndef login():\n if current_user.is_authenticated:\n return redirect(url_for('home'))\n form = LoginForm()\n if form.validate_on_submit():\n user = User.query.filter_by(username = form.username.data).filter_by(status = 'active').first()\n\n user_admin = User.query.filter_by(username = form.username.data).filter_by(status = 'admin').first()\n\n if user and bcrypt.check_password_hash(user.password, form.password.data):\n login_user(user, remember = form.remember_me.data)\n next_page = request.args.get('next')\n flash(f'You have been logged in!', 'success')\n return redirect(next_page) if next_page else redirect(url_for('home'))\n\n elif user_admin and bcrypt.check_password_hash(user_admin.password, form.password.data):\n login_user(user_admin, remember = form.remember_me.data)\n next_page = request.args.get('next')\n flash(f'You have been logged in!', 'success')\n return redirect(next_page) if next_page else redirect('/admin')\n\n \n\n else:\n flash(f'Login unsuccessfull. 
Please check username and password', 'danger')\n\n return render_template('login.html', title = 'Login', form = form)\n\[email protected](\"/Home\", methods = ['GET', 'POST'])\n@login_required\ndef home():\n Tab.updateTab(\"balance\")\n Tab.updateTab(\"date\")\n updatePosts()\n image_file = url_for('static', filename = 'uploads/default.jpg' )\n form = PurchaseForm()\n room = \"\"\n beer_price, soda_price = getPrices()\n if current_user.is_authenticated:\n room = getRoom(current_user)\n\n if form.validate_on_submit() and form.submit.data:\n if (form.amount_soda.data and form.amount_soda.data != \"0\") or (form.amount_beer.data and form.amount_beer.data != \"0\"):\n purchase, message = NewPost(form, current_user.id)\n db.session.add(purchase)\n db.session.commit()\n flash(message, \"success\")\n \n return redirect(url_for('home'))\n\n\n return render_template('home.html', title = \"Moose Juice\", image_file = image_file, form = form, room = room, beer_price = beer_price, soda_price = soda_price)\n\n\nrooms = {str(i):\"available\" for i in range(188,200)}\nguests = {\"Guest\" + str(i) :\"available\" for i in range(1,10)}\n\n\[email protected](\"/register\", methods = ['GET', 'POST'])\ndef register():\n if current_user.is_authenticated:\n return redirect(url_for('home'))\n form = RegistrationForm()\n \n for user in User.query.filter_by(status = \"active\"):\n if \"Guest\" in user.room:\n guests[user.room] = 'occupied'\n else:\n rooms[user.room] = 'occupied'\n\n form.room.choices = list(filter(None, [(room, \"Room \" + room) if cond != 'occupied' else None for room, cond in rooms.items()] + [(guest, guest) if cond != 'occupied' else None for guest, cond in guests.items()]))\n \n if form.validate_on_submit():\n hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')\n user = User(username = form.username.data, password = hashed_password, room = form.room.data)\n \n db.session.add(user)\n db.session.commit()\n moose_balance = Post.query.all()[-1].moose_balance\n\n\n \n\n flash(f'Your account has been created! 
You are now able to login', 'success')\n return redirect(url_for('login'))\n \n \n return render_template('register.html', title = 'Register', form = form, rooms = rooms, guests = guests)\n\n\n\n\[email protected](\"/logout\")\ndef logout():\n logout_user()\n return redirect(url_for('login'))\n\n\[email protected](\"/account\", methods = ['GET', 'POST'])\n@login_required\ndef account():\n form = TransferMoneyForm()\n form2 = UpdateUsernameForm()\n form3 = UpdatePasswordForm()\n account_number = Prices.query.all()[-1].account\n room = getRoom(current_user)\n category, user_balance = getUserbalance(current_user)\n # user = User.query.filter_by(username = form2.username.data).first()\n image_file = url_for('static', filename = 'uploads/default.jpg')\n\n table = getDF(current_user)\n form4 = TableForm(table)\n date = request.form.get('date')\n week = request.form.get('week')\n month = request.form.get('month')\n\n if request.form.get('date'):\n Tab.updateTab('post')\n Tab.updateTab('date')\n form4.updateTable('Date', form4.date.data)\n if request.form.get('week'):\n Tab.updateTab('post')\n Tab.updateTab('week')\n form4.updateTable('Week', form4.week.data)\n if request.form.get('month'):\n Tab.updateTab('post')\n Tab.updateTab('month')\n form4.updateTable('Month', form4.month.data)\n \n \n if form.validate_on_submit() and form.submit.data:\n post, message = NewPost(form, current_user.id)\n if not post:\n flash(message, \"danger\")\n else:\n db.session.add(post) \n db.session.commit()\n Tab.updateTab(\"balance\")\n\n flash(message, \"success\")\n return redirect(url_for('account'))\n\n elif form.transfer.data and form.submit.data:\n\n if form.transfer.data < 0:\n Tab.updateTab(\"balance\")\n\n flash(\"Please don't transfer a negative amount of money...\", \"danger\")\n return redirect(url_for('account'))\n\n if form2.validate_on_submit() and bcrypt.check_password_hash(current_user.password, form2.password.data) and form2.submit.data:\n current_user.username = form2.username.data\n Tab.updateTab(\"update\")\n db.session.commit()\n flash(\"Your username has been updated\", 'success')\n return redirect(url_for('account'))\n \n elif request.method == 'GET':\n form2.username.data = current_user.username\n form2.password.data = '*******'\n\n elif not bcrypt.check_password_hash(current_user.password, form2.password.data) and form2.submit.data:\n Tab.updateTab(\"update\")\n flash(\"Wrong password\", 'danger')\n return redirect(url_for('account'))\n\n\n if form3.validate_on_submit() and bcrypt.check_password_hash(current_user.password, form3.password1.data) and form3.submit1.data:\n hashed_password = bcrypt.generate_password_hash(form3.new_password.data).decode('utf-8')\n current_user.password = hashed_password\n Tab.updateTab(\"update\")\n db.session.commit()\n flash(\"Your password has been updated\", 'success')\n return redirect(url_for('account'))\n\n elif request.method == 'GET':\n form2.username.data = current_user.username\n form3.password1.data = '*******'\n\n elif not bcrypt.check_password_hash(current_user.password, form3.password1.data) and form3.submit1.data:\n Tab.updateTab(\"update\")\n flash(\"Wrong password\", 'danger')\n return redirect(url_for('account'))\n\n \n\n return render_template('account.html',\n title = 'Account',\n image_file = image_file,\n room = room,\n category = category,\n form = form,\n form2 = form2,\n form3 = form3, \n form4 = form4,\n user_balance = user_balance,\n Tab = Tab,\n table = table,\n headings = table.columns,\n account_number = account_number)\n\n\n\[email 
protected](\"/update_account\", methods = ['GET', 'POST'])\n@login_required\ndef update_account():\n form = UpdateAccountForm()\n user = User.query.filter_by(username = form.username.data).first()\n room = getRoom(current_user)\n if form.validate_on_submit() and bcrypt.check_password_hash(user.password, form.password.data):\n current_user.username = form.username.data\n hashed_password = bcrypt.generate_password_hash(form.new_password.data).decode('utf-8')\n current_user.password = hashed_password\n db.session.commit()\n flash(\"Your account has been updated\", 'success')\n return redirect(url_for('account'))\n elif request.method == 'GET':\n form.username.data = current_user.username\n form.password.data = '*******'\n elif not bcrypt.check_password_hash(user.password, form.password.data):\n flash(\"Wrong password\", 'danger')\n image_file = url_for('static', filename = 'uploads/default.jpg')\n return render_template('update_account.html', title = 'Update Account', image_file = image_file, form = form, room = room)\n\n\n\[email protected](\"/moving_out\", methods = ['GET', 'POST'])\n@login_required\ndef moving_out():\n form = MoveOutForm()\n room = getRoom(current_user)\n category, user_balance = getUserbalance(current_user)\n image_file = url_for('static', filename = 'uploads/default.jpg')\n\n if form.validate_on_submit():\n flash(\"You have moved out. See you soon!\", 'success')\n current_user.status = 'inactive'\n db.session.commit()\n\n logout()\n\n return redirect(url_for('home'))\n return render_template('moveout.html', title = 'Moving Out', image_file = image_file, category = category, form = form, room = room, user_balance = user_balance)\n\n\[email protected](\"/MooseScore\")\n@login_required\ndef moosescore():\n room = getRoom(current_user) \n moosestats = MooseStats(getDFPost())\n beers, sodas = updatePosts(True)\n return render_template('moosescore.html', title = \"Moose Score\", room = room, stats = moosestats, beers = beers, sodas = sodas)\n\n\ndef save_image(form):\n random_hex = secrets.token_hex(8) \n _, f_ext = os.path.splitext(form.filename)\n image_fn = random_hex + f_ext\n image_path = os.path.join(app.root_path, 'static\\\\uploads', image_fn)\n form.save(image_path)\n\n return image_fn\n\[email protected](\"/Goals\",methods = ['GET', 'POST'])\n@login_required\ndef goals():\n room = getRoom(current_user) \n updateProgress()\n form = GoalForm()\n goals = Goals.query.all()\n \n goals = [(\"active\" if goal.id == 1 else \"\",\n goal,\n \"success\" if goal.progress == 100 else \"warning\",\n url_for('static', filename = \"uploads/\" + goal.image_file)) for goal in goals]\n\n status = \"active\" if len(goals) == 0 else \"\"\n if form.validate_on_submit():\n goal = Goals(item=form.item.data, price=form.price.data)\n if form.image.data:\n filename = save_image(form.image.data)\n \n goal.image_file = filename\n\n db.session.add(goal)\n db.session.commit()\n\n return redirect(url_for('goals'))\n \n\n \n return render_template('goals.html', title = \"Moose Score\", room = room, form = form, goals = goals,status = status)"
},
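The login and update flows in routes.py rest on Flask-Bcrypt's hash/check pair; a standalone sketch of that round trip (the sample password is made up):

```python
from flask import Flask
from flask_bcrypt import Bcrypt

bcrypt = Bcrypt(Flask(__name__))

# generate_password_hash returns bytes; the app stores it decoded to utf-8.
hashed = bcrypt.generate_password_hash("hunter2").decode("utf-8")
print(bcrypt.check_password_hash(hashed, "hunter2"))  # True
print(bcrypt.check_password_hash(hashed, "wrong"))    # False
```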
{
"alpha_fraction": 0.5723684430122375,
"alphanum_fraction": 0.625,
"avg_line_length": 18.125,
"blob_id": "1daed62b5528e9ffa9883ee1e61d9e09a7d97256",
"content_id": "c4474162464f5890e5d1a881366acf0e7881b746",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 152,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 8,
"path": "/run.py",
"repo_name": "s183922/MooseJuice",
"src_encoding": "UTF-8",
"text": "from MooseJuice import app\nfrom waitress import serve\n\n\n\nif __name__ == \"__main__\":\n serve(app, host=\"0.0.0.0\", port=5000)\n # app.run(debug=True)"
},
{
"alpha_fraction": 0.7272727489471436,
"alphanum_fraction": 0.7363636493682861,
"avg_line_length": 18.55555534362793,
"blob_id": "a19b2734cd1b31e3fc42caca06384604b0a2f2da",
"content_id": "77ea790f92e43a23eb83d63308a8941ff2eae8d9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 880,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 45,
"path": "/README.md",
"repo_name": "s183922/MooseJuice",
"src_encoding": "UTF-8",
"text": "# MooseJuice\n## How To Get started:\n\n### Fix the enviroment\nInstall the python virtual enviroment package with pip:\n\n$ python3 -m pip install --user virtualenv\n\nNavigate to or create an empty repository.\nCreate a virtual enviroment:\n\n$ python3 -m venv env (if not working try python -m venv env)\n\nActivate the enviroment:\n\n$ source env/bin/activate\n\n\n### Clone this git repository to the folder\n\n$ git clone https://github.com/s183922/MooseJuice.git\n\n### Update the enviroment\nNavigate to the cloned git repository MooseJuice and update the env:\n\n$ python -m pip install -r requirements.txt\n\n### Start the server\nIn the git repository open a python terminal:\n\n$ python\n\n\\>> from MooseJuice import db\n\n\\>> db.drop_all() # Deletes existing database\n\n\\>> db.create_all() # Initialise new database\n\nin python terminal:\n\n\\>> run.py\n\nin command prompt:\n\n$ python MooseJuice/run.py\n"
},
{
"alpha_fraction": 0.6645631790161133,
"alphanum_fraction": 0.6692960262298584,
"avg_line_length": 38.625,
"blob_id": "911c69f67a7123c4d95a6ed89dd4a664c1bceaba",
"content_id": "79d0f27fa0aa00a98b7613d39bd745154cee532e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5071,
"license_type": "no_license",
"max_line_length": 155,
"num_lines": 128,
"path": "/MooseJuice/forms.py",
"repo_name": "s183922/MooseJuice",
"src_encoding": "UTF-8",
"text": "from flask_wtf import FlaskForm\nfrom wtforms import StringField, PasswordField, SubmitField, BooleanField, SelectField, IntegerField, FloatField\nfrom flask_wtf.file import FileField, FileAllowed\nfrom wtforms.validators import DataRequired, Length, Email, EqualTo, ValidationError, NumberRange\nfrom MooseJuice.models import User\nfrom flask_login import current_user\nfrom MooseJuice.utils import groupBy\nimport numpy as np\nclass RegistrationForm(FlaskForm):\n username = StringField('Username', validators=[DataRequired(), Length(min=2, max=20)])\n password = PasswordField('Password', validators = [DataRequired()])\n confirm_password = PasswordField('Confirm Password', validators=[DataRequired(), EqualTo('password')])\n room = SelectField('room', choices = [], validators = [DataRequired()])\n \n submit = SubmitField('Move in')\n\n def validate_username(self,username):\n user = User.query.filter_by(username = username.data).filter_by(status = \"active\").first()\n \n if user:\n \n raise ValidationError(f'There already exists a person with this name in kitchen T. Choose a different one.' )\n\n\nclass LoginForm(FlaskForm):\n username = StringField('Username', validators=[DataRequired(), Length(min=2, max = 20)])\n password = PasswordField('Password', validators = [DataRequired()])\n remember_me = BooleanField('Remember Me')\n submit = SubmitField('Login')\n\n # def validate_user(self,username):\n # user = User.query.filter_by(username = username.data).filter_by(status = \"active\").first()\n\n # if not user:\n # raise ValidationError('The user either does not exist or is inactive')\n\n\nclass UpdateUsernameForm(FlaskForm):\n username = StringField('Username', validators=[DataRequired(), Length(min=2, max=20)])\n password = PasswordField('Password', validators = [DataRequired()])\n \n submit = SubmitField('Update Username')\n\n def validate_username(self,username):\n if username.data != current_user.username:\n user = User.query.filter_by(username = username.data).filter_by(status = \"active\").first()\n if user:\n raise ValidationError('That username is taken. Please choose a different one')\n\n\nclass UpdatePasswordForm(FlaskForm):\n password1 = PasswordField('Password', validators = [DataRequired()])\n new_password = PasswordField('New Password', validators = [DataRequired()])\n confirm_password = PasswordField('Confirm New Password', validators=[DataRequired(), EqualTo('new_password', message=\"Must be equal to New Password\")])\n\n submit1 = SubmitField('Update Password')\n\n\n\nclass PurchaseForm(FlaskForm):\n amount_beer = StringField('Beer')\n amount_soda = StringField('Soda')\n submit = SubmitField('Buy')\n\n\n formType = \"Purchase\"\n \n def validate_username(self, amount_beer):\n if amount_beer == None:\n raise ValidationError('That username is taken. 
Please choose a different one')\n\n \n\nclass TransferMoneyForm(FlaskForm):\n transfer = FloatField('Money to transfer', validators=[DataRequired(), NumberRange(min=0)])\n\n submit = SubmitField('Transfer Money')\n\n formType = \"Transfer\"\n \n\nclass MoveOutForm(FlaskForm):\n moveout = BooleanField('I am sure', validators=[DataRequired()])\n\n submit = SubmitField('Move out')\n\n\nclass TableForm(FlaskForm):\n date = SelectField(\"Date\", choices=[], default = 0)\n week = SelectField(\"Week\", choices=[], default = 0)\n month = SelectField(\"Month\", choices=[], default = 0)\n\n\n submit4 = SubmitField(\"Submit\")\n def __init__(self, table):\n super(TableForm, self).__init__()\n self.table = table\n self.Dchoices = [(i, t[0]) for i, t in enumerate(groupBy(table, \"Date\").values)]\n self.date.choices = self.Dchoices\n \n\n self.Wchoices = [(i, t[0]) for i, t in enumerate(groupBy(table, \"Week\").values)]\n self.week.choices = self.Wchoices\n \n\n self.Mchoices = [(i, t[0]) for i, t in enumerate(groupBy(table, \"Month\").values)]\n self.month.choices = self.Mchoices\n\n self.beers = groupBy(table, \"Date\")[\"Beers\"].iloc[0]\n self.sodas = groupBy(table, \"Date\")[\"Sodas\"].iloc[0]\n self.transfer = groupBy(table, \"Date\")[\"Transfer\"].iloc[0]\n self.amount = groupBy(table, \"Date\")[\"Amount\"].iloc[0]\n\n\n def updateTable(self, type_, number):\n self.beers = groupBy(self.table, type_)[\"Beers\"].iloc[int(number)]\n self.sodas = groupBy(self.table, type_)[\"Sodas\"].iloc[int(number)]\n self.transfer = int(np.ceil(float(groupBy(self.table, type_)[\"Transfer\"].iloc[int(number)])))\n self.amount = int(np.ceil(float(groupBy(self.table, type_)[\"Amount\"].iloc[int(number)])))\n\n return \"\"\n\nclass GoalForm(FlaskForm):\n item = StringField(\"Name of Goal\", validators=[DataRequired()])\n price = FloatField(\"Price of Goal\", validators=[DataRequired(), NumberRange(min=0)])\n image = FileField(\"Upload image\", validators=[FileAllowed(['jpg', 'png', 'jpeg'])])\n\n submit = SubmitField(\"Add new goal\")"
},
{
"alpha_fraction": 0.7375415563583374,
"alphanum_fraction": 0.7375415563583374,
"avg_line_length": 28.09677505493164,
"blob_id": "7651a45e6b94172fa5301c28d275791e95f5b033",
"content_id": "08121cfc3600ad113ab38299239e0bbc5a979d26",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 903,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 31,
"path": "/MooseJuice/__init__.py",
"repo_name": "s183922/MooseJuice",
"src_encoding": "UTF-8",
"text": "from flask import Flask, render_template, url_for, flash, redirect\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_bcrypt import Bcrypt\nfrom flask_login import LoginManager, current_user\nfrom flask_admin import Admin, AdminIndexView\n\n\napp = Flask(__name__)\nUPLOAD_FOLDER = '/static/uploads'\nALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])\n\n\napp.config['SECRET_KEY'] = '8ba4ae8044cb14a474c522fecde7c4db'\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///site.db'\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\ndb = SQLAlchemy(app)\nbcrypt = Bcrypt(app)\n\nlogin_manager = LoginManager(app)\nlogin_manager.login_view = 'login'\nlogin_manager.login_message_category = 'info'\n\nclass AdminView(AdminIndexView):\n def is_accessible(self):\n return current_user.is_authenticated and current_user.status == 'admin' and current_user.room == 'admin'\n\nadmin = Admin(app, index_view=AdminView())\n\n\nfrom MooseJuice import routes\n\n"
}
] | 8 |
MhankBarBar/Otakudesu
|
https://github.com/MhankBarBar/Otakudesu
|
16d486c8fcc235c2f561f96556c1d141639898e5
|
fb8d6857ca273c7fb6c28ad05c8187144219b521
|
e2c202831f0d314c1aa9d946015470f9230df9f1
|
refs/heads/master
| 2023-06-29T19:28:45.472827 | 2021-07-22T17:42:12 | 2021-07-22T17:42:12 | 387,792,264 | 5 | 1 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5070314407348633,
"alphanum_fraction": 0.5129122734069824,
"avg_line_length": 41.510868072509766,
"blob_id": "80527c19570ec7bc234e9b7974a8564d5f08a898",
"content_id": "33c1ebe784b50719e539b2f894f6a3cfebbe818c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3911,
"license_type": "no_license",
"max_line_length": 490,
"num_lines": 92,
"path": "/otakudesu/utils.py",
"repo_name": "MhankBarBar/Otakudesu",
"src_encoding": "UTF-8",
"text": "from json import dumps, loads\nfrom types import SimpleNamespace as SN\nfrom bs4 import BeautifulSoup as bs\nfrom requests import get\nfrom concurrent.futures import ThreadPoolExecutor\nimport validators\n\nclass extracts:\n\n def __init__(self, url) -> None:\n self.url = None\n self.content = None\n self.title = None\n if validators.url(url) and 'https://otakudesu.moe' in url:\n self.url = url\n self.content = bs(get(self.url).text, 'html.parser').find('div', {'class': 'venser'})\n self.anu = list(self.content.find('div', {'class': 'infozingle'}).strings)\n self.title = self.anu[1].strip(': ')\n\n @property\n def extract(self):\n if self.url:\n sinfo = list(self.content.find('div', {'class': 'infozingle'}).strings)\n genres = []\n linkdl = self.extractLink\n for _ in self.content.findAll('a', {'rel': 'tag'}):\n if _['href'] == ' ':pass\n else:genres.append(_.text)\n results = {'title': self.title, 'thumbnail': self.content.img['src'], 'jp_title': sinfo[3].strip(': '), 'rating': sinfo[5].strip(': '), 'producers': sinfo[7].strip(': '), 'type': sinfo[9].strip(': '), 'status': sinfo[11].strip(': '), 'episodes': sinfo[13].strip(': '), 'duration': sinfo[15].strip(': '), 'release_date': sinfo[17].strip(': '), 'genres': genres, 'synopsis': self.content.find('div', {'class': 'sinopc'}).text, 'downloads': linkdl, 'studio': sinfo[19].strip(': ')}\n return loads(dumps(results), object_hook=lambda d: SN(**d))\n\n @property\n def extractLink(self):\n meki = {}\n puki = {}\n deel = {}\n try:\n for _ in self.content.findAll('a', {'target': '_blank', 'data-wpel-link': 'internal'})[:-3]:\n if ':' in _.text:pass\n else:\n for e in bs(get(_['href']).text, 'html.parser').find('div', {'class': 'download'}).findAll('ul'):\n for __ in e.findAll('li'):\n for ___ in __.findAll('a'):\n deel.update({___.text.lower(): ___['href']})\n puki.update({'_'+'_'.join(__.strong.text.lower().split(' ')): deel})\n deel = {}\n meki.update({'eps'+''.join(filter(str.isdigit, _.text.split('Episode')[1].split()[0])) if 'batch' not in _.text.lower() and 'episode' in _.text.lower() else 'batch': puki})\n return meki\n except AttributeError:\n return []\n\n def __str__(self) -> str:\n return '<[title: %s]>' % self.title\n\n def __repr__(self) -> dict:\n return self.__str__()\n\nclass extractFromSearch:\n\n def __init__(self, content) -> None:\n self.result = []\n for _ in content:\n with ThreadPoolExecutor(max_workers=3) as moe:\n if 'anime' in _.a['href']:self.result.append(moe.submit(extracts, _.a['href']).result().extract)\n\n def __str__(self) -> str:\n return '<[result: %s]>' % self.result.__len__()\n\n def __repr__(self) -> str:\n return self.__str__()\n\nclass extractFromSchedule:\n\n def __init__(self, content) -> None:\n self.result = {}\n count = 0\n days = ['senin','selasa','rabu','kamis','jumat','sabtu','minggu','random']\n res = []\n for _ in content:\n for __ in _.findAll('a'):\n with ThreadPoolExecutor(max_workers=3) as moe:\n if 'anime' in __['href']:res.append(moe.submit(extracts, __['href']).result().extract)\n self.result.update({days[count]: res})\n count += 1\n res = []\n # self.result = loads(dumps(self.result), object_hook=lambda d: SN(**d))\n\n def __str__(self) -> str:\n return '<[result: %s]>' % self.result.__len__()\n\n def __repr__(self) -> str:\n return self.__str__()\n"
},
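utils.py above calls `submit(...).result()` immediately inside each `with ThreadPoolExecutor(...)` block, which waits on every task as it is submitted; for contrast, a minimal sketch of the usual submit-then-collect pattern (`fetch` is a hypothetical stand-in for the per-URL extract call):

```python
from concurrent.futures import ThreadPoolExecutor

def fetch(n):
    return n * n  # hypothetical stand-in for scraping one URL

with ThreadPoolExecutor(max_workers=3) as pool:
    futures = [pool.submit(fetch, n) for n in range(5)]  # queue everything first
    results = [f.result() for f in futures]              # then collect
print(results)  # [0, 1, 4, 9, 16]
```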
{
"alpha_fraction": 0.6190028190612793,
"alphanum_fraction": 0.6302916407585144,
"avg_line_length": 33.290321350097656,
"blob_id": "3449b5b820619271c6725586603a6072a872a285",
"content_id": "48e0ca4ac39c691d3130c213c08f233aa9e72a47",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1063,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 31,
"path": "/setup.py",
"repo_name": "MhankBarBar/Otakudesu",
"src_encoding": "UTF-8",
"text": "from setuptools import setup, find_packages\nfrom os import path\nbase_dir = path.abspath(path.dirname(__file__))\nsetup(\n name = 'otakudesu',\n packages = ['otakudesu'],\n include_package_data=True,\n version = '0.0.9',\n license = 'MIT',\n description = 'Otakudesu Scrapper',\n long_description_content_type = 'text/markdown',\n long_description = open('README.md', 'r').read(),\n author = 'MhankBarBar',\n author_email = '[email protected]',\n url = 'https://github.com/MhankBarBar/otakudesu',\n download_url = 'https://github.com/MhankBarBar/otakudesu/archive/0.0.9.tar.gz',\n keywords = ['anime', 'anime sub indo', 'anime scrapper', 'animeindo', 'animelovers', 'otakudesu'],\n install_requires=[\n 'validators',\n 'requests',\n 'bs4'\n ],\n classifiers=[\n 'Development Status :: 3 - Alpha', \n 'Intended Audience :: Developers', \n 'Topic :: Software Development :: Build Tools',\n 'License :: OSI Approved :: MIT License', \n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n ],\n)\n"
},
{
"alpha_fraction": 0.8275862336158752,
"alphanum_fraction": 0.8275862336158752,
"avg_line_length": 28,
"blob_id": "b77c4c3de7c8647649a0b32475093db858b60141",
"content_id": "7ef13114dc32af1ea27c581e847c92c1b7dc285e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 29,
"license_type": "no_license",
"max_line_length": 28,
"num_lines": 1,
"path": "/otakudesu/__init__.py",
"repo_name": "MhankBarBar/Otakudesu",
"src_encoding": "UTF-8",
"text": "from .otaku import OtakuDesu\n"
},
{
"alpha_fraction": 0.6762226223945618,
"alphanum_fraction": 0.6762226223945618,
"avg_line_length": 17.53125,
"blob_id": "1980b3743295621070cf107031a289e84904fba4",
"content_id": "b47a4c5cfc770649fdf8df3c3a568e8b375cc2d0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 593,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 32,
"path": "/README.md",
"repo_name": "MhankBarBar/Otakudesu",
"src_encoding": "UTF-8",
"text": "# Install\n\n```bash\n> pip install otakudesu\n```\n# Run on terminal\n\n```bash\n> python -m otakudesu\n```\n# Python Interpreter\n## Search by query\n```python\n>>> from otakudesu import OtakuDesu\n>>> otakudesu = OtakuDesu()\n>>> x=otakudesu.search('Saenai heroine')\n>>> x.result\n```\n## Get from schedule\n```python\n>>> from otakudesu import OtakuDesu\n>>> otakudesu = OtakuDesu()\n>>> x=otakudesu.getSchedule\n>>> x.result\n```\n## ByUrl\n```python\n>>> from otakudesu import OtakuDesu\n>>> otakudesu = OtakuDesu()\n>>> x=otakudesu.byUrl('https://otakudesu.moe/anime/saenai-heroine-subtitle-indonesia/')\n>>> x\n```\n"
},
{
"alpha_fraction": 0.6713286638259888,
"alphanum_fraction": 0.6724941730499268,
"avg_line_length": 39.85714340209961,
"blob_id": "4114e55a290f31b68a276e00ee37da6b8d33a9d1",
"content_id": "5a5b2b2bd4887ef7d2be1e4e9aa1c44faf636122",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 858,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 21,
"path": "/otakudesu/otaku.py",
"repo_name": "MhankBarBar/Otakudesu",
"src_encoding": "UTF-8",
"text": "from requests import get, post\nfrom bs4 import BeautifulSoup as bs\nfrom .utils import extracts, extractFromSearch, extractFromSchedule\n\nclass OtakuDesu:\n\n def __init__(self) -> None:\n self.BaseUrl = 'https://otakudesu.moe'\n self.SearchUrl = 'https://otakudesu.moe?s=%s&post_type=anime'\n self.scheduleUrl = 'https://otakudesu.moe/jadwal-rilis/'\n\n def search(self, query:str):\n return extractFromSearch(bs(post(self.SearchUrl % query).text, 'html.parser').find('ul', {'class': 'chivsrc'}).findAll('li'))\n\n def byUrl(self, url:str):\n if self.BaseUrl + '/anime' not in url:assert Exception('Url is invalid')\n return extracts(url).extract\n\n @property\n def getSchedule(self):\n return extractFromSchedule(bs(get(self.scheduleUrl).text, 'html.parser').find('div', {'class': 'venutama'}).findAll('ul'))\n"
},
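otaku.py's scraping reduces to `find`/`findAll` over parsed HTML; a minimal offline sketch of that pattern (inline HTML instead of a live request to the site):

```python
from bs4 import BeautifulSoup as bs

html = '<ul class="chivsrc"><li><a href="/anime/x">X</a></li></ul>'
soup = bs(html, "html.parser")
for li in soup.find("ul", {"class": "chivsrc"}).findAll("li"):
    print(li.a["href"], li.a.text)  # -> /anime/x X
```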
{
"alpha_fraction": 0.3500390648841858,
"alphanum_fraction": 0.36461859941482544,
"avg_line_length": 38.597938537597656,
"blob_id": "4d59927f242470db90b83bbf69ca24c83a1ac3cc",
"content_id": "ac1fbaa7d217ebaa9d608e0ea3b4915ab4b80300",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7959,
"license_type": "no_license",
"max_line_length": 233,
"num_lines": 194,
"path": "/otakudesu/__main__.py",
"repo_name": "MhankBarBar/Otakudesu",
"src_encoding": "UTF-8",
"text": "from os import get_terminal_size, system as sys\nfrom . import OtakuDesu\nimport itertools\nfrom time import sleep\nfrom subprocess import call\nfrom platform import system\nfrom threading import Thread\nfrom random import choice\nfrom textwrap import wrap\notak = OtakuDesu()\n\nclass Base:\n\n def __init__(self):\n self.contact = \"https://wa.me/6285892766102\"\n self.clear = (lambda : sys(\"cls\" if system() == \"Windows\" else \"clear\"))\n\n def randco(self, text):\n lcol = [\"\\x1b[1;31m\",\"\\x1b[1;32m\",\"\\x1b[1;33m\",\"\\x1b[1;34m\",\"\\x1b[1;35m\",\"\\x1b[1;36m\"]\n return \"%s%s\\x1b[0m\" % (choice(lcol), text)\n\n def chotto(self):\n global matte\n matte = []\n for c in itertools.cycle([\"■□□□□□□□□□\",\"■■□□□□□□□□\", \"■■■□□□□□□□\", \"■■■■□□□□□□\", \"■■■■■□□□□□\", \"■■■■■■□□□□\", \"■■■■■■■□□□\", \"■■■■■■■■□□\", \"■■■■■■■■■□\", \"■■■■■■■■■■\"]):\n if bool(matte) is True:\n break\n print(f\" ╳ Wait {self.randco(c)}\\r\", end=\"\")\n sleep(0.1)\n\n def show(self, obj):\n # self.clear()\n print(\"\"\"\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n ➯ Title : %s\n ➯ Japanese Title : %s\n ➯ Score : %s\n ➯ Producers : %s\n ➯ Studio : %s\n ➯ Type : %s\n ➯ Status : %s\n ➯ Episodes : %s\n ➯ Duration : %s\n ➯ Release Date : %s\n ➯ Genre : %s\n ➯ Synopsis : %s\n \"\"\" % (obj.title, obj.jp_title, obj.rating, obj.producers, obj.studio, obj.type, obj.status, obj.episodes, obj.duration, obj.release_date, \", \".join(obj.genres), \"\\n \".join(wrap(obj.synopsis, get_terminal_size()[0]-12))))\n\n def showeps(self, obj):\n eps = list(obj.downloads.__dict__.keys())[::-1]\n for k, v in enumerate(eps, 1):\n print(\" %s. %s\" % (k, v.replace(\"eps\",\"Episode \").title()))\n chos = int(input(\"\\n ➯ Choose : \"))\n if chos == 99:Main().__main__()\n if chos > 0 and chos-1 < len(eps):\n self.showdl(obj.downloads.__dict__.get(eps[chos-1]))\n else:\n print(\" ╳ Invalid Input\")\n\n def showdl(self, obj):\n qlt = list(obj.__dict__.keys())[::-1]\n print(\"\\n\")\n for k, v in enumerate(qlt, 1):\n print(\" %s.%s\" % (k, v.replace(\"_\",\" \")))\n chos = int(input(\"\\n ➯ Choose : \"))\n if chos == 99:Main().__main__()\n if chos > 0 and chos-1 < len(qlt):\n self.showlink(obj.__dict__.get(qlt[chos-1]))\n else:\n print(\" ╳ Invalid Input\")\n\n def showlink(self, obj):\n deel = list(obj.__dict__.keys())\n print(\"\\n\")\n for k, v in enumerate(deel, 1):\n print(\" %s. %s\" % (k, v))\n chos = int(input(\"\\n ➯ Choose : \"))\n if chos == 99:Main().__main__()\n if chos > 0 and chos-1 < len(deel):\n try:\n call([\"termux-clipboard-set\", obj.__dict__.get(deel[chos-1])]) # Only work on termux\n print(f\" {obj.__dict__.get(deel[chos-1])}\\n Copied To Clipboard\")\n exit(0)\n except:\n print(f\" Result: {obj.__dict__.get(deel[chos-1])}\")\n exit(0)\n else:\n print(\" ╳ Invalid Input\")\n\nclass Main(Base):\n loads = (lambda x: Thread(target=Base().chotto).start())\n logo = \"\"\"\n _ \\ | | |\n | | __| _` | | / | | _` | _ \\ __| | |\n | | | ( | < | | ( | __/ \\__ \\ | |\n \\___/ \\__| \\__,_| _|\\_\\ \\__,_| \\__,_| \\___| ____/ \\__,_| .moe\n ⚘ MhankBarBar | © 2021\n ⚘ Search And Get Direct Download Link From Otakudesu.moe\n\n 1. Search\n 2. From Url\n 3. From Schedule (It's taking a long time)\n 4. 
Contact\n \"\"\"\n def __main__(self):\n self.clear()\n print(self.logo)\n try:\n if (mek := int(input(\" ➯ Choose : \"))):\n if mek == 1:\n quer = input(\" ➯ Query : \")\n print(\"\\n\")\n self.loads()\n if (hasil := otak.search(quer).result):\n matte.insert(0, True)\n sus = []\n for k, v in enumerate(hasil, 1):\n print(\" %s. %s\" % (k, v.title))\n sus.append(v)\n print(\" 99. Back to main menu\")\n while(True):\n if (pil := int(input(\"\\n ➯ Choose : \"))):\n if pil == 99:self.__main__()\n if pil > 0 and pil-1 < len(sus):\n self.show(sus[pil-1])\n self.showeps(sus[pil-1])\n else:\n print(\" ╳ Invalid Input\")\n else:\n print(\" ╳ Invalid Input\")\n else:\n matte.insert(0, True)\n sleep(0.5)\n print(\" ╳ Anime not found\")\n elif mek == 2:\n url = input(\" ➯ Url : \")\n print(\"\\n\")\n self.loads()\n if (hasil := otak.byUrl(url)):\n matte.insert(0, True)\n # self.clear()\n self.show(hasil)\n self.showeps(hasil)\n else:\n matte.insert(0, True)\n print(\" ╳ Error while scraping\")\n elif mek == 3:\n print(\"\\n\")\n self.loads()\n if (hasil := otak.getSchedule.result):\n matte.insert(0, True)\n sus = []\n print(\"\\n\")\n for k, v in enumerate(list(hasil.keys()), 1):\n print(\" %s. %s\" % (k, v))\n sus.append(v)\n print(\" 99. Back to main menu\")\n while(True):\n if (pil := int(input(\" ➯ Choose : \"))):\n if pil == 99:self.__main__()\n if pil > 0 and pil-1 < len(sus):\n sis = []\n for k, v in enumerate(hasil[sus[pil-1]], 1):\n print(\" %s. %s\" % (k, v.title))\n sis.append(v)\n if (pul := int(input(\" ➯ Choose : \"))):\n if pul == 99:self.__main__()\n if pul > 0 and pul-1 < len(sis):\n self.show(sis[pul-1])\n self.showeps(sis[pul-1])\n else:\n print(\" ╳ Invalid Input\")\n else:\n print(\" ╳ Invalid Input\")\n else:\n print(\" ╳ Invalid Input\")\n else:\n print(\" ╳ Invalid Input\")\n else:\n matte.insert(0, True)\n print(\" ╳ Error while scraping\")\n elif mek == 4:\n call([\"xdg-open\", self.contact])\n self.__main__()\n else:\n pass\n else:\n print(\" ╳ Invalid Input\")\n except Exception as e:\n print(e)\n exit(\" ╳ An error occurred\")\n\nif __name__ == \"__main__\":\n Main().__main__()\n"
}
] | 6 |
EvelinaZakharchenko/SchoolTasksHillel
|
https://github.com/EvelinaZakharchenko/SchoolTasksHillel
|
99af3c4600fcfa299c1ba187a2b97cd28f330f7a
|
28f36f2f50d2157eaa82ebbdc03a61e377394b45
|
79a600fdc590eca54964a7b2fa0cdb32839a1fbb
|
refs/heads/master
| 2023-08-20T05:48:14.729020 | 2021-10-21T13:34:53 | 2021-10-21T13:34:53 | 390,736,579 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.4202127754688263,
"alphanum_fraction": 0.664893627166748,
"avg_line_length": 22.5,
"blob_id": "857b2af79c2f83e2a144c964f8da81c711eecba1",
"content_id": "91ee3c5c072f5cff816f4fdc66bca6d738fc0cba",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 188,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 8,
"path": "/HW3/ListEx1.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#Exercise 1: Reverse a given list in Python\n#aLsit = [100, 200, 300, 400, 500]\n#Expected output:\n#[500, 400, 300, 200, 100]\n\naList = [100, 200, 300, 400, 500]\naList.reverse()\nprint(aList)\n"
},
{
"alpha_fraction": 0.6645021438598633,
"alphanum_fraction": 0.6948052048683167,
"avg_line_length": 27.9375,
"blob_id": "1627ec350616a46370c28c033638255cd91b8bb3",
"content_id": "7dfe7b47c6d3c1fb2187b73bd68afee7adc4a72b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 675,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 16,
"path": "/HW2/Task1166.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "# З урожаю льону на дослiднiй дiлянцi одержали 244 кг волокна льону,\n# а насiння - в 2 рази менше. Скiльки кг насiння одержали?\n\nlinen = 244\n\ndef find_seed():\n seeds = linen / 2\n return seeds\n\nprint(f\"\"\"\nУмова:\nЗ урожаю льону на дослiднiй дiлянцi одержали 244 кг волокна льону,\nа насiння - в 2 рази менше. Скiльки кг насiння одержали?\n\"\"\")\nprint(f\"1 дiя: {linen} : 2 = {find_seed()} (н.) - одержали\")\nprint(f\"Вiдповiдь: {find_seed()} кг насiння одержали.\")"
},
{
"alpha_fraction": 0.5498154759407043,
"alphanum_fraction": 0.5996310114860535,
"avg_line_length": 29.05555534362793,
"blob_id": "7cb58619b065e3589ba3e1c6b21477de84abfa05",
"content_id": "5cce570bcbf09c0448c3353699cf034c4e5d6cd7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 542,
"license_type": "no_license",
"max_line_length": 198,
"num_lines": 18,
"path": "/HW4/StrEx6.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#Exercise 6: Given two strings, s1 and s2, create a mixed String using the following rules\n#Note: create a third-string made of the first char of s1 then the last char of s2, Next, the second char of s1 and second last char of s2, and so on. Any leftover chars go at the end of the result.\n#Given:\ns1 = \"Abc\"\ns2 = \"Xyz\"\n#Expected Output:\n#AzbycX\n\n\ns3 = \"\"\nfor i in range(len(s1)):\n if i == 0:\n s3 += (s1[i] + s2[-i-1])\n elif i == (len(s1) - 1):\n s3 += (s1[i] + s2[0])\n else:\n s3 += (s1[i] + s2[-i-1])\nprint(s3)\n\n"
},
{
"alpha_fraction": 0.6425120830535889,
"alphanum_fraction": 0.6473429799079895,
"avg_line_length": 30.846153259277344,
"blob_id": "092a41cd73c54068ead56ecd0ad5f93d61d0df10",
"content_id": "c5c67c86170f6d719a4b3a805871fec5b7ed26ff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 414,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 13,
"path": "/HW4/StrEx14.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#Exercise 14: Remove empty strings from a list of strings\n#Given:\nstr_list = [\"Emma\", \"Jon\", \"\", \"Kelly\", None, \"Eric\", \"\"]\n#Expected Output:\n#Original list of sting\n#['Emma', 'Jon', '', 'Kelly', None, 'Eric', '']\n\n#After removing empty strings\n#['Emma', 'Jon', 'Kelly', 'Eric']\n#Note. Use comprehensive list expression!\n\nnew_list = [str_list[s] for s in range(len(str_list)) if bool(str_list[s])]\nprint(new_list)\n"
},
{
"alpha_fraction": 0.6385542154312134,
"alphanum_fraction": 0.6527929902076721,
"avg_line_length": 34.11538314819336,
"blob_id": "7dbb1759204821d8c8a2dfa027199145107076bb",
"content_id": "a0927629f0802875e2049bd55da8bc6ef7c06363",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1064,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 26,
"path": "/HW7/Task2.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "# Завдання №2\n# Створіть текстовий файл зі списком учнів, де кожна лінійка є у форматі: П.І.Б учня, середня оцінка\n# Створіть прогаму, що на основі даних з файлу, обрахує середню оцінку по класу\nimport os\nimport names\nimport random\n\ncount_of_students = 30\nwith open(os.path.join(os.path.curdir, \"students_list.txt\"), \"w\") as students_list:\n for _ in range(count_of_students):\n students_list.write(f\"{names.get_first_name()} {names.get_last_name()} {random.randint(1, 12)}\\n\")\n\n\ndef find_average():\n with open(os.path.join(os.path.curdir, \"students_list.txt\"), \"r\") as students_list:\n list1 = []\n lines = students_list.read()\n for s in range(len(lines)):\n if lines[s].isdigit() and lines[s - 1].isdigit():\n list1.append(int(lines[s - 1] + lines[s]))\n sum1 = sum(list1)\n average = sum1 / count_of_students\n return average\n\n\nprint(find_average())\n"
},
{
"alpha_fraction": 0.6007462739944458,
"alphanum_fraction": 0.6641790866851807,
"avg_line_length": 32.5,
"blob_id": "7e12063a92c80afe9ff826b4146c1809279cb5c0",
"content_id": "4b0298633a75ec29399b00114e6050bdb19bae17",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 268,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 8,
"path": "/HW6/DecorEx2.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "# Task #2\n# Consider you have a list [2, 4, 5, 6, 7].\n# Return a new set where all numbers became a strings (can be perform by str(34)).\n# No for loop nor comprehensive lists should be used\n\nlist1 = [2, 4, 5, 6, 7]\nset1 = set(map(lambda s: str(s), list1))\nprint(set1)\n"
},
{
"alpha_fraction": 0.5953859686851501,
"alphanum_fraction": 0.6064773797988892,
"avg_line_length": 31.66666603088379,
"blob_id": "14c58d0eb9d75437db2ea54fb7a12dbdc08b708a",
"content_id": "2d7783cb91c873e9fd8bc2f3f3ac04c9b786aae0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2744,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 69,
"path": "/HW7/Task4.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "# Завдання №4. Гра \"Поле Чудес\"\n# У Вас є файл зі словами та поясненнями, кожна лінійка якого має формат:\n# слово || Пояснення слова\n# Для кожного слова застосовуємо такі правила:\n# Спершу виводимо слово, кожна буква якого замінюється на *\n# Користувач вводить букву, і там, де ця буква є у слові, * замінюється на неї, якщо ні,\n# то лічильник невірних вгадувань інкрементується на 1\n# Якщо користувач не вгадає букви довжина_слова * 2 (довжина слова - до 10 літер),\n# інакше стільки невірних спроб, скільки букв у слові, гру програно\n# Якщо всі букви вгадані, користувач виграв партію та отримує 32 / довжина_слова очків\n# Перебіг гри побудувати на основі coroutine.\nimport os\nimport random\n\n\ndef read_and_separate():\n with open(os.path.join(os.path.curdir, \"words_with_explanations.txt\"), \"r\") as words:\n full_text = words.read()\n lines = full_text.split(\"\\n\")\n list1 = []\n for i in range(len(lines)):\n list1.append(lines[i].split(\" || \"))\n return list1\n\n\ndef get_word():\n list1 = read_and_separate()\n rand = random.randint(0, 42)\n word = list1[rand][0]\n explanation = list1[rand][1]\n print(explanation)\n print(word)\n return word\n\n\ndef play():\n word = get_word()\n hidden_word = list(word)\n for s in range(len(word)):\n hidden_word[s] = \"*\"\n print(''.join(hidden_word))\n count_of_fails = 0\n while True:\n new_letter = (yield)\n if word.count(new_letter) > 0:\n for i in range(len(word)):\n if new_letter == word[i]:\n hidden_word[i] = new_letter\n print(f\"You guessed letter\")\n print(''.join(hidden_word))\n if ''.join(hidden_word) in ''.join(word):\n print(f\"You win, the word is {''.join(hidden_word)}, you got {32//len(hidden_word)} points\")\n break\n else:\n count_of_fails += 1\n print(f\"No such letter in word. Attempts left: {(len(word)*2)-count_of_fails}\")\n if count_of_fails == len(word)*2:\n print(f\"You lose!\")\n break\n\n\nmagic_field = play()\nmagic_field.__next__()\n\nwhile True:\n try:\n magic_field.send(input(\"Input missed letter: \"))\n except StopIteration as si:\n break\n"
},
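Task4 above drives its game loop with a generator-based coroutine that is primed with `__next__()` and fed via `send()`; a minimal sketch of that control flow, detached from the game:

```python
def collect_until_bang():
    received = []
    while True:
        letter = (yield)       # pauses here until send() delivers a value
        if letter == "!":
            break
        received.append(letter)
    print("got:", received)

co = collect_until_bang()
co.__next__()                  # prime the coroutine up to the first yield
for ch in "ab!":
    try:
        co.send(ch)
    except StopIteration:      # raised once the coroutine returns
        break
```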
{
"alpha_fraction": 0.6045340299606323,
"alphanum_fraction": 0.6549118161201477,
"avg_line_length": 29.538461685180664,
"blob_id": "2d8d8614fb779918457857a384b16ca89e3b2feb",
"content_id": "6e8599b8cd98792748afcce126050bc65701b3f2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 397,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 13,
"path": "/HW6/DecorEx4.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "# Task #4\n# Consider we have list of numbers from 1 to 100.\n# Sum all items which can be devided by 3, which is devided by 3\n# (if a can be devided by 3, + a/3). Use functools.reduce for that.\nfrom functools import reduce\n\nlist1 = [s for s in range(1, 101)]\nlist2 = []\nfor i in range(len(list1)):\n if i % 3 == 0:\n list2.append(i)\nresult = reduce(lambda a, b: a + b, list2)\nprint(result)\n"
},
{
"alpha_fraction": 0.6711409687995911,
"alphanum_fraction": 0.718120813369751,
"avg_line_length": 15.55555534362793,
"blob_id": "ba27575fad4d5fad9fbb15ac824aa17e50956e0a",
"content_id": "f363532cefa36b286aaf4d1a9a804851a266c77b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 149,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 9,
"path": "/HW4/StrEx11.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#Exercise 11: Reverse a given string\n#Given:\nstr1 = \"PYnative\"\n#Expected Output:\n#evitanYP\n\nlist1 = list(str1)\nlist1.reverse()\nprint(''.join(list1))\n"
},
{
"alpha_fraction": 0.4256410300731659,
"alphanum_fraction": 0.5333333611488342,
"avg_line_length": 14,
"blob_id": "e0d3e98b3f48ce588f8b20f98acb3f3e421e5678",
"content_id": "d1342572f35aa087173cc9610fc7abdf3f7fde5f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 195,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 13,
"path": "/HW3/ForEx3.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#Exercise 3: Print the following pattern using for loop\n#5 4 3 2 1\n#4 3 2 1\n#3 2 1\n#2 1\n#1\n\nn = 5\n\nfor i in range(n, 0, -1):\n for j in range(i, 0, -1):\n print(j, end=\" \")\n print(\"\")\n"
},
{
"alpha_fraction": 0.3638985753059387,
"alphanum_fraction": 0.37209656834602356,
"avg_line_length": 32.436546325683594,
"blob_id": "71a962e6594545f18e943fcdec7b737ba0be38fa",
"content_id": "d710af6ba07e57382c1daf83fa195a02c7212501",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6589,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 197,
"path": "/HW4/SchoolDict.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#School Task\n#Create dictionary which represents the school. School consists of:\n#1.\tDirector\n#2.\tWise-Director\n#3.\tClasses\n#Class is also consisting of:\n#1.\tTeacher\n#2.\tList of students (list because we can exclude the student and add new)\n#Student has:\n#1.\tFirst Name\n#2.\tSecond Name\n#3.\tAverage score of studying\n#Task to do\n#Calculate average score of all students in all classes in the school \n\nimport names\nfrom pprint import pprint\ndictSchool = {\n \"Director\": \"Mr. John Smith\",\n \"Wise-Director\": \"Mrs. Jane Air\",\n \"Classes\": {\n 1: {\n \"Teacher\": \"Mrs. Liz Faxon\",\n \"Students\": [\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 10\n },\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 11\n },\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 5\n },\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 10\n },\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 9\n },\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 12\n },\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 2\n }\n ]\n },\n 2: {\n \"Teacher\": \"Mr. John Snow\",\n \"Students\": [\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 4\n },\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 5\n },\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 6\n },\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 7\n },\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 8\n },\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 9\n },\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 10\n }\n ],\n\n },\n 3: {\n \"Teacher\": \"Mrs. Jenny Pow\",\n \"Students\": [\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 11\n },\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 2\n },\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 5\n },\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 8\n },\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 9\n },\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 12\n },\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 5\n }\n ],\n\n },\n 4: {\n \"Teacher\": \"Mr. 
Tom Watson\",\n \"Students\": [\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 6\n },\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 11\n },\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 10\n },\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 11\n },\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 9\n },\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 8\n },\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 7\n }\n ]\n }\n }\n}\n#pprint(dictSchool)\nstudents_count = 0\nsum_marks = 0\nfor i in range(1, len(dictSchool[\"Classes\"])+1):\n students_count += len(dictSchool[\"Classes\"][i][\"Students\"])\n students = dictSchool[\"Classes\"][i][\"Students\"]\n for j in range(len(students)):\n sum_marks += students[j][\"Average\"]\n\naverage = sum_marks / students_count\n\nprint(f\"Average score of all students in all classes in the school is {average}\")\n"
},
{
"alpha_fraction": 0.6935123205184937,
"alphanum_fraction": 0.7046979665756226,
"avg_line_length": 25.294116973876953,
"blob_id": "ac79e6065c5a1e33fce762ff5f7536bb398a4d85",
"content_id": "cbab9fece2557ff2ecaed5a6c893c17df66f5632",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 447,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 17,
"path": "/HW4/StrEx4.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#Exercise 4: Arrange string characters such that lowercase letters should come first\n#Given an input string with the combination of the lower and upper case\n# arrange characters in such a way that all lowercase letters should come first.\n#Given:\nstr1 = \"PyNaTive\"\n#Expected Output:\n#yaivePNT\n\nlow = []\nup = []\nfor char in str1:\n if char.islower():\n low.append(char)\n else:\n up.append(char)\nstr2 = ''.join(low + up)\nprint(str2)\n"
},
{
"alpha_fraction": 0.5868725776672363,
"alphanum_fraction": 0.6409266591072083,
"avg_line_length": 22.545454025268555,
"blob_id": "048b6b3b49f2406b7c98ff296d170c8bb8d2d6f1",
"content_id": "0f223a9ddb608a15923227cd443676455d350ab6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 259,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 11,
"path": "/HW4/StrEx10.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#Exercise 10: Given an input string, count occurrences of all characters within a string\n#Given:\nstr1 = \"Apple\"\n#Expected Outcome:\n#{'A': 1, 'p': 2, 'l': 1, 'e': 1}\n\ndict1 = {}\nfor s in range(len(str1)):\n dict1[str1[s]] = str1.count(str1[s])\n\nprint(dict1)\n"
},
{
"alpha_fraction": 0.5894039869308472,
"alphanum_fraction": 0.6754966974258423,
"avg_line_length": 26.454545974731445,
"blob_id": "d0727d7d52a2beeb272a30c27dfe06f47a122422",
"content_id": "4cacaf408de825ed95e77150f79bea2b07459cc8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 302,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 11,
"path": "/HW3/TupleEx2.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#Exercise 2: Unpack the following tuple into 4 variables\naTuple = (10, 20, 30, 40)\n#Expected output:\n#aTuple = (10, 20, 30, 40)\n# Your code\n#print(a) # should print 10\n#print(b) # should print 20\n#print(c) # should print 30\n#print(d) # should print 40\nfor i in range(len(aTuple)):\n print(aTuple[i])\n"
},
{
"alpha_fraction": 0.6099018454551697,
"alphanum_fraction": 0.6184378862380981,
"avg_line_length": 29.8157901763916,
"blob_id": "5225381e5ec8e117c6969556aac8a29f513e782c",
"content_id": "968d82df240c2e1584c17d14bc19e2ba3a998878",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2636,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 76,
"path": "/HW9/PrivateSchoolTask.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "# - Підґрунтя\n# Приватна школа надає непогані послуги по освіті. Прекрасн аудиторії, чисті туалети... Вмотивовані вчителі...\n# Та за все треба платити...\n# - Технічне завдання\n# Розробити програму, яка підрахує на основі зарплат вчителів, директора, заучів та прибиральників,\n# скільки дітей та з якою оплатою за навчання треба формувати класи на наступний навчальний рік....\n\nclass SchoolWorker:\n def __init__(self, position: str, salary: int, count: int):\n self.__position = position\n self.__salary = salary\n self.count = count\n\n @property\n def position(self):\n return self.__position\n\n @property\n def salary(self):\n return self.__salary\n\n def total_salary_of_position(self):\n total = self.salary * self.count\n return total\n\n\nclass Director(SchoolWorker):\n def __init__(self, count: int):\n super().__init__(\n position=\"Director\",\n salary=30000,\n count=count\n )\n\n\nclass Teacher(SchoolWorker):\n def __init__(self, count: int):\n super().__init__(\n position=\"Teacher\",\n salary=15000,\n count=count\n )\n\n\nclass HeadTeacher(SchoolWorker):\n def __init__(self, count: int):\n super().__init__(\n position=\"Head teacher\",\n salary=20000,\n count=count\n )\n\n\nclass Cleaner(SchoolWorker):\n def __init__(self, count: int):\n super().__init__(\n position=\"Cleaner\",\n salary=10000,\n count=count\n )\n\n\nif __name__ == \"__main__\":\n cleaners_count = int(input(\"Input cleaners count: \"))\n headteachers_count = int(input(\"Input head teachers count: \"))\n teachers_count = int(input(\"Input teachers count: \"))\n directors_count = int(input(\"Input directors count: \"))\n students_count = int(input(\"Input expected students count: \"))\n\n director = Director(directors_count)\n headteacher = HeadTeacher(headteachers_count)\n teacher = Teacher(teachers_count)\n cleaner = Cleaner(cleaners_count)\n students_invoice = (director.total_salary_of_position() + headteacher.total_salary_of_position()\n + teacher.total_salary_of_position() + cleaner.total_salary_of_position())//students_count\n print(f\"Expected invoice from each student is {students_invoice} UAH\")\n\n"
},
{
"alpha_fraction": 0.6666666865348816,
"alphanum_fraction": 0.7080103158950806,
"avg_line_length": 18.350000381469727,
"blob_id": "4ce9f62f45de7340b3bda2eea3dd70a707260dd2",
"content_id": "c923bcd880ebc81ae2e90097baf390275b2ba7c9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 387,
"license_type": "no_license",
"max_line_length": 131,
"num_lines": 20,
"path": "/HW4/StrEx1.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#Exercise 1: Given a string of odd length greater than 7, return a new string made of the middle three characters of a given String\n#Given:\n#Case 1\nstr1 = \"JhonDipPeta\"\n#Output\n#Dip\n#Case 2\nstr2 = \"JaSonAy\"\n#Output\n#Son\n\n\ndef get3chars(str):\n middle = len(str) // 2\n middle3chars = str[middle - 1:middle + 2]\n return middle3chars\n\n\nprint(get3chars(str1))\nprint(get3chars(str2))\n"
},
{
"alpha_fraction": 0.6549520492553711,
"alphanum_fraction": 0.690095841884613,
"avg_line_length": 23.076923370361328,
"blob_id": "c5dbbbbe0fc912d5da33c0017ab1ee584687d2a6",
"content_id": "2cb5f24fbcf7519236274c699691bb069812a7b8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 313,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 13,
"path": "/HW4/DictEx9.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#Exercise 9: Get the key of a minimum value from the following dictionary\nsampleDict = {\n 'Physics': 82,\n 'Math': 65,\n 'history': 75\n}\n#Expected output:\n#Math\n#Note. You can easily count the minimum number in collection using min function. I.e.\n#t = [1, 2, 3]\n#print(min(t))\n#1\nprint(min(sampleDict.values()))\n"
},
{
"alpha_fraction": 0.6428571343421936,
"alphanum_fraction": 0.7023809552192688,
"avg_line_length": 13,
"blob_id": "d54eba21ac2f7023bae77410557194d6bae070ea",
"content_id": "61b8f737ac1e42a1a82d1e036a6f183640e18035",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 84,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 6,
"path": "/HW3/TupleEx4.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#Exercise 4: Create a tuple with single item 50\n\n\naList = [50]\n\nprint(tuple(aList))\n"
},
{
"alpha_fraction": 0.5894039869308472,
"alphanum_fraction": 0.6655629277229309,
"avg_line_length": 29.100000381469727,
"blob_id": "9b1971f83cc970dde07a689c3a0e1c7f9202a809",
"content_id": "5eed3c356540be8c1ca66442781e2f81fb9f2edb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 302,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 10,
"path": "/HW4/StrEx3.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#Exercise 3: Given two strings, s1, and s2 return a new string made of the first,\n# middle, and last characters each input string\n#Given:\ns1 = \"America\"\ns2 = \"Japan\"\n#Expected Output:\n#AJrpan\nmiddle1 = len(s1)//2\nmiddle2 = len(s2)//2\nprint(s1[0] + s2[0] + s1[middle1] + s2[middle2] + s1[-1] + s2[-1])\n\n"
},
{
"alpha_fraction": 0.6157205104827881,
"alphanum_fraction": 0.6812227368354797,
"avg_line_length": 24.44444465637207,
"blob_id": "d81634cbf2e7f3fbf0b5beafe37bce62476ca687",
"content_id": "5e89af1ddcc9eb6b5b39307aa651828b742dab19",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 229,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 9,
"path": "/HW4/StrEx2.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#Exercise 2: Given two strings, s1 and s2, create a new string by appending s2 in the middle of s1\n#Given:\ns1 = \"Ault\"\ns2 = \"Kelly\"\n#Expected Output:\n#AuKellylt\nmiddle = len(s1)//2\ns3 = s1[:middle] + s2 + s1[middle+1:]\nprint(s3)\n"
},
{
"alpha_fraction": 0.5882509350776672,
"alphanum_fraction": 0.5954279899597168,
"avg_line_length": 23.402597427368164,
"blob_id": "7a5c2c949e495c00a7b60daca68c6bc807aea438",
"content_id": "6eabfe809c0d77c5b660f730967cc25c3cdf4b0d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4018,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 154,
"path": "/HW10/CoffeeTask.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "# До кави можна замовити різні заправки на кшталт молочної пінки, соєвого молока, мокко\n# (під цією назвою в кавєярнях фігурує додання до кави гарячого шоколаду або шоколадного сиропу)\n# та ще прикрасити все це збитими вершками. Заправки не безкоштовні,\n# тому їх необхідно вбудувати в систему оформлення замовлень.\nfrom abc import ABC, abstractmethod\n\n\ndef add_milk_foam(cls):\n def wrapper():\n def get_cost(self):\n return int(10)\n\n def get_name(self):\n return \"milk foam\"\n\n cls.get_cost = get_cost\n cls.get_name = get_name\n\n return cls\n\n return wrapper()\n\n\ndef add_soy_milk(cls):\n def wrapper():\n def get_cost(self):\n return int(15)\n\n def get_name(self):\n return \"soy milk\"\n\n cls.get_cost = get_cost\n cls.get_name = get_name\n\n return cls\n\n return wrapper()\n\n\ndef add_chocolate_syrup(cls):\n def wrapper():\n def get_cost(self):\n return int(20)\n\n def get_name(self):\n return \"chocolate syrup\"\n\n cls.get_cost = get_cost\n cls.get_name = get_name\n\n return cls\n\n return wrapper()\n\n\ndef add_whipped_cream(cls):\n def wrapper():\n def get_cost(self):\n return int(5)\n\n def get_name(self):\n return \"whipped cream\"\n\n cls.get_cost = get_cost\n cls.get_name = get_name\n\n return cls\n\n return wrapper()\n\n\nclass Coffee(ABC):\n\n def get_total_cost(self):\n pass\n\n @abstractmethod\n def description(self):\n pass\n\n\n@add_milk_foam\nclass HouseBlend(Coffee):\n def __init__(self, main_name=\"House Blend\", cost=15):\n self.main_name = main_name,\n self.cost = cost,\n self.add_name = self.get_name(),\n self.cost_add = self.get_cost()\n\n def get_total_cost(self):\n return self.cost[0] + self.cost_add\n\n def description(self):\n description = f\"{self.main_name[0]} with {self.add_name[0]}\"\n return description\n\n\n@add_soy_milk\nclass DarkRoast(Coffee):\n def __init__(self, main_name=\"Dark Roast\", cost=15):\n self.main_name = main_name,\n self.cost = cost,\n self.add_name = self.get_name(),\n self.cost_add = self.get_cost()\n\n def get_total_cost(self):\n return self.cost[0] + self.cost_add\n\n def description(self):\n description = f\"{self.main_name[0]} with {self.add_name[0]}\"\n return description\n\n\n@add_chocolate_syrup\nclass Decaf(Coffee):\n def __init__(self, main_name=\"Decaf\", cost=15):\n self.main_name = main_name,\n self.cost = cost,\n self.add_name = self.get_name(),\n self.cost_add = self.get_cost()\n\n def get_total_cost(self):\n return self.cost[0] + self.cost_add\n\n def description(self):\n description = f\"{self.main_name[0]} with {self.add_name[0]}\"\n return description\n\n\n@add_whipped_cream\nclass Espresso(Coffee):\n def __init__(self, main_name=\"Espresso\", cost=15):\n self.main_name = main_name,\n self.cost = cost,\n self.add_name = self.get_name(),\n self.cost_add = self.get_cost()\n\n def get_total_cost(self):\n return self.cost[0] + self.cost_add\n\n def description(self):\n description = f\"{self.main_name[0]} with {self.add_name[0]}\"\n return description\n\n\nif __name__ == \"__main__\":\n house_blend = HouseBlend()\n dark_rost = DarkRoast()\n decaf = Decaf()\n espresso = Espresso()\n print(f\"{house_blend.description()} costs {house_blend.get_total_cost()}$\")\n print(f\"{dark_rost.description()} costs {dark_rost.get_total_cost()}$\")\n print(f\"{decaf.description()} costs {decaf.get_total_cost()}$\")\n print(f\"{espresso.description()} costs {espresso.get_total_cost()}$\")\n\n\n\n\n"
},
{
"alpha_fraction": 0.5043988227844238,
"alphanum_fraction": 0.6598240733146667,
"avg_line_length": 16.100000381469727,
"blob_id": "39be4cf2b0de615cd85554370cc40292d9551f34",
"content_id": "44625db84bdade2f76914b7463ac995e654574ba",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 341,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 20,
"path": "/HW5/FuncEx2.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#Exercise 2: Write a function func1() such that it can accept a variable length of\n# argument and print all arguments value\n#func1(20, 40, 60)\n#func1(80, 100)\n#Expected Output:\n#func1(20, 40, 60)\n#20\n#40\n#60\n\n#func1(80, 100)\n#80\n#100\n\ndef func1(*args):\n for i in range(len(args)):\n print(args[i])\n\nfunc1(20, 40, 60)\n#func1(80, 100)"
},
{
"alpha_fraction": 0.47641509771347046,
"alphanum_fraction": 0.650943398475647,
"avg_line_length": 29.285715103149414,
"blob_id": "985358af391738d2f1958ae78af72249dd2ad938",
"content_id": "003c15a64b15258c28207184d519a1e96356d976",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 212,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 7,
"path": "/HW3/TupleEx7.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#Exercise 7: Modify the first item (22) of a list inside a following tuple to 222\ntuple1 = (11, [22, 33], 44, 55)\n#Expected output:\n#tuple1: (11, [222, 33], 44, 55)\n\ntuple1[1][0] = 222\nprint(f\"tuple1: {tuple1}\")\n"
},
{
"alpha_fraction": 0.44075828790664673,
"alphanum_fraction": 0.6161137223243713,
"avg_line_length": 15.15384578704834,
"blob_id": "3a201f6159f94d800d1aaa338e72bbeb524c6695",
"content_id": "70806670ce7af23be47e589cd5faea2cf68de790",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 211,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 13,
"path": "/HW3/ForEx4.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#Exercise 4: Reverse the following list using for loop\n#list1 = [10, 20, 30, 40, 50]\n#Expected output:\n#50\n#40\n#30\n#20\n#10\n\nlist1 = [10, 20, 30, 40, 50]\n\nfor i in range(1, len(list1) + 1):\n print(list1[-i])\n\n"
},
{
"alpha_fraction": 0.6690140962600708,
"alphanum_fraction": 0.672535240650177,
"avg_line_length": 27.399999618530273,
"blob_id": "c19f0e508e6235e7f948eb19802204a5417f7adb",
"content_id": "52bca9f86aa2ee15061b80927e0efd93220d4dca",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 284,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 10,
"path": "/HW3/SetEx1.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#Exercise 1: Add a list of elements to a given set\n#Given:\nsampleSet = {\"Yellow\", \"Orange\", \"Black\"}\nsampleList = [\"Blue\", \"Green\", \"Red\"]\n#Expected output:\n#Note: Set is unordered.\n#{'Green', 'Yellow', 'Black', 'Orange', 'Red', 'Blue'}\n\nsampleSet.update(sampleList)\nprint(sampleSet)\n"
},
{
"alpha_fraction": 0.6647454500198364,
"alphanum_fraction": 0.6964457035064697,
"avg_line_length": 30.57575798034668,
"blob_id": "1dfdee2fc5ec3425a43640328cb4797fc5d3cd3a",
"content_id": "a63253483385823278d198b0f714607b0a997afc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1292,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 33,
"path": "/HW2/Task1163.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#У буртi було 600 кг кавунiв. Першого дня продали 1/6 всiх кавунiв,\n#а другого - на 27 кг бiльше. Скiльки кг кавунiв залишилося?\n\ntotal_watermelons = 600\n\n\ndef find_first_day_sales():\n sales_1st_day = total_watermelons / 6\n return sales_1st_day\n\n\ndef find_second_day_sales(first_day):\n sales_2nd_day = first_day + 27\n return sales_2nd_day\n\n\ndef find_remainder(first_day, second_day):\n remainder = total_watermelons - first_day - second_day\n return remainder\n\n\nfirst_day = (find_first_day_sales())\nsecond_day = (find_second_day_sales(first_day))\nleft = (find_remainder(first_day, second_day))\nprint(f\"\"\"\nУмова:\nУ буртi було 600 кг кавунiв. Першого дня продали 1/6 всiх кавунiв,\nа другого - на 27 кг бiльше. Скiльки кг кавунiв залишилося?\n\"\"\")\nprint(f\"1 дiя: 600 : 6 = {first_day} (к.) - продали першого дня\")\nprint(f\"2 дiя: {first_day} + 27 = {second_day} (к.) - продали другого дня\")\nprint(f\"3 дiя: {total_watermelons} - {first_day} - {second_day} = {left} (к.) - залишилося\")\nprint(f\"Вiдповiдь: {left} кг кавунiв залишилося.\")"
},
{
"alpha_fraction": 0.6419437527656555,
"alphanum_fraction": 0.6751918196678162,
"avg_line_length": 17.571428298950195,
"blob_id": "268c29ac0aa663b281fa8740c71c6ab1bbd946f2",
"content_id": "d723142beb8d7069a8ebf146b90a5e50216eec50",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 397,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 21,
"path": "/HW4/StrEx7.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#Exercise 7: String characters balance Test\n#We’ll assume that a String s1 and s2 is balanced if all the chars in the s1\n# are there in s2. characters’ position doesn’t matter.\n#Given:\n#Case 1:\n#s1 = \"Yn\"\n#s2 = \"PYnative\"\n#Expected Output:\n#True\n#Case 2:\ns1 = \"Ynf\"\ns2 = \"PYnative\"\n#Expected Output:\n#False\n\ncheck = True\n\nfor s in s1:\n if s not in s2:\n check = False\nprint(check)\n\n"
},
{
"alpha_fraction": 0.6279069781303406,
"alphanum_fraction": 0.6828752756118774,
"avg_line_length": 30.46666717529297,
"blob_id": "595f43ab6c5e5a71d98391afb7a7d604cf8ff192",
"content_id": "80c7f254ca9f21198b35bccaad2343b9d1a88daf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 473,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 15,
"path": "/HW4/StrEx9.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#Exercise 9: Given a string, return the sum and average of the digits that appear in the string, ignoring all other characters\n#Given:\nstr1 = \"English = 78 Science = 83 Math = 68 History = 65\"\n#Expected Outcome:\n#sum is 294\n#average is 73.5\ndigits = []\n\n\nfor s in range(len(str1)):\n if str1[s].isdigit() and str1[s-1].isdigit():\n digits.append(int(str1[s-1]+str1[s]))\nsum1 = sum(digits)\naverage = sum1 / len(digits)\nprint(f\"sum is {sum1}\\naverage is {average}\")\n\n"
},
{
"alpha_fraction": 0.6726618409156799,
"alphanum_fraction": 0.701438844203949,
"avg_line_length": 26.799999237060547,
"blob_id": "b7966c0c6d5bb176c7927126ad7d4d683dcb2169",
"content_id": "fc6f6c8695f31516adeb078dae5af07fcd4dd7b9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 282,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 10,
"path": "/HW4/StrEx8.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#Exercise 8: Find all occurrences of “USA” in a given string ignoring the case\n#Given:\nstr1 = \"Welcome to USA. usa awesome, isn't it?\"\n#Expected Outcome:\n#The USA count is: 2\n\nstr2 = \"usa\"\nlow_str1 = str1.lower()\ncount = low_str1.count(str2)\nprint(f\"The USA count is: {count}\")\n"
},
{
"alpha_fraction": 0.5135951638221741,
"alphanum_fraction": 0.5377643704414368,
"avg_line_length": 24.461538314819336,
"blob_id": "91b4336c7fecf9fc5e0d429f7628e84c2677f10d",
"content_id": "667229ce057f87b5de5f788fae707f6b98f4b5d0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 331,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 13,
"path": "/HW3/ListEx2.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#Exercise 2: Concatenate two lists index-wise\n#Given:\n#list1 = [\"M\", \"na\", \"i\", \"Ke\"]\n#list2 = [\"y\", \"me\", \"s\", \"lly\"]\n#Expected output:\n#['My', 'name', 'is', 'Kelly']\n\nlist1 = [\"M\", \"na\", \"i\", \"Ke\"]\nlist2 = [\"y\", \"me\", \"s\", \"lly\"]\nnew_list = []\nfor i in range(len(list1)):\n new_list.append(list1[i] + list2[i])\nprint(new_list)\n"
},
{
"alpha_fraction": 0.35245901346206665,
"alphanum_fraction": 0.6516393423080444,
"avg_line_length": 33.85714340209961,
"blob_id": "ef2147e6203fb49deb585910231ef73a4d3f637b",
"content_id": "ceb93ed214dd954389176a321c7cac8ae37485b7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 244,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 7,
"path": "/HW3/ListEx6.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#Exercise 6: Add item 7000 after 6000 in the following Python List\n#Given:\nlist1 = [10, 20, [300, 400, [5000, 6000], 500], 30, 40]\n#Expected output:\n#[10, 20, [300, 400, [5000, 6000, 7000], 500], 30, 40\nlist1[2][2].insert(2, 7000)\nprint(list1)\n"
},
{
"alpha_fraction": 0.551886796951294,
"alphanum_fraction": 0.6415094137191772,
"avg_line_length": 12.1875,
"blob_id": "60f2c4ee48863e1b97366d979864b8e96c725623",
"content_id": "8b07cd3a719e3b1267eeb352cbc72f8833d7989b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 212,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 16,
"path": "/HW3/ForEx2.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#Exercise 2: Print multiplication table of a given number\n#For example, num = 2 so the output should be\n#2\n#4\n#6\n#8\n#10\n#12\n#14\n#16\n#18\n\nnum = int(input(\"Input num:\"))\n\nfor i in range(1, 11):\n print(num * i)\n\n"
},
{
"alpha_fraction": 0.6006006002426147,
"alphanum_fraction": 0.6366366147994995,
"avg_line_length": 22.785715103149414,
"blob_id": "2e6c4913dda1d4877cf758dda03db42d59d94fc2",
"content_id": "a8ef34b0b3f6d362a103b6febfbeef9ddd95c4cb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 333,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 14,
"path": "/HW4/StrEx15.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#Exercise 15: Remove special symbols/Punctuation from a given string\n#Given:\nstr1 = \"/*Jon is @developer & musician\"\n#Expected Output:\n#\"Jon is developer musician\"\n\nstr2 = []\nfor s in range(len(str1)):\n if str1[s].isalpha():\n str2.append(str1[s])\n elif str1[s] == \" \":\n str2.append(str1[s])\n\nprint(''.join(str2))\n"
},
{
"alpha_fraction": 0.6138211488723755,
"alphanum_fraction": 0.707317054271698,
"avg_line_length": 26.22222137451172,
"blob_id": "24cd0944c7865d591af39202999d4a8c34e39e72",
"content_id": "06ff49a290030a29fc650006b62e9e76646fdaa9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 246,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 9,
"path": "/HW3/SetEx4.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#Exercise 4: Given two Python sets, update the first set with items\n# that exist only in the first set and not in the second set.\nset1 = {10, 20, 30}\nset2 = {20, 40, 50}\n#Expected output:\n#set1 {10, 30}\n\nset1.difference_update(set2)\nprint(set1)\n\n"
},
{
"alpha_fraction": 0.3922652006149292,
"alphanum_fraction": 0.5082873106002808,
"avg_line_length": 11.928571701049805,
"blob_id": "d6ae8f6e41c504e71abd914089a804d763045273",
"content_id": "d4cd0cdd3c46cc6e8a61b640b29f960afab24e0f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 181,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 14,
"path": "/HW3/ForEx1.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#Exercise 1: Print the following pattern\n#1\n#1 2\n#1 2 3\n#1 2 3 4\n#1 2 3 4 5\n\nn = 5\n\n\nfor i in range(1, n + 1):\n for j in range(1, i + 1):\n print(j, end=\" \")\n print(\"\")\n"
},
{
"alpha_fraction": 0.6742502450942993,
"alphanum_fraction": 0.7063081860542297,
"avg_line_length": 34.814815521240234,
"blob_id": "52adde8b147e11a3c3964e74ed6e54172dec787e",
"content_id": "ff31d5d204070245b04c2315be30eec364b20c37",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 967,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 27,
"path": "/HW5/TimeMashine30Dates.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "# Year in the proper decade printing task\n# Using the time_machine function try to generate 30 date variants but print only those\n# dates which pass to the desired year decade. I.e. for 1979 the decade will be between 1970 and 1980.\n\nfrom datetime import date\nimport random\n\ninputted_date = input(\"Input date in format YYYY-MM-DD: \")\n\n\ndef time_machine(full_date):\n full_date = date.fromisoformat(full_date)\n year = full_date.year\n month = full_date.month\n day = full_date.day\n first_year = year // 100 * 100\n last_year = first_year + 100\n year_given_by_time_machine = random.randint(first_year, last_year)\n return date(year_given_by_time_machine, month, day)\n\n\nfor _ in range(30):\n current_date = time_machine(inputted_date)\n first_decade_year = date.fromisoformat(inputted_date).year // 10 * 10\n last_decade_year = first_decade_year +10\n if first_decade_year <= current_date.year <= last_decade_year:\n print(current_date)\n"
},
{
"alpha_fraction": 0.5277777910232544,
"alphanum_fraction": 0.6620370149612427,
"avg_line_length": 20.600000381469727,
"blob_id": "54b446ac401a42aef3dc1b90b6c7bddc2c570e28",
"content_id": "7aaccc82228b465773100319db19827a3a2d0070",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 216,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 10,
"path": "/HW3/TupleEx5.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#Exercise 5: Swap the following two tuples\ntuple1 = (11, 22)\ntuple2 = (99, 88)\n#Expected output:\n#tuple1 = (99, 88)\n#tuple2 = (11, 22)\n\ntuple1, tuple2 = tuple2, tuple1\n\nprint(f\"tuple1 = {tuple1}\\ntuple2 = {tuple2}\")\n"
},
{
"alpha_fraction": 0.3462783098220825,
"alphanum_fraction": 0.38187703490257263,
"avg_line_length": 11.875,
"blob_id": "d913daf18a4f81be8f2e5b83f36a6d8281001dca",
"content_id": "6a881506dd30ae67de9ac2665e91c3ff41b9ca61",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 309,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 24,
"path": "/HW3/ForEx6.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#Exercise 6: Print the following pattern\n#*\n#* *\n#* * *\n#* * * *\n#* * * * *\n#* * * *\n#* * *\n#* *\n#*\n\nn = 5\n\n\nfor i in range(1, n + 1):\n for j in range(1, i + 1):\n print(\"*\", end=\" \")\n print(\"\")\n\n\nfor i in range(n-1, 0, -1):\n for j in range(i, 0, -1):\n print(\"*\", end=\" \")\n print(\"\")\n"
},
{
"alpha_fraction": 0.5319148898124695,
"alphanum_fraction": 0.6063829660415649,
"avg_line_length": 22.5,
"blob_id": "3630290670a2efa9d0e34d2c9c6239769820bb0f",
"content_id": "b10f473e23bb477c25375f2bf0e50ac77a981e27",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 282,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 12,
"path": "/HW4/DictEx1.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#Exercise 1: Below are the two lists convert it into the dictionary\nkeys = ['Ten', 'Twenty', 'Thirty']\nvalues = [10, 20, 30]\n#Expected output:\n#{'Ten': 10, 'Twenty': 20, 'Thirty': 30}\n\ndict1 = {\n keys[0]: values[0],\n keys[1]: values[1],\n keys[2]: values[2],\n}\nprint(dict1)\n"
},
{
"alpha_fraction": 0.6666666865348816,
"alphanum_fraction": 0.6944444179534912,
"avg_line_length": 24.714284896850586,
"blob_id": "6e28bb281a25d39b003997595ce52157292e1360",
"content_id": "715d1c862af80c4df27e9451d3f977adf799ab45",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 180,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 7,
"path": "/HW5/FuncEx1.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#Exercise 1: Create a function that can accept two arguments name and age and print its value\n\ndef func1(name, age):\n print(f\"name is {name}, age is {age}\")\n\n\nfunc1(\"John\", 50)\n"
},
{
"alpha_fraction": 0.7165354490280151,
"alphanum_fraction": 0.7440944910049438,
"avg_line_length": 30.625,
"blob_id": "082ee8a82b6e8412d432fa0bec7bc55c04656120",
"content_id": "fede996362ed8fd816475e5aa7b21a5a9451c5e0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 254,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 8,
"path": "/HW4/DictEx4.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#Exercise 4: Initialize dictionary with default values\n#Given:\nfrom pprint import pprint\nemployees = ['Kelly', 'Emma', 'John']\ndefaults = {\"designation\": 'Application Developer', \"salary\": 8000}\n\ndict1 = dict.fromkeys(employees, defaults)\npprint(dict1)\n\n"
},
{
"alpha_fraction": 0.6477046012878418,
"alphanum_fraction": 0.6487026214599609,
"avg_line_length": 33.55172348022461,
"blob_id": "1d187b2ae3c6af04c71c88e3c6c92313c0bca97b",
"content_id": "ebb9e409d92a8264accb3775494bfcf71500f17f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1129,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 29,
"path": "/HW7/Task1.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "# Завдання №1\n# Переробити домашню вправу на декоратори (минуле заняття) таким чином,\n# щоб вихідні заяви записувались у файл, шлях до якого вказує користувач :)\nimport os\n\nrequest_type = input(\"Enter type of vacation request, one of Vacation, Sick leave, Day off: \")\nfirst_name = input(\"Enter your first name: \")\nsurname = input(\"Enter your surname: \")\nfrom_date = input(f\"Enter date from which your {request_type} starts: \")\nto_date = input(f\"Enter date when your {request_type} ends: \")\n\n\ndef run_logger(func):\n def wrapper(*args, **kwargs):\n with open(os.path.join(os.path.curdir, \"request.txt\"), \"w\") as request:\n request.write(f\"Title: \\nCEO Red Bull Inc. \\nMr. John Bigbull\\nVacation type: {request_type} Pattern\\nHi \"\n f\"John,\\nI need the paid {request_type} from {from_date} to {to_date}.\\n{first_name} \"\n f\"{surname}\")\n return func(*args, **kwargs)\n\n return wrapper\n\n\n@run_logger\ndef func():\n return None\n\n\nfunc()\n"
},
{
"alpha_fraction": 0.5890411138534546,
"alphanum_fraction": 0.7123287916183472,
"avg_line_length": 23.33333396911621,
"blob_id": "f411f391893ad8d163eacedd4cfd411708a01702",
"content_id": "f19d3aec57a2231c6b5554e4dca689bf4468f9d6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 146,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 6,
"path": "/HW3/TupleEx8.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#Exercise 8: Counts the number of occurrences of item 50 from a tuple\ntuple1 = (50, 10, 60, 70, 50)\n#Expected output:\n#2\n\nprint(tuple1.count(50))\n"
},
{
"alpha_fraction": 0.5874999761581421,
"alphanum_fraction": 0.6299999952316284,
"avg_line_length": 20.105262756347656,
"blob_id": "42e2109fe70751dbdfa2957cd77d3f86753b19d7",
"content_id": "5dc6e3306c4290b0c37a4220250e03f72fbfb55c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 400,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 19,
"path": "/HW4/DictEx5.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#Exercise 5: Create a new dictionary by extracting the following keys from a below dictionary\n# Given dictionary:\nsampleDict = {\n \"name\": \"Kelly\",\n \"age\": 25,\n \"salary\": 8000,\n \"city\": \"New york\"\n\n}\n# Keys to extract\nkeys = [\"name\", \"salary\"]\n#Expected output:\n#{'name': 'Kelly', 'salary': 8000}\n\ndict1 = {\n keys[0]: sampleDict[keys[0]],\n keys[1]: sampleDict[keys[1]]\n}\nprint(dict1)"
},
{
"alpha_fraction": 0.7089337110519409,
"alphanum_fraction": 0.7348703145980835,
"avg_line_length": 25.69230842590332,
"blob_id": "7dcd9e7f437e117b1bed2bff99bec372475005de",
"content_id": "b81c3776db78139e8a83aca2daf86b0a9c4ab30f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 347,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 13,
"path": "/HW5/FuncEx3.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#Exercise 3: Write a function calculation() such that it can accept two variables\n# and calculate the addition and subtraction of them. And also it must return both\n# addition and subtraction in a single return call\n#Given:\ndef calculation(a, b):\n# Your Code\n return (a+b), (a-b)\n\n\nres = calculation(40, 10)\nprint(res)\n#Expected Output\n#50, 30\n"
},
{
"alpha_fraction": 0.49496981501579285,
"alphanum_fraction": 0.577464759349823,
"avg_line_length": 32.13333511352539,
"blob_id": "6e87b1f275ed96d85cc83f44e356c439f6999eff",
"content_id": "21f4c9520d484c6518da207de479348be5e6c779",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 499,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 15,
"path": "/HW4/DictEx10.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "from pprint import pprint\n#Exercise 10: Change Brad’s salary to 8500 from a given Python dictionary\nsampleDict = {\n 'emp1': {'name': 'Jhon', 'salary': 7500},\n 'emp2': {'name': 'Emma', 'salary': 8000},\n 'emp3': {'name': 'Brad', 'salary': 6500}\n}\n#Expected output:\n# sampleDict = {\n# 'emp1': {'name': 'Jhon', 'salary': 7500},\n# 'emp2': {'name': 'Emma', 'salary': 8000},\n# 'emp3': {'name': 'Brad', 'salary': 8500}\n# }\nsampleDict[\"emp3\"][\"salary\"] = 8500\npprint(sampleDict)\n"
},
{
"alpha_fraction": 0.5071428418159485,
"alphanum_fraction": 0.6000000238418579,
"avg_line_length": 22.33333396911621,
"blob_id": "2a87d13a89e117491033b7dc980f7ac98ba4695a",
"content_id": "5994e7d5f1f1c1e63267dc5f1ca69272c774cceb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 280,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 12,
"path": "/HW3/ListEx3.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#Exercise 3: Given a Python list of numbers. Turn every item of a list into its square\n#Given:\n#aList = [1, 2, 3, 4, 5, 6, 7]\n#Expected output:\n#[1, 4, 9, 16, 25, 36, 49]\n\naList = [1, 2, 3, 4, 5, 6, 7]\n\nfor i in range(len(aList)):\n aList[i] = aList[i] * aList[i]\n\nprint(aList)\n"
},
{
"alpha_fraction": 0.5336538553237915,
"alphanum_fraction": 0.6778846383094788,
"avg_line_length": 28.714284896850586,
"blob_id": "a862624b685ba4c9d43aa5551d18747f73f1fe4b",
"content_id": "1475b89baf2dc70ee29bd84a2fb80d83f27eadf7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 208,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 7,
"path": "/HW3/TupleEx6.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#Exercise 6: Copy element 44 and 55 from the following tuple into a new tuple\ntuple1 = (11, 22, 33, 44, 55, 66)\n#Expected output:\n#tuple2: (44, 55)\n\ntuple2 = (tuple1[3], tuple1[4])\nprint(f\"tuple2: {tuple2}\")\n"
},
{
"alpha_fraction": 0.5919661521911621,
"alphanum_fraction": 0.6279069781303406,
"avg_line_length": 21.4761905670166,
"blob_id": "01545742e971a337f5e514145e416d32acc1e781",
"content_id": "4f70bd0ce0ec8412ac9c1629163e30a6d15ed8da",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 473,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 21,
"path": "/HW4/StrEx5.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#Exercise 5: Count all lower case, upper case, digits, and special symbols from a given string\n#Given:\nstr1 = \"P@#yn26at^&i5ve\"\n#Expected Outcome:\n#Total counts of chars, digits,and symbols\n#Chars = 8\n#Digits = 3\n#Symbol = 4\n\nChars = 0\nDigits = 0\nSymbol = 0\nfor s in range(len(str1)):\n if str1[s].isdigit():\n Digits += 1\n elif str1[s].isalpha():\n Chars += 1\n else:\n Symbol +=1\n\nprint(f\"Chars = {Chars}\\nDegits = {Digits}\\nSymbol = {Symbol}\")\n\n"
},
{
"alpha_fraction": 0.698113203048706,
"alphanum_fraction": 0.7099056839942932,
"avg_line_length": 34.33333206176758,
"blob_id": "3d4c8a7c9a34bdc0d77ce4db343f6bbd9ced5fd9",
"content_id": "1f456132137d73081a1ecddcee7df558fc23f0ec",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 430,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 12,
"path": "/HW5/FuncEx4.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "# Exercise 4: Create an inner function to calculate the addition in the following way\n# •\tCreate an outer function that will accept two parameters, a and b\n# •\tCreate an inner function inside an outer function that will calculate the addition of a and b\n# •\tAt last, an outer function will add 5 into addition and return it\n\ndef outer(a, b):\n def inner():\n return a + b\n return inner() + 5\n\n\nprint(outer(1, 2))\n"
},
{
"alpha_fraction": 0.7230273485183716,
"alphanum_fraction": 0.7407407164573669,
"avg_line_length": 30.049999237060547,
"blob_id": "6485250818a6af59406738631d7cb31b34f282b4",
"content_id": "6f9fa9ec8a1fe85173c125c836c0104bff9a4026",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 843,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 20,
"path": "/HW2/Task1172.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "# Зiбрали 322 кг плодiв шипшини. Пiсля сушiння маса плодiв зменшилась удвiчi.\n# Скiльки вийшло кiлограмiв сухих плодiв шипшини?\n\ncollected_dog_rose = 322\nreduced_times = 2\n\n\ndef find_after_dry():\n after_dry = collected_dog_rose / reduced_times\n return after_dry\n\n\ndog_roses_after_dry = find_after_dry()\nprint(f\"\"\"\nУмова:\nЗiбрали 322 кг плодiв шипшини. Пiсля сушiння маса плодiв зменшилась удвiчi.\nСкiльки вийшло кiлограмiв сухих плодiв шипшини?\n\"\"\")\nprint(f\"1 дiя: {collected_dog_rose} / {reduced_times} = {dog_roses_after_dry} (кг.) - сухих плодiв\")\nprint(f\"Вiдповiдь: {dog_roses_after_dry} зiйшло сажанцiв дуба\")\n"
},
{
"alpha_fraction": 0.6329966187477112,
"alphanum_fraction": 0.6936026811599731,
"avg_line_length": 27.70967674255371,
"blob_id": "4641b409bb3301a2c4c03f004a9ff0ec31284d8b",
"content_id": "b19d8e43bf7664a0f11e97e75ca55f7092920412",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 891,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 31,
"path": "/HW6/DecorEx3.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "# Task #3\n# Generate 30 dates using time_machine function from previous home task\n# and generate 30 date for the exact date 2001-02-12. Create list which contains dates\n# between 2001 and 2010. Do not use for loops nor comprehensive lists.\n\nfrom datetime import date\nimport random\n\ninputted_date = \"2001-02-01\"\n\n\ndef time_machine(full_date):\n full_date = date.fromisoformat(full_date)\n year = full_date.year\n month = full_date.month\n day = full_date.day\n first_year = year // 100 * 100\n last_year = first_year + 100\n year_given_by_time_machine = random.randint(first_year, last_year)\n return date(year_given_by_time_machine, month, day)\n\n\ncounter = 0\nlist1 = []\nwhile counter <= 30:\n current_date = time_machine(inputted_date)\n counter += 1\n if 2001 <= current_date.year <= 2010:\n list1.append(current_date.isoformat())\n counter += 1\nprint(list1)\n\n"
},
{
"alpha_fraction": 0.5730129480361938,
"alphanum_fraction": 0.6025878190994263,
"avg_line_length": 22.521739959716797,
"blob_id": "8ddd7efd8cfa0e14e09e482f2b66489abdf15551",
"content_id": "c5a8c8b7c736f631d3ae37879b35c3f761a2fc14",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 541,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 23,
"path": "/HW4/StrEx13.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#Exercise 13: Split a given string on hyphens into several substrings and display each substring\n#Given:\nstr1 = \"Emma-is-a-data-scientist\"\n#Expected Output:\n#Displaying each substring\n#Emma\n#is\n#a\n#data\n#scientist\n\nindex = []\nfor s in range(len(str1)):\n if str1[s] == \"-\":\n index.append(s)\nfor i in range(len(index)):\n if i == 0:\n print(str1[:(index[i])])\n print(str1[(index[i]+1):index[i+1]])\n elif i == (len(index)-1):\n print(str1[(index[i]+1):])\n else:\n print(str1[(index[i]+1):index[i+1]])\n"
},
{
"alpha_fraction": 0.5782924294471741,
"alphanum_fraction": 0.5983408093452454,
"avg_line_length": 38.6301383972168,
"blob_id": "7632a55037da50074312d8a4055b0edc95238c6e",
"content_id": "d49fa5f6c0af8d4740cf65605b8afdd9962659c0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3325,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 73,
"path": "/HW7/Task3.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "# Завдання №3\n# Створіть програму гри у кості (у нас 5 костей, кожна має від 1 до 6 точок).\n# Щоразу гравець кидає кості та отримує очки. Комбінації наступні:\n# 2 кості з однаковими очками - кількість точок * 2\n# 3 кості з однаковми очками - кількість очків * 4\n# 2 пари костей з однаковими очками - кількість очків * 6\n# 4 кості з однаковими очками - кількість очків * 10\n# всі кості з однаковими очками - кількість очків * 20\n# В інших випадках - сума всіх очків кожної кості\n# Очки записуються в текстовий файл. Кості повинні викидатися з об'єкта ґенератора, створеного через функцію.\n\n# IMPLEMENTATION NOTE!\n# 1. sum of all points are multiplied on multiplier,\n# 2. for case where are 3 equal numbers and 2 another equal numbers sum is multiplied for both 2 and 4 multipliers\n\nimport random\nimport os\n\n\ndef throw_cubes():\n count = 0\n while count <= 5:\n result = random.randint(1, 6)\n count += 1\n yield result\n\n\ndef count_points():\n list_of_results = []\n for i in range(5):\n list_of_results.append(next(throw_cubes()))\n print(list_of_results)\n sum_of_points = sum(list_of_results)\n print(f\"Sum of all points is {sum_of_points}\")\n for j in range(5):\n if list_of_results.count(list_of_results[j]) == 5:\n print(\"There are 5 equal numbers\")\n return sum_of_points*20\n elif list_of_results.count(list_of_results[j]) == 4:\n print(\"There are 4 equal numbers\")\n return sum_of_points*10\n elif list_of_results.count(list_of_results[j]) == 3:\n list_of_results = [s for s in list_of_results if s != list_of_results[j]]\n print(list_of_results)\n for h in range(2):\n if list_of_results.count(list_of_results[h]) == 2:\n print(\"There are 3 equal numbers and 2 another equal numbers\")\n return sum_of_points * 4 * 2\n else:\n continue\n print(\"There are 3 equal numbers\")\n return sum_of_points*4\n elif list_of_results.count(list_of_results[j]) == 2:\n list_of_results.pop(j)\n for h in range(4):\n if list_of_results.count(list_of_results[h]) == 3:\n print(\"There are 2 equal numbers and 3 another equal numbers\")\n return sum_of_points * 4 * 2\n elif list_of_results.count(list_of_results[h]) == 2:\n print(\"There are 2 equal numbers and 2 another equal numbers\")\n return sum_of_points * 6\n else:\n continue\n print(\"There are 2 equal numbers\")\n return sum_of_points * 2\n else:\n continue\n print(\"No equal numbers\")\n return sum_of_points\n\n\nwith open(os.path.join(os.path.curdir, \"points.txt\"), \"w\") as points:\n points.write(str(count_points()))\n"
},
{
"alpha_fraction": 0.4871794879436493,
"alphanum_fraction": 0.5096153616905212,
"avg_line_length": 19.866666793823242,
"blob_id": "a647ff1182a9a7e9823e820ff5a842df5746f69e",
"content_id": "a9bb8955bd8ee1df98c9309a35f23f12ce7a4c12",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 316,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 15,
"path": "/HW4/DictEx3.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#Exercise 3: Access the value of key ‘history’ from the below dict\nsampleDict = {\n \"class\":{\n \"student\":{\n \"name\":\"Mike\",\n \"marks\":{\n \"physics\":70,\n \"history\":80\n }\n }\n }\n}\n#Expected output:\n#80\nprint(sampleDict[\"class\"][\"student\"][\"marks\"][\"history\"])"
},
{
"alpha_fraction": 0.6601467132568359,
"alphanum_fraction": 0.6955990195274353,
"avg_line_length": 29.33333396911621,
"blob_id": "0e00ae8bd3da4201b4f7e4deb89ea5595e7d4e84",
"content_id": "1afe066cd799a332516bc26ce1ee77756094d260",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1099,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 27,
"path": "/HW2/Task1168.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "# Коли щодня витрачати на опалення 9 кг вугiлля, то його вистачить на 72 днi.\n# На скiльки днiв вистачить вугiлля, коли щодня витрачати 8 кг\n\ncoal_1_case = 9\ndays_1_case = 72\n\n\ndef find_total_coals():\n total = coal_1_case * days_1_case\n return total\n\n\ndef find_days_2_case(total):\n days_2_case = total / 8\n return days_2_case\n\n\ntotal_coals = find_total_coals()\ndays_2case = (find_days_2_case(total_coals))\nprint(f\"\"\"\nУмова:\nКоли щодня витрачати на опалення 9 кг вугiлля, то його вистачить на 72 днi.\nНа скiльки днiв вистачить вугiлля, коли щодня витрачати 8 кг?\n\"\"\")\nprint(f\"1 дiя: {coal_1_case} * {days_1_case} = {total_coals} (в.) - всього\")\nprint(f\"2 дiя: {total_coals} : 8 = {days_2case} (д.) - вистачить вугiлля\")\nprint(f\"Вiдповiдь: На {days_2case} днiв вистачить вугiлля, коли щодня витрачати 8 кг.\")"
},
{
"alpha_fraction": 0.5542168617248535,
"alphanum_fraction": 0.6787148714065552,
"avg_line_length": 26.66666603088379,
"blob_id": "9ae20de480dc46d8547ce991788a00513fe623e1",
"content_id": "c9aa424018b0abed9fc22f828e3be4d745ae3756",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 257,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 9,
"path": "/HW3/SetEx2.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#Exercise 2: Return a new set of identical items from a given two set\nset1 = {10, 20, 30, 40, 50}\nset2 = {30, 40, 50, 60, 70}\n#Expected output:\n#{40, 50, 30}\n#Note. Try “intersection” method of “set” object\n\nnew = set2.intersection(set1)\nprint(new)\n"
},
{
"alpha_fraction": 0.5347222089767456,
"alphanum_fraction": 0.6597222089767456,
"avg_line_length": 23,
"blob_id": "3612567a0422cff2aa23c1e54f7b3bd4578a5dc2",
"content_id": "087f12f09ad3db1de0245e8f4dc91c1aab066bc6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 144,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 6,
"path": "/HW3/TupleEx3.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#Exercise 3: Access value 20 from the following tuple\naTuple = (\"Orange\", [10, 20, 30], (5, 15, 25))\n#Expected output:\n#20\n\nprint(aTuple[1][1])\n"
},
{
"alpha_fraction": 0.3156643211841583,
"alphanum_fraction": 0.3227972090244293,
"avg_line_length": 36.23958206176758,
"blob_id": "1765374a246002919a31e5d29ca8f1dad968d500",
"content_id": "950ef6fb47094f0ebed4cde28ff1b422c88c35e9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7289,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 192,
"path": "/HW8/Task1.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "# Завдання №1\n# Переробити завдання зі школою зі словника у клас!\n# Та мусимо зважити на те, що все дії та обчислення я хочу бачити через виклик методів.\n# Жодних вільних функцій.\nimport names\n\ndictSch = {\n \"Director\": \"Mr. John Smith\",\n \"Wise-Director\": \"Mrs. Jane Air\",\n \"Classes\": {\n 1: {\n \"Teacher\": \"Mrs. Liz Faxon\",\n \"Students\": [\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 10\n },\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 11\n },\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 5\n },\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 10\n },\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 9\n },\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 12\n },\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 2\n }\n ]\n },\n 2: {\n \"Teacher\": \"Mr. John Snow\",\n \"Students\": [\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 4\n },\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 5\n },\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 6\n },\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 7\n },\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 8\n },\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 9\n },\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 10\n }\n ],\n\n },\n 3: {\n \"Teacher\": \"Mrs. Jenny Pow\",\n \"Students\": [\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 11\n },\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 2\n },\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 5\n },\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 8\n },\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 9\n },\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 12\n },\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 5\n }\n ],\n\n },\n 4: {\n \"Teacher\": \"Mr. 
Tom Watson\",\n \"Students\": [\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 6\n },\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 11\n },\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 10\n },\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 11\n },\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 9\n },\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 8\n },\n {\n \"First name\": names.get_first_name(),\n \"Second name\": names.get_last_name(),\n \"Average\": 7\n }\n ]\n }\n }\n }\n\n\nclass DictSchool:\n @classmethod\n def find_sum(cls, dict1, students_count, sum_marks):\n for i in range(1, len(dict1[\"Classes\"]) + 1):\n students_count += len(dict1[\"Classes\"][i][\"Students\"])\n students = dict1[\"Classes\"][i][\"Students\"]\n for j in range(len(students)):\n sum_marks += students[j][\"Average\"]\n\n average = sum_marks / students_count\n\n print(f\"Average score of all students in all classes in the school is {average}\")\n\n\nif __name__ == \"__main__\":\n DictSchool.find_sum(dictSch, 0, 0)\n"
},
{
"alpha_fraction": 0.6148409843444824,
"alphanum_fraction": 0.685512363910675,
"avg_line_length": 27.299999237060547,
"blob_id": "5d53c9a9dca0c15ed45b478938555ff5144b4f81",
"content_id": "3d9c86b8bb1e4d38286e751c7f177c461156c271",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 283,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 10,
"path": "/HW4/DictEx7.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#Exercise 7: Check if a value 200 exists in a dictionary\nsampleDict = {'a': 100, 'b': 200, 'c': 300}\n#Expected output:\n#True\n#Note: You can check if something is in collection using in statement. I.e.\n#r = [1, 2, 3]\n#print(1 in r)\n#will print True\n\nprint(200 in sampleDict.values())\n"
},
{
"alpha_fraction": 0.558282196521759,
"alphanum_fraction": 0.7085889577865601,
"avg_line_length": 26.16666603088379,
"blob_id": "0049d88f3256bc6380c335f9a3e72dfffba70f7a",
"content_id": "2bc7e1f27af6bbd8194aa48dcd38877a6436b993",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 326,
"license_type": "no_license",
"max_line_length": 152,
"num_lines": 12,
"path": "/HW3/ListEx5.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#Exercise 5: Given a two Python list. Iterate both lists simultaneously such that list1 should display item in original order and list2 in reverse order\n#Given\nlist1 = [10, 20, 30, 40]\nlist2 = [100, 200, 300, 400]\n#Expected output:\n#10 400\n#20 300\n#30 200\n#40 100\n\nfor i in range(len(list1)):\n print(list1[i], list2[-i-1])\n"
},
{
"alpha_fraction": 0.4816513657569885,
"alphanum_fraction": 0.6651375889778137,
"avg_line_length": 26.25,
"blob_id": "31b3f3afcaa2080265e8ebf38c756b1438972564",
"content_id": "17804d42bebf84499bafefb72ece9386e58e7982",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 218,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 8,
"path": "/HW3/SetEx3.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#Exercise 3: Returns a new set with all items from both sets by removing duplicates\nset1 = {10, 20, 30, 40, 50}\nset2 = {30, 40, 50, 60, 70}\n#Expected output:\n#{70, 40, 10, 50, 20, 60, 30}\n\nset1.union(set2)\nprint(set1)\n"
},
{
"alpha_fraction": 0.6122449040412903,
"alphanum_fraction": 0.6147959232330322,
"avg_line_length": 25.016666412353516,
"blob_id": "10b577dc60a79250a2211e908e9e825ef647337b",
"content_id": "34911727aa18e27474694ad039630ec4987f8101",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1882,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 60,
"path": "/HW8/Task2.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#Розробити клас Людина. Людина має:\n#Ім'я\n# Прізвище\n# Вік (атрибут але ж змінний)\n# Стать\n# Люди можуть:\n# Їсти\n# Спати\n# Говорити\n# Ходити\n# Стояти\n# Лежати\n# Також ми хочемо контролювати популяцію людства. Змінювати популяцію можемо в __init__.\n# Треба сказати, що доступ до статичних полів класу з __init__ не можу іти через НазваКласу.статичий_атрибут,\n# позаяк ми не може бачити імені класу. Але натомість ми можемо сказати self.__class__.static_attribute.\n\nclass Human:\n def __init__(self, first_name: str, last_name: str, age: int, sex, population=1):\n self.first_name = first_name\n self.last_name = last_name\n self.age = age\n self.sex = sex\n self.population = population\n\n def print_my_info(self):\n print(f\"Hi! My first name is {self.first_name}, last name is {self.last_name}, I am {age} years old and i am \"\n f\"a {self.sex}.\")\n\n @staticmethod\n def eat(self):\n print(\"I am eating\")\n\n @staticmethod\n def talk(self):\n print(\"I am talking\")\n\n @staticmethod\n def sleep(self):\n print(\"I am sleeping\")\n\n @staticmethod\n def walk(self):\n print(\"I am walking\")\n\n @staticmethod\n def lay(self):\n print(\"I am laying\")\n\n @staticmethod\n def stay(self):\n print(\"I am staying\")\n\n def born(self):\n self.population += 1\n self.age = 0\n print(f\"I am newborn. Now population = {self.population}\")\n\n def birthday(self):\n self.age += 1\n print(f\"It's my birthday! Now i'm {self.age} years old\")\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.6837748289108276,
"alphanum_fraction": 0.6895695328712463,
"avg_line_length": 33.514286041259766,
"blob_id": "5f47cdadd783903d5ade92939b93add7b263d6cb",
"content_id": "cab3a73ebd5c0b1e15efae130cd564b424e63cf7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1210,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 35,
"path": "/HW6/DecorEx1.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "# We have 3 vacation requests types:\n# 1. Vacation\n# 2. Sick leave\n# 3. Day off\n# Write a program, which will propose you to choose one of the vacation types,\n# enter First Name, Surname, from_date and to_date.\n# As a result, on the console should be displayed vacation request.\n# Each vacation type consists from 2 parts:\n# • Title (1 for all types. CEO Red Bull Inc. Mr. John Bigbull)\n# Vacation Request pattern (1 for each type. Listed below)\n# Request should contain entered First Name and Surname.\n\nrequest_type = input(\"Enter type of vacation request, one of Vacation, Sick leave, Day off: \")\nfirst_name = input(\"Enter your first name: \")\nsurname = input(\"Enter your surname: \")\nfrom_date = input(f\"Enter date from which your {request_type} starts: \")\nto_date = input(f\"Enter date when your {request_type} ends: \")\n\n\ndef run_logger(func):\n def wrapper(*args, **kwargs):\n print(\"Title: \\nCEO Red Bull Inc. \\nMr. John Bigbull\")\n print(f\"Vacation type: {request_type} Pattern\")\n print(f\"Hi John,\\nI need the paid {request_type} from {from_date} to {to_date}.\\n{first_name} {surname}\")\n return func(*args, **kwargs)\n\n return wrapper\n\n\n@run_logger\ndef func():\n return None\n\n\nfunc()\n"
},
{
"alpha_fraction": 0.7167530059814453,
"alphanum_fraction": 0.7253885865211487,
"avg_line_length": 35.1875,
"blob_id": "bb7bbc98fa8d195a3389bf86567718137f836b4e",
"content_id": "2052706e039131b21f51e2bc05716dea4058b0f9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 579,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 16,
"path": "/HW5/Timer.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "# Exercise regarding while loop and generator expression\n# Create chronometry function which counts time back from\n# given amount of the seconds to 0 and shows the seconds on\n# the console. Use while loop rather than for. Also use generator\n# object to calculate next second value rather than usual counter variable.\nimport time\n\ntotal = int(input(\"Enter total amount of seconds: \"))\ngenerator_counter = (s for s in range(total, -1, -1))\nwhile True:\n second = next(generator_counter)\n print(f\"{second}\")\n time.sleep(1)\n if second == 0:\n break\nprint(\"Finish!\")\n"
},
{
"alpha_fraction": 0.6740654110908508,
"alphanum_fraction": 0.7009345889091492,
"avg_line_length": 29.571428298950195,
"blob_id": "2dde65bf61b2123804985c4eb0d2a1554ca9d258",
"content_id": "1de2d143dfbabca67a7188f4aefe7d9b8cf8e979",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1087,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 28,
"path": "/HW2/Task1169.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "# Маса 300 жолудiв 1 кг. У лiсорозсаднику посадили 2 кг жолудiв.\n# Не зiйшла десята частина жолудiв. Скiльки зiйшло сажанцiв дуба?\n\nweight_of_1_kg_acorns = 300\nkg_acrons_in_forest = 2\nnot_grow_acrons = 0.1\n\n\ndef find_total():\n total = weight_of_1_kg_acorns * kg_acrons_in_forest\n return total\n\n\ndef find_grown_oaks(total):\n grown = total * (1 - not_grow_acrons)\n return grown\n\n\ntotal_acrons = find_total()\ngrown_oaks = find_grown_oaks(total_acrons)\nprint(f\"\"\"\nУмова:\nМаса 300 жолудiв 1 кг. У лiсорозсаднику посадили 2 кг жолудiв.\nНе зiйшла десята частина жолудiв. Скiльки зiйшло сажанцiв дуба?\n\"\"\")\nprint(f\"1 дiя: {weight_of_1_kg_acorns} * {kg_acrons_in_forest} = {total_acrons} (в.) - всього жолудiв\")\nprint(f\"2 дiя: {total_acrons} * (1- {not_grow_acrons}) = {grown_oaks} (д.) - зiйшло\")\nprint(f\"Вiдповiдь: {grown_oaks} зiйшло сажанцiв дуба\")\n"
},
{
"alpha_fraction": 0.6864563822746277,
"alphanum_fraction": 0.7077922224998474,
"avg_line_length": 30.705883026123047,
"blob_id": "7ed1f60c491fbee3ad583c3af3a924ccfa91d5d6",
"content_id": "3e81a6f3687ecb25859747ff803149e535c1451c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1084,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 34,
"path": "/HW5/TimeMachine.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "from datetime import date\nimport random\n\ninputted_date = input(\"Input date in format YYYY-MM-DD: \")\n\n\ndef time_machine(full_date):\n full_date = date.fromisoformat(full_date)\n year = full_date.year\n month = full_date.month\n day = full_date.day\n first_year = year // 100 * 100\n last_year = first_year + 100\n year_given_by_time_machine = random.randint(first_year, last_year)\n return date(year_given_by_time_machine, month, day)\n\n\nwhile True:\n date_from_time_machine = time_machine(inputted_date)\n print(date_from_time_machine)\n if date_from_time_machine.year == date.fromisoformat(inputted_date).year:\n print(f\"You win\")\n break\n\n\n# Year in the proper decade printing task\n# Using the time_machine function try to generate 30 date variants but print only those\n# dates which pass to the desired year decade. I.e. for 1979 the decade will be between 1970 and 1980.\n\n# Important note\n# If you have the date object you can easily access each part of the date from it.\n# Year – date_obj.year\n# Month – date_obj.month\n# Day – date_obj.day\n"
},
{
"alpha_fraction": 0.5914893746376038,
"alphanum_fraction": 0.693617045879364,
"avg_line_length": 22.5,
"blob_id": "ba81d996b523cf1986aad892d97fc8d96856d819",
"content_id": "ce81db70efe927575076f188aab2c938afecb8b7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 237,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 10,
"path": "/HW3/TupleEx1.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#Exercise 1: Reverse the following tuple\naTuple = (10, 20, 30, 40, 50)\n#Expected output:\n#(50, 40, 30, 20, 10)\n#Note: You can’t reverse tuple, but list you can.\n\nlist1 = list(aTuple)\nlist1.reverse()\nbTuple = tuple(list1)\nprint(bTuple)\n"
},
{
"alpha_fraction": 0.5582329034805298,
"alphanum_fraction": 0.6867470145225525,
"avg_line_length": 23.899999618530273,
"blob_id": "bcd84fd26bb4408df6f8a16c8be4dd70c835fd72",
"content_id": "029a3a827894b7862ce6a9f30e7e79bbadbcec80",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 257,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 10,
"path": "/HW3/SetEx5.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#Exercise 5: Remove items 10, 20, 30 from the following set at once\nset1 = {10, 20, 30, 40, 50}\n#Expected output:\n#{40, 50}\n#Note. Try to use “difference_update” method of “set” object.\n\nset2 = {10, 20, 30}\nset1.difference_update(set2)\n\nprint(set1)\n"
},
{
"alpha_fraction": 0.5567765831947327,
"alphanum_fraction": 0.6776556968688965,
"avg_line_length": 29.33333396911621,
"blob_id": "8a7f1b749b1e09a87eda899186b20128540c8459",
"content_id": "e0f27d9c12651d68f002c7ad8f1b770e70c1b299",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 281,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 9,
"path": "/HW3/SetEx6.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#Exercise 6: Return a set of all elements in either A or B, but not both\nset1 = {10, 20, 30, 40, 50}\nset2 = {30, 40, 50, 60, 70}\n#Expected output:\n#{20, 70, 10, 60}\n#Note. Try “symmetric_difference” method of “set” object.\n\nnew = set1.symmetric_difference(set2)\nprint(new)\n"
},
{
"alpha_fraction": 0.5860214829444885,
"alphanum_fraction": 0.6102150678634644,
"avg_line_length": 25.64285659790039,
"blob_id": "fd2d895bf67a7525ffdc0ef4251225bab05dde28",
"content_id": "341d63c94e3c9518f86649b93b9d74e9660f418c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 372,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 14,
"path": "/HW3/ListEx4.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#Exercise 4: Concatenate two lists in the following order\n#list1 = [\"Hello \", \"take \"]\n#list2 = [\"Dear\", \"Sir\"]\n#Expected output:\n#['Hello Dear', 'Hello Sir', 'take Dear', 'take Sir']\n\nlist1 = [\"Hello \", \"take \"]\nlist2 = [\"Dear\", \"Sir\"]\nnew_list = []\nfor i in range(len(list1)):\n for j in range(len(list2)):\n new_list.append(list1[i] + list2[j])\n\nprint(new_list)"
},
{
"alpha_fraction": 0.6389610171318054,
"alphanum_fraction": 0.6701298952102661,
"avg_line_length": 28.615385055541992,
"blob_id": "e71d9a65b7ff5958b9d63d49d36b606742fcc8ac",
"content_id": "dccfd372ad80f80ecc6418d99697873b4fb2e13e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 389,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 13,
"path": "/HW4/StrEx12.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#Exercise 12: Find the last position of a substring “Emma” in a given string\n#Given:\nstr1 = \"Emma is a data scientist who knows Python. Emma works at google.\"\n#Expected Output:\n#Last occurrence of Emma starts at index 43\n\n\nfor s in range(len(str1)-1, -1, -1):\n if str1[(s-4):s] == \"Emma\":\n index = s-4\n break\n\nprint(f\"Last occurrence of Emma starts at index {index}\")\n"
},
{
"alpha_fraction": 0.60429447889328,
"alphanum_fraction": 0.6380367875099182,
"avg_line_length": 19.375,
"blob_id": "11cfc83ccdd2683928e6069628b6ceebdbc4eca6",
"content_id": "9a1c0681d5fbe4968271fcada373bd04facdbb99",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 326,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 16,
"path": "/HW4/DictEx6.py",
"repo_name": "EvelinaZakharchenko/SchoolTasksHillel",
"src_encoding": "UTF-8",
"text": "#Exercise 6: Delete set of keys from a dictionary\n# Given:\nsampleDict = {\n \"name\": \"Kelly\",\n \"age\": 25,\n \"salary\": 8000,\n \"city\": \"New york\"\n\n}\nkeysToRemove = [\"name\", \"salary\"]\n# Expected output:\n# {'city': 'New york', 'age': 25}\n\ndel sampleDict[keysToRemove[0]]\ndel sampleDict[keysToRemove[1]]\nprint(sampleDict)\n"
}
] | 73 |
autonlab/autonbox | https://github.com/autonlab/autonbox | 8ecf3b2a74f1c18242427cafa594a8ab7ef54e7e | 2d8bce738dd0dffc528540e86704f6a15140a16c | 4e7ef5bba1051efc64b8a5ab902cd7dd26af38ce | refs/heads/master | 2023-05-03T18:54:03.149633 | 2023-02-20T17:06:27 | 2023-02-20T17:06:27 | 189,454,758 | 2 | 3 | MIT | 2019-05-30T17:25:04 | 2023-01-29T16:15:25 | 2023-02-20T17:06:27 | Python | [
{
"alpha_fraction": 0.7157894968986511,
"alphanum_fraction": 0.7336384654045105,
"avg_line_length": 41.7843132019043,
"blob_id": "40685245abf077de3c01f38eff9b719f8262c36b",
"content_id": "0ad90a5461cc3451fb27a84dabc9bdc92f6862fb",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2185,
"license_type": "permissive",
"max_line_length": 117,
"num_lines": 51,
"path": "/autonbox/contrib/resnet/README.md",
"repo_name": "autonlab/autonbox",
"src_encoding": "UTF-8",
"text": "## Video Feature Extraction for Action Classification With 3D ResNet\n\n* This repo is forked from [this work](https://github.com/kenshohara/video-classification-3d-cnn-pytorch) and added \nwith changes to run feature extraction from videos\n* This method is based on 3D ResNet trained by [this work](https://github.com/kenshohara/3D-ResNets-PyTorch)\n\n## Citation\nIf you use this code, please cite the original paper:\n```\n@article{hara3dcnns,\n author={Kensho Hara and Hirokatsu Kataoka and Yutaka Satoh},\n title={Can Spatiotemporal 3D CNNs Retrace the History of 2D CNNs and ImageNet?},\n journal={arXiv preprint},\n volume={arXiv:1711.09577},\n year={2017},\n}\n```\n\n## Requirements\n* [PyTorch](http://pytorch.org/) version0.3\n* FFmpeg, FFprobe if need video processing\n* Python 3\n* Pillow for frame image processing\n\n## Before feature extraction\n* Download pre-trained models into ```$MODEL_DIR``` folder\n* Prepare video features as numpy arrays with shape ```F x H x W x C``` per video in ```$VIDEO_ROOT```, where \nF is frame number, H and W are height and width of videos and C is number of channels (3 for RGB)\n* Prepare the list of videos(paths) in ```$LIST_DIR```\n* If videos are stored in form of jpg files, run ```python generate_matrix.py $jpg_root $dst_npy_root``` to \ngenerate numpy matrices.\n\n## Featrue extraction\n* Run following for features extraction (this script calls ```extract_feature.py``` with options). Specify output \ndirectory in ```$OUT_DIR``` and a json file name \n```\nbash run_extract_module.sh\n``` \n* Function in ```extract_feature.py``` will take in video matrices and output a json file containing feature vectors \nof dimension 2048, for details see function ```generate_vid_feature```. \n* Make sure option ```n_classes``` in the script aligns with pre-trained model of choice. For instance, kinetics \ndataset has ```n_classes=400```, HMDB dataset has ```n_classes=51```.\n* The feature for each video has dimension 2048.\n\n## Embedding Visualization\n* To visualize features using TSNE embedding, run\n```\npython visualize_features.py \\path_to_json \\path_to_video_labels\n```\noutput:\n\n\n\n\n"
},
{
"alpha_fraction": 0.5946386456489563,
"alphanum_fraction": 0.606561005115509,
"avg_line_length": 39.43413162231445,
"blob_id": "e33dbd2b5d70b00eb5b2c3c892784056f81e88fa",
"content_id": "5817a68571f91c5655b5e07f28d678821330a286",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13504,
"license_type": "permissive",
"max_line_length": 163,
"num_lines": 334,
"path": "/test/test_autoNHITS.py",
"repo_name": "autonlab/autonbox",
"src_encoding": "UTF-8",
"text": "import unittest\nimport os\n\nimport numpy as np\nimport pandas as pd\nfrom ray import tune\nfrom ray.tune.search.hyperopt import HyperOptSearch\n#from neuralforecast import NeuralForecast\nfrom neuralforecast.tsdataset import TimeSeriesDataset\nfrom neuralforecast.auto import AutoNHITS\nfrom neuralforecast.losses.pytorch import MAE\n\nfrom d3m import runtime, index\nfrom d3m.container import dataset\nfrom d3m.metadata import problem\nfrom d3m.metadata.base import ArgumentType, Context\nfrom d3m.metadata.pipeline import Pipeline, PrimitiveStep\n\nclass AutoNHITSTestCase(unittest.TestCase):\n\n #hyperparams argument is a list of (name, data) tuples\n def construct_pipeline(self, hyperparams) -> Pipeline:\n # Creating pipeline\n pipeline_description = Pipeline()\n pipeline_description.add_input(name='inputs')\n\n # Step 0: dataset_to_dataframe\n step_0 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.dataset_to_dataframe.Common'))\n step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data='inputs.0')\n step_0.add_output('produce')\n pipeline_description.add_step(step_0)\n\n # Step 1: profiler\n # Automatically determine semantic types of columns\n step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.schema_discovery.profiler.Common'))\n step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data='steps.0.produce')\n step_1.add_output('produce')\n pipeline_description.add_step(step_1)\n\n # Step 2: column parser\n step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.column_parser.Common'))\n step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data='steps.1.produce')\n step_2.add_output('produce')\n\n #not adding this hyperparameter messes it up\n #I have NO IDEA why\n #looking at the documentation for this primitive,\n # using defaults should be better\n # if if you use defaults it messes up the values in a lot of the columns\n step_2.add_hyperparameter(\n name='parse_semantic_types',\n argument_type=ArgumentType.VALUE,\n data=[\n \"http://schema.org/Boolean\",\n \"http://schema.org/Integer\",\n \"http://schema.org/Float\",\n \"https://metadata.datadrivendiscovery.org/types/FloatVector\"\n ]\n )\n\n pipeline_description.add_step(step_2)\n\n # Step 3: extract_columns_by_semantic_types(attributes)\n step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common'))\n step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data='steps.2.produce')\n step_3.add_output('produce')\n step_3.add_hyperparameter(\n name='semantic_types',\n argument_type=ArgumentType.VALUE,\n data=['https://metadata.datadrivendiscovery.org/types/Attribute'],\n )\n pipeline_description.add_step(step_3)\n\n # Step 4: extract_columns_by_semantic_types(targets)\n step_4 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common'))\n step_4.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data='steps.2.produce')\n step_4.add_output('produce')\n step_4.add_hyperparameter(\n name='semantic_types',\n argument_type=ArgumentType.VALUE,\n data=[\n \"https://metadata.datadrivendiscovery.org/types/Target\",\n \"https://metadata.datadrivendiscovery.org/types/TrueTarget\",\n \"https://metadata.datadrivendiscovery.org/types/SuggestedTarget\"\n ]\n )\n pipeline_description.add_step(step_4)\n\n # Step 5: autoNHITS\n step_5 = 
PrimitiveStep(primitive=index.get_primitive('d3m.primitives.time_series_forecasting.nhits.AutonBox'))\n step_5.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data='steps.3.produce')\n step_5.add_argument(name='outputs', argument_type=ArgumentType.CONTAINER, data='steps.4.produce')\n #add hyperparams from argument\n for h in hyperparams:\n (name, data) = h\n step_5.add_hyperparameter(\n name = name,\n argument_type = ArgumentType.VALUE,\n data = data\n )\n step_5.add_output('produce')\n pipeline_description.add_step(step_5)\n\n # Step 6: construct_predictions\n # This is a primitive which assures that the output of a standard pipeline has predictions\n # in the correct structure (e.g., there is also a d3mIndex column with index for every row).\n step_6 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.construct_predictions.Common'))\n step_6.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data='steps.5.produce')\n # This is a primitive which uses a non-standard second argument, named \"reference\".\n step_6.add_argument(name='reference', argument_type=ArgumentType.CONTAINER, data='steps.0.produce')\n step_6.add_output('produce')\n pipeline_description.add_step(step_6)\n\n # Final output\n pipeline_description.add_output(name='output predictions', data_reference='steps.6.produce')\n\n # print json for reference\n #print(pipeline_description.to_json())\n\n return pipeline_description\n\n def run_pipeline(self, pipeline_description : Pipeline, dataset_location : str):\n problem_path = os.path.join(dataset_location, \"TRAIN\", \"problem_TRAIN\", \"problemDoc.json\")\n train_doc_path = os.path.join(dataset_location, \"TRAIN\", \"dataset_TRAIN\", \"datasetDoc.json\")\n test_doc_path = os.path.join(dataset_location, \"TEST\", \"dataset_TEST\", \"datasetDoc.json\")\n\n # Loading problem description.\n problem_description = problem.get_problem(problem_path)\n\n # Loading train and test datasets.\n train_dataset = dataset.get_dataset(train_doc_path)\n test_dataset = dataset.get_dataset(test_doc_path)\n\n print(train_dataset)\n print(test_dataset)\n\n # Fitting pipeline on train dataset.\n fitted_pipeline, train_predictions, fit_result = runtime.fit(\n pipeline_description,\n [train_dataset],\n problem_description=problem_description,\n context=Context.TESTING,\n )\n\n # Any errors from running the pipeline are captured and stored in\n # the result objects (together with any values produced until then and\n # pipeline run information). 
Here we just want to know if it succeed.\n fit_result.check_success()\n\n # Producing predictions using the fitted pipeline on test dataset.\n test_predictions, produce_result = runtime.produce(\n fitted_pipeline,\n [test_dataset],\n )\n produce_result.check_success()\n\n return test_predictions\n \n def run_direct(self, train, test, h):\n #run AutoNHITS directly\n future_exog = list(set(train.columns) - set(['ds', 'unique_id', 'y']))\n \n print(\"Fitting NeuralForecast AutoNHITS (direct)\")\n print(\"train:\")\n print(train)\n print(\"h:\" + str(h))\n print(\"future exog: \" + str(future_exog))\n\n nhits_config = {\n \"input_size\": 3*h,\n \"n_pool_kernel_size\": tune.choice(\n [3 * [1], 3 * [2], 3 * [4], [8, 4, 1], [16, 8, 1]]\n ),\n \"n_freq_downsample\": tune.choice(\n [\n [168, 24, 1],\n [24, 12, 1],\n [180, 60, 1],\n [60, 8, 1],\n [40, 20, 1],\n [1, 1, 1],\n ]\n ),\n \"learning_rate\": tune.loguniform(1e-4, 1e-1),\n \"scaler_type\" : 'robust',\n \"max_steps\": 100, #TODO: change to 1000 after testing\n \"batch_size\": tune.choice([32, 64, 128, 256]),\n \"windows_batch_size\": tune.choice([128, 256, 512, 1024]), # Initial Learning rate\n \"random_seed\": 1, \n \"futr_exog_list\" : future_exog\n }\n \n model = AutoNHITS(\n h=h,\n loss=MAE(),\n config=nhits_config,\n search_alg=HyperOptSearch(),\n num_samples=10)\n\n train, uids, last_dates, ds = TimeSeriesDataset.from_df(df=train)\n model.fit(train, val_size=h*2)\n\n y = test['y']\n del test['y']\n\n print(\"future:\")\n print(test)\n dataset = TimeSeriesDataset.update_dataset(\n dataset=train, future_df=test\n )\n\n model.set_test_size(h) # To predict h steps ahead\n model_fcsts = model.predict(dataset=dataset)\n #print(\"model_fcsts:\")\n #print(model_fcsts)\n return(pd.DataFrame({\"y\": list(model_fcsts.flatten())}))\n #----------------------------------\n '''\n if issubclass(last_dates.dtype.type, np.integer):\n last_date_f = lambda x: np.arange(\n \tx + 1, x + 1 + h, dtype=last_dates.dtype\n \t)\n else:\n last_date_f = lambda x: pd.date_range(\n x + self.freq, periods=h, freq=self.freq\n )\n\n if len(np.unique(last_dates)) == 1:\n dates = np.tile(last_date_f(last_dates[0]), len(train))\n else:\n dates = np.hstack([last_date_f(last_date) for last_date in last_dates])\n \n idx = pd.Index(np.repeat(uids, h), name=\"unique_id\")\n fcsts_df = pd.DataFrame({\"ds\": dates}, index=idx)\n\n \n\n col_idx = 0\n fcsts = np.full((h * len(uids), 1), fill_value=np.nan)\n \n\n # Append predictions in memory placeholder\n output_length = len(model.loss.output_names)\n fcsts[:, col_idx : col_idx + output_length] = model_fcsts\n col_idx += output_length\n\n # Declare predictions pd.DataFrame\n fcsts = pd.DataFrame.from_records(fcsts, columns=cols, index=fcsts_df.index)\n fcsts_df = pd.concat([fcsts_df, fcsts], axis=1)\n '''\n #----------------------------------\n\n def test_nfsample(self):\n print(\"testing nf sample dataset\")\n dataset_location = \"/home/mkowales/datasets/nfsample/d3m\"\n\n train_data_path = os.path.join(dataset_location, \"TRAIN\", \"dataset_TRAIN\", \"tables\", \"learningData.csv\")\n test_data_path = os.path.join(dataset_location, \"TEST\", \"dataset_TEST\", \"tables\", \"learningData.csv\")\n\n target_name = 'y'\n\n train = pd.read_csv(train_data_path)\n test = pd.read_csv(test_data_path)\n train['ds'] = pd.to_datetime(train['ds'])\n test['ds'] = pd.to_datetime(test['ds'])\n\n del train['d3mIndex']\n del test['d3mIndex']\n\n h = int(test.shape[0]/2)\n\n #----------\n\n #run AutoNHITS directly\n direct_predictions = 
self.run_direct(train, test, h)\n\n #run simple pipeline with AutoNHITS primitive\n pipeline_description = self.construct_pipeline(hyperparams=[])\n pipeline_predictions = self.run_pipeline(pipeline_description, dataset_location)\n pipeline_predictions = pipeline_predictions[target_name]\n\n print(\"direct:\")\n print(direct_predictions)\n print(type(direct_predictions))\n print(\"from pipeline:\")\n print(pipeline_predictions)\n print(type(pipeline_predictions))\n\n #predictions will not necessarily be identical but should be similar\n #assert((direct_predictions['y'] == pipeline_predictions).all())\n\n def test_sunspots(self):\n\n dataset_location = \"/home/mkowales/datasets/sunspots/d3m\"\n\n train_data_path = os.path.join(dataset_location, \"TRAIN\", \"dataset_TRAIN\", \"tables\", \"learningData.csv\")\n test_data_path = os.path.join(dataset_location, \"TEST\", \"dataset_TEST\", \"tables\", \"learningData.csv\")\n\n target_name = 'sunspots'\n\n train = pd.read_csv(train_data_path)\n test = pd.read_csv(test_data_path)\n train['ds'] = pd.to_datetime(train['year'], format=\"%Y\")\n test['ds'] = pd.to_datetime(test['year'], format=\"%Y\")\n del train['year']\n del test['year']\n\n train['unique_id'] = ['a']*train.shape[0]\n test['unique_id'] = ['a']*test.shape[0]\n\n train.rename(columns={\"sunspots\":\"y\"}, inplace=True)\n test.rename(columns={\"sunspots\":\"y\"}, inplace=True)\n\n h = int(test.shape[0])\n\n #----------\n #run AutoNHITS directly\n direct_predictions = self.run_direct(train, test, h)\n\n #run simple pipeline with AutoNHITS primitive\n pipeline_description = self.construct_pipeline(hyperparams=[])\n pipeline_predictions = self.run_pipeline(pipeline_description, dataset_location)\n pipeline_predictions = pipeline_predictions[target_name]\n\n print(\"direct:\")\n print(direct_predictions)\n print(\"from pipeline:\")\n print(pipeline_predictions)\n\n #predictions will not necessarily be identical but should be similar\n #assert((direct_predictions == pipeline_predictions).all())\n \nif __name__ == '__main__':\n unittest.main()"
},
{
"alpha_fraction": 0.8742514848709106,
"alphanum_fraction": 0.8892215490341187,
"avg_line_length": 94.57142639160156,
"blob_id": "5c4cc1521b04106bdd7fe000d7a400c09e5800b5",
"content_id": "d25389df7cda27d1e8312a190c7924dfcf8e7d26",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 668,
"license_type": "permissive",
"max_line_length": 143,
"num_lines": 7,
"path": "/entry_points.ini",
"repo_name": "autonlab/autonbox",
"src_encoding": "UTF-8",
"text": "[d3m.primitives]\nsemisupervised_classification.iterative_labeling.AutonBox = autonbox.iterative_labeling:IterativeLabelingPrimitive\nfeature_extraction.resnext101_kinetics_video_features.VideoFeaturizer = autonbox.resnext101_kinetics_video_features:ResNext101KineticsPrimitive\ndata_cleaning.clean_augmentation.AutonBox = autonbox.clean_augment:CleanAugmentationPrimitive\ndata_transformation.merge_partial_predictions.AutonBox = autonbox.merge_partial_multipredictions:MergePartialPredictionsPrimitive\ntime_series_forecasting.arima.AutonBox = autonbox.autoARIMA_wrapper:AutoARIMAWrapperPrimitive\ntime_series_forecasting.nhits.AutonBox = autonbox.NHITS:AutoNHITSPrimitive"
},
{
"alpha_fraction": 0.6509259343147278,
"alphanum_fraction": 0.6585648059844971,
"avg_line_length": 42.862945556640625,
"blob_id": "5e9923a27afb888897b21ba71ae4069ee4eefcd8",
"content_id": "31674e53fb096ebfd48fe8a42ed441ff60c67bd8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8640,
"license_type": "permissive",
"max_line_length": 140,
"num_lines": 197,
"path": "/test/test_autoARIMA_wrapper.py",
"repo_name": "autonlab/autonbox",
"src_encoding": "UTF-8",
"text": "import unittest\nimport os\n\nimport pandas as pd\nfrom statsforecast.arima import AutoARIMA\n\nfrom d3m import runtime, index\nfrom d3m.container import dataset\nfrom d3m.metadata import problem\nfrom d3m.metadata.base import ArgumentType, Context\nfrom d3m.metadata.pipeline import Pipeline, PrimitiveStep\n\nDATASET_LOCATION = \"/home/mkowales/datasets/sunspots/\"\nPROBLEM_PATH = os.path.join(DATASET_LOCATION, \"TRAIN\", \"problem_TRAIN\", \"problemDoc.json\")\nTRAIN_DOC_PATH = os.path.join(DATASET_LOCATION, \"TRAIN\", \"dataset_TRAIN\", \"datasetDoc.json\")\nTEST_DOC_PATH = os.path.join(DATASET_LOCATION, \"TEST\", \"dataset_TEST\", \"datasetDoc.json\")\nTRAIN_DATA_PATH = os.path.join(DATASET_LOCATION, \"TRAIN\", \"dataset_TRAIN\", \"tables\", \"learningData.csv\")\nTEST_DATA_PATH = os.path.join(DATASET_LOCATION, \"TEST\", \"dataset_TEST\", \"tables\", \"learningData.csv\")\n\nclass AutoARIMAWrapperTestCase(unittest.TestCase):\n\n #hyperparams argument is a list of (name, data) tuples\n def construct_pipeline(self, hyperparams) -> Pipeline:\n # Creating pipeline\n pipeline_description = Pipeline()\n pipeline_description.add_input(name='inputs')\n\n # Step 0: dataset_to_dataframe\n step_0 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.dataset_to_dataframe.Common'))\n step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data='inputs.0')\n step_0.add_output('produce')\n pipeline_description.add_step(step_0)\n\n # Step 1: profiler\n # Automatically determine semantic types of columns\n step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.schema_discovery.profiler.Common'))\n step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data='steps.0.produce')\n step_1.add_output('produce')\n pipeline_description.add_step(step_1)\n\n # Step 2: column parser\n step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.column_parser.Common'))\n step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data='steps.1.produce')\n step_2.add_output('produce')\n step_2.add_hyperparameter(\n name='parse_semantic_types',\n argument_type=ArgumentType.VALUE,\n data=[\n \"http://schema.org/Boolean\",\n \"http://schema.org/Integer\",\n \"http://schema.org/Float\",\n \"https://metadata.datadrivendiscovery.org/types/FloatVector\"\n ]\n )\n pipeline_description.add_step(step_2)\n\n # Step 3: extract_columns_by_semantic_types(attributes)\n step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common'))\n step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data='steps.2.produce')\n step_3.add_output('produce')\n step_3.add_hyperparameter(\n name='semantic_types',\n argument_type=ArgumentType.VALUE,\n data=['https://metadata.datadrivendiscovery.org/types/Attribute'],\n )\n pipeline_description.add_step(step_3)\n\n # Step 4: extract_columns_by_semantic_types(targets)\n step_4 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common'))\n step_4.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data='steps.2.produce')\n step_4.add_output('produce')\n step_4.add_hyperparameter(\n name='semantic_types',\n argument_type=ArgumentType.VALUE,\n data=[\n \"https://metadata.datadrivendiscovery.org/types/Target\",\n \"https://metadata.datadrivendiscovery.org/types/TrueTarget\",\n 
\"https://metadata.datadrivendiscovery.org/types/SuggestedTarget\"\n ]\n )\n pipeline_description.add_step(step_4)\n\n # Step 5: autoARIMA\n step_5 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.time_series_forecasting.arima.AutonBox'))\n step_5.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data='steps.3.produce')\n step_5.add_argument(name='outputs', argument_type=ArgumentType.CONTAINER, data='steps.4.produce')\n #add hyperparams from argument\n for h in hyperparams:\n (name, data) = h\n step_5.add_hyperparameter(\n name = name,\n argument_type = ArgumentType.VALUE,\n data = data\n )\n step_5.add_output('produce')\n pipeline_description.add_step(step_5)\n\n # Step 6: construct_predictions\n # This is a primitive which assures that the output of a standard pipeline has predictions\n # in the correct structure (e.g., there is also a d3mIndex column with index for every row).\n step_6 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.construct_predictions.Common'))\n step_6.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data='steps.5.produce')\n # This is a primitive which uses a non-standard second argument, named \"reference\".\n step_6.add_argument(name='reference', argument_type=ArgumentType.CONTAINER, data='steps.0.produce')\n step_6.add_output('produce')\n pipeline_description.add_step(step_6)\n\n # Final output\n pipeline_description.add_output(name='output predictions', data_reference='steps.6.produce')\n\n # print json for reference\n #print(pipeline_description.to_json())\n\n return pipeline_description\n\n def run_pipeline(self, pipeline_description : Pipeline):\n # Loading problem description.\n problem_description = problem.get_problem(PROBLEM_PATH)\n\n # Loading train and test datasets.\n train_dataset = dataset.get_dataset(TRAIN_DOC_PATH)\n test_dataset = dataset.get_dataset(TEST_DOC_PATH)\n\n # Fitting pipeline on train dataset.\n fitted_pipeline, train_predictions, fit_result = runtime.fit(\n pipeline_description,\n [train_dataset],\n problem_description=problem_description,\n context=Context.TESTING,\n )\n\n # Any errors from running the pipeline are captured and stored in\n # the result objects (together with any values produced until then and\n # pipeline run information). 
Here we just want to know if it succeed.\n fit_result.check_success()\n\n # Producing predictions using the fitted pipeline on test dataset.\n test_predictions, produce_result = runtime.produce(\n fitted_pipeline,\n [test_dataset],\n )\n produce_result.check_success()\n\n return test_predictions.sunspots\n \n '''\n def test_default_params(self):\n\n #run simple pipeline with AutoARIMA primitive\n pipeline_description = self.construct_pipeline(hyperparams=[])\n pipeline_predictions = self.run_pipeline(pipeline_description).to_numpy().flatten()\n\n #run statsforecast AutoARIMA directly\n train = pd.read_csv(TRAIN_DATA_PATH)\n test = pd.read_csv(TEST_DATA_PATH)\n y = train.sunspots.to_numpy().flatten()\n h = test.shape[0] #number of rows in test\n\n arima = AutoARIMA()\n arima.fit(y)\n direct_predictions = arima.predict(h=h, X=).to_numpy().flatten()\n\n print(\"direct:\")\n print(direct_predictions)\n print(\"from pipeline:\")\n print(pipeline_predictions)\n\n assert((direct_predictions == pipeline_predictions).all())\n '''\n\n def test_exogenous(self):\n\n #run simple pipeline with AutoARIMA primitive\n pipeline_description = self.construct_pipeline(hyperparams=[])\n pipeline_predictions = self.run_pipeline(pipeline_description).to_numpy().flatten()\n\n #run statsforecast AutoARIMA directly\n train = pd.read_csv(TRAIN_DATA_PATH)\n test = pd.read_csv(TEST_DATA_PATH)\n y = train.sunspots.to_numpy().flatten()\n X = train.loc[:, list((\"sd\", \"observations\"))].to_numpy()\n Xf = test.loc[:, list((\"sd\", \"observations\"))].to_numpy()\n h = test.shape[0] #number of rows in test\n\n arima = AutoARIMA()\n arima.fit(y, X=X)\n direct_predictions = arima.predict(h=h, X=Xf).to_numpy().flatten()\n\n print(\"direct:\")\n print(direct_predictions)\n print(\"from pipeline:\")\n print(pipeline_predictions)\n\n assert((direct_predictions == pipeline_predictions).all())\n\nif __name__ == '__main__':\n unittest.main()"
},
{
"alpha_fraction": 0.6154716610908508,
"alphanum_fraction": 0.6257586479187012,
"avg_line_length": 43.59174346923828,
"blob_id": "78966e3d81e28fc662d0ba07875f4348dc8a8085",
"content_id": "b51b578ac9282e588a021b2b6f3edcd7d81c74cf",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9721,
"license_type": "permissive",
"max_line_length": 134,
"num_lines": 218,
"path": "/autonbox/iterative_labeling.py",
"repo_name": "autonlab/autonbox",
"src_encoding": "UTF-8",
"text": "import os\nimport warnings\nfrom typing import Any, Sequence\n\nimport numpy as np\n\nimport d3m.metadata\nfrom d3m import container, utils as d3m_utils\nfrom d3m.metadata import base as metadata_base, hyperparams\nfrom d3m.metadata import params\nfrom d3m.metadata.base import PrimitiveFamily\nfrom d3m.primitive_interfaces.supervised_learning import SupervisedLearnerPrimitiveBase\nfrom d3m.primitive_interfaces.base import ProbabilisticCompositionalityMixin\nfrom d3m.primitive_interfaces import base\nfrom d3m.primitives.classification.random_forest import SKlearn as SKRandomForestClassifier\nfrom sklearn.calibration import CalibratedClassifierCV\nfrom sklearn.exceptions import ConvergenceWarning\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.utils.multiclass import type_of_target\n\nimport autonbox\n\nInput = container.DataFrame\nOutput = container.DataFrame\n\n\nclass IterativeLabelingParams(params.Params):\n is_fitted: bool\n calibclf: Any\n prim_instance: Any\n\n\nclass IterativeLabelingHyperparams(hyperparams.Hyperparams):\n iters = hyperparams.UniformInt(lower=1, upper=100, default=5,\n semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter'],\n description='The number of iterations of labeling')\n frac = hyperparams.Uniform(lower=0.01, upper=1.0, default=0.2,\n semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter'],\n description='The fraction of unlabeled item to label')\n blackbox = hyperparams.Primitive[SupervisedLearnerPrimitiveBase](\n primitive_families=[PrimitiveFamily.CLASSIFICATION],\n default=SKRandomForestClassifier,\n semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter'],\n description='Black box model for the classification')\n cv = hyperparams.UniformInt(lower=1, upper=100, default=5,\n semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter'],\n description='The number of CV folds. 
Only used when the blackbox estimator '\n 'doesn\\'t suuport predict_proba()')\n\n\nclass IterativeLabelingPrimitive(SupervisedLearnerPrimitiveBase[Input, Output, IterativeLabelingParams, IterativeLabelingHyperparams],\n\t\t\t\t ProbabilisticCompositionalityMixin[Input, Output, IterativeLabelingParams, IterativeLabelingHyperparams]):\n \"\"\"\n Blackbox based iterative labeling for semi-supervised classification\n \"\"\"\n\n metadata = metadata_base.PrimitiveMetadata(\n {\n 'id': '6bb5824f-cf16-4615-8643-8c1758bd6751',\n 'version': '0.2.1',\n \"name\": \"Iterative labeling for semi-supervised learning\",\n 'description': \"Blackbox based iterative labeling for semi-supervised classification\",\n 'python_path': 'd3m.primitives.semisupervised_classification.iterative_labeling.AutonBox',\n 'source': {\n 'name': autonbox.__author__,\n 'uris': ['https://github.com/autonlab/autonbox'],\n 'contact': 'mailto:[email protected]'\n },\n \"installation\": [{\n \"type\": metadata_base.PrimitiveInstallationType.PIP,\n \"package\": \"autonbox\",\n \"version\": \"0.2.4\"\n }],\n 'algorithm_types': [\n metadata_base.PrimitiveAlgorithmType.ITERATIVE_LABELING,\n ],\n 'primitive_family': metadata_base.PrimitiveFamily.SEMISUPERVISED_CLASSIFICATION,\n },\n )\n\n def __init__(self, *, hyperparams: IterativeLabelingHyperparams) -> None:\n super().__init__(hyperparams=hyperparams)\n self._prim_instance = None\n self._is_fitted = False\n self.X = None\n self.y = None\n self._iters = hyperparams['iters']\n self._frac = hyperparams['frac']\n self._cv = hyperparams['cv']\n\n self._calibclf = None\n\n def __getstate__(self):\n return (\n self.hyperparams, self._prim_instance, self._is_fitted)\n\n def __setstate__(self, state):\n self.hyperparams, self._prim_instance, self._is_fitted = state\n\n def fit(self, *, timeout: float = None, iterations: int = None) -> base.CallResult[None]:\n X = self.X\n y = self.y\n\n primitive = self.hyperparams['blackbox']\n primitive_hyperparams = primitive.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']\n custom_hyperparams = {'n_estimators': 100}\n if isinstance(primitive, d3m.primitive_interfaces.base.PrimitiveBaseMeta): # is a class\n valid_params = {k: custom_hyperparams[k] for k in\n set(custom_hyperparams).intersection(set(primitive_hyperparams.configuration))}\n self._prim_instance = primitive(\n hyperparams=primitive_hyperparams(primitive_hyperparams.defaults(), **valid_params))\n else: # is an instance\n self._prim_instance = primitive\n\n # Does _prim_instance._clf support predict_proba() call?\n if not hasattr(self._prim_instance._clf, 'predict_proba'):\n calibclf = CalibratedClassifierCV(self._prim_instance._clf, cv=self._cv)\n self._calibclf = OneVsRestClassifier(calibclf)\n\n for labelIteration in range(self._iters):\n labeledSelector = y.iloc[:, 0].notnull() & (y.iloc[:, 0].apply(lambda x: x != ''))\n labeledIx = np.where(labeledSelector)[0]\n unlabeledIx = np.where(~labeledSelector)[0]\n\n if (labelIteration == 0):\n num_instances_to_label = int(self._frac * len(unlabeledIx) + 0.5)\n\n labeledX = X.iloc[labeledIx]\n labeledy = y.iloc[labeledIx]\n\n if self._calibclf is not None:\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=ConvergenceWarning)\n self._calibclf.fit(labeledX, labeledy)\n probas = self._calibclf.predict_proba(X.iloc[unlabeledIx])\n else:\n self._prim_instance.set_training_data(inputs=labeledX, outputs=labeledy)\n self._prim_instance.fit()\n probas = 
self._prim_instance._clf.predict_proba(X.iloc[unlabeledIx])\n\n entropies = np.sum(np.log2(probas.clip(0.0000001, 1.0)) * probas, axis=1)\n # join the entropies and the unlabeled indecies into a single recarray and sort it by entropy\n entIdx = np.rec.fromarrays((entropies, unlabeledIx))\n entIdx.sort(axis=0)\n\n labelableIndices = entIdx['f1'][-num_instances_to_label:].reshape((-1,))\n\n if self._calibclf is not None:\n predictions = self._calibclf.predict(X.iloc[labelableIndices])\n predictions = container.DataFrame(predictions, generate_metadata=False)\n else:\n predictions = self._prim_instance.produce(inputs=X.iloc[labelableIndices]).value\n ydf = y.iloc[labelableIndices, 0]\n ydf.loc[:] = predictions.iloc[:, 0]\n\n labeledSelector = y.iloc[:, 0].notnull() & (y.iloc[:, 0].apply(lambda x: x != ''))\n labeledIx = np.where(labeledSelector)[0]\n labeledX = X.iloc[labeledIx]\n labeledy = y.iloc[labeledIx]\n\n if self._calibclf is not None:\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=ConvergenceWarning)\n self._calibclf.fit(labeledX, labeledy)\n else:\n self._prim_instance.set_training_data(inputs=labeledX, outputs=labeledy)\n self._prim_instance.fit()\n self._is_fitted = True\n\n return base.CallResult(None)\n\n def set_training_data(self, *, inputs: Input, outputs: Output) -> None:\n \"\"\"\n Sets input and output feature space.\n\n :param inputs:\n :param outputs:\n :return:\n \"\"\"\n self.X = inputs\n self.y = outputs\n\n def get_params(self) -> IterativeLabelingParams:\n return IterativeLabelingParams(\n is_fitted=self._is_fitted,\n calibclf=self._calibclf,\n prim_instance=self._prim_instance)\n\n def set_params(self, *, params: IterativeLabelingParams) -> None:\n self._is_fitted = params['is_fitted']\n self._calibclf = params['calibclf']\n self._prim_instance = params['prim_instance']\n\n def produce(self, *, inputs: Input, timeout: float = None, iterations: int = None) -> base.CallResult[Output]:\n if self._calibclf is not None:\n pred = self._calibclf.predict(inputs)\n df = container.DataFrame(pred, generate_metadata=False)\n else:\n pred = self._prim_instance.produce(inputs=inputs)\n df = pred.value\n\n # if output is a binary array of floats then convert values to int\n if type_of_target(df) == 'binary' and len(df) > 0 \\\n and df.iloc[0].dtype == np.float64:\n df = df.astype(int)\n\n output = container.DataFrame(df, generate_metadata=True)\n # if output[0].dtype == np.float64:\n # output = output.astype(int) # we don't want [\"-1.0\", \"1.0\"] when runtime computes the metric\n return base.CallResult(output)\n\n def log_likelihoods(self, *,\n outputs: Output,\n inputs: Input,\n timeout: float = None,\n iterations: int = None) -> base.CallResult[Sequence[float]]:\n results = self._prim_instance.log_likelihoods(outputs=outputs, inputs=inputs)\n return results\n"
},
{
"alpha_fraction": 0.6156634092330933,
"alphanum_fraction": 0.6219633221626282,
"avg_line_length": 37.74916458129883,
"blob_id": "90610bdb4c030f03363e00470e350d5ccff4bddb",
"content_id": "5af3c25482e0c7e6f5d297d138b279443417ac02",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 23175,
"license_type": "permissive",
"max_line_length": 396,
"num_lines": 598,
"path": "/autonbox/autoARIMA_wrapper.py",
"repo_name": "autonlab/autonbox",
"src_encoding": "UTF-8",
"text": "import typing\nfrom frozendict import FrozenOrderedDict\n\nfrom d3m import container\nfrom d3m.primitive_interfaces import base\nfrom d3m.primitive_interfaces.supervised_learning import SupervisedLearnerPrimitiveBase\nfrom d3m.metadata import base as metadata_base\nfrom d3m.metadata import hyperparams, params\nfrom d3m.exceptions import MissingValueError, PrimitiveNotFittedError\n\nimport autonbox\nfrom statsforecast.arima import AutoARIMA\nfrom autonbox import __version__\n\n__all__ = ('AutoARIMAWrapperPrimitive',)\n\nInputs = container.DataFrame\nOutputs = container.DataFrame\n\nclass Params(params.Params):\n fitted: bool\n new_training_data: bool\n autoARIMA: typing.Any\n\nclass Hyperparams(hyperparams.Hyperparams):\n\n #removing \"Exogenous cols\" hyperparameter because it no longer applies--just assuming all columns are exogenous\n #removing \"level\" hyperparam because it is not useful in a D3M pipeline\n '''\n exogenous_cols = hyperparams.List(\n elements=hyperparams.Hyperparameter[str](\"\"),\n default=[],\n semantic_types = [\"https://metadata.datadrivendiscovery.org/types/ControlParameter\"],\n description = \"Columns to use as exogenous variables to be passed in to AutoARIMA.fit() and AutoARIMA.predict().\",\n )\n\n #currently, setting this to anything other than default throws an error\n level = hyperparams.List(\n elements=hyperparams.Uniform(\n default=95,\n lower=50,\n upper=100,\n lower_inclusive=True,\n upper_inclusive=False\n ),\n default=[],\n semantic_types = [\"https://metadata.datadrivendiscovery.org/types/ControlParameter\"],\n description=\"An optional list of ints between 50 and 100 representing %% confidence levels for prediction intervals\",\n )\n '''\n\n d = hyperparams.Union(\n configuration=FrozenOrderedDict([\n (\"auto\",\n hyperparams.Constant(\n default=None\n )\n ),\n (\"manual\", \n hyperparams.UniformInt(\n lower=1,\n upper=10,\n default=2\n )\n )\n ]),\n default=\"auto\",\n semantic_types=[\"https://metadata.datadrivendiscovery.org/types/ControlParameter\"],\n description=\"Order of first-differencing. Either set manually, or have it be chosen automatically.\"\n )\n\n D = hyperparams.Union(\n configuration=FrozenOrderedDict([\n (\"auto\",\n hyperparams.Constant(\n default=None\n )\n ),\n (\"manual\", \n hyperparams.UniformInt(\n lower=1,\n upper=10,\n default=2\n )\n )\n ]),\n default=\"auto\",\n semantic_types=[\"https://metadata.datadrivendiscovery.org/types/ControlParameter\"],\n description=\"Order of seasonal-differencing. 
Either set manually, or have it be chosen automatically.\"\n )\n\n max_p = hyperparams.UniformInt(\n lower=1,\n upper=100,\n default=5,\n semantic_types=[\"https://metadata.datadrivendiscovery.org/types/ControlParameter\"],\n description=\"Maximum value of p\"\n )\n\n max_q = hyperparams.UniformInt(\n lower=1,\n upper=100,\n default=5,\n semantic_types=[\"https://metadata.datadrivendiscovery.org/types/ControlParameter\"],\n description=\"Maximum value of q\"\n )\n\n max_P = hyperparams.UniformInt(\n lower=1,\n upper=100,\n default=2,\n semantic_types=[\"https://metadata.datadrivendiscovery.org/types/ControlParameter\"],\n description=\"Maximum value of P\"\n )\n\n max_Q = hyperparams.UniformInt(\n lower=1,\n upper=100,\n default=2,\n semantic_types=[\"https://metadata.datadrivendiscovery.org/types/ControlParameter\"],\n description=\"Maximum value of Q\"\n )\n\n max_order = hyperparams.UniformInt(\n lower=1,\n upper=100,\n default=5,\n semantic_types=[\"https://metadata.datadrivendiscovery.org/types/ControlParameter\"],\n description=\"Maximum value of p+q+P+Q if model selection is not stepwise\"\n )\n\n max_d = hyperparams.UniformInt(\n lower=1,\n upper=10,\n default=2,\n semantic_types=[\"https://metadata.datadrivendiscovery.org/types/ControlParameter\"],\n description=\"Maximum number of non-seasonal differences\"\n )\n\n max_D = hyperparams.UniformInt(\n lower=1,\n upper=10,\n default=1,\n semantic_types=[\"https://metadata.datadrivendiscovery.org/types/ControlParameter\"],\n description=\"Maximum number of seasonal differences\"\n )\n\n start_p = hyperparams.UniformInt(\n lower=1,\n upper=10,\n default=2,\n semantic_types=[\"https://metadata.datadrivendiscovery.org/types/ControlParameter\"],\n description=\"Starting value of p in stepwise procedure\"\n )\n\n start_q = hyperparams.UniformInt(\n lower=1,\n upper=10,\n default=2,\n semantic_types=[\"https://metadata.datadrivendiscovery.org/types/ControlParameter\"],\n description=\"Starting value of q in stepwise procedure\"\n )\n\n start_P = hyperparams.UniformInt(\n lower=1,\n upper=10,\n default=1,\n semantic_types=[\"https://metadata.datadrivendiscovery.org/types/ControlParameter\"],\n description=\"Starting value of P in stepwise procedure\"\n )\n\n start_Q = hyperparams.UniformInt(\n lower=1,\n upper=10,\n default=1,\n semantic_types=[\"https://metadata.datadrivendiscovery.org/types/ControlParameter\"],\n description=\"Starting value of Q in stepwise procedure\"\n )\n\n stationary = hyperparams.UniformBool(\n default=False,\n semantic_types=[\"https://metadata.datadrivendiscovery.org/types/ControlParameter\"],\n description=\"If True, restricts search to stationary models.\"\n )\n\n seasonal = hyperparams.UniformBool(\n default=True,\n semantic_types=[\"https://metadata.datadrivendiscovery.org/types/ControlParameter\"],\n description=\"If False, restricts search to non-seasonal models.\"\n )\n\n ic = hyperparams.Enumeration(\n values=[\"aic\", \"aicc\", \"bic\"],\n default=\"aicc\",\n semantic_types=[\"https://metadata.datadrivendiscovery.org/types/ControlParameter\"],\n description=\"information criterion used in model selection\"\n )\n\n #currently changing this causes autoARIMA to fail\n stepwise = hyperparams.UniformBool(\n default=True,\n semantic_types=[\"https://metadata.datadrivendiscovery.org/types/ControlParameter\"],\n description=\"If True, will do stepwise selection (faster). Otherwise, it searches over all models. Non-stepwise selection can be very slow, especially for seasonal models. 
At the time of writing, setting stepwise to False causes AutoARIMA to fail.\"\n )\n\n nmodels=hyperparams.UniformInt(\n lower=1,\n upper=500,\n default=94,\n semantic_types=[\"https://metadata.datadrivendiscovery.org/types/ControlParameter\"],\n description=\"Maximum number of models considered in the stepwise search.\"\n )\n\n trace=hyperparams.UniformBool(\n default=False,\n semantic_types=[\"https://metadata.datadrivendiscovery.org/types/ControlParameter\"],\n description=\"If True, the list of ARIMA models considered will be reported.\"\n )\n\n approximation = hyperparams.Enumeration(\n values = [True, False, None],\n default=None,\n semantic_types=[\"https://metadata.datadrivendiscovery.org/types/ControlParameter\"],\n description=\"If True, estimation is via conditional sums of squares and the information criteria used for model selection are approximated. The final model is still computed using maximum likelihood estimation. Approximation should be used for long time series or a high seasonal period to avoid excessive computation times. If set to None, AutoARIMA will decide whether to approximate.\"\n )\n\n method = hyperparams.Enumeration(\n values = (\"CSS\", \"CSS-ML\", \"ML\", None),\n default=None,\n semantic_types=[\"https://metadata.datadrivendiscovery.org/types/ControlParameter\"],\n description=\"fitting method: maximum likelihood or minimize conditional sum-of-squares. If None, will choose automatically\"\n )\n\n truncate = hyperparams.Union(\n configuration=FrozenOrderedDict([\n (\"none\",\n hyperparams.Constant(\n default=None\n )\n ),\n (\"truncate\", \n hyperparams.UniformInt(\n lower=1,\n upper=100,\n default=50\n )\n )\n ]),\n default=\"none\",\n semantic_types=[\"https://metadata.datadrivendiscovery.org/types/ControlParameter\"],\n description=\"An integer value indicating how many observations to use in model selection. The last truncate values of the series are used to select a model when truncate is not None and approximation=True. All observations are used if either truncate=None or approximation=False.\"\n )\n\n test = hyperparams.Enumeration(\n values=(\"kpss\",),\n default=\"kpss\",\n semantic_types=[\"https://metadata.datadrivendiscovery.org/types/ControlParameter\"],\n description=\"Type of unit root test to use. See ndiffs for details. As of this writing, this argument doesn't seem to do anything in statsforecast\"\n )\n\n test_kwargs = hyperparams.Constant(\n default = {},\n semantic_types=[\"https://metadata.datadrivendiscovery.org/types/ControlParameter\"],\n description = \"A dictionary of keyword arguments to be passed to the unit root test. At the time of writing, the only argument that is read is alpha.\"\n )\n\n seasonal_test = hyperparams.Enumeration(\n values=(\"seas\", \"ocsb\"),\n default=\"seas\",\n semantic_types=[\"https://metadata.datadrivendiscovery.org/types/ControlParameter\"],\n description=\"This determines which method is used to select the number of seasonal differences. The default method is to use a measure of seasonal strength computed from an STL decomposition. Other possibilities involve seasonal unit root tests.\"\n )\n\n seasonal_test_kwargs = hyperparams.Constant(\n default = {},\n semantic_types=[\"https://metadata.datadrivendiscovery.org/types/ControlParameter\"],\n description = \"A dictionary of keyword arguments to be passed to the seasonal unit root test. 
At the time of writing, the only argument that is read is alpha.\"\n )\n\n allowdrift = hyperparams.UniformBool(\n default=True,\n semantic_types=[\"https://metadata.datadrivendiscovery.org/types/ControlParameter\"],\n description=\"If True, models with drift terms are considered.\"\n )\n\n allowmean = hyperparams.UniformBool(\n default=True,\n semantic_types=[\"https://metadata.datadrivendiscovery.org/types/ControlParameter\"],\n description=\"If True, models with a non-zero mean are considered.\"\n )\n\n blambda = hyperparams.Union(\n configuration=FrozenOrderedDict([\n (\"none or auto\",\n hyperparams.Enumeration(\n values = (None, \"auto\"),\n default=None\n )\n ),\n (\"blambda\", \n hyperparams.Uniform(\n lower=-5.0,\n upper=5.0,\n default=0.0\n )\n )\n ]),\n default=\"none or auto\",\n semantic_types=[\"https://metadata.datadrivendiscovery.org/types/ControlParameter\"],\n description=\"Box-Cox transformation parameter. If lambda=\\\"auto\\\", then a transformation is automatically selected using BoxCox.lambda. The transformation is ignored if None. Otherwise, data transformed before model is estimated.\"\n )\n\n biasadj = hyperparams.UniformBool(\n default=False,\n semantic_types=[\"https://metadata.datadrivendiscovery.org/types/ControlParameter\"],\n description=\"Use adjusted back-transformed mean for Box-Cox transformations. If transformed data is used to produce forecasts and fitted values, a regular back transformation will result in median forecasts. If biasadj is True, an adjustment will be made to produce mean forecasts and fitted values.\"\n )\n\n parallel = hyperparams.UniformBool(\n default=False,\n semantic_types=[\"https://metadata.datadrivendiscovery.org/types/ResourcesUseParameter\"],\n description=\"If True and stepwise = False, then the specification search is done in parallel. This can give a significant speedup on multicore machines.\"\n )\n\n num_cores = hyperparams.Union(\n configuration=FrozenOrderedDict([\n (\"none\",\n hyperparams.Constant(\n default=None\n )\n ),\n (\"num_cores\", \n hyperparams.UniformInt(\n lower=1,\n upper=20,\n default=2\n )\n )\n ]),\n default=\"num_cores\",\n semantic_types=[\"https://metadata.datadrivendiscovery.org/types/ResourcesUseParameter\"],\n description=\"Allows the user to specify the amount of parallel processes to be used if parallel = True and stepwise = False. If None, then the number of logical cores is automatically detected and all available cores are used.\"\n )\n\n period = hyperparams.UniformInt(\n lower=1,\n upper=1000,\n default=1,\n semantic_types=[\"https://metadata.datadrivendiscovery.org/types/ControlParameter\"],\n description=\"Number of observations per unit of time. 
For example 24 for Hourly data.\"\n )\n\nclass AutoARIMAWrapperPrimitive(SupervisedLearnerPrimitiveBase[Inputs, Outputs, Params, Hyperparams]):\n\n \"\"\"\n A wrapper primitive of statsforecast.arima.AutoARIMA\n Code Available at https://github.com/Nixtla/statsforecast/blob/main/statsforecast/arima.py#L2148-L2465\n\n An AutoARIMA estimator.\n Returns best ARIMA model according to either AIC, AICc or BIC value.\n The function conducts a search over possible model within the order constraints provided.\n\n Notes\n -----\n * This implementation is a mirror of Hyndman's forecast::auto.arima.\n\n References\n ----------\n [1] https://github.com/robjhyndman/forecast\n \"\"\"\n\n metadata = metadata_base.PrimitiveMetadata({\n \"id\": \"434d4d25-dd61-4a32-a624-0f983995e189\",\n \"version\": \"0.1.0\",\n \"name\": \"statsforecast.arima.AutoARIMA\",\n \"description\": \"Wrapper of the AutoARIMA class from statsforecast package\",\n \"python_path\": \"d3m.primitives.time_series_forecasting.arima.AutonBox\",\n \"primitive_family\": metadata_base.PrimitiveFamily.TIME_SERIES_FORECASTING,\n \"algorithm_types\": [\"AUTOREGRESSIVE_INTEGRATED_MOVING_AVERAGE\"],\n 'source': {\n 'name': autonbox.__author__,\n 'uris': ['https://github.com/autonlab/autonbox'],\n 'contact': 'mailto:[email protected]'\n },\n \"keywords\": [\"ARIMA\", \"time series\", \"forecasting\"],\n \"installation\": [{\n \"type\": metadata_base.PrimitiveInstallationType.PIP,\n \"package\": \"autonbox\",\n \"version\": __version__\n }]\n })\n\n def __init__(self, *, hyperparams: Hyperparams) -> None:\n #print(\"calling __init__\")\n\n super().__init__(hyperparams=hyperparams)\n\n self._fitted = False\n self._training_target = None\n self._training_exogenous = None\n self._new_training_data = False\n self._autoARIMA = None\n\n def get_params(self) -> Params:\n #print(\"calling get_params\")\n return Params(\n fitted = self._fitted,\n new_training_data = self._new_training_data,\n autoARIMA = self._autoARIMA\n )\n\n def set_params(self, *, params: Params) -> None:\n #print(\"calling set_params, params argument:\")\n #print(params)\n self._fitted = params['fitted']\n self._new_training_data = params['new_training_data']\n self._autoARIMA = params['autoARIMA']\n\n def set_training_data(self, *, inputs: Inputs, outputs: Outputs) -> None:\n '''\n print(\"calling set_training_data\")\n print(\"Inputs:\")\n print(inputs)\n print(\"Outputs:\")\n print(outputs)\n '''\n \n '''\n inputs is a dataframe that will be used as exogenous data, excepting time columns\n outputs is a dataframe containing one column, the time series that we want to predict future values of\n '''\n\n #TODO: check that outputs has one column\n #TODO: check that inputs and outputs have same number of rows\n #TODO: check at np.nan and np.inf are not present\n\n self._training_exogenous = inputs\n self._training_target = outputs\n self._new_training_data = True\n\n #private method\n #determine columns to be used as exogenous data from column semantic types\n def _format_exogenous(self, inputs):\n timestamp_cols = inputs.metadata.list_columns_with_semantic_types(\n (\n \"https://metadata.datadrivendiscovery.org/types/Time\",\n )\n )\n #print(\"timestamp cols: \" + str(timestamp_cols))\n #TODO: raise error if there are multiple time cols or it is not a valid time series?\n\n grouping_cols = inputs.metadata.list_columns_with_semantic_types(\n (\n \"https://metadata.datadrivendiscovery.org/types/GroupingKey\",\n \"https://metadata.datadrivendiscovery.org/types/SuggestedGroupingKey\"\n )\n 
)\n #print(\"grouping cols: \" + str(grouping_cols))\n #TODO: raise error if there are any grouping cols\n\n attribute_cols = inputs.metadata.list_columns_with_semantic_types(\n (\n \"https://metadata.datadrivendiscovery.org/types/Attribute\",\n )\n )\n #print(\"attribute cols: \" + str(attribute_cols))\n \n exogenous_cols = list(set(attribute_cols) - set(grouping_cols + timestamp_cols))\n #print(\"exogneous_cols: \" + str(exogenous_cols))\n\n #return None\n if (exogenous_cols == []):\n return None\n else:\n exogenous = inputs.iloc[:, exogenous_cols]\n #print(\"exogenous: \")\n #print(exogenous)\n X = exogenous.to_numpy().astype(float)\n return X \n\n def fit(self, *, timeout: float = None, iterations: int = None) -> base.CallResult[None]:\n #print(\"Fitting StatsForecast AutoARIMA\")\n\n #make hyperparams into local variables for convenience\n d = self.hyperparams['d']\n D = self.hyperparams['D']\n max_p = self.hyperparams['max_p']\n max_q = self.hyperparams['max_q']\n max_P = self.hyperparams['max_P']\n max_Q = self.hyperparams['max_Q']\n max_order = self.hyperparams['max_order']\n max_d = self.hyperparams['max_d']\n max_D = self.hyperparams['max_D']\n start_p = self.hyperparams['start_p']\n start_q = self.hyperparams['start_q']\n start_P = self.hyperparams['start_P']\n start_Q = self.hyperparams['start_Q']\n stationary = self.hyperparams['stationary']\n seasonal = self.hyperparams['seasonal']\n ic = self.hyperparams['ic']\n stepwise = self.hyperparams['stepwise']\n nmodels = self.hyperparams['nmodels']\n trace = self.hyperparams['trace']\n approximation = self.hyperparams['approximation']\n method = self.hyperparams['method']\n truncate = self.hyperparams['truncate']\n test = self.hyperparams['test']\n test_kwargs = self.hyperparams['test_kwargs']\n seasonal_test = self.hyperparams['seasonal_test']\n seasonal_test_kwargs = self.hyperparams['seasonal_test_kwargs']\n allowdrift = self.hyperparams['allowdrift']\n allowmean = self.hyperparams['allowmean']\n blambda = self.hyperparams['blambda']\n biasadj = self.hyperparams['biasadj']\n parallel = self.hyperparams['parallel']\n num_cores = self.hyperparams['num_cores']\n period = self.hyperparams['period']\n\n self._autoARIMA = AutoARIMA(\n d = d,\n D = D,\n max_p = max_p,\n max_q = max_q,\n max_P = max_P,\n max_Q = max_Q,\n max_order = max_order,\n max_d = max_d,\n max_D = max_D,\n start_p = start_p,\n start_q = start_q,\n start_P = start_P,\n start_Q = start_Q,\n stationary = stationary,\n seasonal = seasonal,\n ic = ic,\n stepwise = stepwise,\n nmodels = nmodels,\n trace = trace,\n approximation = approximation,\n method = method,\n truncate = truncate,\n test = test,\n test_kwargs = test_kwargs,\n seasonal_test = seasonal_test,\n seasonal_test_kwargs = seasonal_test_kwargs,\n allowdrift = allowdrift,\n allowmean = allowmean,\n blambda = blambda,\n biasadj = biasadj,\n parallel = parallel,\n num_cores = num_cores,\n period = period\n )\n\n if self._training_target is None:\n raise MissingValueError(\"fit() called before training data set, call set_training_data() first.\")\n \n if self._fitted == True and self._new_training_data == False:\n self.logger.warning(\"Model is already fit and training data has not changed. 
Model will be refit from scratch, but expect nothing to change.\")\n\n #AutoARIMA takes a 1-dimensional ndarray for y\n y = self._training_target.to_numpy().flatten()\n #print(\"y\")\n #print(y)\n\n #extract exogenous columns if there are any, and turn them into a 2d numpy array of floats.\n X = self._format_exogenous(self._training_exogenous)\n #print(\"X:\")\n #print(X)\n\n self._autoARIMA.fit(y=y, X=X)\n self._fitted = True\n return base.CallResult(None)\n\n def produce(self, *, inputs: Inputs, timeout: float = None, iterations: int = None) -> base.CallResult[Outputs]:\n #print(\"calling produce\")\n #print(\"Inputs:\")\n #print(inputs)\n\n #inputs is non-target columns that can optionally be used as future exogenous data.\n\n if not self._fitted:\n raise PrimitiveNotFittedError(\"Primitive not fitted.\")\n\n #predict for a number of periods corresponding to number of rows in inputs\n nrows = inputs.shape[0]\n #print(\"nrows:\")\n #print(nrows)\n \n X = self._format_exogenous(inputs)\n #print(\"X:\")\n #print(X)\n\n predictions = self._autoARIMA.predict(h=nrows, X=X, level=[])\n #print(\"predictions:\")\n #print(predictions)\n output = container.DataFrame(predictions, generate_metadata=True)\n return base.CallResult(output)\n\n\n\n"
},
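The `fit`/`produce` pair in the record above ultimately reduces to two calls on the underlying estimator: `AutoARIMA.fit(y=..., X=...)` and `AutoARIMA.predict(h=..., X=..., level=[])`. Below is a minimal standalone sketch of that same call pattern outside the D3M plumbing; the synthetic series and the 12-step horizon are made up for illustration, and the exact shape of the returned forecasts depends on the statsforecast version pinned by the repo.

```python
import numpy as np
from statsforecast.arima import AutoARIMA

# Synthetic monthly series with trend and yearly seasonality (illustrative only)
t = np.arange(120)
rng = np.random.default_rng(0)
y = 0.5 * t + 10.0 * np.sin(2.0 * np.pi * t / 12.0) + rng.normal(0.0, 1.0, 120)

model = AutoARIMA(seasonal=True, period=12, stepwise=True)
model.fit(y=y, X=None)                            # X=None: no exogenous regressors
forecast = model.predict(h=12, X=None, level=[])  # point forecasts only, as in produce()
print(forecast)
```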
{
"alpha_fraction": 0.5281385183334351,
"alphanum_fraction": 0.7186146974563599,
"avg_line_length": 45.20000076293945,
"blob_id": "447081a5432152d315f174cdcc26208aac904499",
"content_id": "822bb483df66980eae64725e044d1f509c2c9dfd",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 231,
"license_type": "permissive",
"max_line_length": 99,
"num_lines": 5,
"path": "/autonbox/__init__.py",
"repo_name": "autonlab/autonbox",
"src_encoding": "UTF-8",
"text": "__version__ = '0.3.0'\n__author__ = 'CMU'\n__author_email__ = '[email protected]'\n__key_static_file_resnext__ = 'cmu.resnext-101-kinetics.pth'\n__digest_static_file_resnext__ = 'f82e4e519723fc7b2ff3761ea35600bdaf796fb7a4e62ee4c5591da7ffe48326'\n"
},
{
"alpha_fraction": 0.5951382517814636,
"alphanum_fraction": 0.6050792336463928,
"avg_line_length": 38.12765884399414,
"blob_id": "680ffa857120a00e9a37ef3115e992ddbc128a80",
"content_id": "3c0b48636aad8b24b443399178b9e86a53f9f4e3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12876,
"license_type": "permissive",
"max_line_length": 163,
"num_lines": 329,
"path": "/autonbox/NHITS.py",
"repo_name": "autonlab/autonbox",
"src_encoding": "UTF-8",
"text": "import typing\nimport copy\n\nfrom d3m import container\nfrom d3m.primitive_interfaces import base\nfrom d3m.primitive_interfaces.supervised_learning import SupervisedLearnerPrimitiveBase\nfrom d3m.metadata import base as metadata_base\nfrom d3m.metadata import hyperparams, params\nfrom d3m.exceptions import MissingValueError, PrimitiveNotFittedError\n\nimport pandas as pd\n\nfrom ray import tune\nfrom neuralforecast.auto import AutoNHITS\nfrom neuralforecast.losses.pytorch import MAE\nfrom ray.tune.search.hyperopt import HyperOptSearch\n#from neuralforecast import NeuralForecast\nfrom neuralforecast.tsdataset import TimeSeriesDataset\n\nimport autonbox\n\n\"\"\"\nA wrapper primitive for AutoNHITS from NeuralForecast (https://nixtla.github.io/neuralforecast/models.html#autonhits)\nMore information on Neural Hierarchical Interpolation for Time Series (NHITS): https://nixtla.github.io/neuralforecast/models.nhits.html\nFor an intro to NeuralForecast, see https://nixtla.github.io/neuralforecast/examples/installation.html\n\n\nTODO: Add more information here\n\"\"\"\n\n#not sure if necessary\n#TODO: uncomment if this not being here causes errors\n#__all__ = ('AutoNHITSPrimitive',)\n\nInputs = container.DataFrame\nOutputs = container.DataFrame\n\nclass Params(params.Params):\n has_training_data: bool\n new_training_data: bool\n\n training_target: typing.Any\n training_attributes: typing.Any\n nf_train_data: typing.Any\n target_name: str\n exog_names: typing.List\n ngroups: int\n\n fitted: bool\n model: typing.Any\n\nclass Hyperparams(hyperparams.Hyperparams):\n pass\n\nclass AutoNHITSPrimitive(SupervisedLearnerPrimitiveBase[Inputs, Outputs, Params, Hyperparams]):\n\n metadata = metadata_base.PrimitiveMetadata({\n \"id\": \"91c8bd09-cf10-4fde-a471-e092ef3df6b4\",\n \"version\": \"0.1.0\",\n \"name\": \"neuralforecast.models.AutoNHITS\",\n \"description\": \"Wrapper of the AutoNHITS model from the neuralforecast package\",\n \"python_path\": \"d3m.primitives.time_series_forecasting.nhits.AutonBox\",\n \"primitive_family\": metadata_base.PrimitiveFamily.TIME_SERIES_FORECASTING,\n \"algorithm_types\": [\"DEEP_NEURAL_NETWORK\"],\n 'source': {\n 'name': autonbox.__author__,\n 'uris': ['https://github.com/autonlab/autonbox'],\n 'contact': 'mailto:[email protected]'\n },\n \"keywords\": [\"time series\", \"forecasting\", \"deep neural network\"],\n \"installation\": [{\n \"type\": metadata_base.PrimitiveInstallationType.PIP,\n \"package\": \"autonbox\",\n \"version\": autonbox.__version__\n }]\n })\n\n def __init__(self, *, hyperparams: Hyperparams) -> None:\n print(\"calling __init__\")\n\n super().__init__(hyperparams=hyperparams)\n\n self._has_training_data = False\n self._new_training_data = False\n\n self._training_target = None\n self._training_attributes = None\n self._nf_train_data = None\n self._target_name = None\n self._exog_names = []\n self._ngroups = 0\n\n self._fitted = False\n self._model = None\n\n def get_params(self) -> Params:\n print(\"calling get_params\")\n return Params(\n has_training_data = self._has_training_data,\n new_training_data = self._new_training_data,\n training_target = self._training_target,\n training_attributes = self._training_attributes,\n nf_train_data = self._nf_train_data,\n target_name = self._target_name,\n exog_names = self._exog_names,\n ngroups = self._ngroups,\n fitted = self._fitted,\n model = self._model\n )\n\n def set_params(self, *, params: Params) -> None:\n print(\"calling set_params\")\n self._has_training_data = 
params['has_training_data']\n self._new_training_data = params['new_training_data']\n\n self._training_target = params['training_target']\n self._training_attributes = params['training_attributes']\n self._nf_train_data = params['nf_train_data']\n self._target_name = params['target_name']\n self._exog_names = params['exog_names']\n self._ngroups = params['ngroups']\n\n self._fitted = params['fitted']\n self._model = params['model']\n\n #private method\n def _format_data(self, attributes, target=None):\n #transform data from d3m input format to neuralforcast ingest format\n print(\"formatting data for neuralforecast\")\n\n #extract time column as series\n time_col_indices = attributes.metadata.list_columns_with_semantic_types(\n (\n \"https://metadata.datadrivendiscovery.org/types/Time\",\n )\n )\n print(\"timestamp cols: \" + str(time_col_indices))\n #TODO: make sure there's only 1 timestamp col\n #TODO: make sure it's valid datetime\n time_col = attributes.iloc[:,time_col_indices[0]]\n #print(\"time col before conversion to datetime:\")\n #print(time_col)\n time_col = pd.to_datetime(time_col)\n #print(\"time col after conversion to datetime:\")\n #print(time_col)\n\n #extract grouping column as series\n group_col_indices = attributes.metadata.list_columns_with_semantic_types(\n (\n \"https://metadata.datadrivendiscovery.org/types/GroupingKey\",\n \"https://metadata.datadrivendiscovery.org/types/SuggestedGroupingKey\"\n )\n )\n print(\"grouping cols: \" + str(group_col_indices))\n #TODO: make sure theres <=1 grouping col\n #TODO: make sure grouping col is valid\n if len(group_col_indices) > 0:\n group_col = attributes.iloc[:,group_col_indices[0]]\n else:\n #data has only 1 time series (no grouping col)\n #however neuralforecast still requires a grouping col\n #create a grouping col putting all rows in same group\n group_col = ['a']*attributes.shape[0]\n #TODO: ensure groups are all same length\n self._ngroups = len(set(group_col))\n\n #extract names of exogneous variable columns and save them\n attribute_col_indices = attributes.metadata.list_columns_with_semantic_types(\n (\n \"https://metadata.datadrivendiscovery.org/types/Attribute\",\n )\n )\n print(\"attribute cols: \" + str(attribute_col_indices))\n exog_col_inidices = list(set(attribute_col_indices) - set(group_col_indices + time_col_indices))\n print(\"exogenous cols: \" + str(exog_col_inidices))\n exogenous_colnames = [list(attributes.columns)[i] for i in exog_col_inidices]\n #print(\"exogenous colnames: \" + str(exogenous_colnames))\n self._exog_names = exogenous_colnames\n\n #construct dataframe formatted to be ingested by neuralforecast\n nf_df = copy.deepcopy(attributes[exogenous_colnames]) #exogenous cols retain name from dataset\n #TODO: check that no exogenous cols are named \"ds\", \"y\" or \"unique_id\"\n nf_df['ds'] = time_col\n nf_df['unique_id'] = group_col\n\n #add target col if we're given one and save target colname\n if target is not None:\n target_colname = target.columns[0]\n nf_df['y'] = target[target_colname]\n self._target_name = target_colname\n\n return(nf_df)\n\n\n def set_training_data(self, *, inputs: Inputs, outputs: Outputs) -> None:\n print(\"calling set_training_data\")\n #print(\"Inputs:\")\n #print(inputs)\n #print(\"Outputs:\")\n #print(outputs)\n \n #inputs is a dataframe that will be used as exogenous data, excepting time columns\n #outputs is a dataframe containing one column, the time series that we want to predict future values of\n \n #TODO: check that outputs has one column\n #TODO: check 
that inputs and outputs have same number of rows\n #TODO: check at np.nan and np.inf are not present\n\n if self._has_training_data:\n self._new_training_data = True\n\n self._has_training_data = True\n\n #save data in d3m format\n self._training_attributes = inputs\n self._training_target = outputs\n\n #save data in neuralforecast format\n self._nf_train_data = self._format_data(inputs, outputs)\n #this method also sets self._target_name and self._exog_names\n\n def fit(self, *, timeout: float = None, iterations: int = None) -> base.CallResult[None]:\n #in order to fit NHITS, need to know forecasting horizon\n #so fit in the produce method and do nothing here\n print(\"calling fit, do nothing\")\n return base.CallResult(None)\n \n #private method\n def _fit_nf(self, train_ts, h):\n\n print(\"Fitting NeuralForecast AutoNHITS\")\n print(\"train:\")\n print(self._nf_train_data)\n print(\"h:\" + str(h))\n print(\"future exog: \" + str(self._exog_names))\n\n nhits_config = {\n \"input_size\": 3*h,\n \"n_pool_kernel_size\": tune.choice(\n [3 * [1], 3 * [2], 3 * [4], [8, 4, 1], [16, 8, 1]]\n ),\n \"n_freq_downsample\": tune.choice(\n [\n [168, 24, 1],\n [24, 12, 1],\n [180, 60, 1],\n [60, 8, 1],\n [40, 20, 1],\n [1, 1, 1],\n ]\n ),\n \"learning_rate\": tune.loguniform(1e-4, 1e-1),\n \"scaler_type\" : 'robust',\n \"max_steps\": 100, #TODO: change to 1000 after testing\n \"batch_size\": tune.choice([32, 64, 128, 256]),\n \"windows_batch_size\": tune.choice([128, 256, 512, 1024]), # Initial Learning rate\n \"random_seed\": 1, \n \"futr_exog_list\" : self._exog_names\n }\n\n self._model = AutoNHITS(\n h=h,\n loss=MAE(),\n config=nhits_config,\n search_alg=HyperOptSearch(),\n num_samples=10)\n \n self._model.fit(train_ts, val_size=h*2)\n\n \n def produce(self, *, inputs: Inputs, timeout: float = None, iterations: int = None) -> base.CallResult[Outputs]:\n print(\"calling produce\")\n #print(\"Inputs:\")\n #print(inputs)\n #inputs is non-target columns that can optionally be used as future exogenous data.\n #also includes time and grouping columns\n\n if inputs.equals(self._training_attributes):\n #D3M likes to get in-sample predictions before actual forecasts\n #neuralforecast doesn't like to do that\n #so, if inputs match training data (i.e. 
D3M is looking for in-sample predictions)\n #return essentially dummy data\n #hopefully this will not mess anything up down the line\n #it doesn't seem like training predictions are really used despite D3M wanting them\n #and they dont really make sense for time series forecasting\n #dataframe that is the same length as expected output\n #contains one column with the target's name which is all 0's\n print(\"returning dummy data for in-sample predictions\")\n nrows = inputs.shape[0]\n predictions = pd.DataFrame({self._target_name:[0]*nrows})\n\n else:\n train_ts = TimeSeriesDataset.from_df(df=self._nf_train_data)[0]\n \n #fit if we have not fit the model yet\n #refit if there is new training data\n if not self._fitted or self._new_training_data:\n #predict for a number of periods corresponding to number of rows in inputs\n h = int(inputs.shape[0]/self._ngroups)\n #print(\"h:\" + str(h))\n\n self._fit_nf(train_ts, h)\n\n self._fitted = True\n self._new_training_data = False #we have fit on current train data, no longer new\n\n #TODO: check that self._model not None\n future = self._format_data(inputs)\n print(\"future:\")\n print(future)\n\n predict_ts = TimeSeriesDataset.update_dataset(\n dataset=train_ts, future_df=future\n )\n self._model.set_test_size(h)\n\n predictions = self._model.predict(dataset=predict_ts)\n predictions = list(predictions.flatten())\n #print(\"raw predictions:\")\n #print(predictions)\n #print(type(predictions))\n predictions = pd.DataFrame({self._target_name : predictions})\n\n #print(\"predictions to return:\")\n #print(predictions)\n\n #need to put predictions in right format for d3m\n output = container.DataFrame(predictions, generate_metadata=True)\n return base.CallResult(output)\n\n\n\n"
},
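Most of the work in the NHITS record above happens in `_format_data`, which reshapes the D3M frames into the long format neuralforecast ingests: a `unique_id` grouping column, a datetime `ds` column, the target renamed to `y`, and exogenous columns keeping their dataset names (later handed to `futr_exog_list`). A small sketch of that layout with fabricated values; the `temperature` column is a hypothetical exogenous attribute, not part of the original code.

```python
import pandas as pd
from neuralforecast.tsdataset import TimeSeriesDataset

# Two series of four observations each, in neuralforecast's long format
dates = pd.date_range("2020-01-01", periods=4, freq="D")
nf_df = pd.DataFrame({
    "unique_id": ["a"] * 4 + ["b"] * 4,
    "ds": list(dates) * 2,
    "y": [1.0, 2.0, 3.0, 4.0, 10.0, 20.0, 30.0, 40.0],
    "temperature": [5.1, 5.3, 5.0, 5.2, 7.9, 8.1, 8.0, 8.2],
})

# Same call the primitive makes before fitting; from_df returns a tuple
# whose first element is the dataset object.
train_ts = TimeSeriesDataset.from_df(df=nf_df)[0]
```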
{
"alpha_fraction": 0.6229652166366577,
"alphanum_fraction": 0.640123188495636,
"avg_line_length": 35.66128921508789,
"blob_id": "d42f655f921faaa6983fd2abf965994a39206d66",
"content_id": "3ffcd4ca3d3a57c997af2c2f0bb4e5c996883aa0",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2273,
"license_type": "permissive",
"max_line_length": 111,
"num_lines": 62,
"path": "/autonbox/merge_partial_multipredictions.py",
"repo_name": "autonlab/autonbox",
"src_encoding": "UTF-8",
"text": "import os\nimport typing\nimport numpy as np\nimport pandas as pd\n\nfrom d3m import container\nfrom d3m import utils as d3m_utils\nfrom d3m.metadata import base as metadata_base, hyperparams\nfrom d3m.primitive_interfaces.base import CallResult\nfrom d3m.primitive_interfaces.transformer import TransformerPrimitiveBase\n\nimport autonbox\n\nInputs = container.List\nOutputs = container.DataFrame\n\nclass MergePartialPredictionsPrimitive(TransformerPrimitiveBase[Inputs, Outputs, hyperparams.Hyperparams]):\n \"\"\"\n Merge predictions of multiple models\n Useful if model do not produce predictions for each points and that it is necessary\n to merge those predictions (the first non nan will be returned)\n \"\"\"\n\n metadata = metadata_base.PrimitiveMetadata({\n 'id': '1cc95f70-0716-11ea-9762-3dd2bb86dde8',\n 'version': '0.1.0',\n 'name': \"Merge predictions of multiple models\",\n 'python_path': 'd3m.primitives.data_transformation.merge_partial_predictions.AutonBox',\n 'source': {\n 'name': autonbox.__author__,\n 'uris': ['https://github.com/autonlab/autonbox'],\n 'contact': 'mailto:[email protected]'\n },\n \"installation\": [{\n \"type\": metadata_base.PrimitiveInstallationType.PIP,\n \"package\": \"autonbox\",\n \"version\": \"0.2.4\"\n }],\n 'algorithm_types': [\n metadata_base.PrimitiveAlgorithmType.DATA_CONVERSION, #TODO: Choose\n ],\n 'primitive_family': metadata_base.PrimitiveFamily.DATA_TRANSFORMATION\n })\n\n def produce(self, *, inputs: Inputs, timeout: float = None, iterations: int = None) -> CallResult[Outputs]:\n # Force index to be d3mIndex for concatenation\n inputs_reindex = []\n for i in inputs:\n i_copy = i.copy()\n i_copy.index = i_copy.d3mIndex\n i_copy = i_copy.drop(columns=\"d3mIndex\")\n inputs_reindex.append(i_copy)\n \n # Merge inputs \n output = pd.concat(inputs_reindex, axis = 1)\n output.metadata = inputs[-1].metadata\n \n # Propagate best non nan score\n output = output.T.fillna(method = 'bfill').T.iloc[:, :1]\n output = output.reset_index().dropna()\n\n return CallResult(output)\n"
},
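The heart of `produce` above is the transpose-and-backfill trick: after concatenating the per-model predictions column-wise, backfilling the transposed frame pulls the first non-NaN value of each row into the first column, which `iloc[:, :1]` then keeps. A toy demonstration with fabricated predictions; note that on pandas 2.x, `.bfill()` is the non-deprecated spelling of `fillna(method='bfill')` used by the primitive.

```python
import numpy as np
import pandas as pd

# Three models, each covering only part of the rows (NaN = no prediction)
preds = pd.DataFrame({
    "model_a": [1.0, np.nan, np.nan],
    "model_b": [9.0, 2.0, np.nan],
    "model_c": [8.0, 7.0, 3.0],
})

# First non-NaN value per row wins, so column order decides precedence
merged = preds.T.fillna(method="bfill").T.iloc[:, :1]
print(merged)  # rows resolve to 1.0, 2.0, 3.0
```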
{
"alpha_fraction": 0.7767978310585022,
"alphanum_fraction": 0.7876526713371277,
"avg_line_length": 45.09375,
"blob_id": "f5e194f63a920ee47881483d94e8591b1ecc5c5e",
"content_id": "463dd894db6c9dc389434e79d70184f89b3674cf",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1474,
"license_type": "permissive",
"max_line_length": 209,
"num_lines": 32,
"path": "/README.md",
"repo_name": "autonlab/autonbox",
"src_encoding": "UTF-8",
"text": "# The Auton Lab TA1 primitives\n\nThis repository contains additional Auton Lab TA1 primitives for the D3M program.\n\n1. [`Iterative Labeling`](autonbox/iterative_labeling.py) - Blackbox based iterative labeling for semi-supervised learning\n1. [`Video featurizer`](autonbox/resnext101_kinetics_video_features.py) - Video Feature Extraction for Action Classification With 3D ResNet\n\n## Installation\nTo install primitives, run:\n```bash\npip install -U -e git+https://github.com/autonlab/autonbox.git#egg=autonbox\n```\n\n`Video featurizer` requires a static file, pre-trained model weights.\nTo download it, run: \n```bash\nmkdir -p /tmp/cmu/pretrained_files\npython3 -m d3m index download -o /tmp/cmu/pretrained_files # requires d3m core\n```\n\n## Video featurizer\nThe primitive outputs a data frame of size N x M, where N is the number of videos and M is 2024 features of type float.\n\nIt supports running on GPUs.\n\n## Merge Partial MultiPredictions\nThis primitive allows to merge partial predictions. These partial predictions may happen when removing rows of a dataset.\nIt is however necessary to provide a fallback predictions to offer a prediction to each initial row.\nThe strategy adopted in this primitive is to take the first vote for each row; therefore the order of the inputs predictions is crucial (for instance, one can use a cross correlation score to sort this input).\n\n## Clean Augmentation\nThis primitive removes rows of a dataset if they contain less than x% of features."
},
{
"alpha_fraction": 0.652430534362793,
"alphanum_fraction": 0.6663194298744202,
"avg_line_length": 39,
"blob_id": "8dfff826efa6646d27ba284c5298f14e105c4955",
"content_id": "9ae4e6f512d26316f8771a193e5c79b15eaa244c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2880,
"license_type": "permissive",
"max_line_length": 141,
"num_lines": 72,
"path": "/autonbox/clean_augment.py",
"repo_name": "autonlab/autonbox",
"src_encoding": "UTF-8",
"text": "import os\nimport pandas as pd\n\nfrom d3m import container\nfrom d3m import utils as d3m_utils\nfrom d3m.metadata import base as metadata_base, hyperparams\nfrom d3m.primitive_interfaces.base import CallResult\nfrom d3m.primitive_interfaces.transformer import TransformerPrimitiveBase\n\nimport autonbox\n\nInputs = container.Dataset\nOutputs = container.Dataset\n\nclass Hyperparams(hyperparams.Hyperparams):\n original_columns = hyperparams.Set(\n elements=hyperparams.Hyperparameter[str](''),\n default=(),\n semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],\n description=\"List of the original columns\"\n )\n percentage_missing = hyperparams.Uniform(\n lower=0.0, \n upper=1.0, \n default=0.5,\n semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter'],\n description='Percentage of missing data allowed (above this percentage and the line is deleted)'\n )\n\n\nclass CleanAugmentationPrimitive(TransformerPrimitiveBase[Inputs, Outputs, Hyperparams]):\n \"\"\"\n Remove rows which haven't been augmented following the datamart augmentation \n (any rows with more than percentage_missing columns which are resulting from augmentation)\n \n NB: This primitive results might reduce the number of row\n \"\"\"\n\n metadata = metadata_base.PrimitiveMetadata({\n 'id': 'fe0f1ac8-1d39-463a-b344-7bd498a31b92',\n 'version': '0.1.0',\n 'name': \"Clean dataset of unaugmented rows\",\n 'python_path': 'd3m.primitives.data_cleaning.clean_augmentation.AutonBox',\n 'source': {\n 'name': autonbox.__author__,\n 'uris': ['https://github.com/autonlab/autonbox'],\n 'contact': 'mailto:[email protected]'\n },\n \"installation\": [{\n \"type\": metadata_base.PrimitiveInstallationType.PIP,\n \"package\": \"autonbox\",\n \"version\": \"0.2.4\"\n }],\n 'algorithm_types': [\n #metadata_base.PrimitiveAlgorithmType.ROW_SELECTION\n metadata_base.PrimitiveAlgorithmType.DATA_RETRIEVAL, #TODO: Delete when new algo released\n ],\n 'primitive_family': metadata_base.PrimitiveFamily.DATA_CLEANING\n })\n\n def produce(self, *, inputs: Inputs, timeout: float = None, iterations: int = None) -> CallResult[Outputs]:\n # Selection augmentation columns\n augmented_data = inputs['learningData'][[c for c in inputs['learningData'].columns if c not in self.hyperparams['original_columns']]]\n \n # Count absent data: at Dataset level: empty field\n absent = (augmented_data == '').mean(axis = 1)\n \n # Selection of the augmented lines\n output = inputs.copy()\n output['learningData'] = output['learningData'].loc[absent < self.hyperparams['percentage_missing']]\n\n return CallResult(output)\n"
},
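The selection in `CleanAugmentationPrimitive.produce` above is a row-wise mean of empty-string indicators over the augmented columns, compared against `percentage_missing`. A self-contained sketch of just that test; the column names and the 0.5 threshold are invented for the example.

```python
import pandas as pd

learning_data = pd.DataFrame({
    "d3mIndex": ["0", "1", "2"],        # original column, excluded from the check
    "aug_population": ["12", "9", ""],  # columns added by augmentation
    "aug_income": ["30k", "41k", ""],
})
original_columns = {"d3mIndex"}

augmented = learning_data[[c for c in learning_data.columns if c not in original_columns]]
absent = (augmented == "").mean(axis=1)  # fraction of empty augmented fields per row
kept = learning_data.loc[absent < 0.5]   # drops only the fully empty last row
print(kept)
```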
{
"alpha_fraction": 0.5846105813980103,
"alphanum_fraction": 0.5982808470726013,
"avg_line_length": 41.72566223144531,
"blob_id": "7685e42d7cdd6785f90fff8e57909b8ea59ff698",
"content_id": "766fd6e461b00e754a9ed973c7f700947896ac7f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9656,
"license_type": "permissive",
"max_line_length": 119,
"num_lines": 226,
"path": "/autonbox/resnext101_kinetics_video_features.py",
"repo_name": "autonlab/autonbox",
"src_encoding": "UTF-8",
"text": "import os\nimport warnings\n\nimport autonbox\nimport cv2\nimport numpy as np\nimport torch\nimport typing\nfrom autonbox.contrib.resnet.dataset import Video\nfrom autonbox.contrib.resnet.model import generate_model\nfrom autonbox.contrib.resnet.spatial_transforms import (Compose, Normalize, Scale, CenterCrop, ToTensor)\nfrom autonbox.contrib.resnet.temporal_transforms import LoopPadding\nfrom d3m import container, utils as d3m_utils\nfrom d3m.metadata import base as metadata_base, hyperparams\nfrom d3m.primitive_interfaces import base\nfrom d3m.primitive_interfaces.base import CallResult\nfrom d3m.primitive_interfaces.featurization import FeaturizationTransformerPrimitiveBase\nfrom torch.autograd import Variable\n\nInputs = container.DataFrame\nOutputs = container.DataFrame\n\n\nclass Hyperparams(hyperparams.Hyperparams):\n num_workers = hyperparams.Hyperparameter[int](\n semantic_types=['https://metadata.datadrivendiscovery.org/types/ResourcesUseParameter'],\n default=0,\n description='The number of subprocesses to use for data loading. 0 means that the data will be loaded in the '\n 'main process.'\n )\n\n\nclass ResNext101KineticsParams(object):\n # TODO determine which to make as hyper-parameters\n n_classes = 400\n mode = 'feature'\n clip_vid = 'mean'\n down_rate = 1\n model_name = 'resnext'\n model_depth = 101\n resnet_shortcut = 'B'\n resnext_cardinality = 32\n sample_size = 112\n sample_duration = 16 # number of frames in one clip\n no_cuda = True\n mean = [114.7748, 107.7354, 99.4750]\n batch_size = 32\n\n\nclass ResNext101KineticsPrimitive(FeaturizationTransformerPrimitiveBase[Inputs, Outputs, Hyperparams]):\n \"\"\"\n Video Feature Extraction for Action Classification With 3D ResNet\n \"\"\"\n metadata = metadata_base.PrimitiveMetadata(\n {\n 'id': '15935e70-0605-4ded-87cf-2933ca35d4dc',\n 'version': '0.2.0',\n \"name\": \"Video Feature Extraction for Action Classification With 3D ResNet\",\n 'description': \"Video Feature Extraction for Action Classification With 3D ResNet\",\n 'python_path': 'd3m.primitives.feature_extraction.resnext101_kinetics_video_features.VideoFeaturizer',\n 'source': {\n 'name': autonbox.__author__,\n 'uris': ['https://github.com/autonlab/autonbox'],\n 'contact': 'mailto:[email protected]'\n },\n 'installation': [{\n 'type': metadata_base.PrimitiveInstallationType.PIP,\n 'package_uri': 'git+https://github.com/autonlab/autonbox.git@{git_commit}#egg=autonbox'.format(\n git_commit=d3m_utils.current_git_commit(os.path.dirname(__file__)),\n ),\n }, {\n 'type': metadata_base.PrimitiveInstallationType.FILE,\n 'key': autonbox.__key_static_file_resnext__,\n 'file_uri': 'http://public.datadrivendiscovery.org/resnext-101-kinetics.pth',\n 'file_digest': autonbox.__digest_static_file_resnext__\n }],\n 'algorithm_types': [\n metadata_base.PrimitiveAlgorithmType.CONVOLUTIONAL_NEURAL_NETWORK,\n ],\n 'primitive_family': metadata_base.PrimitiveFamily.FEATURE_EXTRACTION,\n },\n )\n\n def __init__(self, *, hyperparams: Hyperparams, volumes: typing.Union[typing.Dict[str, str], None] = None) -> None:\n super().__init__(hyperparams=hyperparams, volumes=volumes)\n\n self._config = ResNext101KineticsParams\n\n torch.manual_seed(self.random_seed) # seed the RNG for all devices (both CPU and CUDA):\n\n # Use GPU if available\n if torch.cuda.is_available():\n self.logger.info(\"Use GPU.\")\n self._config.no_cuda = False\n # For reproducibility on CuDNN backend\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n else:\n 
self.logger.info(\"Use CPU.\")\n self._config.no_cuda = True\n\n self.logger.info('Number of workers: {}'.format(self.hyperparams['num_workers']))\n\n self._down_rate = 1\n\n def _instantiate_model(self):\n model = generate_model(self._config)\n model_data = self._load_model()\n if self._config.no_cuda:\n state_dict = {k.replace('module.', ''): v for k, v in model_data['state_dict'].items()} # for cpu only\n else:\n state_dict = model_data['state_dict']\n model.load_state_dict(state_dict)\n model.eval()\n return model\n\n def produce(self, *, inputs: Inputs, timeout: float = None, iterations: int = None) -> CallResult[Outputs]:\n \"\"\"\n :param inputs: assume the first column is the filename\n :param timeout:\n :param iterations:\n :return:\n \"\"\"\n model = self._instantiate_model()\n features = []\n # TODO consider a more robust means to 1) get location_base_uris and remove file://\n media_root_dir = inputs.metadata.query((0, 0))['location_base_uris'][0][len('file://'):] # remove file://\n for filename in inputs.iloc[:, 0]:\n file_path = os.path.join(media_root_dir, filename)\n if os.path.isfile(file_path):\n video = self._read_fileuri(file_path) # video is a ndarray of F x H x W x C, e.g. (408, 240, 320, 3)\n feature = self._generate_vid_feature(model, video)\n else:\n self.logger.warning(\"No such file {}. Feature vector will be set to all zeros.\".format(file_path))\n feature = np.zeros(2048)\n features.append(feature)\n\n results = container.DataFrame(features, generate_metadata=True)\n\n return base.CallResult(results)\n\n def _generate_vid_feature(self, model, vid_matrix):\n \"\"\"\n Modified from function classify_video()\n :param vid_matrix: takes in video matrix F(frames) x H(height) x W(width) x C(channels)\n dtype of matrix is uint8\n :return: ndarray representation of video\n \"\"\"\n assert vid_matrix.ndim == 4 and self._down_rate <= 1 # sanity check\n spatial_transform = Compose([Scale(self._config.sample_size),\n CenterCrop(self._config.sample_size),\n ToTensor(),\n Normalize(self._config.mean, [1, 1, 1])])\n temporal_transform = LoopPadding(self._config.sample_duration)\n data = Video(vid_matrix, spatial_transform=spatial_transform,\n temporal_transform=temporal_transform,\n sample_duration=self._config.sample_duration, down_rate=self._down_rate)\n data_loader = torch.utils.data.DataLoader(data, batch_size=self._config.batch_size,\n num_workers=self.hyperparams['num_workers'],\n shuffle=False, pin_memory=True)\n video_outputs = []\n with torch.no_grad():\n for i, inputs in enumerate(data_loader):\n inputs = Variable(inputs)\n # input is of shape n x 3 x sample_duration x 112 x 112\n outputs = model(inputs)\n # output is of format n(batch size) x d(dimension of feature)\n video_outputs.append(outputs.cpu().data)\n video_outputs = np.concatenate(video_outputs, axis=0)\n mean_feature = np.mean(video_outputs, axis=0) # shape of (d, )\n return mean_feature\n\n def _load_model(self):\n \"\"\"\n Loads the model from the volume\n :return:\n \"\"\"\n key_filename = autonbox.__key_static_file_resnext__\n static_dir = os.getenv('D3MSTATICDIR', '/static')\n if key_filename in self.volumes:\n _weight_file_path = self.volumes[key_filename]\n self.logger.info(\"Weights file path found in static volumes\")\n else:\n self.logger.info(\"Trying to locate weights file in the static folder {}\".format(static_dir))\n _weight_file_path = os.path.join(static_dir, autonbox.__digest_static_file_resnext__)\n\n if os.path.isfile(_weight_file_path):\n if torch.cuda.is_available(): # GPU\n 
model_data = torch.load(_weight_file_path)\n else: # CPU only\n model_data = torch.load(_weight_file_path, map_location='cpu')\n self.logger.info(\"Loaded weights file\")\n else:\n raise ValueError(\"Can't get weights file from the volume by key: {} or in the static folder: {}\".format(\n key_filename, static_dir))\n\n return model_data\n\n def _read_fileuri(self, fileuri: str) -> container.ndarray:\n \"\"\"\n @see https://gitlab.com/datadrivendiscovery/common-primitives/blob/master/common_primitives/video_reader.py#L65\n :param fileuri:\n :return:\n \"\"\"\n capture = cv2.VideoCapture(fileuri)\n frames = []\n\n try:\n while capture.isOpened():\n ret, frame = capture.read()\n if not ret:\n break\n else:\n assert frame.dtype == np.uint8, frame.dtype\n\n if frame.ndim == 2:\n # Make sure there are always three dimensions.\n frame = frame.reshape(list(frame.shape) + [1])\n\n assert frame.ndim == 3, frame.ndim\n\n frames.append(frame)\n finally:\n capture.release()\n\n return container.ndarray(np.array(frames), generate_metadata=False)\n"
}
] | 12 |
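One detail worth isolating from `resnext101_kinetics_video_features.py` in the repo above: `_generate_vid_feature` turns a variable number of clip-level network outputs into one fixed-length video descriptor by simple mean pooling. Stripped of the model and data loader, and with fabricated batch shapes, the reduction is just:

```python
import numpy as np

# Pretend batched outputs for 5 sampled clips, each row a 2048-dim ResNeXt feature
video_outputs = [np.random.rand(1, 2048) for _ in range(5)]

clip_features = np.concatenate(video_outputs, axis=0)  # shape (5, 2048)
mean_feature = np.mean(clip_features, axis=0)          # shape (2048,), one vector per video
```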
Cumminsc9/SVG_Parser_Python
|
https://github.com/Cumminsc9/SVG_Parser_Python
|
daed1a2596d9a77ec5ede329dcc712c48ed54b0e
|
adab73f9a619b1be8aaedd07ffcc5913aa8160f1
|
8e83cdf81621e360609a80af89e7fe0c2dd77f08
|
refs/heads/master
| 2020-12-02T16:17:23.978309 | 2017-07-18T14:42:31 | 2017-07-18T14:42:31 | 96,529,605 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5832280516624451,
"alphanum_fraction": 0.5872280597686768,
"avg_line_length": 31.312925338745117,
"blob_id": "6ce54504f29d610da721d2c54e32c66fa82e042a",
"content_id": "5d5240a88eda5a95d73e4a425bd3c6d48c88ff8f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14250,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 441,
"path": "/Main.py",
"repo_name": "Cumminsc9/SVG_Parser_Python",
"src_encoding": "UTF-8",
"text": "import os\nimport re\nimport pdb\n\nfrom bs4 import BeautifulSoup\nfrom enum import Enum\n\ncount_iteration = None\nrelations_list = []\nclass_to_build = []\n\nclass ClassToBuild(object):\n def __init__(self, class_type, class_name, class_variables, class_constructors, class_methods):\n self.class_type = class_type\n self.class_name = class_name\n self.class_variables = class_variables\n self.class_methods = class_methods\n self.class_constructors = class_constructors\n\n\nclass Attribute(object):\n def __init__(self, attribute_access_type, attribute_type, attribute_name):\n self.attribute_name = attribute_name\n self.attribute_type = attribute_type\n self.attribute_access_type = attribute_access_type\n\n def print_attribute(self):\n return (\"ACCESS TYPE: %s \\n\\t\\tRETURN TYPE: %s \\n\\t\\tNAME: %s\\n\" % (\n self.attribute_access_type, self.attribute_type, self.attribute_name))\n\n\nclass Method(object):\n def __init__(self, method_access_type, method_type, method_name, method_arguments):\n self.method_type = method_type\n self.method_access_type = method_access_type\n self.method_arguments = method_arguments\n self.method_name = method_name\n\n def print_method(self):\n if self.method_arguments is not None:\n return (\"ACCESS TYPE: %s \\n\\t\\tRETURN TYPE: %s \\n\\t\\tNAME: %s\\n\\t\\tARGUMENTS: %s\\n\" % (\n self.method_access_type, self.method_type, self.method_name, self.method_arguments))\n else:\n return (\"ACCESS TYPE: %s \\n\\t\\tRETURN TYPE: %s \\n\\t\\tNAME: %s\\n\\t\\tARGUMENTS: None\\n\" % (\n self.method_access_type, self.method_type, self.method_name))\n\n\nclass Constructor(object):\n def __init__(self, constructor_access_type, constructor_name, constructor_arguments):\n self.constructor_access_type = constructor_access_type\n self.constructor_name = constructor_name\n self.constructor_arguments = constructor_arguments\n\n def print_constructor(self):\n if self.constructor_arguments is not None:\n return (\"ACCESS TYPE: %s \\n\\t\\tNAME: %s \\n\\t\\tARGUMENTS: %s\\n\" % (\n self.constructor_access_type, self.constructor_name, self.constructor_arguments))\n else:\n return (\"ACCESS TYPE: %s \\n\\t\\tNAME: %s \\n\\t\\tARGUMENTS: None\\n\" % (\n self.constructor_access_type, self.constructor_name))\n\n\nclass ClassMember(object):\n def __init__(self, class_type, class_name, class_member_type, class_member_value):\n self.class_type = class_type\n self.class_name = class_name\n self.class_member_type = class_member_type\n self.class_member_value = class_member_value\n\n def print_class_member(self):\n print(\"CLASS NAME: %s \\n\\tCLASS MEMBER VALUE: %s \\n\\tCLASS MEMBER TYPE: %s\\n\" % (\n self.class_name, self.class_member_value, self.class_member_type))\n\n\nclass Relation(object):\n def __init__(self, location, value, rel_type):\n self.location = location\n self.value = value\n self.type = rel_type\n\n def __eq__(self, other):\n return self.type == other.type\n\n def __hash__(self):\n return hash(self.type)\n\n\nclass Element(object):\n def __init__(self, location_element, title_element, text_element):\n self.location_ele = location_element\n self.title_ele = title_element\n self.text_ele = text_element\n\n def check_is_class(self):\n if self.title_ele == Title.CLAZZ:\n return True\n\n def print_element(self):\n print(self.location_ele, self.title_ele, self.text_ele)\n\n\nclass AccessType(Enum):\n PUBLIC = \"+\"\n PROTECTED = \"#\"\n PRIVATE = \"-\"\n\n\nclass CollectionType(Enum):\n ARRAY = \"[\"\n LIST = \"List<\"\n MAP = \"Map<\"\n\n\nclass Title(Enum):\n 
CLAZZ = \"Class\"\n ENUM = \"Enumeration\"\n INTERFACE = \"Interface\"\n MEMBER = \"Member\"\n PACKAGE = \"Package\"\n NOTE = \"Note\"\n PAGE = \"Page\"\n SHEET = \"Sheet\"\n INHERITANCE = \"Inheritance\"\n ASSOCIATION = \"Association\"\n AGGREGATION = \"Aggregation\"\n COMPOSITION = \"Composition\"\n DEPENDENCY = \"Dependency\"\n DIRECTED_ASSOCIATION = \"Directed Association\"\n INTERFACE_REALIZATION = \"Interface Realization\"\n CONSTRUCTOR = \"Constructor\"\n METHOD = \"Method\"\n VARIABLE = \"Variable\"\n\n\ndef check_title(title):\n del title[-1]\n for t in title:\n if t is not None:\n parsed_title = str(t)\n if \".\" in parsed_title:\n parsed_title = parsed_title.split(\".\", 1)[0]\n if \"-\" in parsed_title:\n parsed_title = parsed_title.split(\"-\", 1)[0]\n for e in Title:\n if e.value in parsed_title:\n return e\n\n\ndef check_member(members):\n class_member = members.class_name\n class_method = members.class_member_value\n str_array = members.class_member_value.split(\"\\\\(\")[0].split(\" \")\n\n if len(str_array) >= 2:\n class_name = str_array[1]\n else:\n class_name = str_array[0]\n\n if \"(\" in class_method and \")\" in class_method and str(class_member).startswith(class_name):\n members.class_member_type = Title.CONSTRUCTOR\n elif \"(\" in class_method and \")\" in class_method:\n members.class_member_type = Title.METHOD\n else:\n members.class_member_type = Title.VARIABLE\n\n\ndef arrange_method_variables():\n class_list = []\n dict_list = []\n\n for relation in relations_list:\n class_type = relation.type\n class_details = [relation.value, relation.type]\n\n if class_type == Title.CLAZZ or class_type == Title.INTERFACE or class_type == Title.ENUM:\n temp_class_dict = (relation.location, [class_details[0], class_details[1]])\n dict_list.append(temp_class_dict)\n\n for entry in dict_list:\n temp_class = []\n for r in relations_list:\n class_type = r.type\n\n if class_type != Title.CLAZZ:\n if class_type != Title.INTERFACE:\n if class_type != Title.ENUM:\n member_location = float(r.location)\n class_location = float(entry[0])\n new_cal_location = class_location - member_location\n if new_cal_location <= 0:\n if new_cal_location <= -10:\n continue\n else:\n temp_class.append(ClassMember(entry[1][1], entry[1][0], r.type, r.value))\n\n class_list.append(temp_class)\n\n return class_list\n\n\ndef check_for_class(current_iteration, entry):\n i = int(0)\n\n if entry.check_is_class():\n if count_iteration is not None:\n count_iteration[i] = current_iteration\n\n location_entry = str(entry.location_ele)\n xyz = re.search('\\(([^)]+)', location_entry)\n if xyz is None:\n xyz = \"0.0\"\n else:\n xyz = xyz.group(1)\n i += 1\n\n return [str(xyz).split(\",\")[0], str(entry.text_ele), entry.title_ele]\n\n\ndef parse_variable(class_members):\n variable_list = []\n\n for c in class_members:\n if c.class_member_type != Title.CONSTRUCTOR and c.class_member_type != Title.METHOD:\n if c.class_member_type == Title.VARIABLE:\n class_value = c.class_member_value\n\n if \" \" not in class_value:\n variable_name = class_value\n access_type = \"\"\n return_type = \"\"\n else:\n variable_name = parse_variable_name(class_value)\n access_type = parse_access_type(class_value)\n return_type = parse_variable_return_type(class_value)\n\n if access_type is not None and variable_name is not None and return_type is not None:\n variable_list.append(Attribute(access_type, return_type, variable_name))\n\n return variable_list\n\n\ndef parse_name(cv):\n for l in str(cv).splitlines():\n foo = l.split(\"(\", 1)[0]\n 
new_line = re.sub(r\"[+#-]\", \"\", foo)  # strip access-type markers; portable replacement for Py2-only str.translate(None, \"+-#\")\n        return new_line.strip()\n\n\ndef parse_arguments(cv):\n    cv_str = str(cv)\n    new_str = cv_str[cv_str.find(\"(\")+1:cv_str.find(\")\")]\n    args = re.search(\"[a-zA-Z]\", new_str).string\n    args_dict = {}\n\n    if \",\" in cv:\n        new_f = args.split(\",\")\n        for a_new_f in new_f:\n            f2 = a_new_f.split(\":\")\n            args_dict[str(f2[0]).strip()] = str(f2[1]).strip()\n    else:\n        if args is not None:\n            f2 = args.split(\":\")\n            args_dict[str(f2[0]).strip()] = str(f2[1]).strip()\n\n    return args_dict\n\n\ndef check_for_arguments(cv):\n    cv_str = str(cv)\n    new_str = cv_str[cv_str.find(\"(\")+1:cv_str.find(\")\")]\n    are_args = re.search(\"[a-zA-Z]\", new_str)\n    return are_args\n\n\ndef parse_access_type(cv):\n    access_type = str(cv)[0:1]\n\n    if access_type == AccessType.PUBLIC.value:\n        return \"public\"\n    elif access_type == AccessType.PROTECTED.value:\n        return \"protected\"\n    elif access_type == AccessType.PRIVATE.value:\n        return \"private\"\n    else:\n        return \"public\"\n\n\ndef parse_variable_name(cv):\n    return str(cv).split(\" \")[1]\n\n\ndef parse_variable_return_type(cv):\n    if CollectionType.LIST.value in cv or CollectionType.MAP.value in cv:\n        return str(cv).split(\":\")[1]\n    else:\n        return str(cv).split(\" \")[3]\n\n\ndef parse_constructor(constructor):\n    constructor_list = []\n\n    for cm in constructor:\n        # reset per member; the old pre-loop `new_arg_dict = dict` bound the dict type,\n        # making the later `is not None` check always true\n        new_arg_dict = None\n\n        # the original nested check (== Title.METHOD then == Title.VARIABLE) could never\n        # be satisfied, so constructors were silently skipped\n        if cm.class_member_type == Title.CONSTRUCTOR:\n            class_value = cm.class_member_value\n\n            if check_for_arguments(class_value) is not None:\n                new_arg_dict = parse_arguments(class_value)\n\n            constructor_name = parse_name(class_value)\n            access_type = parse_access_type(class_value)\n\n            if access_type is not None and constructor_name is not None:\n                if new_arg_dict is not None:\n                    constructor_list.append(Constructor(access_type, constructor_name, new_arg_dict))\n                else:\n                    constructor_list.append(Constructor(access_type, constructor_name, None))\n\n    return constructor_list\n\n\ndef parse_method(method):\n    method_list = []\n\n    for cm in method:\n        new_arg_dict = {}\n\n        if cm.class_member_type != Title.CONSTRUCTOR:\n            if cm.class_member_type != Title.VARIABLE:\n                class_value = cm.class_member_value\n\n                if check_for_arguments(class_value) is not None:\n                    new_arg_dict = parse_arguments(class_value)\n\n                method_name = parse_name(class_value)\n                access_type = parse_access_type(class_value)\n\n                if check_for_arguments(class_value) is not None:\n                    return_type = parse_argument_method_return_type(class_value)\n                else:\n                    return_type = parse_method_return_type(class_value)\n\n                if access_type is not None and method_name is not None and return_type is not None:\n                    if len(new_arg_dict) != 0:\n                        method_list.append(Method(access_type, return_type, method_name, new_arg_dict))\n                    else:\n                        method_list.append(Method(access_type, return_type, method_name, None))\n\n    return method_list\n\n\ndef parse_argument_method_return_type(cv):\n    index = int(str(cv).rfind(\":\"))\n    if index != -1:\n        foo = str(cv)[index:].split(\" \")\n        return foo[1]\n    return None\n\n\ndef parse_method_return_type(cv):\n    foo = str(cv).split(\":\")\n    return foo[1]\n\n\ndef begin_conversion(content_list):\n    html_str = str()\n    element_list = []\n\n    for cl in content_list:\n        html_str += cl\n\n    document = BeautifulSoup(html_str, \"html.parser\")\n    for loc in document.select(\"[transform*=translate]\"):\n        location_element = loc.find(\"g\", attrs={\"transform\": True})\n        title_element = loc.find(\"title\")\n        text_element = loc.find(\"text\")\n\n        if text_element is not None:\n            new_title = 
check_title(title_element)\n            if location_element is None:\n                text_location_element = text_element.parent.attrs\n                element_list.append(Element(text_location_element, new_title, text_element.text))\n            else:\n                translate = location_element.attrs[\"transform\"]\n                if \"0\" in translate:\n                    new_class_location = location_element.parent.attrs\n                    element_list.append(Element(new_class_location, new_title, text_element.text))\n\n    current_iteration = int(0)\n\n    for xy in element_list:\n        current_iteration += 1\n        va = check_for_class(current_iteration, xy)\n        relations_list.append(Relation(va[0], va[1], va[2]))\n\n    class_map = arrange_method_variables()\n\n    for class_members in class_map:\n        for temp_object in class_members:\n            check_member(temp_object)\n\n        variables = parse_variable(class_members)\n        methods = parse_method(class_members)\n        constructor = parse_constructor(class_members)\n        clazz = ClassToBuild(class_members[0].class_member_type, class_members[0].class_name, variables, constructor, methods)\n        class_to_build.append(clazz)\n\n    output_classes(class_to_build)\n\n\ndef output_classes(class_list):\n    # print() calls keep this runnable on both Python 2 and 3,\n    # matching the print style used in the rest of the module\n    for cl in class_list:\n        print(\"CLASS: \" + cl.class_name)\n\n        print(\"\\tVARIABLES:\")\n        class_attributes = cl.class_variables\n        for ca in class_attributes:\n            print(\"\\t\\t\" + ca.print_attribute())\n\n        print(\"\\tMETHODS:\")\n        class_methods = cl.class_methods\n        for cm in class_methods:\n            print(\"\\t\\t\" + cm.print_method())\n\n\nif __name__ == '__main__':\n    current_dir = os.getcwd()\n\n    file_path_list = list()\n\n    for root, dirs, files in os.walk(os.getcwd()):\n        for f in files:\n            if f.endswith(\".svg\"):\n                file_path_list.append(os.path.join(root, f))\n\n    for file_path in file_path_list:\n        with open(file_path) as f:\n            line_list = f.readlines()\n            line_list = [x.strip() for x in line_list]\n            begin_conversion(line_list)\n"
}
] | 1 |
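`Main.py` in the repo above locates each diagram element's x-coordinate by running `re.search('\(([^)]+)', ...)` over an SVG `translate(x,y)` transform and splitting on the comma. A minimal check of that extraction on a made-up attribute string:

```python
import re

location_entry = 'transform="translate(447.5,102.5)"'  # hypothetical SVG attribute text

match = re.search(r'\(([^)]+)', location_entry)  # capture everything between '(' and ')'
coords = match.group(1) if match else "0.0"      # same "0.0" fallback check_for_class uses
x = coords.split(",")[0]
print(x)  # -> 447.5
```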
Ge0f3/My_Web_API
|
https://github.com/Ge0f3/My_Web_API
|
df19d5f72ef7dffc8fdfbdbebd9a513e26cf35e4
|
f81da3950264e5d84dc8214ea43bbd7728cbb827
|
a4a1293bfea406cf913467a1f049cfb67614ed6b
|
refs/heads/master
| 2021-01-14T02:05:59.434669 | 2020-11-11T07:06:18 | 2020-11-11T07:06:18 | 242,564,858 | 0 | 0 | null | 2020-02-23T18:00:42 | 2020-11-11T07:06:26 | 2021-03-20T02:59:59 |
Python
|
[
{
"alpha_fraction": 0.7141203880310059,
"alphanum_fraction": 0.7233796119689941,
"avg_line_length": 26.799999237060547,
"blob_id": "b0e370232c5713d77646c08efd2e3799dd04b733",
"content_id": "5dd3502fe52aede38928b150f5febf0695defdc8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 864,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 30,
"path": "/services/common/v1/api.py",
"repo_name": "Ge0f3/My_Web_API",
"src_encoding": "UTF-8",
"text": "import logging\r\nfrom flask_restplus import Api\r\n\r\nfrom services.blueprints import common_blueprint_v1\r\n\r\nfrom services.common.v1.resources.ML.ML import ns as ML\r\nfrom services.common.v1.resources.DL.DL import ns as DL\r\nfrom services.common.v1.resources.health.health_routes import ns as health_routes\r\nfrom services.common.v1.resources.misc.misc import ns as misc\r\n\r\nlog = logging.getLogger(__name__)\r\n\r\napi = Api(common_blueprint_v1,\r\n version='1.0',\r\n title='My Web APIs',\r\n description='Service that handles processing user input and sending back response')\r\n\r\n# prefixes\r\nMachineLearning = '/ml'\r\n\r\nDeepLearning = '/dl'\r\n\r\nMisc = '/misc'\r\n\r\nhealth = '/health'\r\n\r\napi.add_namespace(ML, path=MachineLearning)\r\napi.add_namespace(DL, path=DeepLearning)\r\napi.add_namespace(health_routes, path=health)\r\napi.add_namespace(misc, path=Misc)\r\n"
},
{
"alpha_fraction": 0.5125094652175903,
"alphanum_fraction": 0.5193328261375427,
"avg_line_length": 29.911291122436523,
"blob_id": "b52417d0df38136ab1ee7be0dcdc7523aa1c152e",
"content_id": "db2bab8f1b7434fc01343c433cad2330d7bc900b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3957,
"license_type": "no_license",
"max_line_length": 193,
"num_lines": 124,
"path": "/services/common/v1/resources/ML/ML.py",
"repo_name": "Ge0f3/My_Web_API",
"src_encoding": "UTF-8",
"text": "import logging\r\nfrom flask import request, jsonify\r\nimport requests\r\nimport json\r\nimport os\r\nfrom services.config import RequiredConstants as RC\r\nfrom flask_restplus import Resource, Namespace, reqparse\r\nfrom services.common.v1.schemas.ML_schemas import spam, mpg ,iris\r\nfrom services.common.v1.resources.ML.ServiceLayer import ServiceLayer\r\n\r\n\r\nlog = logging.getLogger(__name__)\r\n\r\nns = Namespace(\r\n 'Machine Learning',\r\n description='Operation Realted to Machine learning Model API')\r\nns.models[spam.name] = spam\r\nns.models[mpg.name] = mpg\r\nns.models[iris.name] = iris\r\n\r\n\r\[email protected]('/spam')\r\nclass Spam(Resource):\r\n @ns.expect(spam)\r\n def post(self):\r\n form_data = request.json\r\n\r\n email = form_data['email']\r\n\r\n try:\r\n prediction = ServiceLayer.predict_spam(email)\r\n\r\n return {\r\n 'statusCode': 200,\r\n 'headers': {\r\n 'Content-Type': 'text/plain',\r\n 'Access-Control-Allow-Origin': '*'},\r\n 'body': prediction[0]}\r\n except Exception as E:\r\n log.error(E)\r\n return jsonify({'Error': \"The error is {}\".format(E)})\r\n\r\[email protected]('/spam_batch')\r\nclass SpamBatch(Resource):\r\n def post(self):\r\n form_data = request.files['file']\r\n log.info(\"File recieved\")\r\n try:\r\n response = ServiceLayer.predict_spam_batch(form_data)\r\n\r\n return {\r\n 'statusCode': 200,\r\n 'headers': {\r\n 'Content-Type': 'text/plain',\r\n 'Access-Control-Allow-Origin': '*'},\r\n 'body': response}\r\n except Exception as E:\r\n log.error(E)\r\n return jsonify({'Error': \"The error is {}\".format(E)})\r\n\r\n\r\[email protected]('/mpg')\r\nclass MPG(Resource):\r\n @ns.expect(mpg)\r\n def post(self):\r\n form_data = request.json\r\n print(form_data)\r\n data = [int(form_data['cylinders']), int(form_data['displacement']), int(form_data['horepower']), int(form_data['weight']), int(form_data['acceleration']), int(form_data['model_year'])]\r\n if(form_data['Origin'] == 'USA'):\r\n data.extend([int(0), int(0), int(1)])\r\n elif(form_data['Origin'] == 'Europe'):\r\n data.extend([int(0), int(1), int(0)])\r\n elif(form_data['Origin'] == 'Japan'):\r\n data.extend([int(1), int(0), int(0)])\r\n\r\n try:\r\n print(data)\r\n prediction = ServiceLayer.predict_mpg(data)\r\n\r\n return {\r\n 'statusCode': 200,\r\n 'headers': {\r\n 'Content-Type': 'text/plain',\r\n 'Access-Control-Allow-Origin': '*'},\r\n 'body': prediction}\r\n except Exception as E:\r\n log.error(E)\r\n return jsonify({'Error': \"The error is {}\".format(E)})\r\n\r\n\r\[email protected]('/house_price')\r\nclass HousePrice(Resource):\r\n def post(self):\r\n form_data = request.form\r\n\r\n result = form_data['text']\r\n return {\r\n 'statusCode': 200,\r\n 'headers': {\r\n 'Content-Type': 'text/plain',\r\n 'Access-Control-Allow-Origin': '*'},\r\n 'body': result}\r\n\r\n\r\[email protected]('/iris')\r\nclass iris(Resource):\r\n @ns.expect(iris)\r\n def post(self):\r\n form_data = request.json\r\n\r\n data = [float(form_data['sepal_length']), float(form_data['petal_length']), float(form_data['petal_width'])]\r\n print(data)\r\n try:\r\n prediction = ServiceLayer.predict_iris(data)\r\n print(prediction)\r\n return {\r\n 'statusCode': 200,\r\n 'headers': {\r\n 'Content-Type': 'text/plain',\r\n 'Access-Control-Allow-Origin': '*'},\r\n 'body': prediction}\r\n except Exception as E:\r\n log.error(E)\r\n return jsonify({'Error': \"The error is {}\".format(E)})\r\n"
},
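The `/iris` handler in `ML.py` above expects a JSON body with the schema's float fields and returns a Lambda-style dict whose `body` holds the prediction. A hypothetical client call follows; the host, port, and `/api/v1` blueprint prefix are assumptions (the blueprint registration is not shown in this dump), so adjust them to wherever `common_blueprint_v1` is actually mounted.

```python
import requests

payload = {
    "sepal_length": 5.1,
    "sepal_width": 3.5,   # accepted by the schema, ignored by the handler
    "petal_length": 1.4,
    "petal_width": 0.2,
}

# Assumed URL: gunicorn binds 0.0.0.0:5000 per the Dockerfile earlier in this repo
resp = requests.post("http://localhost:5000/api/v1/ml/iris", json=payload)
print(resp.json()["body"])  # predicted iris class
```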
{
"alpha_fraction": 0.680942177772522,
"alphanum_fraction": 0.7194860577583313,
"avg_line_length": 18.5,
"blob_id": "62ef58b1f47eef8279481b26ca7c27be1daf3175",
"content_id": "24bb3c1e7958b32276eb9fb661f8092ba51623d7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 467,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 24,
"path": "/Dockerfile",
"repo_name": "Ge0f3/My_Web_API",
"src_encoding": "UTF-8",
"text": "# Format: FROM repository[:version]\nFROM python:3.7\n\nLABEL MAINTAINER Geoffrey '[email protected]'\n\nRUN apt-get update -y && \\\n apt-get install -y python-pip python-dev\n\n#copying just the requirements.txt first to leverage Docker cache\nCOPY ./requirements.txt /app/requirements.txt\n\nWORKDIR /app\n\nRUN pip3 install -r requirements.txt\nRUN pip install gunicorn\n\nCOPY . /app\n\nRUN mkdir /logs\n\nCMD [\"gunicorn\", \"-w 1\", \"-b 0.0.0.0:5000\", \"manage:app\"]\n\nEXPOSE 5000\nEXPOSE 80"
},
{
"alpha_fraction": 0.6440176963806152,
"alphanum_fraction": 0.6440176963806152,
"avg_line_length": 20.838708877563477,
"blob_id": "c9dce45cedd4493b6c3e2f58e7bf3027b32277c0",
"content_id": "c63cc9c2b2cc74b483847abfbf0afe27b0720d88",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 677,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 31,
"path": "/services/common/v1/schemas/ML_schemas.py",
"repo_name": "Ge0f3/My_Web_API",
"src_encoding": "UTF-8",
"text": "from flask_restplus import fields, Model, reqparse\nimport werkzeug\n\nspam = Model('spam Schema', {\n 'email': fields.String,\n})\n\nmpg = Model('AutoMPG Schema', {\n 'cylinders': fields.String,\n 'displacement': fields.String,\n 'horepower': fields.String,\n 'weight': fields.String,\n 'acceleration': fields.String,\n 'model_year': fields.String,\n 'Origin': fields.String\n\n})\n\niris = Model('Iris Schema', {\n 'sepal_length': fields.Float,\n 'sepal_width': fields.Float,\n 'petal_length': fields.Float,\n 'petal_width': fields.Float\n})\n\nsend_email = Model('Send Email', {\n 'name': fields.String,\n 'email': fields.String,\n 'msg': fields.String\n\n})\n"
},
{
"alpha_fraction": 0.4412546455860138,
"alphanum_fraction": 0.4433811902999878,
"avg_line_length": 36.599998474121094,
"blob_id": "6923446b73f587831b2a20d690d7d57424f03752",
"content_id": "1d72e1c0a642b705eab7af4093382e4db65737a1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1881,
"license_type": "no_license",
"max_line_length": 136,
"num_lines": 50,
"path": "/services/common/v1/resources/misc/ServiceLayer.py",
"repo_name": "Ge0f3/My_Web_API",
"src_encoding": "UTF-8",
"text": "\nimport requests\nimport json\nfrom flask import request\nimport os\nfrom sendgrid import SendGridAPIClient\nfrom sendgrid.helpers.mail import Mail\nfrom services.config import RequiredConstants as RC\n\n\nclass ServiceLayer:\n\n @staticmethod\n def send_email(data):\n message = Mail(\n from_email='[email protected]',\n to_emails='[email protected]',\n subject=\"Howdy !! Website Email Enquiry\",\n html_content=\"\"\"<html>\n <head></head>\n <body>\n <h1>Email Enquiry ! </h1>\n <p> Contacted Person - {} </p>\n <p> Email Address - {} </p>\n <p> Message - {} </p>\n </body>\n </html>\n \"\"\".format(data['name'], data['email'], data['msg'])\n )\n gretting_message = Mail(\n from_email='[email protected]',\n to_emails=data['email'],\n subject=\"Howdy !! Thanks for Email Enquiry\",\n html_content=\"\"\"<html>\n <head></head>\n <body>\n <h1>Thank you for your Email Enquiry </h1>\n <p> I am Geoffrey :). Thank you for visiting my portfolio website . I will be in touch with you shortly.</p>\n <p>Thanks,<br/>Geoffrey<br/>Machien learning engineer</p>\n \n </body>\n </html>\n \"\"\"\n )\n sg = SendGridAPIClient(RC.SENDGRID_API_KEY)\n response = sg.send(message)\n response_two = sg.send(gretting_message)\n print(response.status_code)\n print(response.body)\n print(response.headers)\n return response\n"
},
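send_email above expects a dict with 'name', 'email', and 'msg' keys, the same shape as the send_email schema; below is a minimal sketch of a direct call, assuming RC.SENDGRID_API_KEY is configured with a valid key.

from services.common.v1.resources.misc.ServiceLayer import ServiceLayer

payload = {'name': 'Jane Doe',
           'email': '[email protected]',
           'msg': 'Interested in your ML services.'}

# Sends both the enquiry email and the greeting email via SendGrid
response = ServiceLayer.send_email(payload)
print(response.status_code)  # SendGrid returns 202 on acceptance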
{
"alpha_fraction": 0.6000000238418579,
"alphanum_fraction": 0.6093023419380188,
"avg_line_length": 10.88888931274414,
"blob_id": "5ebc006a129b3a61b4cd7cc099de0b08bb2ba439",
"content_id": "db5b35c48f3ee4f826d7302939d5b2632d9a7fc7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 215,
"license_type": "no_license",
"max_line_length": 25,
"num_lines": 18,
"path": "/services/common/v1/resources/DL/ServiceLayer.py",
"repo_name": "Ge0f3/My_Web_API",
"src_encoding": "UTF-8",
"text": "\nimport requests\nimport json\nfrom flask import request\n\n\nclass ServiceLayer:\n\n @staticmethod\n def static_method1():\n\n return []\n\n @staticmethod\n def static_method2():\n\n return {\n\n }\n"
},
{
"alpha_fraction": 0.6150943636894226,
"alphanum_fraction": 0.6198112964630127,
"avg_line_length": 29.285715103149414,
"blob_id": "336668e69df8f7d634a56f5f34cb2094b6cc4e1d",
"content_id": "802a46ce1c88f322e88ca0e267c8dcbee3349734",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1060,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 35,
"path": "/services/common/v1/resources/misc/misc.py",
"repo_name": "Ge0f3/My_Web_API",
"src_encoding": "UTF-8",
"text": "import logging\nimport datetime\nimport time\n\nfrom flask import request, abort, jsonify\nfrom flask_restplus import Resource, Namespace\nfrom services.common.v1.schemas.ML_schemas import send_email\nfrom services.common.v1.resources.misc.ServiceLayer import ServiceLayer\nfrom services.config import RequiredConstants as RC\n\nlog = logging.getLogger(__name__)\n\nns = Namespace(\n \"Misc API's\",\n description=\"API's related to misc servic \")\nns.models[send_email.name] = send_email\n\n\[email protected]('/send_email')\nclass SendEmail(Resource):\n @ns.expect(send_email)\n def post(self):\n form_data = request.json\n try:\n response = ServiceLayer.send_email(form_data)\n print(response)\n return {\n 'statusCode': 200,\n 'headers': {\n 'Content-Type': 'text/plain',\n 'Access-Control-Allow-Origin': '*'},\n 'body': 'Success'}\n except Exception as E:\n log.error(E)\n return jsonify({'Error': \"The error is {}\".format(E)})\n"
},
{
"alpha_fraction": 0.5799396634101868,
"alphanum_fraction": 0.5859728455543518,
"avg_line_length": 26.212766647338867,
"blob_id": "906c0af546f44a34f10d44de3ab4293d15046f80",
"content_id": "499f7aca732dce88818f4f043e4aa9c5385d50ab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1326,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 47,
"path": "/services/common/v1/resources/DL/DL.py",
"repo_name": "Ge0f3/My_Web_API",
"src_encoding": "UTF-8",
"text": "import logging\r\nfrom flask import request, jsonify\r\nimport requests\r\nimport json\r\nimport os\r\nfrom services.config import RequiredConstants as RC\r\nfrom flask_restplus import Resource, Namespace, reqparse\r\nfrom services.common.v1.schemas.ML_schemas import spam\r\nfrom services.common.v1.resources.DL.ServiceLayer import ServiceLayer\r\n\r\nlog = logging.getLogger(__name__)\r\n\r\nns = Namespace(\r\n 'DeepLearning',\r\n description='Operation Realted to Deep learning Model API')\r\nns.models[spam.name] = spam\r\n\r\n\r\[email protected]('/mnsit')\r\nclass mnsit(Resource):\r\n @ns.expect(spam)\r\n def post(self):\r\n form_data = request.form\r\n app.logger.info(\"MINST API \")\r\n result = form_data['text']\r\n\r\n return {\r\n 'statusCode': 200,\r\n 'headers': {\r\n 'Content-Type': 'text/plain',\r\n 'Access-Control-Allow-Origin': '*'},\r\n 'body': result}\r\n\r\[email protected]('/img_recog')\r\nclass Img_Recog(Resource):\r\n @ns.expect(spam)\r\n def post(self):\r\n form_data = request.form\r\n app.logger.info(\"MINST API \")\r\n result = form_data['text']\r\n\r\n return {\r\n 'statusCode': 200,\r\n 'headers': {\r\n 'Content-Type': 'text/plain',\r\n 'Access-Control-Allow-Origin': '*'},\r\n 'body': result}\r\n"
},
{
"alpha_fraction": 0.6408640742301941,
"alphanum_fraction": 0.6408640742301941,
"avg_line_length": 26.85714340209961,
"blob_id": "9d56e51ba556ec11a13c24f63c44ec36f7bccd84",
"content_id": "109bba954e6755d82a29c76558f29edc5d90c210",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2222,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 77,
"path": "/services/application.py",
"repo_name": "Ge0f3/My_Web_API",
"src_encoding": "UTF-8",
"text": "from flask import Flask\r\nfrom importlib import import_module\r\nfrom flask_bcrypt import Bcrypt\r\nfrom werkzeug.contrib.fixers import ProxyFix\r\nimport decimal\r\nimport flask.json\r\nimport datetime\r\nfrom bson.objectid import ObjectId\r\nfrom services.blueprints import all_blueprints\r\nimport os\r\nimport json\r\nimport logging\r\nimport logging.config\r\nfrom werkzeug.utils import cached_property\r\n\r\n\r\nclass MyJSONEncoder(flask.json.JSONEncoder):\r\n\r\n def default(self, obj):\r\n if isinstance(obj, decimal.Decimal):\r\n # Convert decimal instances to strings.\r\n return str(obj)\r\n elif isinstance(obj, ObjectId):\r\n return str(obj)\r\n elif isinstance(obj, datetime.datetime):\r\n return obj.isoformat()\r\n return super(MyJSONEncoder, self).default(obj)\r\n\r\n\r\ndef setup_logging(\r\n default_path='logging_config.json',\r\n default_level=logging.INFO,\r\n env_key='LOG_CFG'):\r\n \"\"\"\r\n Set up logging configuration for interaction service\r\n \"\"\"\r\n\r\n path = default_path\r\n value = os.getenv(env_key, None)\r\n log_path = os.getenv('LOG_LOCATION', '')\r\n\r\n if value:\r\n path = value\r\n\r\n if os.path.exists(path):\r\n with open(path, 'rt') as f:\r\n config = json.load(f)\r\n\r\n info_filename = config['handlers']['info_file_handler']['filename']\r\n error_filename = config['handlers']['error_file_handler']['filename']\r\n\r\n config['handlers']['info_file_handler']['filename'] = log_path + info_filename\r\n config['handlers']['error_file_handler']['filename'] = log_path + \\\r\n error_filename\r\n\r\n logging.config.dictConfig(config)\r\n else:\r\n logging.basicConfig(level=default_level)\r\n\r\n\r\ndef create_app(config_obg=None, **kwargs):\r\n app = Flask(__name__)\r\n setup_logging()\r\n print('Created app and set up logging!')\r\n\r\n register_blueprints(app)\r\n # https proxy\r\n app.wsgi_app = ProxyFix(app.wsgi_app)\r\n # custom json encoderd to deal with nonjson convertables\r\n app.json_encoder = MyJSONEncoder\r\n return app\r\n\r\n\r\ndef register_blueprints(app):\r\n for bp in all_blueprints:\r\n import_module(bp.import_name)\r\n app.register_blueprint(bp)\r\n"
},
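A minimal sanity check of the MyJSONEncoder defined above, runnable without starting the Flask app; the import path assumes the package layout shown in this dump.

import datetime
import decimal

from services.application import MyJSONEncoder

enc = MyJSONEncoder()
payload = {'price': decimal.Decimal('9.99'),
           'created': datetime.datetime(2020, 1, 1, 12, 0)}

# Decimals become strings and datetimes become ISO-8601 strings
print(enc.encode(payload))  # {"price": "9.99", "created": "2020-01-01T12:00:00"}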
{
"alpha_fraction": 0.47664836049079895,
"alphanum_fraction": 0.6978021860122681,
"avg_line_length": 15.930233001708984,
"blob_id": "b8516e6b93feec5161eadb986ef6220086524d7b",
"content_id": "4cf25096212122a7c1dae818b0b12eac12d5e58d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 728,
"license_type": "no_license",
"max_line_length": 25,
"num_lines": 43,
"path": "/requirements.txt",
"repo_name": "Ge0f3/My_Web_API",
"src_encoding": "UTF-8",
"text": "aniso8601==8.0.0\nattrs==19.3.0\nbcrypt==3.2.0\nboto3==1.9.211\nbotocore==1.12.253\ncertifi==2020.6.20\ncffi==1.14.2\nchardet==3.0.4\nclick==7.1.2\ndocutils==0.15.2\nFlask==1.1.1\nFlask-Bcrypt==0.7.1\nFlask-Cors==3.0.8\nflask-restplus==0.13.0\nidna==2.8\nimportlib-metadata==1.7.0\nitsdangerous==1.1.0\nJinja2==2.11.2\njmespath==0.10.0\njoblib==0.17.0\njsonschema==3.2.0\nMarkupSafe==1.1.1\nnumpy==1.19.2\npandas==1.1.3\npycodestyle==2.6.0\npycparser==2.20\npymongo==3.9.0\npyrsistent==0.16.0\npython-dateutil==2.8.1\npython-http-client==3.3.1\npytz==2020.1\nrequests==2.22.0\ns3transfer==0.2.1\nscikit-learn==0.23.2\nscipy==1.5.2\nsendgrid==6.4.7\nsix==1.15.0\nsklearn==0.0\nstarkbank-ecdsa==1.1.0\nthreadpoolctl==2.1.0\nurllib3==1.25.10\nWerkzeug==0.16.1\nzipp==3.1.0\n"
},
{
"alpha_fraction": 0.744583785533905,
"alphanum_fraction": 0.7673888206481934,
"avg_line_length": 34.08000183105469,
"blob_id": "122d7e8dacc11ed9a681d1db88b04bdacb21e57d",
"content_id": "5434a78751b826ffaff381bf4a09529f921b8dd4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 877,
"license_type": "no_license",
"max_line_length": 140,
"num_lines": 25,
"path": "/README.md",
"repo_name": "Ge0f3/My_Web_API",
"src_encoding": "UTF-8",
"text": "# MY Web API's\n\nService that handles My portfolio API requests and responses\n\nFlask application, install requirements in requirements.txt and run with 'python manage.py'\n\n# Working with Docker\n\nBuild docker using ` docker build -t name:version . `\n\nAfter deploy the app using ` docker run -d -p 5000:5000 name `\n\nIn your browser navigate to: **http://localhost:5000** (or whatever port you have mention in the docker build) to see the app up and running\n\n# Working without docker\n\nI highly recommend the use of docker as it is far simpler to get started than to run all of the following manually.\n\nTo Deploy manually Assure you have Python. installed.\n\nNavigate inside the directory\n\nInstall pip dependencies: `pip install -r requirements.txt`\n\nRun `python manage.py` to see the app up and running on port **5000** (will watch files and restart server on port 5000 on change)\n"
},
{
"alpha_fraction": 0.6198871731758118,
"alphanum_fraction": 0.6220028400421143,
"avg_line_length": 26.25,
"blob_id": "97649ef2eb632cb5088d108e8c6b9f79145f8c2f",
"content_id": "74aa0f77853ae1401c02ca373665142535ce5af6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1418,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 52,
"path": "/services/common/v1/resources/ML/ServiceLayer.py",
"repo_name": "Ge0f3/My_Web_API",
"src_encoding": "UTF-8",
"text": "\nimport requests\nimport json\nfrom flask import request\nimport pickle\nimport os\nimport numpy as np\nimport pandas as pd\n\nparentDirectory = os.path.abspath(os.path.join(os.getcwd(), os.pardir))\n\nham_spam = pickle.load(open('./Models/model_file', 'rb'))\nxcount = pickle.load(open('./Models/countvect', 'rb'))\n\nauto_mpg = pickle.load(open('./Models/auto_mpg', 'rb'))\niris = pickle.load(open('./Models/iris','rb'))\n\n\nclass ServiceLayer:\n\n @staticmethod\n def predict_spam(email):\n # To verify the email\n xcounts = xcount.transform([email])\n prediction = ham_spam.predict(xcounts)\n return prediction\n\n @staticmethod\n def predict_mpg(data):\n prediction = auto_mpg.predict([data])\n print(prediction)\n return prediction[0]\n \n @staticmethod\n def predict_iris(data):\n prediction = iris.predict([data])\n print(prediction[0])\n return prediction[0]\n\n @staticmethod\n def predict_spam_batch(form_data):\n df = pd.read_csv(form_data)\n df_tf = xcount.transform(df.text)\n prediction = ham_spam.predict(df_tf)\n df['Prediction'] = pd.Series(prediction).values\n value_counts = df['Prediction'].value_counts().to_dict()\n df = df.to_json(orient='records')\n df = json.loads(df)\n response = {\n 'value_counts': value_counts,\n 'result': df\n }\n return response\n"
},
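predict_spam_batch above reads the uploaded file with pandas and indexes a 'text' column (df.text), so the upload must be a CSV with that header. A small sketch of a conforming file object, without touching the pickled models:

import io
import pandas as pd

# The same read that ServiceLayer performs on the uploaded file object
upload = io.BytesIO(b'text\nWin a FREE prize now\nLunch at noon tomorrow\n')
df = pd.read_csv(upload)
print(df.text.tolist())  # ['Win a FREE prize now', 'Lunch at noon tomorrow']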
{
"alpha_fraction": 0.5950704216957092,
"alphanum_fraction": 0.6003521084785461,
"avg_line_length": 22.66666603088379,
"blob_id": "c0f9ba7e97800b8c60a1a7ebd9c315168714fe29",
"content_id": "38ec3af75fcb3bf2a6ca0aa2aec0248111618760",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 568,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 24,
"path": "/services/common/v1/resources/health/health_routes.py",
"repo_name": "Ge0f3/My_Web_API",
"src_encoding": "UTF-8",
"text": "import logging\nimport datetime\nimport time\n\nfrom flask import request, abort, jsonify\nfrom flask_restplus import Resource, Namespace\n\nlog = logging.getLogger(__name__)\n\nns = Namespace(\n 'Health Check',\n description='Operations related to servic health')\n\n\[email protected]('')\nclass HealthCheck(Resource):\n def get(self):\n return {\n 'statusCode': 200,\n 'headers': {\n 'Content-Type': 'text/plain',\n 'Access-Control-Allow-Origin': '*'},\n 'body': \"Everything looks Fine\",\n 'error': None}\n"
},
{
"alpha_fraction": 0.843137264251709,
"alphanum_fraction": 0.843137264251709,
"avg_line_length": 32,
"blob_id": "64f14474a2060e77d1ca08546e6269a1a52c5a7a",
"content_id": "df01c408d93ac2445418d7462a1ba579808f0ff5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 102,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 3,
"path": "/services/extensions.py",
"repo_name": "Ge0f3/My_Web_API",
"src_encoding": "UTF-8",
"text": "from services.config import RequiredConstants as RC\r\nimport pymongo\r\nfrom pymongo import MongoClient\r\n"
},
{
"alpha_fraction": 0.6901041865348816,
"alphanum_fraction": 0.7005208134651184,
"avg_line_length": 27.69230842590332,
"blob_id": "9a14ec961d697aa28ac01e6f975458f6ad592099",
"content_id": "4f732b89238b7c8c57b110bc6ac10940ced875e0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 384,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 13,
"path": "/services/blueprints.py",
"repo_name": "Ge0f3/My_Web_API",
"src_encoding": "UTF-8",
"text": "from flask import Blueprint\r\n\r\n\r\ndef _factory(partial_module_string, url_prefix):\r\n name = partial_module_string\r\n import_name = 'services.{}'.format(partial_module_string)\r\n blueprint = Blueprint(name, import_name, url_prefix=url_prefix)\r\n return blueprint\r\n\r\n\r\ncommon_blueprint_v1 = _factory('common.v1.api', url_prefix='/v1')\r\n\r\nall_blueprints = (common_blueprint_v1, )"
},
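The _factory helper above makes adding an API version a one-liner. A sketch of how a hypothetical v2 would slot in ('common.v2.api' does not exist in this repo):

common_blueprint_v2 = _factory('common.v2.api', url_prefix='/v2')

all_blueprints = (common_blueprint_v1, common_blueprint_v2)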
{
"alpha_fraction": 0.6724386811256409,
"alphanum_fraction": 0.6868686676025391,
"avg_line_length": 22.75,
"blob_id": "8507e8210f1c77574d7e363361ba52539eabb981",
"content_id": "fb9e15bcf454d8ccd76c9818fdbf6248daf95337",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 693,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 28,
"path": "/manage.py",
"repo_name": "Ge0f3/My_Web_API",
"src_encoding": "UTF-8",
"text": "from flask import Flask\r\nfrom services.application import create_app\r\nfrom services.config import RequiredConstants\r\nfrom flask_restplus import Resource, Api\r\n\r\nfrom flask_cors import CORS\r\n\r\nimport os\r\nimport logging\r\n\r\napp = create_app()\r\napi = Api(app)\r\n\r\n# cross origin.\r\nCORS(app, resources=r'/*')\r\napp.config['CORS_HEADERS'] = 'Content-Type'\r\n\r\n\r\[email protected]('/')\r\ndef testRoute():\r\n return '<h1> API service running! </h1>'\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True, threaded=False, host='0.0.0.0', port=5000)\r\n gunicorn_logger = logging.getLogger('gunicorn.error')\r\n app.logger.handlers = gunicorn_logger.handlers\r\n app.logger.setLevel(gunicorn_logger.level)\r\n"
}
] | 16 |
zkhin/tensorlib
|
https://github.com/zkhin/tensorlib
|
26baf182459da57f871e34d9916a198de7042ea9
|
3420c3e2e7c066a6e8ef80cc7c919420b54f7c9d
|
c0051749d10105a8fbc1cf73d7f8cb67766d02fa
|
refs/heads/master
| 2020-05-19T23:28:00.670025 | 2019-05-07T16:58:22 | 2019-05-07T16:58:22 | 185,267,679 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.708527147769928,
"alphanum_fraction": 0.7116279006004333,
"avg_line_length": 28,
"blob_id": "098a5344ad99ff0484abcd84d20affd34fc3c07e",
"content_id": "1b24bb4b049b5c9be0cb0492c97d2f318b4ebe4f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 645,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 22,
"path": "/loss.py",
"repo_name": "zkhin/tensorlib",
"src_encoding": "UTF-8",
"text": "\"\"\"\nA loss function measures how good our predictions are, we can use this to adjust the parameters of our network\n\"\"\"\nimport numpy as np\nfrom tensorlib.tensor import Tensor\n\nclass Loss:\n\tdef loss(self, predicted, actual):\t#Tensor, Tensor -> float\n\t\traise NotImplementedError\n\t\t\n\tdef grad(self, predicted, actual): #Tensor, Tensor -> Tensor\n\t\traise NotImplementedError\n\t\t\nclass MSE(Loss):\n\t\"\"\"\n\tThis is actually total squared error\n\t\"\"\"\n\tdef loss(self, predicted, actual):\t#Tensor, Tensor -> float\n\t\treturn np.sum(predicted - actual) ** 2\n\t\t\n\tdef grad(self, predicted, actual): #Tensor, Tensor -> Tensor\n\t\treturn 2 * (predicted - actual)\n\t\t\t\t\n\t\n"
},
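A small numeric check of the MSE class above; the finite-difference quotient should match the corresponding component of grad. The tensorlib.loss import path mirrors this repo's layout.

import numpy as np

from tensorlib.loss import MSE

loss = MSE()
predicted = np.array([1.0, 2.0, 3.0])
actual = np.array([1.0, 1.0, 1.0])

print(loss.loss(predicted, actual))  # 0 + 1 + 4 = 5.0
print(loss.grad(predicted, actual))  # [0. 2. 4.]

# Finite-difference check on the second component: should print ~2.0
eps = 1e-6
bumped = predicted.copy()
bumped[1] += eps
print((loss.loss(bumped, actual) - loss.loss(predicted, actual)) / eps)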
{
"alpha_fraction": 0.6318147778511047,
"alphanum_fraction": 0.6329350471496582,
"avg_line_length": 24.912620544433594,
"blob_id": "e305ba3e10390868d5001268b941bcd1d583c134",
"content_id": "0ade04f27c94773e476cd4330796edab497e5a49",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2678,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 103,
"path": "/layers.py",
"repo_name": "zkhin/tensorlib",
"src_encoding": "UTF-8",
"text": "\"\"\"\nOur neural nets will be made up of layers. Each layer needs to pass its inputs forward and propagate gradients backward.\nFor example, a neural net might look like:\n\ninputs -> Linear -> Tanh -> Linear -> output\n\"\"\"\n#import mypy\nimport numpy as np\nfrom tensorlib.tensor import Tensor\n\n#import typing\nfrom typing import Callable\n#from typing import Dict\n\nclass Layer:\n\tdef __init__(self):\n\t\t#P = Dict[str, Tensor]\n\t\t#G = Dict[str, Tensor]\n\t\tself.params = {}\n\t\tself.grads = {}\n\t\t\n\tdef forward(self, inputs): #Tensor -> Tensor\n\t\t\"\"\"\n\t\tProduce the outputs corresponding to these inputs\n\t\t\"\"\"\n\t\traise NotImplementedError\n\t\n\tdef backward(self, grad): #Tensor -> Tensor\n\t\t\"\"\"\n\t\tBackpropagate this gradient through the layer\n\t\t\"\"\"\n\t\traise NotImplementedError\n\nclass Linear(Layer):\n\t\"\"\"\n\tcomputes output = inputs @ w + B\n\t\"\"\"\n\tdef __init__(self, input_size, output_size): #int, int -> None\n\t\t# inputs will be (batch_size, input_size)\n\t\t# outputs will be (batch_size, output_size)\n\t\tsuper().__init__()\n\t\tself.params[\"w\"] = np.random.randn(input_size, output_size)\n\t\tprint(\"initialized w\", self.params[\"w\"])\n\t\tself.params[\"b\"] = np.random.randn(output_size)\n\t\tprint(\"initialized b\", self.params[\"b\"])\t\n\tdef forward(self, inputs): #Tensor -> Tensor\n\t\t\"\"\"\n\t\toutputs = inputs @ w + b\n\t\t\"\"\"\n\t\tself.inputs = inputs\n\t\tprint(\"first inputs\", self.inputs)\n\t\ttmpoutput = np.array(np.asmatrix(inputs) * np.asmatrix(self.params[\"w\"])) + self.params[\"b\"]\n\t\treturn tmpoutput\n\t\tprint(\"linear forward\", tmpoutput)\n\t\t\n\tdef backward(self, grad): #Tensor -> Tensor\n\t\t\"\"\"\n\t\tif y = f(x) and x = a * b + c\n\t\tthen dy/da = f'(x) * b\n\t\tand dy/db = f'(x) * a\n\t\tand dy/dc = f'(x)\n\t\t\n\t\tif y = f(x) and x = a @ b + c\n\t\tthen dy/da = f'(x) @ b.T\n\t\tand dy/db = a.T @ f'(x)\n\t\tand dy/dc = f'(x)\n\t\t\"\"\"\n\t\tself.grads[\"b\"] = np.sum(grad, axis=0)\n\t\tself.grads[\"w\"] = np.array(np.asmatrix(self.inputs.T) * np.asmatrix(grad))\n\t\treturn np.array(np.asmatrix(grad) * np.asmatrix(self.params[\"w\"].T))\n\t\t\nF = Callable[[Tensor], Tensor]\n\nclass Activation(Layer):\n\t\"\"\"\n\tAn activation layer just applies a function elementwise to its inputs\n\t\"\"\"\n\tdef __init__(self, f, f_prime): #Function, Function' -> None\n\t\tsuper().__init__()\n\t\tself.f = f\n\t\tself.f_prime = f_prime\n\t\t\n\tdef forward(self, inputs): #Tensor -> Tensor\n\t\tself.inputs = inputs\n\t\treturn self.f(inputs)\n\t\t\n\tdef backward(self, grad): #Tensor -> Tensor\n\t\t\"\"\"\n\t\tif y = f(x) and x = g(z)\n\t\tthen dy/dz = f'(x) * g'(z)\n\t\t\"\"\"\n\t\treturn self.f_prime(self.inputs) * grad\n\ndef tanh(x): #Tensor -> Tensor\n\treturn np.tanh(x)\n\t\ndef tanh_prime(x): #Tensor -> Tensor\n\ty = tanh(x)\n\treturn 1 - y ** 2\n\t\nclass Tanh(Activation):\n\tdef __init__(self):\n\t\tsuper().__init__(tanh, tanh_prime)\n\t\t\n\t\t\t\t\t\n"
},
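A shape walkthrough for the Linear and Tanh layers above (Linear prints its parameters on init and forward, so expect some console noise); the tensorlib.layers import path mirrors this repo's layout.

import numpy as np

from tensorlib.layers import Linear, Tanh

np.random.seed(0)
linear = Linear(input_size=3, output_size=2)
x = np.random.randn(4, 3)                # a batch of 4 examples

out = Tanh().forward(linear.forward(x))
print(out.shape)                         # (4, 2)

grad_in = linear.backward(np.ones((4, 2)))
print(grad_in.shape)                     # (4, 3): gradient w.r.t. the inputs
print(linear.grads['w'].shape)           # (3, 2)
print(linear.grads['b'].shape)           # (2,)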
{
"alpha_fraction": 0.5555555820465088,
"alphanum_fraction": 0.5555555820465088,
"avg_line_length": 8,
"blob_id": "0fdbbe99e84678af5df496a5b1c2f9aedf570bf8",
"content_id": "be97123ee341f60797788c91b7145dcfd5df32c8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 27,
"license_type": "no_license",
"max_line_length": 18,
"num_lines": 3,
"path": "/__init__.py",
"repo_name": "zkhin/tensorlib",
"src_encoding": "UTF-8",
"text": "\"\"\"\nZayar's ML library\n\"\"\"\n"
},
{
"alpha_fraction": 0.8333333134651184,
"alphanum_fraction": 0.8333333134651184,
"avg_line_length": 55,
"blob_id": "b8b3a164949b594435716f9b70fcb3f79ce4a2f8",
"content_id": "45118f5a0f82905bb4db15fe1cc63f705b0cd05f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 168,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 3,
"path": "/README.md",
"repo_name": "zkhin/tensorlib",
"src_encoding": "UTF-8",
"text": "# tensorlib\nSimple and extensible implementation of a neural network that I used to clarify existing libraries. \nHelpful to create a lightweight mobile implementation.\n"
},
{
"alpha_fraction": 0.7108843326568604,
"alphanum_fraction": 0.7142857313156128,
"avg_line_length": 28.399999618530273,
"blob_id": "5d32781e2327e1eef3c22049346e9b5d0b890185",
"content_id": "9b623bd03a1aec48058f9febf69fcf331441aa85",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 882,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 30,
"path": "/data.py",
"repo_name": "zkhin/tensorlib",
"src_encoding": "UTF-8",
"text": "\"\"\"\nFeed inputs into our network in batches\n\"\"\"\nimport numpy as np\nfrom tensorlib.tensor import Tensor\n#from typing import Iterator, NamedTuple\nfrom typing import Iterator, NamedTuple\n\nBatch = NamedTuple(\"Batch\", [(\"inputs\", Tensor), (\"targets\", Tensor)])\n\n\nclass DataIterator:\n\tdef __call__(self, inputs, targets): #Iterator(Batches)\n\t\traise NotImplementedError\n\nclass BatchIterator(DataIterator):\n\t\n\tdef __init__(self, batch_size=32, shuffle=True): # int, bool -> None\n\t\tself.batch_size = batch_size\n\t\tself.shuffle = shuffle\n\tdef __call__(self, inputs, targets): #Tensor, Tensor -> Iterator(Batches)\n\t\tstarts = np.arange(0, len(inputs), self.batch_size)\n\t\tif self.shuffle:\n\t\t\tnp.random.shuffle(starts)\n\t\t\t\n\t\tfor start in starts:\n\t\t\tend = start + self.batch_size\n\t\t\tbatch_inputs = inputs[start:end]\n\t\t\tbatch_targets = targets[start:end]\n\t\t\tyield Batch(batch_inputs, batch_targets)\n"
},
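Iterating the BatchIterator above over a toy dataset; shuffle=False keeps the output deterministic, and the last batch is simply shorter when batch_size does not divide the data evenly.

import numpy as np

from tensorlib.data import BatchIterator

inputs = np.arange(10).reshape(10, 1)
targets = inputs * 2

for batch in BatchIterator(batch_size=4, shuffle=False)(inputs, targets):
    print(batch.inputs.ravel(), batch.targets.ravel())
# Yields batches of 4, 4, and 2 rows.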
{
"alpha_fraction": 0.7425068020820618,
"alphanum_fraction": 0.7465940117835999,
"avg_line_length": 33.9523811340332,
"blob_id": "a0d2a9a144ff6806a68c9a61a07c1be1f274a174",
"content_id": "3d273cb0c79064670889e1c1939a96d4394cbf61",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 734,
"license_type": "no_license",
"max_line_length": 173,
"num_lines": 21,
"path": "/train.py",
"repo_name": "zkhin/tensorlib",
"src_encoding": "UTF-8",
"text": "\"\"\"\nA function that trains a neural net\n\"\"\"\n\nfrom tensorlib.tensor import Tensor\nfrom tensorlib.nn import NeuralNet\nfrom tensorlib.loss import Loss, MSE\nfrom tensorlib.optim import Optimizer, SGD\nfrom tensorlib.data import DataIterator, BatchIterator\n\ndef train(net, inputs, targets, num_epochs=1, iterator=BatchIterator(), loss=MSE(), optimizer=SGD()): # NeuralNet, Tensor, Tensor, int, DataIterator, Loss, Optimizer -> None\n\t\n\tfor epoch in range(num_epochs):\n\t\tepoch_loss = 0.0\n\t\tfor batch in iterator(inputs, targets):\n\t\t\tpredicted = net.forward(batch.inputs)\n\t\t\tepoch_loss += loss.loss(predicted, batch.targets)\n\t\t\tgrad = loss.grad(predicted, batch.targets)\n\t\t\tnet.backward(grad)\n\t\t\toptimizer.step(net)\n\t\tprint(epoch, epoch_loss)\n"
}
] | 6 |
heikalb/thesis-scripts
|
https://github.com/heikalb/thesis-scripts
|
cf5e8323552660b1d9310086931535042126570e
|
cb9a59e4054b8470405c91b321bea24b6e55d66b
|
4d65f6e2d1e177d6f18f812ef3f3dbd1653b9fa9
|
refs/heads/master
| 2021-08-18T09:58:20.254099 | 2020-04-29T01:14:17 | 2020-04-29T01:14:17 | 173,040,384 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5684239864349365,
"alphanum_fraction": 0.5787997841835022,
"avg_line_length": 28,
"blob_id": "8213cc35b448138c87140194a2aa163b40b0098d",
"content_id": "c54ea4add3b9068dca64edf55343a33b706822bd",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3576,
"license_type": "permissive",
"max_line_length": 114,
"num_lines": 123,
"path": "/d3_preprocess_data/archive/collect_queries.py",
"repo_name": "heikalb/thesis-scripts",
"src_encoding": "UTF-8",
"text": "# —*— coding: utf—8 —*—\n\"\"\"\nGather all query results (verbs + context window) from different verbs into one file\nHeikal Badrulhisham <[email protected]>, 2019\n\"\"\"\nimport csv\n\nspell_corrections = open('verb_spelling_suggestions.txt').read().split('\\n')\nspell_corrections = [line.split() for line in spell_corrections]\nspell_corrections = [l for l in spell_corrections if not (l[0] == '#' or l[0] == '@' or len(l) < 2)]\nsentence_punct = ['.', '!', '?']\n\n\n# Apply spelling correction based on spell correction suggestion file\ndef apply_correction(target_word):\n target_word_normalized = ''.join([ch for ch in target_word if ch.isalpha()])\n target_word_normalized = target_word_normalized.lower()\n\n for sp in spell_corrections:\n if sp[0] == target_word_normalized:\n return sp[1]\n\n return target_word\n\n\n# Helper method for apply_correction(). Restores punctuation back into a word\ndef restore_punct(word_1, word_2):\n if not any(not ch.isalpha() for ch in word_1):\n return word_2\n\n left_punct = ''\n right_punct = ''\n midpoint = int(len(word_1)/2)\n\n for ch in word_1[:midpoint]:\n if not ch.isalpha():\n left_punct += ch\n else:\n break\n\n for ch in word_1[midpoint:]:\n if not ch.isalpha():\n right_punct += ch\n\n return '{0}{1}{2}'.format(left_punct, word_2, right_punct)\n\n\n# Given a sentence that may span multiple sentence boundaries, return the sentence containing the target word only\ndef to_one_sentence(sentence, target_i):\n tokens = sentence.split()\n right_sent = []\n left_sent = []\n\n for t in tokens[target_i:]:\n right_sent.append(t)\n\n if any(punct in t for punct in sentence_punct):\n break\n\n i = target_i - 1\n\n while i >= 0:\n if any(punct in tokens[i] for punct in sentence_punct):\n break\n\n left_sent.insert(0, tokens[i])\n\n i -= 1\n\n return ' '.join(left_sent + right_sent), len(left_sent)\n\n\n# Remove punctuation from a string\ndef depunctuate(st):\n st = st.split()\n new_sent = []\n\n for w in st:\n new_word = [ch for ch in w if ch.isalnum()]\n\n if new_word:\n new_sent.append(''.join(new_word))\n\n return ' '.join(new_sent)\n\n\ndef main():\n rows = []\n \n for i in range(20):\n # Open file\n with open('../d2_data/query_results/tnc_query_result_{0}.tsv'.format(i)) as f:\n csv_reader = csv.reader(f, delimiter='\\t')\n\n first_row = True\n for row in csv_reader:\n # Skip first row header of CSV\n if first_row:\n first_row = False\n continue\n\n # Spell correct target verb\n main_word = apply_correction(depunctuate(row[3]))\n left_context = depunctuate(row[2])\n right_context = depunctuate(row[4])\n\n # Join context windows and target verb, remove punctuation\n full_sentence = ' '.join([left_context, main_word, right_context])\n # Reduce context window to one sentence, get new index of target verb\n single_sent = to_one_sentence(full_sentence, len(left_context.split()))\n # Save processed context window\n rows.append([single_sent[0], main_word, single_sent[1]])\n\n # Save data\n with open('../d2_data/query_results_all_joined_sents.tsv', 'w') as f:\n csv_writer = csv.writer(f, delimiter='\\t')\n for r in rows:\n csv_writer.writerow(r)\n\n\nif __name__ == \"__main__\":\n main()\n exit(0)"
},
{
"alpha_fraction": 0.5969447493553162,
"alphanum_fraction": 0.6063454747200012,
"avg_line_length": 29.035293579101562,
"blob_id": "ada56f2ef7668e900c20c05864fd17dd7d31c2a0",
"content_id": "33fec15b7f5e8ebbd9fe699fdc14e102f65195af",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2557,
"license_type": "permissive",
"max_line_length": 140,
"num_lines": 85,
"path": "/d4_parse/archive/parse_w_trnltk.py",
"repo_name": "heikalb/thesis-scripts",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nParse verbs (only) using TRNLTK\nNote: because of the state of TRNLTK in python, this script has to be run in Python 2\nHeikal Badrulhisham <[email protected]>, 2019\n\"\"\"\nimport csv\nfrom trnltk.playground import playground as pg\n\n\n# Morphological parser — a modified copy of the one in TRNLTK. Printing is disabled, and the function\n# now returns the parsing. \ndef morph_parse(word_str, *syntactic_categories):\n word_str = word_str.decode(encoding='UTF—8')\n parse_results = pg.contextless_parser.parse(word_str)\n\n if syntactic_categories:\n parse_results = filter(lambda parse_result: parse_result.get_last_state().syntactic_category in syntactic_categories, parse_results)\n \n parses = []\n\n if parse_results:\n for parse_result in parse_results:\n formatted_output = pg.formatter.format_morpheme_container_for_tests(parse_result)\n parses.append(formatted_output.encode('utf-8'))\n \n return parses\n\n\ndef get_shortest_parse(parse_list):\n length_parse = [(len(p.split('_')), p) for p in parse_list]\n return min(length_parse)[1]\n\n\ndef main():\n # Get list of verbs\n verb_file = open('../d2_data/all_verbs_spellchecked.txt', 'rb')\n words = verb_file.read().split('\\n')\n\n # Morphlogical parses and numbers to track parser performance\n parses = []\n one_parse_found = 0\n mult_parse_found = 0\n no_parse_found = 0\n \n # Get parses\n for w in words:\n curr_parses = morph_parse(w)\n curr_parses = [cp for cp in curr_parses if cp.split('_')[1] == 'Verb']\n parse_status = ''\n parse = ''\n\n # Get 3_parse information\n if len(curr_parses) == 1:\n one_parse_found += 1\n parse_status = 'parsed_single'\n parse = curr_parses[0]\n elif len(curr_parses) > 1:\n mult_parse_found += 1\n parse_status = 'parsed_multiple'\n parse = get_shortest_parse(curr_parses)\n else:\n no_parse_found += 1\n parse_status = 'parse_not_found'\n parse = '<no_parse>'\n\n parses.append([w, parse, parse_status])\n\n # Display findings about parser performance\n print('One 3_parse found: ', one_parse_found)\n print('Multiple parses found: ', mult_parse_found)\n print('No 3_parse found: ', no_parse_found)\n\n with open('morph_parses.csv', 'wb') as f:\n csv_writer = csv.writer(f)\n\n for p in parses:\n csv_writer.writerow(p)\n\n verb_file.close()\n\n\nif __name__ == \"__main__\":\n main()\n exit(0)\n"
},
{
"alpha_fraction": 0.54666668176651,
"alphanum_fraction": 0.5745454430580139,
"avg_line_length": 21.324323654174805,
"blob_id": "095da6fbe39aa6e1f11dc98c22dccb784d05f78f",
"content_id": "d77537a14bde32aee895ab3cd816e6a2ae4b02e3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 825,
"license_type": "permissive",
"max_line_length": 65,
"num_lines": 37,
"path": "/d3_preprocess_data/archive/itu_spellcheck.py",
"repo_name": "heikalb/thesis-scripts",
"src_encoding": "UTF-8",
"text": "\"\"\"\nSend list of word windows to ITU for spellchecking\nHeikal Badrulhisham <[email protected]>, 2019\n\"\"\"\nimport pipeline_caller\nimport csv\n\n\ndef main():\n caller = pipeline_caller.PipelineCaller()\n tool_name = \"spellcheck\"\n api_token = \"sQj6zxcVt7JzWXHNTdRu3QRzc6i8KZz7\"\n result = ''\n\n data = open('../d2_data/query_results_all_joined_sents.csv')\n reader = csv.reader(data)\n sents = []\n indices =[]\n\n for r in reader:\n sents.append(r[0])\n indices.append(r[2])\n\n with open('../d2_data/target_indices.txt', 'w') as f:\n f.write('\\n'.join(indices))\n\n for i in range(0, 8):\n text = '\\n'.join(sents[i*10000:(i+1)*10000])\n result += caller.call(tool_name, text, api_token)\n\n with open('../d2_data/all_sents_spellchecked.txt', 'w') as f:\n f.write(result)\n\n\nif __name__ == '__main__':\n main()\n exit(0)"
},
{
"alpha_fraction": 0.5495750904083252,
"alphanum_fraction": 0.5623229742050171,
"avg_line_length": 36.157894134521484,
"blob_id": "b42ddb9a01bccc7cf1c7e6fcef8953bcc3dda324",
"content_id": "cff92fb531f67416953430671ac8768ab335330e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1431,
"license_type": "permissive",
"max_line_length": 128,
"num_lines": 38,
"path": "/d0_prep_query_terms/archive/process_freq_dict.py",
"repo_name": "heikalb/thesis-scripts",
"src_encoding": "UTF-8",
"text": "\"\"\"\nExtract highest frequency verb stems from the text file of the Frequency Dictionary of Turkish.\nHeikal Badrulhisham <heikal93gmail.com>, 2019\n\"\"\"\nimport re\n\n\ndef main():\n # Section of the dictionary with the verb stems\n verb_section = open('freq_dict_.txt', 'r').read().split('\\n')[54671:58256]\n # Isolate verb stems\n stems = [line.split()[0] for line in verb_section if len(line.split()) >= 3 and line.split()[1] == 'to']\n stems = [s for s in stems if s != 'to' and not any(c in s for c in ';,')]\n\n # Spell correction\n h_words = ['hazırla', 'hisset', 'hesapla', 'bahset', 'harca', 'hızlan', 'hedefle', 'rahatla', 'hohlan', 'hallet', 'zehirle',\n 'haykır', 'heyecan', 'hükmet', 'hafifle', 'havalan', 'hastalan', 'hahla', 'fethet', 'sahiplen', 'hapset',\n 'hareketlen', 'buharlah', 'hıçkır', 'hüphelen', 'mahvet', 'kamah', 'hırpala', 'hatırla']\n h_correction = {'hohlan': 'hoşlan', 'hahla': 'haşla', 'buharlah': 'buharlaş', 'hüphelen': 'şüphelen', 'kamah': 'kamaş'}\n stems_ = []\n\n for s in stems:\n s = s.replace('j', 'ğ')\n if s not in h_words:\n stems_.append(s.replace('h', 'ş'))\n elif s in h_correction:\n stems_.append(h_correction[s])\n else:\n stems_.append(s)\n\n # Save data\n with open('freq_dict_verbs_.txt', 'w') as f:\n f.write('\\n'.join(stems_))\n\n\nif __name__ == '__main__':\n main()\n exit(0)\n"
},
{
"alpha_fraction": 0.49087589979171753,
"alphanum_fraction": 0.5072992444038391,
"avg_line_length": 25.7560977935791,
"blob_id": "7ae13c07967a080eb8fccca33d8d867f63c57207",
"content_id": "43c40063cc8b18f89e81fc6281ca471be9cc8a9f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1096,
"license_type": "permissive",
"max_line_length": 85,
"num_lines": 41,
"path": "/d4_parse/archive/itu_parse.py",
"repo_name": "heikalb/thesis-scripts",
"src_encoding": "UTF-8",
"text": "\"\"\"\nSend context sentences containing verbs to ITU to get morphological parses\nHeikal Badrulhisham <[email protected]>, 2019\n\"\"\"\nimport pipeline_caller\n\n\ndef main():\n results = []\n sents = open('../d2_data/all_sents_spellchecked.txt', 'r').read().split('\\n')\n\n caller = pipeline_caller.PipelineCaller()\n tool_name = \"morphanalyzer\"\n api_token = \"sQj6zxcVt7JzWXHNTdRu3QRzc6i8KZz7\"\n\n start_i = 346\n i = 0\n for s in sents[start_i:]:\n try:\n print(s)\n curr_result = []\n\n for w in s.split():\n r = caller.call(tool_name, w, api_token)\n r = ' '.join(r.split('\\n'))\n curr_result.append(r)\n\n curr_result = '\\n'.join(curr_result)\n results.append('<S> <S>+BSTag\\n{0}\\n</S> </S>+ESTag'.format(curr_result))\n i += 1\n print(curr_result)\n except ConnectionResetError:\n True\n\n if i % 1000 == 0 or i == len(sents[start_i:]):\n with open('parsed_sents_{0}.txt'.format(i), 'w') as f:\n f.write('\\n'.join(results))\n\nif __name__ == '__main__':\n main()\n exit(0)"
},
{
"alpha_fraction": 0.5926605463027954,
"alphanum_fraction": 0.6146789193153381,
"avg_line_length": 25,
"blob_id": "3b6fc1af3ae041beb25c474e56452a5f60cf1c09",
"content_id": "7c11a670137d56e9f3f0c9cdfaa686e92615b942",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 545,
"license_type": "permissive",
"max_line_length": 104,
"num_lines": 21,
"path": "/d3_preprocess_data/archive/fix_anlat.py",
"repo_name": "heikalb/thesis-scripts",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nFilter out results from the file for 'anla-' to only include 'anlat-' (the base verb plus the causative)\nHeikal Badrulhisham <[email protected]>, 2019\n\"\"\"\nimport csv\n\nsaved_lines = []\n\nwith open('../d2_data/query_results/tnc_query_result_13_attn.csv', 'rb') as f:\n rdr = csv.reader(f)\n\n for row in rdr:\n if 'anlat' in row[3].lower():\n saved_lines.append(row)\n\nwith open('../d2_data/query_results/tnc_query_result_13.csv', 'wb') as f:\n wrtr = csv.writer(f)\n\n for line in saved_lines:\n wrtr.writerow(line)"
},
{
"alpha_fraction": 0.4774957597255707,
"alphanum_fraction": 0.4788494110107422,
"avg_line_length": 20.88888931274414,
"blob_id": "5ea9a0b5a9dc44a2eb11dbf58797e397367c0a51",
"content_id": "fe57eecd0b01998e9846ced73e519d2bdf2747f0",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3121,
"license_type": "permissive",
"max_line_length": 72,
"num_lines": 135,
"path": "/d3_preprocess_data/spelling_sub.py",
"repo_name": "heikalb/thesis-scripts",
"src_encoding": "UTF-8",
"text": "\"\"\"\nOrthographic transformations for spell correction in collect_queries.py.\nHeikal Badrulhisham <[email protected]>, 2019\n\"\"\"\n\nsuggestions = {\n r'kacaklayan': 'kucaklayan',\n r'kacakladı': 'kucakladı',\n r'^olca(k|ğ)': 'olacak',\n r'^[iı]s[ıi]t': 'ısıt',\n r'^[iı]s[iı]n': 'ısın',\n r'^[iı]s[ıi]r': 'ısır',\n r'^et[cç]e': 'edece',\n r'^yapilaşma': 'yapılaşma',\n r'^ismar': 'ısmar',\n r'^gımıl': 'kımıl',\n r'^isbat': 'ispat',\n r'^miril': 'mıril',\n r'^taptuk': 'taptık',\n r'^yabanci': 'yabancı',\n r'^heycan': 'heyecan',\n r'^haykiriri': 'haykırırı',\n r'^haykiri': 'haykırı',\n r'^haykir': 'haykır',\n r's[iı]n[iı]fland[iı]r[iı]lmas[iı]': 'sınıflandırılması',\n r'^sinif': 'sınıf',\n r'^farkli': 'farklı',\n r'^tika': 'tıka',\n r'^kacak': 'kucak',\n r'^zayifl': 'zayıf',\n r'^danişma': 'danış',\n r'^aydin': 'aydın',\n r'koy[iı]m': 'koyayım',\n r'çıkucak': 'çıkacak',\n r'söyliyim': 'söyleyeyim',\n\n r'uo$': 'uyor',\n r'ıo$': 'ıyor',\n r'io$': 'iyor',\n r'üo$': 'üyor',\n r'iyo$': 'iyor',\n r'ıyo$': 'ıyor',\n r'uyo$': 'uyor',\n r'üyo$': 'üyor',\n r'yolar': 'yorlar',\n r'yodu': 'yordu',\n r'yomuş': 'yormuş',\n\n r'io(ru)?m$': 'iyorum',\n r'ıo(ru)?m$': 'ıyorum',\n r'iyom$': 'iyorum',\n r'ıyom$': 'ıyorum',\n r'uyom$': 'uyorum',\n r'üyom$': 'üyorum',\n\n r'iyon$': 'iyorsun',\n r'uyon$': 'uyorsun',\n r'ıyon$': 'ıyorsun',\n r'üyon$': 'üyorsun',\n r'uos': 'uyors',\n r'ıos': 'ıyors',\n r'ios': 'iyors',\n r'üos': 'üyors',\n r'iyos': 'iyors',\n r'ıyos': 'ıyors',\n r'uyos': 'uyors',\n r'üyos': 'üyors',\n\n r'iyonuz$': 'iyorsunuz',\n r'uyonuz$': 'uyorsunuz',\n r'ıyonuz$': 'ıyorsunuz',\n r'üyonuz$': 'üyorsunuz',\n\n\n r'iyoz$': 'iyoruz',\n r'ıyoz$': 'ıyoruz',\n r'uyoz$': 'uyoruz',\n r'üyoz$': 'üyoruz',\n\n r'ioz$': 'iyoruz',\n r'ıoz$': 'ıyoruz',\n r'iyoz$': 'iyoruz',\n r'ıyoz$': 'ıyoruz',\n r'uyoz$': 'uyoruz',\n r'üyoz$': 'üyoruz',\n\n r'[uı]cak': 'acak',\n r'icek': 'ecek',\n r'lcek': 'lecek',\n r'lcak': 'lacak',\n r'micek': 'meyecek',\n r'mıcek': 'mayacak',\n r'macak': 'mayacak',\n r'mecek': 'meyecek',\n r'iycek': 'eyecek',\n r'ıycak': 'ayacak',\n\n r'[uı]cağ': 'acağ',\n r'iceğ': 'eceğ',\n r'miceğ': 'meyeceğ',\n r'mıceğ': 'mayacağ',\n r'macağ': 'mayacağ',\n r'meceğ': 'meyeceğ',\n r'iyceğ': 'eyeceğ',\n r'ıycağ': 'ayacağ',\n r'mıcanı': 'mayacağını',\n\n r'[ıau]caz$': 'acağız',\n r'[ie]cez$': 'eceğiz',\n r'micez$': 'meyeceğiz',\n r'mıcaz$': 'mayacağız',\n r'iycez$': 'eyeceğiz',\n r'ıycaz$': 'ayacağız',\n\n r'[auı]cam$': 'acağım',\n r'[ie]cem$': 'eceğim',\n r'mıcam$': 'mayacağım',\n r'micem$': 'meyeceğim',\n r'iycem': 'eyecem',\n r'ıycam': 'ayacam',\n\n r'e?cen$': 'eceksin',\n r'[auı]?can$': 'acaksın',\n\n r'eğe$': 'eye',\n r'ağa$': 'aya',\n\n r'lari$': 'ları',\n\n r'̇z': 'z',\n r'̇r': 'r',\n r'̇̇̇ṁ': 'm',\n r'̇̇̇ẏ': 'y',\n r'̇n': 'n',\n}\n"
},
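The keys in suggestions above are regular expressions (note the ^ and $ anchors), so applying the table presumably looks like the loop below; apply_suggestions is an illustrative helper, not a function from this repo, and the import assumes you run from the d3_preprocess_data directory.

import re

from spelling_sub import suggestions

def apply_suggestions(word):
    # Rewrite the word with the first pattern that matches it
    for pattern, replacement in suggestions.items():
        if re.search(pattern, word):
            return re.sub(pattern, replacement, word)
    return word

print(apply_suggestions('geliyo'))   # -> 'geliyor'
print(apply_suggestions('yapıyoz'))  # -> 'yapıyoruz'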
{
"alpha_fraction": 0.5452194213867188,
"alphanum_fraction": 0.5707005262374878,
"avg_line_length": 30.756038665771484,
"blob_id": "b207a9fa7af2427862be0651ee8d7af6794182f5",
"content_id": "bb858beef070c0781c8f339c62625af1f0b9be71",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13161,
"license_type": "permissive",
"max_line_length": 110,
"num_lines": 414,
"path": "/d5_statistics/archive/risk_ratio_analysis.py",
"repo_name": "heikalb/thesis-scripts",
"src_encoding": "UTF-8",
"text": "# -*- coding: UTF-8 -*-\n\"\"\"\nDisplay data for steps of analysis in Section 4.\nHeikal Badrulhisham <[email protected]>, 2019\n\"\"\"\nimport csv\nimport os\nimport math\nimport numpy\nfrom collections import defaultdict\nfrom scipy import stats\n\n\ndef freq_filter(rows):\n return [r for r in rows if float(r[12]) >= 100 and float(r[13]) >= 100]\n\n\ndef rr_ranges():\n \"\"\"\n Display risk ratio by ranges for Table 4.1-1.\n \"\"\"\n rr = [float(d[1]) for d in data]\n rr_ci = [float(d[8]) for d in data]\n\n measures = [(rr, 'Risk ratio'), (rr_ci, 'Risk ratio CI')]\n\n for measure in measures:\n upto_1 = len([d for d in measure[0] if d <= 1])\n below_2 = len([d for d in measure[0] if 1 < d < 2])\n above_2 = len([d for d in measure[0] if 2 <= d])\n\n print(measure[1])\n print(f'RR below 1: {upto_1} ({upto_1 / len(measure[0])}%)')\n print(f'RR below 2: {below_2} ({below_2 / len(measure[0])}%)')\n print(f'RR above 2: {above_2} ({above_2 / len(measure[0])}%)')\n\n\ndef register():\n \"\"\"\n Show ranges of risk ratio by register for Table 4.1-4.\n \"\"\"\n for reg in ['', '_written', '_spoken']:\n file_path = f'association_stats{reg}/000__association_stats{reg}.csv'\n\n with open(file_path, 'r') as f:\n data = [r for r in csv.reader(f)][1:]\n data = freq_filter(data)\n\n total = len(data)\n up_to_1 = len([r for r in data if float(r[1]) <= 1])\n more_than_1 = len([r for r in data if float(r[1]) > 1])\n\n print(f'Pair types in {reg} register')\n print(f'Up to 1: {up_to_1} ({round(100*up_to_1/total)}%)')\n print(f'More than 1: {more_than_1} ({round(100*more_than_1/total)}%)')\n print(total, '\\n')\n\n total = sum([int(r[-3]) for r in data])\n up_to_1 = sum([int(r[-3]) for r in data if float(r[1]) <= 1])\n more_than_1 = sum([int(r[-3]) for r in data if float(r[1]) > 1])\n\n print(f'Pair instances in {reg} register')\n print(f'Up to 1: {up_to_1} ({round(100*up_to_1/total)}%)')\n print(f'More than 1 {more_than_1} ({round(100*more_than_1/total)}%)')\n print(total, '\\n')\n\n\ndef adjacency():\n \"\"\"\n Display data related to risk ratio and suffix adjacency for Table 13.\n Run ANOVA on effect of adjacency for Section 4.1.\n \"\"\"\n # Get collocate pairs with risk ratio above 1\n data_ = [row for row in data if float(row[1]) > 1]\n\n # Get adjacency frequencies\n def f(x): return [float(r[1]) for r in data_ if x(int(r[-2])/int(r[-3]))]\n\n adjacent = f(lambda x: x == 1)\n subadjacent = f(lambda x: 0 < x < 1)\n nonadjacent = f(lambda x: x == 0)\n\n # Display adjacent frequencies and related data\n def g(x, y): print(x, len(y), sum(y)/len(y), numpy.var(y), numpy.median(y))\n\n print('Pair frequency, average RR,variance, median')\n g('Adjacent:', adjacent)\n g('Subadjacent:', subadjacent)\n g('Nonadjacent:', nonadjacent)\n\n # Conduct tests on adjacency\n print('\\n')\n print(stats.ttest_ind(adjacent, nonadjacent, equal_var=True))\n print(stats.ttest_ind(adjacent, nonadjacent, equal_var=False))\n print('Levene\\'s test:', stats.levene(adjacent, subadjacent, nonadjacent))\n print('1-way ANOVA:', (stats.f_oneway(adjacent, subadjacent, nonadjacent)))\n print('Pearson correlation:', stats.pearsonr([float(r[1]) for r in data],\n [int(r[-1]) for r in data]))\n\n\ndef asymmtery():\n \"\"\"\n Display ratios of risk ratio to risk ratio reverse for Table 14.\n \"\"\"\n # Get collocate pairs with risk ratio above 1\n data_ = [row for row in data if float(row[1]) > 1]\n\n # Get ratios\n def f(x): return max(float(x[1])/float(x[2]), float(x[2])/float(x[1]))\n\n atleast_2 = [f(d) for d in data_ if f(d) >= 
2]\n atleast_2_types = [d[0] for d in data_ if f(d) >= 2]\n below_2 = [f(d) for d in data_ if f(d) < 2]\n\n # Display data\n print(len(atleast_2), min(atleast_2), max(atleast_2))\n print(len(below_2), min(below_2), max(below_2))\n\n for e in atleast_2_types:\n print(e)\n\n\ndef has_subordinate():\n \"\"\"\n Count trigrams containing one of the subordinate suffixes for Section 4.2,\n approximately on page 57.\n \"\"\"\n # Open data files\n with open('trigram/suffix_trigrams.txt', 'r') as f:\n trigram_lines = f.read().split('\\n')\n\n # Count trigrams with a subordinate suffix\n subordinates = ['Inf2→Noun', 'PastPart→Noun', 'FutPart→Noun']\n has_subordinate = 0\n\n for trigram_line in trigram_lines:\n # Get information from trigram file line\n trigram, trigram_freq = trigram_line.split(') ')\n\n if any([suffix in trigram for suffix in subordinates]):\n has_subordinate += 1\n\n print('Number of trigrams with a subordinate marker: ', has_subordinate)\n\n\ndef test_normality():\n \"\"\"\n Test the main risk ratio data for normality for Section 4.3, approximately\n on page 58.\n \"\"\"\n # Get risk ratios\n data_ = [d[1] for d in data]\n\n # Run normality test\n print('Shapiro-Wilk test for normality:')\n print(stats.shapiro([math.log(float(d)) for d in data_]))\n\n\ndef integrity():\n \"\"\"\n Display ranges of integrity ratios for Table 18.\n \"\"\"\n # Get collocate pairs of different formulaicity\n all_pairs = [row for row in data]\n formulaic_pairs = [row for row in data if float(row[1]) > 1]\n nonformulaic_pairs = [row for row in data if float(row[1]) <= 1]\n\n for subdataset in [all_pairs, formulaic_pairs, nonformulaic_pairs]:\n\n def f(x):\n return len([r for r in subdataset if x(float(r[-2])/float(r[-3]))])\n\n exactly_1 = f(lambda x: x == 1)\n half = f(lambda x: 0.5 <= x < 1)\n below_half = f(lambda x: 0 < x < 0.5)\n zero = f(lambda x: x == 0)\n\n print(exactly_1, half, below_half, zero)\n print(exactly_1/len(subdataset), half/len(subdataset),\n below_half/len(subdataset), zero/len(subdataset))\n\n\ndef trigram_link_ratios():\n \"\"\"\n Display ranges of trigram link ratios for Table 19.\n \"\"\"\n # Get trigrams\n with open('trigram/suffix_trigrams.txt', 'r') as f:\n trigram_lines = f.read().split('\\n')\n\n # Store risk ratio of suffix pairs\n data_ = dict(zip([r[0] for r in data if float(r[1]) > 1], [float(r[1]) for r in data if float(r[1]) > 1]))\n\n # Store risk ratios\n risk_ratios = []\n\n # Get risk ratios of stem-trigrams\n for trigram_line in trigram_lines:\n # Get information from trigram file line\n trigram = trigram_line.split(') ')[0]\n\n # Form tuples from trigram strings\n trigram = trigram[1:].split(', ')\n trigram = tuple([suffix[1:-1] for suffix in trigram])\n\n # Get constituent bigrams within the trigram\n bigrams = [(trigram[0], trigram[1]), (trigram[1], trigram[2])]\n\n # Get risk ratio of each bigram\n try:\n curr_rr = (data_[str(bigrams[0])], data_[str(bigrams[1])])\n\n if all([rr > 1 for rr in curr_rr]):\n risk_ratios.append(curr_rr)\n except KeyError:\n continue\n\n # Get ranges of risk ratio ratios\n def f(x): return len([r for r in risk_ratios\n if x(min(r[0]/r[1], r[1]/r[0]))])\n\n rr_ratio_1 = f(lambda x: 0.9 <= x <= 1)\n rr_ratio_2 = f(lambda x: 0.5 <= x < 0.9)\n rr_ratio_3 = f(lambda x: 0.1 <= x < 0.5)\n rr_ratio_4 = f(lambda x: x < 0.1)\n\n # Display data\n print('0.9 ≤ x ≤ 1:', rr_ratio_1, rr_ratio_1/len(risk_ratios))\n print('0.5 ≤ x < 0.9:', rr_ratio_2, rr_ratio_2/len(risk_ratios))\n print('0.1 ≤ x < 0.5:', rr_ratio_3, rr_ratio_3/len(risk_ratios))\n 
print('x < 0.1:', rr_ratio_4, rr_ratio_4/len(risk_ratios))\n\n\ndef stem_trigram_formulas():\n \"\"\"\n Tell how many stem-trigram pairs have a risk ratio above 1 for\n approximately page 62.\n \"\"\"\n with open('trigram/stem_trigram_rr.csv', 'r') as f:\n data = [row for row in csv.reader(f)][1:]\n data = [r for r in data if float(r[4]) >= 100 and float(r[5]) >= 100]\n\n print(len([r for r in data if float(r[2]) > 1]))\n print(len(data))\n\n\ndef stem_by_trigram():\n \"\"\"\n Tell how many verbs are associated with certain trigrams for Table 20.\n \"\"\"\n with open('trigram/stem_trigram_rr.csv', 'r') as f:\n data = [row for row in csv.reader(f)][1:]\n data = [r for r in data if float(r[4]) >= 100 and float(r[5]) >= 100]\n\n above_1 = defaultdict(int)\n up_to_1 = defaultdict(int)\n num_hosting_verbs = defaultdict(int)\n risk_ratios = defaultdict(list)\n\n for row in data:\n trigram = row[1]\n risk_ratio = float(row[2])\n\n num_hosting_verbs[trigram] += 1\n risk_ratios[trigram].append(risk_ratio)\n\n if risk_ratio > 1:\n above_1[trigram] += 1\n if risk_ratio <= 1:\n up_to_1[trigram] += 1\n\n for trigram in above_1:\n print(trigram)\n\n print('\\n')\n\n for trigram in above_1:\n print(up_to_1[trigram],\n '({0:.0%})'.format(up_to_1[trigram]/num_hosting_verbs[trigram]))\n\n print('\\n')\n\n for trigram in above_1:\n print(above_1[trigram],\n '({0:.0%})'.format(above_1[trigram] / num_hosting_verbs[trigram]))\n\n print('\\n')\n\n for trigram in above_1:\n print(f'{round(math.log(min(risk_ratios[trigram]), 2), 2)}'\n f' - {round(math.log(max(risk_ratios[trigram]), 2), 2)}')\n\n\n# Get the formula frequency and proportion associated with verb types\ndef rr_dist(fpaths=[], save_file_name='rr_dist_by_verbs.csv'):\n save_rows = []\n\n for fpath in fpaths:\n with open(fpath, 'r') as f:\n rows = [r for r in csv.reader(f)][1:]\n\n if not rows:\n continue\n\n f_freq = sum([int(r[-1]) for r in rows if float(r[1]) > 1])\n num_f_types = len([r for r in rows if float(r[1]) > 1])\n inst_sum = sum([int(r[-1]) for r in rows])\n type_sum = len([r for r in rows])\n save_rows.append([fpath.split('_')[2], f_freq, f_freq/inst_sum,\n num_f_types, num_f_types/type_sum])\n\n with open(save_file_name, 'w') as f:\n row_1 = ['verb_lemma', 'formula_freq', 'formula_freq_norm',\n 'num_formula', 'formula_prop']\n\n csv.writer(f).writerow(row_1)\n csv.writer(f).writerows(save_rows)\n\n\n# Show how many collocate pairs appear with how many verb types\ndef top_pairs(fpaths):\n pair_count_byverbs = defaultdict(int)\n overall_rr = dict()\n\n # Tally verb type occurrences\n for fpath in fpaths:\n with open(fpath, 'r') as f:\n rows = [r for r in csv.reader(f)][1:]\n\n for r in rows:\n pair_count_byverbs[r[0]] += 1\n\n if '000' in fpath:\n overall_rr[r[0]] = r[1]\n\n # Display number of collocate pairs in different ranges of verb type freq.\n for i in range(8):\n pairs = [p for p in pair_count_byverbs\n if i*100 <= pair_count_byverbs[p] < (i+1)*100]\n\n print(i*100, (i+1)*100)\n print(len(pairs), '\\n')\n\n # Get the most verb-frequent collocate pairs\n keys = [k for k in pair_count_byverbs]\n keys.sort(reverse=True, key=lambda x: pair_count_byverbs[x])\n\n for p in keys[:81]:\n print(f'{p}\\t\\t\\t\\t{pair_count_byverbs[p]}\\t\\t\\t\\t{overall_rr[p]}')\n\n return keys[:20]\n\n\n# Find the trend of the RR of a pair across verbs\ndef cross_verb_trend(fpaths):\n # Get all pairs in whole dataset\n with open(fpaths[-1], 'r') as f:\n target_pairs = [r[0] for r in csv.reader(f)][1:]\n\n target_rrs = dict(zip(target_pairs,\n 
[defaultdict(lambda:'') for t in target_pairs]))\n\n # Get RR of collocate pairs across verb files\n for fpath in fpaths:\n curr_stem = fpath.split('_')[2]\n\n with open(fpath, 'r') as f:\n for r in [r_ for r_ in csv.reader(f)][1:]:\n target_rrs[r[0]][curr_stem] = r[1]\n\n # Save data\n with open('cross_verb_trends.csv', 'w') as f:\n stems = [fpath.split('_')[2] for fpath in fpaths]\n row_1 = ['Pair'] + [s for s in stems] + ['Verb_type_frequency']\n rows = [[k] + [target_rrs[k][s] for s in stems] +\n [len([s for s in target_rrs[k] if target_rrs[k][s]]) - 1]\n for k in target_rrs]\n\n csv.writer(f).writerow(row_1)\n csv.writer(f).writerows(rows)\n\n\ndef formulas():\n for r in data:\n if int(r[-3]) == int(r[-2] or int(r[-4]) == int(r[-2])):\n print(r[0])\n\n\nif __name__ == '__main__':\n data_dir = os.listdir('association_stats/')\n data_dir.sort()\n data_files = [os.path.join('association_stats/', fp) for fp in data_dir]\n\n with open(f'association_stats/000__association_stats.csv', 'r') as f:\n data = [row for row in csv.reader(f)][1:]\n data = freq_filter(data)\n\n # rr_ranges()\n # register()\n # adjacency()\n # asymmtery()\n # has_subordinate()\n # test_normality()\n # integrity()\n trigram_link_ratios()\n # stem_trigram_formulas()\n # stem_by_trigram()\n # rr_dist(data_files)\n # tops = top_pairs(data_files)\n # cross_verb_trend(data_files)\n # test_normality('association_stats/000__association_stats.csv')\n # formulas()\n\n exit(0)\n"
},
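The script above consumes risk ratios precomputed elsewhere in the pipeline. For reference, here is a minimal sketch of how a risk ratio and its 95% confidence interval are conventionally derived from a 2x2 contingency table (the standard Katz log method; this helper is not part of the repo, and the cell labels are only one plausible reading of the suffix-pair setup):

import math

def risk_ratio(a, b, c, d):
    # a: suffix2 occurs after suffix1    b: suffix1 occurs without suffix2
    # c: suffix2 occurs without suffix1  d: neither suffix occurs
    rr = (a / (a + b)) / (c / (c + d))
    se = math.sqrt(1/a - 1/(a + b) + 1/c - 1/(c + d))
    low = math.exp(math.log(rr) - 1.96 * se)
    high = math.exp(math.log(rr) + 1.96 * se)
    return rr, (low, high)

print(risk_ratio(30, 70, 10, 90))  # (3.0, (~1.55, ~5.80))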
{
"alpha_fraction": 0.5840283036231995,
"alphanum_fraction": 0.5898407697677612,
"avg_line_length": 33.71052551269531,
"blob_id": "853e53518d2c1acd947d7112ebde1d2dd2ebb3dd",
"content_id": "f45504d92131b7f834544b2b197cd7f6ab9d81cd",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4009,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 114,
"path": "/d0_prep_query_terms/get_freq_dict_verbs.py",
"repo_name": "heikalb/thesis-scripts",
"src_encoding": "UTF-8",
"text": "\"\"\"\nExtract highest frequency verb stems from the Frequency Dictionary of Turkish.\nHeikal Badrulhisham <heikal93gmail.com>, 2019\n\"\"\"\nimport re\n\n\ndef get_indices_stems(freq_dict):\n \"\"\"\n Get pairs of frequency indices and verb stems from the Frequency Dictionary\n :param freq_dict: .txt file of the Frequency Dictioary\n :return: list of index-verb stem pairs\n \"\"\"\n\n # Section of the dictionary with the verb stems\n verb_section = freq_dict.read().split('\\n')[75067:77349]\n verb_section = '\\n'.join([line for line in verb_section if line])\n\n # Isolate verb stems\n verb_lines = re.findall(r'\\d+\\s*\\|\\s*\\d+\\n*.+\\sto.*', verb_section)\n\n verb_lines = [line.replace('\\n', ' ') for line in verb_lines\n if line.split()[4] == 'to']\n\n # Pairs of frequency indices and stems\n indices_stems = []\n\n # Get frequency indices from the dictionary for sorting the stems\n for line in verb_lines:\n index = int(line.split('|')[0].strip())\n stem_line = ' '.join(line.split()[3:])\n indices_stems.append((index, stem_line))\n\n return indices_stems\n\n\ndef stem_correction(stem_index, h_words, h_corrections, morph_corrections):\n \"\"\"\n Apply spelling and morphological corrections to stems\n\n :param stem_index: pair of verb stem and its index\n :param h_words: list of misspelled words involving 'ş'\n :param h_corrections: list of correct spelling for words involving 'ş'\n :param morph_corrections: list of morphological mappings\n :return: corrected stem\n \"\"\"\n stem = stem_index[1].split()[0]\n stem = stem.replace('j', 'ğ')\n\n if stem not in h_words:\n stem = stem.replace('h', 'ş')\n elif stem in h_corrections:\n stem = h_corrections[stem]\n\n if stem in morph_corrections:\n stem = morph_corrections[stem]\n\n return stem\n\n\ndef main():\n \"\"\"\n Extract verbs from the Frequency Dictionary and save them in a .txt file.\n \"\"\"\n\n # Open Frequency Disctionary file\n freq_dict = open('freq_dict.txt', 'r')\n\n # Get pairs of frequency indices and verb stems\n indices_stems = get_indices_stems(freq_dict)\n\n # Account for error in txt file of the dictionary\n indices_stems.append((699, 'yapılaş to strcuture'))\n\n # For spelling and morphological correction\n h_words = ['hazırla', 'hisset', 'hesapla', 'bahset', 'harca',\n 'hızlan', 'hedefle', 'rahatla', 'hohlan', 'hallet',\n 'zehirle', 'haykır', 'heyecan', 'hükmet', 'hafifle',\n 'havalan', 'hastalan', 'hahla', 'fethet', 'sahiplen',\n 'hapset', 'hareketlen', 'buharlah', 'hıçkır', 'hüphelen',\n 'mahvet', 'kamah', 'hırpala', 'hatırla', 'haberleh',\n 'heyecanlan']\n\n h_corrections = {'hohlan': 'hoşlan', 'hahla': 'haşla',\n 'buharlah': 'buharlaş', 'hüphelen': 'şüphelen',\n 'kamah': 'kamaş', 'haberleh': 'haberleş'}\n\n morph_corrections = {'adlandır': 'adlan', 'bulundur': 'bulun',\n 'sınıflandır': 'sınıflan', 'görevlendir': 'görevlen',\n 'haberleş': 'haberle', 'abart': 'abar',\n 'kararlaştır': 'kararla', 'sınırlandır': 'sınırlan',\n 'ödüllendir': 'ödüllen', 'savrul': 'savrul',\n 'biçimlendir': 'biçimlen', 'ilişkilendir': 'ilişkilen',\n 'isimlendir': 'isimlen', 'anlamlandır': 'anlamlan',\n 'yardımlaş': 'yardımla'}\n\n # For storing corrected verb stems\n corrected_stems = []\n\n # Apply corrections to verb stems\n for index_stem in sorted(indices_stems):\n corrected_stem = stem_correction(index_stem, h_words, h_corrections,\n morph_corrections)\n\n corrected_stems.append(corrected_stem)\n\n # Save data\n with open('freq_dict_verbs.txt', 'w') as f:\n f.write('\\n'.join(corrected_stems))\n\n\nif __name__ == '__main__':\n 
main()\n exit(0)\n"
},
{
"alpha_fraction": 0.5630667209625244,
"alphanum_fraction": 0.5889217853546143,
"avg_line_length": 32.551204681396484,
"blob_id": "13454faa04d86ff6498a64bf7a66b53f260eacdb",
"content_id": "8c06f0524a38662ed015e15618dc7a4a6eeb0587",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11153,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 332,
"path": "/d5_statistics/risk_ratio_analysis.py",
"repo_name": "heikalb/thesis-scripts",
"src_encoding": "UTF-8",
"text": "# -*- coding: UTF-8 -*-\n\"\"\"\nDisplay data for steps of analysis in Section 4.\nHeikal Badrulhisham <[email protected]>, 2019\n\"\"\"\nimport csv\nimport numpy\nfrom scipy import stats\nimport math\nfrom collections import defaultdict\n\n\ndef rr_ranges():\n \"\"\"\n Display risk ratio by ranges for Table 10.\n \"\"\"\n # Get risk ratios and risk ratio confidence intervals\n rr = [float(d[1]) for d in data]\n rr_ci = [float(d[8]) for d in data]\n\n # Pair up data with correct label\n measures = [(rr, 'Risk ratio'), (rr_ci, 'Risk ratio CI')]\n\n # Display ranges of data\n for measure in measures:\n # Divide data into ranges\n upto_1 = len([d for d in measure[0] if d <= 1])\n below_2 = len([d for d in measure[0] if 1 < d < 2])\n above_2 = len([d for d in measure[0] if 2 <= d])\n\n # Display ranges\n print(measure[1])\n print(f'RR below 1: {upto_1} ({upto_1 / len(measure[0])}%)')\n print(f'RR below 2: {below_2} ({below_2 / len(measure[0])}%)')\n print(f'RR above 2: {above_2} ({above_2 / len(measure[0])}%)')\n\n\ndef rr_ranges_by_register():\n \"\"\"\n Show ranges of risk ratios by register for Table 12.\n \"\"\"\n # Iterate by register. Use empty string for entire dataset\n for reg in ['', '_written', '_spoken']:\n # Get correct path to data file\n file_path = f'association_stats{reg}/000__association_stats{reg}.csv'\n\n # Get data, filter out low frequency pairs\n with open(file_path, 'r') as f:\n reg_data = [r for r in csv.reader(f)][1:]\n reg_data = [r for r in reg_data if float(r[12]) >= 100\n and float(r[13]) >= 100]\n\n # Get frequencies in ranges (type frequency)\n total = len(reg_data)\n up_to_1 = len([r for r in reg_data if float(r[1]) <= 1])\n more_than_1 = len([r for r in reg_data if float(r[1]) > 1])\n\n # Display ranges (type frequency)\n print(f'Pair types in {reg} register')\n print(f'Up to 1: {up_to_1} ({round(100*up_to_1/total)}%)')\n print(f'More than 1: {more_than_1} ({round(100*more_than_1/total)}%)')\n print('Total:', total, '\\n')\n\n # Get frequencies in ranges (token frequency)\n total = sum([int(r[-3]) for r in reg_data])\n up_to_1 = sum([int(r[-3]) for r in reg_data if float(r[1]) <= 1])\n more_than_1 = sum([int(r[-3]) for r in reg_data if float(r[1]) > 1])\n\n # Display ranges (token frequency)\n print(f'Pair instances in {reg} register')\n print(f'Up to 1: {up_to_1} ({round(100*up_to_1/total)}%)')\n print(f'More than 1 {more_than_1} ({round(100*more_than_1/total)}%)')\n print('Total:', total, '\\n')\n\n\ndef adjacency():\n \"\"\"\n Display data related to risk ratio and suffix adjacency for Table 13.\n Run ANOVA on effect of adjacency for Section 4.1.\n \"\"\"\n # Get collocate pairs with risk ratio above 1\n data_ = [row for row in data if float(row[1]) > 1]\n\n # Get adjacency frequencies\n def f(x): return [float(r[1]) for r in data_ if x(int(r[-2])/int(r[-3]))]\n\n adjacent = f(lambda x: x == 1)\n subadjacent = f(lambda x: 0 < x < 1)\n nonadjacent = f(lambda x: x == 0)\n\n # Display adjacent frequencies and related data\n def g(x, y): print(x, len(y), sum(y)/len(y), numpy.var(y), numpy.median(y))\n\n print('Pair frequency, average RR,variance, median')\n g('Adjacent:', adjacent)\n g('Subadjacent:', subadjacent)\n g('Nonadjacent:', nonadjacent)\n\n # Conduct tests on adjacency\n print('\\n')\n print(stats.ttest_ind(adjacent, nonadjacent, equal_var=True))\n print(stats.ttest_ind(adjacent, nonadjacent, equal_var=False))\n print('Levene\\'s test:', stats.levene(adjacent, subadjacent, nonadjacent))\n print('1-way ANOVA:', (stats.f_oneway(adjacent, 
subadjacent, nonadjacent)))\n print('Pearson correlation:', stats.pearsonr([float(r[1]) for r in data],\n [int(r[-1]) for r in data]))\n\n\ndef asymmtery():\n \"\"\"\n Display ratios of risk ratio to risk ratio reverse for Table 14.\n \"\"\"\n # Get collocate pairs with risk ratio above 1\n data_ = [row for row in data if float(row[1]) > 1]\n\n # Get ratios\n def f(x): return max(float(x[1])/float(x[2]), float(x[2])/float(x[1]))\n\n atleast_2 = [f(d) for d in data_ if f(d) >= 2]\n atleast_2_types = [d[0] for d in data_ if f(d) >= 2]\n below_2 = [f(d) for d in data_ if f(d) < 2]\n\n # Display data\n print(len(atleast_2), min(atleast_2), max(atleast_2))\n print(len(below_2), min(below_2), max(below_2))\n\n for e in atleast_2_types:\n print(e)\n\n\ndef has_subordinate():\n \"\"\"\n Count trigrams containing one of the subordinate suffixes for Section 4.2.\n \"\"\"\n # Open data files\n with open('trigram/suffix_trigrams.txt', 'r') as f:\n trigram_lines = f.read().split('\\n')\n\n # Count trigrams with a subordinate suffix\n subordinates = ['Inf2→Noun', 'PastPart→Noun', 'FutPart→Noun']\n num_has_subordinate = 0\n\n for trigram_line in trigram_lines:\n # Get information from trigram file line\n trigram, trigram_freq = trigram_line.split(') ')\n\n # Update count\n if any([suffix in trigram for suffix in subordinates]):\n num_has_subordinate += 1\n\n # Display data\n print('Number of trigrams with a subordinate marker: ', num_has_subordinate)\n\n\ndef test_normality():\n \"\"\"\n Test the main risk ratio data for normality for Section 4.3.\n \"\"\"\n # Get risk ratios\n data_ = [d[1] for d in data]\n\n # Run normality test\n print('Shapiro-Wilk test for normality:')\n print(stats.shapiro([math.log(float(d)) for d in data_]))\n\n\ndef integrity():\n \"\"\"\n Display ranges of integrity ratios for Table 18.\n \"\"\"\n # Get collocate pairs of different formulaicity\n all_pairs = data\n formulaic_pairs = [row for row in data if float(row[1]) > 1]\n nonformulaic_pairs = [row for row in data if float(row[1]) <= 1]\n\n # Iterate over different subsets of collocate pairs\n for subdataset in [all_pairs, formulaic_pairs, nonformulaic_pairs]:\n\n # For a given list of collocate pairs, return the number of collocate\n # pairs whose integrity ratio meet a threshold given by x().\n def f(x):\n return len([r for r in subdataset if x(float(r[-2])/float(r[-3]))])\n\n # Get frequencies of different integrity ratio categories\n exactly_1 = f(lambda x: x == 1)\n half = f(lambda x: 0.5 <= x < 1)\n below_half = f(lambda x: 0 < x < 0.5)\n zero = f(lambda x: x == 0)\n\n # Display data\n print(exactly_1, half, below_half, zero)\n print(exactly_1/len(subdataset), half/len(subdataset),\n below_half/len(subdataset), zero/len(subdataset))\n\n\ndef trigram_link_ratios():\n \"\"\"\n Display ranges of trigram link ratios for Table 19.\n \"\"\"\n # Get trigrams\n with open('trigram/suffix_trigrams.txt', 'r') as f:\n trigram_lines = f.read().split('\\n')\n\n # Store risk ratio of suffix pairs\n collocate_pairs = [r[0] for r in data if float(r[1]) > 1]\n risk_ratios = [float(r[1]) for r in data if float(r[1]) > 1]\n data_ = dict(zip(collocate_pairs, risk_ratios))\n\n # Store risk ratios\n risk_ratios = []\n\n # Get risk ratios of stem-trigrams\n for trigram_line in trigram_lines:\n # Get information from trigram file line\n trigram = trigram_line.split(') ')[0]\n\n # Form tuples from trigram strings\n trigram = trigram[1:].split(', ')\n trigram = tuple([suffix[1:-1] for suffix in trigram])\n\n # Get constituent bigrams within the 
trigram\n bigrams = [(trigram[0], trigram[1]), (trigram[1], trigram[2])]\n\n # Get risk ratio of each bigram\n try:\n curr_rr = (data_[str(bigrams[0])], data_[str(bigrams[1])])\n\n if all([rr > 1 for rr in curr_rr]):\n risk_ratios.append(curr_rr)\n except KeyError:\n continue\n\n # Get ranges of risk ratio ratios\n def f(x): return len([r for r in risk_ratios\n if x(min(r[0]/r[1], r[1]/r[0]))])\n\n rr_ratio_1 = f(lambda x: 0.9 <= x <= 1)\n rr_ratio_2 = f(lambda x: 0.5 <= x < 0.9)\n rr_ratio_3 = f(lambda x: 0.1 <= x < 0.5)\n rr_ratio_4 = f(lambda x: x < 0.1)\n\n # Display data\n print('0.9 ≤ x ≤ 1:', rr_ratio_1, rr_ratio_1/len(risk_ratios))\n print('0.5 ≤ x < 0.9:', rr_ratio_2, rr_ratio_2/len(risk_ratios))\n print('0.1 ≤ x < 0.5:', rr_ratio_3, rr_ratio_3/len(risk_ratios))\n print('x < 0.1:', rr_ratio_4, rr_ratio_4/len(risk_ratios))\n\n\ndef stem_trigram_formulas():\n \"\"\"\n Tell how many stem-trigram pairs have a risk ratio above 1 for Section 4.4.\n \"\"\"\n # Get stem-trigram pairs\n with open('trigram/stem_trigram_rr.csv', 'r') as f:\n data = [row for row in csv.reader(f)][1:]\n data = [r for r in data if float(r[4]) >= 100 and float(r[5]) >= 100]\n\n # Count stem-trigram pairs with risk ratio above 1\n num_above_1 = len([r for r in data if float(r[2]) > 1])\n\n # Display data\n print('Stem-trigram pairs with risk ratio above 1:', num_above_1)\n print('Total number of stem-trigram pairs', len(data))\n\n\ndef stem_by_trigram():\n \"\"\"\n Tell how many verbs are associated with certain trigrams for Table 20.\n \"\"\"\n # Get stem-trigram pairs\n with open('trigram/stem_trigram_rr.csv', 'r') as f:\n data = [row for row in csv.reader(f)][1:]\n data = [r for r in data if float(r[4]) >= 100 and float(r[5]) >= 100]\n\n # Categories of association\n above_1 = defaultdict(int)\n up_to_1 = defaultdict(int)\n num_hosting_verbs = defaultdict(int)\n risk_ratios = defaultdict(list)\n\n # Get frequencies\n for row in data:\n trigram = row[1]\n risk_ratio = float(row[2])\n\n num_hosting_verbs[trigram] += 1\n risk_ratios[trigram].append(risk_ratio)\n\n if risk_ratio > 1:\n above_1[trigram] += 1\n if risk_ratio <= 1:\n up_to_1[trigram] += 1\n\n # Display data\n # Display trigrams\n for trigram in above_1:\n print(trigram)\n\n # Display absolute and relative frequencies\n for trigram in above_1:\n print(up_to_1[trigram],\n '({0:.0%})'.format(up_to_1[trigram]/num_hosting_verbs[trigram]))\n\n for trigram in above_1:\n print(above_1[trigram],\n '({0:.0%})'.format(above_1[trigram] / num_hosting_verbs[trigram]))\n\n # Display range of log risk ratio for above 1 category\n for trigram in above_1:\n print(f'{round(math.log(min(risk_ratios[trigram]), 2), 2)}'\n f' - {round(math.log(max(risk_ratios[trigram]), 2), 2)}')\n\n\nif __name__ == '__main__':\n # Get data, filter out low frequency pairs\n with open(f'association_stats/000__association_stats.csv', 'r') as f:\n data = [row for row in csv.reader(f)][1:]\n data = [r for r in data if float(r[12]) >= 100 and float(r[13]) >= 100]\n\n # Run analyses\n rr_ranges()\n rr_ranges_by_register()\n adjacency()\n asymmtery()\n has_subordinate()\n test_normality()\n integrity()\n trigram_link_ratios()\n stem_trigram_formulas()\n stem_by_trigram()\n\n exit(0)\n"
},
{
"alpha_fraction": 0.5931528806686401,
"alphanum_fraction": 0.5967356562614441,
"avg_line_length": 33.88888931274414,
"blob_id": "4f01868b1b3147948740a2010b10df2b9306957e",
"content_id": "c426434bf2fb22fb4b2d4e73474e014557c956f7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 2512,
"license_type": "permissive",
"max_line_length": 89,
"num_lines": 72,
"path": "/d4_parse/src/ParseMorphemes.java",
"repo_name": "heikalb/thesis-scripts",
"src_encoding": "UTF-8",
"text": "/**\n * Get morphological parses on TNC data.\n * Heikal Badrulhisham <[email protected]>, 2019\n */\n\nimport java.util.List;\nimport zemberek.morphology.TurkishMorphology;\nimport zemberek.morphology.analysis.SentenceAnalysis;\nimport zemberek.morphology.analysis.WordAnalysis;\nimport java.io.BufferedReader;\nimport java.io.FileReader;\nimport java.io.IOException;\nimport java.util.ArrayList;\nimport java.io.FileWriter;\nimport java.io.File;\n\npublic class ParseMorphemes\n{\n public static void main(String[] args) throws IOException\n {\n /**\n * Perform morphological parses on words in sentences in preprocessed\n * TNC data file for all verb stems. Save parses in another set of files\n * for all verb stems.\n */\n // Files of TNC query results by verbs\n File[] queryFiles = new File(\"../d2_data/joined/\").listFiles();\n\n // Process each file\n for(File f: queryFiles)\n {\n // Read the content of the file\n String dataFile = f.getName();\n FileReader fileReader = new FileReader(\"../d2_data/joined/\" + dataFile);\n BufferedReader reader = new BufferedReader(fileReader);\n\n // Turkish morphological parser\n TurkishMorphology parser = TurkishMorphology.createWithDefaults();\n\n // For collecting parses\n ArrayList<List> parses = new ArrayList();\n\n // Go through lines in file\n String line;\n\n while ((line = reader.readLine()) != null)\n {\n // Get words in sentence as list\n String sentence = line.split(\"\\t\")[0];\n // Get morphological parses for each word in sentence\n List<WordAnalysis> analyses = parser.analyzeSentence(sentence);\n // Disambiguate between multiple parses\n SentenceAnalysis finalAnalysis = parser.disambiguate(sentence, analyses);\n // Collect best parse\n parses.add(finalAnalysis.bestAnalysis());\n }\n\n // Save data\n String fileIndex = dataFile.split(\"_\")[0];\n String fileStem = dataFile.split(\"_\")[1];\n String path = \"parses/\" + fileIndex + \"_\" + fileStem + \"_parses.txt\";\n File saveFile = new File(path);\n FileWriter writer = new FileWriter(saveFile);\n\n for(List parseList: parses)\n writer.append(parseList.toString() + '\\n');\n\n writer.flush();\n writer.close();\n }\n }\n}\n"
},
{
"alpha_fraction": 0.5491868853569031,
"alphanum_fraction": 0.5643828511238098,
"avg_line_length": 30.788135528564453,
"blob_id": "eb18aefd38b7bfdb3b785af0fc5d455a2e62fd65",
"content_id": "7d8d8fe61cdf74989acf1b8b7516458e0acd54a9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3752,
"license_type": "permissive",
"max_line_length": 112,
"num_lines": 118,
"path": "/d5_statistics/archive/get_stats_by_verbs.py",
"repo_name": "heikalb/thesis-scripts",
"src_encoding": "UTF-8",
"text": "import csv\nfrom collections import defaultdict\nimport re\nimport colloc_measures as cm\n\n\ndef main():\n # Open file of parses\n parse_file = open('../3_parse/morph_parses.csv')\n csv_reader = csv.reader(parse_file)\n\n stem = 'önlen'\n stem2 = 'de'\n\n # Stats\n suffix_counts = defaultdict(int)\n cooccurence_counts = defaultdict(int)\n num_suffixes = 0\n mutual_infos = {}\n t_scores = {}\n dice_coeff = {}\n chi_squared = {}\n\n\n\n # Go through parses\n for row in csv_reader:\n # Skip unparseable words\n if row[2] == 'parse_not_found':\n continue\n\n #if not (row[0].startswith(stem) or row[0].startswith(stem2)):\n # continue\n if not row[0].startswith(stem):\n continue\n\n # Only consider non-null morphemes\n parse = row[1]\n suffixes = [s for s in parse.split('_') if '[' in s or ']' in s]\n\n # Put back 3.Sg null morphemes\n if parse[-4:] == 'A3sg':\n suffixes.append('A3sg')\n\n # Collapse allomorphs\n suffixes = [re.sub(r'\\(.*\\)', '', s) for s in suffixes]\n\n # Count suffix co-occurrences\n num_suffixes += len(suffixes)\n for i in range(len(suffixes)):\n # Updata single suffix count\n suffix_counts[suffixes[i]] += 1\n\n # Update count for co-occurring pair\n for j in range(i + 1, len(suffixes)):\n curr_key = '{0} & {1}'.format(suffixes[i], suffixes[j])\n cooccurence_counts[curr_key] += 1\n\n # Get association measures\n for k in cooccurence_counts:\n morpheme_pair = k.split(' & ')\n morph_1, morph_2 = morpheme_pair[0], morpheme_pair[1]\n\n mutual_infos[k] = (cm.mutual_info(cooccurence_counts, suffix_counts, k, morph_1, morph_2, num_suffixes),\n suffix_counts[morph_1], suffix_counts[morph_2])\n\n t_scores[k] = (cm.t_score(cooccurence_counts, suffix_counts, k, morph_1, morph_2, num_suffixes),\n suffix_counts[morph_1], suffix_counts[morph_2])\n\n dice_coeff[k] = (cm.dice_coeff(cooccurence_counts, suffix_counts, k, morph_1, morph_2),\n suffix_counts[morph_1], suffix_counts[morph_2])\n\n chi_squared[k] = (cm.chi_squared(cooccurence_counts, suffix_counts, k, morph_1, morph_2, num_suffixes),\n suffix_counts[morph_1], suffix_counts[morph_2])\n\n\n\n # Save data\n with open('cooccurrence_count_{0}.csv'.format(stem), 'w') as f:\n csv_writer = csv.writer(f)\n\n for k in cooccurence_counts:\n csv_writer.writerow([k, cooccurence_counts[k]])\n\n with open('suffix_count_{0}.csv'.format(stem), 'w') as f:\n csv_writer = csv.writer(f)\n\n for k in suffix_counts:\n csv_writer.writerow([k, suffix_counts[k]])\n\n with open('mutual_info_{0}.csv'.format(stem), 'w') as f:\n csv_writer = csv.writer(f)\n\n for k in mutual_infos:\n csv_writer.writerow([k, mutual_infos[k][0], mutual_infos[k][1], mutual_infos[k][2]])\n\n with open('t_scores.csv_{0}.csv'.format(stem), 'w') as f:\n csv_writer = csv.writer(f)\n\n for k in t_scores:\n csv_writer.writerow([k, t_scores[k][0], t_scores[k][1], t_scores[k][2]])\n\n with open('dice_coeff.csv_{0}.csv'.format(stem), 'w') as f:\n csv_writer = csv.writer(f)\n\n for k in dice_coeff:\n csv_writer.writerow([k, dice_coeff[k][0], dice_coeff[k][1], dice_coeff[k][2]])\n\n with open('chi_squared.csv_{0}.csv'.format(stem), 'w') as f:\n csv_writer = csv.writer(f)\n\n for k in chi_squared:\n csv_writer.writerow([k, chi_squared[k][0], chi_squared[k][1], chi_squared[k][2]])\n\n\nif __name__ == \"__main__\":\n main()\n exit(0)\n"
},
{
"alpha_fraction": 0.570080041885376,
"alphanum_fraction": 0.578083336353302,
"avg_line_length": 27.4970760345459,
"blob_id": "a016b90fa95b9d45871e07a31d3443ed49a9c476",
"content_id": "ab0f75a4af11fbed1e60676e3b2e94049a791982",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4873,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 171,
"path": "/d1_get_data/get_query_results.py",
"repo_name": "heikalb/thesis-scripts",
"src_encoding": "UTF-8",
"text": "\"\"\"\nMake lemma-based queries on the TNC web interface based on a list of lemmas\nHeikal Badrulhisham <[email protected]>, 2019\n\"\"\"\nimport argparse\nfrom selenium import webdriver\nfrom selenium.common.exceptions import *\nimport time\nfrom selenium.webdriver.support.ui import Select\n\n\ndef sign_in():\n \"\"\"\n Sign into a TNC account\n \"\"\"\n while True:\n try:\n browser.find_element_by_css_selector(\n 'input[placeholder=Email]').send_keys(email)\n\n browser.find_element_by_name('password').send_keys(password)\n browser.find_element_by_css_selector('input[type=submit]').click()\n break\n except NoSuchElementException:\n time.sleep(1)\n\n while browser.find_elements_by_name('password'):\n time.sleep(1)\n\n\ndef open_query():\n \"\"\"\n Open the page containing lemma-based query function on the TNC\n \"\"\"\n while True:\n try:\n browser.find_elements_by_css_selector(\n '.btn-group.col-md-12 .btn.btn-default')[1].click()\n break\n except Exception:\n time.sleep(1)\n\n\ndef submit_query(search_term):\n \"\"\"\n Submit query on the TNC\n :param search_term: term to query on\n \"\"\"\n while True:\n try:\n browser.find_element_by_id('query').send_keys(search_term)\n pos_options = Select(browser.find_element_by_id('type'))\n pos_options.select_by_value('VB')\n browser.find_element_by_id('submit_button').click()\n break\n except Exception:\n time.sleep(1)\n\n\n#\ndef download_file():\n \"\"\"\n Download the data file from the TNC website\n \"\"\"\n while not browser.find_elements_by_id('sonuc_paneli'):\n time.sleep(1)\n\n j = 0\n while (not browser.find_elements_by_id('dizilim_yakinlik_data_tablosu')) or\\\n not browser.find_elements_by_class_name('odd'):\n\n time.sleep(1)\n j += 1\n if j >= 10:\n raise Exception\n\n attmpt = 0\n time.sleep(5)\n\n while True:\n try:\n attmpt += 1\n browser.find_elements_by_css_selector('.buttons-csv')[1].click()\n time.sleep(2)\n break\n except Exception:\n time.sleep(1)\n\n if attmpt >= 5:\n raise Exception\n\n\ndef main(query_terms, start=0, end=-1):\n \"\"\"\n Open browser. Sign in into a TNC account. Iteratively submit queries on the\n TNC based on verbs. 
Download data file of the results of each query.\n Data files will be in the browser's default download folder.\n :param query_terms: list of query terms\n :param start: index of first term to query on\n :param end: index of last term to query on\n \"\"\"\n if end == -1:\n end = len(query_terms)\n\n # Open query page\n browser.maximize_window()\n browser.get(url_1)\n sign_in()\n\n # Submit query and download TSV file\n i = 0\n for search_term in query_terms[start:end]:\n try:\n print('Starting query {0}: '.format(start + i), search_term)\n browser.get(url_2)\n open_query()\n submit_query(search_term)\n download_file()\n print('Done: ', search_term)\n except Exception:\n # Warn about problem with query, create a file for indication\n print('Problem with query: ', search_term)\n decoy_file = open(f'Problem with query_{i}_{search_term}.txt', 'w')\n decoy_file.close()\n i += 1\n\n browser.close()\n\n\nif __name__ == '__main__':\n # Command line arguments\n parse = argparse.ArgumentParser(description='Get query results from TNC')\n\n parse.add_argument('-s', '--start', help='Start index on query term list',\n default=0, type=int)\n\n parse.add_argument('-e', '--end', help='End index on query term list',\n default=-1, type=int)\n\n parse.add_argument('-f', '--file', help='File path of query terms',\n default='query_terms.txt', type=str)\n\n parse.add_argument('-b', '--browser', help='Browser to use',\n default='safari', type=str, choices=['safari'])\n\n parse.add_argument('-u', '--username', help='Username on TNC',\n required=True, type=str)\n\n parse.add_argument('-p', '--password', help='Password of TNC account',\n required=True, type=str)\n\n args = parse.parse_args()\n\n # TNC-related information\n url_1 = \"https://v3.tnc.org.tr/login\"\n url_2 = \"https://v3.tnc.org.tr/basic-query\"\n email = args.username\n password = args.password\n\n # Browser to use\n browsers = {'safari': webdriver.Safari()}\n browser = browsers[args.browser]\n\n # Get query terms\n with open(args.file, 'r') as f:\n query_terms = f.read().split('\\n')\n\n # Run queries\n main(query_terms, start=args.start, end=args.end)\n\n exit(0)\n"
},
{
"alpha_fraction": 0.5636743307113647,
"alphanum_fraction": 0.5709812045097351,
"avg_line_length": 22.950000762939453,
"blob_id": "1035c45681a7f7afc4413ffe46d9ebd9aeca1ffe",
"content_id": "5ea4bd1ecaa427de965b07551332da01b826e6f3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 958,
"license_type": "permissive",
"max_line_length": 87,
"num_lines": 40,
"path": "/d4_parse/archive/reduce_to_verbs.py",
"repo_name": "heikalb/thesis-scripts",
"src_encoding": "UTF-8",
"text": "\"\"\"\nReduce the file of parses to parses of target verbs only.\n\"\"\"\nimport csv\n\n\n# Get position of target verbs in the context windows\ndef get_verb_indices():\n with open('../d2_data/query_results_all_joined_sents.tsv') as f:\n reader = csv.reader(f, delimiter='\\t')\n indices = [int(row[2]) for row in reader]\n\n return indices\n\n\ndef main():\n verb_parses = []\n parses = open('parses_all.txt', 'r').read().split('\\n')\n indices = get_verb_indices()\n\n c = 0\n\n for i in range(len(indices)):\n curr_word_parses = [p for p in parses[i][1:-1].split(', ') if ':Punc' not in p]\n verb_parses.append(curr_word_parses[indices[i]])\n\n if 'Verb' not in curr_word_parses[indices[i]]:\n print(curr_word_parses[indices[i]])\n c += 1\n print(i)\n print(c)\n\n with open('parses_verbs.txt', 'w') as f:\n f.write('\\n'.join(verb_parses))\n\n\nif __name__ == '__main__':\n\n main()\n exit(0)\n"
},
{
"alpha_fraction": 0.5490196347236633,
"alphanum_fraction": 0.5580065250396729,
"avg_line_length": 26.200000762939453,
"blob_id": "7ee1f3b7c506153511612bfb9771bc81fbff4353",
"content_id": "f2acfd37a6e188b3b8082a17bbde799288fe3b41",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1224,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 45,
"path": "/d4_parse/get_exponents.py",
"repo_name": "heikalb/thesis-scripts",
"src_encoding": "UTF-8",
"text": "\"\"\"\nGet the exponents of morphemes from morphological parses in the TNC.\nHeikal Badrulhisam <[email protected]>, 2019\n\"\"\"\nfrom collections import defaultdict\nimport re\n\n\ndef main():\n \"\"\"\n Get the exponents of morphemes from the file of morphological parses of\n verbs and save the exponents in a file.\n \"\"\"\n # Open parse file\n with open('verb_parses.txt', 'r') as f:\n parses = [p.split() for p in f.read().split('\\n')]\n\n # Map morphemes to lists of allomorphs\n morphemes = defaultdict(list)\n\n # Get morphs attached to verbs\n for parse in parses:\n # Get suffixes, exclude stems\n suffixes = re.split(r'[|+]', parse[1])[1:]\n\n for suffix in suffixes:\n if len(suffix.split(':')) == 2:\n exponent = suffix.split(':')[0]\n morpheme = suffix.split(':')[1]\n else:\n exponent = suffix[0]\n morpheme = \"\"\n\n if exponent not in morphemes[morpheme]:\n morphemes[morpheme].append(exponent)\n\n # Save data\n with open('exponents.txt', 'w') as f:\n lines = [k + \": \" + str(morphemes[k]) for k in morphemes]\n f.write('\\n'.join(lines))\n\n\nif __name__ == '__main__':\n main()\n exit(0)\n"
},
{
"alpha_fraction": 0.547630786895752,
"alphanum_fraction": 0.5557749271392822,
"avg_line_length": 27.94285774230957,
"blob_id": "d4d44e7a40b7098a7f6ca8219568dcc8983450a8",
"content_id": "d28ab9f8679c383f2111ca46c6bc3b74b8f34a33",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4076,
"license_type": "permissive",
"max_line_length": 74,
"num_lines": 140,
"path": "/d5_statistics/trigram/stem_trigram_assoc.py",
"repo_name": "heikalb/thesis-scripts",
"src_encoding": "UTF-8",
"text": "\"\"\"\nGet association data on stems and trigrams\nHeikal Badrulhisham <[email protected]>, 2019\n\"\"\"\nimport csv\nimport re\nimport sys\nfrom nltk import ngrams\nfrom collections import defaultdict\nfrom collections import Counter\nsys.path.append('../')\nimport colloc_measures as cm\nimport get_stats\n\n\ndef tally():\n \"\"\"\n Get frequency of stems, trigrams and stem-trigram pairs\n \"\"\"\n for parse in parses:\n # Decompose information on parse file line\n parse = parse.split()\n\n # Get suffixes\n stem = parse[-1]\n suffixes = re.split(suff_boundary, parse[1])[1:]\n suffixes = get_stats.remove_forbidden_suffixes(suffixes)\n suffixes = [re.sub(morph_boundary, '', s) for s in suffixes]\n\n # Get suffix trigrams\n curr_trigrams = ngrams(suffixes, 3)\n\n # Update frequencies\n frequency[stem] += 1\n\n for g in curr_trigrams:\n frequency[g] += 1\n pair_frequency[(stem, g)] += 1\n\n\ndef calc_association():\n \"\"\"\n Calculate risk ratios of stem-trigram pairs\n \"\"\"\n for pair in pair_frequency:\n # Only do for specified trigrams\n if pair[1] not in target_trigrams:\n continue\n\n # Get collocate members\n stem, trigram = pair\n\n # Get total number of stem-trigram pairs\n total = sum(pair_frequency[p] for p in pair_frequency)\n\n # Calculate risk ratio, even in reverse orientation\n args = [frequency[stem], frequency[trigram], pair_frequency[pair],\n total, stem, trigram, pair_frequency]\n\n risk_ratio[pair] = cm.risk_ratio(*args)[0]\n risk_ratio_reverse[pair] = cm.risk_ratio_reverse(*args)[0]\n\n\ndef save_data():\n \"\"\"\n Save risk ratio data in a .csv file\n \"\"\"\n # Save stem-trigram association data\n with open('stem_trigram_rr_.csv', 'w') as f:\n csv_writer = csv.writer(f)\n\n # Write file header\n header = ['stem', 'trigram', 'risk_ratio', 'risk_ratio_reverse',\n 'stem_frequency', 'trigram_frequency', 'pair_frequency']\n\n csv_writer.writerow(header)\n\n # Sort pairs by risk ratio\n pairs = [k for k in pair_frequency if type(k) == tuple and\n len(k) == 2 and k[1] in target_trigrams]\n\n pairs.sort(key=lambda x: risk_ratio[x], reverse=True)\n\n # Write conntent data\n for k in pairs:\n row = [k[0], k[1], risk_ratio[k], risk_ratio_reverse[k],\n *[frequency[s] for s in k], pair_frequency[k]]\n\n csv_writer.writerow(row)\n\n # Save trigrams\n with open('suffix_trigrams_.txt', 'w') as f:\n f.write('\\n'.join([f'{e} {frequency[e]}'\n for e in frequency if type(e) == tuple]))\n\n\nif __name__ == \"__main__\":\n # Open morphological parse file\n with open('../../d4_parse/verb_parses.txt', 'r') as f:\n parses = [p for p in f.read().split('\\n')]\n\n # For counting stem frequency of trigrams\n stems = [p.split()[-1] for p in parses]\n stems = Counter(stems)\n\n # Frequency data\n frequency = defaultdict(int)\n pair_frequency = defaultdict(int)\n\n # Specific trigrams to get data on\n target_trigrams = [('PastPart→Noun', 'P3sg', 'Acc'),\n ('Pass→Verb', 'Inf2→Noun', 'P3sg'),\n ('Neg', 'PastPart→Noun', 'P3sg'),\n ('Pass→Verb', 'PastPart→Noun', 'P3sg'),\n ('FutPart→Noun', 'P3sg', 'Acc'),\n ('Inf2→Noun', 'P3sg', 'Acc'),\n ('Inf2→Noun', 'P3sg', 'Dat'),\n ('Able→Verb', 'Neg', 'Aor'),\n ('PastPart→Noun', 'P3pl', 'Acc'),\n ('PastPart→Noun', 'P3sg', 'Dat')]\n\n # For segmenting parses\n pos = 'Verb'\n suff_boundary = r'[\\|\\+]'\n morph_boundary = r'.*:'\n\n # Store risk ratio values\n risk_ratio = dict()\n risk_ratio_reverse = dict()\n\n # Tally trigrams and stem-trigram collocates\n tally()\n\n # Get risk ratio\n calc_association()\n\n # Save data in files\n save_data()\n\n 
exit(0)\n"
},
{
"alpha_fraction": 0.45498546957969666,
"alphanum_fraction": 0.47144240140914917,
"avg_line_length": 26.91891860961914,
"blob_id": "3500c6b3e633821ecd0b742f2c1e89ecda67e47b",
"content_id": "3514ea7126d832797e565cd93f66c09253aef479",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1043,
"license_type": "permissive",
"max_line_length": 95,
"num_lines": 37,
"path": "/d3_preprocess_data/archive/collect_verbs.py",
"repo_name": "heikalb/thesis-scripts",
"src_encoding": "UTF-8",
"text": "# —*— coding: utf—8 —*—\n\"\"\"\nGather all the verbs (only) from the various query result files\nHeikal Badrulhisham <[email protected]>, 2019\n\"\"\"\nimport csv\n\n\ndef main():\n words = []\n \n for i in range(20):\n with open('../d2_data/query_results/tnc_query_result_{0}.tsv'.format(i)) as f:\n csv_reader = csv.reader(f, delimiter='\\t')\n first_row = True\n\n for row in csv_reader:\n if first_row:\n first_row = False\n continue\n\n if len(row[3].split()) > 1:\n curr_word = ''.join(ch.lower() for ch in row[3].split()[1] if ch.isalpha())\n else:\n curr_word = ''.join(ch.lower() for ch in row[3] if ch.isalpha())\n # curr_word = row[3]\n words.append(curr_word)\n print(curr_word)\n \n save_file = open('../d2_data/all_verbs.txt', 'w')\n save_file.write('\\n'.join(words))\n save_file.close()\n\n\nif __name__ == \"__main__\":\n main()\n exit(0)\n"
},
{
"alpha_fraction": 0.6007511019706726,
"alphanum_fraction": 0.6067363023757935,
"avg_line_length": 36.372806549072266,
"blob_id": "62dbd5ea7e99fde0124c269ccfafe031b2b5cc95",
"content_id": "62944c50a1ad9f9a9f35731c1386c75e6e2bb0d9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8521,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 228,
"path": "/d5_statistics/get_stats.py",
"repo_name": "heikalb/thesis-scripts",
"src_encoding": "UTF-8",
"text": "\"\"\"\nGet association data from morphological parses of verbs extracted from the TNC.\nHeikal Badrulhisham <[email protected]>, 2019\n\"\"\"\nimport colloc_measures as cm\nfrom collections import defaultdict\nimport re\nimport os\nimport csv\n\n\ndef tally(freq, stem, register=''):\n \"\"\"\n Tally frequencies of suffixes and collocate pairs in the dataset.\n :param freq: various types of frequency data\n :param stem: verb stem (if given to narrow down data)\n :param register: register of the subcorpus (if given to narrow down data)\n \"\"\"\n for parse in parses:\n parse = parse.split()\n\n # Stem that appears in the morphological parse\n stem_in_parse = parse[1].split(':')[0]\n # Stem that is determined for the word prior to parsing\n stem_end_parse = parse[3]\n\n # Skip parses for a different stem or wrong parses\n if (stem and stem_in_parse not in stem) or \\\n (pos not in parse[0] and pos not in parse[1]) or \\\n (register and register != parse[2]):\n continue\n\n # Get suffixes, exclude stems. Collapse allomorphs.\n # Remove unneeded suffixes the parser introduced\n suffixes = re.split(suffix_boundary, parse[1])[1:]\n suffixes = remove_forbidden_suffixes(suffixes)\n suffixes = [re.sub(morph_boundary, '', s) for s in suffixes]\n\n # Update frequencies\n update_freq(freq, suffixes, stem_end_parse)\n\n # Report frequencies\n print(f'Stem: {stem}\\tPair types: {len(freq[\"pair\"])}\\t'\n f'Pair instances: {sum([freq[\"pair\"][p] for p in freq[\"pair\"]])}')\n\n\ndef update_freq(freq, suffixes, stem_end_parse):\n \"\"\"\n For a given morphologically parsed word, update frequencies based on each\n morpheme within.\n :param freq: various types of frequency data\n :param suffixes: list of suffixes in a morphological parse\n :param stem_end_parse: stem of the current parse\n \"\"\"\n for i in range(len(suffixes)):\n # Update single suffix frequency\n freq['suffix'][suffixes[i]] += 1\n\n # Update suffix pair cooccurrence frequency\n for j in range(i + 1, len(suffixes)):\n curr_pair = (suffixes[i], suffixes[j])\n freq['pair'][curr_pair] += 1\n\n # Update frequency of the two suffixes being adjacent\n if j - i == 1:\n freq['adjacency'][curr_pair] += 1\n\n # Update verb stem-wise frequency of pairs\n if stem_end_parse not in freq['stem'][curr_pair]:\n freq['stem'][curr_pair].append(stem_end_parse)\n \n\ndef remove_forbidden_suffixes(suffixes):\n \"\"\"\n For a given sequence of suffixes, remove the following:\n -word final 3rd person singular\n Helper method for tally()\n :param suffixes: list of suffixes\n :return: list of suffixes with unneeded suffixes removed\n \"\"\"\n if suffixes[-1] == 'A3sg':\n return [suff for suff in suffixes if ':' in suff] + [suffixes[-1]]\n else:\n return [suff for suff in suffixes if ':' in suff]\n\n\ndef get_association(freq, measure_vals, ci_dict):\n \"\"\"\n Calculate association measurement values for collocate pairs.\n :param freq: various types of frequency data\n :param measure_vals: values of association measurements\n :param ci_dict: confidence intervals on association values\n \"\"\"\n num_suffixes = sum(freq['suffix'][s] for s in freq['suffix'])\n\n for pair in freq['pair']:\n suff_1, suff_2 = pair\n\n for msr in measures:\n args = [freq['suffix'][suff_1], freq['suffix'][suff_2],\n freq['pair'][pair], num_suffixes,\n suff_1, suff_2, freq['pair']]\n\n stat = measures[msr](*args)\n\n if type(stat) == tuple:\n measure_vals[msr][pair] = stat[0]\n ci_dict[msr][pair] = stat[1]\n else:\n measure_vals[msr][pair] = stat\n\n\ndef save_data(freq, 
file_affix, dir_affix, stem, measure_vals, ci_dict):\n \"\"\"\n Save association values in .csv files.\n :param freq: various types of frequency data\n :param file_affix: affix on data file to save (index)\n :param dir_affix: affix on directory of data file (register)\n :param stem: verb stem of the current subdataset (if given)\n :param measure_vals: values of association measurements\n :param ci_dict: onfidence intervals on association values\n \"\"\"\n\n # Create file if it's not alreeady there\n if not os.path.isdir(f'association_stats{dir_affix}'):\n os.mkdir(f'association_stats{dir_affix}')\n\n # Fill up data\n file_path = f'association_stats{dir_affix}/{file_affix}_{stem}' +\\\n f'_association_stats{dir_affix}.csv'\n\n with open(file_path, 'w') as f:\n csv_writer = csv.writer(f)\n\n # Column labels\n first_row = [\"collocate_pair\",\n *[m for m in measure_vals],\n *[f'{k}_confidence_interval_{d}'\n for k in confidence_intervals\n for d in ['left', 'right']],\n 'suffix1_frequency', 'suffix2_frequency',\n 'suffix1-suffix2_frequency',\n 'suffix1-suffix2_adjacent_frequency', 'stem_freq']\n\n csv_writer.writerow(first_row)\n\n # Fill in row values, sort by risk ratio\n sorted_pairs = sorted(freq['pair'], reverse=True,\n key=lambda x: measure_vals['risk_ratio'][x])\n\n for k in sorted_pairs:\n row = [k,\n *[measure_vals[m][k] for m in measure_vals],\n *[ci_dict[c][k][i] for c in ci_dict for i in [0, 1]],\n *[freq['suffix'][suff] for suff in k],\n freq['pair'][k], freq['adjacency'][k], len(freq['stem'][k])]\n\n csv_writer.writerow(row)\n\n\ndef main(stem=\"\", file_affix=\"\", dir_affix='', register=''):\n \"\"\"\n Get frequencies of suffixes and collocate pairs and other frequency data\n from morphological parses and filter out data based on stem and register (if\n specified).\n Then calculate association values on each collocate pair.\n Then save association values and other frequency data in .csv files.\n :param stem: stem for filtering the data (empty string for no filter)\n :param file_affix: affix on data file to be saved (index)\n :param dir_affix: affix on directory of data file (register)\n :param register: register for filtering the data(empty string for no filter)\n \"\"\"\n # Dictionaries for association values and confidence intervals\n measure_vals = dict(zip(measures, [dict() for m in measures]))\n ci_dict = dict(zip(confidence_intervals,\n [dict() for m in confidence_intervals]))\n \n # Various types of frequency data to collect\n freq = {'suffix': defaultdict(int), 'pair': defaultdict(int),\n 'adjacency': defaultdict(int), 'stem': defaultdict(list)}\n\n # Tally suffixes and suffix collocates\n tally(freq, stem, register)\n\n # Get association measures\n get_association(freq, measure_vals, ci_dict)\n\n # Save stats in files\n save_data(freq, file_affix, dir_affix, stem, measure_vals, ci_dict)\n\n\nif __name__ == \"__main__\":\n # Run main operations in main() interatively here by verb stem and register\n\n # Association measurements and measurement confidence intervals\n measures = {'risk_ratio': cm.risk_ratio,\n 'risk_ratio_reverse': cm.risk_ratio_reverse,\n 'odds_ratio': cm.odds_ratio,\n 'mutual_information': cm.mutual_info,\n 'dice_coefficient': cm.dice_coeff,\n 't_score': cm.t_score,\n 'chi_squared': cm.chi_sq}\n \n # Association measurements with confidence intervals\n confidence_intervals = ['risk_ratio', 'risk_ratio_reverse']\n \n # Information for segmenting parse data\n pos = 'Verb'\n suffix_boundary = r'[\\|\\+]'\n morph_boundary = r'.*:'\n\n # Get parses\n with 
open('../d4_parse/verb_parses.txt', 'r') as f:\n parses = f.read().split('\\n')\n \n # Get verb stems and indices. Use empty string for selecting all verb stems\n verb_stem_file = open('../d0_prep_query_terms/freq_dict_verbs.txt', 'r')\n stems = [\"\"] + verb_stem_file.read().split('\\n')\n i = 0\n\n # Get statistics for each verb type\n for stem in stems:\n main(stem, f'00{i}'[-3:])\n # colloc_stats(stem, f'00{i}'[-3:], dir_affix='_written', register='w')\n # colloc_stats(stem, f'00{i}'[-3:], dir_affix='_spoken', register='s')\n i += 1\n\n exit(0)\n"
},
{
"alpha_fraction": 0.5967101454734802,
"alphanum_fraction": 0.6080544590950012,
"avg_line_length": 24.926469802856445,
"blob_id": "5512d1ed9359be362d853fa3a0907eb514ff9ff9",
"content_id": "b0e10f272d5b6b7acb06f4d2d54406a516369051",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1763,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 68,
"path": "/d3_preprocess_data/archive/itu_spellcheck_2.py",
"repo_name": "heikalb/thesis-scripts",
"src_encoding": "UTF-8",
"text": "\"\"\"\nSend list of word windows to ITU for spellchecking\nHeikal Badrulhisham <[email protected]>, 2019\n\"\"\"\nimport pipeline_caller\nimport csv\nfrom collections import defaultdict\nimport pickle\nimport os\n\n# For calling ITU pipeline\ncaller = pipeline_caller.PipelineCaller()\ntool_name = \"spellcheck\"\napi_token = \"sQj6zxcVt7JzWXHNTdRu3QRzc6i8KZz7\"\n\n# Dictionary of past spellcheck results\nif os.path.isfile('spellcheck_history.pkl'):\n spellcheck_history = pickle.load(open('spellcheck_history.pkl', 'rb'))\nelse:\n spellcheck_history = defaultdict(str)\n\n\ndef spellcheck(word):\n if word in spellcheck_history:\n return spellcheck_history[word]\n else:\n sc = caller.call(tool_name, word, api_token).replace('\\r\\n', '')\n spellcheck_history[word] = sc\n return sc\n\n\ndef main():\n # Gather d2_data from csv\n results = []\n data = open('../d2_data/query_results_all_joined_sents.csv')\n reader = csv.reader(data)\n sents = []\n indices = []\n\n for r in reader:\n sents.append(r[0])\n indices.append(r[2])\n\n # Save indices of target verbs\n with open('../d2_data/target_indices.txt', 'w') as f:\n f.write('\\n'.join(indices))\n\n # Send contexts to ITU for spellchecking\n i = 0\n start = 0\n for s in sents[start:]:\n try:\n spellchecked_words = [spellcheck(w) for w in s.split()]\n results.append(' '.join(spellchecked_words))\n i += 1\n except ConnectionResetError:\n True\n\n if i % 5000 == 0 or i == len(sents[start:]):\n with open('../d2_data/all_sents_spellchecked{0}.txt'.format(i), 'w') as f:\n f.write('\\n'.join(results))\n\n pickle.dump(spellcheck_history, open(\"spellcheck_history.pkl\", 'wb'))\n\n\nif __name__ == '__main__':\n main()\n exit(0)\n"
},
{
"alpha_fraction": 0.6146488189697266,
"alphanum_fraction": 0.6179283857345581,
"avg_line_length": 30.27350425720215,
"blob_id": "6ab454b1555a639695451bb78ad93052f5e2e0af",
"content_id": "db965d12ffa980f06a96cc7bc56b6cde14fcd709",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3659,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 117,
"path": "/d4_parse/reduce_to_verbs.py",
"repo_name": "heikalb/thesis-scripts",
"src_encoding": "UTF-8",
"text": "\"\"\"\nReduce the file of parses to parses of target verbs only, and separately\nsave parsing errors.\nHeikal Badrulhisham <[email protected]>, 2019\n\"\"\"\nimport os\nimport csv\nfrom collections import Counter\n\n\ndef get_indices(file_path):\n \"\"\"\n Get position of target verbs in the context windows in a data file.\n :param file_path: path of data file\n :return: list of indices of target verbs\n \"\"\"\n\n with open(file_path) as f:\n reader = csv.reader(f, delimiter='\\t')\n indices = [int(row[2]) for row in reader]\n\n return indices\n\n\ndef get_register(file_path):\n \"\"\"\n Get register of data points in a data file.\n :param file_path: path of data file\n :return: list of registers for data points ('w' or 'r')\n \"\"\"\n with open(file_path) as f:\n reader = csv.reader(f, delimiter='\\t')\n indices = [row[-1] for row in reader]\n\n return indices\n\n\ndef get_parses(parse_file_names, parse_dir):\n \"\"\"\n Get verb parses and parsing errors.\n :param parse_file_names: list of filenames of parse files\n :param parse_dir: the directory of parse files\n :return: a list of verb parses and a list of parsing errors\n \"\"\"\n verb_parses = []\n parse_errors = []\n\n # Process each parse file\n for file_name in parse_file_names:\n file_path = os.path.join(parse_dir, file_name)\n parses = open(file_path, 'r').read().split('\\n')\n\n # Get indices of target verbs\n fnum, stem = file_name.split('_')[:-1]\n indices = get_indices(f'../d2_data/joined/{fnum}_{stem}_joined.tsv')\n registers = get_register(f'../d2_data/joined/{fnum}_{stem}_joined.tsv')\n\n # Get target verbs and parses with errors\n for i in range(len(indices)):\n cur_parses = [p for p in parses[i][1:-1].split(', ')\n if ':Punc' not in p]\n\n # Divide data into verb parses and parsing errors\n if 'UNK' not in cur_parses[indices[i]]:\n new_line = f'{cur_parses[indices[i]]} {registers[i]} {stem}'\n verb_parses.append(new_line)\n else:\n parse_errors.append(cur_parses[indices[i]])\n\n return verb_parses, parse_errors\n\n\ndef save_data(verb_parses, parse_errors):\n \"\"\"\n Save verb parses and parsing errors in .txt files.\n :param verb_parses: list of verb parses\n :param parse_errors: list of parsing errors\n \"\"\"\n with open('verb_parses.txt', 'w') as f:\n f.write('\\n'.join(verb_parses))\n\n parse_error_counter = Counter(parse_errors)\n parse_errors = sorted(list(set(parse_errors)),\n key=lambda x: parse_error_counter[x], reverse=True)\n\n with open('parse_errors.txt', 'w') as f:\n lines = [f'{k} {parse_error_counter[k]}' for k in parse_errors]\n f.write('\\n'.join(lines))\n\n\ndef main():\n \"\"\"\n From files of morphological parses of the entire dataset, derive files\n of morphological parses of target verbs only. Save derived parse files\n in a separate directory.\n \"\"\"\n # Get individual verb parse files\n parse_dir = 'parses/'\n parse_files = os.listdir(parse_dir)\n parse_file_names = [f for f in parse_files if 'parses.txt' in f]\n parse_file_names.sort()\n\n # Get verb parses and parsing errors\n verb_parses, parse_errors = get_parses(parse_file_names, parse_dir)\n\n # Save data and parse errors\n save_data(verb_parses, parse_errors)\n\n # Display summary statistics\n print('Number of verb parses: ', len(verb_parses))\n print('Number of parse errors: ', len(parse_errors))\n print('Net verb parses: ', len(parse_errors) - len(parse_errors))\n\n\nif __name__ == '__main__':\n main()\n exit(0)\n"
},
{
"alpha_fraction": 0.8888888955116272,
"alphanum_fraction": 0.8888888955116272,
"avg_line_length": 6.400000095367432,
"blob_id": "8eade1a12890950c240b369920e7280cf4adf735",
"content_id": "f4f9a5192cedf6de915a9ccd2412cf65e85bd211",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 36,
"license_type": "permissive",
"max_line_length": 10,
"num_lines": 5,
"path": "/requirements.txt",
"repo_name": "heikalb/thesis-scripts",
"src_encoding": "UTF-8",
"text": "selenium\nnumpy\nscipy\nnltk\nmatplotlib"
},
{
"alpha_fraction": 0.5838384032249451,
"alphanum_fraction": 0.5979797840118408,
"avg_line_length": 20.565217971801758,
"blob_id": "0485642963a741b38e8c154b3c5cb34c687a5704",
"content_id": "b95477d86b8fb64eed32ffa38167e527509c53ea",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 495,
"license_type": "permissive",
"max_line_length": 65,
"num_lines": 23,
"path": "/d3_preprocess_data/archive/itu_spellcheck_words.py",
"repo_name": "heikalb/thesis-scripts",
"src_encoding": "UTF-8",
"text": "\"\"\"\nSend list of verbs to ITU for spellchecking and correction\nHeikal Badrulhisham <[email protected]>, 2019\n\"\"\"\nimport pipeline_caller\n\n\ndef main():\n caller = pipeline_caller.PipelineCaller()\n tool_name = \"spellcheck\"\n api_token = \"sQj6zxcVt7JzWXHNTdRu3QRzc6i8KZz7\"\n result = ''\n\n text = open('../d2_data/all_verbs.txt', 'r').read()\n result += caller.call(tool_name, text, api_token)\n\n with open('../d2_data/all_verbs_spellchecked.txt', 'w') as f:\n f.write(result)\n\n\nif __name__ == '__main__':\n main()\n exit(0)"
},
{
"alpha_fraction": 0.4112405776977539,
"alphanum_fraction": 0.41363948583602905,
"avg_line_length": 27.60784339904785,
"blob_id": "3a9e78d098560c3d6e91c05ab4067c7461da2041",
"content_id": "16bedda96865cb4a20b7dcec7603df1f782de3b4",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2980,
"license_type": "permissive",
"max_line_length": 95,
"num_lines": 102,
"path": "/d3_preprocess_data/archive/add_spell_suggestion.py",
"repo_name": "heikalb/thesis-scripts",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nAdd manually identified spelling substitutions to the verb spelling correction suggestion file.\nHeikal Badrulhisham, 2019\n\"\"\"\n\nimport re\n# Spelling substitutions\nsuggestions = {\n r'uo([mdl].+)?$': 'uyor',\n r'ıo([mdl].+)?$': 'ıyor',\n r'io([mdl].+)?$': 'iyor',\n r'üo([mdl].+)?$': 'üyor',\n r'iyo([mdl].+)?$': 'iyor',\n r'ıyo([mdl].+)?$': 'ıyor',\n r'uyo([mdl].+)?$': 'uyor',\n r'üyo([mdl].+)?$': 'üyor',\n\n r'iom$': 'iyorum',\n r'ıom$': 'ıyorum',\n r'iyom$': 'iyorum',\n r'ıyom$': 'ıyorum',\n r'uyom$': 'uyorum',\n r'üyom$': 'üyorum',\n\n r'iyon$': 'iyorsun',\n r'uyon$': 'uyorsun',\n r'ıyon$': 'ıyorsun',\n r'üyon$': 'üyorsun',\n\n r'iyoz$': 'iyoruz',\n r'ıyoz$': 'ıyoruz',\n r'uyoz$': 'uyoruz',\n r'üyoz$': 'üyoruz',\n\n r'ioz$': 'iyoruz',\n r'ıoz$': 'ıyoruz',\n r'iyoz$': 'iyoruz',\n r'ıyoz$': 'ıyoruz',\n r'uyoz$': 'uyoruz',\n r'üyoz$': 'üyoruz',\n\n r'micek': 'meyecek',\n r'mıcek': 'mayacak',\n r'[uı]cak': 'acak',\n r'icek': 'ecek',\n r'iycek': 'eyecek',\n r'ıycak': 'ayacak',\n\n r'[ıau]caz$': 'acağız',\n r'[ie]cez$': 'eceğiz',\n r'micez$': 'meyeceğiz',\n r'mıcaz$': 'mayacağız',\n r'iycez$': 'eyeceğiz',\n r'ıycaz$': 'ayacağız',\n\n r'[au]cam$': 'acağım',\n r'[ie]cem$': 'eceğim',\n r'mıcam$': 'mayacağım',\n r'micem$': 'meyeceğim',\n r'iycem': 'eyecem',\n r'ıycam': 'ayacam',\n\n r'e+cen$': 'eceksin',\n r'[au]+can$': 'acaksın',\n \n r'̇z': 'z',\n r'̇r': 'r',\n r'̇̇̇ṁ': 'm',\n r'̇̇̇ẏ': 'y'\n}\n\n\ndef main():\n # Current spellcheck file\n pres_spelling = open('verb_spellcheck.txt', 'r').read().split('\\n')\n new_spelling = []\n\n # Go through file lines\n for line in pres_spelling:\n # Get the error word\n target = line.split()[0]\n # Default line to save\n new_line = line\n\n # Create a new line with a new correction candidate if there's a match\n for sug in suggestions:\n if re.search(sug, target):\n new_suggestion = re.sub(sug, suggestions[sug], target)\n new_line = ' '.join([target, new_suggestion, ' '.join(line.split()[1:])])\n break\n\n new_spelling.append(new_line)\n\n # Save modifications onto a new file\n with open('verb_spelling_suggestions_.txt', 'w') as f:\n f.write('\\n'.join(new_spelling))\n\n\nif __name__ == '__main__':\n main()\n exit()\n"
},
{
"alpha_fraction": 0.6241970062255859,
"alphanum_fraction": 0.640256941318512,
"avg_line_length": 31.20689582824707,
"blob_id": "102b570b07eab3568b52f383220173c99f659900",
"content_id": "93470b89b08ef35f7602905d637a6e44e4cbd6f8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 934,
"license_type": "permissive",
"max_line_length": 81,
"num_lines": 29,
"path": "/d3_preprocess_data/archive/get_spelling_correspondences.py",
"repo_name": "heikalb/thesis-scripts",
"src_encoding": "UTF-8",
"text": "import difflib\n\nspellings = open('verb_spelling_suggestions.txt', 'r').read().split('\\n')\nchanges = []\n\nfor sp in spellings:\n if '#' in sp or '@' in sp or not sp:\n continue\n\n error, correction = sp.split()[0:2]\n seqmatch = difflib.SequenceMatcher(None, error, correction)\n p1, p2, lgth = seqmatch.find_longest_match(0, len(error), 0, len(correction))\n common_string = error[p1: p2+lgth]\n\n error_sub = error[len(common_string):]\n correction_sub = correction[len(common_string):]\n\n if len(error_sub) < 3:\n error_sub = error[len(common_string)-2:]\n correction_sub = correction[len(common_string)-2:]\n\n if not error_sub == correction_sub:\n changes.append((error_sub, correction_sub))\n\nchanges = list(set(changes))\nchanges = [\"r'{0}$': '{1}'\".format(ch[0], ch[1]) for ch in changes]\nchanges.sort()\nwith open('spelling_correspondences.txt', 'w') as f:\n f.write(',\\n'.join(changes))\n"
},
{
"alpha_fraction": 0.7778580188751221,
"alphanum_fraction": 0.7877912521362305,
"avg_line_length": 40.6315803527832,
"blob_id": "61a7995d36e3837b8a78fc0f3907c820cf7265fb",
"content_id": "41ec11e8f5089732245d2639c047643b6e2218c5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 5538,
"license_type": "permissive",
"max_line_length": 99,
"num_lines": 133,
"path": "/README.md",
"repo_name": "heikalb/thesis-scripts",
"src_encoding": "UTF-8",
"text": "# Formulaicity of Affixes in Turkish\nThis project contains scripts written for extracting and processing data on \nsuffixes in the Turkish National Corpus for a Master's in Linguistics thesis. \nThe thesis can be accessed here: http://summit.sfu.ca/item/19822\n\n## Motivation\nThe main idea of this project is formulaicity, which is the notion that some\nsequences of multiple items (e.g., words) psycholinguistically function as \nunits, despite their apparent decompositionality. This thesis project examines \nwhether formulaicity also occurs among affixes, using the Turkish National \nCorpus (TNC) as a dataset. Although formulaicity is a psycholinguistic concept, \nthis project looks for an evidence for it in distributional data in a corpus.\n\nThe main questions explored in this project are:\n\n* Does affix formulaicity exist in the corpus?\n* Is affix formulaicity a gradient or discrete phenomenon?\n* Does formulaicity also apply to affixes and stems?\n\nOne contribution of this study is the method used to capture formulaicity \nbetween affixes. For this purpose, this study uses a measurement of association\ncalled risk ratio, which is likely have never been used to measure collocation\nin corpus studies. \n\n\n## Content and structure\n\nThe following are the directories in this repository, which sequentially \ncorrespond to the data collection, processing and analysis steps in this \nproject:\n\n* d0_prep_query_terms\n * Extract highest frequency verb stems from *A Frequency Dictionary of \n Turkish*. \n* d1_get_data\n * Use the extracted verb stems to iteratively make queries on the TNC and\n download the corpus data file after each query.\n* d2_data\n * Store corpus data files.\n* d3_preprocess_data\n * Apply spell correction to corpus data, correct formatting errors in data \n files and reduce data points to single sentences.\n* d4_parse\n * Apply morphological parsing to words in the dataset. Reduce the data to\n the parses of target words.\n* d5_statistics\n * Calculate risk ratio values for pairs of cooccurring suffixes in the \n dataset. Values of other association measures were also calculated. Run \n additional analyses on the risk ratio data.\n* d6_graphics\n * Create graphs of corpus data and risk ratio data for the thesis document.\n\n## Technology used\nProgramming languages used:\n* Python 3.6\n* Java\n\nPython dependencies:\n* selenium\n* numpy\n* scipy\n* nltk\n* matplotlib\n\nThe above packages can be individually installed with pip by entering, for example, \n`pip install nltk` in a command line. Alternatively, install the packages en masse\nby entering `pip install -r requirements.txt` in a command line.\n\nJava dependency:\n* [Zemberek-NLP](https://github.com/ahmetaa/zemberek-nlp)\n\nThis repository contains a JAR file of Zemberek-NLP (/zemberek-full.jar), which\nmay not be the current version. To get a possibly updated version of the JAR file\nor for instructions on generating it yourself, refer to the README of \nZemberek-NLP's repository (linked above). Once a Zemberek-NLP JAR file has been\ninstalled or generated, it may need to be added to the build path of a project\ncontaining /d4_parse/ParseMorphemes.java, which depends on Zemberek-NLP\nfor morphological parsing.\n\n## Usage\n### Using available data\nThe risk ratio dataset analyzed in the thesis are readily available in this \nrepository. 
The data are in the following files, which also contain values of\nother association measures:\n\n* d5_statistics/association_stats/000__association_stats.csv\n* d5_statistics/association_stats_spoken/000__association_stats_spoken.csv\n* d5_statistics/association_stats_written/000__association_stats_written.csv\n* d5_statistics/trigram/stem_trigram_rr.csv\n\nThese files can be downloaded and opened in a spreadsheet software.\n\n### Generating data yourself\nMost of the data files including ones containing query results, morphological\nparses and association values by verbs are not available in this repository \ndue to memory restriction. To generate data files yourself (including the ones\nlisted in the previous section) run the following programs in the following\norder:\n\n1. d0_prep_query_terms/get_freq_dict_verbs.py\n2. d1_get_data/get_query_results.py\n3. d3_preprocess_data/collect_queries.py\n4. d4_parse/reduce_to_verbs.py\n5. d4_parse/src/ParseMorphemes.java\n6. d5_statistics/get_stats.py\n7. d5_statistics/risk_ratio_analysis.py\n8. d5_statistics/trigram/stem_trigram_assoc.py\n\nAdditional instructions:\n\n* For running the Java file in step 5, you may need to add the \nJAR file for Zemberek-NLP to the build path first. The above programs can be run\nfrom an integrated development environment (IDE) or the command line. \n* However, for the program in step 2, you have to run it in a command line to specify your username\nand password of your TNC account (which you need to obtain beforehand). The \nminimum you need to enter is `python3 get_query_results.py -u exusername -p expassword`,\nfor example. You may tinker with d1_get_data/get_query_results.py so that you\ncan run it in an IDE with your user information.\n* In step 2, when the program downloads data files, the data files will be saved\nto the default download folder of the browser. You need to move those files to the directory\nd2_data/query_results_freq_dict so that it could be used by programs in the next steps.\n* In step 2, the program is currently designed to work only with Safari (the browser).\n\n## Project status\nThis project is complete and is no longer in active development.\n\n## Author\nHeikal Badrulhisham <[email protected]>\n\n## License\nMIT License \n\nCopyright © 2019 Heikal Badrulhisham "
},
{
"alpha_fraction": 0.5820156335830688,
"alphanum_fraction": 0.618431568145752,
"avg_line_length": 32.66128921508789,
"blob_id": "0346b5fe8fa45214e6b9713026650b591127fc1e",
"content_id": "3601dbe4ace5b4d162aa52a0b94d5fc4acc1117a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6261,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 186,
"path": "/d5_statistics/colloc_measures.py",
"repo_name": "heikalb/thesis-scripts",
"src_encoding": "UTF-8",
"text": "\"\"\"\nImplementation of various measures of association.\nHeikal Badrulhisham <[email protected]>, 2019\n\"\"\"\n\nimport math\n\n\ndef get_freq(s1, s2, pairs):\n \"\"\"\n Get the frequencies in a contingency table for a collocate pair. Substitute\n 0.5 for 0 frequencies.\n :param s1: the first suffix\n :param s2: the second suffix\n :param pairs: the collocate pair\n :return: tuple of frequencies in a contingency table\n \"\"\"\n a = max(sum(pairs[k] for k in pairs if s1 == k[0] and s2 == k[1]), 0.5)\n b = max(sum(pairs[k] for k in pairs if s1 == k[0] and s2 != k[1]), 0.5)\n c = max(sum(pairs[k] for k in pairs if s1 != k[0] and s2 == k[1]), 0.5)\n d = max(sum(pairs[k] for k in pairs if s1 != k[0] and s2 != k[1]), 0.5)\n\n return a, b, c, d\n\n\ndef get_freq_(f_s1, f_s2, f_s1s2, total):\n \"\"\"\n Get the frequencies in a contingency table for a collocate pair (alternative\n method).\n :param s1: the first suffix\n :param s2: the second suffix\n :param pairs: the collocate pair\n :return: frequencies in a contingency table\n \"\"\"\n a = f_s1s2\n b = max(f_s1 - f_s1s2, 0.5)\n c = max(f_s2 - f_s1s2, 0.5)\n d = max(total - a - b - c, 0.5)\n\n return a, b, c, d\n\n\ndef mutual_info(f_s1, f_s2, f_s1s2, total, *misc):\n \"\"\"\n Get the mutual information of a collocate pair.\n :param f_s1: frequency of the first suffix\n :param f_s2: frequency of the second suffix\n :param f_s1s2: frequency of the two suffixes together\n :param total: total number of suffixes in the corpus\n :return: mutual information of the collocate pair\n \"\"\"\n p_1 = float(f_s1) / float(total)\n p_2 = float(f_s2) / float(total)\n p_1_2 = float(f_s1s2) / float(total)\n\n return math.log(p_1_2 / (p_1 * p_2), 2)\n\n\ndef t_score(f_s1, f_s2, f_s1s2, total, *misc):\n \"\"\"\n Get the t-score of a collocate pair.\n :param f_s1: frequency of the first suffix\n :param f_s2: frequency of the second suffix\n :param f_s1s2: frequency of the two suffixes together\n :param total: total number of suffixes in the corpus\n :return: t-score of the collocate pair\n \"\"\"\n x_bar = float(f_s1s2) / float(total)\n p_1 = float(f_s1) / float(total)\n p_2 = float(f_s2) / float(total)\n mu = p_1*p_2\n s_sq = x_bar*(1-x_bar)\n\n return (x_bar - mu)/math.sqrt(s_sq/total)\n\n\ndef dice_coeff(f_s1, f_s2, f_s1s2, *misc):\n \"\"\"\n Get the Dice coefficient of a collocate pair.\n :param f_s1: frequency of the first suffix\n :param f_s2: frequency of the second suffix\n :param f_s1s2: frequency of the two suffixes together\n :return: Dice coefficient of the collocate pair\n \"\"\"\n return 2*f_s1s2/(f_s1 + f_s2)\n\n\ndef chi_sq(f_s1, f_s2, f_s1s2, total, s1, s2, pairs):\n \"\"\"\n Get the chi-squared of a collocate pair.\n :param f_s1: frequency of the first suffix\n :param f_s2: frequency of the second suffix\n :param f_s1s2: frequency of the two suffixes together\n :param total: total number of suffixes in the corpus\n :param s1: the first suffix\n :param s2: the second suffix\n :param pairs: the collocate pair\n :return: chi-squared of the collocate pair\n \"\"\"\n f_u = f_s1\n f_0u = total - f_u\n f_v = f_s2\n f_0v = total - f_v\n f_u_v = f_s1s2\n f_0u_v = sum(pairs[k] for k in pairs if s1 != k[0] and s2 == k[1])\n f_u_0v = sum(pairs[k] for k in pairs if s1 == k[0] and s2 != k[1])\n f_0u_0v = sum(pairs[k] for k in pairs if s1 != k[0] and s2 != k[1])\n\n return (f_u_v - f_u*f_v)**2/f_u*f_v + (f_u_0v - f_u*f_0v)**2/f_u*f_0v \\\n + (f_0u_v-f_0u*f_v)**2/f_0u*f_v + (f_0u_0v-f_0u*f_0v)**2/f_0u*f_0v\n\n\ndef risk_ratio(f_s1, f_s2, f_s1s2, 
total, s1, s2, pairs):\n \"\"\"\n Get the risk ratio of a collocate pair, with the first element as the\n conditioning variable.\n :param f_s1: frequency of the first suffix\n :param f_s2: frequency of the second suffix\n :param f_s1s2: frequency of the two suffixes together\n :param total: total number of suffixes in the corpus\n :param s1: the first suffix\n :param s2: the second suffix\n :param pairs: the collocate pair\n :return: risk ratio of the collocate pair\n \"\"\"\n a, b, c, d = get_freq(s1, s2, pairs)\n rr = (a / (a + b)) / (c / (c + d))\n ci = risk_ratio_ci(a, b, c, d, rr)\n\n return rr, ci, (b == 0.5 or c == 0.5 or d == 0.5)\n\n\ndef risk_ratio_reverse(f_s1, f_s2, f_s1s2, total, s1, s2, pairs):\n \"\"\"\n Get the risk ratio reverse of a collocate pair. with the second element\n as the conditioning variable.\n :param f_s1: frequency of the first suffix\n :param f_s2: frequency of the second suffix\n :param f_s1s2: frequency of the two suffixes together\n :param total: total number of suffixes in the corpus\n :param s1: the first suffix\n :param s2: the second suffix\n :param pairs: the collocate pair\n :return: risk ratio reverse of the collocate pair\n \"\"\"\n a, b, c, d = get_freq(s1, s2, pairs)\n rr = (a / (a + c)) / (b / (b + d))\n ci = risk_ratio_ci(a, b, c, d, rr)\n\n return rr, ci\n\n\ndef risk_ratio_ci(a, b, c, d, rr):\n \"\"\"\n Get the risk ratio of a confidence interval value.\n :param a: the upper left cell of the contingency table\n :param b: the lower left cell of the contingency table\n :param c: the upper right cell of the contingency table\n :param d: the lower right cell of the contingency table\n :param rr: the risk ratio value\n :return: lower and upper bounds of the confidence intervals\n \"\"\"\n stand_err = math.sqrt(1/a + 1/b + 1/c + 1/d)\n ci_r = math.log(rr) + 1.96*stand_err\n ci_l = math.log(rr) - 1.96*stand_err\n\n return math.exp(ci_l), math.exp(ci_r)\n\n\ndef odds_ratio(f_s1, f_s2, f_s1s2, total, s1, s2, pairs):\n \"\"\"\n Get the odds-ratio of a collocate pair.\n :param f_s1: frequency of the first suffix\n :param f_s2: frequency of the second suffix\n :param f_s1s2: frequency of the two suffixes together\n :param total: total number of suffixes in the corpus\n :param s1: the first suffix\n :param s2: the second suffix\n :param pairs: the collocate pair\n :return: odds ratio of the collocate pair\n \"\"\"\n a, b, c, d = get_freq(s1, s2, pairs)\n condp_s2_s1 = a/(a + b)\n condp_s2_nots1 = c/(c + d)\n\n return (condp_s2_s1/(1-condp_s2_s1))/(condp_s2_nots1/(1-condp_s2_nots1))\n"
},
{
"alpha_fraction": 0.5984121561050415,
"alphanum_fraction": 0.6177638173103333,
"avg_line_length": 29.53535270690918,
"blob_id": "c18e44eee2d4694b3a525b2263f32e983d30be06",
"content_id": "929450e6ee5562c74cec954be3d79466a050f8a3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6046,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 198,
"path": "/d6_graphics/graphing.py",
"repo_name": "heikalb/thesis-scripts",
"src_encoding": "UTF-8",
"text": "\"\"\"\nCreate figures on specific subsets of the data.\nHeikal Badrulhisham <[email protected]>, 2019\n\"\"\"\nimport csv\nimport os\nimport matplotlib.pyplot as pyplot\nimport math\n\n\ndef graph_it(title='', xlabel='', ylabel='', fname='', xticks=None, xlim=(),\n ylim=(), dim=(6, 4)):\n \"\"\"\n Create figures based on given parameters.\n :param title: title of the figure\n :param xlabel: label for the x-axis\n :param ylabel: label for the y-axis\n :param fname: name of the image file to be saved\n :param xticks: x-values that will be labeled in the graph\n :param xlim: interval of x-values to be displayed\n :param ylim: interval of y-values to be displayed\n :param dim: dimension of the image\n \"\"\"\n\n # Set mandatory graph parameters\n pyplot.gcf().set_size_inches(dim)\n pyplot.grid(axis='y', alpha=0.5)\n font = {'fontname': 'Cormorant'}\n # pyplot.title(title, fontsize='11', ** font)\n pyplot.xlabel(xlabel, fontsize='11', ** font)\n pyplot.ylabel(ylabel, fontsize='11', ** font)\n pyplot.yticks(fontsize='11', ** font)\n\n # Set optional graph parameters\n if xlim:\n pyplot.xlim(xlim[0], xlim[1])\n if ylim:\n pyplot.ylim(ylim[0], ylim[1])\n if xticks:\n pyplot.xticks(xticks, rotation=90, fontsize='11', ** font)\n\n # Final processing\n pyplot.tight_layout()\n pyplot.savefig(fname, dpi=300)\n pyplot.close()\n\n\ndef plot_num_datapoints():\n \"\"\"\n Plot number of datapoints by verb types for Figure 3.\n \"\"\"\n # Get query result files of verbs, sorted by frequency ranking\n data_dir = '../d2_data/query_results_freq_dict/'\n filenames = os.listdir(data_dir)\n filenames.sort()\n\n # x- and y-axis labels\n stems = []\n nums_data = []\n\n # Get number of datapoint per verb\n for filename in filenames:\n # Get frequency\n with open(os.path.join(data_dir, filename), 'r') as f:\n num_data = len([r for r in csv.reader(f)][1:])\n nums_data.append(num_data)\n\n # Get stem from file name\n stem = filename.split('_')[2]\n stems.append(stem)\n\n # Create graph\n title = 'Distribution of datapoints by verb types'\n xlabel = 'Verb types'\n ylabel = 'Number of datapoints'\n fname = 'num_datapoints.png'\n xticks = [s for s in stems if stems.index(s) % 30 == 0]\n\n pyplot.bar(stems, nums_data)\n graph_it(title, xlabel, ylabel, fname, xticks)\n\n\ndef plot_rr():\n \"\"\"\n Plot risk ratio values for Figure 5 (1).\n \"\"\"\n # Get risk ratios from main dataset\n risk_ratios = [round(float(r[1]), 2) for r in all_data]\n\n # Create graph\n title = 'Distribution of risk ratio values'\n xlabel = 'Risk ratio (higher values not shown)'\n ylabel = 'Number of collocate pairs'\n fname = 'RR.png'\n xticks = [1*i for i in range(17)]\n bin_edges = [0.5*i for i in range(36)]\n\n pyplot.hist(risk_ratios, bin_edges, histtype='bar', edgecolor='w', alpha=.8)\n graph_it(title, xlabel, ylabel, fname, xticks)\n\n\ndef plot_rrci():\n \"\"\"\n Plot risk ratio confidence interval lower bounds for Figure 5 (2).\n \"\"\"\n # Get risk ratio lower bounds from main dataset\n rr_ci = [round(float(r[8]), 2) for r in all_data]\n\n # Create graph\n title = 'Distribution of risk ratios (confidence interval lower bounds)'\n xlabel = 'Risk ratio confidence interval lower bounds ' \\\n '(higher values not shown)'\n ylabel = 'Number of collocate pairs'\n fname = 'RR_ci.png'\n xticks = [1*i for i in range(17)]\n bin_edges = [0.5 * i for i in range(36)]\n\n pyplot.hist(rr_ci, bin_edges, histtype='bar', edgecolor='w', alpha=0.8)\n graph_it(title, xlabel, ylabel, fname, xticks)\n\n\ndef plot_logrr():\n \"\"\"\n Plot log risk ratios 
for Figure 6.\n \"\"\"\n # Get log risk ratio from main dataset\n log_rr = [round(math.log(float(r[1])), 2) for r in all_data]\n\n # Create graph\n title = 'Distribution of log risk ratio'\n xlabel = 'Log risk ratio'\n ylabel = 'Number of collocate pairs'\n fname = 'RR_log.png'\n xticks = [i for i in range(-8, 12)]\n bin_edges = [i*0.5 for i in range(-16, 24)]\n\n pyplot.hist(log_rr, bin_edges, histtype='bar', edgecolor='w', alpha=.8)\n graph_it(title, xlabel, ylabel, fname, xticks)\n\n\ndef plot_integrity():\n \"\"\"\n Plot integrity ratios for Figure 7.\n \"\"\"\n # Get integrity ratios from main datset\n integrity = [float(r[-2])/float(r[-3]) for r in all_data if float(r[1]) > 1]\n\n # Create graph\n title = 'Distribution of collocate pair integrity'\n xlabel = 'Integrity'\n ylabel = 'Number of collocate pairs'\n fname = 'trigram_integrity.png'\n xticks = [i*0.1 for i in range(0, 11)]\n bin_edges = [i * 0.1 for i in range(0, 11)]\n\n pyplot.hist(integrity, bin_edges, histtype='bar', edgecolor='w', alpha=.8)\n graph_it(title, xlabel, ylabel, fname, xticks)\n\n\ndef plot_stem_trigram_rr():\n \"\"\"\n Plot risk ratios of stem-trigram pairs for Figure 8.\n \"\"\"\n # Get risk ratio of stem-trigrams\n with open('../d5_statistics/trigram/stem_trigram_rr.csv', 'r') as f:\n risk_ratios = [r[2] for r in csv.reader(f)][1:]\n risk_ratios = [round(float(e), 2) for e in risk_ratios]\n\n # Create graph\n title = 'Distribution of risk ratio of stem-trigram pairs'\n xlabel = 'Risk ratio (higher values not shown)'\n ylabel = 'Number of collocate pairs'\n fname = 'stem_trigram_rr.png'\n xticks = [1*i for i in range(11)]\n bin_edges = [0.5*i for i in range(21)]\n\n pyplot.hist(risk_ratios, bin_edges, histtype='bar', edgecolor='w', alpha=.8)\n graph_it(title, xlabel, ylabel, fname, xticks)\n\n\nif __name__ == '__main__':\n # Get main dataset\n data_file = '../d5_statistics/association_stats/000__association_stats.csv'\n\n with open(data_file, 'r') as f:\n all_data = [r for r in csv.reader(f)][1:]\n all_data = [r for r in all_data\n if float(r[12]) >= 100 and float(r[13]) >= 100]\n\n # Plot data\n plot_num_datapoints()\n plot_rr()\n plot_rrci()\n plot_logrr()\n plot_integrity()\n plot_stem_trigram_rr()\n\n exit(0)\n"
},
{
"alpha_fraction": 0.6049228310585022,
"alphanum_fraction": 0.6136837601661682,
"avg_line_length": 31.391891479492188,
"blob_id": "674ca9399ec52aeaf007ab76ce78eaccf854932a",
"content_id": "90acbf020521d56b5c6ef99dfaed762e0d516f64",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2397,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 74,
"path": "/d5_statistics/trigram/get_formulaic_trigrams.py",
"repo_name": "heikalb/thesis-scripts",
"src_encoding": "UTF-8",
"text": "\"\"\"\nFind trigrams in which both constituent bigrams are associated.\nHeikal Badrulhisham <[email protected]>, 2019\n\"\"\"\nimport csv\nfrom nltk import ngrams\n\n\ndef main():\n \"\"\"\n Get trigrams where both constituent bigrams are associated, and save\n the data.\n \"\"\"\n # Get suffix trigrams\n with open('suffix_trigrams.txt', 'r') as f:\n trigram_lines = f.read().split('\\n')\n\n # Get risk ratio of suffix pairs\n with open('../association_stats/000__association_stats.csv', 'r') as f:\n data = [r for r in csv.reader(f)][1:]\n pair_risk_ratios = dict(zip([r[0] for r in data],\n [float(r[1]) for r in data]))\n\n # Get risk ratios of stem-trigrams\n saved_trigrams = filter_trigrams(trigram_lines, pair_risk_ratios)\n\n # Save trigram data, sorted by trigram frequency\n saved_trigrams.sort(reverse=True, key=lambda x: x[-2])\n\n with open('formulaic_trigrams.csv', 'w') as f:\n csv.writer(f).writerows(saved_trigrams)\n\n\ndef filter_trigrams(trigram_lines, pair_risk_ratios):\n \"\"\"\n Go through list of trigrams and filter ones where both constituent trigrams\n are associated.\n :param trigram_lines: lines of trigrams and their frequencies\n :param pair_risk_ratios: risk ratios of suffix pairs\n :return: trigrams where both constituent trigrams are associated.\n \"\"\"\n saved_trigrams = []\n\n for trigram_line in trigram_lines:\n # Get information from trigram file line\n trigram, trigram_freq = trigram_line.split(') ')\n trigram_freq = int(trigram_freq)\n\n # Form tuples from trigram strings\n trigram = trigram[1:].split(', ')\n trigram = tuple([suffix[1:-1] for suffix in trigram])\n\n # Get constituent bigrams within the trigram\n bigrams = [bigram for bigram in ngrams(list(trigram), 2)]\n\n try:\n # Get risk ratio of each bigram\n curr_rr = [pair_risk_ratios[str(bigram)] for bigram in bigrams]\n\n # Save trigrams where the constituent trigams are associated\n if all([rr > 1 for rr in curr_rr]):\n new_line = [*list(trigram), curr_rr, trigram_freq,\n min(curr_rr[0]/curr_rr[1], curr_rr[1]/curr_rr[0])]\n\n saved_trigrams.append(new_line)\n except KeyError:\n continue\n\n return saved_trigrams\n\n\nif __name__ == '__main__':\n main()\n exit(0)\n"
},
{
"alpha_fraction": 0.6107370257377625,
"alphanum_fraction": 0.614852249622345,
"avg_line_length": 28.700000762939453,
"blob_id": "251a01e13248a33f148ed35b1fd515c9b7cf25c0",
"content_id": "b1d15930b3971b67a53c4130cab75aa3e5395c04",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5356,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 180,
"path": "/d3_preprocess_data/collect_queries.py",
"repo_name": "heikalb/thesis-scripts",
"src_encoding": "UTF-8",
"text": "# —*— coding: utf—8 —*—\n\"\"\"\nCreate one file or multiple separate files with target verbs and their context\nwindows are joined.\nHeikal Badrulhisham <[email protected]>, 2019\n\"\"\"\nimport os\nimport csv\nfrom spelling_sub import suggestions\nimport re\n\n\ndef apply_correction(target_word):\n \"\"\"\n Apply spelling correction based on spell correction suggestion file.\n :param target_word: word to be spell-corrected\n :return: spell-corrected word\n \"\"\"\n # Normalize the word\n target_word = target_word.lower()\n\n # Apply spelling transformations\n for sug in suggestions:\n if re.search(sug, target_word):\n return re.sub(sug, suggestions[sug], target_word)\n\n return target_word\n\n\ndef to_one_sentence(sentence, target_i):\n \"\"\"\n Given a sentence that may span multiple sentence boundaries, return the\n sentence containing the target word only and the index of the target word.\n :param sentence: the sentence\n :param target_i: index of the target verb\n :return: pair containing the reduced sentence and index of the target verb\n \"\"\"\n # Tokenize the sentence\n tokens = sentence.split()\n\n # Sub-sentence to the left and right of the target verb\n left_sent = []\n right_sent = []\n\n # Sentence boundary characters\n sentence_punct = ['.', '!', '?']\n\n # Build the right sub-sentence\n for t in tokens[target_i:]:\n right_sent.append(t)\n\n if any(punct in t for punct in sentence_punct):\n break\n\n # Build the left sub-sentence\n i = target_i - 1\n\n while i >= 0:\n if any(punct in tokens[i] for punct in sentence_punct):\n break\n\n left_sent.insert(0, tokens[i])\n i -= 1\n\n # Join the entire sentence containing the target verb\n return ' '.join(left_sent + right_sent), len(left_sent)\n\n\ndef depunctuate(st):\n \"\"\"\n Remove punctuation from a string.\n :param st: the string\n :return: depuntuated string\n \"\"\"\n # Normalize the string\n st = st.strip().lower().split()\n new_sent = []\n\n for w in st:\n # Remove non-letters/numbers\n new_word = [ch for ch in w if ch.isalnum()]\n\n # Exclude purely punctuation words\n if new_word:\n new_sent.append(''.join(new_word))\n\n return ' '.join(new_sent)\n\n\ndef fix_columns(left, mid, right, stem):\n \"\"\"\n Fix errors related to sentence/word columns in the dataset.\n :param left: the left column\n :param mid: the middle word column\n :param right: the right column\n :param stem: the stem of the target verb\n :return: the fixed columns\n \"\"\"\n # Tokenize the middle column if there are multiple words in it\n mid_words = mid.split()\n\n # If if there are multiple words in middle column, fix it fix occurrences\n # of repeated words in the surrounding columns\n if len(mid_words) > 1:\n if left.endswith(mid):\n left = left.replace(mid, '').strip()\n elif right.startswith(mid):\n right = right.replace(mid, '').strip()\n\n if [w for w in mid_words if stem in w]:\n mid = [w for w in mid_words if stem in w][0]\n else:\n mid = mid_words[0]\n\n # Return fixed columns\n return left, mid, right\n\n\ndef save_file(fname, data_list):\n \"\"\"\n Save processed data.\n :param fname: filename to save in\n :param data_list: processed dataset\n \"\"\"\n with open(fname, 'w') as f:\n csv_writer = csv.writer(f, delimiter='\\t')\n\n for r in data_list:\n csv_writer.writerow(r)\n\n\ndef main():\n \"\"\"\n For each query result file, do the following. Apply spell correction to data\n within. Fix column-related formatting errors. Join context windows and\n target words into single sentences. 
Save modified version of data file in\n a separate directory.\n \"\"\"\n save_rows = []\n data_dir = '../d2_data/query_results_freq_dict/'\n\n filenames = os.listdir(data_dir)\n filenames.sort()\n\n for filename in filenames:\n # Get data windows\n with open(os.path.join(data_dir, filename), 'r') as f:\n csv_reader = csv.reader(f, delimiter='\\t')\n rows = [r for r in csv_reader]\n\n curr_stem = filename.split('_')[2]\n print(filename)\n\n # Process data windows, skip first rows\n for row in rows[1:]:\n # Orthographic processing, get the register of the datum\n left_span = depunctuate(row[2])\n mid_word = apply_correction(depunctuate(row[3]))\n right_span = depunctuate(row[4])\n register = row[0]\n\n # Fix main columns with multiple words. \n # Fix cases of main words duplicated in context windows.\n columns = fix_columns(left_span, mid_word, right_span, curr_stem)\n left_span, mid_word, right_context = columns\n\n # Join context windows, reduce to one sentence\n full_sentence = ' '.join([left_span, mid_word, right_context])\n fixed_sent = to_one_sentence(full_sentence, len(left_span.split()))\n single_sent, target_i = fixed_sent\n save_rows.append([single_sent, mid_word, target_i, register])\n\n # Save data in individual files by verb\n file = f'../d2_data/joined/{filename[:3]}_{curr_stem}_joined.tsv'\n save_file(file, save_rows)\n\n\nif __name__ == \"__main__\":\n main()\n exit(0)\n"
}
] | 29 |
siudakp/geometric_shapes_recognition
|
https://github.com/siudakp/geometric_shapes_recognition
|
4d6bad49dcef8a51256888d151011d6084f91f05
|
4e4ef5ac2a831e8b423fb25e0bff70900fef0a8d
|
3d521af1d6c7a987d22e95dfbb68e29c3dbb7579
|
refs/heads/master
| 2020-03-10T19:39:20.660566 | 2018-04-14T21:05:57 | 2018-04-14T21:05:57 | 129,552,464 | 4 | 2 | null | null | null | null | null |
[
{
"alpha_fraction": 0.8157894611358643,
"alphanum_fraction": 0.8157894611358643,
"avg_line_length": 75,
"blob_id": "513e5e6ef842c1146b6c68ac16d9e37fa86b551a",
"content_id": "7e34591a58bb45f563a66b25fa97648a882fea5c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 152,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 2,
"path": "/README.md",
"repo_name": "siudakp/geometric_shapes_recognition",
"src_encoding": "UTF-8",
"text": "# geometric_shapes_recognition\nA program that recognizes geometric shapes in an image such as triangle, circle, ellipse, square, rectangle or pentagon.\n"
},
{
"alpha_fraction": 0.5852090120315552,
"alphanum_fraction": 0.5852090120315552,
"avg_line_length": 26.454545974731445,
"blob_id": "300f1fb1c462271b67d908d27d33290909f0f562",
"content_id": "10c1018033617804aa08591b79d060b046839555",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 311,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 11,
"path": "/classifier.py",
"repo_name": "siudakp/geometric_shapes_recognition",
"src_encoding": "UTF-8",
"text": "from leaf import *\r\n\r\nclass Classifier:\r\n def classify(self, row, node):\r\n if isinstance(node, Leaf):\r\n return node.leaf\r\n\r\n if node.question.match(row):\r\n return self.classify(row, node.true_branch)\r\n else:\r\n return self.classify(row, node.false_branch)"
},
{
"alpha_fraction": 0.5221052765846252,
"alphanum_fraction": 0.524210512638092,
"avg_line_length": 31.5,
"blob_id": "03969467aee7b51d1bde45654fa2b4b6d5703756",
"content_id": "0551383598dfc31c1de5fb04dda9a6cdab661e1e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 475,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 14,
"path": "/tree_presenter.py",
"repo_name": "siudakp/geometric_shapes_recognition",
"src_encoding": "UTF-8",
"text": "from leaf import *\r\n\r\nclass TreePresenter:\r\n def display(self, node, spacing = \"\"):\r\n if isinstance(node, Leaf):\r\n leaf = list(node.leaf.keys())[0]\r\n print(spacing + \"Leaf: \" + leaf)\r\n return\r\n\r\n print(spacing + str(node.question))\r\n print(spacing + '-> True')\r\n self.display(node.true_branch, spacing + \" \")\r\n print(spacing + '-> False')\r\n self.display(node.false_branch, spacing + \" \")\r\n\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.5118567943572998,
"alphanum_fraction": 0.5422818660736084,
"avg_line_length": 28.643835067749023,
"blob_id": "0a697b7e2a1fdb390f8d7e9fbe1f474b9dee7b6f",
"content_id": "480f7ad177865cdba8fccbeb5bd13f1f53ba0840",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2235,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 73,
"path": "/shape_detector.py",
"repo_name": "siudakp/geometric_shapes_recognition",
"src_encoding": "UTF-8",
"text": "import cv2\r\n\r\nclass ShapeDetector:\r\n def __init__(self, classifier, tree):\r\n self.classifier = classifier\r\n self.tree = tree\r\n\r\n def thinness(self, contour):\r\n area = cv2.contourArea(contour)\r\n perimeter = cv2.arcLength(contour, True)\r\n\r\n return perimeter ** 2 / float(area)\r\n\r\n def extent(self, contour):\r\n area = cv2.contourArea(contour)\r\n rect = cv2.minAreaRect(contour)\r\n\r\n ((x, y), (w, h), r) = rect\r\n rect_area = w * h\r\n\r\n return area / float(rect_area)\r\n\r\n def shapeName(self, counts):\r\n total = sum(counts.values()) * 1.0\r\n probs = {}\r\n\r\n for label in counts.keys():\r\n probs[label] = str(int(counts[label] / total * 100))\r\n\r\n return probs\r\n\r\n def drawContours(self, img, contours):\r\n cv2.drawContours(img, contours, -1, (255, 0, 0), 1)\r\n\r\n def putText(self, img, contour, text):\r\n M = cv2.moments(contour)\r\n cX = int(M[\"m10\"] / M[\"m00\"])\r\n cY = int(M[\"m01\"] / M[\"m00\"])\r\n\r\n cv2.putText(img, str(text), (cX - 20, cY - 20),\r\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, (150, 150, 0), 2)\r\n\r\n def detect(self, img):\r\n img = cv2.imread(img)\r\n\r\n imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n ret, thresh = cv2.threshold(imgray, 127, 255, 0)\r\n\r\n image, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\n contour_list = []\r\n\r\n for contour in contours:\r\n peri = cv2.arcLength(contour, True)\r\n approx = cv2.approxPolyDP(contour, 0.01 * peri, True)\r\n contour_list.append(contour)\r\n\r\n vertices = len(approx)\r\n extent = self.extent(contour)\r\n thinness = self.thinness(contour)\r\n\r\n #print(vertices)\r\n #print(extent)\r\n #print(thinness)\r\n\r\n row = [vertices, thinness, extent, 'Label']\r\n prediction = self.classifier.classify(row, self.tree)\r\n shape = list(prediction.keys())[0]\r\n\r\n self.drawContours(img, contour_list)\r\n self.putText(img, contour, shape)\r\n\r\n cv2.imshow('Geometric Shapes Detector', img)\r\n cv2.waitKey(0)"
},
{
"alpha_fraction": 0.620312511920929,
"alphanum_fraction": 0.621874988079071,
"avg_line_length": 28.380952835083008,
"blob_id": "b2621bbabf1930face980a62aa06f382024bbd5a",
"content_id": "26605c03d80f1052939e5aa8c2b2d3e7a6081584",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 640,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 21,
"path": "/tree_builder.py",
"repo_name": "siudakp/geometric_shapes_recognition",
"src_encoding": "UTF-8",
"text": "from leaf import *\r\nfrom decision_node import *\r\n\r\nclass TreeBuilder:\r\n def __init__(self, gain, partitioner, tree_splitter):\r\n self.gain = gain\r\n self.partitioner = partitioner\r\n self.tree_splitter = tree_splitter\r\n\r\n def build(self, rows):\r\n info_gain, question = self.tree_splitter.find_split(rows)\r\n\r\n if info_gain == 0:\r\n return Leaf(rows)\r\n\r\n true_rows, false_rows = self.partitioner.partition(rows, question)\r\n\r\n true_branch = self.build(true_rows)\r\n false_branch = self.build(false_rows)\r\n\r\n return DecisionNode(question, true_branch, false_branch)\r\n\r\n"
},
{
"alpha_fraction": 0.5567010045051575,
"alphanum_fraction": 0.5567010045051575,
"avg_line_length": 32.5,
"blob_id": "c5ee68c974935a5714ce9a8a98618df08fb2a269",
"content_id": "791bdf3ab90ce36adc9b10ce1870b7179a3669fc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 485,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 14,
"path": "/partitioner.py",
"repo_name": "siudakp/geometric_shapes_recognition",
"src_encoding": "UTF-8",
"text": "class Partitioner:\r\n def partition(self, dataset, question):\r\n \"\"\"Partitions a dataset. For each row in the dataset, check if it matches the question.\r\n If so, add it to true rows, otherwise, add it to false rows.\"\"\"\r\n\r\n true_rows, false_rows = [], []\r\n\r\n for row in dataset:\r\n if question.match(row):\r\n true_rows.append(row)\r\n else:\r\n false_rows.append(row)\r\n\r\n return true_rows, false_rows\r\n\r\n"
},
{
"alpha_fraction": 0.3654257655143738,
"alphanum_fraction": 0.4711168110370636,
"avg_line_length": 32.88059616088867,
"blob_id": "70560be6f0335786fde4c37590e0a8e4dab39457",
"content_id": "347b6ca75ec1f2d551205b2d1be27ef5e9fdda47",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2337,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 67,
"path": "/main.py",
"repo_name": "siudakp/geometric_shapes_recognition",
"src_encoding": "UTF-8",
"text": "import sys\r\nfrom partitioner import *\r\nfrom gini import *\r\nfrom tree_splitter import *\r\nfrom tree_builder import *\r\nfrom tree_presenter import *\r\nfrom classifier import *\r\nfrom shape_detector import *\r\n\r\ntraining_set_1 = [[3, 22, 0.5, 'Triangle'],\r\n [4, 16, 1, 'Square'],\r\n [4, 17.58, 1, 'Rectangle'],\r\n [5, 15.9, 0.69, 'Pentagon'],\r\n [14, 14, 0.79, 'Circle'],\r\n [8, 32.24, 0.79, 'Ellipse']]\r\n\r\ntraining_set_2 = [[3, 22, 0.5, 'Triangle'],\r\n [3, 31.64, 0.51, 'Triangle'],\r\n [4, 16, 1, 'Square'],\r\n [4, 18.11, 1, 'Rectangle'],\r\n [4, 16.17, 1, 'Rectangle'],\r\n [5, 15.9, 0.69, 'Pentagon'],\r\n [5, 16.2, 0.69, 'Pentagon'],\r\n [14, 14, 0.79, 'Circle'],\r\n [16, 13.97, 0.79, 'Circle'],\r\n [12, 22.46, 0.79, 'Ellipse'],\r\n [15, 14.99, 0.76, 'Ellipse']]\r\n\r\ntraining_set_3 = [[3, 22, 0.5, 'Triangle'],\r\n [3, 31.64, 0.51, 'Triangle'],\r\n [4, 16, 1, 'Square'],\r\n [4, 17.58, 1, 'Rectangle'],\r\n [4, 18.11, 1, 'Rectangle'],\r\n [4, 16.17, 1, 'Rectangle'],\r\n [5, 15.9, 0.69, 'Pentagon'],\r\n [5, 16.2, 0.69, 'Pentagon'],\r\n [14, 14, 0.79, 'Circle'],\r\n [16, 13.97, 0.79, 'Circle'],\r\n [8, 26.46, 0.79, 'Ellipse'],\r\n [8, 32.24, 0.79, 'Ellipse'],\r\n [12, 22.46, 0.79, 'Ellipse'],\r\n [12, 14.27, 0.78, 'Ellipse'],\r\n [12, 18.49, 0.78, 'Ellipse'],\r\n [12, 16.92, 0.78, 'Ellipse'],\r\n [15, 14.99, 0.76, 'Ellipse']]\r\n\r\npartitioner = Partitioner()\r\nlabel_counter = LabelCounter()\r\ngini_test = Gini(label_counter)\r\ntree_splitter = TreeSplitter(gini_test, partitioner)\r\ntree_builder = TreeBuilder(gini_test, partitioner, tree_splitter)\r\ntree_presenter = TreePresenter()\r\nclassifier = Classifier()\r\n\r\nmy_tree = tree_builder.build(training_set_3)\r\nprint(\"***** Decision Tree *****\")\r\ntree_presenter.display(my_tree)\r\nshapeDetector = ShapeDetector(classifier, my_tree)\r\n\r\ntry:\r\n sys.argv[1]\r\nexcept IndexError:\r\n img = 'images/shapes1.jpg'\r\nelse:\r\n img = 'images/' + sys.argv[1]\r\n\r\nshapeDetector.detect(img)\r\n"
},
{
"alpha_fraction": 0.5659340620040894,
"alphanum_fraction": 0.569858729839325,
"avg_line_length": 35.52941131591797,
"blob_id": "e4e8286971faf052a29a3f86b4250075aebe2e3f",
"content_id": "a726ce238c13c63a1c7f5d48ddcb4a134edcc76b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1274,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 34,
"path": "/tree_splitter.py",
"repo_name": "siudakp/geometric_shapes_recognition",
"src_encoding": "UTF-8",
"text": "from question import *\r\n\r\nclass TreeSplitter:\r\n \"\"\"Find the best question by iterating over every\r\n attribute and calculating the information gain.\"\"\"\r\n\r\n def __init__(self, gini, partitioner):\r\n self.gini = gini\r\n self.partitioner = partitioner\r\n\r\n def find_split(self, dataset):\r\n best_info_gain = 0\r\n best_question = None\r\n entropy = self.gini.entropy(dataset)\r\n number_of_features = len(dataset[0]) - 1\r\n\r\n for column in range(number_of_features):\r\n unique_features = set([row[column] for row in dataset])\r\n\r\n for feature in unique_features:\r\n question = Question(column, feature)\r\n true_rows, false_rows = self.partitioner.partition(dataset, question)\r\n\r\n # Skip a split when it doesn't divide the dataset.\r\n if len(true_rows) == 0 or len(false_rows) == 0:\r\n continue\r\n \r\n # Calculate the information gain from the split.\r\n info_gain = self.gini.info_gain(true_rows, false_rows, entropy)\r\n\r\n if info_gain >= best_info_gain:\r\n best_info_gain, best_question = info_gain, question\r\n\r\n return best_info_gain, best_question"
},
{
"alpha_fraction": 0.4591194987297058,
"alphanum_fraction": 0.46855345368385315,
"avg_line_length": 20.85714340209961,
"blob_id": "bd98bb4aed2120f76c0eb976400f00fc8c87d362",
"content_id": "80b058276a894cea3739b6a2b72dbf074844404b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 318,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 14,
"path": "/class_counter.py",
"repo_name": "siudakp/geometric_shapes_recognition",
"src_encoding": "UTF-8",
"text": "class LabelCounter:\r\n def count(self, rows):\r\n \"\"\"Counts the number of each type in a dataset.\"\"\"\r\n counts = {}\r\n\r\n for row in rows:\r\n label = row[-1]\r\n\r\n if label not in counts:\r\n counts[label] = 0\r\n\r\n counts[label] += 1\r\n\r\n return counts"
},
{
"alpha_fraction": 0.5345557332038879,
"alphanum_fraction": 0.5345557332038879,
"avg_line_length": 26.040000915527344,
"blob_id": "e005f15ac381aaf60c1536602bc67b0b3de1a118",
"content_id": "5b541e144f103a58c3f95da43139411bd235d5ac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 709,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 25,
"path": "/question.py",
"repo_name": "siudakp/geometric_shapes_recognition",
"src_encoding": "UTF-8",
"text": "class Question:\r\n header = [\"vertices\", \"thinness\", \"extent\", \"label\"]\r\n\r\n def __init__(self, column, value):\r\n self.column = column\r\n self.value = value\r\n\r\n def is_numeric(self, value):\r\n return isinstance(value, int) or isinstance(value, float)\r\n\r\n def match(self, attribute):\r\n value = attribute[self.column]\r\n\r\n if self.is_numeric(value):\r\n return value >= self.value\r\n else:\r\n return value == self.value\r\n\r\n def __repr__(self):\r\n condition = \"==\"\r\n\r\n if self.is_numeric(self.value):\r\n condition = \">=\"\r\n\r\n return \"Is {} {} {}?\".format(self.header[self.column], condition, self.value)\r\n\r\n\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.5879120826721191,
"alphanum_fraction": 0.5879120826721191,
"avg_line_length": 26.66666603088379,
"blob_id": "c305f930a93aacae9f23ee8eed7c98b97466fc64",
"content_id": "1a0ee880ff16e38b1575dd7aa59a223f0538a76b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 182,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 6,
"path": "/leaf.py",
"repo_name": "siudakp/geometric_shapes_recognition",
"src_encoding": "UTF-8",
"text": "from class_counter import *\r\n\r\nclass Leaf:\r\n def __init__(self, rows):\r\n self.label_counter = LabelCounter()\r\n self.leaf = self.label_counter.count(rows)\r\n\r\n\r\n\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.5877659320831299,
"alphanum_fraction": 0.5917553305625916,
"avg_line_length": 32.272727966308594,
"blob_id": "5e22ba175349f1fdd67fac495510fa7c20b0ce0a",
"content_id": "63179019e0fb159a4e056278c8d31a51e837a7a1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 752,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 22,
"path": "/gini.py",
"repo_name": "siudakp/geometric_shapes_recognition",
"src_encoding": "UTF-8",
"text": "import numpy as np\r\n\r\nclass Gini:\r\n def __init__(self, label_counter):\r\n self.label_counter = label_counter\r\n\r\n def entropy(self, rows):\r\n labels = self.label_counter.count(rows)\r\n number_of_rows = len(rows)\r\n entropy = 0\r\n\r\n for label in labels:\r\n prop_of_label = labels[label] / float(number_of_rows)\r\n entropy = prop_of_label * np.log2(prop_of_label)\r\n\r\n return -entropy\r\n\r\n def info_gain(self, left, right, entropy):\r\n \"\"\"Information Gain. The uncertainty of the starting node, minus the weighted impurity of two child nodes.\"\"\"\r\n p = float(len(left)) / (len(left) + len(right))\r\n\r\n return entropy - p * self.entropy(left) - (1 - p) * self.entropy(right)"
}
] | 12 |
ttronline/Poem-Jack-and-Jill
|
https://github.com/ttronline/Poem-Jack-and-Jill
|
0003ba983eec575cc29036e5390b63f4e98f432a
|
8ba2f59fa4f5473e7eda30c3b5f896ee9a93d33d
|
c0c943b14b9fa9d87820dec086d045fbb4955aa2
|
refs/heads/main
| 2023-01-23T15:57:20.414429 | 2020-12-05T13:47:54 | 2020-12-05T13:47:54 | 318,799,450 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6468213796615601,
"alphanum_fraction": 0.6821392774581909,
"avg_line_length": 34.39285659790039,
"blob_id": "e6264e8eb68a4979c5bc55cd9ff0f5b7681f3359",
"content_id": "e231edc9dad83b8b02952910e4d25f02756d5ae2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 991,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 28,
"path": "/poem.py",
"repo_name": "ttronline/Poem-Jack-and-Jill",
"src_encoding": "UTF-8",
"text": "from tkinter import *\n\nroot = Tk()\npart1 = Label(root, text=\"Jack and Jill, went up the hill\",fg=\"Red\",bg=\"Yellow)\npart2 = Label(root, text=\"To fetch a pail of water, Jack fell down\",fg=\"Red\",bg=\"Yellow)\npart3 = Label(root, text=\"And broke his crown, And Jill came tumbling after.\",fg=\"Red\",bg=\"Yellow)\npart4 = Label(root, text=\"Up Jack got, And home did trot\",fg=\"Red\",bg=\"Yellow)\npart5 = Label(root, text=\"As fast as he could caper, Went to bed\",fg=\"Red\",bg=\"Yellow)\npart6 = Label(root, text=\"To mend his head, With vinegar and brown paper.\",fg=\"Red\",bg=\"Yellow)\npart7 = Label(root, text=\"---THE END---\")\n\npart1.config(font = (\"Arial Black\",16))\npart2.config(font = (\"Arial Black\",16))\npart3.config(font = (\"Arial Black\",16))\npart4.config(font = (\"Arial Black\",16))\npart5.config(font = (\"Arial Black\",16))\npart6.config(font = (\"Arial Black\",16))\npart7.config(font = (\"Arial\",16))\n\npart1.pack()\npart2.pack()\npart3.pack()\npart4.pack()\npart5.pack()\npart6.pack()\npart7.pack()\n\nroot.mainloop()\n"
},
{
"alpha_fraction": 0.7303370833396912,
"alphanum_fraction": 0.7303370833396912,
"avg_line_length": 43.5,
"blob_id": "eb5b2879d1d86977e010d7aaccad2da2f4b945a2",
"content_id": "296246ce8ce1086fc5fe5c3dcac09a61e6a85b60",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 89,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 2,
"path": "/README.md",
"repo_name": "ttronline/Poem-Jack-and-Jill",
"src_encoding": "UTF-8",
"text": "# Poem-Jack-and-Jill\nThis is a Python Application, of the famous poem \" Jack and Jill \".\n"
}
] | 2 |
DiptiBarnwal/Car-Mileage-Prediction
|
https://github.com/DiptiBarnwal/Car-Mileage-Prediction
|
d44cb59364e33da4e98adcc75e74678346572d09
|
7ef6c79c4fbf3cfb8dd68f64bcb92a834441db82
|
cbff4baccc4392a15364a67300d5af3e4fcd77b7
|
refs/heads/main
| 2023-08-31T19:42:22.342133 | 2021-11-02T08:42:34 | 2021-11-02T08:42:34 | 423,766,353 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6569169759750366,
"alphanum_fraction": 0.6803162097930908,
"avg_line_length": 14.277777671813965,
"blob_id": "c7636521bb4d1161f5dca6220df92d203a37def0",
"content_id": "f074dc50352dae0b5d8a2ed2e5b301935050abe1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6327,
"license_type": "no_license",
"max_line_length": 233,
"num_lines": 414,
"path": "/Car_Mileage_Pred_LR.py",
"repo_name": "DiptiBarnwal/Car-Mileage-Prediction",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np \nfrom sklearn.linear_model import LinearRegression\nimport pandas as pd \nimport matplotlib.pyplot as plt \nimport seaborn as sns\nget_ipython().run_line_magic('matplotlib', 'inline')\nimport statsmodels.formula.api as smf\nimport statsmodels.api as sm\n\n\n# In[2]:\n\n\nfrom google.colab import drive\ndrive.mount('/content/drive')\n\n\n# In[5]:\n\n\n# reading the CSV file into pandas dataframe\ndf = pd.read_csv(\"/content/drive/My Drive/Regression Models_Mahesh Anand/car_data.csv\") \n\n\n# In[6]:\n\n\ndf.head()\n\n\n# In[7]:\n\n\ndf.columns\n\n\n# In[36]:\n\n\ndf['gear'].value_counts()\n\n\n# In[37]:\n\n\nplt.scatter(df['gear'],df['mpg'])\n\n\n# In[8]:\n\n\ndf=df.drop('Unnamed: 0',axis=1)\ndf.head()\n\n\n# In[ ]:\n\n\ndf.head()\n\n\n# In[ ]:\n\n\nsns.pairplot(df,diag_kind='kde')\n\n\n# In[9]:\n\n\n#Create dummy columns to categorical columns\nB1=pd.get_dummies(df['cyl'])\nB2=pd.get_dummies(df['gear'])\nB3=pd.get_dummies(df['carb'])\nB=pd.concat([df,B1,B2,B3],axis=1)\nB.head()\n\n\n# In[10]:\n\n\nB_update=B.drop(['cyl','gear','carb'],axis=1)\nB_update.head()\n\n\n# In[12]:\n\n\ndf['gear'].value_counts()\n\n\n# In[ ]:\n\n\nB_update.columns\n\n\n# In[13]:\n\n\nB_update.columns=['mpg','disp','hp','drat','wt','qsec','vs','am','c4','c6','c8','g3','g4','g5','cb1','cb2','cb3','cb4','cb6','cb8']\nB_update.head()\n\n\n# In[14]:\n\n\nX=B_update.drop('mpg',axis=1)\nY=B_update['mpg']\nX.head()\n\n\n# In[ ]:\n\n\ncols = list(X.columns)\nX = X[cols]\nX.head()\n\n\n# In[15]:\n\n\nxc =sm.add_constant(X)\n\n\n# In[17]:\n\n\nmodel = sm.OLS(Y,xc).fit()\nmodel.summary()\n\n\n# In[40]:\n\n\n#cols = list(X.columns)\n#X = X[cols]\nxc = sm.add_constant(X_final)\nmodel = sm.OLS(Y,xc).fit()\nmodel.summary()\n\n\n# In[30]:\n\n\np = pd.Series(model.pvalues.values[1:],index = cols) \nmax(p)\n\n\n# In[31]:\n\n\np.idxmax()\n\n\n# In[ ]:\n\n\np = pd.Series(model.pvalues.values[1:],index = cols) \np\n\n\n# In[26]:\n\n\nfeature_with_p_max = p.idxmax()\nfeature_with_p_max\n\n\n# In[27]:\n\n\ncols.remove(feature_with_p_max)\n\n\n# In[ ]:\n\n\np.idxmax()\n\n\n# In[28]:\n\n\ncols\n\n\n# In[32]:\n\n\ncols = list(X.columns)\npmax = 1\nwhile (len(cols)>0):\n p= []\n X = X[cols]\n xc = sm.add_constant(X)\n model = sm.OLS(Y,xc).fit()\n p = pd.Series(model.pvalues.values[1:],index = cols) \n pmax = max(p)\n feature_with_p_max = p.idxmax()\n if(pmax>0.05):\n cols.remove(feature_with_p_max)\n else:\n break\nselected_features_BE = cols\nprint(selected_features_BE)\n\n\n# In[ ]:\n\n\ndf.columns\n\n\n# In[38]:\n\n\nX_final=B_update[selected_features_BE]\nX_final.head()\n\n\n# In[47]:\n\n\nfrom sklearn.model_selection import train_test_split\n\n\n# In[48]:\n\n\nxtrain,xtest,ytrain,ytest=train_test_split(X_final,Y,test_size=0.30,random_state=0)\n\n\n# In[ ]:\n\n\nxtrain.shape,ytrain.shape,xtest.shape,ytest.shape\n\n\n# In[51]:\n\n\nmodel=LinearRegression()\nmodel.fit(xtrain,ytrain)\ny_pred_test=model.predict(xtest)\ny_pred_test\n\n\n# In[52]:\n\n\nresidue_test=ytest-y_pred_test\n\n\n# In[53]:\n\n\ny_pred_train=model.predict(xtrain) #to check the assumptions manually instead of OLS\n\n\n# In[54]:\n\n\n#training records residue\nresidue_train=ytrain-y_pred_train\n\n\n# In[55]:\n\n\nresidue_train.skew()\n\n\n# In[57]:\n\n\nfrom sklearn import metrics\n\n\n# In[56]:\n\n\n#RMSE score of LR model for the test records\nrmse_test=np.sqrt(np.mean((ytest-y_pred_test)**2))\nprint(rmse_test)\n\n\n# In[60]:\n\n\n#RMSE score of LR model for the training 
records\nrmse_train=np.sqrt(np.mean((ytrain-y_pred_train)**2))\nprint(rmse_train)\n\n\n# In[59]:\n\n\nmse=metrics.mean_squared_error(ytest,y_pred_test)\nrmse=np.sqrt(mse)\nprint(rmse)\n\n\n# For Linear Regression, we need to check if the 5 major assumptions hold.\n# \n# 1. No Auto correlation among the residues\n# 2. Linearity of variables\n# 3. Normality of error terms\n# 4. No Heteroscedacity\n# 5. No strong MultiCollinearity\n\n# 1) No Auto correlation. \n# \n# Test needed : Durbin- Watson Test.\n# \n# - It's value ranges from 0-4. If the value of Durbin- Watson is Between 0-2, it's known as Positive Autocorrelation.\n# - If the value ranges from 2-4, it is known as Negative autocorrelation.\n# - If the value is exactly 2, it means No Autocorrelation.\n# - For a good linear model, it should have low or no autocorrelation.\n# \n\n# In[42]:\n\n\nxc =sm.add_constant(X_final)\nfinal_model = sm.OLS(Y,xc).fit()\n\n\n# In[ ]:\n\n\nfinal_model.summary()\n\n\n# In[43]:\n\n\n# Check the Asumptions of Linear Regression\nimport statsmodels.tsa.api as smt\n\nacf = smt.graphics.plot_acf(final_model.resid, lags=10 , alpha=0.05)\nacf.show()\n\n\n# 2) The second assumption is the Normality of Residuals / Error terms.\n# \n# For this we prefer the Jarque Bera test. For a good model, the residuals should be normally distributed.\n# The higher the value of Jarque Bera test , the lesser the residuals are normally distributed.\n# We generally prefer a lower value of jarque bera test.\n# \n# The Jarque–Bera test is a goodness-of-fit test of whether sample data have the skewness and kurtosis matching a normal distribution. A large value for the jarque-bera test indicates non normality.\n# \n# The jarque bera test tests whether the sample data has the skewness and kurtosis matching a normal distribution.\n# \n\n# In[44]:\n\n\n#sample size is >2000 & <5000\n#If sample size >5000 (Anderson_Darling Test)\nfrom scipy import stats\nprint(stats.jarque_bera(final_model.resid))\n\n\n# In[45]:\n\n\n#sample size <2000\nfrom scipy import stats\nprint(stats.shapiro(final_model.resid))\n\n\n# In[46]:\n\n\nimport seaborn as sns\n\nsns.distplot(final_model.resid)\n\n\n# In[ ]:\n\n\ny_pred_train=model.predict(xtrain)\ny_pred_train\nres=(ytrain-y_pred_train)\nres.skew()\n\n\n# ##### Asssumption 3 - Linearity of residuals\n# We can plot the observed values Vs predicted values and see the linearity of residuals.\n# \n\n# In[ ]:\n\n\nplt.plot(ytrain,y_pred_train,'*')\n\n\n# ##### Assumption 4 - Homoscedasticity\n# Homoscedacity :: If the variance of the residuals are symmetrically distributed across the regression line , then the data is said to homoscedastic.\n# \n# Heteroscedacity :: If the variance is unequal for the residuals across the regression line, then the data is said to be heteroscedastic. \n# This can be visually noticed in sns scatter plot\n\n# ##### Assumption 5- NO MULTI COLLINEARITY\n# Multicollinearity effect can be be observed from correlation matrix, however the treatment for multicollinearity among independent variables can be effectively done through PCA technique (which we will be learning in future course)\n"
}
] | 1 |
madiwill/Final-Project
|
https://github.com/madiwill/Final-Project
|
5e2a23069320883a8914e0909a1b2d0f0511a721
|
218060b97d842f552dbf7700af5f204315078ee6
|
0adf966347fe817c6905fbd0df3c9dd29b8f4508
|
refs/heads/master
| 2021-08-20T03:31:16.380983 | 2017-11-28T04:38:43 | 2017-11-28T04:38:43 | 112,053,861 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6503759622573853,
"alphanum_fraction": 0.6563909649848938,
"avg_line_length": 26.70833396911621,
"blob_id": "fef31d6259d91819b659f44ef7174cae328d3ea4",
"content_id": "74df82a5ebfdaf181db0456a7db8d22482237784",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1330,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 48,
"path": "/finalproject206.py",
"repo_name": "madiwill/Final-Project",
"src_encoding": "UTF-8",
"text": "# Name: Madison Willihnganz\n# uniqname: madiwill\n# Section Day/Time: Thursday/3-4pm\n\nimport json\nimport random\nimport requests\nfrom facebook import Facebook\n\n def __init__(self):\n # Facebooks Debugger URL\n self.fb_url = 'https://developers.facebook.com/tools/debug/og/object'\n\ndef pretty(obj):\n return json.dumps(obj, sort_keys=True, indent=2)\n\nclass Post():\n def __init__(self, post_dict={}):\n \tif 'message' in post_dict:\n \t\tself.message = post_dict['message']\n \telse:\n \t\tself.message = \"\"\n \tif 'comments' in post_dict:\n \t\tself.comments = post_dict['comments']['data']\n \telse:\n \t\tself.comments = []\n \tif 'likes' in post_dict:\n \t\tself.likes = post_dict['likes']['data']\n \telse:\n \t\tself.likes = []\n\naccess_token = \"EAACB3qWFbHMBAJYZBHCv8LaEcF66pT78Ycw1AnsIkBUdu5xoxQ2f2QZCinC3RMqiVpL7WIZAOCWc6disZCYxToWIGmI6U01ZCTdjKxZBnJUJZBDqKkTZAsxzOlkWJ42rrx7poA6sFkvKTLoZC6mj9U7DkpKpCMcGvbJWvNQcrYdL4jFnAsZAWb4nxZA42Hchs2FAXAZD\"\nif access_token == None:\n access_token = raw_input(\"\\nCopy and paste token from https://developers.facebook.com/tools/explorer\\n> \")\n\nbaseurl = \"https://graph.facebook.com/v2.3/me/feed\"\nurl_params = {}\nurl_params[\"access_token\"] = access_token\n# Write code to fill in other url_parameters dictionary here.\nurl_params['limit'] = 100\nurl_params['fields'] = 'message,comments,likes'\nurl_params['include_hidden'] = True\n\nfeed = requests.get(baseurl,params=url_params)\nd = json.loads(feed.text)\nfor post in d['data']:\n\tif 'message' in post:\n\t\tprint (post['message'])\n"
}
] | 1 |
Michal-Sob/lightweight-erp-python-erp_transformers
|
https://github.com/Michal-Sob/lightweight-erp-python-erp_transformers
|
eaba384647c746e3c252b9543212da49ce3a2e3f
|
9a90b19460a8f49c3424c57e5ea32cd952de7e58
|
2f1d7754b0ea55ac8aecfa73568a7f2a77507b4a
|
refs/heads/master
| 2020-12-15T11:02:28.726612 | 2020-01-20T11:15:21 | 2020-01-20T11:15:21 | 235,082,819 | 0 | 0 | null | 2020-01-20T11:09:29 | 2020-01-20T11:06:55 | 2020-01-20T11:06:53 | null |
[
{
"alpha_fraction": 0.5929918885231018,
"alphanum_fraction": 0.5963611602783203,
"avg_line_length": 22.935483932495117,
"blob_id": "0dbc6e80f24fb54ba7abed77d7b4e3ab3725c0d3",
"content_id": "3a7eb62bd420360fe9ba54571d5123b531f2da94",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4452,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 186,
"path": "/hr/hr.py",
"repo_name": "Michal-Sob/lightweight-erp-python-erp_transformers",
"src_encoding": "UTF-8",
"text": "\"\"\" Human resources module\n\nData table structure:\n * id (string): Unique and random generated identifier\n at least 2 special characters (except: ';'), 2 number, 2 lower and 2 upper case letters)\n * name (string)\n * birth_year (number)\n\"\"\"\n\n# everything you'll need is imported:\n# User interface module\nimport ui\n# data manager module\nimport data_manager\n# common module\nimport common\n\n\ndef start_module():\n \"\"\"\n Starts this module and displays its menu.\n * User can access default special features from here.\n * User can go back to main menu from here.\n\n Returns:\n None\n \"\"\"\n\n # your code\n options = ['show table',\n 'add',\n 'remove',\n 'update',\n 'get oldest person',\n 'get persons closest to average']\n dict_menu = {'1': show_table_wrapper,\n '2': add_wrapper,\n '3': remove_wrapper,\n '4': update_wrapper,\n '5': get_oldest_person_wrapper,\n '6': get_persons_closest_to_average_wrapper}\n common.sub_menu(dict_menu, options, \"HR menu\")\n\n\ndef show_table_wrapper():\n table = data_manager.get_table_from_file('hr/persons.csv')\n show_table(table)\n\n\ndef add_wrapper():\n table = data_manager.get_table_from_file('hr/persons.csv')\n add(table)\n\n\ndef remove_wrapper():\n table = data_manager.get_table_from_file('hr/persons.csv')\n remove(table, ui.get_inputs(['ID :'], 'Enter ID: '))\n\n\ndef update_wrapper():\n table = data_manager.get_table_from_file('hr/persons.csv')\n update(table, ui.get_inputs(['ID :'], 'Enter ID: '))\n\n\ndef get_oldest_person_wrapper():\n table = data_manager.get_table_from_file('hr/persons.csv')\n get_oldest_person(table)\n\n\ndef get_persons_closest_to_average_wrapper():\n table = data_manager.get_table_from_file('hr/persons.csv')\n get_persons_closest_to_average(table)\n\n\ndef show_table(table):\n \"\"\"\n Display a table\n\n Args:\n table (list): list of lists to be displayed.\n\n Returns:\n None\n \"\"\"\n\n # your code\n titles_list = ['ID', 'name', 'birth_year']\n ui.print_table(table, titles_list)\n\n\ndef add(table):\n \"\"\"\n Asks user for input and adds it into the table.\n\n Args:\n table (list): table to add new record to\n\n Returns:\n list: Table with a new record\n \"\"\"\n\n # your code\n ID_INDEX = 0\n record = ui.get_inputs(['Name: ', 'Birth year: '], \"Please insert data: \")\n record.insert(ID_INDEX, common.generate_random(table))\n table.append(record)\n data_manager.write_table_to_file('hr/persons.csv', table)\n return table\n\n\ndef remove(table, id_):\n \"\"\"\n Remove a record with a given id from the table.\n\n Args:\n table (list): table to remove a record from\n id_ (str): id of a record to be removed\n\n Returns:\n list: Table without specified record.\n \"\"\"\n\n # your code\n ID_LIST_INDEX = 0\n for row in table:\n if row[ID_LIST_INDEX] == id_[ID_LIST_INDEX]:\n table.remove(row)\n data_manager.write_table_to_file('hr/persons.csv', table)\n return table\n\n\ndef update(table, id_):\n \"\"\"\n Updates specified record in the table. 
Ask users for new data.\n\n Args:\n table (list): list in which record should be updated\n id_ (str): id of a record to update\n\n Returns:\n list: table with updated record\n \"\"\"\n\n # your code\n ID_LIST_INDEX = 0\n iterate = 0\n for row in table:\n if row[ID_LIST_INDEX] == id_[ID_LIST_INDEX]:\n updated_record = ui.get_inputs(['Name: ', 'Birth year: '], row)\n updated_record.insert(ID_LIST_INDEX, id_[ID_LIST_INDEX])\n table[iterate] = updated_record\n data_manager.write_table_to_file('hr/persons.csv', table)\n break\n iterate += 1\n return table\n\n\n# special functions:\n# ------------------\n\ndef get_oldest_person(table):\n \"\"\"\n Question: Who is the oldest person?\n\n Args:\n table (list): data table to work on\n\n Returns:\n list: A list of strings (name or names if there are two more with the same value)\n \"\"\"\n\n # your code\n\n\ndef get_persons_closest_to_average(table):\n \"\"\"\n Question: Who is the closest to the average age?\n\n Args:\n table (list): data table to work on\n\n Returns:\n list: list of strings (name or names if there are two more with the same value)\n \"\"\"\n\n # your code\n"
},
{
"alpha_fraction": 0.5077519416809082,
"alphanum_fraction": 0.5170542597770691,
"avg_line_length": 27.19672203063965,
"blob_id": "72738e7538d9078d078e449e026a45bcf25939b1",
"content_id": "41179d7d724ead0a16da83cc6b866be5884229c2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5160,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 183,
"path": "/ui.py",
"repo_name": "Michal-Sob/lightweight-erp-python-erp_transformers",
"src_encoding": "UTF-8",
"text": "\"\"\" User Interface (UI) module \"\"\"\nimport data_manager\n\ndef print_table(table, title_list):\n \"\"\"\n Prints table with data.\n\n Example:\n /-----------------------------------\\\n | id | title | type |\n |--------|----------------|---------|\n | 0 | Counter strike | fps |\n |--------|----------------|---------|\n | 1 | fo | fps |\n \\-----------------------------------/\n\n Args:\n table (list): list of lists - table to display\n title_list (list): list containing table headers\n\n Returns:\n None: This function doesn't return anything it only prints to console.\n \"\"\"\n dash_char = \"-\"\n max_len_column = [0,0,0,0,0,0,0]\n column_counter = 0\n\n #maximum length of every element in title data base (max_len_column)\n for rows in title_list:\n if len(rows) > max_len_column[column_counter]:\n max_len_column[column_counter] = len(rows)\n column_counter += 1\n\n #maximum Length of every row in table data base. (max_len_column)\n for rows in table:\n column_counter = 0\n for column in rows:\n if len(column) > max_len_column[column_counter]:\n max_len_column[column_counter] = len(column)\n column_counter += 1\n\n #header\n print(\"/\",end = '')\n counter = 0\n for i in max_len_column:\n if i != 0:\n counter += 1\n if counter == len(title_list):\n print(f\"{dash_char * (i + 1)}\\\\\",end = '')\n else:\n print(f\"{dash_char * (i + 1)}|\",end = '')\n print(\"\")\n\n\n #title\n counter = 0\n for title in title_list:\n print(f'| {title.center(max_len_column[counter])}', end = \"\")\n counter += 1\n print(\"|\")\n\n #data\n for rows in table:\n counter = 0\n for i in max_len_column:\n if i != 0:\n print(f\"|{dash_char * (i + 1)}\", end = '')\n print(\"|\")\n for column in rows:\n print(f\"| {column.center(max_len_column[counter])}\", end = \"\")\n counter += 1\n print('|')\n\n #footer\n print(\"\\\\\",end = '')\n counter = 0\n for i in max_len_column:\n if i != 0:\n counter += 1\n if counter == len(title_list):\n print(f\"{dash_char * (i + 1)}/\",end = '')\n else:\n print(f\"{dash_char * (i + 1)}|\",end = '')\n\n\ndef print_result(result, label):\n \"\"\"\n Displays results of the special functions.\n\n Args:\n result: result of the special function (string, number, list or dict)\n label (str): label of the result\n\n Returns:\n None: This function doesn't return anything it only prints to console.\n \"\"\"\n\n # your code\n if type(result)==dict:\n lista = result.items()\n print(f\"{label}\")\n for i in lista:\n print(f\"{i[0]} {i[1]}\")\n # print(list(my_dict))\n if type(result)==str:\n print(f\"{label}\")\n print(f\"string\")\n if type(result)==list:\n print(f\"{label}\")\n for single_list in result:\n print(str(single_list).strip(\"()\"))\n if type(result)==int:\n print(f\"{label}\")\n print(f\"{result}\")\n\n\ndef print_menu(title, list_options, exit_message):\n \"\"\"\n Displays a menu. 
Sample output:\n Main menu:\n (1) Store manager\n (2) Human resources manager\n (3) Inventory manager\n (4) Accounting manager\n (5) Sales manager\n (6) Customer relationship management (CRM)\n (0) Exit program\n\n Args:\n title (str): menu title\n list_options (list): list of strings - options that will be shown in menu\n exit_message (str): the last option with (0) (example: \"Back to main menu\")\n\n Returns:\n None: This function doesn't return anything it only prints to console.\n \"\"\"\n print(f'\\n{title}: ')\n options_counter = 1\n for option in list_options:\n print(f'({options_counter}) {option}')\n options_counter += 1\n print(f'(0) {exit_message}')\n\n\ndef get_inputs(list_labels, title):\n \"\"\"\n Gets list of inputs from the user.\n Sample call:\n get_inputs([\"Name\",\"Surname\",\"Age\"],\"Please provide your personal information\")\n Sample display:\n Please provide your personal information\n Name <user_input_1>\n Surname <user_input_2>\n Age <user_input_3>\n\n Args:\n list_labels (list): labels of inputs\n title (string): title of the \"input section\"\n\n Returns:\n list: List of data given by the user. Sample return:\n [<user_input_1>, <user_input_2>, <user_input_3>]\n \"\"\"\n inputs = []\n print(f'{title}')\n for label in list_labels:\n get_input = input(label)\n inputs.append(get_input)\n return inputs\n\n\ndef print_error_message(message):\n \"\"\"\n Displays an error message (example: ``Error: @message``)\n\n Args:\n message (str): error message to be displayed\n\n Returns:\n None: This function doesn't return anything it only prints to console.\n \"\"\"\n\n print(f'{message}')\n"
}
] | 2 |
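A minimal sketch of the column-sizing rule used by print_table in ui.py above (the helper below is illustrative, not part of the repo): each column is as wide as its longest cell, measured over the header row and all data rows together.

def column_widths(headers, rows):
    # Transpose [headers] + rows and take the longest cell per column.
    table = [headers] + rows
    return [max(len(str(cell)) for cell in column) for column in zip(*table)]

assert column_widths(["id", "title"], [["0", "Counter strike"], ["1", "fo"]]) == [2, 14]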
geoffkip/flight_delays
|
https://github.com/geoffkip/flight_delays
|
0cd7460d81cbadf47164b7ba5460d22f542d9514
|
4612b53f31e69a7aaa83a4bb6ea7ee6f9ce04752
|
762ffacc0ff30f70a0214d87dcdf12c63c3b153e
|
refs/heads/master
| 2020-03-28T05:19:33.039491 | 2018-09-09T14:32:24 | 2018-09-09T14:32:24 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7750294208526611,
"alphanum_fraction": 0.7985865473747253,
"avg_line_length": 76.18181610107422,
"blob_id": "3ac8fdfe9f4b380ca6952e7ec68ee38c34dc73df",
"content_id": "544606f36de61cb60d33513fac99b888dab9e72a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 849,
"license_type": "no_license",
"max_line_length": 368,
"num_lines": 11,
"path": "/README.md",
"repo_name": "geoffkip/flight_delays",
"src_encoding": "UTF-8",
"text": "# 2015 Flights exploratory analysis\n# Context\nThe U.S. Department of Transportation's (DOT) Bureau of Transportation Statistics tracks the on-time performance of domestic flights operated by large air carriers. Summary information on the number of on-time, delayed, canceled, and diverted flights is published in DOT's monthly Air Travel Consumer Report and in this dataset of 2015 flight delays and cancellations.\nData can be found on Kaggle https://www.kaggle.com/usdot/flight-delays.\n\nSome interesting facts\n1) Most of the flights (about 17.8%) came from Atlanta International airport.\n2) Southwest airlines had the most flights in 2015.\n3) July seemed to be the busiest month for flights.\n4) Thursday seemed to have the most flights (weird one I thought it would be a weekend day)\n5) The month of June had the most departure and arrival delays.\n"
},
{
"alpha_fraction": 0.6726508736610413,
"alphanum_fraction": 0.6934682726860046,
"avg_line_length": 37.77777862548828,
"blob_id": "b6e550fc9b8f5f951b5f651855ec93fe107d4e75",
"content_id": "9c71d1a2dba30dd2f2ca5712bd22121e577eb83a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5236,
"license_type": "no_license",
"max_line_length": 169,
"num_lines": 135,
"path": "/flight_delays.py",
"repo_name": "geoffkip/flight_delays",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep 6 22:13:32 2018\n\n@author: geoffrey.kip\n\"\"\"\n\nfrom os import chdir\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# Set working directory\nwd=\"/Users/geoffrey.kip/Projects/flight_delays\"\nchdir(wd)\n\n# Read in all data\n# Publicly available 2015 Airline Data\nflights= pd.read_csv(\"./data/flights.csv\")\nairports= pd.read_csv(\"./data/airports.csv\")\nairlines= pd.read_csv(\"./data/airlines.csv\")\n\n# Rename columns\nflights.describe()\nflights.shape\nflights1= flights[:1000]\nflights.rename(columns={'AIRLINE': 'IATA_CODE', 'ORIGIN_AIRPORT':'ORIGIN_AIRPORT_CODE',\n 'DESTINATION_AIRPORT':'DESTINATION_AIRPORT_CODE'}, inplace=True)\n\norigin_airports = airports.rename(columns={'IATA_CODE':'ORIGIN_AIRPORT_CODE','AIRPORT': 'ORIGIN_AIRPORT',\n 'CITY':'ORIGIN_CITY'})\ndestination_airports = airports.rename(columns={'IATA_CODE':'DESTINATION_AIRPORT_CODE','AIRPORT': 'DESTINATION_AIRPORT',\n 'CITY':'DESTINATION_CITY'})\n# Merge datasets together\nflights2 = pd.merge(flights, airlines, how='left', on=\"IATA_CODE\")\nflights3 = pd.merge(flights2, origin_airports[['ORIGIN_AIRPORT_CODE','ORIGIN_AIRPORT',\n 'ORIGIN_CITY']], how= 'left', left_on='ORIGIN_AIRPORT_CODE', right_on='ORIGIN_AIRPORT_CODE')\nflights_df = pd.merge(flights3, destination_airports[['DESTINATION_AIRPORT_CODE','DESTINATION_AIRPORT',\n 'DESTINATION_CITY']], how= 'left', left_on='DESTINATION_AIRPORT_CODE', right_on='DESTINATION_AIRPORT_CODE')\n\n# Recode some variables\n#TO DO CODE MONTHS DAYS, SEASONS, LATE EARLY\nmonth = {1: 'Jan', 2: 'Feb', 3: 'Mar', 4: 'Apr', 5: 'May',\n 6: 'Jun', 7: 'Jul', 8: 'Aug', 9: 'Sep', 10: 'Oct', 11: 'Nov', 12: 'Dec'}\nday_of_week= {1:'Monday', 2:'Tuesday', 3:'Wednesday', 4:'Thursday', 5:'Friday', 6:'Saturday', 7:'Sunday'}\n\nflights_df.MONTH = flights_df.MONTH.map(month)\nflights_df[\"MONTH\"]= pd.Categorical(flights_df['MONTH'], categories=[\"Jan\", \"Feb\", \"Mar\", \"Apr\", \"May\", \"Jun\", \"Jul\", \"Aug\", \"Sep\", \"Oct\", \"Nov\", \"Dec\"], ordered=True)\nflights_df.DAY_OF_WEEK= flights_df.DAY_OF_WEEK.map(day_of_week)\nflights_df['DAY_OF_WEEK'] = pd.Categorical(flights_df['DAY_OF_WEEK'], categories=['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday', 'Sunday'], ordered=True)\n\n\n# Code departure delay and arrival delay fields\nflights_df['DEPARTURE_DELAY'] = np.where(flights_df['DEPARTURE_DELAY']>0,1,0)\nflights_df['ARRIVAL_DELAY'] = np.where(flights_df['ARRIVAL_DELAY']>0,1,0)\n\n\n# GRAPHS\n#1) Airports with the most origin flights\ntop_ten_origin_airports= flights_df['ORIGIN_AIRPORT'].value_counts()[:10]\nlabels = (np.array(top_ten_origin_airports.index))\nfig=plt.figure()\nax=fig.add_subplot(111)\nax.pie(top_ten_origin_airports, labels=labels, autopct='%1.1f%%',\n shadow=True, startangle=90)\nax.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\nax.set_title('Percentage of Flights from Origin Airports')\nplt.show()\n\n#2 Airlines with most flights \ntop_ten_airlines= flights_df['AIRLINE'].value_counts()[:10]\nlabels = (np.array(top_ten_airlines.index))\ny_pos=np.arange(len(labels))\n\nfig= plt.figure()\nax= fig.add_subplot(111)\nax.bar(labels,top_ten_airlines, align='center',color='cyan',alpha=0.5)\nax.set_facecolor('gray')\nplt.xticks(y_pos, labels,rotation=90)\nplt.ylabel('Flight Counts')\nplt.title('Flight Counts by Airline')\nplt.show()\n\n#3 Flights by month\nmonth_count= flights_df[\"MONTH\"].value_counts()\nmonth_count = 
month_count.sort_index()\nmonth = np.array([\"Jan\", \"Feb\", \"Mar\", \"Apr\", \"May\", \"Jun\", \"Jul\", \"Aug\", \"Sep\", \"Oct\", \"Nov\", \"Dec\"])\ny_pos=np.arange(len(month))\n\nfig=plt.figure()\nax=fig.add_subplot(111)\nax.bar(month,month_count,align='center',alpha=0.5,color='green')\nax.set_facecolor('gray')\nplt.xticks(y_pos,month)\nplt.ylabel('Flight Counts')\nplt.title('Flight Counts by Month')\nplt.show()\n\n#4 Flights by day\nday_count= flights_df[\"DAY_OF_WEEK\"].value_counts()\nday_count = day_count.sort_index()\nday= np.array([\"Monday\",\"Tuesday\",\"Wednesday\",\"Thursday\",\"Friday\",\"Saturday\",\"Sunday\"])\ny_pos=np.arange(len(day))\n\nfig=plt.figure()\nax=fig.add_subplot(111)\nax.bar(day,day_count,align='center',alpha=0.5,color='green')\nax.set_facecolor('gray')\nplt.xticks(y_pos, day,rotation=90)\nplt.ylabel('Flight Counts')\nplt.title('Flight Counts by Day')\nplt.show()\n\n# 5 Plot percentage delay by month\n# Which months had the highest percentage of departure and arrival delays?\ndep_delay_counts = flights_df.groupby('MONTH').DEPARTURE_DELAY.mean().round(2)\narr_delay_counts = flights_df.groupby('MONTH').ARRIVAL_DELAY.mean().round(2)\n\ny_pos=np.arange(len(month))\n\nfig=plt.figure()\nax=fig.add_subplot(111)\n\n# Calculate optimal width\nwidth = np.min(np.diff(y_pos))/3.\n\nrect1= ax.bar(y_pos-width/2,dep_delay_counts,align='center',alpha=0.5,color='green',width=width,label=\"Departure Delays\")\nrect2= ax.bar(y_pos+width/2,arr_delay_counts,align='center',alpha=0.5,color='red',width=width,label=\"Arrival Delays\")\nax.set_facecolor('gray')\nax.legend( (rect1[0], rect2[0]), ('Departure Delays', 'Arrival Delays') )\nplt.xticks(y_pos,month,rotation=90)\nplt.ylabel('Percentage Delay %')\nplt.title('Flight Delays by month')\nplt.show()\n\n"
}
] | 2 |
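How the delay metric in flight_delays.py above works: DEPARTURE_DELAY and ARRIVAL_DELAY are binarized with np.where(delay > 0, 1, 0), so the per-month groupby mean is the fraction of delayed flights, not an average delay in minutes. A toy illustration with made-up rows (not the Kaggle data):

import numpy as np
import pandas as pd

df = pd.DataFrame({"MONTH": ["Jan", "Jan", "Feb"], "DEPARTURE_DELAY": [12, -3, 0]})
df["DEPARTURE_DELAY"] = np.where(df["DEPARTURE_DELAY"] > 0, 1, 0)  # late -> 1, early/on time -> 0
print(df.groupby("MONTH").DEPARTURE_DELAY.mean())  # Feb 0.0, Jan 0.5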
binit9/Knowledge-Graph-Covid19
|
https://github.com/binit9/Knowledge-Graph-Covid19
|
a02eb231631f07c48b5ae7e0b524c32b80530973
|
ca908e934f819973b5c8e06d23e934bf1c658d91
|
fff43a91a047ed636ebf785494d782d42617a1e9
|
refs/heads/master
| 2022-04-22T04:01:22.704381 | 2020-04-22T03:05:06 | 2020-04-22T03:05:06 | 257,487,006 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6734463572502136,
"alphanum_fraction": 0.6891713738441467,
"avg_line_length": 40.814517974853516,
"blob_id": "881835afe802976b8033ca5ff7f4aeac8feff0e2",
"content_id": "764783b52eb6ab6ef71be81622307380a1fcb1dd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10644,
"license_type": "no_license",
"max_line_length": 144,
"num_lines": 248,
"path": "/textual_entailment/Train_Vectors_Glove.py",
"repo_name": "binit9/Knowledge-Graph-Covid19",
"src_encoding": "UTF-8",
"text": "\r\nimport numpy as np\r\nnp.random.seed(0)\r\nfrom keras.models import Model\r\nfrom keras.layers import Dense, Input, Dropout, LSTM, Activation\r\nfrom keras.layers.embeddings import Embedding\r\nfrom keras.layers.merge import concatenate\r\nfrom keras.preprocessing import sequence\r\nfrom keras.initializers import glorot_uniform\r\nfrom keras.layers import Conv1D, GlobalMaxPooling1D, MaxPooling1D\r\n\r\nnp.random.seed(1)\r\n\r\n\r\n# GRADED FUNCTION: pretrained_embedding_layer\r\n\r\ndef pretrained_embedding_layer(word_to_vec_map, word_to_index):\r\n \"\"\"\r\n Creates a Keras Embedding() layer and loads in pre-trained GloVe 50-dimensional vectors.\r\n\r\n Arguments:\r\n word_to_vec_map -- dictionary mapping words to their GloVe vector representation.\r\n word_to_index -- dictionary mapping from words to their indices in the vocabulary (400,001 words)\r\n\r\n Returns:\r\n embedding_layer -- pretrained layer Keras instance\r\n \"\"\"\r\n\r\n vocab_len = len(word_to_index) + 1 # adding 1 to fit Keras embedding (requirement)\r\n emb_dim = word_to_vec_map[\"cucumber\"].shape[0] # define dimensionality of your GloVe word vectors (= 50)\r\n\r\n ### START CODE HERE ###\r\n # Initialize the embedding matrix as a numpy array of zeros of shape (vocab_len, dimensions of word vectors = emb_dim)\r\n emb_matrix = np.zeros((vocab_len, emb_dim))\r\n\r\n # Set each row \"index\" of the embedding matrix to be the word vector representation of the \"index\"th word of the vocabulary\r\n for word, index in word_to_index.items():\r\n emb_matrix[index, :] = word_to_vec_map[word]\r\n\r\n # Define Keras embedding layer with the correct output/input sizes, make it trainable. Use Embedding(...). Make sure to set trainable=False.\r\n embedding_layer = Embedding(vocab_len, emb_dim, trainable=False)\r\n ### END CODE HERE ###\r\n\r\n # Build the embedding layer, it is required before setting the weights of the embedding layer. Do not modify the \"None\".\r\n embedding_layer.build((None,))\r\n\r\n # Set the weights of the embedding layer to the embedding matrix. 
Your layer is now pretrained.\r\n embedding_layer.set_weights([emb_matrix])\r\n\r\n return embedding_layer\r\n\r\n\r\ndef Emojify_V2_th(input_shape, word_to_vec_map, word_to_index):\r\n \"\"\"\r\n Function creating the Emojify-v2 model's graph.\r\n\r\n Arguments:\r\n input_shape -- shape of the input, usually (max_len,)\r\n word_to_vec_map -- dictionary mapping every word in a vocabulary into its 50-dimensional vector representation\r\n word_to_index -- dictionary mapping from words to their indices in the vocabulary (400,001 words)\r\n\r\n Returns:\r\n model -- a model instance in Keras\r\n \"\"\"\r\n\r\n ### START CODE HERE ###\r\n # Define sentence_indices as the input of the graph, it should be of shape input_shape and dtype 'int32' (as it contains indices).\r\n sentence_indices_t = Input(shape=input_shape, dtype='int32')\r\n\r\n # Create the embedding layer pretrained with GloVe Vectors (≈1 line)\r\n embedding_layer_t = pretrained_embedding_layer(word_to_vec_map, word_to_index)\r\n\r\n # Propagate sentence_indices through your embedding layer, you get back the embeddings\r\n embeddings_t = embedding_layer_t(sentence_indices_t)\r\n\r\n # Propagate the embeddings through an LSTM layer with 128-dimensional hidden state\r\n # Be careful, the returned output should be a batch of sequences.\r\n X_t = LSTM(128, return_sequences=True)(embeddings_t)\r\n # Add dropout with a probability of 0.5\r\n X_t = Dropout(0.5)(X_t)\r\n # Propagate X trough another LSTM layer with 128-dimensional hidden state\r\n # Be careful, the returned output should be a single hidden state, not a batch of sequences.\r\n X_t = LSTM(128)(X_t)\r\n # Add dropout with a probability of 0.5\r\n X_t = Dropout(0.5)(X_t)\r\n\r\n ### START CODE HERE ###\r\n # Define sentence_indices as the input of the graph, it should be of shape input_shape and dtype 'int32' (as it contains indices).\r\n sentence_indices_h = Input(shape=input_shape, dtype='int32')\r\n\r\n # Create the embedding layer pretrained with GloVe Vectors (≈1 line)\r\n embedding_layer_h = pretrained_embedding_layer(word_to_vec_map, word_to_index)\r\n\r\n # Propagate sentence_indices through your embedding layer, you get back the embeddings\r\n embeddings_h = embedding_layer_t(sentence_indices_h)\r\n\r\n # Propagate the embeddings through an LSTM layer with 128-dimensional hidden state\r\n # Be careful, the returned output should be a batch of sequences.\r\n X_h = LSTM(128, return_sequences=True)(embeddings_h)\r\n # Add dropout with a probability of 0.5\r\n X_h = Dropout(0.5)(X_h)\r\n # Propagate X trough another LSTM layer with 128-dimensional hidden state\r\n # Be careful, the returned output should be a single hidden state, not a batch of sequences.\r\n X_h = LSTM(128)(X_h)\r\n # Add dropout with a probability of 0.5\r\n X_h = Dropout(0.5)(X_h)\r\n\r\n X = concatenate([X_t, X_h])\r\n\r\n # Propagate X through a Dense layer with softmax activation to get back a batch of 5-dimensional vectors.\r\n X = Dense(2, activation='softmax')(X)\r\n # Add a softmax activation\r\n X = Activation('softmax')(X)\r\n\r\n # Create Model instance which converts sentence_indices into X.\r\n model = Model(inputs=[sentence_indices_t, sentence_indices_h], outputs=X)\r\n\r\n ### END CODE HERE ###\r\n\r\n return model\r\n\r\n\r\ndef Emojify_V2_concat(input_shape, word_to_vec_map, word_to_index):\r\n \"\"\"\r\n Function creating the Emojify-v2 model's graph.\r\n\r\n Arguments:\r\n input_shape -- shape of the input, usually (max_len,)\r\n word_to_vec_map -- dictionary mapping every word in a 
vocabulary into its 50-dimensional vector representation\r\n word_to_index -- dictionary mapping from words to their indices in the vocabulary (400,001 words)\r\n\r\n Returns:\r\n model -- a model instance in Keras\r\n \"\"\"\r\n\r\n ### START CODE HERE ###\r\n # Define sentence_indices as the input of the graph, it should be of shape input_shape and dtype 'int32' (as it contains indices).\r\n sentence_indices_t = Input(shape=input_shape, dtype='int32')\r\n\r\n # Create the embedding layer pretrained with GloVe Vectors (≈1 line)\r\n embedding_layer_t = pretrained_embedding_layer(word_to_vec_map, word_to_index)\r\n\r\n # Propagate sentence_indices through your embedding layer, you get back the embeddings\r\n embeddings_t = embedding_layer_t(sentence_indices_t)\r\n\r\n ### START CODE HERE ###\r\n # Define sentence_indices as the input of the graph, it should be of shape input_shape and dtype 'int32' (as it contains indices).\r\n sentence_indices_h = Input(shape=input_shape, dtype='int32')\r\n\r\n # Create the embedding layer pretrained with GloVe Vectors (≈1 line)\r\n embedding_layer_h = pretrained_embedding_layer(word_to_vec_map, word_to_index)\r\n\r\n # Propagate sentence_indices through your embedding layer, you get back the embeddings\r\n embeddings_h = embedding_layer_t(sentence_indices_h)\r\n\r\n embeddings = concatenate([embeddings_t, embeddings_h])\r\n print(embeddings_t.shape, embeddings_h.shape, embeddings)\r\n\r\n # Propagate the embeddings through an LSTM layer with 128-dimensional hidden state\r\n # Be careful, the returned output should be a batch of sequences.\r\n X = LSTM(128, return_sequences=True)(embeddings)\r\n # Add dropout with a probability of 0.5\r\n X = Dropout(0.5)(X)\r\n # Propagate X trough another LSTM layer with 128-dimensional hidden state\r\n # Be careful, the returned output should be a single hidden state, not a batch of sequences.\r\n X = LSTM(128)(X)\r\n # Add dropout with a probability of 0.5\r\n X = Dropout(0.5)(X)\r\n\r\n # Propagate X through a Dense layer with softmax activation to get back a batch of 5-dimensional vectors.\r\n X = Dense(2, activation='softmax')(X)\r\n # Add a softmax activation\r\n X = Activation('softmax')(X)\r\n\r\n # Create Model instance which converts sentence_indices into X.\r\n model = Model(inputs=[sentence_indices_t, sentence_indices_h], outputs=X)\r\n\r\n ### END CODE HERE ###\r\n\r\n return model\r\n\r\n\r\ndef Emojify_V2_cnn(input_shape, word_to_vec_map, word_to_index):\r\n \"\"\"\r\n Function creating the Emojify-v2 model's graph.\r\n\r\n Arguments:\r\n input_shape -- shape of the input, usually (max_len,)\r\n word_to_vec_map -- dictionary mapping every word in a vocabulary into its 50-dimensional vector representation\r\n word_to_index -- dictionary mapping from words to their indices in the vocabulary (400,001 words)\r\n\r\n Returns:\r\n model -- a model instance in Keras\r\n \"\"\"\r\n\r\n ### START CODE HERE ###\r\n # Define sentence_indices as the input of the graph, it should be of shape input_shape and dtype 'int32' (as it contains indices).\r\n sentence_indices_t = Input(shape=input_shape, dtype='int32')\r\n\r\n # Create the embedding layer pretrained with GloVe Vectors (≈1 line)\r\n embedding_layer_t = pretrained_embedding_layer(word_to_vec_map, word_to_index)\r\n\r\n # Propagate sentence_indices through your embedding layer, you get back the embeddings\r\n embeddings_t = embedding_layer_t(sentence_indices_t)\r\n\r\n ### START CODE HERE ###\r\n # Define sentence_indices as the input of the graph, it should be of 
shape input_shape and dtype 'int32' (as it contains indices).\r\n sentence_indices_h = Input(shape=input_shape, dtype='int32')\r\n\r\n # Create the embedding layer pretrained with GloVe Vectors (≈1 line)\r\n embedding_layer_h = pretrained_embedding_layer(word_to_vec_map, word_to_index)\r\n\r\n # Propagate sentence_indices through your embedding layer, you get back the embeddings\r\n embeddings_h = embedding_layer_t(sentence_indices_h)\r\n\r\n embeddings = concatenate([embeddings_t, embeddings_h])\r\n print(embeddings_t.shape, embeddings_h.shape, embeddings)\r\n\r\n filters = 250\r\n kernel_size = 3\r\n hidden_dims = 250\r\n\r\n # X = Dropout(0.2)(embeddings)\r\n # X = Conv1D(filters, kernel_size, padding='valid', activation='relu', strides=1)(X)\r\n # X = GlobalMaxPooling1D()(X)\r\n # X = Dense(hidden_dims)(X)\r\n # X = Dropout(0.2)(X)\r\n # X = Activation('relu')(X)\r\n # X = Dense(2)(X)\r\n # X = Activation('sigmoid')(X)\r\n\r\n pool_size = 4\r\n lstm_output_size = 70\r\n\r\n X = Dropout(0.25)(embeddings)\r\n X = Conv1D(filters, kernel_size, padding='valid', activation='relu', strides=1)(X)\r\n X = MaxPooling1D(pool_size=pool_size)(X)\r\n X = LSTM(lstm_output_size)(X)\r\n X = Dense(2)(X)\r\n X = Activation('sigmoid')(X)\r\n\r\n\r\n\r\n # Create Model instance which converts sentence_indices into X.\r\n model = Model(inputs=[sentence_indices_t, sentence_indices_h], outputs=X)\r\n\r\n ### END CODE HERE ###\r\n\r\n return model\r\n"
},
{
"alpha_fraction": 0.6455696225166321,
"alphanum_fraction": 0.6835442781448364,
"avg_line_length": 34.5,
"blob_id": "5fc4b7f1eea919d21f8759b238199e0531cb97d5",
"content_id": "e2c4dd390baa497dc1a1ec5942f61cddb64b68ba",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 79,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 2,
"path": "/textual_entailment/__init__.py",
"repo_name": "binit9/Knowledge-Graph-Covid19",
"src_encoding": "UTF-8",
"text": "__version__ = '1.0.1'\r\nfrom .textualentailment import TextualEntailment\r\n\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.6227464079856873,
"alphanum_fraction": 0.6350660920143127,
"avg_line_length": 37.60714340209961,
"blob_id": "fb14cebb2af3fb21624594755ffa1ec1667177c2",
"content_id": "7a1a61025de7105dab9c8ccdf1135f662a977652",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6656,
"license_type": "no_license",
"max_line_length": 142,
"num_lines": 168,
"path": "/textual_entailment/textualentailment.py",
"repo_name": "binit9/Knowledge-Graph-Covid19",
"src_encoding": "UTF-8",
"text": "import nltk\r\nimport logging\r\nfrom gensim.models import Word2Vec\r\nimport sys\r\nimport os\r\nsys.path.append(os.path.abspath('.'))\r\n# print(sys.path)\r\nfrom text_analysis.textual_entailment import rte_classify\r\nfrom text_analysis.textual_entailment import RTE_Data\r\nfrom text_analysis.textual_entailment import Word2Vec_AverageVectors\r\nfrom text_analysis.textual_entailment import Word2Vec_Vectors\r\nfrom text_analysis.textual_entailment import Train_Vectors\r\nfrom sklearn import tree\r\nfrom sklearn import svm\r\nfrom sklearn.neural_network import MLPClassifier\r\nfrom sklearn import metrics\r\nimport numpy as np\r\nfrom text_analysis.textual_entailment.KaggleWord2VecUtility import KaggleWord2VecUtility\r\nfrom keras.models import load_model\r\nimport json\r\nimport keras\r\nimport traceback\r\n\r\nclass TextualEntailment():\r\n lstm = True\r\n\r\n def word2vec_model(self):\r\n min_word_count = 40 # Minimum word count\r\n num_workers = 4 # Number of threads to run in parallel\r\n context = 10 # Context window size\r\n downsampling = 1e-3 # Downsample setting for frequent words\r\n\r\n # Initialize and train the model (this will take some time)\r\n print(\"Training Word2Vec model...\")\r\n model_name = \"text_analysis/Word_Embeddings/brown_model\"\r\n # model.save(model_name)\r\n model = Word2Vec.load(model_name)\r\n\r\n return model\r\n\r\n def rte_classifier_w2v(self):\r\n (train, test) = RTE_Data.nltk_rte_data()\r\n\r\n # ****** Set parameters and train the word2vec model\r\n #\r\n # Import the built-in logging module and configure it so that Word2Vec\r\n # creates nice output messages\r\n logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', \\\r\n level=logging.INFO)\r\n\r\n # Set values for various parameters\r\n num_features = 100 # Word vector dimensionality\r\n model_w2v = self.word2vec_model()\r\n\r\n (trainDataVecs, train_label, testDataVecs, test_label) = Word2Vec_Vectors.rte_avgVecs(train, test, num_features, model_w2v, self.lstm)\r\n # (trainDataVecs, train_label, testDataVecs, test_label) = Word2Vec_Vectors.rte_cosVecs(train, test, num_features, model_w2v, lstm)\r\n # (trainDataVecs, train_label, testDataVecs, test_label) = Word2Vec_Vectors.rte_featureVecs(train, test, lstm)\r\n # (trainDataVecs, train_label, testDataVecs, test_label) = Word2Vec_Vectors.rte_maxCosVecs(train, test, num_features, model_w2v, lstm)\r\n\r\n print(trainDataVecs.shape)\r\n print(train_label.shape)\r\n print(testDataVecs.shape)\r\n print(test_label.shape)\r\n\r\n model_name = 'text_analysis/textual_entailment/AverageVectors.h5'\r\n\r\n if True:\r\n if self.lstm:\r\n Train_Vectors.trainLSTM(model_name, trainDataVecs, train_label)\r\n else:\r\n Train_Vectors.trainSequential(model_name, trainDataVecs, train_label)\r\n # Train_Vectors.trainMLP(model_name, trainDataVecs, train_label)\r\n # Train_Vectors.trainCNN(model_name, trainDataVecs, train_label)\r\n\r\n model = load_model(model_name)\r\n\r\n # print(trainDataVecs.shape)\r\n # print(\"train_label\", train_label[:10])\r\n\r\n # predict_prob = model.predict(trainDataVecs[:10])\r\n # idx = np.argmax(predict_prob, axis=1)\r\n # proba = np.amax(predict_prob, axis=1)\r\n # result = keras.utils.to_categorical(idx, num_classes=2)\r\n\r\n # predict_prob = model.predict(testDataVecs)\r\n # idx = np.argmax(predict_prob, axis=1)\r\n # proba = np.amax(predict_prob, axis=1)\r\n # result = keras.utils.to_categorical(idx, num_classes=2)\r\n #\r\n print(\"test_label\", test_label[:10])\r\n # print(\"predict_prob\", 
predict_prob[:10])\r\n # print(\"argmax\", idx[:10])\r\n # print(\"proba\", proba[:10])\r\n\r\n if self.lstm:\r\n result = keras.utils.to_categorical(model.predict_classes(testDataVecs), num_classes=2)\r\n else:\r\n result = model.predict_classes(testDataVecs)\r\n print(\"result\", result[:10])\r\n\r\n loss, acc = model.evaluate(testDataVecs, test_label)\r\n # print(model.metrics_names)\r\n print('Test loss:', loss)\r\n print('Test acc:', acc)\r\n\r\n print(metrics.accuracy_score(test_label, result))\r\n print(metrics.classification_report(test_label, result))\r\n\r\n\r\n def rte_classify_w2v(self, text, hyp):\r\n # pair = nltk.corpus.rte.pairs(['rte1_test.xml'])[22]\r\n\r\n # print(pair.value)\r\n # print(pair.text)\r\n # print(pair.hyp)\r\n\r\n num_features = 100 # Word vector dimensionality\r\n model_w2v = self.word2vec_model()\r\n\r\n # input_text = [KaggleWord2VecUtility.review_to_wordlist(text, True)]\r\n # input_hyp = [KaggleWord2VecUtility.review_to_wordlist(hyp, True)]\r\n\r\n # print(input_text)\r\n # print(input_hyp)\r\n\r\n # inputTextDataVecs = Word2Vec_AverageVectors.getAvgFeatureVecs(input_text, model_w2v, num_features, self.lstm)\r\n # inputHypDataVecs = Word2Vec_AverageVectors.getAvgFeatureVecs(input_hyp, model_w2v, num_features, self.lstm)\r\n # inputDataVecs = np.concatenate((inputTextDataVecs, inputHypDataVecs), axis=1)\r\n\r\n # i = np.isnan(inputDataVecs)\r\n # inputDataVecs[i] = 0\r\n\r\n test = [(RTE_Data.Rte_Pair(text, hyp), 1)]\r\n (trainDataVecs, train_label, testDataVecs, test_label) = Word2Vec_Vectors.rte_avgVecs(test, test, num_features, model_w2v, self.lstm)\r\n\r\n model_name = 'text_analysis/textual_entailment/AverageVectors.h5'\r\n model = load_model(model_name)\r\n\r\n print(testDataVecs.shape)\r\n # if self.lstm:\r\n # a = model.predict_classes(testDataVecs)\r\n # print(\"test\",a.shape,a)\r\n # result = keras.utils.to_categorical(model.predict_classes(testDataVecs), num_classes=2)\r\n # else:\r\n # result = model.predict_classes(testDataVecs)\r\n # print(\"result\",result.shape,result)\r\n # print(model.predict_proba(testDataVecs), model.predict_proba(testDataVecs)[0][result[0]])\r\n\r\n predict_prob = model.predict(testDataVecs)\r\n print(\"predict_prob\", predict_prob)\r\n\r\n idx = np.argmax(predict_prob, axis=1)\r\n proba = np.amax(predict_prob, axis=1)\r\n print(idx, proba)\r\n\r\n return {\"text\": text, \"hyp\": hyp, \"entail\": str(idx[0]), \"proba\": str(proba[0])}\r\n\r\n\r\nif __name__ == '__main__':\r\n try:\r\n entail = TextualEntailment()\r\n entail.rte_classifier_w2v()\r\n except:\r\n traceback.print_exc()\r\n\r\n # entail.rte_classify_w2v()\r\n\r\n # RTE_Data.snli_rte_data()\r\n\r\n"
},
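The prediction decoding used by rte_classify_w2v above, in one self-contained example: model.predict returns one softmax row per pair, np.argmax picks the class and np.amax its probability (the probability matrix below is a stand-in, not real model output):

import numpy as np

predict_prob = np.array([[0.2, 0.8],   # pair 1: class 1 (entailment) with p = 0.8
                         [0.9, 0.1]])  # pair 2: class 0 with p = 0.9
idx = np.argmax(predict_prob, axis=1)
proba = np.amax(predict_prob, axis=1)
print(idx, proba)  # [1 0] [0.8 0.9]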
{
"alpha_fraction": 0.5381991863250732,
"alphanum_fraction": 0.5545702576637268,
"avg_line_length": 36.52631759643555,
"blob_id": "06a4bc6620cc7421408a5c705e6e802ce069283a",
"content_id": "c91ba2302a22c334b17bb538318a68585c6210f7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1466,
"license_type": "no_license",
"max_line_length": 165,
"num_lines": 38,
"path": "/textual_entailment/RTE_Data.py",
"repo_name": "binit9/Knowledge-Graph-Covid19",
"src_encoding": "UTF-8",
"text": "import nltk\r\nimport pandas as pd\r\nfrom sklearn import preprocessing\r\n\r\n\r\nclass Rte_Pair:\r\n def __init__(self, text, hyp):\r\n self.text = text\r\n self.hyp = hyp\r\n\r\ndef nltk_rte_data():\r\n \"\"\"\r\n Classify RTEPairs\r\n \"\"\"\r\n train = [(pair, pair.value) for pair in\r\n nltk.corpus.rte.pairs(['rte1_dev.xml', 'rte2_dev.xml',\r\n 'rte3_dev.xml'])]\r\n test = [(pair, pair.value) for pair in\r\n nltk.corpus.rte.pairs(['rte1_test.xml', 'rte2_test.xml',\r\n 'rte3_test.xml'])]\r\n\r\n return (train, test)\r\n\r\ndef snli_rte_data():\r\n # \"C:/Users/shaun.c.dsouza/Documents/ai/text analysis/textanalysis_ai\r\n le = preprocessing.LabelEncoder()\r\n\r\n df_train = pd.read_csv('C:\\\\Users\\\\shaun.c.dsouza\\\\Documents\\\\ai\\\\text analysis\\\\snli_1.0\\\\snli_1.0_train.txt', sep='\\t', header='infer')\r\n i = df_train.isna()\r\n df_train[i] = \"\"\r\n train = [(Rte_Pair(i[1], i[2]), int(i[0])) for i in zip(le.fit_transform(df_train.loc[:, 'entail']), df_train.loc[:, 'sentence1'], df_train.loc[:, 'sentence2'])]\r\n\r\n df_test = pd.read_csv('C:\\\\Users\\\\shaun.c.dsouza\\\\Documents\\\\ai\\\\text analysis\\\\snli_1.0\\\\snli_1.0_test.csv', sep=',', header='infer')\r\n i = df_test.isna()\r\n df_test[i] = \"\"\r\n test = [(Rte_Pair(i[1], i[2]), int(i[0])) for i in zip(df_test.loc[:, 'entail'], df_test.loc[:, 'sentence1'], df_test.loc[:, 'sentence2'])]\r\n\r\n return (train, test)\r\n\r\n"
},
{
"alpha_fraction": 0.6337772011756897,
"alphanum_fraction": 0.6431650519371033,
"avg_line_length": 38.618675231933594,
"blob_id": "853854e0614203242dc10946c2128fd56e8efff6",
"content_id": "b391a36e01546b5fdf16f856f006b205bc696147",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10439,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 257,
"path": "/textual_entailment/Word2Vec_Vectors.py",
"repo_name": "binit9/Knowledge-Graph-Covid19",
"src_encoding": "UTF-8",
"text": "import numpy as np # Make sure that numpy is imported\r\nimport keras\r\nfrom scipy import spatial\r\nfrom text_analysis.textual_entailment.KaggleWord2VecUtility import KaggleWord2VecUtility\r\nfrom text_analysis.textual_entailment import Word2Vec_AverageVectors\r\nfrom text_analysis.textual_entailment import rte_classify\r\n\r\n\r\ndef makeFeatureVec(words, model, num_features):\r\n # Function to average all of the word vectors in a given\r\n # paragraph\r\n #\r\n # Pre-initialize an empty numpy array (for speed)\r\n featureVec = np.zeros((num_features,),dtype=\"float32\")\r\n # featureVec = np.array([], dtype=\"float32\").reshape(num_features,)\r\n # featureVec = None\r\n #\r\n nwords = 0.\r\n #\r\n # Index2word is a list that contains the names of the words in\r\n # the model's vocabulary. Convert it to a set, for speed\r\n index2word_set = set(model.wv.index2word)\r\n #\r\n # Loop over each word in the review and, if it is in the model's\r\n # vocaublary, add its feature vector to the total\r\n # print(len(words))\r\n for word in words:\r\n if word in index2word_set:\r\n # if featureVec is None:\r\n # featureVec = model[word]\r\n # else:\r\n featureVec = np.vstack((featureVec, model[word]))\r\n\r\n return featureVec\r\n\r\n\r\ndef getCosFeatureVecs(text, hyp, model, num_features):\r\n # Initialize a counter\r\n counter = 0.\r\n #\r\n # Preallocate a 2D numpy array\r\n dataVecs = np.zeros((len(text),num_features),dtype=\"float32\")\r\n maxLen = 0\r\n\r\n for (i,j) in zip(text, hyp):\r\n trainTextDataVecs = makeFeatureVec(i, model, num_features)\r\n trainHypDataVecs = makeFeatureVec(j, model, num_features)\r\n\r\n # featureVec = np.zeros((num_features,),dtype=\"float32\")\r\n\r\n featureVec = np.array([spatial.distance.cosine(x, y) for x in trainTextDataVecs for y in trainHypDataVecs])\r\n\r\n # c = 0.\r\n # for x in trainTextDataVecs:\r\n # for y in trainHypDataVecs:\r\n # if x.shape and y.shape:\r\n # featureVec[int(c)] = spatial.distance.cosine(x, y)\r\n\r\n # c = c + 1.\r\n\r\n featureVec = np.pad(featureVec, (0, num_features), 'constant', constant_values=(0, 0))\r\n featureVec = np.resize(featureVec, (num_features,))\r\n\r\n #\r\n # Print a status message every 1000th rte\r\n if counter % 1000. 
== 0.:\r\n print(\"Rte %d of %d\" % (counter, len(text)))\r\n #\r\n # Call the function (defined above) that makes average feature vectors\r\n dataVecs[int(counter)] = featureVec\r\n\r\n #\r\n # Increment the counter\r\n counter = counter + 1.\r\n return dataVecs\r\n\r\ndef getMaxCosFeatureVecs(text, hyp, model, num_features):\r\n # Initialize a counter\r\n counter = 0.\r\n maxLen = 100\r\n #\r\n # Preallocate a 2D numpy array\r\n dataVecs = np.zeros((len(text),maxLen,num_features*2),dtype=\"float32\")\r\n\r\n for (i,j) in zip(text, hyp):\r\n trainTextDataVecs = makeFeatureVec(i, model, num_features)\r\n trainHypDataVecs = makeFeatureVec(j, model, num_features)\r\n\r\n featureVec = np.zeros((num_features*2,),dtype=\"float32\")\r\n\r\n # print(i, len(i), trainTextDataVecs.shape)\r\n # print(j, len(j), trainHypDataVecs.shape)\r\n\r\n if trainTextDataVecs.any() and trainHypDataVecs.any():\r\n for y, elem in enumerate(trainHypDataVecs):\r\n # featureVec = np.vstack((featureVec, elem))\r\n idx = np.argmax(np.array([spatial.distance.cosine(x, elem) for x in trainTextDataVecs]))\r\n # featureVec = np.vstack((featureVec, trainTextDataVecs[idx]))\r\n if y:\r\n # print(y,j[y-1],idx,i[idx-1])\r\n # print(elem.shape)\r\n # print(trainTextDataVecs[idx].shape)\r\n featureVec = np.vstack((featureVec, np.concatenate((elem, trainTextDataVecs[idx]))))\r\n else:\r\n featureVec = np.vstack((featureVec, np.zeros((1,num_features*2))))\r\n\r\n # print(\"featureVec \",featureVec.shape)\r\n featureVec = np.vstack((featureVec, np.zeros((maxLen-featureVec.shape[0],num_features*2),dtype=\"float32\")))\r\n # print(featureVec.shape)\r\n # featureVec = np.pad(featureVec, (0, maxLen, num_features), 'constant', constant_values=(0, 0))\r\n # featureVec = np.resize(featureVec, (num_features,))\r\n\r\n #\r\n # Print a status message every 1000th rte\r\n if counter % 1000. 
== 0.:\r\n print(\"Rte %d of %d\" % (counter, len(text)))\r\n #\r\n # Call the function (defined above) that makes average feature vectors\r\n dataVecs[int(counter)] = featureVec\r\n\r\n #\r\n # Increment the counter\r\n counter = counter + 1.\r\n return dataVecs\r\n\r\n\r\ndef rte_avgVecs(train, test, num_features, model, lstm=False):\r\n print(\"Creating average feature vecs for training reviews\")\r\n\r\n train_text = [KaggleWord2VecUtility.review_to_wordlist(pair.text, True) for (pair, label) in train]\r\n train_hyp = [KaggleWord2VecUtility.review_to_wordlist(pair.hyp, True) for (pair, label) in train]\r\n train_label = np.array([[label] for (pair, label) in train])\r\n\r\n trainTextDataVecs = Word2Vec_AverageVectors.getAvgFeatureVecs(train_text, model, num_features)\r\n trainHypDataVecs = Word2Vec_AverageVectors.getAvgFeatureVecs(train_hyp, model, num_features)\r\n trainDataVecs = np.concatenate((trainTextDataVecs, trainHypDataVecs), axis=1)\r\n\r\n i = np.isnan(trainDataVecs)\r\n trainDataVecs[i] = 0\r\n\r\n print(\"Creating average feature vecs for test reviews\")\r\n\r\n test_text = [KaggleWord2VecUtility.review_to_wordlist(pair.text, True) for (pair, label) in test]\r\n test_hyp = [KaggleWord2VecUtility.review_to_wordlist(pair.hyp, True) for (pair, label) in test]\r\n test_label = np.array([[label] for (pair, label) in test])\r\n\r\n testTextDataVecs = Word2Vec_AverageVectors.getAvgFeatureVecs(test_text, model, num_features)\r\n testHypDataVecs = Word2Vec_AverageVectors.getAvgFeatureVecs(test_hyp, model, num_features)\r\n testDataVecs = np.concatenate((testTextDataVecs, testHypDataVecs), axis=1)\r\n\r\n i = np.isnan(testDataVecs)\r\n testDataVecs[i] = 0\r\n\r\n if lstm:\r\n (m, n) = trainDataVecs.shape\r\n print(trainDataVecs.shape)\r\n trainDataVecs = np.reshape(trainDataVecs, (m, 2, int(n/2)))\r\n (m, n) = testDataVecs.shape\r\n testDataVecs = np.reshape(testDataVecs, (m, 2, int(n/2)))\r\n # exit()\r\n\r\n train_label = keras.utils.to_categorical(train_label)\r\n test_label = keras.utils.to_categorical(test_label)\r\n\r\n return (trainDataVecs, train_label, testDataVecs, test_label)\r\n\r\n\r\ndef rte_cosVecs(train, test, num_features, model, lstm=False):\r\n print(\"Creating feature vecs for training reviews\")\r\n\r\n train_text = [KaggleWord2VecUtility.review_to_wordlist(pair.text, True) for (pair, label) in train]\r\n train_hyp = [KaggleWord2VecUtility.review_to_wordlist(pair.hyp, True) for (pair, label) in train]\r\n train_label = np.array([[label] for (pair, label) in train])\r\n\r\n trainDataVecs = getCosFeatureVecs(train_text, train_hyp, model, num_features)\r\n\r\n i = np.isnan(trainDataVecs)\r\n trainDataVecs[i] = 0\r\n\r\n print(\"Creating feature vecs for test reviews\")\r\n\r\n test_text = [KaggleWord2VecUtility.review_to_wordlist(pair.text, True) for (pair, label) in test]\r\n test_hyp = [KaggleWord2VecUtility.review_to_wordlist(pair.hyp, True) for (pair, label) in test]\r\n test_label = np.array([[label] for (pair, label) in test])\r\n\r\n testDataVecs = getCosFeatureVecs(test_text, train_hyp, model, num_features)\r\n\r\n i = np.isnan(testDataVecs)\r\n testDataVecs[i] = 0\r\n\r\n if lstm:\r\n (m, n) = trainDataVecs.shape\r\n trainDataVecs = np.reshape(trainDataVecs, (m, 1, n))\r\n (m, n) = testDataVecs.shape\r\n testDataVecs = np.reshape(testDataVecs, (m, 1, n))\r\n\r\n train_label = keras.utils.to_categorical(train_label)\r\n test_label = keras.utils.to_categorical(test_label)\r\n\r\n return (trainDataVecs, train_label, testDataVecs, test_label)\r\n\r\ndef 
rte_maxCosVecs(train, test, num_features, model, lstm=False):\r\n print(\"Creating feature vecs for training reviews\")\r\n\r\n train_text = [KaggleWord2VecUtility.review_to_wordlist(pair.text, True) for (pair, label) in train]\r\n train_hyp = [KaggleWord2VecUtility.review_to_wordlist(pair.hyp, True) for (pair, label) in train]\r\n train_label = np.array([[label] for (pair, label) in train])\r\n\r\n trainDataVecs = getMaxCosFeatureVecs(train_text, train_hyp, model, num_features)\r\n # for i in trainDataVecs:\r\n # print(i.shape)\r\n\r\n i = np.isnan(trainDataVecs)\r\n trainDataVecs[i] = 0\r\n\r\n print(\"Creating feature vecs for test reviews\")\r\n\r\n test_text = [KaggleWord2VecUtility.review_to_wordlist(pair.text, True) for (pair, label) in test]\r\n test_hyp = [KaggleWord2VecUtility.review_to_wordlist(pair.hyp, True) for (pair, label) in test]\r\n test_label = np.array([[label] for (pair, label) in test])\r\n\r\n testDataVecs = getMaxCosFeatureVecs(test_text, train_hyp, model, num_features)\r\n # for i in testDataVecs:\r\n # print(i.shape)\r\n\r\n i = np.isnan(testDataVecs)\r\n testDataVecs[i] = 0\r\n\r\n train_label = keras.utils.to_categorical(train_label)\r\n test_label = keras.utils.to_categorical(test_label)\r\n\r\n return (trainDataVecs, train_label, testDataVecs, test_label)\r\n\r\ndef rte_featureVecs(train, test, lstm=False):\r\n print(\"Creating feature vecs for training reviews\")\r\n\r\n trainDataVecs = np.array([rte_classify.rte_features_vector(pair) for (pair, label) in train])\r\n train_label = np.array([[label] for (pair, label) in train])\r\n\r\n print(\"Creating feature vecs for test reviews\")\r\n\r\n testDataVecs = np.array([rte_classify.rte_features_vector(pair) for (pair, label) in test])\r\n test_label = np.array([[label] for (pair, label) in test])\r\n\r\n if lstm:\r\n # trainDataVecs = np.array([[i] for i in trainDataVecs])\r\n # testDataVecs = np.array([[i] for i in testDataVecs])\r\n\r\n (m, n) = trainDataVecs.shape\r\n trainDataVecs = np.reshape(trainDataVecs, (m, 1, n))\r\n (m, n) = testDataVecs.shape\r\n testDataVecs = np.reshape(testDataVecs, (m, 1, n))\r\n\r\n train_label = keras.utils.to_categorical(train_label)\r\n test_label = keras.utils.to_categorical(test_label)\r\n\r\n return (trainDataVecs, train_label, testDataVecs, test_label)\r\n"
},
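The sentence representation behind rte_avgVecs above, reduced to a toy example: each sentence becomes the mean of its word vectors, and a (text, hypothesis) pair is the concatenation of the two means (the two-word vocabulary and its vectors below are invented for illustration):

import numpy as np

emb = {"dog": np.array([1.0, 0.0]), "barks": np.array([0.0, 1.0])}

def avg_vec(words, num_features=2):
    vecs = [emb[w] for w in words if w in emb]  # skip out-of-vocabulary words
    return np.mean(vecs, axis=0) if vecs else np.zeros(num_features)

pair_vec = np.concatenate([avg_vec(["dog", "barks"]), avg_vec(["dog"])])
print(pair_vec)  # [0.5 0.5 1.  0. ]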
{
"alpha_fraction": 0.6697009205818176,
"alphanum_fraction": 0.6801040172576904,
"avg_line_length": 33.04545593261719,
"blob_id": "c2e8e6058fdb4ab8aada2d078814455a8038e4be",
"content_id": "d307d5ad909cd139514a138676592ce399625e59",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 769,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 22,
"path": "/code/coref2.py",
"repo_name": "binit9/Knowledge-Graph-Covid19",
"src_encoding": "UTF-8",
"text": "#import urllib.request\r\n#from bs4 import BeautifulSoup\r\nimport spacy\r\nimport neuralcoref\r\nnlp = spacy.load('en_core_web_lg')\r\nneuralcoref.add_to_pipe(nlp)\r\n\r\n# html = urllib.request.urlopen('https://www.law.cornell.edu/supremecourt/text/418/683').read()\r\n# soup = BeautifulSoup(html, 'html.parser')\r\n\r\n\r\ntext = 'Angela lives in Boston. She is quite happy in that city.'\r\ndoc = nlp(text)\r\nresolved_text = doc._.coref_resolved\r\nprint(resolved_text)\r\n\r\n# sentences = [sent.string.strip() for sent in nlp(resolved_text).sents]\r\n# output = [sent for sent in sentences if 'president' in \r\n # (' '.join([token.lemma_.lower() for token in nlp(sent)]))]\r\n# print('Fact count:', len(output))\r\n# for fact in range(len(output)):\r\n # print(str(fact+1)+'.', output[fact])"
},
{
"alpha_fraction": 0.4758497178554535,
"alphanum_fraction": 0.4919499158859253,
"avg_line_length": 26.052631378173828,
"blob_id": "f462206e4720244d5e4d02569c0e5ea328c39126",
"content_id": "d55ed383367184c5d733924622e57ae7e67a9811",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 559,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 19,
"path": "/textual_entailment/data/rte.py",
"repo_name": "binit9/Knowledge-Graph-Covid19",
"src_encoding": "UTF-8",
"text": "import nltk\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\ntrain = [(pair, pair.value) for pair in\r\n nltk.corpus.rte.pairs(['rte1_dev.xml', 'rte2_dev.xml',\r\n 'rte3_dev.xml'])]\r\n\r\ntest = [(pair, pair.value) for pair in\r\n nltk.corpus.rte.pairs(['rte1_test.xml', 'rte2_test.xml',\r\n 'rte3_test.xml'])]\r\n\r\ndata = []\r\nfor i in test:\r\n data.append((i[0].text, i[0].hyp, i[1]))\r\n\r\ndf = pd.DataFrame(np.array(data))\r\nprint(df)\r\ndf.to_csv('test.csv')\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.6591255068778992,
"alphanum_fraction": 0.6858820915222168,
"avg_line_length": 33.36153793334961,
"blob_id": "4bc5d82448a81933e7ced25a142db812756c20c5",
"content_id": "7022f33826e8514ec446a82d6647e00cd667334a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4597,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 130,
"path": "/textual_entailment/Train_Vectors.py",
"repo_name": "binit9/Knowledge-Graph-Covid19",
"src_encoding": "UTF-8",
"text": "from keras.models import Sequential\r\nfrom keras.layers import Dense, Activation, Dropout, LSTM\r\nfrom keras.optimizers import RMSprop\r\nfrom keras.layers import Embedding\r\nfrom keras.layers import Conv1D, GlobalMaxPooling1D, MaxPooling1D\r\nimport numpy as np\r\n\r\n# https://keras.io/getting-started/sequential-model-guide/\r\n\r\ndef trainSequential(model_name, trainDataVecs, train_label):\r\n model = Sequential()\r\n model.add(Dense(32, activation='relu', input_dim=trainDataVecs.shape[1]))\r\n model.add(Dense(train_label.shape[1], activation='sigmoid'))\r\n model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])\r\n\r\n # x_train and y_train are Numpy arrays --just like in the Scikit-Learn API.\r\n model.fit(trainDataVecs, train_label, epochs=100, batch_size=32)\r\n # loss_and_metrics = model.evaluate(testDataVecs, test_label, batch_size=128)\r\n\r\n model.save(model_name)\r\n del model\r\n\r\ndef trainMLP(model_name, trainDataVecs, train_label):\r\n model = Sequential()\r\n model.add(Dense(64, input_dim=trainDataVecs.shape[1], activation='relu'))\r\n model.add(Dropout(0.5))\r\n model.add(Dense(64, activation='relu'))\r\n model.add(Dropout(0.5))\r\n model.add(Dense(train_label.shape[1], activation='sigmoid'))\r\n model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])\r\n\r\n # x_train and y_train are Numpy arrays --just like in the Scikit-Learn API.\r\n model.fit(trainDataVecs, train_label, epochs=500, batch_size=32)\r\n # loss_and_metrics = model.evaluate(testDataVecs, test_label, batch_size=128)\r\n\r\n model.save(model_name)\r\n del model\r\n\r\ndef trainLSTM(model_name, trainDataVecs, train_label):\r\n model = Sequential()\r\n model.add(LSTM(128, input_shape=trainDataVecs.shape[1:]))\r\n model.add(Dense(train_label.shape[1]))\r\n model.add(Activation('softmax'))\r\n\r\n model.summary()\r\n\r\n optimizer = RMSprop(lr=0.01)\r\n model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])\r\n\r\n # x_train and y_train are Numpy arrays --just like in the Scikit-Learn API.\r\n model.fit(trainDataVecs, train_label, epochs=100, batch_size=32)\r\n # loss_and_metrics = model.evaluate(testDataVecs, test_label, batch_size=128)\r\n\r\n model.save(model_name)\r\n del model\r\n\r\ndef trainLSTM1(model_name, trainDataVecs, train_label):\r\n model = Sequential()\r\n model.add(Embedding(max_features, 128))\r\n model.add(LSTM(128, dropout=0.2, recurrent_dropout=0.2))\r\n model.add(Dense(1, activation='sigmoid'))\r\n\r\n # try using different optimizers and different optimizer configs\r\n model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\r\n\r\n # x_train and y_train are Numpy arrays --just like in the Scikit-Learn API.\r\n model.fit(trainDataVecs, train_label, epochs=100, batch_size=32)\r\n # loss_and_metrics = model.evaluate(testDataVecs, test_label, batch_size=128)\r\n\r\n model.save(model_name)\r\n del model\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef trainMaxCosLSTM(model_name, trainDataVecs, train_label):\r\n model = Sequential()\r\n model.add(LSTM(128, input_shape=(None, 100)))\r\n model.add(Dense(train_label.shape[1]))\r\n model.add(Activation('softmax'))\r\n\r\n optimizer = RMSprop(lr=0.01)\r\n model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])\r\n\r\n # x_train and y_train are Numpy arrays --just like in the Scikit-Learn API.\r\n model.fit(trainDataVecs, train_label, epochs=100, batch_size=32)\r\n # loss_and_metrics = 
model.evaluate(testDataVecs, test_label, batch_size=128)\r\n\r\n model.save(model_name)\r\n del model\r\n\r\n# https://arxiv.org/pdf/1705.09054.pdf\r\n\r\n\r\ndef trainCNN(model_name, trainDataVecs, train_label):\r\n # Embedding\r\n max_features = 20000\r\n maxlen = 200\r\n embedding_size = 128\r\n\r\n # Convolution\r\n kernel_size = 5\r\n filters = 64\r\n pool_size = 4\r\n\r\n # LSTM\r\n lstm_output_size = 70\r\n\r\n # Training\r\n batch_size = 30\r\n epochs = 2\r\n\r\n model = Sequential()\r\n model.add(Embedding(max_features, embedding_size, input_length=maxlen))\r\n model.add(Dropout(0.25))\r\n model.add(Conv1D(filters, kernel_size, padding='valid', activation='relu', strides=1))\r\n model.add(MaxPooling1D(pool_size=pool_size))\r\n model.add(LSTM(lstm_output_size))\r\n model.add(Dense(1))\r\n model.add(Activation('sigmoid'))\r\n\r\n model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\r\n\r\n print('Train...')\r\n model.fit(trainDataVecs, train_label, batch_size=batch_size, epochs=epochs)\r\n\r\n model.save(model_name)\r\n del model\r\n"
},
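Why rte_avgVecs reshapes (m, n) to (m, 2, n/2) when lstm=True: trainLSTM above builds LSTM(128, input_shape=trainDataVecs.shape[1:]), which expects (timesteps, features) per sample, so each pair is fed as a two-step sequence (text vector, then hypothesis vector). A shape-only check, no training:

import numpy as np

m, n = 4, 200                    # 4 pairs; 100-dim text vector + 100-dim hypothesis vector
flat = np.zeros((m, n))
seq = np.reshape(flat, (m, 2, n // 2))
print(seq.shape, seq.shape[1:])  # (4, 2, 100) (2, 100) -> the LSTM's input_shape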
{
"alpha_fraction": 0.6551724076271057,
"alphanum_fraction": 0.6687356233596802,
"avg_line_length": 35.482757568359375,
"blob_id": "35da2211ecc393f1356df9a4634d8cc9f3c06496",
"content_id": "1d7f755bd247d3b8f50bda2eb8a75d890cc590e7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8708,
"license_type": "no_license",
"max_line_length": 381,
"num_lines": 232,
"path": "/textual_entailment/emoji_wordvectors.py",
"repo_name": "binit9/Knowledge-Graph-Covid19",
"src_encoding": "UTF-8",
"text": "\r\nimport numpy as np\r\nfrom emo_utils import *\r\nimport emoji\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\nX_train_h, X_train_t, Y_train_e = read_csv_ht('text_analysis/textual_entailment/data/train.csv', rte=True)\r\nX_test_h, X_test_t, Y_test_e = read_csv_ht('text_analysis/textual_entailment/data/test.csv', rte=True)\r\n\r\n\r\n# In[5]:\r\n\r\nmaxLen = len(max(X_train_h, key=len).split())\r\nprint(maxLen)\r\n\r\nindex = 1\r\nprint(X_train_h[index], label_to_emoji(Y_train_e[index]))\r\n\r\n\r\n\r\nY_oh_train_e = convert_to_one_hot(Y_train_e, C = 2)\r\nY_oh_test_e = convert_to_one_hot(Y_test_e, C = 2)\r\n\r\nindex = 50\r\nprint(Y_train_e[index], \"is converted into one hot\", Y_oh_train_e[index])\r\n\r\n# word_to_index, index_to_word, word_to_vec_map = read_glove_vecs('text_analysis/textual_entailment/data/glove.6B.50d.txt')\r\nword_to_index, index_to_word, word_to_vec_map = read_glove_vecs('text_analysis/Word_Embeddings/glove_100d.txt')\r\n\r\nword = \"cucumber\"\r\nindex = 289846\r\nprint(\"the index of\", word, \"in the vocabulary is\", word_to_index[word])\r\nprint(\"the\", str(index) + \"th word in the vocabulary is\", index_to_word[index])\r\n\r\n\r\n\r\n\r\n\r\nimport numpy as np\r\nnp.random.seed(0)\r\nimport re\r\nimport sys\r\nimport os\r\nsys.path.append(os.path.abspath('.'))\r\nfrom keras.models import Model\r\nfrom keras.layers import Dense, Input, Dropout, LSTM, Activation\r\nfrom keras.layers.embeddings import Embedding\r\nfrom keras.layers.merge import concatenate\r\nfrom keras.preprocessing import sequence\r\nfrom keras.initializers import glorot_uniform\r\nfrom text_analysis.textual_entailment import Train_Vectors_Glove\r\nnp.random.seed(1)\r\n\r\n\r\n# GRADED FUNCTION: sentences_to_indices\r\n\r\ndef sentences_to_indices(X, word_to_index, max_len):\r\n \"\"\"\r\n Converts an array of sentences (strings) into an array of indices corresponding to words in the sentences.\r\n The output shape should be such that it can be given to `Embedding()` (described in Figure 4).\r\n\r\n Arguments:\r\n X -- array of sentences (strings), of shape (m, 1)\r\n word_to_index -- a dictionary containing the each word mapped to its index\r\n max_len -- maximum number of words in a sentence. You can assume every sentence in X is no longer than this.\r\n\r\n Returns:\r\n X_indices -- array of indices corresponding to words in the sentences from X, of shape (m, max_len)\r\n \"\"\"\r\n\r\n m = X.shape[0] # number of training examples\r\n\r\n ### START CODE HERE ###\r\n # Initialize X_indices as a numpy matrix of zeros and the correct shape (≈ 1 line)\r\n X_indices = np.zeros((m, max_len))\r\n\r\n for i in range(m): # loop over training examples\r\n\r\n review_text = re.sub(\"[^a-zA-Z]\", \" \", X[i])\r\n\r\n # Convert the ith training sentence in lower case and split is into words. 
You should get a list of words.\r\n sentence_words = review_text.lower().split()\r\n\r\n # Initialize j to 0\r\n j = 0\r\n\r\n # Loop over the words of sentence_words\r\n for w in sentence_words:\r\n # Set the (i,j)th entry of X_indices to the index of the correct word.\r\n try:\r\n X_indices[i, j] = word_to_index[w]\r\n except:\r\n # X_indices[i, j] = 0\r\n print(\"err \", w)\r\n # print(type(word_to_index[w]))\r\n # print(index_to_word[0])\r\n # Increment j to j + 1\r\n j = j + 1\r\n\r\n ### END CODE HERE ###\r\n\r\n return X_indices\r\n\r\n\r\nX1 = np.array([\"funny lol\", \"lets play baseball\", \"food is ready for you\"])\r\nX1_indices = sentences_to_indices(X1,word_to_index, max_len = 5)\r\nprint(\"X1 =\", X1)\r\nprint(\"X1_indices =\", X1_indices)\r\n\r\n\r\n\r\n\r\n# In[26]:\r\n\r\n# embedding_layer = pretrained_embedding_layer(word_to_vec_map, word_to_index)\r\n# print(\"weights[0][1][3] =\", embedding_layer.get_weights()[0][1][3])\r\n\r\n\r\n# GRADED FUNCTION: Emojify_V2\r\n\r\ndef Emojify_V2(input_shape, word_to_vec_map, word_to_index):\r\n \"\"\"\r\n Function creating the Emojify-v2 model's graph.\r\n\r\n Arguments:\r\n input_shape -- shape of the input, usually (max_len,)\r\n word_to_vec_map -- dictionary mapping every word in a vocabulary into its 50-dimensional vector representation\r\n word_to_index -- dictionary mapping from words to their indices in the vocabulary (400,001 words)\r\n\r\n Returns:\r\n model -- a model instance in Keras\r\n \"\"\"\r\n\r\n ### START CODE HERE ###\r\n # Define sentence_indices as the input of the graph, it should be of shape input_shape and dtype 'int32' (as it contains indices).\r\n sentence_indices = Input(shape=input_shape, dtype='int32')\r\n\r\n # Create the embedding layer pretrained with GloVe Vectors (≈1 line)\r\n embedding_layer = pretrained_embedding_layer(word_to_vec_map, word_to_index)\r\n\r\n # Propagate sentence_indices through your embedding layer, you get back the embeddings\r\n embeddings = embedding_layer(sentence_indices)\r\n\r\n print(\"embeddings\", embeddings.shape)\r\n print(\"sentence_indices\", sentence_indices.shape)\r\n # Propagate the embeddings through an LSTM layer with 128-dimensional hidden state\r\n # Be careful, the returned output should be a batch of sequences.\r\n X = LSTM(128, return_sequences=True)(embeddings)\r\n # Add dropout with a probability of 0.5\r\n X = Dropout(0.5)(X)\r\n # Propagate X trough another LSTM layer with 128-dimensional hidden state\r\n # Be careful, the returned output should be a single hidden state, not a batch of sequences.\r\n X = LSTM(128)(X)\r\n # Add dropout with a probability of 0.5\r\n X = Dropout(0.5)(X)\r\n # Propagate X through a Dense layer with softmax activation to get back a batch of 5-dimensional vectors.\r\n X = Dense(2, activation='softmax')(X)\r\n # Add a softmax activation\r\n X = Activation('softmax')(X)\r\n\r\n print(\"X\", X.shape)\r\n # Create Model instance which converts sentence_indices into X.\r\n model = Model(inputs=sentence_indices, outputs=X)\r\n\r\n ### END CODE HERE ###\r\n\r\n return model\r\n\r\n\r\nmodel = Train_Vectors_Glove.Emojify_V2_cnn((maxLen,), word_to_vec_map, word_to_index)\r\nmodel.summary()\r\n\r\n\r\n# As usual, after creating your model in Keras, you need to compile it and define what loss, optimizer and metrics your are want to use. 
Compile your model using `categorical_crossentropy` loss, `adam` optimizer and `['accuracy']` metrics:\r\n\r\n# In[34]:\r\n\r\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\r\n\r\n\r\n# It's time to train your model. Your Emojifier-V2 `model` takes as input an array of shape (`m`, `max_len`) and outputs probability vectors of shape (`m`, `number of classes`). We thus have to convert X_train (array of sentences as strings) to X_train_indices (array of sentences as list of word indices), and Y_train (labels as indices) to Y_train_oh (labels as one-hot vectors).\r\n\r\n# In[35]:\r\n\r\nX_train_h_indices = sentences_to_indices(X_train_h, word_to_index, maxLen)\r\nX_train_t_indices = sentences_to_indices(X_train_t, word_to_index, maxLen)\r\nY_train_e_oh = convert_to_one_hot(Y_train_e, C = 2)\r\n\r\n\r\n# Fit the Keras model on `X_train_indices` and `Y_train_oh`. We will use `epochs = 50` and `batch_size = 32`.\r\n\r\n# In[36]:\r\n\r\nmodel.fit([X_train_h_indices, X_train_t_indices], Y_train_e_oh, epochs = 50, batch_size = 32, shuffle=True)\r\n\r\n\r\n# Your model should perform close to **100% accuracy** on the training set. The exact accuracy you get may be a little different. Run the following cell to evaluate your model on the test set.\r\n\r\n# In[37]:\r\n\r\nX_test_h_indices = sentences_to_indices(X_test_h, word_to_index, max_len = maxLen)\r\nX_test_t_indices = sentences_to_indices(X_test_t, word_to_index, max_len = maxLen)\r\nY_test_e_oh = convert_to_one_hot(Y_test_e, C = 2)\r\nloss, acc = model.evaluate([X_test_h_indices, X_test_t_indices], Y_test_e_oh)\r\nprint()\r\nprint(\"Test accuracy = \", acc)\r\nexit()\r\n\r\n# You should get a test accuracy between 80% and 95%. Run the cell below to see the mislabelled examples.\r\n\r\n# In[38]:\r\n\r\n# This code allows you to see the mislabelled examples\r\nC = 5\r\ny_test_oh = np.eye(C)[Y_test.reshape(-1)]\r\nX_test_indices = sentences_to_indices(X_test, word_to_index, maxLen)\r\npred = model.predict(X_test_indices)\r\nfor i in range(len(X_test)):\r\n x = X_test_indices\r\n num = np.argmax(pred[i])\r\n if(num != Y_test[i]):\r\n print('Expected emoji:'+ label_to_emoji(Y_test[i]) + ' prediction: '+ X_test[i] + label_to_emoji(num).strip())\r\n\r\n\r\n# Now you can try it on your own example. Write your own sentence below.\r\n\r\n# In[39]:\r\n\r\n# Change the sentence below to see your prediction. Make sure all the words are in the Glove embeddings.\r\nx_test = np.array(['not feeling happy'])\r\nX_test_indices = sentences_to_indices(x_test, word_to_index, maxLen)\r\nprint(x_test[0] +' '+ label_to_emoji(np.argmax(model.predict(X_test_indices))))\r\n\r\n"
}
] | 9 |
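The sentences_to_indices routine in the record above maps each review onto a fixed-length row of vocabulary indices before it reaches the embedding layer. A minimal, self-contained sketch of that padding scheme, using a toy word_to_index dictionary (the real notebook derives the mapping from the GloVe vocabulary, which is not reproduced here):

    import numpy as np

    # Toy vocabulary; the real mapping comes from GloVe (assumption for brevity).
    word_to_index = {"funny": 1, "lol": 2, "lets": 3, "play": 4, "baseball": 5}

    def to_indices(sentences, word_to_index, max_len):
        # One row per sentence, zero-padded (and truncated) to max_len.
        X = np.zeros((len(sentences), max_len), dtype=np.int32)
        for i, sentence in enumerate(sentences):
            for j, w in enumerate(sentence.lower().split()[:max_len]):
                X[i, j] = word_to_index.get(w, 0)  # 0 for out-of-vocabulary words
        return X

    print(to_indices(["funny lol", "lets play baseball"], word_to_index, 5))
    # [[1 2 0 0 0]
    #  [3 4 5 0 0]]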
moChen0607/RBF
|
https://github.com/moChen0607/RBF
|
0d02040a6fb87314360b0f53f1129ed924cea6a2
|
e91fd9137ef465f3803363c1e0fbf4c06492060b
|
d97ce9f15a170eb84b52455df53129fce1d98776
|
refs/heads/master
| 2021-06-10T00:20:53.195174 | 2016-11-24T01:13:33 | 2016-11-24T01:13:33 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6000000238418579,
"alphanum_fraction": 0.6000000238418579,
"avg_line_length": 11.5,
"blob_id": "9b7a6c1879e0cff8d74941c9a3e4be259b2ca52f",
"content_id": "013b5f9df5b9449f0f3cbccb619f5ab5ef1247df",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 50,
"license_type": "permissive",
"max_line_length": 25,
"num_lines": 4,
"path": "/docs/source/nodes.rst",
"repo_name": "moChen0607/RBF",
"src_encoding": "UTF-8",
"text": "Nodes\n-----\n.. automodule:: rbf.nodes\n :members:\n"
},
{
"alpha_fraction": 0.6363636255264282,
"alphanum_fraction": 0.6363636255264282,
"avg_line_length": 19.53333282470703,
"blob_id": "434a1f0a54bbb8b323a0de3e83a2fa9082b44092",
"content_id": "37be12b5e7e1a896f8990027b4a35eab45899d8a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 308,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 15,
"path": "/docs/source/filter.rst",
"repo_name": "moChen0607/RBF",
"src_encoding": "UTF-8",
"text": "Filter\n------\n.. automodule:: rbf.filter\n :members:\n\nExamples\n++++++++\n.. literalinclude:: ../scripts/filter.a.py\n.. image:: ../figures/filter.a.png\n\n.. literalinclude:: ../scripts/filter.b.py\n.. image:: ../figures/filter.b.png\n\n.. literalinclude:: ../scripts/filter.c.py\n.. image:: ../figures/filter.c.png\n"
},
{
"alpha_fraction": 0.4130958020687103,
"alphanum_fraction": 0.5767934322357178,
"avg_line_length": 44.130435943603516,
"blob_id": "15899b29fcf90c882f18afb784b23696b3478d17",
"content_id": "73bb4115b9d258b71a32d510a6a28ce16db4a56a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2077,
"license_type": "permissive",
"max_line_length": 74,
"num_lines": 46,
"path": "/docs/scripts/fd.a.py",
"repo_name": "moChen0607/RBF",
"src_encoding": "UTF-8",
"text": "''' \nIn this script we solve the 2-d wave equation with a RBF-FD scheme\n'''\nimport numpy as np\nfrom rbf.fd import weight_matrix\nfrom rbf.nodes import menodes\nimport matplotlib.pyplot as plt\n\n# define the problem domain\nvert = np.array([[0.762,0.057],[0.492,0.247],[0.225,0.06 ],[0.206,0.056],\n [0.204,0.075],[0.292,0.398],[0.043,0.609],[0.036,0.624],\n [0.052,0.629],[0.373,0.63 ],[0.479,0.953],[0.49 ,0.966],\n [0.503,0.952],[0.611,0.629],[0.934,0.628],[0.95 ,0.622],\n [0.941,0.607],[0.692,0.397],[0.781,0.072],[0.779,0.055]])\nsmp = np.array([[0,1],[1,2],[2,3],[3,4],[4,5],[5,6],[6,7],[7,8],[8,9],\n [9,10],[10,11],[11,12],[12,13],[13,14],[14,15],[15,16],\n [16,17],[17,18],[18,19],[19,0]])\ndt = 0.000025 # time step size\nN = 100000 # total number of nodes\nnodes,smpid = menodes(N,vert,smp) # generate nodes\nboundary, = (smpid>=0).nonzero() # identify boundary nodes\ninterior, = (smpid==-1).nonzero() # identify interior nodes\nD = weight_matrix(nodes[interior],nodes,[[2,0],[0,2]],n=30)\nr = np.linalg.norm(nodes-np.array([0.49,0.46]),axis=1)\nu_prev = 1.0/(1 + (r/0.01)**4) # create initial conditions\nu_curr = 1.0/(1 + (r/0.01)**4)\nfig,axs = plt.subplots(2,2,figsize=(7,7))\naxs = [axs[0][0],axs[0][1],axs[1][0],axs[1][1]]\nfor i in range(15001):\n u_next = dt**2*D.dot(u_curr) + 2*u_curr[interior] - u_prev[interior]\n u_prev[:] = u_curr\n u_curr[interior] = u_next\n if i in [0,5000,10000,15000]: \n ax = axs[[0,5000,10000,15000].index(i)]\n p = ax.scatter(nodes[:,0],nodes[:,1],s=3,c=np.array(u_curr,copy=True),\n edgecolor='none',cmap='viridis',vmin=-0.1,vmax=0.1)\n for s in smp: ax.plot(vert[s,0],vert[s,1],'k-')\n ax.set_aspect('equal')\n ax.get_xaxis().set_visible(False);ax.get_yaxis().set_visible(False)\n ax.set_xlim((0.025,0.975));ax.set_ylim((0.03,0.98))\n ax.text(0.57,0.85,'time : %s\\nnodes : %s' % (np.round(i*dt,1),N),\n transform=ax.transAxes,fontsize=12)\n\nplt.tight_layout() \n#plt.savefig('../figures/fd.a.png')\nplt.show()\n\n"
},
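The fd.a.py script above advances the wave equation with the explicit leapfrog update u_next = dt**2 * D.dot(u_curr) + 2*u_curr - u_prev, where D is the RBF-FD Laplacian. A one-dimensional sketch of the same time stepping, with a plain three-point finite-difference Laplacian standing in for the weight matrix (an assumption made for brevity; the RBF-FD stencils are the point of the original script):

    import numpy as np

    N, dt, dx = 101, 1e-3, 1e-2
    x = np.linspace(0.0, 1.0, N)
    u_prev = np.exp(-((x - 0.5) / 0.05) ** 2)  # initial displacement, zero velocity
    u_curr = u_prev.copy()

    for step in range(1000):
        # Three-point Laplacian on the interior nodes (stands in for D.dot(u_curr)).
        lap = (u_curr[:-2] - 2.0 * u_curr[1:-1] + u_curr[2:]) / dx ** 2
        u_next = dt ** 2 * lap + 2.0 * u_curr[1:-1] - u_prev[1:-1]
        u_prev, u_curr = u_curr, u_curr.copy()
        u_curr[1:-1] = u_next  # endpoints held fixed (Dirichlet boundary)

With wave speed 1, dt/dx = 0.1 keeps the explicit scheme comfortably inside the CFL stability limit, mirroring the small dt the original script uses.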
{
"alpha_fraction": 0.645552396774292,
"alphanum_fraction": 0.6532227993011475,
"avg_line_length": 29.609848022460938,
"blob_id": "754f58b70ac0ed4068c5ce6b17256a0dc910741c",
"content_id": "6bebe99f3384e28755762866338c978c6a302114",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8083,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 264,
"path": "/rbf/interpolate.py",
"repo_name": "moChen0607/RBF",
"src_encoding": "UTF-8",
"text": "''' \nThis module provides a class for RBF interpolation, *RBFInterpolant*. \nThis function has numerous features that are lacking in \n*scipy.interpolate.rbf*. They include:\n \n* variable weights on the data (when creating a smoothed interpolant)\n* more choices of basis functions (you can also easily make your own)\n* analytical differentiation of the interpolant \n* added polynomial terms for improved accuracy\n* prevent extrapolation by masking data that is outside of the \n convex hull defined by the data points\n\nThe RBF interpolant :math:`\\mathbf{f(x^*)}` is defined as\n \n.. math::\n \\mathbf{f(x^*)} = \\mathbf{K(x^*,x)a} + \\mathbf{T(x^*)b}\n \nwhere :math:`\\mathbf{K(x^*,x)}` consists of the RBFs with centers at \n:math:`\\mathbf{x}` evaluated at the interpolation points \n:math:`\\mathbf{x^*}`. :math:`\\mathbf{T(x^*)}` is a polynomial matrix \nwhere each column is a monomial evaluated at the interpolation points. \nThe monomials are those from a Taylor series expansion with a user \nspecified order. :math:`\\mathbf{a}` and :math:`\\mathbf{b}` are \ncoefficients that need to be estimated. The coefficients are found by \nsolving the linear system of equations\n \n.. math::\n (\\mathbf{WK(x,x)} + p\\mathbf{I})\\mathbf{a} + \\mathbf{WT(x)b} = \\mathbf{Wy}\n\n.. math::\n \\mathbf{T^T(x)a} = \\mathbf{0} \n\nwhere :math:`\\mathbf{W}` are the data weights (should be the inverse \nof the data covariance matrix), :math:`\\mathbf{y}` are the observations at \n:math:`\\mathbf{x}`, and :math:`p` is a penalty parameter. With :math:`p=0` \nthe observations are fit perfectly by the interpolant. Increasing\n:math:`p` degrades the fit while improving the smoothness of the \ninterpolant. This formulation closely follows chapter 19.4 of [1] \nand chapter 13.2.1 of [2].\n \nReferences\n----------\n[1] Fasshauer, G., Meshfree Approximation Methods with Matlab, World \nScientific Publishing Co, 2007.\n \n[2] Schimek, M., Smoothing and Regression: Approaches, Computations, \nand Applications. 
John Wiley & Sons, 2000.\n \n'''\nimport numpy as np\nfrom numpy.linalg import pinv\nimport scipy.optimize\nimport scipy.spatial\nimport rbf.basis\nimport rbf.poly\nimport rbf.geometry\n\ndef _coefficient_matrix(x,eps,basis,order):\n ''' \n returns the matrix used to compute the radial basis function \n coefficients\n '''\n # number of observation points and spatial dimensions\n N,D = x.shape\n\n # powers for the additional polynomials\n powers = rbf.poly.powers(order,D)\n # number of polynomial terms\n P = powers.shape[0]\n # allocate array \n A = np.zeros((N+P,N+P))\n A[:N,:N] = basis(x,x,eps=eps)\n Ap = rbf.poly.mvmonos(x,powers)\n A[N:,:N] = Ap.T\n A[:N,N:] = Ap\n return A \n\n\ndef _interpolation_matrix(xitp,x,diff,eps,basis,order):\n ''' \n returns the matrix that maps the coefficients to the function values \n at the interpolation points\n '''\n # number of interpolation points and spatial dimensions\n I,D = xitp.shape\n # number of observation points\n N = x.shape[0]\n # powers for the additional polynomials\n powers = rbf.poly.powers(order,D)\n # number of polynomial terms\n P = powers.shape[0]\n # allocate array \n A = np.zeros((I,N+P))\n A[:,:N] = basis(xitp,x,eps=eps,diff=diff)\n A[:,N:] = rbf.poly.mvmonos(xitp,powers,diff=diff)\n return A\n\n\ndef _in_hull(p, hull):\n ''' \n Tests if points in *p* are in the convex hull made up by *hull*\n '''\n dim = p.shape[1]\n # if there are not enough points in *hull* to form a simplex then \n # return False for each point in *p*.\n if hull.shape[0] <= dim:\n return np.zeros(p.shape[0],dtype=bool)\n \n if dim >= 2:\n hull = scipy.spatial.Delaunay(hull)\n return hull.find_simplex(p)>=0\n else:\n # one dimensional points\n min = np.min(hull)\n max = np.max(hull)\n return (p[:,0] >= min) & (p[:,0] <= max)\n\n\nclass RBFInterpolant(object):\n ''' \n Regularized radial basis function interpolant \n\n Parameters \n ---------- \n x : (N,D) array\n Source points.\n\n value : (N,) array\n Function values at the source points.\n\n sigma : (N,) array, optional\n One standard deviation uncertainty on each observation point.\n \n eps : (N,) array, optional\n Shape parameters for each RBF. this has no effect for odd\n order polyharmonic splines.\n\n basis : rbf.basis.RBF instance, optional\n Radial basis function to use.\n \n extrapolate : bool, optional\n Whether to allows points to be extrapolated outside of a \n convex hull formed by x. If False, then np.nan is returned for \n outside points.\n\n order : int, optional\n Order of added polynomial terms.\n \n penalty : float, optional\n The smoothing parameter. This decreases the size of the RBF \n coefficients while leaving the polynomial terms undamped. Thus \n the endmember for a large penalty parameter will be equivalent \n to polynomial regression.\n\n Notes\n -----\n This function involves solving a dense system of equations, which \n will be prohibitive for large data sets. See *rbf.filter* for \n smoothing large data sets. \n \n With certain choices of basis functions and polynomial orders this \n interpolant is equivalent to a thin-plate spline. For example, if the \n observation space is one-dimensional then a thin-plate spline can be \n obtained with the arguments *basis* = *rbf.basis.phs3* and *order* = \n 1. For two-dimensional observation space a thin-plate spline can be \n obtained with the arguments *basis* = *rbf.basis.phs2* and *order* = \n 1. 
See [2] for additional details on thin-plate splines.\n\n References\n ----------\n [1] Fasshauer, G., Meshfree Approximation Methods with Matlab, World \n Scientific Publishing Co, 2007.\n \n [2] Schimek, M., Smoothing and Regression: Approaches, Computations, \n and Applications. John Wiley & Sons, 2000.\n '''\n def __init__(self,\n x,\n value, \n sigma=None,\n eps=None, \n basis=rbf.basis.phs3,\n order=1, \n extrapolate=True,\n penalty=0.0):\n x = np.asarray(x) \n value = np.asarray(value)\n N,D = x.shape\n P = rbf.poly.count(order,D)\n\n if eps is None:\n eps = np.ones(N)\n else:\n eps = np.asarray(eps)\n\n if sigma is None:\n sigma = np.ones(N)\n else:\n sigma = np.asarray(sigma)\n \n # form matrix for the LHS\n A = _coefficient_matrix(x,eps,basis,order)\n # scale RHS and LHS by weight\n weight = 1.0/sigma**2\n A[:N,:] *= weight[:,None]\n value = value*weight\n # add smoothing along diagonals\n A[range(N),range(N)] += penalty**2\n # add zeros to the RHS for the polynomial constraints\n value = np.concatenate((value,np.zeros(P)))\n # find the radial basis function coefficients\n coeff = np.linalg.solve(A,value)\n\n self.x = x\n self.coeff = coeff\n self.basis = basis\n self.order = order \n self.eps = eps\n self.extrapolate = extrapolate\n\n def __call__(self,xitp,diff=None,max_chunk=100000):\n ''' \n Evaluates the interpolant at *xitp*\n\n Parameters \n ---------- \n xitp : (N,D) array\n Target points.\n\n diff : (D,) int array, optional\n Derivative order for each spatial dimension.\n \n max_chunk : int, optional \n Break *xitp* into chunks with this size and evaluate the \n interpolant for each chunk. Smaller values result in \n decreased memory usage but also decreased speed.\n\n Returns\n -------\n out : (N,) array\n Values of the interpolant at *xitp*\n \n '''\n n = 0\n xitp = np.asarray(xitp) \n #xitp = self.norm(xitp)\n Nitp = xitp.shape[0]\n # allocate output array\n out = np.zeros(Nitp)\n while n < Nitp:\n # xitp indices for this chunk\n idx = range(n,min(n+max_chunk,Nitp))\n A = _interpolation_matrix(xitp[idx],self.x,\n diff,self.eps,\n self.basis,self.order)\n out[idx] = np.einsum('ij,j...->i...',A,self.coeff)\n n += max_chunk\n\n # return zero for points outside of the convex hull if \n # extrapolation is not allowed\n if not self.extrapolate:\n out[~_in_hull(xitp,self.x)] = np.nan\n\n return out\n\n\n"
},
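The docstring above defines the interpolant through the block system K(x,x)a + T(x)b = y together with the constraint T^T(x)a = 0. A compact numpy sketch of assembling and solving that system for the cubic kernel phi(r) = r**3 with a linear polynomial tail and no smoothing penalty (p = 0); phi and the point set here are illustrative, not the module's API:

    import numpy as np

    rng = np.random.RandomState(0)
    x = rng.rand(50, 2)                        # scattered observation points
    y = np.sin(2 * np.pi * x[:, 0]) * x[:, 1]  # observed values

    def phi(r):                                # cubic kernel, r**3
        return r ** 3

    N = x.shape[0]
    r = np.linalg.norm(x[:, None, :] - x[None, :, :], axis=-1)
    K = phi(r)                                 # (N, N) kernel block
    T = np.hstack([np.ones((N, 1)), x])        # monomials 1, x0, x1
    P = T.shape[1]

    # Assemble [[K, T], [T^T, 0]] [a; b] = [y; 0] and solve for the coefficients.
    A = np.block([[K, T], [T.T, np.zeros((P, P))]])
    coeff = np.linalg.solve(A, np.concatenate([y, np.zeros(P)]))
    a, b = coeff[:N], coeff[N:]

    # Evaluate the interpolant at a new point.
    xi = np.array([[0.3, 0.7]])
    f = phi(np.linalg.norm(xi - x, axis=-1)) @ a + np.hstack([[1.0], xi[0]]) @ b

The linear tail is exactly the degree needed to make the cubic (conditionally positive definite) kernel system uniquely solvable, which is why the class defaults to order=1 with phs3.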
{
"alpha_fraction": 0.5462185144424438,
"alphanum_fraction": 0.5462185144424438,
"avg_line_length": 25.33333396911621,
"blob_id": "0e1406f37dfb40d9e5f259ae28f05625ed81d740",
"content_id": "27e68859a3b88899a941581f18d256f90d23ec4c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 238,
"license_type": "permissive",
"max_line_length": 44,
"num_lines": 9,
"path": "/docs/source/fd.rst",
"repo_name": "moChen0607/RBF",
"src_encoding": "UTF-8",
"text": "FD (Radial Basis Function Finite Difference)\n--------------------------------------------\n.. automodule:: rbf.fd\n :members: weights, weight_matrix\n\nExamples\n++++++++\n.. literalinclude:: ../scripts/fd.a.py\n.. image:: ../figures/fd.a.png\n\n"
},
{
"alpha_fraction": 0.7180570363998413,
"alphanum_fraction": 0.7265047430992126,
"avg_line_length": 26.449275970458984,
"blob_id": "7ec07e95b7a9ea00f1361bd173c52457112a2795",
"content_id": "5025977ddc8a33661a785356261772d39e43ada4",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 1894,
"license_type": "permissive",
"max_line_length": 76,
"num_lines": 69,
"path": "/docs/source/index.rst",
"repo_name": "moChen0607/RBF",
"src_encoding": "UTF-8",
"text": ".. RBF documentation master file, created by\n sphinx-quickstart on Tue Oct 18 17:39:42 2016.\n You can adapt this file completely to your liking, but it should at least\n contain the root `toctree` directive.\n\nRBF\n***\nPython package containing the tools necessary for radial basis \nfunction (RBF) applications. Applications include \ninterpolating/smoothing scattered data and solving PDEs over irregular \ndomains. RBF is developed by Trever Hines ([email protected]), \nUniversity of Michigan, and the source code for this project can be \nfound `here <http://www.github.com/treverhines/RBF>`_\n\nFeatures\n--------\n* Efficient functions to evaluate RBFs and their analytically derived \n derivatives\n* Regularized RBF interpolants (including smoothing splines) for\n noisy, scattered, data\n* An algorithm for generating Radial Basis Function Finite Difference \n (RBF-FD) weights\n* RBF-FD Filtering for denoising **BIG**, scattered data\n* Node and stencil generation algorithms for solving PDEs over\n irregular domains\n* Halton sequence generator\n* Computational geometry functions for 1, 2, and 3 spatial dimensions\n\nTable of Contents\n-----------------\n.. toctree::\n :maxdepth: 2\n\n installation\n basis\n interpolate\n fd\n filter\n nodes\n stencil\n geometry\n halton\n\nQuick Demo\n----------\n\nSmoothing Scattered Data\n++++++++++++++++++++++++\n.. literalinclude:: ../scripts/interpolate.a.py\n\nThe above code will produce this plot, which shows the observations as \nscatter points and the smoothed interpolant as the color field.\n\n.. image:: ../figures/interpolate.a.png\n\nSolving PDEs\n++++++++++++\n.. literalinclude:: ../scripts/basis.a.py\n\nThe above code will produce this plot, which shows the collocation \nnodes as black points and the interpolated solution as the color field.\n\n.. image:: ../figures/basis.a.png\n\nIndices and tables\n------------------\n* :ref:`genindex`\n* :ref:`modindex`\n* :ref:`search`\n"
},
{
"alpha_fraction": 0.5731186270713806,
"alphanum_fraction": 0.5830407738685608,
"avg_line_length": 28.97711753845215,
"blob_id": "e024b48591eae108f1ec50d7b21e8da4c21db83b",
"content_id": "b600256c6bc5e25a7572002711d777568197b8f4",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13102,
"license_type": "permissive",
"max_line_length": 87,
"num_lines": 437,
"path": "/rbf/basis.py",
"repo_name": "moChen0607/RBF",
"src_encoding": "UTF-8",
"text": "''' \nThis module defines the commonly used radial basis functions (RBFs) \nshown in the below table. For each RBF expression,\n:math:`r = ||x - c||_2` and :math:`\\epsilon` is a shape parameter.\n:math:`x` and :math:`c` are the evaluation points and RBF centers, \nrespectively. RBFs which are not defined in this module can be created \nwith the *RBF* class.\n\n================================= ============ ======================================\nName Abbreviation Expression\n================================= ============ ======================================\nEighth-order polyharmonic spline phs8 :math:`(\\epsilon r)^8\\log(\\epsilon r)`\nSeventh-order polyharmonic spline phs7 :math:`(\\epsilon r)^7`\nSixth-order polyharmonic spline phs6 :math:`(\\epsilon r)^6\\log(\\epsilon r)`\nFifth-order polyharmonic spline phs5 :math:`(\\epsilon r)^5`\nFourth-order polyharmonic spline phs4 :math:`(\\epsilon r)^4\\log(\\epsilon r)`\nThird-order polyharmonic spline phs3 :math:`(\\epsilon r)^3`\nSecond-order polyharmonic spline phs2 :math:`(\\epsilon r)^2\\log(\\epsilon r)`\nFirst-order polyharmonic spline phs1 :math:`\\epsilon r`\nMultiquadratic mq :math:`(1 + (\\epsilon r)^2)^{1/2}`\nInverse multiquadratic imq :math:`(1 + (\\epsilon r)^2)^{-1/2}`\nInverse quadratic iq :math:`(1 + (\\epsilon r)^2)^{-1}`\nGaussian ga :math:`\\exp(-(\\epsilon r)^2)`\n================================= ============ ======================================\n\n''' \nfrom __future__ import division \nimport sympy \nfrom sympy.utilities.autowrap import ufuncify \nimport numpy as np \nimport warnings \nimport copy\n\n# define global symbolic variables\n_R = sympy.symbols('R')\n_EPS = sympy.symbols('EPS')\n_SYM_TO_NUM = 'cython'\n\n\ndef _check_lambdified_output(fin):\n ''' \n when lambdifying a sympy expression, the output is a scalar if the \n expression is independent of R. This function checks the output of a \n lambdified function and if the output is a scalar then it expands \n the output to the proper output size. The proper output size is \n (N,M) where N is the number of collocation points and M is the \n number of basis functions\n '''\n def fout(*args,**kwargs):\n out = fin(*args,**kwargs)\n x = args[0]\n eps = args[-1]\n if np.isscalar(out):\n arr = np.empty((x.shape[0],eps.shape[0]),dtype=float)\n arr[...] = out\n out = arr\n\n return out\n\n return fout \n\n\ndef _replace_nan(x):\n ''' \n this is orders of magnitude faster than np.nan_to_num\n '''\n x[np.isnan(x)] = 0.0\n return x\n\n\ndef get_R():\n ''' \n returns the symbolic variable for :math:`r` which is used to \n instantiate an *RBF*\n '''\n return copy.deepcopy(_R)\n\n\ndef get_EPS():\n ''' \n returns the symbolic variable for :math:`\\epsilon` which is used to \n instantiate an *RBF*\n '''\n return copy.deepcopy(_EPS)\n\n\ndef set_sym_to_num(package):\n ''' \n controls how the RBF class converts the symbolic expressions to \n numerical expressions\n \n Parameters\n ----------\n package : str\n either 'numpy' or 'cython'. If 'numpy' then the symbolic \n expression is converted using *sympy.lambdify*. If 'cython' then \n the expression if converted using \n *sympy.utilities.autowrap.ufuncify*, which converts the expression \n to cython code and then compiles it. 
Note that there is a ~1 \n second overhead to compile the cython code\n \n '''\n global _SYM_TO_NUM \n if package in ['cython','numpy']:\n _SYM_TO_NUM = package\n else:\n raise ValueError('package must either be \"cython\" or \"numpy\" ') \n \n\nclass RBF(object):\n ''' \n Stores a symbolic expression of a Radial Basis Function (RBF) and \n evaluates the expression numerically when called. \n \n Parameters\n ----------\n expr : sympy expression\n Symbolic expression of the RBF. This must be a function of the \n symbolic variable *R*, which is returned by the function *get_R*. \n *R* is the radial distance to the RBF center. The expression may \n optionally be a function of *EPS*, which is a shape parameter \n obtained by the function *get_EPS*. If *EPS* is not provided then \n *R* is substituted with *R* * *EPS* .\n \n Examples\n --------\n Instantiate an inverse quadratic RBF\n\n >>> R = get_R()\n >>> EPS = get_EPS()\n >>> iq_expr = 1/(1 + (EPS*R)**2)\n >>> iq = RBF(iq_expr)\n \n Evaluate an inverse quadratic at 10 points ranging from -5 to 5. \n Note that the evaluation points and centers are two dimensional \n arrays\n\n >>> x = np.linspace(-5.0,5.0,10)[:,None]\n >>> center = np.array([[0.0]])\n >>> values = iq(x,center)\n \n '''\n def __init__(self,expr): \n if not expr.has(_R):\n raise ValueError('RBF expression must be a function of rbf.basis.R')\n \n if not expr.has(_EPS):\n # if EPS is not in the expression then substitute EPS*R for R\n expr = expr.subs(_R,_EPS*_R)\n \n self.expr = expr\n self.cache = {}\n\n def __call__(self,x,c,eps=None,diff=None):\n ''' \n Evaluates the RBF\n \n Parameters \n ---------- \n x : (N,D) array \n evaluation points\n \n c : (M,D) array \n RBF centers \n \n eps : (M,) array, optional\n shape parameters for each RBF. Defaults to 1.0\n \n diff : (D,) int array, optional\n Tuple indicating the derivative order for each spatial dimension. \n For example, if there are three spatial dimensions then providing \n (2,0,1) would return the RBF after differentiating it twice along \n the first axis and once along the third axis.\n\n Returns\n -------\n out : (N,M) array\n Returns the RBFs with centers *c* evaluated at *x*\n\n Notes\n -----\n This function evaluates the RBF and its derivatives symbolically \n using sympy and then the symbolic expression is converted to a \n numerical function. 
The numerical function is cached and then reused \n when this function is called multiple times with the same derivative \n specification.\n\n '''\n x = np.asarray(x,dtype=float)\n c = np.asarray(c,dtype=float)\n if eps is None:\n eps = np.ones(c.shape[0],dtype=float) \n else: \n eps = np.asarray(eps,dtype=float)\n\n if diff is None:\n diff = (0,)*x.shape[1]\n else:\n # make sure diff is immutable\n diff = tuple(diff)\n\n # make sure the input arguments have the proper dimensions\n if not ((x.ndim == 2) & (c.ndim == 2)):\n raise ValueError(\n 'x and c must be two-dimensional arrays')\n\n if not (x.shape[1] == c.shape[1]):\n raise ValueError(\n 'x and c must have the same number of spatial dimensions')\n\n if not ((eps.ndim == 1) & (eps.shape[0] == c.shape[0])):\n raise ValueError(\n 'eps must be a one-dimensional array with length equal to '\n 'the number of rows in c')\n \n if not (len(diff) == x.shape[1]):\n raise ValueError(\n 'diff must have the same length as the number of spatial '\n 'dimensions in x and c')\n\n # expand to allow for broadcasting\n x = x[:,None,:]\n c = c[None,:,:]\n\n # this does the same thing as np.rollaxis(x,-1) but is much faster\n x = np.einsum('ijk->kij',x)\n c = np.einsum('ijk->kij',c)\n\n # add function to cache if not already\n if diff not in self.cache:\n dim = len(diff)\n c_sym = sympy.symbols('c:%s' % dim)\n x_sym = sympy.symbols('x:%s' % dim) \n r_sym = sympy.sqrt(sum((x_sym[i]-c_sym[i])**2 for i in range(dim)))\n expr = self.expr.subs(_R,r_sym) \n for direction,order in enumerate(diff):\n if order == 0:\n continue\n expr = expr.diff(*(x_sym[direction],)*order)\n\n if _SYM_TO_NUM == 'numpy':\n func = sympy.lambdify(x_sym+c_sym+(_EPS,),expr,'numpy')\n func = _check_lambdified_output(func)\n self.cache[diff] = func\n\n elif _SYM_TO_NUM == 'cython': \n func = ufuncify(x_sym+c_sym+(_EPS,),expr)\n self.cache[diff] = func\n \n args = (tuple(x)+tuple(c)+(eps,)) \n return self.cache[diff](*args)\n \n_FUNCTION_DOC = ''' \n Parameters \n ---------- \n x : (N,D) array \n evaluation points\n \n c : (M,D) array \n RBF centers \n \n eps : (M,) array, optional\n shape parameters for each RBF. Defaults to 1.0\n \n diff : (D,) int array, optional\n Tuple indicating the derivative order for each spatial dimension. \n For example, if there are three spatial dimensions then providing \n (2,0,1) would return the RBF after differentiating it twice along \n the first axis and once along the third axis.\n\n Returns\n -------\n out : (N,M) array\n Returns the RBFs with centers *c* evaluated at *x*\n\n Notes\n -----\n This function evaluates the RBF and its derivatives symbolically \n using sympy and then the symbolic expression is converted to a \n numerical function. The numerical function is cached and then reused \n when this function is called multiple times with the same derivative \n specification.\n\n'''\n\n\n_PHS8 = RBF((_EPS*_R)**8*sympy.log(_EPS*_R))\ndef phs8(*args,**kwargs):\n ''' \n Eighth-order polyharmonic spline \n '''\n # division by zero errors may occur for R=0. Ignore warnings and\n # replace nan's with zeros\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n return _replace_nan(_PHS8(*args,**kwargs))\n\nphs8.__doc__ += _FUNCTION_DOC \n\n\n_PHS7 = RBF((_EPS*_R)**7)\ndef phs7(*args,**kwargs):\n ''' \n Seventh-order polyharmonic spline \n '''\n # division by zero errors may occur for R=0. 
Ignore warnings and\n # replace nan's with zeros\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n return _replace_nan(_PHS7(*args,**kwargs))\n\nphs7.__doc__ += _FUNCTION_DOC\n\n\n_PHS6 = RBF((_EPS*_R)**6*sympy.log(_EPS*_R))\n\ndef phs6(*args,**kwargs):\n ''' \n Sixth-order polyharmonic spline \n '''\n # division by zero errors may occur for R=0. Ignore warnings and\n # replace nan's with zeros\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n return _replace_nan(_PHS6(*args,**kwargs))\n\nphs6.__doc__ += _FUNCTION_DOC\n\n\n_PHS5 = RBF((_EPS*_R)**5)\ndef phs5(*args,**kwargs):\n ''' \n Fifth-order polyharmonic spline\n '''\n # division by zero errors may occur for R=0. Ignore warnings and\n # replace nan's with zeros\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n return _replace_nan(_PHS5(*args,**kwargs))\n\nphs5.__doc__ += _FUNCTION_DOC\n\n\n_PHS4 = RBF((_EPS*_R)**4*sympy.log(_EPS*_R))\ndef phs4(*args,**kwargs):\n ''' \n Fourth-order polyharmonic spline \n '''\n # division by zero errors may occur for R=0. Ignore warnings and\n # replace nan's with zeros\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n return _replace_nan(_PHS4(*args,**kwargs))\n\nphs4.__doc__ += _FUNCTION_DOC\n\n\n_PHS3 = RBF((_EPS*_R)**3)\ndef phs3(*args,**kwargs):\n ''' \n Third-order polyharmonic spline\n '''\n # division by zero errors may occur for R=0. Ignore warnings and\n # replace nan's with zeros\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n return _replace_nan(_PHS3(*args,**kwargs))\n\nphs3.__doc__ += _FUNCTION_DOC\n\n\n_PHS2 = RBF((_EPS*_R)**2*sympy.log(_EPS*_R))\ndef phs2(*args,**kwargs):\n ''' \n Second-order polyharmonic spline\n '''\n # division by zero errors may occur for R=0. Ignore warnings and\n # replace nan's with zeros\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n return _replace_nan(_PHS2(*args,**kwargs))\n\nphs2.__doc__ += _FUNCTION_DOC\n\n\n_PHS1 = RBF(_EPS*_R)\ndef phs1(*args,**kwargs):\n ''' \n First-order polyharmonic spline\n '''\n # division by zero errors may occur for R=0. Ignore warnings and\n # replace nan's with zeros\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n return _replace_nan(_PHS1(*args,**kwargs))\n\nphs1.__doc__ += _FUNCTION_DOC\n\n\n_IMQ = RBF(1/sympy.sqrt(1+(_EPS*_R)**2))\ndef imq(*args,**kwargs):\n ''' \n Inverse multiquadratic\n '''\n return _IMQ(*args,**kwargs)\n\nimq.__doc__ += _FUNCTION_DOC\n\n\n_IQ = RBF(1/(1+(_EPS*_R)**2))\ndef iq(*args,**kwargs):\n ''' \n Inverse quadratic\n ''' \n return _IQ(*args,**kwargs)\n\niq.__doc__ += _FUNCTION_DOC\n\n\n_GA = RBF(sympy.exp(-(_EPS*_R)**2))\ndef ga(*args,**kwargs):\n ''' \n Gaussian\n '''\n return _GA(*args,**kwargs)\n\nga.__doc__ += _FUNCTION_DOC\n\n\n_MQ = RBF(sympy.sqrt(1 + (_EPS*_R)**2))\ndef mq(*args,**kwargs):\n ''' \n Multiquadratic\n '''\n return _MQ(*args,**kwargs)\n\nmq.__doc__ += _FUNCTION_DOC\n\n\n"
},
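The RBF class above builds each requested derivative symbolically and converts it to a numerical function on first use. A small sketch of that lambdify path for a one-dimensional Gaussian kernel, assuming only sympy and numpy (the per-derivative cache and the cython/ufuncify path of the real module are omitted):

    import numpy as np
    import sympy

    x0, c0, eps = sympy.symbols('x0 c0 eps')
    r = sympy.sqrt((x0 - c0) ** 2)
    expr = sympy.exp(-(eps * r) ** 2)   # Gaussian RBF in one dimension

    # Differentiate once with respect to the evaluation coordinate, then lambdify.
    dexpr = expr.diff(x0)
    f = sympy.lambdify((x0, c0, eps), expr, 'numpy')
    df = sympy.lambdify((x0, c0, eps), dexpr, 'numpy')

    pts = np.linspace(-2.0, 2.0, 5)
    print(f(pts, 0.0, 1.0))    # kernel values centered at 0
    print(df(pts, 0.0, 1.0))   # analytic first derivative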
{
"alpha_fraction": 0.5855728387832642,
"alphanum_fraction": 0.6607732176780701,
"avg_line_length": 32.650794982910156,
"blob_id": "7ee79265939c96a602c02a10501502585ccf8951",
"content_id": "7b73359f63111dfaefc026fb58ce72705a176455",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 4242,
"license_type": "permissive",
"max_line_length": 84,
"num_lines": 126,
"path": "/README.rst",
"repo_name": "moChen0607/RBF",
"src_encoding": "UTF-8",
"text": "RBF\n***\nPython package containing the tools necessary for radial basis \nfunction (RBF) applications. Applications include \ninterpolating/smoothing scattered data and solving PDEs over irregular\ndomains. The complete documentation for this package can be found \n`here <http://rbf.readthedocs.io>`_.\n\nFeatures\n--------\n* Efficient functions to evaluate RBFs and their analytically derived \n derivatives\n* Regularized RBF interpolants (including smoothing splines) for \n noisy, scattered, data\n* An algorithm for generating Radial Basis Function Finite Difference \n (RBF-FD) weights\n* RBF-FD Filtering for denoising **BIG**, scattered data\n* Node and stencil generation algorithms for solving PDEs over\n irregular domains\n* Halton sequence generator\n* Computational geometry functions for 1, 2, and 3 spatial dimensions\n\nQuick Demo\n----------\n\nSmoothing Scattered Data\n++++++++++++++++++++++++\n.. code-block:: python\n\n ''' \n In this example we generate synthetic scattered data with added noise \n and then fit it with a smoothed interpolant. See rbf.filter for \n smoothing large data sets.\n '''\n import numpy as np\n from rbf.interpolate import RBFInterpolant\n import matplotlib.pyplot as plt\n np.random.seed(1)\n\n # create noisy data\n x_obs = np.random.random((100,2)) # observation points\n u_obs = np.sin(2*np.pi*x_obs[:,0])*np.cos(2*np.pi*x_obs[:,1])\n u_obs += np.random.normal(0.0,0.2,100)\n\n # create smoothed interpolant\n I = RBFInterpolant(x_obs,u_obs,penalty=0.001)\n\n # create interpolation points\n x_itp = np.random.random((10000,2))\n u_itp = I(x_itp)\n\n plt.tripcolor(x_itp[:,0],x_itp[:,1],u_itp,vmin=-1.1,vmax=1.1,cmap='viridis')\n plt.scatter(x_obs[:,0],x_obs[:,1],s=100,c=u_obs,vmin=-1.1,vmax=1.1,cmap='viridis')\n plt.xlim((0.05,0.95))\n plt.ylim((0.05,0.95))\n plt.colorbar()\n plt.tight_layout()\n plt.savefig('../figures/interpolate.a.png')\n plt.show()\n\n\nThe above code will produce this plot, which shows the observations as\nscatter points and the smoothed interpolant as the color field.\n\n.. image:: docs/figures/interpolate.a.png\n\nSolving PDEs\n++++++++++++\n.. 
code-block:: python\n\n ''' \n In this example we solve the Poisson equation with a constant forcing \n term using the spectral RBF method.\n '''\n import numpy as np\n from rbf.basis import phs3\n from rbf.domain import circle\n from rbf.nodes import menodes\n import matplotlib.pyplot as plt\n\n # define the problem domain\n vert = np.array([[0.762,0.057],[0.492,0.247],[0.225,0.06 ],[0.206,0.056],\n [0.204,0.075],[0.292,0.398],[0.043,0.609],[0.036,0.624],\n [0.052,0.629],[0.373,0.63 ],[0.479,0.953],[0.49 ,0.966],\n [0.503,0.952],[0.611,0.629],[0.934,0.628],[0.95 ,0.622],\n [0.941,0.607],[0.692,0.397],[0.781,0.072],[0.779,0.055]])\n\n smp = np.array([[0,1],[1,2],[2,3],[3,4],[4,5],[5,6],[6,7],[7,8],[8,9],\n [9,10],[10,11],[11,12],[12,13],[13,14],[14,15],[15,16],\n [16,17],[17,18],[18,19],[19,0]])\n\n N = 500 # total number of nodes\n nodes,smpid = menodes(N,vert,smp) # generate nodes\n boundary, = (smpid>=0).nonzero() # identify boundary nodes\n interior, = (smpid==-1).nonzero() # identify interior nodes\n\n # create left-hand-side matrix and right-hand-side vector\n A = np.empty((N,N))\n A[interior] = phs3(nodes[interior],nodes,diff=[2,0])\n A[interior] += phs3(nodes[interior],nodes,diff=[0,2])\n A[boundary,:] = phs3(nodes[boundary],nodes)\n d = np.empty(N)\n d[interior] = -100.0\n d[boundary] = 0.0\n\n # Solve the PDE\n coeff = np.linalg.solve(A,d) # solve for the RBF coefficients\n itp = menodes(10000,vert,smp)[0] # interpolation points\n soln = phs3(itp,nodes).dot(coeff) # evaluate at the interp points\n\n fig,ax = plt.subplots()\n p = ax.scatter(itp[:,0],itp[:,1],s=20,c=soln,edgecolor='none',cmap='viridis')\n ax.set_aspect('equal')\n ax.plot(nodes[:,0],nodes[:,1],'ko',markersize=4)\n ax.set_xlim((0.025,0.975))\n ax.set_ylim((0.03,0.98))\n plt.colorbar(p,ax=ax)\n plt.tight_layout()\n plt.savefig('../figures/basis.a.png')\n plt.show()\n\n\nThe above code will produce this plot, which shows the collocation\nnodes as black points and the interpolated solution as the color field.\n\n.. image:: docs/figures/basis.a.png\n\n\n"
}
] | 8 |
dvogt/salt-check
|
https://github.com/dvogt/salt-check
|
e8162e76da088c978125a674df834e15015e66a3
|
158a887e47bbd63eb8e801dbb9da9fdaea20425e
|
dd89137ef8f5dd9db0fb8347fe0c3d0e699ec8e2
|
refs/heads/master
| 2020-03-21T11:51:23.556008 | 2018-06-25T00:03:18 | 2018-06-25T00:03:18 | 138,524,951 | 0 | 1 | null | 2018-06-25T00:17:21 | 2018-06-25T00:03:28 | 2018-06-25T00:03:26 | null |
[
{
"alpha_fraction": 0.5643648505210876,
"alphanum_fraction": 0.5831202268600464,
"avg_line_length": 34.54545593261719,
"blob_id": "20949a4e4c13e8eab260f817a479136e4b65a185",
"content_id": "64a661192f30d3f1cfff3342192a72d0dc90471f",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 1173,
"license_type": "permissive",
"max_line_length": 140,
"num_lines": 33,
"path": "/Dockerfile",
"repo_name": "dvogt/salt-check",
"src_encoding": "UTF-8",
"text": "############################################################\n# Dockerfile to build a saltcheck testing environment\n# Based on Ubuntu\n############################################################\n\n# Set the base image to Ubuntu\nFROM ubuntu:16.04\n\n# SaltStack version\nENV SALT_VERSION=2017.7.5\n\n# File Author / Maintainer\nLABEL maintainer=\"William Cannon\"\n\n################## BEGIN INSTALLATION ######################\n# Install salt-minion\n# Ref: https://repo.saltstack.com/#ubuntu \n############################################################\nRUN apt-get update && apt-get install -y wget sudo python-pip vim-nox\n\n# Upgrade pip to latest version\nRUN pip install --upgrade pip\n\n# Add salt repo key\nRUN wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/archive/${SALT_VERSION}/SALTSTACK-GPG-KEY.pub | apt-key add -\n\n# Add salt repo into apt sources\nRUN echo \"deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/archive/${SALT_VERSION} xenial main\" > /etc/apt/sources.list.d/saltstack.list\n\n# Update the repository sources list once more and install salt-minion\nRUN apt-get update && apt-get install -y salt-minion\n\n################## END INSTALLATION ######################\n"
},
{
"alpha_fraction": 0.6175000071525574,
"alphanum_fraction": 0.659500002861023,
"avg_line_length": 39,
"blob_id": "f8d39904544af5b152be352756b88c36d262c9af",
"content_id": "9b10b76f46fbe0242772cbd84524e5abc3281279",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 2000,
"license_type": "permissive",
"max_line_length": 126,
"num_lines": 50,
"path": "/vagrant_files/Vagrantfile-2-servers-ubuntu16.04-salt-develop-branch",
"repo_name": "dvogt/salt-check",
"src_encoding": "UTF-8",
"text": "# -*- mode: ruby -*-\n# vi: set ft=ruby :\n\n# All Vagrant configuration is done below. The \"2\" in Vagrant.configure\n# configures the configuration version (we support older styles for\n# backwards compatibility). Please don't change it unless you know what\n# you're doing.\nVagrant.configure(\"2\") do |config|\n config.vm.box = \"ubuntu/xenial64\"\n\n config.vm.define \"master\" do |master|\n master.vm.hostname = \"master\"\n master.vm.network \"private_network\", ip: \"192.168.1.10\"\n master.vm.synced_folder \"~/code/saltstack-clone\", \"/saltstack_repo\"\n master.vm.provider \"virtualbox\" do |vb|\n vb.memory = \"2048\"\n end\n master.vm.provision \"shell\", inline: <<-SHELL\n echo '192.168.1.10 salt' >> /etc/hosts\n apt-get update\n apt-get upgrade -y\n #wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/latest/SALTSTACK-GPG-KEY.pub | sudo apt-key add -\n #echo 'deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2018.3 xenial main' > /etc/apt/sources.list.d/saltstack.list\n apt-get update\n #apt-get install -y salt-master salt-minion\n #ln -s /saltcheck_repo/pillar/ /srv/pillar\n #ln -s /saltcheck_repo/salt/ /srv/salt\n #sleep 15\n #salt-key -y -a ubuntu-xenial \n SHELL\n end\n\n config.vm.define \"minion\" do |minion|\n minion.vm.hostname = \"minion\"\n minion.vm.network \"private_network\", ip: \"192.168.1.11\"\n minion.vm.synced_folder \"~/code/saltstack-clone\", \"/saltstack_repo\"\n minion.vm.provider \"virtualbox\" do |vb|\n vb.memory = \"1024\"\n end\n minion.vm.provision \"shell\", inline: <<-SHELL\n echo '192.168.1.10 salt' >> /etc/hosts\n apt-get update\n apt-get upgrade -y\n #wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/latest/SALTSTACK-GPG-KEY.pub | sudo apt-key add -\n #echo 'deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2018.3 xenial main' > /etc/apt/sources.list.d/saltstack.list\n apt-get update\n #apt-get install -y salt-minion\n SHELL\n end\nend\n"
},
{
"alpha_fraction": 0.6030150651931763,
"alphanum_fraction": 0.6121516823768616,
"avg_line_length": 26.708860397338867,
"blob_id": "7ad33729d9200776d4ec474ae60761398ccc1cca",
"content_id": "02433738638f673835f5121066c1b240527ef264",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2189,
"license_type": "permissive",
"max_line_length": 231,
"num_lines": 79,
"path": "/docker-how-to.md",
"repo_name": "dvogt/salt-check",
"src_encoding": "UTF-8",
"text": "This shows how to run docker container to test new functions with saltcheck\n\n# Build local docker image\n```\nsudo docker build -t wcannon/saltcheck:1.0 .\n```\n\n# Run docker local docker container after cd'ing into cloned repo\n```\nsudo docker run --name salt-check --add-host=salt:127.0.0.1 --rm -it -v ${PWD}/salt:/srv/salt/ -v ${PWD}/pillar:/srv/pillar -v ${PWD}/minion_config/minion:/etc/salt/minion wcannon/saltcheck:1.0 bash\n```\n\n> mapping the minion file to /etc/salt/minion means we don't need --local flag\n\n# Synchronize all custom modules\n```\nsalt-call saltutil.sync_all\n```\n\n# Check saltcheck documentaion examples\n```\nsalt-call saltcheck -d\n```\n\n# Run single state tests (example apache)\n```\nsalt-call saltcheck.run_state_tests apache\n```\n\n# Run single state tests with junit output (example apache)\n```\nsalt-call saltcheck.run_state_tests apache --out saltcheck_junit\n```\n\n# Run high states tests\n```\nsalt-call saltcheck.run_highstate_tests\n```\n\n# Run high states tests with junit output\n```\nsalt-call saltcheck.run_highstate_tests --out saltcheck_junit\n```\n\n# Simple Jenkinsfile with multiple steps pipeline\n```\npipeline {\n agent {\n label 'your-node-with-docker'\n }\n stages {\n stage('Checkout') {\n steps {\n git 'https://github.com/dawidmalina/salt-check.git'\n sh 'docker build -t wcannon/saltcheck:1.0 .'\n }\n }\n stage('Prepare') {\n steps {\n // start container\n sh 'docker run --name salt-check --add-host=salt:127.0.0.1 -d -v ${ORG_PATH}/salt:/srv/salt/ -v ${PWD}/pillar:/srv/pillar -v ${ORG_PATH}/minion_config/minion:/etc/salt/minion wcannon/saltcheck:1.0 tail -f /dev/null'\n sh 'docker exec -t salt-check salt-call saltutil.sync_all'\n }\n }\n stage('Test') {\n steps {\n sh 'docker exec -t salt-check salt-call saltcheck.run_highstate_tests --out saltcheck_junit > saltcheck-report.xml'\n junit '**/*.xml'\n }\n }\n }\n post {\n always {\n echo 'Cleanup test container!'\n sh 'docker rm -f salt-check'\n }\n }\n}\n```\n"
},
{
"alpha_fraction": 0.8021978139877319,
"alphanum_fraction": 0.8021978139877319,
"avg_line_length": 44.5,
"blob_id": "82d8311ff4a590311e9cc1b3718dfdf027a4d1e8",
"content_id": "828f8e038f1d3e48ec768336a8c1e6dc19d32195",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 91,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 2,
"path": "/README.md",
"repo_name": "dvogt/salt-check",
"src_encoding": "UTF-8",
"text": "# salt-check\nThis project is dedicated to testing the logic of salt states, and highstates\n"
},
{
"alpha_fraction": 0.5639923810958862,
"alphanum_fraction": 0.5768863558769226,
"avg_line_length": 18.754716873168945,
"blob_id": "09562590f5d515e68f37140c23522fc6eda1477c",
"content_id": "0440dff2b1d7d6141356c7a4f4fe67431a140d19",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2094,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 106,
"path": "/salt/_modules/saltcheck_returns.py",
"repo_name": "dvogt/salt-check",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n'''\nA module to enable easy functional testing of saltcheck\n\nThis module will make it easy to request data for testing saltcheck assertions\n\n:codeauthor: William Cannon <[email protected]>\n:maturity: new\n'''\n\n__virtualname__ = 'saltcheck_returns'\n\n\ndef __virtual__():\n '''\n Check dependencies - may be useful in future\n '''\n return __virtualname__\n\n''' return types to support, all will have a default value:\n string\n int\n float\n list\n dictionary - given dict\n empty - no value returned at all\n notempty - given string\n bool - True | False\n'''\n \n\ndef get_string(given_val=\"TestString\"):\n '''\n Return the given string\n\n CLI Example:\n salt '*' saltcheck_returns.get_string 'some-value-here'\n '''\n return given_val\n\ndef get_int(given_val=789):\n '''\n Return the given int\n\n CLI Example:\n salt '*' saltcheck_returns.get_int 777\n '''\n try:\n val = int(given_val)\n except:\n val = given_val\n return val\n\ndef get_float(given_val=600.245):\n '''\n Return the given float\n\n CLI Example:\n salt '*' saltcheck_returns.get_float 22.345\n '''\n try:\n val = float(given_val)\n except:\n val = given_val\n return val\n\ndef get_list(given_val=['one', 'two', 'three']):\n '''\n Return the given list\n\n CLI Example:\n salt '*' saltcheck_returns.get_list '[\"a\", \"b\", \"c\"]'\n '''\n return given_val\n\ndef get_dict(given_val={'one':1, 'two':2, 'three':3}):\n '''\n Return the given dict\n\n CLI Example:\n salt '*' saltcheck_returns.get_dict '{\"a\":12, \"b\":13, \"c\":14}'\n '''\n return given_val\n\ndef get_empty():\n '''\n Return nothing\n\n CLI Example:\n salt '*' saltcheck_returns.get_empty'\n '''\n return\n\ndef get_bool(given_val=True):\n '''\n Return a bool, interprets True|False using python logic\n\n CLI Example:\n salt '*' saltcheck_returns.get_bool True'\n salt '*' saltcheck_returns.get_bool False'\n '''\n try:\n val = bool(given_val)\n except:\n val = True\n return val\n"
},
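One subtlety in get_bool above: Python's bool() returns True for any non-empty string, so a caller passing the string 'False' on the CLI would still receive True. A defensive sketch of string-aware boolean parsing (parse_bool is a hypothetical helper, not part of the module):

    def parse_bool(value):
        # Strings are compared textually; everything else uses Python truthiness.
        if isinstance(value, str):
            return value.strip().lower() not in ('false', 'no', '0', '')
        return bool(value)

    assert parse_bool('False') is False
    assert parse_bool('true') is True
    assert parse_bool(0) is False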
{
"alpha_fraction": 0.48665955662727356,
"alphanum_fraction": 0.4887940287590027,
"avg_line_length": 43.60317611694336,
"blob_id": "dc95f8eb1d7f856dbedc297e5d9330e8c1aa5e33",
"content_id": "d09cbf1e19b3c5a56762062ba7077e27e1c9fa83",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2811,
"license_type": "permissive",
"max_line_length": 106,
"num_lines": 63,
"path": "/salt/_output/saltcheck_junit.py",
"repo_name": "dvogt/salt-check",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nimport logging\nimport yaml\nimport xml.etree.ElementTree as xml\nimport xml.dom.minidom as minidom\nlog = logging.getLogger(__name__)\n\ndef _text_node(name, text=None):\n element = xml.Element(name)\n if text:\n element.text = text\n return element\n\ndef _test_case(classname, name, time=False):\n element = xml.Element('testcase')\n element.attrib['name'] = name\n element.attrib['classname'] = classname\n if time is not False:\n element.attrib['time'] = time\n return element\n\ndef output(data):\n root = xml.Element('testsuite')\n for d_element in data:\n output = data[d_element]\n for o_element in output:\n for key in o_element:\n value = o_element[key]\n if key == 'TEST RESULTS':\n root.attrib['name'] = 'Saltcheck state(s) junit tests results'\n root.attrib['errors'] = '0'\n root.attrib['failures'] = str(value['Failed'])\n root.attrib['skipped'] = str(value['Missing Tests'] + value['Skipped'])\n root.attrib['tests'] = str(value['Passed'] + value['Failed'] + value['Missing Tests'])\n root.attrib['time'] = str(value['Execution Time'])\n else:\n if len(value) == 0:\n test_case_element = _test_case(('state.' + key), 'Undefined', '0.0')\n skipped_element = _text_node('skipped')\n test_case_element.append(skipped_element)\n root.append(test_case_element)\n else:\n for case in value:\n result = value[case]['status']\n time = str(value[case]['duration'])\n if result == 'Pass':\n test_case_element = _test_case(('state.' + key), case, time)\n root.append(test_case_element)\n elif result == 'Skip':\n test_case_element = _test_case(('state.' + key), case, time)\n skipped_element = _text_node('skipped')\n test_case_element.append(skipped_element)\n root.append(test_case_element)\n else:\n test_case_element = _test_case(('state.' + key), case, time)\n failure_element = _text_node('failure', result)\n test_case_element.append(failure_element)\n root.append(test_case_element)\n\n output = minidom.parseString(xml.tostring(root, encoding='utf8', method='xml'))\n\n return output.toprettyxml()\n\n"
},
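The outputter above assembles JUnit XML by hand with xml.etree.ElementTree. A minimal sketch of the same pattern, emitting one passing and one failing testcase (the attribute names follow the common JUnit schema, which the module also assumes):

    import xml.etree.ElementTree as xml
    import xml.dom.minidom as minidom

    suite = xml.Element('testsuite', name='example', tests='2', failures='1',
                        errors='0', skipped='0', time='0.5')

    # A passing case is just a bare <testcase> element.
    xml.SubElement(suite, 'testcase', classname='state.apache',
                   name='pkg_installed', time='0.2')

    # A failing case carries a nested <failure> node with the reason as text.
    bad = xml.SubElement(suite, 'testcase', classname='state.apache',
                         name='service_running', time='0.3')
    failure = xml.SubElement(bad, 'failure')
    failure.text = 'expected True, got False'

    print(minidom.parseString(xml.tostring(suite)).toprettyxml())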
{
"alpha_fraction": 0.5724502801895142,
"alphanum_fraction": 0.5829333066940308,
"avg_line_length": 32.03236389160156,
"blob_id": "c193de9be4b4792ef31c51748eeb2d5a89a02814",
"content_id": "b2c157bf9ab1f8679cdf257d27e0d2181a9c131e",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10207,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 309,
"path": "/test/salt_check_test.py",
"repo_name": "dvogt/salt-check",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nimport unittest\nimport sys, os, os.path\nimport yaml\nsys.path.append(os.path.abspath(sys.path[0]) + '/../')\nfrom salt_check import SaltCheck\nfrom salt_check import StateTestLoader\n\n# Note: the order tests are run is arbitrary!\n\nclass MyClass2(unittest.TestCase):\n\n def setUp(self):\n self.st = StateTestLoader(\"/tmp\")\n\n def tearDown(self):\n pass\n\n #def test_load_file_1(self):\n # val = self.st.load_file(\"/tmp/testfile.tst\")\n # self.assertNotEqual(val, None) \n\nclass MyClass(unittest.TestCase):\n\n def setUp(self):\n self.mt = SaltCheck()\n\n def tearDown(self):\n pass\n\n def test_get_state_dir_1(self):\n val = self.mt.get_state_dir()\n self.assertNotEqual(val, None) \n\n def test_get_state_search_path_list_1(self):\n val = self.mt.get_state_search_path_list()\n self.assertNotEqual(val, None) \n\n def test_show_minion_options_1(self):\n val = self.mt.show_minion_options()\n self.assertNotEqual(val, None) \n\n def test_show_minion_options_2(self):\n val = self.mt.show_minion_options()\n cache = val.get('cachedir', None)\n root_dir = val.get('root_dir', None)\n states_dir = val.get('states_dir', None)\n environment = val.get('environment', None)\n file_roots = val.get('file_roots', None)\n #if cache and root_dir and states_dir and environment and file_roots:\n if cache and root_dir and file_roots:\n all_good = True\n else:\n all_good = False\n self.assertEqual(all_good, True) \n\n def test_run_test_1(self):\n mydict = {\"module_and_function\": \"test.echo\",\n \"assertion\": \"assertEqual\",\n \"expected-return\": \"This works!\",\n \"args\": [\"This works!\"] }\n val = self.mt.run_test(mydict)\n self.assertEqual(val, True) \n\n def test_run_test_2(self):\n mydict = {\"module_and_function\": \"invalidmod.invalidfunc\",\n \"assertion\": \"assertEqual\",\n \"expected-return\": \"This works!\",\n \"args\":[\"This works!\"] }\n val = self.mt.run_test(mydict)\n self.assertEqual(val, \"False: Invalid test\") \n\n def test_run_test_3(self):\n mydict = {\"module_and_function\": \"test.echo\",\n \"assertion\": \"assertEqual\",\n \"expected-rotten\": \"This works!\",\n \"arrgs\":[\"This works!\"] }\n val = self.mt.run_test(mydict)\n self.assertEqual(val, \"False: Invalid test\") \n\n def test_populate_salt_modules_list_1(self):\n val = self.mt.populate_salt_modules_list()\n length = len(val)\n self.assertGreater(length, 10) \n\n def test_is_valid_test_1(self):\n test_dict = {'module_and_function':'test.ping',\n 'assertion':'assertTrue',\n 'expected-return':'True'}\n val = self.mt.is_valid_test(test_dict)\n self.assertEqual(val, True) \n\n def test_is_valid_test_2(self):\n test_dict = {'module_and_function':'test.ping-a-ring',\n 'assertion':'assertTrue',\n 'expected-return':'True'}\n val = self.mt.is_valid_test(test_dict)\n self.assertEqual(val, False) \n\n def test_is_valid_test_3(self):\n test_dict = {'module_and_function':'toast.ping',\n 'assertion':'assertTrue',\n 'expected-return':'True'}\n val = self.mt.is_valid_test(test_dict)\n self.assertEqual(val, False) \n\n def test_is_valid_test_4(self):\n test_dict = {'module_and_function':'toast.ping',\n 'assertion':'assertAbort',\n 'expected-return':'True'}\n val = self.mt.is_valid_test(test_dict)\n self.assertEqual(val, False) \n\n def test_is_valid_test_5(self):\n test_dict = {'module_and_function':'toast.ping',\n 'absorbtion':'assertTrue',\n 'expected-return':'True'}\n val = self.mt.is_valid_test(test_dict)\n self.assertEqual(val, False) \n\n def test_is_valid_test_6(self):\n test_dict = 
{'module_and_function':'toast.ping',\n 'assertion':'assertTrue',\n 'expected-rotunda':'True'}\n val = self.mt.is_valid_test(test_dict)\n self.assertEqual(val, False) \n\n def test_is_valid_test_7(self):\n test_dict = {}\n val = self.mt.is_valid_test(test_dict)\n self.assertEqual(val, False) \n\n def test_call_salt_command_1(self):\n val = self.mt.call_salt_command('test.ping')\n self.assertEqual(val, True) \n\n def test_call_salt_command_2(self):\n val = self.mt.call_salt_command('test.ping', 'bad-arg')\n self.assertNotEqual(val, True) \n\n def test_valid_module_1(self):\n val = self.mt.is_valid_module('invalid-name')\n self.assertEqual(val, False) \n\n def test_valid_module_2(self):\n val = self.mt.is_valid_module('test')\n self.assertEqual(val, True) \n\n def test_valid_function_1(self):\n val = self.mt.is_valid_function('test', 'ping')\n self.assertEqual(val, True) \n\n def test_valid_function_2(self):\n val = self.mt.is_valid_function('test', 'invalid-function')\n self.assertEqual(val, False) \n\n def test_1_assert_equal(self):\n val = SaltCheck.assert_equal(True, True)\n self.assertEqual(True, val)\n\n def test_2_assert_equal(self):\n val = SaltCheck.assert_equal(True, False)\n #fin_val = val[0]\n #self.assertEqual(False, fin_val)\n fin_val = val.startswith('False')\n self.assertEqual(True, fin_val)\n\n def test_3_assert_equal(self):\n val = SaltCheck.assert_equal(False, False)\n self.assertEqual(True, val)\n\n def test_1_assert_not_equal(self):\n val = SaltCheck.assert_not_equal(True, False)\n self.assertEqual(True, val)\n\n def test_2_assert_not_equal(self):\n val = SaltCheck.assert_not_equal(True, True)\n #fin_val = val[0]\n #self.assertEqual(False, fin_val)\n fin_val = val.startswith('False')\n self.assertEqual(True, fin_val)\n\n def test_3_assert_not_equal(self):\n val = SaltCheck.assert_not_equal(False, False)\n #fin_val = val[0]\n fin_val = val.startswith('False')\n self.assertEqual(True, fin_val)\n\n def test_1_assert_true(self):\n val = SaltCheck.assert_true(True)\n self.assertEqual(True, val)\n\n def test_2_assert_true(self):\n val = SaltCheck.assert_true(False)\n #fin_val = val[0]\n #self.assertEqual(False, fin_val)\n fin_val = val.startswith('False')\n self.assertEqual(True, fin_val)\n\n def test_3_assert_true(self):\n val = SaltCheck.assert_true(None)\n #fin_val = val[0]\n fin_val = val.startswith('False')\n self.assertEqual(True, fin_val)\n\n def test_1_assert_false(self):\n val = SaltCheck.assert_false(False)\n self.assertEqual(True, val)\n #fin_val = val[0].startswith('False')\n #self.assertEqual(True, fin_val)\n\n def test_2_assert_false(self):\n val = SaltCheck.assert_false(True)\n #fin_val = val[0]\n #self.assertEqual(False, fin_val)\n fin_val = val.startswith('False')\n self.assertEqual(True, fin_val)\n\n def test_3_assert_false(self):\n val = SaltCheck.assert_false(None)\n #fin_val = val[0]\n #self.assertEqual(False, fin_val)\n fin_val = val.startswith('False')\n self.assertEqual(True, fin_val)\n\n def test_1_assert_in(self):\n val = SaltCheck.assert_in(1, [1,2,3])\n self.assertEqual(True, val)\n\n def test_2_assert_in(self):\n val = SaltCheck.assert_in('a', \"abcde\")\n self.assertEqual(True, val)\n\n def test_3_assert_in(self):\n val = SaltCheck.assert_in('f', \"abcde\")\n #fin_val = val[0]\n #self.assertEqual(False, fin_val)\n fin_val = val.startswith('False')\n self.assertEqual(True, fin_val)\n\n def test_1_assert_not_in(self):\n val = SaltCheck.assert_not_in(0, [1,2,3,4])\n self.assertEqual(True, val)\n\n def test_2_assert_not_in(self):\n val = 
SaltCheck.assert_not_in('a', \"abcde\")\n #fin_val = val[0]\n #self.assertEqual(False, fin_val)\n fin_val = val.startswith('False')\n self.assertEqual(True, fin_val)\n\n def test_1_assert_greater(self):\n val = SaltCheck.assert_greater(100, 1)\n self.assertEqual(True, val)\n\n def test_2_assert_greater(self):\n val = SaltCheck.assert_greater(100, -1)\n self.assertEqual(True, val)\n\n def test_3_assert_greater(self):\n val = SaltCheck.assert_greater(-1, 0)\n #fin_val = val[0]\n #self.assertEqual(False, fin_val)\n fin_val = val.startswith('False')\n self.assertEqual(True, fin_val)\n\n def test_4_assert_greater(self):\n val = SaltCheck.assert_greater(0, 0)\n fin_val = val.startswith('False')\n self.assertEqual(True, fin_val)\n\n def test_1_assert_greater_equal(self):\n val = SaltCheck.assert_greater_equal(0, 0)\n self.assertEqual(True, val)\n\n def test_2_assert_greater_equal(self):\n val = SaltCheck.assert_greater_equal(1, 0)\n self.assertEqual(True, val)\n\n def test_3_assert_greater_equal(self):\n val = SaltCheck.assert_greater_equal(-1, 0)\n #fin_val = val[0]\n #self.assertEqual(False, fin_val)\n fin_val = val.startswith('False')\n self.assertEqual(True, fin_val)\n\n def test_1_assert_less(self):\n val = SaltCheck.assert_less(-1, 0)\n self.assertEqual(True, val)\n\n def test_2_assert_less(self):\n val = SaltCheck.assert_less(1, 100)\n self.assertEqual(True, val)\n\n def test_3_assert_less(self):\n val = SaltCheck.assert_less(0, 0)\n #fin_val = val[0]\n #self.assertEqual(False, fin_val)\n fin_val = val.startswith('False')\n self.assertEqual(True, fin_val)\n\n def test_4_assert_less(self):\n val = SaltCheck.assert_less(100, 0)\n #fin_val = val[0]\n fin_val = val.startswith('False')\n self.assertEqual(True, fin_val)\n\nif __name__ == '__main__':\n unittest.main()\n"
}
] | 7 |
mihgen/lp-assigner
|
https://github.com/mihgen/lp-assigner
|
8290d75b6f3213739ca9ba6057f94b0e0782626f
|
60bcf7cf168c5d84aa784145a6796bb761b2694a
|
12d185aeada05e47111bc181c56a66edff879354
|
refs/heads/master
| 2020-05-18T06:35:03.032291 | 2015-04-29T16:09:07 | 2015-04-29T16:09:07 | 31,055,689 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5437183976173401,
"alphanum_fraction": 0.5489423871040344,
"avg_line_length": 45.154151916503906,
"blob_id": "2046e477da9d13d7879b847cb86b29fd00816286",
"content_id": "d9669eadfc1918106432974c9aba13afab4f0da6",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11677,
"license_type": "permissive",
"max_line_length": 89,
"num_lines": 253,
"path": "/lpassigner.py",
"repo_name": "mihgen/lp-assigner",
"src_encoding": "UTF-8",
"text": "import itertools\n\nfrom launchpadlib.launchpad import Launchpad\n\n\nPROJECTS = ['fuel', 'mos']\nSTATUS = ['New', 'Confirmed', 'Triaged', 'In Progress', 'Incomplete']\nTRUNC = 0\nBASE_URL = 'https://api.launchpad.net/devel/'\nCREATED_SINCE = '2014-09-01'\nDEBUG = 0\nMAX_CHANGES = -1\n\ndef make_changes(prj, milestones_map, bug, to_target_milestones):\n dev_focus_series = prj.development_focus\n prj_name = prj.name\n for tgt in to_target_milestones:\n milestone = prj.getMilestone(name=tgt)\n s_milestone = bug.milestone\n s_status = bug.status\n s_importance = bug.importance\n s_assignee = bug.assignee\n if tgt in milestones_map[dev_focus_series.name]:\n # This is real dirty magic. Needs refactoring.\n # LP, if you try to target dev focus series,\n # will move your bug data over new target. So, if you\n # had no-series bug with 5.1.2, and now targeting\n # 6.1.x, then you'll get moved all 5.1.2 data over.\n # To avoid data loss, we are exchanging data, and not\n # creating series if milestone is from current dev series\n if not DEBUG:\n try:\n bug.milestone = milestone\n bug.status = \"New\"\n bug.lp_save()\n series = s_milestone.series_target.name\n target = BASE_URL + prj_name + '/' + series\n task_link = bug.bug.addTask(target=target)\n task_link.milestone = s_milestone\n task_link.status = s_status\n task_link.importance = s_importance\n task_link.assignee = s_assignee\n task_link.lp_save()\n continue\n except Exception as e:\n print('Error: {}'.format(e))\n\n series = milestone.series_target.name\n target = BASE_URL + prj_name + '/' + series\n # It can raise exception - 400, if series already exists\n if not DEBUG:\n try:\n #TODO: if series already targeted, but not milestone,\n # then we will skip it - as addTask will return an\n # exception: series already exists\n task_link = bug.bug.addTask(target=target)\n # Series already changed at this point of time,\n # lp_save() below is for milestone to change\n milestone_target = BASE_URL + prj_name + \\\n '/+milestone/' + tgt\n task_link.milestone = milestone_target\n task_link.assignee = s_assignee\n task_link.lp_save()\n except Exception as e:\n print('Error: {}'.format(e))\n\ndef bug_milestones(bug, dev_focus_milestone_name):\n bug_info = \"\"\n bug_id = bug.bug.id\n bug_mstn = bug.milestone\n # milestone can be None. It can be non-triaged bug\n # Not sure, if other related tasks could have milestone\n milestones = []\n\n # Special list for milestones which are inconsistent with series\n ml_to_add = []\n if bug_mstn is not None:\n min_milestone_name = bug_mstn.name\n # We don't want to target milestone, which is there\n # even if there is no series associated\n milestones = [bug_mstn.name]\n else:\n min_milestone_name = dev_focus_milestone_name\n\n bug_info = \"** {} ** TOP bug object, milestone: {}\\n\".format(\n bug_id, bug_mstn)\n for task in bug.related_tasks:\n bug_info += \"**** {} ** affects: {}, milestone: {}\\n\".format(\n bug_id, task.target.name, task.milestone)\n # We are gethering all milestones bug affects\n # We are not interested in collecting series, as we think\n # that milestone is our primary key for all work with LP.\n # For instance, we filter search by milestone.\n if task.milestone is None:\n # Apparently affecting only series, no milestone set\n #TODO: As it seems to be impossible to update existing task,\n # for unknown reason, we may want to lp_delete() and create\n # task from scratch. We need to save assignee, etc. 
though.\n continue\n milestone_name = task.milestone.name\n milestones.append(milestone_name)\n if milestone_name < min_milestone_name:\n # Looking for lowest milestone set, as we don't trust\n # launchpad in sorting of tasks\n min_milestone_name = milestone_name\n\n if task.milestone.series_target.name != task.target.name:\n # This is inconsistency which can exist in LP. For instance,\n # your bug can be assigned to 4.1.2 milestone in 6.0.x series\n # We want to fix that. All attempts to just update series\n # for existing bug task failed. So we have to remove bug task\n # and create it again.\n print(\"%s: INCONSISTENCY DETECTED: series %s, milestone %s.\"\n \" Deleting... \" % (bug_id,\n task.target.name, milestone_name))\n if not DEBUG:\n # TODO: we need to save all status, assignee, etc.,\n # and reapply it after\n try:\n task.lp_delete()\n except Exception as e:\n print('Error: {}'.format(e))\n ml_to_add.append(milestone_name)\n return (bug_info, milestones, ml_to_add, min_milestone_name)\n\ndef main():\n # lpbugmanage is the app name. Can be anything\n lp = Launchpad.login_with(\n 'lpbugmanage', 'production',\n version='devel', credentials_file='credentials.conf',\n )\n\n changes = 0\n for prj_name in PROJECTS:\n prj = lp.projects[prj_name]\n dev_focus_series = prj.development_focus\n active_milestones = dev_focus_series.active_milestones\n dev_focus_milestone_name = min([m.name for m in active_milestones])\n dev_focus_milestone = prj.getMilestone(name=dev_focus_milestone_name)\n print(\"Dev focus milestone: %s\" % dev_focus_milestone_name)\n\n older_series = [s for s in prj.series if s.name <= dev_focus_series.name]\n milestones_map = {}\n milestones_active_map = {}\n for s in older_series:\n milestones_active_map[s.name] = [m.name for m in s.active_milestones]\n milestones_map[s.name] = [m.name for m in s.all_milestones]\n\n # Let's iterate over all milestones\n # Unfortunately, LP doesn't allow to search over list of milestones\n bugs = prj.searchTasks(status=STATUS, created_since=CREATED_SINCE)\n print(\"%s: amount of bugs found - %d\" % (prj_name, len(list(bugs))))\n for (counter, bug) in enumerate(bugs, 1):\n bug_id = bug.bug.id\n print(\"Processing bug #%s...\" % bug_id)\n bug_info, milestones, ml_to_add, min_milestone_name = \\\n bug_milestones(bug, dev_focus_milestone_name)\n if not ml_to_add:\n if min_milestone_name >= dev_focus_milestone_name:\n # It is whether non-triaged bug,\n # or has dev_focus/higher milestone only\n print(\"%s: Skipping this bug: non-triaged or\"\n \" has dev_focus/higher milestone only\" % bug_id)\n continue\n\n if not any(len(x) > 3 for x in milestones):\n # We don't want to any further processing with this bug:\n # we want to target only bugs from maintenance milestones\n # and maintenance are in format X.Y.Z, X.Y-updates,\n # or X.Y.Z-updates, so certainly more than 3 sym\n print(\"%s: This bug is not targeting any maintenance milestone,\"\n \" skipping.\" % bug_id)\n continue\n\n print(\"%s: Lowest milestone: %s\" % (bug_id, min_milestone_name))\n # This is real hack, but it does its job:\n # We need 6.0.x as min for any 6.0, 6.0.1, 6.0-updates, 6.0.1-updates\n min_series_name = min_milestone_name[:3] + '.x'\n\n # Without -updates for now...\n needed_series_names = filter(\n lambda x: x >= min_series_name,\n [s.name for s in older_series if 'updates' not in s.name])\n # Let's check if we have -updates\n milestones_updates = [x for x in milestones if 'updates' in x]\n if milestones_updates:\n series_with_updates = 
[prj.getMilestone(name=x).series_target.name\n for x in milestones_updates]\n min_series_with_updates = min(series_with_updates)\n needed_series_names += filter(\n lambda x: x >= min_series_with_updates,\n [s.name for s in older_series if 'updates' in s.name])\n\n print(\"%s: Verifying that bug targets series: %s\" %\n (bug_id, needed_series_names))\n to_target_milestones = []\n for s in needed_series_names:\n if not set(milestones_map[s]) & set(milestones):\n if s in milestones_active_map and milestones_active_map[s]:\n to_target_milestones.append(min(milestones_active_map[s]))\n\n to_target_milestones += ml_to_add\n if to_target_milestones:\n print bug_info\n to_target_milestones.sort()\n print(\"%s: ###### targeting to %s\" %\n (bug.bug.id, to_target_milestones))\n ############\n changes += 1\n make_changes(prj, milestones_map, bug, to_target_milestones)\n\n if counter > TRUNC and TRUNC > 0:\n break\n if changes >= MAX_CHANGES and MAX_CHANGES != -1:\n break\n if counter % 10 == 0:\n print(\"Processed %d bugs...\" % counter)\n\n # Let's process all High, Critical in current dev milestone\n status = STATUS + ['Fix Committed', 'Fix Released']\n bugs1 = prj.searchTasks(status=status, milestone=dev_focus_milestone,\n importance=[\"Critical\"],\n tags=[\"-devops\", \"-fuel-devops\"],\n tags_combinator=\"All\",\n created_since=CREATED_SINCE)\n bugs2 = prj.searchTasks(status=STATUS, milestone=dev_focus_milestone,\n tags=['customer-found'],\n created_since=CREATED_SINCE)\n # If current is 6.1.x, then previous is 6.0.x - which we want to target\n prev_series_name = older_series[-2].name\n for bug in itertools.chain(bugs1, bugs2):\n bug_info, milestones, ml_to_add, min_milestone_name = \\\n bug_milestones(bug, dev_focus_milestone_name)\n to_target_milestones = ml_to_add\n if not set(milestones_map[prev_series_name]) & set(milestones):\n to_target_milestones.append(min(milestones_active_map[prev_series_name]))\n\n if to_target_milestones:\n print bug_info\n to_target_milestones.sort()\n print(\"%s: ###### targeting to %s\" %\n (bug.bug.id, to_target_milestones))\n ############\n changes += 1\n make_changes(prj, milestones_map, bug, to_target_milestones)\n\n if changes >= MAX_CHANGES and MAX_CHANGES != -1:\n break\n\n print(\"Total changes made: %d\" % changes)\n\nif __name__ == \"__main__\":\n main()\n"
}
] | 1 |
Joshualau23/Physics-375
|
https://github.com/Joshualau23/Physics-375
|
d8c100f74dd2e107eba857895293d38b185d3b2a
|
f0be76b209ffc5cb78a2eb229e4de6d633ed4d6a
|
2c6990c9c239346db9de45a7932ed8ddf0aa126d
|
refs/heads/master
| 2022-09-13T02:58:15.250311 | 2020-06-02T00:05:05 | 2020-06-02T00:05:05 | 268,659,030 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5172613859176636,
"alphanum_fraction": 0.5918189883232117,
"avg_line_length": 17.5053768157959,
"blob_id": "b2bd1ca1d249d7caa286fcf4086aa60698a897a9",
"content_id": "31579c28d44dafd07ed9ce697e548486364aee66",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3447,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 186,
"path": "/Assignment5.py",
"repo_name": "Joshualau23/Physics-375",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Mar 30 21:42:36 2019\n\n@author: joshu\n\"\"\"\n\nfrom scipy.integrate import odeint\nimport numpy as np\nimport matplotlib\nfrom math import pi,sqrt,log10\nimport numpy as np\nimport scipy as sp\nfrom scipy import constants as con\nimport matplotlib.pyplot as plt\n\n\nRsun = 695510\nMsun = 1.9891e30\nhbar = con.hbar\nG = con.G\nMe = con.m_e\nMp = con.m_p\nMn = con.m_n\n\ndef R_wd(M):\n return (hbar**2 / (1000*G*Me*Mp**2))*(M*Msun/Mp)**(-1/3.0)\n #return (0.01*Rsun*(M/0.7)**(-1.0/3.0))\n\ndef R_ns(M):\n return (hbar**2 / (1000*G*Mn*Mp**2))*(M*Msun/Mp)**(-1/3.0)\n #return (11*(M/1.4)**(-1.0/3.0))\n\ndef R_bh(M):\n return 3*(M) / 1000\n\ndef R_density(M):\n rho = 1.5E5\n return (3*M*Msun /(4*pi*rho)**(1/3.0)) / 1000\n #return ((3*M/(4*np.pi*(0.599)))**(1.0/3.0)*0.02*Rsun)\n\nM = np.linspace(0.01,5,1000)\nlogM = []\nRwhite = []\nRneutron = []\nRblack = []\nRdensity = []\nfor i in range(0,len(M)):\n a = log10(R_wd(M[i]))\n b = log10(R_ns(M[i]))\n c = log10(M[i])\n d = log10(R_bh(M[i]))\n f = log10(R_density(M[i]))\n Rwhite.append(a)\n Rneutron.append(b)\n Rblack.append(d)\n Rdensity.append(f)\n logM.append(c)\n\n\nfig = plt.figure()\nplt.plot(logM,Rwhite,color = \"gold\")\nplt.plot(logM,Rneutron)\nplt.plot(logM,Rblack)\nplt.title(\"Radius vs Mass\" )\nplt.ylabel(\"log(R/km)\")\nplt.xlabel('log(M/Msun)')\nplt.grid()\nplt.axvspan(0, 0.5, alpha=0.5, color='grey')\nplt.show()\n\nwith open('test.txt', 'w') as f:\n for item in M:\n f.write(\"%s\\n\" % item)\n\n####2b\n\n\nfig = plt.figure()\nplt.plot(logM,Rwhite)\nplt.plot(logM,Rneutron)\nplt.plot(logM,Rblack)\nplt.plot(logM,Rdensity, color = \"gold\")\nplt.title(\"Radius vs Mass\" )\nplt.ylabel(\"log(R/km)\")\nplt.xlabel('log(M/Msun)')\nplt.grid()\n#fig.savefig('2b.png')\n#plt.show()\n\n\n###2c\n\nM_wd = np.linspace(0.01, 1.4, 1000)\nM_ns = np.linspace(1.4, 3, 1000)\nM_bh = np.linspace(3, 5, 1000)\n\nRwhite2 = []\nRneutron2 = []\nRblack2 = []\nRdensity2 = []\n\nfor i in range(0,len(M_wd)):\n a = log10(R_wd(M_wd[i]))\n b = log10(R_ns(M_ns[i]))\n d = log10(R_bh(M_bh[i]))\n Rwhite2.append(a)\n Rneutron2.append(b)\n Rblack2.append(d)\n\nM_wd = map(np.log10, M_wd)\nM_ns = map(np.log10, M_ns)\nM_bh = map(np.log10, M_bh)\n \n\n\n\nfig = plt.figure()\nplt.plot(M_wd,Rwhite2)\nplt.plot(M_ns,Rneutron2)\nplt.plot(M_bh,Rblack2)\nplt.title(\"Radius vs Mass\" )\nplt.ylabel(\"log(R/km)\")\nplt.xlabel('log(M/Msun)')\nplt.grid()\n#fig.savefig('2c.png')\n#plt.show()\n \n####2d\n\n\ndef w_wd(M):\n return 9.261e-6*M**(-1.0/3.0)\n\ndef w_ns(M):\n return 4.8214e-12*M**(-1.0/3.0)*Rsun**2\n\nw_bh = 2.704e-11*696342**2\n\nM_wd = np.linspace(0.01, 1.4, 1000)\nM_ns = np.linspace(1.4, 3, 1000)\nM_bh = 3\n\n\nwwhite = []\nwneutron = []\nfor i in range(0,len(M_wd)):\n a = w_wd(M_wd[i])\n b = w_ns(M_ns[i])\n wwhite.append(a)\n wneutron.append(b)\n \n\nfig = plt.figure()\nplt.loglog(M_wd,wwhite)\nplt.loglog(M_ns,wneutron)\nplt.loglog(M_bh,w_bh,marker='o',color = \"gold\")\nplt.title(\"Revolution vs Mass log-log Plot\" )\nplt.ylabel(\"Revolution (revs/min)\")\nplt.xlabel('M/Msun')\nplt.grid()\n#fig.savefig('2d.png')\n#plt.show()\n\n\nh = con.h\nG = con.G\nc = con.c\nkb = con.k\nhbar = con.hbar\n\ndef Mevap(t):\n return (t * (c**4 * h) / (2560.0 * pi**2 * 4.0 * G**2))**(1/3.0)\n\ntevap = 13.7e9 * 3.154e7\nMevaporation = Mevap(tevap)\n#print Mevaporation\n\n#print ((c**4 * h) / (2560.0 * pi**2 * 4.0 * G**2))**(1/3.0)\n\n\ndef temp(M):\n return (hbar*c**2)/ (8*pi*kb*G*M)\n\ntemperature = temp(Mevap(tevap))\n#print 
temperature\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.5333933234214783,
"alphanum_fraction": 0.6031032204627991,
"avg_line_length": 17.138774871826172,
"blob_id": "9acc563ae4ea4dbda0a20178248eea75afd9d177",
"content_id": "e1e2d58f76777fbaae5d86ebd968b58365ebdeb9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4447,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 245,
"path": "/Assignment3.py",
"repo_name": "Joshualau23/Physics-375",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 4 15:10:31 2019\n\n@author: joshu\n\"\"\"\n\nfrom scipy.integrate import odeint\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom math import pi,sqrt,log\n\n\ndef model(x,t,k):\n y = x[0]\n dy = x[1]\n xdot = [[],[]]\n xdot[0] = dy\n xdot[1] = (-2.0/t)*dy - y**k\n return xdot\n\n\n\n\ntime = np.linspace(.000001,10,1000)\n\n\nk=0\nLE0 = odeint(model,[1,0],time,args=(k,))\n\nk=1\nLE1 = odeint(model,[1,0],time,args=(k,))\nk=2\nLE2 = odeint(model,[1,0],time,args=(k,))\nk=3\nLE3 = odeint(model,[1,0],time,args=(k,))\nk=4\nLE4 = odeint(model,[1,0],time,args=(k,))\nk=5\nLE5 = odeint(model,[1,0],time,args=(k,))\n\nfig = plt.figure()\nplt.plot(time,LE0[:,0],color='black')\nplt.plot(time,LE1[:,0],color='red')\nplt.plot(time,LE2[:,0],color='yellow')\nplt.plot(time,LE3[:,0],color='green')\nplt.plot(time,LE4[:,0],color='blue')\nplt.plot(time,LE5[:,0],color='purple')\nplt.ylim(-1,1)\nplt.title(\"$\\Theta$ versus x\" )\nplt.xticks(np.arange(0, 10, 1))\nplt.ylabel(\"$\\Theta$\")\nplt.xlabel('$x$')\nplt.grid()\n#fig.savefig('3e.png')\nplt.show()\n\n\nplt.plot(time,LE3[:,0],color='Gold')\nplt.plot(time,LE3[:,1],color='pink')\nplt.ylim(-1,1)\nplt.title(\"$\\Theta$ versus x\" )\nplt.xticks(np.arange(0, 10, 1))\nplt.ylabel(\"$\\Theta$\")\nplt.xlabel('$x$')\nplt.grid()\nplt.show()\n\n\n###g\n\n\ntime = np.array(time)\ntheta = np.array(LE3[:,0])\ndtheta = np.array(LE3[:,1])\n\nx0 = 0\ndthetadx = 0\nfor i in range(0,len(time)):\n a = round(theta[i],8)\n if a == -1.98e-06:\n x0 = time[i]\n dthetadx = dtheta[i]\n else:\n continue\n \n\nprint x0\nprint dthetadx\n\n\n\n\nMsun = 0.5*1.9891e30\nRsun = 696000000*0.6\nrhoc = Msun / (-4*pi*(Rsun**3 / x0) * dthetadx)\nprint rhoc / 10**5\n\nG = 6.67408e-11\nn=3.0\n\nK = (Rsun**2 / x0**2)*(4*pi*G) / ((1.0/n + 1)*n*rhoc**(1/n -1))\nprint K / 10**9\n\n\nN = ((4*pi)**(1/3.0) / (n+1)) * (-x0**2 * dthetadx)**( (1-n) / n) * x0**((n-3) / n)\nK1 = Msun**(2/3.0)*N*G\nprint K1 / 10**9\n\n\n###h\n\nalpha = Rsun / x0\nx = np.array(time)\nrRstar = []\nrho = []\nfor i in range(0,len(x)):\n a = x[i]*alpha / Rsun\n b = rhoc*theta[i]**3\n rRstar.append(a)\n rho.append(b)\n\nfig = plt.figure()\nplt.plot(rRstar,rho,color='royalblue')\nplt.title(\"Density versus radius\" )\n#plt.xticks(np.arange(0, 10, 1))\nplt.ylabel(\"Density\")\nplt.xlabel('r/Rsun')\nplt.xlim(0,1)\nplt.grid()\n#fig.savefig('3h1.png')\nplt.show()\n\n\n\nP = []\n\nfor i in range(len(rRstar)):\n a = K*rho[i]**(1 + 1.0/n)\n P.append(a)\n\nfig = plt.figure()\nplt.plot(rRstar,P,color='lawngreen')\nplt.title(\"Pressure versus radius\" )\n#plt.xticks(np.arange(0, 10, 1))\nplt.ylabel(\"Pressure\")\nplt.xlabel('r/Rsun')\nplt.xlim(0,1)\nplt.grid()\n#fig.savefig('3h2.png')\nplt.show()\n\nmu = 1/ (2*.55 + 0.75*0.4 + 0.5*0.05)\nk = 1.38064852e-23\nmp = 1.6726219e-27\n\nT = [] \nfor i in range(0,len(rho)):\n a = (mu*mp*P[i]) / (rho[i]*k) \n T.append(a)\n\nfig = plt.figure()\nplt.plot(rRstar,T ,color='salmon')\nplt.title(\"Temperature versus radius\" )\n#plt.xticks(np.arange(0, 10, 1))\nplt.ylabel(\"Temperature\")\nplt.xlabel('r/Rsun')\nplt.xlim(0,1)\nplt.grid()\n#fig.savefig('3h3.png')\nplt.show()\n\nprint T[0] \n\n###i\n\n\nX = 0.55\ndef epp(p,t):\n return 1.07e-7*X**2 * (p / 10**5)*(t/10**6)**4\nenergypp = []\n\nfor i in range(0,len(T)):\n a = epp(rho[i],T[i])\n energypp.append(a)\n\nfig = plt.figure()\nplt.plot(rRstar,energypp,color='salmon')\nplt.title(\"Energy generation rate (p-p chain) versus radius\" )\n#plt.xticks(np.arange(0, 10, 1))\nplt.ylabel(\"Energy 
generation rate\")\nplt.xlabel('r/Rsun')\nplt.xlim(0,1)\nplt.grid()\n#fig.savefig('3i1.png')\nplt.show()\n\n\nXcno = 0.03*X\n\ndef ecno(p,t):\n return 8.24e-26*X*Xcno * (p / 10**5)*(t/10**6)**19.9\n\n\n\nenergycno = []\nfor i in range(0,len(T)):\n a = ecno(rho[i],T[i])\n energycno.append(a)\n\nfig = plt.figure()\nplt.plot(rRstar,energycno,color='salmon')\nplt.title(\"Energy generation rate (CNO) versus radius\" )\n#plt.xticks(np.arange(0, 10, 1))\nplt.ylabel(\"Energy generation rate\")\nplt.xlabel('r/Rsun')\nplt.xlim(0,1)\nplt.grid()\n#fig.savefig('3i2.png')\nplt.show()\n\n\ndLdr = []\n\nfor i in range(0,len(rho)):\n dL = 4*pi*(rRstar[i])**2 *rho[i]*(energycno[i] + energypp[i])\n dLdr.append(dL)\n\nfig = plt.figure()\nplt.plot(rRstar,dLdr,color='salmon')\nplt.title(\"dL/dr versus radius\" )\n#plt.xticks(np.arange(0, 10, 1))\nplt.ylabel(\"dL/dr\")\nplt.xlabel('r/Rsun')\nplt.xlim(0,1)\nplt.grid()\n#fig.savefig('3i3.png')\nplt.show()\n\n\nluminosity = np.trapz(dLdr[:650],rRstar[:650])\nprint luminosity \nprint luminosity * Rsun**3 / (3.828*10**26)\nprint T[0]\nprint rho[0]\n\n\n\n"
},
{
"alpha_fraction": 0.5198090672492981,
"alphanum_fraction": 0.6081145405769348,
"avg_line_length": 19.899999618530273,
"blob_id": "68b61b789b77699e48f3ec85a5eb7ee5004c65bd",
"content_id": "a4d7ddc6e163eada342302139fc324f12ec7e74e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2095,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 100,
"path": "/Assignment4.py",
"repo_name": "Joshualau23/Physics-375",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 15 15:47:50 2019\n\n@author: joshu\n\"\"\"\n\nfrom scipy.integrate import odeint\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom math import pi,sqrt,log\nimport numpy as np\nimport scipy as sp\nfrom scipy import constants as con\nimport matplotlib.pyplot as plt\n\n\ndef model(x,r):\n y = x[0]\n dy = x[1]\n xdot = [[],[],[]]\n xdot[0] = dy\n xdot[1] = - (4*pi*y**2 + ((2.0/r) - (1.0/y) * dy)*dy)\n dm = 4*pi*r**2*y \n return [xdot[0],xdot[1]]\n\n \nr = np.linspace(0.00000001,5,10000)\nLE0 = odeint(model,[1,0],r)\nrho_list = LE0[:,0]\n\n\n\nfig = plt.figure()\nplt.plot(r,rho_list,color='black')\n#plt.ylim(-1,1)\nplt.title(\"Density vs radius\" )\nplt.ylabel(\"Density (dimensionless)\")\nplt.xlabel('Radius (Dimensionless)')\nplt.grid()\nfig.savefig('2b1.png')\nplt.show()\n\n\nM0 = rho0[1]\nfig = plt.figure()\nplt.plot(r,M0,color='black')\n#plt.ylim(-1,1)\nplt.title(\"Mass vs radius\" )\nplt.ylabel(\"Mass (dimensionless)\")\nplt.xlabel('Radius (Dimensionless)')\nplt.grid()\nfig.savefig('2b2.png')\nplt.show()\n\n\nsurfpress = []\nradius = []\nMsun = 1.9891e30\nmu = 2.4\nG = 6.67408e-11\nmp = 1.6726219e-27\nk = 1.38064852e-23\nT = 10\nAU = 1.496e11\nfor i in range(0,len(r)):\n a = ((M0[i]**2 * rho_list[i]) / Msun**2)*(1.0/G**3)*(k*T / (mu*mp))**4\n b = (r[i] * (G*Msun*mu*mp) / (k*T*M0[i])) / AU\n surfpress.append(a)\n radius.append(b)\n\nfig = plt.figure()\nplt.plot(radius,surfpress,color='black')\nplt.xlim(0,70000)\nplt.title(\"Surface Pressure vs radius\" )\nplt.ylabel(\"Surface Pressure (Pascals)\")\nplt.xlabel('Radius (AU)')\nplt.grid()\nfig.savefig('2c.png')\nplt.show()\n\n\nMj = []\n\n\n\nfor i in range(1,len(r)):\n #m = 0.2*Msun*((rho_list*(Msun/M0[i])**2 *(k*T/ (G*mu*mp))**3 * (1/(3.0e-15)))**(-1/2.0))\n m = 0.2*1.989*10**30*((rho_list[i]*(1.989*10**30/M0[i])*(con.k*10/(con.G*1.989*10**15*2.4*con.m_p))**3)/(3.0*10**15))**(-1.0/2.0)\n Mj.append(m)\n\nfig = plt.figure()\nplt.plot(radius[1:],Mj,color='black')\nplt.xlim(0,50000)\nplt.title(\"Jean's Mass vs radius\" )\nplt.ylabel(\"Jeans Mass (kg)\")\nplt.xlabel('Radius (AU)')\nplt.grid()\nfig.savefig('2d.png')\nplt.show()\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.5893293023109436,
"alphanum_fraction": 0.6466278433799744,
"avg_line_length": 18.910112380981445,
"blob_id": "19077442b6c15cd6a662ce4b2a6c5bfe64d94bdd",
"content_id": "950fb7d5f47685329d99b70bfdb599ca3aef365a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5323,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 267,
"path": "/Assignment1.py",
"repo_name": "Joshualau23/Physics-375",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 16 14:12:48 2019\n\n@author: joshu\n\"\"\"\nimport numpy as np\nfrom math import log, pi\nimport matplotlib.pyplot as plt\nimport datetime\nfrom astropy.io import ascii\nfrom scipy.stats import linregress\nfrom astropy import constants\n\n\n###Q2a\n\n\ndata=ascii.read('hipparcos.txt')\nparallax1 = data['col1']\nparallax = parallax1*(0.001)\nVmag = data['col2']\nBmag = data['col3']\nImag = data['col4']\n\ndistance = []\n\nfor i in range(0,len(parallax)):\n d = 1.0 / parallax[i]\n distance.append(d)\n \nMagv = []\nfor k in range(0,len(distance)):\n absmag = Vmag[k] - 5*log(distance[k] / 10.0,10)\n Magv.append(absmag)\n\nBV = Bmag-Vmag\n\nfig,ax=plt.subplots()\nax.scatter(BV,Magv)\nplt.ylabel('Absolute Magnitude V')\nplt.xlabel('B-V')\nplt.gca().invert_yaxis()\nplt.yticks(np.arange(-7, 18, 1.5))\nplt.title('Absolute Magnitude V vs B-V ')\nplt.show()\n\n###Q2b\n\n\nlogtemperature = []\n\nfor i in range(0,len(BV)):\n temp = 9000.0 / ((BV[i]) + 0.93)\n a = log(temp,10)\n logtemperature.append(a)\n\nlogluminosity = []\n\nfor k in range(0,len(Magv)):\n lum = 10**(0.4*(4.83 - Magv[k]))\n a = log(lum,10)\n logluminosity.append(a)\n\nfig,ax=plt.subplots()\nplt.scatter(logtemperature,logluminosity)\nplt.ylabel('log(Lv/Lsun)')\nplt.xlabel('log(T)')\nplt.title('log(Lv/Lsun) vs log(T) ')\nplt.show()\n\n###Q2c\n\n\nsteboltz = 5.670*(10**(-8))\ndef lumino(R,T):\n return 4*pi*steboltz*(R**2)*((T)**4)\n\nLsun = 3.828*(10**26)\nrsun = 695700000\nluminosity1 = []\nluminosity02 = []\nluminosity5 = []\ntemperature = []\n\nfor i in range(0,len(BV)):\n temp = 9000.0 / ((BV[i]) + 0.93)\n temperature.append(temp)\n\n\nfor j in range(0,len(temperature)):\n lum1 = log((lumino(1*rsun,temperature[j])) / Lsun ,10)\n lum02 = log((lumino(0.2*rsun,temperature[j])) / Lsun,10)\n lum5 = log((lumino(5*rsun,temperature[j])) / Lsun,10)\n luminosity1.append(lum1)\n luminosity02.append(lum02)\n luminosity5.append(lum5)\n \n \n \nplt.scatter(logtemperature,luminosity1,color='blue')\nplt.scatter(logtemperature,luminosity02,color='green')\nplt.scatter(logtemperature,luminosity5,color='r')\nplt.ylabel('log(L/L0)')\nplt.xlabel('log t')\nplt.title('log(Lv/Lsun) vs log(T) ')\nplt.show()\n\n\nlogtemperature = []\n\nfor i in range(0,len(BV)):\n temp = 9000.0 / ((BV[i]) + 0.93)\n a = log(temp,10)\n logtemperature.append(a)\n\nlogluminosity = []\n\nfor k in range(0,len(Magv)):\n lum = 10**(0.4*(4.83 - Magv[k]))\n a = log(lum,10)\n logluminosity.append(a)\n\nfig,ax=plt.subplots()\nplt.scatter(logtemperature,logluminosity)\nplt.plot(logtemperature,luminosity1,color='blue')\nplt.plot(logtemperature,luminosity02,color='green')\nplt.plot(logtemperature,luminosity5,color='r')\nplt.ylabel('log(Lv/Lsun)')\nplt.title('log(Lv/Lsun) vs log(T) ')\nplt.xlabel('log(T)')\nplt.show()\n\n\n### q3a\n\ndata=ascii.read('W19_assignment1_orbit.dat')\norbitalphase = data['col1']\nradialvelocity1 = data['col2']\nradialvelocity2 = data['col3']\nspectro = data['col4']\n\ntime = []\nfor i in range(0,len(orbitalphase)):\n t = 50.0 * orbitalphase[i]\n time.append(t)\n \n\nfig,ax=plt.subplots()\nax.scatter(time,radialvelocity1)\nax.scatter(time,radialvelocity2)\nplt.ylabel('Radial Velocity (km/s)')\nplt.xlabel('Time (Days)')\nplt.title('Radial Velocities vs Time ')\nplt.show()\n\n\n###q3b\n\nradialvelocity1round = np.around(radialvelocity1,decimals = 1)\nradialvelocity2round = np.around(radialvelocity2,decimals = 1)\nsamevelocity = []\n\nfor i in range(0,len(radialvelocity1round)):\n if radialvelocity1round[i] 
== radialvelocity2round[i]:\n samevelocity.append(radialvelocity1round[i])\n else:\n continue\n\n\nmaxvr1 = max(radialvelocity1round) - samevelocity[0]\nmaxvr2 = max(radialvelocity2round) - samevelocity[0]\n\nP = 4320000\ndef msin3i(n,m):\n return (P / (2*pi*G))*((1+ (n/m))**-1)*((n+m)**3)\n\nM1 = msin3i(maxvr1,maxvr2) * 1000**3\nM2 = msin3i(maxvr2,maxvr1)* 1000**3\nprint M1,M2\n\ntime2 = []\nfor i in range(0,len(time)):\n t2 = time[i] + 52.0\n time2.append(t2)\n \n \n\n\nfig,ax=plt.subplots()\nax.scatter(time,spectro, color = 'gold')\nax.scatter(time2,spectro, color = 'gold')\nplt.ylabel('Apparent Magnitude')\nplt.xlabel('Time (Days)')\nplt.gca().invert_yaxis()\nplt.title('Apparent Magnitude vs Time ')\nplt.show()\n\n###q3c\n\nm0 = min(spectro)\n\ndef luminoratio(m,m0):\n return 100**((m-m0) / 5.0)\n\n\nloglml0ratio = []\n\nfor i in range(0,len(spectro)):\n a = log(luminoratio(spectro[i],m0),10)\n loglml0ratio.append(a)\n\n\nfig,ax=plt.subplots()\nax.scatter(time,loglml0ratio)\nplt.ylabel('Log(L/L0)')\nplt.xlabel('Time (Days)')\nplt.title('log(L/L0) vs Time ')\nplt.show()\n\n\n###q3d\n\nmld = max(spectro)\nmsd = 1.507312\n\n\n#for i in range(0,len(spectro)):\n # if spectro[i] > 1.5 and spectro[i] < 1.517 :\n # msd.append(spectro[i])\n # else:\n # continue\n\n\n#msd.sort()\n#for i in range(0,len(msd) - 1):\n # if round(msd[i],4) == round(msd[i+1],4):\n # msd2.append(msd[i])\n # else:\n # continue\n\n#print msd2\n\n#rh / rc\ntempratio = (1.0 - luminoratio(mld,m0)) / (1.0 - luminoratio(msd,m0))\nprint tempratio\n\n\n\n###q3d\n\nta = 0.473526\ntb = 0.47952\ntc = 0.52048\n\n#ta1 = []\n#tb1 = []\n\n#for i in range(0,len(spectro)):\n# if spectro[i] == mld and orbitalphase[i] < 0.6 and orbitalphase[i] > 0.4:\n# tb1.append(orbitalphase[i])\n \n#print tb1\n \n#rs / rl\nradiusratio = (tb - ta) / (tc - ta)\nprint radiusratio\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.5832841992378235,
"alphanum_fraction": 0.6852889060974121,
"avg_line_length": 20.95145606994629,
"blob_id": "f46363e05f998037be9ab00d0c7e18cb78fe2e5b",
"content_id": "6ce3b1e48c7ed2d47aa80aa17cdbda09d8294cf2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6784,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 309,
"path": "/Assignment2.py",
"repo_name": "Joshualau23/Physics-375",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 4 13:17:31 2019\n\n@author: joshu\n\"\"\"\n\nimport numpy as np\nfrom numpy import arange,amax,amin,array\nfrom math import log, pi,e\nimport matplotlib.pyplot as plt\nfrom astropy.io import ascii\nfrom scipy.stats import linregress\nfrom scipy.constants import speed_of_light,Planck,Boltzmann\n\n\n###1a\n\n\ndef blackbodywave(lamda,temp):\n return ((2*Planck*speed_of_light**2) / lamda**5)*((e**(Planck*speed_of_light / (lamda*Boltzmann*temp)) - 1.0)**-1)\n\nwaverange = arange((100*(10**-9)),(2*(10**-6)),(5*(10**-10)))\n\nBBwavelength = []\n\nfor i in range(0,len(waverange)):\n l = waverange[i]\n bb = blackbodywave(l,5500)\n BBwavelength.append(bb)\n \nplt.scatter(waverange,BBwavelength)\nplt.xlim((100*(10**-9)),(2.1*(10**-6)))\nplt.xlabel('Wavelength')\nplt.ylabel('Plank Function')\nplt.title('Plank Function vs Wavelength')\nplt.show()\n\nprint amax(BBwavelength)\n\npeakwavelength = 0\nfor i in range(0,len(waverange)):\n if BBwavelength[i] == 20612853101415.254:\n peakwavelength = waverange[i]\n else:\n continue\n\nprint peakwavelength\n\n\n\n\n###1b\n\ndef blackbodyfreq(mu,temp):\n return ((2*Planck*mu**3 / speed_of_light**2))*((e**(Planck*mu / (Boltzmann*temp)) - 1.0)**-1)\n\nfreqrange = arange((0.15*10**14),(30*10**14),6.5**14)\n\nBBfrequency = []\n\nfor i in range(0,len(freqrange)):\n mu = freqrange[i]\n bb = blackbodyfreq(mu,5500)\n BBfrequency.append(bb)\n \nplt.scatter(freqrange,BBfrequency)\nplt.xlim((0.05*10**14),(31*10**14))\nplt.ylim(0,(50*10**-9))\nplt.xlabel('Frequency')\nplt.ylabel('Plank Function')\nplt.title('Plank Function vs Frequency')\nplt.show()\n\nprint amax(BBfrequency)\n\npeakfrequency = 0\nfor i in range(0,len(freqrange)):\n if BBfrequency[i] == 3.154577180833425*10**-8:\n peakfrequency = freqrange[i]\n else:\n continue\n\nBBfrequency = array(BBfrequency) \nitem_index = np.where(BBfrequency==3.154577180833425*10**-8)\nprint freqrange[item_index]\n\n\n\n\n\n###1d\n\nfreqrange = arange((0.15*10**14),(30*10**14),6.5**14)\n\nBBfreqtimefreq = []\n\nfor i in range(0,len(freqrange)):\n mu = freqrange[i]\n bb = mu*blackbodyfreq(mu,5500)\n BBfreqtimefreq.append(bb)\n\n\n#plt.scatter(freqrange,BBfreqtimefreq)\n#plt.xlim((0.05*10**14),(31*10**14))\n#plt.ylim(0,(15*10**6))\n#plt.show()\n\nprint amax(BBfreqtimefreq)\n\n\nwaverange = arange((100*(10**-9)),(4*(10**-6)),(15*(10**-10)))\n\nlogwavelength = []\nlogBBwavetimeswave5500 = []\nlogBBwavetimeswave3000 = []\nlogBBwavetimeswave30000 = []\nlogfrequency = []\nlogBBfreqtimefreq = []\n\nfor i in range(0,len(waverange)):\n l = waverange[i]\n logwav = log(waverange[i],10)\n logwavelength.append(logwav)\n \n bb5500 = log(l*blackbodywave(l,5500),10)\n logBBwavetimeswave5500.append(bb5500)\n bb3000= log(l*blackbodywave(l,3000),10)\n logBBwavetimeswave3000.append(bb3000)\n bb30000 = log(l*blackbodywave(l,30000),10)\n logBBwavetimeswave30000.append(bb30000)\n \n\n \nplt.scatter(logwavelength,logBBwavetimeswave5500, color = 'aqua')\nplt.scatter(logwavelength,logBBwavetimeswave3000,color = 'Gold')\nplt.scatter(logwavelength,logBBwavetimeswave30000,color = 'salmon')\nplt.xlabel('log(wavelength)')\nplt.ylabel('log(Plank Function)')\nplt.title('log(Plank Function) vs log(wavelength)')\nplt.show()\n\nviswaverange = arange((400*(10**-9)),(800*(10**-9)),(1*(10**-9)))\n\nlogviswavelength = []\nlogvisBBwavetimeswave5500 = []\nlogvisBBwavetimeswave3000 = []\nlogvisBBwavetimeswave30000 = []\n\nfor i in range(0,len(viswaverange)):\n l = viswaverange[i]\n logwav = log(viswaverange[i],10)\n 
logviswavelength.append(logwav)\n \n bb5500 = log(l*blackbodywave(l,5500),10)\n logvisBBwavetimeswave5500.append(bb5500)\n bb3000= log(l*blackbodywave(l,3000),10)\n logvisBBwavetimeswave3000.append(bb3000)\n bb30000 = log(l*blackbodywave(l,30000),10)\n logvisBBwavetimeswave30000.append(bb30000)\n\nplt.scatter(logviswavelength,logvisBBwavetimeswave5500, color = 'aqua')\nplt.scatter(logviswavelength,logvisBBwavetimeswave3000,color = 'Gold')\nplt.scatter(logviswavelength,logvisBBwavetimeswave30000,color = 'salmon')\nplt.xlabel('log(wavelength)')\nplt.ylabel('log(Plank Function)')\nplt.title('log(Plank Function) vs log(wavelength)')\nplt.show()\n\nvary30k = amax(logvisBBwavetimeswave30000) - amin(logvisBBwavetimeswave30000)\nvary5k = amax(logvisBBwavetimeswave5500) - amin(logvisBBwavetimeswave5500)\nvary3k = amax(logvisBBwavetimeswave3000) - amin(logvisBBwavetimeswave3000)\n\nprint vary30k,vary5k,vary3k\n\n\n###3a\n\nRsun = 695500\nTe = 10000\np = 10**-6 * 1000**3\nk = 3.0 / 1000**2\ndef eddington(s):\n return ( (3.0/4.0) * Te**4 *( (p*k*s) + (2.0/3.0)))**(1.0/4.0)\ndef tau(s):\n return p*k*s\n\nheight = arange(0,1000,0.5)\n\ntemperature = []\nttau = []\nfor i in range(0,len(height)):\n x = eddington(height[i])\n t = tau(height[i])\n temperature.append(x)\n ttau.append(t)\n \nplt.scatter(height,temperature)\nplt.ylabel('Temperature')\nplt.xlabel('Depth')\nplt.title('Temperature vs Depth ')\nplt.show()\n\n#plt.scatter(height,ttau)\n#plt.show()\n\nttau = array(ttau)\nttwooverthree = np.where(ttau==0.666 )\n#print height[ttwooverthree]\n\ntemperature = array(temperature)\n\nT10000 = np.where(temperature==9998.749765556617)\n#print T10000\n#print height[T10000]\n#print height[T10000] / Rsun\n#print (6.0/12.0)**(1.0/4.0)\n\n\n####b\n\n\n\nKb = 8.6173303e-05\nme = 9.10938356e-31\nmp = 6.64465723e-27\nkb = 1.38064852e-23\nh = 6.62607004e-34\nn = 10e19\nrho = 10e-6\n\n\ndef saha(t):\n a = (mp / rho)*((2*pi*me*kb*t) / h**2)**(3.0/2.0) * exp(-13.6 / (Kb*t))\n return (a / 2.0)*(sqrt(1 + 4.0/a) - 1)\n\ntemp = arange(1,20000,50)\nfraction = []\n\nfor i in range(0,len(height)):\n f = saha(temperature[i])\n fraction.append(f)\n\nplt.scatter(temperature,fraction)\nplt.ylim(0,1)\nplt.show()\n\n\nfs = []\nfor i in range(0,len(temperature)):\n f = saha(temperature[i])\n fs.append(f)\n\nfs = array(fs)\nplt.scatter(height,fs)\nplt.ylim(0,1)\nplt.ylabel('Fraction')\nplt.xlabel('Depth')\nplt.title('Fraction vs Depth ')\nplt.show()\n\nmostf2 = np.where(fs==0.9900108886802386)\n#print height[mostf2]\n\n\n###c\n\nn = arange(3,103,1)\n\nang = 911.6e-10\ndef wavelength(n):\n return ang / ( 1 / 4.0 - 1.0/n**2) \n\n#print wavelength(3)\n\n\np = 10**-6 \nk = 3.0 \nkbal = 3.5e5 + k\n\n\nheight = arange(0,10,0.01)\n\ndef optdepth(s,kap):\n return p*kap*s\n\n\nopticaldepthbalmer = []\nopticaldepthreg = []\nfor i in range(0,len(height)):\n optbal = optdepth(height[i],kbal)\n optreg = optdepth(height[i],k)\n opticaldepthbalmer.append(optbal)\n opticaldepthreg.append(optreg)\n\n#plt.scatter(height,opticaldepthbalmer)\nplt.scatter(height,opticaldepthreg)\nplt.ylim(0,1)\n#plt.xlim(0,223)\nplt.ylabel('Optical Depth')\nplt.xlabel('Depth')\n#plt.title('Optical Depth vs Depth ')\nplt.show()\n\nopticaldepthbalmer = array(opticaldepthbalmer)\ntbalmer = np.where(opticaldepthbalmer==0.6650057)\n#print height[tbalmer]\n\n#print kbal / k\n\n"
}
] | 5 |
BenBarry1/DataAnalysisProject
|
https://github.com/BenBarry1/DataAnalysisProject
|
a34ed46f9e697baa3fd91ff29adacefe39281821
|
9ae4f19bd71d1ca649491feb7d5e18f4211d2bae
|
7815a8d2e35d1d2b626af67fd1d4fad17ee3011d
|
refs/heads/master
| 2020-03-11T11:23:33.711168 | 2018-04-15T17:00:50 | 2018-04-15T17:00:50 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6927453875541687,
"alphanum_fraction": 0.699857771396637,
"avg_line_length": 40.235294342041016,
"blob_id": "102116ac1d0972225f1e7adad46ba136e73c4d2a",
"content_id": "2129a1b481a7ddd16921516c44a069b1abae9ac7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 703,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 17,
"path": "/apiquery.py",
"repo_name": "BenBarry1/DataAnalysisProject",
"src_encoding": "UTF-8",
"text": "import requests\nimport pandas\n\ndata = pandas.read_csv('D:\\\\Masters\\\\Data Analysis and Data Mining\\\\Assignment 2\\\\Data\\\\busstops.txt',delim_whitespace = True)\n\nfor i in range(0,int(len(data.stopno))):\n\t\turl = 'http://data.smartdublin.ie/cgi-bin/rtpi/realtimebusinformation?stopid='+str(data.stopno[i])+'&format=xml'\n\t\tres = requests.get(url)\n\t\tfilename = 'D:\\\\Masters\\\\Data Analysis and Data Mining\\\\Assignment 2\\\\Data\\\\'+str(data.stopno[i])+str(data.fileno[i])+'.xml'\n\t\tprint filename\n\t\tf=open(filename,\"w+\")\n\t\tf.write(res.content)\n\t\tf.close()\n\t\t\n\t\tdata.fileno[i] = data.fileno[i]+1\n\ndata.to_csv('D:\\\\Masters\\\\Data Analysis and Data Mining\\\\Assignment 2\\\\Data\\\\busstops.txt', sep='\\t', index = False)\n\t\t"
}
] | 1 |
cponecp/py_demo
|
https://github.com/cponecp/py_demo
|
aa2601b7d6d9e79aca39cd12c0fd5965469b1d13
|
64e66f301310fceb43f2426690fad85b81cdd9c3
|
917791d330851e19449a2c66be2670a4ae0b1cf2
|
refs/heads/master
| 2020-04-11T01:57:02.934576 | 2018-12-12T14:59:58 | 2018-12-12T14:59:58 | 161,430,854 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7894737124443054,
"alphanum_fraction": 0.7894737124443054,
"avg_line_length": 18,
"blob_id": "41c7f6d9e2e84e6220b9897266f0f4d172707dab",
"content_id": "634a7cb5597a312e3638c62a04129d2b4e5be292",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 38,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 2,
"path": "/README.md",
"repo_name": "cponecp/py_demo",
"src_encoding": "UTF-8",
"text": "# py_demo\nfor practice git repository\n"
},
{
"alpha_fraction": 0.6875,
"alphanum_fraction": 0.75,
"avg_line_length": 9.666666984558105,
"blob_id": "bf7c89eca0c601fc7ce19f480b96f95e31eda3bd",
"content_id": "56eb0277dabb1bb2131cb8747d87e414119d2231",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 32,
"license_type": "no_license",
"max_line_length": 13,
"num_lines": 3,
"path": "/index_view.py",
"repo_name": "cponecp/py_demo",
"src_encoding": "UTF-8",
"text": "import Django\nprint(1)\nprint(2)\n"
}
] | 2 |
ChangeWarehouse/vod
|
https://github.com/ChangeWarehouse/vod
|
3a1ede4464e8ea3938fe2508e3a87ee3ae91ca1b
|
0aa07e166ad313dc170038d2cca4e689eb78aa33
|
8d60f4abdbda8e06b5ac0212fd87d7e25400a6a5
|
refs/heads/develop
| 2022-12-15T17:49:03.856904 | 2019-05-23T12:31:24 | 2019-05-23T12:31:24 | 188,182,733 | 0 | 0 | null | 2019-05-23T07:23:08 | 2019-05-23T12:31:26 | 2022-12-08T05:09:14 |
CSS
|
[
{
"alpha_fraction": 0.5125392079353333,
"alphanum_fraction": 0.5208986401557922,
"avg_line_length": 23.84415626525879,
"blob_id": "beeae2b7bf8618cc8b8228bf896be0781a7451cd",
"content_id": "a09490459bb5b7871f1343075a8f228002e5b3c0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2020,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 77,
"path": "/app/core/db.py",
"repo_name": "ChangeWarehouse/vod",
"src_encoding": "UTF-8",
"text": "import pymysql\nfrom app.config.secure import DUSER,DPASSWORD,DHOST,DPORT,DATABASE\n# 创建一个基于pymysql的操作类 主要用于对数据的增删改查\n\nclass DB:\n # 初始化/构造\n def __init__(self,host='127.0.0.1',port=3306,user='root',password='123',database='vod'):\n try:\n # 获取数据库连接/句柄\n self.coon = pymysql.connect(host=host, port=port, user=user, password=password, database=database)\n # 获取游标\n self.cursor = self.coon.cursor(cursor=pymysql.cursors.DictCursor)\n except Exception as e:\n print(e)\n\n\n # 增\n def insert(self,sql):\n try:\n insert_id = self.cursor.execute(sql)\n self.coon.commit()\n return self.cursor.lastrowid\n except:\n self.coon.rollback()\n return 0\n\n # 删\n def delete(self,sql):\n try:\n del_id = self.cursor.execute(sql)\n self.coon.commit()\n return self.coon.affected_rows()\n except:\n self.coon.rollback()\n return 0\n\n #改\n def update(self,sql):\n try:\n update_id=self.cursor.execute(sql)\n self.coon.commit()\n return self.coon.affected_rows()\n except:\n self.coon.rollback()\n return 0\n\n\n #查\n def select(self,sql):\n try:\n self.cursor.execute(sql)\n data = self.cursor.fetchall()\n return data\n except:\n return []\n\n # 查一条记录\n def get_one(self,sql):\n try:\n self.cursor.execute(sql)\n return self.cursor.fetchone()\n except:\n return {}\n\n\n # 析构函数\n def __del__(self):\n try:\n if self.cursor:\n self.cursor.close()\n if self.coon:\n self.coon.close()\n except:\n pass\n pass \n\ndb = DB(host=DHOST,port=DPORT,user=DUSER,password=DPASSWORD,database=DATABASE)\n\n"
},
{
"alpha_fraction": 0.4523809552192688,
"alphanum_fraction": 0.6845238208770752,
"avg_line_length": 14.272727012634277,
"blob_id": "ec7216e3c419e84e9b659a620e7bebe3eb232834",
"content_id": "b5149e5fdb6a8090ec6977acfc17c08cde8913a3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 168,
"license_type": "no_license",
"max_line_length": 19,
"num_lines": 11,
"path": "/requirement.txt",
"repo_name": "ChangeWarehouse/vod",
"src_encoding": "UTF-8",
"text": "certifi==2019.3.9\nchardet==3.0.4\nClick==7.0\nFlask==1.0.3\nidna==2.8\nitsdangerous==1.1.0\nJinja2==2.10.1\nMarkupSafe==1.1.1\nPyMySQL==0.9.3\nurllib3==1.25.2\nWerkzeug==0.15.4\n"
},
{
"alpha_fraction": 0.6783115267753601,
"alphanum_fraction": 0.6870450973510742,
"avg_line_length": 39.47058868408203,
"blob_id": "b9814e216b2bdf953a97701d366f1920e2546563",
"content_id": "d370ab9b0dd0b6e889d8ac85e942aa3f43bda6ce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 767,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 17,
"path": "/app/controller/login.py",
"repo_name": "ChangeWarehouse/vod",
"src_encoding": "UTF-8",
"text": "from app.controller import admin\nfrom flask import render_template,request\nfrom app.code.code import StatusCode\nfrom app.helper.func import ajaxReturn\n# 登陆\[email protected]('/login',methods=['GET','POST'])\ndef login():\n if request.method == 'POST':\n username = request.form.get('username').strip() if request.form.get('username') else ''\n username =\"\"\n password = request.form.get('password').strip() if request.form.get('password') else ''\n if not username or not password:\n return ajaxReturn(StatusCode.A90000,data={\"username\":\"fq\"})\n # 查库 如果说用户名或者密码正确,我是可以重定向到系统内部,如果错误,我怎么办?failure+1 lock\n\n\n return render_template('login/login.html')"
},
{
"alpha_fraction": 0.3928571343421936,
"alphanum_fraction": 0.5714285969734192,
"avg_line_length": 21.600000381469727,
"blob_id": "f8a29259030d6086f93ad7b7bd24239231b17b31",
"content_id": "d9695106e8286652773b4461f1c931b144db97d6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 138,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 5,
"path": "/app/code/code.py",
"repo_name": "ChangeWarehouse/vod",
"src_encoding": "UTF-8",
"text": "class StatusCode:\n\n # 针对于登陆功能\n A90000 = {\"code\":90000,\"msg\":\"成功\"}\n A90001 = {\"code\":90001,\"msg\":\"缺省参数\"}"
},
{
"alpha_fraction": 0.6605504751205444,
"alphanum_fraction": 0.6605504751205444,
"avg_line_length": 17.33333396911621,
"blob_id": "985475a899b09e84ffa55928aee42ae221723934",
"content_id": "d30178e485744028459a744c558ed546e3fb5e96",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 129,
"license_type": "no_license",
"max_line_length": 29,
"num_lines": 6,
"path": "/app/helper/func.py",
"repo_name": "ChangeWarehouse/vod",
"src_encoding": "UTF-8",
"text": "# 辅助函数\nimport json\n# 定义返回结构\ndef ajaxReturn(info,data=[]):\n info['data'] = data\n return json.dumps(info)"
},
{
"alpha_fraction": 0.4577464759349823,
"alphanum_fraction": 0.6197183132171631,
"avg_line_length": 12,
"blob_id": "d3e67bdcc203e9c810f0e4a35b5a34fb135ffd97",
"content_id": "f546dbd14b03d3ffbdbde186bfe12e92bf06ada0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 160,
"license_type": "no_license",
"max_line_length": 19,
"num_lines": 11,
"path": "/app/config/secure.py",
"repo_name": "ChangeWarehouse/vod",
"src_encoding": "UTF-8",
"text": "# 核心数据\nHOST = '127.0.0.1'\nPORT = 9000\nDEBUG = True\n\n# 数据库配置\nDHOST = '127.0.0.1'\nDPORT = 3306\nDUSER = 'root'\nDPASSWORD = '123'\nDATABASE = 'vod'"
},
{
"alpha_fraction": 0.6152927279472351,
"alphanum_fraction": 0.6427717804908752,
"avg_line_length": 19.950000762939453,
"blob_id": "5982db7b7c7b104761b519208333b64ae8a829e5",
"content_id": "f33a6bf50997e50b3b002d083dfbeb60cd90883b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 885,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 40,
"path": "/app/core/db1.py",
"repo_name": "ChangeWarehouse/vod",
"src_encoding": "UTF-8",
"text": "# pymysql\nimport pymysql\n#1 建立连接\nconn = pymysql.connect(host='127.0.0.1',port=3306,user='root',password='123',database='db5')\n\n\n# 2 获取游标\ncursor = conn.cursor(cursor=pymysql.cursors.DictCursor)\n# cursor = conn.cursor()\n# 3 准备SQL语句\n# sql = \"select name from user;\"\n# sql = \"insert into user(`id`,`name`) values('2','fuqiang')\"\n# sql = 'update user set name=\"{}\" where id=1'.format('alexdsb')\nsql = \"delete from user where name='{}'\".format('fuqiang')\n# 4 处理SQL\ntry:\n affect_id = cursor.execute(sql)\n conn.commit()\nexcept:\n conn.rollback()\n affect_id = 0\n\nprint(affect_id)\n\n# try:\n# insert_id = cursor.execute(sql)\n# conn.commit()\n# print(insert_id)\n# except Exception as e:\n# print(e)\n# conn.rollback()\n# insert_id = 0\n\n\n# data = cursor.fetchone()\n# print(data)\n\n# 5 释放资源关闭连接\ncursor.close()\nconn.close()"
},
{
"alpha_fraction": 0.7010309100151062,
"alphanum_fraction": 0.7010309100151062,
"avg_line_length": 15.333333015441895,
"blob_id": "dad8008d178f7b7ecb956c8cb43613881bf669ff",
"content_id": "ca3198e14605aac16971d84382491c6bb955fbb4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 111,
"license_type": "no_license",
"max_line_length": 32,
"num_lines": 6,
"path": "/app/controller/user.py",
"repo_name": "ChangeWarehouse/vod",
"src_encoding": "UTF-8",
"text": "#处理用户相关的\nfrom app.controller import admin\n\[email protected]('/')\ndef index():\n return 'user-index'"
},
{
"alpha_fraction": 0.5714285969734192,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 6.333333492279053,
"blob_id": "c0e87fec71b1c03849b56f1b192553c95a7babb6",
"content_id": "2dd161eb41c3f81bb4f46d687c6ce329a3c71749",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 29,
"license_type": "no_license",
"max_line_length": 13,
"num_lines": 3,
"path": "/app/config/setting.py",
"repo_name": "ChangeWarehouse/vod",
"src_encoding": "UTF-8",
"text": "# 普通配置\n\nPAGESIZE = 20"
}
] | 9 |
athanasios-gkikas/nlp_projects
|
https://github.com/athanasios-gkikas/nlp_projects
|
9881838b3d408f5bd1b4899fe90e6e4289d4d51f
|
adfbbbcefee2cdecdedaa5c8a4c88f7f4c8e83a8
|
ee9c9f479df43053b0065ed8539ace5aee6efec6
|
refs/heads/master
| 2020-04-25T17:36:37.952414 | 2019-05-12T20:41:29 | 2019-05-12T20:41:29 | 172,954,766 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6273130774497986,
"alphanum_fraction": 0.6299037933349609,
"avg_line_length": 24.49056625366211,
"blob_id": "c5df6b71dc42ff4fbf0f5a702946fadff66e55e6",
"content_id": "1d6ace670cd5253b1c2532577d978b4216c1f09b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2702,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 106,
"path": "/project_4/most_freq_base_classifier.py",
"repo_name": "athanasios-gkikas/nlp_projects",
"src_encoding": "UTF-8",
"text": "from random import shuffle\nfrom sklearn.metrics import accuracy_score, f1_score, classification_report\nimport pyconll\nimport os\nfrom nltk.tokenize import word_tokenize\n\n\ndef buildDataset(pData) :\n x_train = []\n y_train = []\n\n for sentence in pData :\n\n for token in sentence :\n x_train.append(token[0])\n y_train.append(token[1])\n\n return x_train, y_train\n\n\ndef load_data(pData) :\n pos_data = []\n for sentence in pData:\n token_list = []\n for token in sentence:\n token_list.append([token.form, token.upos if token.upos is not None else \"None\"])\n\n pos_data.append(token_list)\n\n return pos_data\n\n\ndef train_most_freq_pos_classifier(x_train, y_train):\n count_pos_per_word = dict()\n\n for i in range(0, len(x_train)):\n if x_train[i] not in count_pos_per_word:\n count_pos_per_word[x_train[i]] = dict()\n if y_train[i] not in count_pos_per_word[x_train[i]]:\n count_pos_per_word[x_train[i]][y_train[i]] = 0\n count_pos_per_word[x_train[i]][y_train[i]] += 1\n\n # print(count_pos_per_word)\n\n for key_word in count_pos_per_word:\n temp_dict = count_pos_per_word[key_word]\n maximum = 0\n most_freq_tag = \"\"\n\n for key_tag in temp_dict:\n if temp_dict[key_tag] > maximum:\n maximum = temp_dict[key_tag]\n most_freq_tag = key_tag\n\n count_pos_per_word[key_word] = most_freq_tag\n\n # print(count_pos_per_word)\n return count_pos_per_word\n\n\ndef predict_sentence(sentence):\n y_pred = []\n #sentence = word_tokenize(sentence)\n\n for token in sentence:\n print(token)\n if token in most_freq_pos_per_word:\n y_pred.append(most_freq_pos_per_word[token])\n else:\n y_pred.append('UNK')\n\n return y_pred\n\ncwd = os.getcwd()\ntrain_path = cwd + \"/dataset/en_ewt-ud-train.conllu\"\ntrain_data = pyconll.load_from_file(train_path)\ntrain = load_data(train_data)\n\nx_train, y_train = buildDataset(train)\n# print(x_train)\n# print(y_train)\n\nmost_freq_pos_per_word = train_most_freq_pos_classifier(x_train, y_train)\n\ntest_path = cwd + \"/dataset/en_ewt-ud-test.conllu\"\ntest_data = pyconll.load_from_file(test_path)\ntest = load_data(test_data)\nshuffle(test)\nx_test, y_test_true = buildDataset(test)\n\ny_test_pred = []\n\nfor word in x_test:\n if word in most_freq_pos_per_word:\n y_test_pred.append(most_freq_pos_per_word[word])\n else:\n y_test_pred.append('UNK')\n\n# print(len(x_test))\n# print(y_test_true)\n# print(y_test_pred)\n\nprint('accuracy:', accuracy_score(y_test_true, y_test_pred))\nprint(classification_report(y_test_true, y_test_pred))\n\n#print(predict_sentence(\"This is a nice car!\"))\n"
},
{
"alpha_fraction": 0.5420129299163818,
"alphanum_fraction": 0.5484764575958252,
"avg_line_length": 19.846153259277344,
"blob_id": "26b02ff77411f706912ff265000404687604dafc",
"content_id": "62a7708a39f3817f42df189e2587e40d031a5d05",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1083,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 52,
"path": "/project_4/data_generator.py",
"repo_name": "athanasios-gkikas/nlp_projects",
"src_encoding": "UTF-8",
"text": "import threading as th\nimport numpy as np\nimport tensorflow as tf\n\nfrom sklearn.utils import shuffle\n\nclass threadsafe_iter :\n\n def __init__(self, it) :\n self.it = it\n self.lock = th.Lock()\n\n def __iter__(self) :\n return self\n\n def __next__(self) :\n with self.lock :\n return self.it.next()\n\ndef threadsafe_generator(f) :\n def g(*g, **kw) :\n return threadsafe_iter(f(*g, **kw))\n return g\n\n#@threadsafe_generator\ndef data_stream(pDataPairs, pBatchSize, pNumClasses) :\n\n currBatch = 0\n x = pDataPairs[0]\n y = pDataPairs[1]\n\n while True :\n\n batchX = np.zeros((pBatchSize, x.shape[1]), dtype=x.dtype)\n batchY = np.zeros((pBatchSize, x.shape[1], pNumClasses))\n\n for i in range(pBatchSize) :\n\n offset = (currBatch + i) % len(x)\n\n batchX[i, :] = x[offset, :]\n batchY[i, :, :] = y[offset, :, :]\n\n currBatch += pBatchSize\n\n if currBatch > len(x) - 1 :\n x, y = shuffle(x, y)\n currBatch = 0\n\n yield batchX, batchY\n\n return"
},
{
"alpha_fraction": 0.5783625841140747,
"alphanum_fraction": 0.601754367351532,
"avg_line_length": 39.71428680419922,
"blob_id": "c7ac3e059c787d842ecfe100294976d8cb64aa6f",
"content_id": "f16d47e5f18ad8cd357099052d8eaf86e4d1f97d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1710,
"license_type": "no_license",
"max_line_length": 210,
"num_lines": 42,
"path": "/project_3/logistic_regression_base_classifier.py",
"repo_name": "athanasios-gkikas/nlp_projects",
"src_encoding": "UTF-8",
"text": "import os\nimport numpy as np\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score, f1_score, classification_report\n\n\ndef import_embeddings(filepath) :\n data = np.load(filepath)\n return data['a'], data['b']\n\n\n# ========== LOADING DATA ============\ncwd = os.getcwd()\ntraining_filepath = cwd + \"/dataset/train.npz\"\nx_train, y_train = import_embeddings(training_filepath)\n# print(x_train.shape)\n# print(y_train.shape)\n# print(y_train)\ny_train = [np.where(y==1)[0][0] for y in y_train]\n# print(y_train)\n\n# ====== TRAINING CLASSIFIER ========\nlr_classifier = LogisticRegression(random_state=42,\n solver='lbfgs', # newton-cg, sag, saga, lbfgs\n multi_class='multinomial',\n verbose=1).fit(x_train, y_train)\n\n\n# ====== EVALUATING CLASSIFIER ======\ntest_filepath = cwd + \"/dataset/test.npz\"\nx_test, y_test_true = import_embeddings(test_filepath)\ny_test_true = [np.where(y==1)[0][0] for y in y_test_true]\ny_test_pred = lr_classifier.predict(x_test)\n\npos_dict = {0: \"ADJ\", 1: \"ADP\", 2: \"ADV\", 3: \"AUX\", 4: \"CCONJ\", 5: \"DET\", 6: \"INTJ\", 7: \"NOUN\", 8: \"NUM\", 9: \"PART\", 10: \"PRON\", 11: \"PROPN\", 12: \"PUNCT\", 13: \"SCONJ\", 14: \"SYM\", 16: \"VERB\", 17: \"X\", 15: \"UNK\"}\ny_test_true = [pos_dict[y] for y in y_test_true]\ny_test_pred = [pos_dict[y] for y in y_test_pred]\n\nprint('accuracy:', accuracy_score(y_test_true, y_test_pred))\nprint('macro-f1-score:', f1_score(y_test_true, y_test_pred, average='macro'))\nprint('micro-f1-score:', f1_score(y_test_true, y_test_pred, average='micro'))\nprint(classification_report(y_test_true, y_test_pred))\n"
},
{
"alpha_fraction": 0.663407564163208,
"alphanum_fraction": 0.6672098636627197,
"avg_line_length": 30.56571388244629,
"blob_id": "1a48a6a70c1df188afd5ff2a0be3133ffe4be7a6",
"content_id": "002466dbbad7858617291b757280b3c421d7d2c6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5523,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 175,
"path": "/project_2/load_dataset.py",
"repo_name": "athanasios-gkikas/nlp_projects",
"src_encoding": "UTF-8",
"text": "import os\nimport numpy\nimport random\nimport re\n\nfrom shutil import rmtree\nfrom collections import Counter\nfrom nltk.tokenize import word_tokenize\nfrom string import punctuation\nfrom nltk.corpus import stopwords\n\ndef split_data(tweets, keys):\n\tnew_tweets = {}\n\n\tfor key in keys:\n\t\tnew_tweets[key] = tweets[key]\n\n\treturn new_tweets\n\ndef load_tsv(filepath):\n\ttweets = {}\n\tdifferent_duplicates = []\n\n\twith open(filepath, \"r\") as file:\n\t\tfor line in file:\n\t\t\tline = line.split(\"\\t\")\n\t\t\t#if this tweet already exists but with a different label don't add it\n\t\t\tif line[0] in list(tweets.keys()) and line[1] != tweets[line[0]][0]:\n\t\t\t\tdifferent_duplicates.append(line[0])\n\t\t\telse:\n\t\t\t\ttweets[line[0]] = [line[1], line[2].replace(\"\\n\", \"\")]\n\n\t#remove tweets that have duplicates with different label\n\tfor duplicate in different_duplicates:\n\t\tdel tweets[duplicate]\n\n\treturn tweets\n\ndef write_tsv(tweets, filename):\n\n\twith open(filename, \"w\") as output_file:\n\t\tfor tweet in tweets:\n\t\t\toutput_file.write(tweet + \"\\t\" + tweets[tweet][0] + \"\\t\" + tweets[tweet][1])\n\t\t\toutput_file.write(\"\\n\")\n\n\ndef clean_tweets(tweets):\n\tcleaned_tweets = {}\n\n\tfor tweet in tweets:\n\t\tif tweets[tweet][1] != \"Not Available\":\n\t\t\tcleaned_tweets[tweet] = tweets[tweet]\n\treturn cleaned_tweets\n\ndef merge(first_set, second_set):\n\ttotal = first_set\n\tdifferent_duplicates = []\n\n\tfor tweet in second_set:\n\t\t#if this tweet already exists but with a different label don't add it\n\t\tif tweet in list(total.keys()) and second_set[tweet][0] != total[tweet][0]:\n\t\t\tdifferent_duplicates.append(tweet)\n\t\telse:\n\t\t\ttotal[tweet] = second_set[tweet]\n\n\t#remove tweets that have duplicates with different label\n\tfor duplicate in different_duplicates:\n\t\tdel total[duplicate]\n\n\treturn total\n\ndef process_tweets(tweets):\n\ttweet_stopwords = set(stopwords.words('english') + list(punctuation) + [\"...\"] + ['AT_USER', 'URL'])\n\tnew_tweets = {}\n\n\tfor tweet in tweets:\n\t\ttext = tweets[tweet][1].lower() # convert text to lower-case\n\t\ttext = find_emoticons_in_tweets(text) # convert emoticons to text\n\t\ttext = re.sub('((www\\.[^\\s]+)|(https?://[^\\s]+))', 'URL', text) # remove URLs\n\t\ttext = re.sub('@[^\\s]+', 'AT_USER', text) # remove usernames\n\t\ttext = re.sub(r'#([^\\s]+)', r'\\1', text) # remove the # in #hashtag\n\t\ttext = re.sub('([.]+)', '.', text) # remove multiple dots\n\t\ttext = re.sub(r\"(?:['\\-_]+[a-z])\", ' ', text) # remove single characters with ' or -\n\t\ttext = word_tokenize(text)\n\t\ttext = [word for word in text if word not in tweet_stopwords]\n\n\t\tnew_tweets[tweet] = [tweets[tweet][0], \" \".join(text)]\n\n\treturn new_tweets\n\ndef find_emoticons_in_tweets(tweet):\n\tnew_tweet = tweet\n\t# All caps? 
Does it matter?\n\trepl = {' :)': ' HAPPY_EMOTICON', ' =)': ' HAPPY_EMOTICON', ' :d': ' VERY_HAPPY_EMOTICON', ' :(': ' SAD_EMOTICON',\n\t ' :/': ' MIXED_EMOTICON', ' :p': ' TONGUE_EMOTICON', ' ;)': ' WINK_EMOTICON'}\n\tfor a, b in repl.items():\n\t\tnew_tweet = new_tweet.replace(a, b)\n\n\treturn new_tweet\n\ndef main():\n\n #load data\n train_path = \"twitter_download/train_set.tsv\"\n dev_path = \"twitter_download/dev_set.tsv\"\n test_path = \"twitter_download/test_set.tsv\"\n\n train_tweets = load_tsv(train_path)\n dev_tweets = load_tsv(dev_path)\n test_tweets = load_tsv(test_path)\n\n print(\"After removing duplicates and tweets that are the same but have different labels:\")\n print(len(train_tweets),\"remained in train set\")\n print(len(dev_tweets),\"remained in dev set\")\n print(len(test_tweets),\"remained in devtest set\")\n\n #remove tweets that the text was not available\n cleaned_train_tweets = clean_tweets(train_tweets)\n cleaned_dev_tweets = clean_tweets(dev_tweets)\n cleaned_test_tweets = clean_tweets(test_tweets)\n\n print(\"Removed\", len(train_tweets)-len(cleaned_train_tweets), \"\\\"Not Available\\\" tweets from train set\")\n print(\"Removed\", len(dev_tweets)-len(cleaned_dev_tweets), \"\\\"Not Available\\\" tweets from dev set\")\n print(\"Removed\", len(test_tweets)-len(cleaned_test_tweets), \"\\\"Not Available\\\" tweets from test set\")\n\n\n print(\"Merging existing sets...\")\n\n print(\"There are\", len(cleaned_train_tweets) + len(cleaned_dev_tweets) + len(cleaned_test_tweets), \"in total\")\n\n #merge the sets removing duplicates with different label\n train_dev = merge(cleaned_train_tweets, cleaned_dev_tweets)\n total_tweets = merge(train_dev, cleaned_test_tweets)\n\n print(\"Merged set has\", len(total_tweets), \"tweets\")\n\n labels = [total_tweets[tweet][0] for tweet in total_tweets]\n\n count_labels = Counter(labels)\n\n print(count_labels)\n\n #split randomly to train, val, dev and test sets\n random.seed(42)\n keys = list(total_tweets.keys())\n random.shuffle(keys)\n\n train_split = int(numpy.floor(len(total_tweets) * 0.8))\n\n train_keys = keys[:train_split]\n test_keys = keys[train_split:]\n\n train_tweets = split_data(total_tweets, train_keys)\n test_tweets = split_data(total_tweets, test_keys)\n\n #create dataset folder\n try:\n rmtree('dataset/')\n except BaseException:\n pass # directory doesn't yet exist, no need to clear it\n os.makedirs(\"dataset/\")\n\n pr_train_tweets = process_tweets(train_tweets)\n pr_test_tweets = process_tweets(test_tweets)\n\n #write the resulted sets to tsv files\n write_tsv(pr_train_tweets, \"./dataset/train_set.tsv\")\n write_tsv(pr_test_tweets, \"./dataset/test_set.tsv\")\n\n print(\"Final dataset consists of:\")\n print(\"Train set:\", len(pr_train_tweets))\n print(\"Test set:\", len(pr_test_tweets))\n\nif __name__ == \"__main__\":\n\tmain()"
},
{
"alpha_fraction": 0.5998225212097168,
"alphanum_fraction": 0.6060337424278259,
"avg_line_length": 32.14706039428711,
"blob_id": "f8acc273777ecc2ab0beb9d684f8a5f523ce7a5c",
"content_id": "aad1c1b8fb3045283d64d9b53a8d1295ad56fda4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1127,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 34,
"path": "/project_4/layers.py",
"repo_name": "athanasios-gkikas/nlp_projects",
"src_encoding": "UTF-8",
"text": "import keras.backend as K\nimport tensorflow as tf\nimport tensorflow_hub as hub\nfrom keras.layers import Layer\n\n\nclass ElmoLayer(Layer):\n def __init__(self, seq_len, batchSize, **kwargs):\n self.elmo = None\n self.name = \"ELMo\"\n self.seqLen = seq_len\n self.batchSize = batchSize\n super(ElmoLayer, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.elmo = hub.Module('https://tfhub.dev/google/elmo/2',\n trainable=True, name=\"{}_module\".format(self.name))\n\n self.trainable_weights += K.tf.trainable_variables(\n scope=\"^{}_module/.*\".format(self.name))\n\n super(ElmoLayer, self).build(input_shape)\n\n def call(self, x, mask=None):\n return self.elmo(inputs={\n \"tokens\": tf.squeeze(tf.cast(x, tf.string)),\n \"sequence_len\": tf.constant(self.batchSize * [self.seqLen])\n }, as_dict=True, signature='tokens')['elmo']\n\n def compute_output_shape(self, input_shape):\n return input_shape[0], input_shape[1], 1024\n\n def compute_mask(self, inputs, mask=None):\n return None\n"
},
{
"alpha_fraction": 0.565699577331543,
"alphanum_fraction": 0.5934250950813293,
"avg_line_length": 29.056547164916992,
"blob_id": "5349b9b09e53bc7a80183efe12fb632d80b81aa2",
"content_id": "03373dc5adaafd36cabd14882cd82dc1e10594a4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10099,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 336,
"path": "/project_2/classifiers.py",
"repo_name": "athanasios-gkikas/nlp_projects",
"src_encoding": "UTF-8",
"text": "import re\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import learning_curve\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.linear_model import LogisticRegression, SGDClassifier\nfrom sklearn.dummy import DummyClassifier\nfrom sklearn.svm import LinearSVC, SVC, NuSVR\nfrom sklearn.metrics import precision_recall_curve, average_precision_score, f1_score\nfrom sklearn.preprocessing import label_binarize\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.decomposition import TruncatedSVD\nfrom time import time\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.utils.multiclass import unique_labels\n\ndef load_tsv(filepath):\n\ttweets = {}\n\n\twith open(filepath, \"r\") as file:\n\t\tfor line in file:\n\t\t\tline = line.split(\"\\t\")\n\t\t\ttweets[line[0]] = [line[1], line[2].replace(\"\\n\", \"\")]\n\n\treturn tweets\n\ndef plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,\n n_jobs=1, train_sizes=np.linspace(.1, 1.0, 10), scorer='f1_macro'):\n\n plt.subplot(2, 2, 1)\n if ylim is not None:\n plt.ylim(*ylim)\n plt.xlabel(\"Training examples\")\n\n train_sizes, train_scores, test_scores = learning_curve(\n estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes, scoring=scorer)\n\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n plt.grid()\n\n plt.fill_between(train_sizes, train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, alpha=0.1,\n color=\"r\")\n plt.fill_between(train_sizes, test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.1, color=\"b\")\n plt.plot(train_sizes, train_scores_mean, 'o-', color=\"r\",\n label=\"Training F1-macro\")\n plt.plot(train_sizes, test_scores_mean, 'o-', color=\"b\",\n label=\"Test F1-macro\")\n\n plt.legend(loc=\"best\")\n\n plt.subplot(2, 2, 2)\n plt.xlabel(\"Training examples\")\n\n train_sizes, train_scores, test_scores = learning_curve(\n estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes, scoring='accuracy')\n\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n plt.grid()\n\n plt.fill_between(train_sizes, train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, alpha=0.1,\n color=\"r\")\n plt.fill_between(train_sizes, test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.1, color=\"b\")\n plt.plot(train_sizes, train_scores_mean, 'o-', color=\"r\",\n label=\"Training accuracy\")\n plt.plot(train_sizes, test_scores_mean, 'o-', color=\"b\",\n label=\"Test accuracy\")\n\n plt.legend(loc=\"best\")\n\n #plt.show()\n\ndef plot_precision_recall_curves(estimator, name, x_train, y_1, x_test, y_2):\n y_train = label_binarize(y_1, classes=[0, 1, 2])\n y_test = label_binarize(y_2, classes=[0, 1, 2])\n\n labels = [\"negative\", \"neutral\", \"positive\"]\n\n classifier = OneVsRestClassifier(estimator)\n y_score = classifier.fit(x_train, y_train).predict_proba(x_test)\n\n precision = dict()\n recall = dict()\n average_precision = dict()\n n_classes = 3\n\n for i in 
range(n_classes):\n precision[i], recall[i], _ = precision_recall_curve(\n y_test[:, i], y_score[:, i])\n\n average_precision[i] = average_precision_score(\n y_test[:, i], y_score[:, i])\n\n precision[\"micro\"], recall[\"micro\"], _ = precision_recall_curve(\n y_test.ravel(), y_score.ravel())\n\n average_precision[\"micro\"] = average_precision_score(\n y_test, y_score, average=\"micro\")\n\n plt.subplot(2, 2, 3)\n\n plt.plot(recall[\"micro\"], precision[\"micro\"], color='navy',\n label='micro-average AUC= {0:0.2f}'.format(average_precision['micro']))\n\n plt.xlabel('Recall')\n plt.ylabel('Precision')\n plt.ylim([0.0, 1.05])\n plt.xlim([0.0, 1.0])\n plt.legend(loc=\"best\")\n plt.grid()\n\n plt.subplot(2, 2, 4)\n\n for i, color in zip(range(3), ['red', 'pink', 'blue']):\n plt.plot(recall[i], precision[i], color=color,\n label='{0} AUC= {1:0.2f}'\n ''.format(labels[i], average_precision[i]))\n\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('Recall')\n plt.ylabel('Precision')\n plt.legend(loc=\"lower left\")\n plt.grid()\n plt.show()\n return\n\ndef encodeSet(pSet, pEncoder) :\n\n x = [s[1] for s in list(pSet.values())]\n y = [s[0] for s in list(pSet.values())]\n y = pEncoder.transform(y)\n\n return x, y\n\ndef tuner(pTrain, pPipeline, pParams) :\n\n grid = GridSearchCV(pPipeline,\n param_grid=pParams, cv=5, n_jobs=-1, verbose=1)\n\n t0 = time()\n\n grid.fit(pTrain[0], pTrain[1])\n\n print(\"done in %0.3fs\" % (time() - t0))\n\n print(\"Best score: %0.3f\" % grid.best_score_)\n print(\"Best parameters set:\")\n best_parameters = grid.best_estimator_.get_params()\n\n for param_name in sorted(pParams.keys()):\n print(\"\\t%s: %r\" % (param_name, best_parameters[param_name]))\n\n return grid.best_estimator_.named_steps['clf']\n\ndef tuneSVC(pTrain) :\n\n pipeline = Pipeline([\n ('clf', OneVsRestClassifier(SVC(\n class_weight=\"balanced\",\n max_iter=5000,\n gamma='scale',\n random_state=0))),])\n\n parameters = {\n 'clf__estimator__kernel' : ['linear', 'rbf', 'poly', 'sigmoid'],\n 'clf__estimator__C' : [0.01, 0.1, 1.0, 10.0],\n 'clf__estimator__coef0' : [3, 4],}\n\n return tuner(pTrain, pipeline, parameters)\n\ndef tuneSGD(pTrain) :\n\n pipeline = Pipeline([\n ('clf', OneVsRestClassifier(SGDClassifier(\n max_iter=10000,\n tol=1.e-4,\n n_jobs=-1,\n class_weight='balanced',\n random_state=0))),])\n\n parameters = {\n 'clf__estimator__penalty': ['l2', 'elasticnet'],\n 'clf__estimator__l1_ratio': [0.0, 0.2, 0.4, 0.6, 0.8, 1.0],\n 'clf__estimator__loss': ['modified_huber', 'perceptron'],}\n\n return tuner(pTrain, pipeline, parameters)\n\ndef tuneBase(pTrain) :\n\n pipeline = Pipeline([\n ('clf', DummyClassifier(random_state=0)),])\n\n parameters = { 'clf__strategy': ['most_frequent', 'stratified'], }\n\n return tuner(pTrain, pipeline, parameters)\n\ndef tuneLogistic(pTrain) :\n\n pipeline = Pipeline([\n ('clf', LogisticRegression(\n class_weight=\"balanced\",\n max_iter=10000,\n n_jobs=-1,\n random_state=0,\n multi_class='multinomial',\n penalty='l2')),])\n\n parameters = {\n 'clf__solver': ['newton-cg', 'lbfgs', 'sag',],\n 'clf__C' : [0.01, 0.1, 1.0, 10.0,]}\n\n return tuner(pTrain, pipeline, parameters)\n\ndef approxDim(pData) :\n\n maxFeatures = tuple()\n #(700, 1225, 1500, 2000, 2200, 2275, 2650, 3025, 2850, 3162, 3475, 3787, 3700)\n\n for f in range(1000, 14000, 1000) :\n print(\"num features: \", f)\n\n vectorizer = TfidfVectorizer(\n use_idf=True,\n smooth_idf=True,\n sublinear_tf=True,\n norm='l2',\n ngram_range=(1,3),\n max_features=f)\n\n tfidf = 
vectorizer.fit_transform(pData)\n\n svd = TruncatedSVD(n_components=1, n_iter=1, random_state=0)\n left = 100\n right = f - 100\n\n while True :\n c = (left + right) // 2\n svd = TruncatedSVD(n_components=c, n_iter=1, random_state=0)\n svd.fit(tfidf)\n var = svd.explained_variance_ratio_.sum()\n if var > 0.88 and var < 0.93 :\n print(c)\n maxFeatures += (c,)\n break\n elif var > 0.93 :\n right = c\n else :\n left = c\n\n print(maxFeatures)\n arr = np.array(maxFeatures).transpose()\n x = np.arange(1000, 14000, 1000)\n\n fig, ax = plt.subplots()\n ax.plot(x, arr)\n ax.set(xlabel='initial domain', ylabel='projected domain')\n ax.grid()\n plt.show()\n\n fig, ax = plt.subplots()\n ax.plot(x, arr / x)\n ax.set(xlabel='ratio', ylabel='domain')\n ax.grid()\n plt.show()\n\n arr = np.log(arr)\n\n return int(np.exp(arr.sum() / len(arr)))\n\ndef plotClassifier(pEstimator, pTrain, pTest) :\n\n fig = plt.figure(figsize=(8, 7))\n plt.subplots_adjust(wspace=0.3, hspace=0.3)\n\n plot_learning_curve(pEstimator,\n \"Learning curves\",\n pTrain[0], pTrain[1], (0.1, 1.01), cv=5,\n n_jobs=-1)\n\n plot_precision_recall_curves(pEstimator,\n \"P-R curves\",\n pTrain[0], pTrain[1], pTest[1], pTest[0])\n\n return\n\ndef main():\n\n cwd = os.getcwd()\n\n train_tweets = load_tsv(cwd + \"/project_2/dataset/train_set.tsv\")\n test_tweets = load_tsv(cwd + \"/project_2/dataset/test_set.tsv\")\n\n encoder = preprocessing.LabelEncoder()\n encoder.fit([\"negative\", \"neutral\", \"positive\"])\n\n x_train, y_train = encodeSet(train_tweets, encoder)\n x_test, y_test = encodeSet(test_tweets, encoder)\n\n f = 2281 #approxDim(y_train)\n vectorizer = TfidfVectorizer(\n use_idf=True,\n smooth_idf=True,\n sublinear_tf=True,\n norm='l2',\n ngram_range=(1,3),\n max_features=f)\n\n vectorizer.fit(x_train)\n x_train = vectorizer.transform(x_train)\n x_test = vectorizer.transform(x_test)\n\n model = tuneBase([x_train, y_train])\n #model = tuneSVC([x_train, y_train])\n #model = tuneSGD([x_train, y_train])\n #model = tuneLogistic([x_train, y_train])\n\n plotClassifier(model, [x_train, y_train], [x_test, y_test])\n\nif __name__ == \"__main__\":\n\tmain()\n"
},
{
"alpha_fraction": 0.5110330581665039,
"alphanum_fraction": 0.5237841010093689,
"avg_line_length": 29.38979148864746,
"blob_id": "c9ff016f7abb93a30b2b1cd7de10e34623e944f7",
"content_id": "6398b2a0284a7309ebfae5c4ff496874599ec95a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13097,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 431,
"path": "/project_1/model.py",
"repo_name": "athanasios-gkikas/nlp_projects",
"src_encoding": "UTF-8",
"text": "import gzip\nimport glob\nimport json\nimport math\nimport nltk\nimport os\nimport random\nimport string\nimport sys\n\nclass model :\n\n def __init__(self, pRoot = None) :\n\n self.mRoot = pRoot if pRoot is not None else os.getcwd() + \"//\"\n self.mNgramList = [[] for _ in range(3)]\n self.mLambda = 1.0\n\n return\n\n def build(self, pFile, pShuffleSentences = False) :\n\n self.export_corpus(pFile, pShuffleSentences)\n\n unigram = self.build_ngrams(self.mRoot + \"corpus//train_corpus\", 1)\n bigram = self.build_ngrams(self.mRoot + \"corpus//train_corpus\", 2)\n trigram = self.build_ngrams(self.mRoot + \"corpus//train_corpus\", 3)\n\n self.mNgramList[0] = self.compute_counts(unigram)\n self.mNgramList[1] = self.compute_counts(bigram)\n self.mNgramList[2] = self.compute_counts(trigram)\n\n print(\"Exporting ngrams\")\n self.export_lists(self.mRoot + \"unigrams//unigram\", self.mNgramList[0])\n self.export_lists(self.mRoot + \"bigrams//bigram\", self.mNgramList[1])\n self.export_lists(self.mRoot + \"trigrams//trigram\", self.mNgramList[2])\n\n return\n\n def load_counts(self) :\n\n print(\"Loading unigrams\")\n self.load_ngram(self.mRoot + \"unigrams//*\", 0)\n\n print(\"Loading bigrams\")\n self.load_ngram(self.mRoot + \"bigrams//*\", 1)\n\n print(\"Loading trigrams\")\n self.load_ngram(self.mRoot + \"trigrams//*\", 2)\n\n return\n\n def set_voc(self, pDic, pModel) :\n self.mNgramList[pModel - 1] = pDic\n return\n\n def load_ngram(self, pFile, pIdx) :\n\n for i, source in enumerate(glob.glob(pFile)) :\n self.mNgramList[pIdx] += [{\n tuple(pair[0]) : pair[1] for pair in self.import_arr(source) }]\n\n return\n\n def export_lists(self, pFile, pLists) :\n\n for i, ngrams in enumerate(pLists) :\n self.export_arr(\n pFile + \"_\" + str(i),\n [(k,v) for k, v in ngrams.items()])\n\n return\n\n def export_arr(self, pFile, pArray) :\n\n with gzip.GzipFile(pFile + \".json.gz\", \"wb\") as file :\n file.write(json.dumps(pArray, indent = 4).encode('utf-8'))\n\n return\n\n def import_arr(self, pFile) :\n\n with gzip.GzipFile(pFile, \"rb\") as file :\n lines = json.loads(file.read().decode('utf-8'))\n\n return lines\n\n def low_freq_tokens(self, pSentences, pThresh) :\n\n voc = dict()\n\n for sentence in pSentences :\n for token in sentence :\n if token in voc :\n voc[token] += 1\n else :\n voc[token] = 1\n\n return {k for k, v in voc.items() if v < pThresh}\n\n def remove_tokens(self, pSentences, pTokens, pReplacement) :\n\n for sentence in pSentences :\n for i, token in enumerate(sentence) :\n if token in pTokens :\n sentence[i] = pReplacement\n\n return pSentences\n\n def export_corpus(self, pFile, pShuffleSentences) :\n\n punctuation = str.maketrans('', '', \".,!?)(-\")\n replace = str.maketrans('\\'', '_')\n\n sentectesArr = []\n\n with open(pFile, mode=\"r\", encoding='utf8') as corpus_desrc :\n print(\"Reading corpus\")\n corpus = corpus_desrc.read()\n\n print(\"Extracting sentences\")\n sentences = nltk.tokenize.sent_tokenize(corpus)\n\n print(\"Processing sentences\")\n for sentence in sentences :\n sentence = sentence.lower()\n sentence = sentence.translate(punctuation)\n #sentence = sentence.translate(replace)\n sentectesArr += (nltk.word_tokenize(sentence),)\n\n if pShuffleSentences :\n random.shuffle(sentectesArr)\n\n size = len(sentectesArr)\n train_sz = int(size * 0.7)\n val_sz = int(size * 0.2)\n\n train = sentectesArr[:train_sz]\n val = sentectesArr[train_sz:train_sz + val_sz]\n test = sentectesArr[train_sz + val_sz:]\n\n print(\"Extracting low frequency tokens\")\n lfTokens = 
self.low_freq_tokens(train, 10)\n\n print(\"Removing low frequency tokens\")\n train = self.remove_tokens(train, lfTokens, \"*UNK*\")\n val = self.remove_tokens(val, lfTokens, \"*UNK*\")\n test = self.remove_tokens(test, lfTokens, \"*UNK*\")\n\n print(\"Exporting dataset\")\n self.export_arr(self.mRoot + \"corpus//train_corpus\", train)\n self.export_arr(self.mRoot + \"corpus//val_corpus\", val)\n self.export_arr(self.mRoot + \"corpus//test_corpus\", test)\n\n return\n\n def get_ngram(self, pTokens, pN) :\n\n ngram = [[None] * pN for _ in range (len(pTokens) - pN + 1)]\n\n for i in range(len(pTokens) - pN + 1) :\n ngram[i] = pTokens[i : i + pN]\n\n return ngram\n\n def augment_seq(self, pSeq, pN, pMerge = False) :\n\n st = list()\n\n if pN == 1 :\n st += ['*start*',]\n else :\n for i in range(1, pN) :\n st += [str('*start' + str(i) + '*'), ]\n\n sequences = list()\n\n if pMerge :\n for sentence in pSeq :\n sequences.extend((st + sentence + ['*end*',]))\n sequences = [sequences,]\n else :\n for sentence in pSeq :\n sequences += [st + sentence + ['*end*',],]\n\n return sequences\n\n def build_ngrams(self, pFile, pN) :\n\n sentences = self.import_arr(pFile + \".json.gz\")\n\n print(\"Building sequence\")\n sequence = self.augment_seq(sentences, pN, True)\n\n print(\"Creating ngram \", pN)\n return [self.get_ngram(sequence[0], n + 1) for n in range(0, pN)]\n\n def compute_counts(self, pNgramLists) :\n\n counts = list()\n\n print(\"Creating ngram maps\")\n\n for i, ngramList in enumerate(pNgramLists) :\n voc = {}\n for ngram in ngramList :\n ngram = tuple(ngram)\n\n if ngram in voc :\n voc[ngram] += 1\n else :\n voc[ngram] = 1\n counts.append(voc)\n\n return counts\n\n def import_queries(self) :\n return self.import_arr(self.mRoot + \"corpus//test_corpus.json.gz\")\n\n def import_val(self) :\n return self.import_arr(self.mRoot + \"corpus//val_corpus.json.gz\")\n\n def get_voc_sz(self, pN) :\n return len(self.mNgramList[pN][0])\n\n def get_count(self, pNgram, pModel) :\n count = 0\n n = len(pNgram) - 1\n\n if pNgram in self.mNgramList[pModel - 1][n] :\n count = self.mNgramList[pModel - 1][n][pNgram]\n\n return count\n\n def accum_ngrams(self, pN) :\n sum = 0\n\n for ngram in self.mNgramList[pN - 1][0].values() :\n sum += ngram\n\n return sum\n\n def laplace_smoothing(self, pEnum, pDenom, pN) :\n V = self.get_voc_sz(pN - 1) - 1\n a = 1.0\n p = ((pEnum + a) / (pDenom + a * V))\n\n if p > 1.0 or p < 0.0 :\n print(\n \"Prob out of range: \",\n \" cEnum: \", pEnum,\n \" cDenom: \", pDenom,\n \" V: \", V)\n\n return p\n\n def prob(self, pNgram, pModel) :\n n = len(pNgram)\n\n cEnum = self.get_count(pNgram, pModel)\n cDenom = self.get_count(pNgram[:n - 1], pModel) if n > 1 else \\\n self.accum_ngrams(pModel)\n\n #return self.laplace_smoothing(cEnum, cDenom, pModel)\n return self.kn_smoothing(pNgram, cEnum, cDenom, pModel, 0.75)\n\n def probs(self, pQueries, pN, pMerge = False) :\n print(\"Processing queries\")\n sequences = self.augment_seq(pQueries, pN, pMerge)\n\n probs = [1.0 for _ in range(len(sequences))]\n\n print(\"Computing probs\")\n for i, seq in enumerate(sequences) :\n gram = self.get_ngram(seq, pN)\n\n for g in gram :\n probs[i] *= self.prob(tuple(g), len(g))\n\n return probs\n\n def log_probs(self, pQueries, pN, pMerge = False) :\n print(\"Processing queries\")\n sequences = self.augment_seq(pQueries, pN, pMerge)\n\n probs = [0.0 for _ in range(len(sequences))]\n\n print(\"Computing probs\")\n for i, seq in enumerate(sequences) :\n grams = self.get_ngram(seq, pN)\n for gram in grams :\n p 
= math.log2(self.prob(tuple(gram), len(gram)))\n #print(gram, \" \", p)\n probs[i] += p\n\n return probs\n\n def language_cross_entropy(self, pCorpus, pN) :\n sum_of_entropy = 0\n start_ngrams = 0\n sequences = self.augment_seq(pCorpus, pN, True)\n ngrams = self.get_ngram(sequences[0], pN)\n\n for token in sequences[0] :\n if (token == \"*start1*\") or \\\n (token == \"*start2*\") or \\\n (token == \"*start*\") :\n start_ngrams += 1\n\n for ngram in ngrams :\n if (ngram[-1] != \"*start1*\") and \\\n (ngram[-1] != \"*start2*\") and \\\n (ngram[-1] != \"*start*\") :\n sum_of_entropy += math.log2(self.prob(tuple(ngram), pN))\n #sum_of_entropy += self.interpolated_logProb(tuple(ngram))\n\n return -sum_of_entropy / (len(sequences[0]) - start_ngrams)\n\n def perplexity(self, pCorpus, pN) :\n return pow(2, self.language_cross_entropy(pCorpus, pN))\n\n def interpolated_logProb(self, pNgram) :\n return math.log2(\n self.mLambda * self.prob(pNgram, 3) +\n (1.0 - self.mLambda) * self.prob(pNgram[1:3], 3))\n\n def evaluate(self, pQueries, pMerge = False) :\n sequences = self.augment_seq(pQueries, 3, pMerge)\n\n probs = [0.0 for _ in range(len(sequences))]\n\n for i, seq in enumerate(sequences) :\n gram = self.get_ngram(seq, 3)\n for g in gram :\n probs[i] += self.interpolated_logProb(tuple(g))\n\n return probs\n\n def tune(self) :\n\n pCorpus = self.import_val()\n minCEntropy = [sys.float_info[0], 0, self.mLambda]\n\n start_ngrams = 0\n sequences = self.augment_seq(pCorpus, 3, True)\n trigrams = self.get_ngram(sequences[0], 3)\n\n for token in sequences[0] :\n if (token == \"*start1*\") or (token == \"*start2*\") :\n start_ngrams += 1\n\n for l in range(0, 102, 2) :\n sum_of_entropy = 0\n\n for ngram in trigrams:\n ngram = tuple(ngram)\n\n if (ngram[-1] != \"*start1*\") and \\\n (ngram[-1] != \"*start2*\") and \\\n (ngram[-1] != \"*start*\") :\n trigram_prob = self.prob(ngram, 3)\n bigram_prob = self.prob(ngram[1:3], 3)\n\n sum_of_entropy += math.log2((l / 100.) 
* trigram_prob + \\\n (1.0 - (l / 100.)) * bigram_prob)\n\n cross_entropy = -sum_of_entropy / (len(sequences[0]) - start_ngrams)\n perplexity = pow(2, cross_entropy)\n\n #print(\"Lamda: \", l/100., \" CE: \", cross_entropy)\n\n if cross_entropy < minCEntropy[0] :\n minCEntropy = [cross_entropy, perplexity, l / 100.]\n\n self.mLambda = minCEntropy[2]\n return minCEntropy\n\n def get_prev(self, wk, pModel) :\n prev_wk = 0\n for w in self.mNgramList[pModel - 1][0].keys() :\n temp = [w[0],]\n for k in wk : temp.extend([k,])\n #print(\"Prev \", temp)\n if self.get_count(tuple(temp), pModel) > 0:\n prev_wk += 1\n return prev_wk\n\n def get_next(self, wk, pModel, pCountNeg) :\n next_wk = 0\n for w in self.mNgramList[pModel - 1][0].keys() :\n temp = []\n for k in wk : temp.extend([k,])\n temp.extend([w[0],])\n #print(\"Next \", temp)\n if self.get_count(tuple(temp), pModel) > 0 and not pCountNeg:\n next_wk += 1\n\n if self.get_count(tuple(temp), pModel) == 0 and pCountNeg:\n next_wk += 1\n\n return next_wk\n\n def get_denom(self, w, pModel) :\n wk = 0\n for item in self.mNgramList[pModel - 1][pModel - 1].items() :\n if item[0][-2] == w :\n wk += item[1]\n return wk\n\n def kn_smoothing(self, pNgram, pEnum, pDenom, pModel, D):\n n = len(pNgram)\n\n if pEnum > 0:\n probability_ngram = (pEnum - D) / pDenom\n else:\n prev_wk = self.get_prev(pNgram[:n - 1], pModel)\n next_wk = self.get_next(pNgram[:n - 1], pModel, False)\n next_denom = self.get_next(pNgram[:n - 1], pModel, True)\n\n #print(pDenom, \" \", next_denom)\n probability_ngram = (D * next_wk) / self.mNgramList[pModel - 1][0][pNgram[-2]]\n\n if probability_ngram > 1.0 or probability_ngram < 0.0 :\n print(pNgram[:n - 1], \" \", next_wk, \" \", prev_wk)\n print(pNgram, \" \", probability_ngram, \" cEnum \", pEnum, \" cDenom \", pDenom)\n print(\n \"Prob out of range: \",\n \" cEnum: \", pEnum,\n \" cDenom: \", pDenom)\n\n return probability_ngram"
},
{
"alpha_fraction": 0.6060508489608765,
"alphanum_fraction": 0.6232700347900391,
"avg_line_length": 30.231155395507812,
"blob_id": "6412b67fbd0ed7a4db15989db2c404e7fc0dd5c7",
"content_id": "53261e0a880b19f6414ec7fbd4b94b91180a9b38",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6214,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 199,
"path": "/project_4/metrics.py",
"repo_name": "athanasios-gkikas/nlp_projects",
"src_encoding": "UTF-8",
"text": "from sklearn.metrics import f1_score, accuracy_score, precision_recall_curve, average_precision_score\nimport data_generator\nimport sklearn.metrics as sk_metrics\nfrom keras.callbacks import Callback\nfrom sklearn.metrics import classification_report\nfrom sklearn.preprocessing import LabelEncoder\nfrom collections import defaultdict\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport csv\nimport math\nimport json\nimport gzip\n\n\nclass Metrics(Callback):\n\n def __init__(self, val_set, batch_size, label_enc, **kwargs):\n super(Metrics, self).__init__(**kwargs)\n self.valX = val_set[0]\n self.valY = np.argmax(val_set[1], axis=2).ravel()\n self.labelEnc = label_enc\n self.tags = self.labelEnc.inverse_transform([tag for tag in range(len(self.labelEnc.classes_))])\n self.batchSize = batch_size\n self.report = []\n self.epochs = 0\n\n def get_metrics(self, model):\n pred = np.zeros(self.valY.shape, dtype=int)\n batches = math.ceil(self.valX.shape[0] / self.batchSize)\n seq_len = self.valX.shape[1]\n\n for i in range(batches):\n sample = np.zeros((self.batchSize, seq_len), dtype=self.valX.dtype)\n\n offset = i * self.batchSize\n end = offset + self.batchSize\n\n if end > self.valX.shape[0]:\n tmp = self.valX[offset:, :]\n sample[:tmp.shape[0], :] = tmp\n else:\n sample = self.valX[offset:end, :]\n\n prediction = np.argmax(model.predict(sample, steps=1), axis=2).ravel()\n\n offset = i * seq_len * self.batchSize\n end = offset + seq_len * self.batchSize\n\n if end > pred.shape[0]:\n pred[offset:] = prediction[:pred.shape[0] - offset]\n else:\n pred[offset:end] = prediction\n\n print(\"Accuracy: \", accuracy_score(self.valY, pred))\n print(classification_report(self.valY, pred, target_names=self.tags))\n\n return classification_report(self.valY, pred, target_names=self.tags, output_dict=True)\n\n def on_epoch_begin(self, epoch, logs=None):\n pass\n\n def on_epoch_end(self, epoch, logs=None):\n report = self.get_metrics(self.model)\n self.report.append(report)\n return\n\n def on_batch_end(self, batch, logs=None):\n pass\n\n def on_train_begin(self, logs=None):\n pass\n\n def on_train_end(self, logs=None):\n with gzip.GzipFile(os.getcwd() + \"\\\\dataset\\\\report.json.gz\", \"wb\") as file:\n file.write(json.dumps(self.report, indent=4).encode('utf-8'))\n return\n\n\ndef evaluate_model(model):\n encoder = LabelEncoder()\n encoder.classes_ = np.load(os.getcwd() + \"/dataset/labelEncoder.npz\")['arr_0']\n per_class_history = load_per_class_data(model, encoder)\n accuracy_loss_metrics(per_class_history)\n return\n plt.figure()\n print(len(encoder.classes_))\n plt.subplot(2, 2, 1)\n per_class_plots(0, 5, encoder, per_class_history)\n plt.subplot(2, 2, 2)\n per_class_plots(5, 10, encoder, per_class_history)\n plt.subplot(2, 2, 3)\n per_class_plots(10, 15, encoder, per_class_history)\n plt.subplot(2, 2, 4)\n per_class_plots(15, 18, encoder, per_class_history)\n plt.show()\n\n return\n\ndef accuracy_loss_metrics(pPerClassHistory):\n with open(os.getcwd() + '/dataset/logger.log', 'r') as log:\n rows = csv.DictReader(log)\n history = {}\n for i, row in enumerate(rows):\n for key, value in row.items():\n if key in history:\n history[key] += [float(value), ]\n else:\n history[key] = [float(value), ]\n\n epochs = len(history['epoch'])\n\n plt.figure()\n plt.subplot(1, 3, 1)\n plot_history(hs={'Init model': history}, epochs=epochs, metric='categorical_accuracy')\n plt.grid()\n\n plt.subplot(1, 3, 2)\n plot_history(hs={'Init model': history}, epochs=epochs, metric='loss')\n\n 
plt.grid()\n #plt.show()\n\n plt.subplot(1, 3, 3)\n macro_f1_per_epoch = calculate_macro_f1(pPerClassHistory, True)\n\n plt.plot(list(macro_f1_per_epoch.keys()), list(macro_f1_per_epoch.values()), label='macro F1 with PAD')\n plt.xlabel('Epochs')\n #plt.ylabel('Scores')\n plt.legend(loc=\"best\")\n macro_f1_per_epoch = calculate_macro_f1(pPerClassHistory, False)\n plt.plot(list(macro_f1_per_epoch.keys()), list(macro_f1_per_epoch.values()), label='macro F1 without PAD')\n plt.xlabel('Epochs')\n #plt.ylabel('Scores')\n plt.legend(loc=\"best\")\n plt.grid()\n plt.show()\n\n return\n\n\ndef load_per_class_data(model, encoder):\n\tdata = model.import_json(os.getcwd() + \"\\\\dataset\\\\report\")\n\tper_class_history = {}\n\tepoch_counter = 1\n\tfor i in range(0, 18):\n\t\tper_class_history[i] = defaultdict(list)\n\tfor epoch in data:\n\t\tfor i in range(0, 18):\n\t\t\tlabel = encoder.inverse_transform([i])[0]\n\t\t\tper_class_history[i][epoch_counter] = epoch[label]['f1-score']\n\t\tepoch_counter = epoch_counter + 1\n\n\treturn per_class_history\n\n\ndef calculate_macro_f1(per_class_history, pWithPAD):\n\tepochs = len(per_class_history[0])\n\tf1_per_epoch = {}\n\tfor epoch in range(1, epochs + 1):\n\t\ttotal_f1 = 0\n\t\tfor i in range(0, (18 if pWithPAD else 17)):\n\t\t\ttotal_f1 = total_f1 + per_class_history[i][epoch]\n\t\tf1_per_epoch[epoch] = total_f1/(18 if pWithPAD else 17)\n\n\treturn f1_per_epoch\n\n\ndef per_class_plots(start, end, encoder, per_class_history):\n for i in range(start, end):\n label = encoder.inverse_transform([i])[0]\n if label == \"__PAD__\" :\n label = \"PAD\"\n plt.plot(list(per_class_history[i].keys()), list(per_class_history[i].values()), label=label)\n\n plt.xlabel('epoch')\n plt.ylabel('F1 macro')\n plt.grid()\n plt.legend()\n\n return\n\n\ndef plot_history(hs, epochs, metric):\n\tplt.rcParams['figure.figsize'] = [10, 5]\n\tplt.rcParams['font.size'] = 16\n\n\tfor label in hs:\n\t\tplt.plot(hs[label][metric], label='train {0:s}'.format(metric))\n\t\tplt.plot(hs[label]['val_{0:s}'.format(metric)], label='val {0:s}'.format(metric))\n\n\tx_ticks = np.arange(0, epochs + 1)\n\tx_ticks[0] += 1\n\tplt.xticks(x_ticks)\n\tplt.ylim((0, 1))\n\tplt.xlabel('Epochs')\n\tplt.ylabel('Loss' if metric == 'loss' else 'Accuracy')\n\tplt.legend()"
},
{
"alpha_fraction": 0.5736559629440308,
"alphanum_fraction": 0.5825121402740479,
"avg_line_length": 33.4077262878418,
"blob_id": "32475269d0fedebb87ff0bfd4c9bb0134d86d0cf",
"content_id": "43bac308c8b24d2c73ba56f7938a045f0788d16b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8017,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 233,
"path": "/project_4/pos_model.py",
"repo_name": "athanasios-gkikas/nlp_projects",
"src_encoding": "UTF-8",
"text": "import layers\nimport tensorflow as tf\nimport numpy as np\nimport os\nimport metrics\nimport pyconll\nimport gzip\nimport json\nimport gc\nimport data_generator\nimport math\nimport metrics\n\nfrom random import shuffle\nfrom sklearn.preprocessing import LabelEncoder\nfrom keras.models import Model\nfrom keras.layers import Input, Embedding, Dropout, Bidirectional, LSTM, GRU\nfrom keras.layers import Dense, TimeDistributed, BatchNormalization, concatenate\nfrom keras.optimizers import Adam\nfrom keras.callbacks import CSVLogger, EarlyStopping\nfrom sklearn.model_selection import train_test_split\nfrom keras.utils import to_categorical\nfrom nltk import ngrams\n\nclass PoStagger:\n\n def __init__(self, seq_len=25):\n self.model = None\n self.seqLen = seq_len\n self.batchSize = 128\n self.labelEncoder = None\n self.root = os.getcwd() + \"\\\\dataset\\\\\"\n self.name = \"base\"\n return\n\n def numClasses(self):\n return len(self.labelEncoder.classes_)\n\n def load_data(self, pData, pPadding):\n x = []\n y = []\n max_len = {}\n\n for sentence in pData:\n xx = []\n yy = []\n\n if len(sentence) in max_len:\n max_len[len(sentence)] += 1\n else:\n max_len[len(sentence)] = 1\n\n for i, token in enumerate(sentence):\n xx.append(token.form if token.form is not None else \"__NONE__\")\n yy.append(token.upos if token.upos is not None else \"__NONE__\")\n\n for i in range(len(xx), pPadding):\n xx.append('__PAD__')\n yy.append('__PAD__')\n x.append(xx)\n y.append(yy)\n\n tmpx = []\n tmpy = []\n for i, sentence in enumerate(x):\n if len(sentence) > pPadding:\n gx = ngrams(sentence, pPadding)\n gy = ngrams(y[i], pPadding)\n for gram in gx:\n tmpx.append(list(gram))\n for gram in gy:\n tmpy.append(list(gram))\n else:\n tmpx.append(sentence)\n tmpy.append(y[i])\n # print(\"Max len sentence: \", max_len.items())\n return tmpx, tmpy\n\n def export_json(self, pFile, pData):\n with gzip.GzipFile(pFile + \".json.gz\", \"wb\") as file:\n file.write(json.dumps(pData, indent=4).encode('utf-8'))\n return\n\n def import_json(self, pFile):\n with gzip.GzipFile(pFile + \".json.gz\", \"rb\") as file:\n data = json.loads(file.read().decode('utf-8'))\n return data\n\n def export_arr(self, pFile, pX, pY):\n np.savez_compressed(pFile, a=pX, b=pY)\n return\n\n def import_arr(self, pFile):\n data = np.load(pFile + \".npz\")\n return data['a'], data['b']\n\n def build_labels(self, pTrain, pVal, pTest, pDev):\n labels = {}\n for dataset in [pTrain, pVal, pTest, pDev]:\n for sentence in dataset:\n for token in sentence:\n labels[token] = token\n\n print(\"Num labels: \", len(labels.keys()))\n\n label_encoder = LabelEncoder()\n label_encoder.fit([l for l in labels.keys()])\n\n return label_encoder\n\n def build_categ_labels(self, pY):\n y = np.zeros((len(pY), self.seqLen, self.numClasses()))\n\n for i, tags in enumerate(pY):\n for j, tag in enumerate(tags):\n y[i, j, self.labelEncoder.transform([tag])[0]] = 1.0\n\n return y\n\n def compile_dataset(self):\n train_path = self.root + \"en_ewt-ud-train.conllu\"\n dev_path = self.root + \"en_ewt-ud-dev.conllu\"\n test_path = self.root + \"en_ewt-ud-test.conllu\"\n\n train_data = pyconll.load_from_file(train_path)\n dev_data = pyconll.load_from_file(dev_path)\n test_data = pyconll.load_from_file(test_path)\n\n trainX, trainY = self.load_data(train_data, self.seqLen)\n devX, devY = self.load_data(dev_data, self.seqLen)\n testX, testY = self.load_data(test_data, self.seqLen)\n\n xtrain, xval, ytrain, yval = train_test_split(\n trainX, trainY, test_size=0.1)\n\n 
self.labelEncoder = self.build_labels(ytrain, devY, testY, yval)\n np.savez_compressed(self.root + \"labelEncoder\", self.labelEncoder.classes_)\n\n self.export_arr(self.root + \"train\", np.array(xtrain), self.build_categ_labels(ytrain))\n self.export_arr(self.root + \"val\", np.array(xval), self.build_categ_labels(yval))\n self.export_arr(self.root + \"dev\", np.array(devX), self.build_categ_labels(devY))\n self.export_arr(self.root + \"test\", np.array(testX), self.build_categ_labels(testY))\n\n return\n\n def load_model(self):\n self.model.load_weights(self.root + \"base_model\")\n return\n\n def predict(self, pSentence) :\n sentence = pSentence\n initLen = len(pSentence)\n for i in range(len(pSentence), self.seqLen):\n sentence.append('__PAD__')\n\n sample = np.zeros((self.batchSize, self.seqLen), dtype='|S6')\n sample[0,:] = sentence\n\n prediction = self.model.predict(sample, steps=1)\n prediction = np.argmax(prediction[0, :], axis=1).ravel()\n\n return self.labelEncoder.inverse_transform(list(prediction))[:initLen]\n\n def test_model(self) :\n devX, devY = self.import_arr(self.root + \"test\")\n metric = metrics.Metrics((devX, devY), self.batchSize, self.labelEncoder)\n metric.get_metrics(self.model)\n return\n\n def compile_base_model(self) :\n gc.collect()\n self.labelEncoder = LabelEncoder()\n self.labelEncoder.classes_ = np.load(self.root + \"labelEncoder.npz\")['arr_0']\n\n inputs = Input(shape=(self.seqLen,), name='input', dtype=tf.string)\n elmo = layers.ElmoLayer(self.seqLen, self.batchSize)(inputs)\n gru1 = Bidirectional(GRU(100, dropout=0.2, recurrent_dropout=0.5, return_sequences=True, name='gru1'))(elmo)\n bn1 = BatchNormalization()(gru1)\n residual = concatenate([elmo, bn1])\n gru2 = Bidirectional(GRU(100, dropout=0.2, recurrent_dropout=0.5, return_sequences=True, name='gru2'))(residual)\n bn2 = BatchNormalization()(gru2)\n output = TimeDistributed(Dense(self.numClasses(), activation='softmax'))(bn2)\n model = Model(inputs=inputs, outputs=output)\n model.compile(\n optimizer=Adam(),\n loss='categorical_crossentropy',\n metrics=['categorical_accuracy',])\n model.summary()\n\n self.model = model\n self.name = \"base_model\"\n return\n\n\n def compile_model(self):\n self.compile_base_model()\n return\n\n def train_model(self):\n trainX, trainY = self.import_arr(self.root + \"train\")\n devX, devY = self.import_arr(self.root + \"dev\")\n '''\n trainX = trainX[:1000]\n trainY = trainY[:1000]\n devX = devX[:1000]\n devY = devY[:1000]\n '''\n train_gen = data_generator.data_stream([trainX, trainY], self.batchSize, self.numClasses())\n dev_gen = data_generator.data_stream([devX, devY], self.batchSize, self.numClasses())\n\n stopper = EarlyStopping(monitor='val_loss',\n min_delta=0, patience=5,\n verbose=0, mode='auto',\n restore_best_weights=True)\n\n csv_logger = CSVLogger(self.root + 'logger.log')\n\n self.model.fit_generator(\n generator=train_gen,\n steps_per_epoch=math.ceil(len(trainX) / self.batchSize),\n validation_data=dev_gen,\n validation_steps=math.ceil(len(devX) / self.batchSize),\n callbacks=[ \\\n metrics.Metrics((devX, devY), self.batchSize, self.labelEncoder), \\\n stopper, csv_logger],\n epochs=20,\n verbose=1,\n max_queue_size=100,\n workers=1,\n use_multiprocessing=False, )\n\n self.model.save_weights(self.root + self.name)\n return\n"
},
{
"alpha_fraction": 0.5436524748802185,
"alphanum_fraction": 0.5499789118766785,
"avg_line_length": 31.054054260253906,
"blob_id": "32d3d8f3c8de0b9e8757a097a78bd202db001e69",
"content_id": "a0e36a8754c7b2c09e0f0fbbe91a3fd4011bc7d6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2371,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 74,
"path": "/project_3/main.py",
"repo_name": "athanasios-gkikas/nlp_projects",
"src_encoding": "UTF-8",
"text": "from sklearn.preprocessing import LabelEncoder\nfrom keras.models import load_model\n\nimport data_generators\nimport data_loaders\nimport models\nimport metrics\nimport os\nimport numpy as np\nfrom gensim.models import KeyedVectors\n\ndef stats(pY, pLabelEnc) :\n\n y = np.argmax(pY, axis=1)\n\n for i in range(0, len(pLabelEnc.classes_)) :\n mask = np.count_nonzero(y == i)\n print(i, \" \", pLabelEnc.inverse_transform([i])[0], \" \", mask)\n\n return\n\ndef main() :\n\n data_loaders.exportCorpus()\n data_generators.export_word2vec_dataset()\n\n cwd = os.getcwd()\n trainX, trainY = data_loaders.import_embeddings(\"train\")\n valX, valY = data_loaders.import_embeddings(\"val\")\n testX, testY = data_loaders.import_embeddings(\"test\")\n devX, devY = data_loaders.import_embeddings(\"dev\")\n\n encoder = LabelEncoder()\n encoder.classes_ = np.load(cwd + \"/dataset/labelEncoder.npz\")['arr_0']\n\n model = models.build_mlp(trainX.shape[1], len(encoder.classes_))\n model = models.train_model(model, (trainX, trainY), (devX, devY), encoder)\n model.load_weights(cwd + \"/dataset/mlp1\")\n metrics.evaluate_model(model, (testX, testY), encoder)\n\n '''\n \n cwd = os.getcwd()\n\n embeddings = KeyedVectors.load_word2vec_format(\n cwd + \"/dataset/GoogleNews-vectors-negative300.bin\", binary=True)\n\n sentence1 = [\n [['__PAD__', '__PAD__', 'I', 'love', 'you'], 'X'],\n [['__PAD__', 'I', 'love', 'you', '__PAD__'], 'VERB'],\n [['I', 'love', 'you', '__PAD__', '__PAD__'], 'PRON'],]\n\n sentence2 = [\n [['__PAD__', '__PAD__', 'Let', 'make', 'love'], 'VERB'],\n [['__PAD__', 'Let', 'make', 'love', '__PAD__'], 'VERB'],\n [['Let', 'make', 'love', '__PAD__', '__PAD__'], 'NOUN']]\n\n sentence = [\n [['__PAD__', '__PAD__', 'Google', 'is', 'a'], 'PROPN'],\n [['__PAD__', 'Google', 'is', 'a', 'nice'], 'AUX'],\n [['Google', 'is', 'a', 'nice', 'search'], 'DET'],\n [['is', 'a', 'nice', 'search', 'engine'], 'ADJ'],\n [['a', 'nice', 'search', 'engine', '.'], 'NOUN'],\n [['nice', 'search', 'engine', '.', '__PAD__'], 'NOUN'],\n [['search', 'engine', '.', '__PAD__', '__PAD__'], 'PUNCT'],]\n\n metrics.test_model(model, encoder, embeddings, sentence1)\n metrics.test_model(model, encoder, embeddings, sentence2)\n '''\n\n return\n\nif __name__ == '__main__' :\n main()"
},
{
"alpha_fraction": 0.6303818225860596,
"alphanum_fraction": 0.6348497271537781,
"avg_line_length": 25.7608699798584,
"blob_id": "7a56afdd604c39de31a8af8168cabbd695a0ab8a",
"content_id": "07a252b9e4c0d9f0d8477e673ae54fb6c46744cb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2462,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 92,
"path": "/project_3/most_freq_base_classifier.py",
"repo_name": "athanasios-gkikas/nlp_projects",
"src_encoding": "UTF-8",
"text": "from random import shuffle\nfrom sklearn.metrics import accuracy_score, f1_score, classification_report\nimport pyconll\nimport os\n\n\ndef buildDataset(pData) :\n x_train = []\n y_train = []\n\n for sentence in pData :\n\n for token in sentence :\n x_train.append(token[0])\n y_train.append(token[1])\n\n return x_train, y_train\n\n\ndef load_data(pData) :\n pos_data = []\n for sentence in pData:\n token_list = []\n for token in sentence:\n token_list.append([token.form, token.upos if token.upos is not None else \"None\"])\n\n pos_data.append(token_list)\n\n return pos_data\n\n\ndef find_most_freq_pos_classifier(x_train, y_train):\n count_pos_per_word = dict()\n\n for i in range(0, len(x_train)):\n if x_train[i] not in count_pos_per_word:\n count_pos_per_word[x_train[i]] = dict()\n if y_train[i] not in count_pos_per_word[x_train[i]]:\n count_pos_per_word[x_train[i]][y_train[i]] = 0\n count_pos_per_word[x_train[i]][y_train[i]] += 1\n\n # print(count_pos_per_word)\n\n for key_word in count_pos_per_word:\n temp_dict = count_pos_per_word[key_word]\n maximum = 0\n most_freq_tag = \"\"\n\n for key_tag in temp_dict:\n if temp_dict[key_tag] > maximum:\n maximum = temp_dict[key_tag]\n most_freq_tag = key_tag\n\n count_pos_per_word[key_word] = most_freq_tag\n\n # print(count_pos_per_word)\n return count_pos_per_word\n\n\ncwd = os.getcwd()\ntrain_path = cwd + \"/dataset/en_ewt-ud-train.conllu\"\ntrain_data = pyconll.load_from_file(train_path)\ntrain = load_data(train_data)\n\nx_train, y_train = buildDataset(train)\n# print(x_train)\n# print(y_train)\n\nmost_freq_pos_per_word = find_most_freq_pos_classifier(x_train, y_train)\n\ntest_path = cwd + \"/dataset/en_ewt-ud-test.conllu\"\ntest_data = pyconll.load_from_file(test_path)\ntest = load_data(test_data)\nshuffle(test)\nx_test, y_test_true = buildDataset(test)\n\ny_test_pred = []\n\nfor word in x_test:\n if word in most_freq_pos_per_word:\n y_test_pred.append(most_freq_pos_per_word[word])\n else:\n y_test_pred.append('UNK')\n\n# print(len(x_test))\n# print(y_test_true)\n# print(y_test_pred)\n\nprint('accuracy:', accuracy_score(y_test_true, y_test_pred))\nprint('macro-f1-score:', f1_score(y_test_true, y_test_pred, average='macro'))\nprint('micro-f1-score:', f1_score(y_test_true, y_test_pred, average='micro'))\nprint(classification_report(y_test_true, y_test_pred))\n"
},
{
"alpha_fraction": 0.644855797290802,
"alphanum_fraction": 0.6530348658561707,
"avg_line_length": 20.924528121948242,
"blob_id": "c9f9ff91a04075d33a189189f14070c41f972ead",
"content_id": "3db8b10a2b8ff3f8680d6006ef182f152543d8ed",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2323,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 106,
"path": "/project_3/data_loaders.py",
"repo_name": "athanasios-gkikas/nlp_projects",
"src_encoding": "UTF-8",
"text": "from nltk import ngrams\nfrom random import shuffle\n\nimport pyconll\nimport os\nimport gzip\nimport json\nimport numpy as np\n\ndef load_data(pData) :\n\n\tpos_data = []\n\n\tfor sentence in pData:\n\t\ttoken_list = []\n\t\tfor token in sentence:\n\t\t\ttoken_list.append([token.form, token.upos\n\t\t\t\tif token.upos is not None else \"None\"])\n\n\t\tpos_data.append(token_list)\n\n\treturn pos_data\n\ndef buildDataset(pData, pNgram) :\n\n\tdataset = []\n\n\tfor sentence in pData :\n\t\ttoken_list = []\n\t\tfor i in range(0, int(pNgram / 2)) :\n\t\t\ttoken_list.append([\"__PAD__\", \"__PAD__\"])\n\n\t\tfor token in sentence :\n\t\t\ttoken_list.append(token)\n\n\t\tfor i in range(0, int(pNgram / 2)) :\n\t\t\ttoken_list.append([\"__PAD__\", \"__PAD__\"])\n\n\t\tfor i, ngram in enumerate(ngrams(token_list, pNgram)) :\n\t\t\tgram = []\n\t\t\tfor token in ngram :\n\t\t\t\tgram.append(token[0])\n\t\t\tdataset.append([gram, ngram[int(pNgram / 2)][1]])\n\n\treturn dataset\n\ndef export_arr(pFile, pData) :\n\n\twith gzip.GzipFile(pFile + \".json.gz\", \"wb\") as file :\n\t\tfile.write(json.dumps(pData, indent = 4).encode('utf-8'))\n\n\treturn\n\ndef import_arr(pFile) :\n\n\twith gzip.GzipFile(pFile, \"rb\") as file :\n\t\tdata = json.loads(file.read().decode('utf-8'))\n\n\treturn data\n\ndef importCorpus() :\n\tcwd = os.getcwd()\n\tdb = import_arr(cwd + \"/dataset/5gram_database.json.gz\")\n\treturn db[0], db[1], db[2], db[3]\n\n\ndef export_embeddings(pX, pY, pFile) :\n cwd = os.getcwd()\n np.savez_compressed(cwd + \"/dataset/\" + pFile, a=pX, b=pY)\n return\n\ndef import_embeddings(pFile) :\n cwd = os.getcwd()\n data = np.load(cwd + \"/dataset/\" + pFile + \".npz\")\n return data['a'], data['b']\n\n\ndef exportCorpus(pWindow = 5):\n\n\tcwd = os.getcwd()\n\n\ttrain_path = cwd + \"/dataset/en_ewt-ud-train.conllu\"\n\tdev_path = cwd + \"/dataset/en_ewt-ud-dev.conllu\"\n\ttest_path = cwd + \"/dataset/en_ewt-ud-test.conllu\"\n\n\ttrain_data = pyconll.load_from_file(train_path)\n\tdev_data = pyconll.load_from_file(dev_path)\n\ttest_data = pyconll.load_from_file(test_path)\n\n\ttrain = load_data(train_data)\n\tdev = load_data(dev_data)\n\ttest = load_data(test_data)\n\n\tshuffle(train)\n\tsz = int(len(train) * 0.1)\n\tval = train[:sz]\n\ttrain = train[sz:]\n\n\ttrain = buildDataset(train, pWindow)\n\tval = buildDataset(val, pWindow)\n\tdev = buildDataset(dev, pWindow)\n\ttest = buildDataset(test, pWindow)\n\n\texport_arr(cwd + \"/dataset/5gram_database\", (train, val, dev, test))\n\n\treturn"
},
{
"alpha_fraction": 0.6012937426567078,
"alphanum_fraction": 0.6204057335853577,
"avg_line_length": 35.56989288330078,
"blob_id": "4b5b8313ee6cd1d15eddf163f1a6289213d8adbf",
"content_id": "8ad5f8f74d111dd1fc3ee70aa83b0e0f8d70ce0a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3401,
"license_type": "no_license",
"max_line_length": 187,
"num_lines": 93,
"path": "/project_4/hyperas_tuning.py",
"repo_name": "athanasios-gkikas/nlp_projects",
"src_encoding": "UTF-8",
"text": "from __future__ import print_function\nfrom hyperopt import Trials, STATUS_OK, tpe\nfrom keras.callbacks import EarlyStopping\nfrom hyperas import optim\nfrom hyperas.distributions import choice, uniform\nfrom keras.models import Model\nfrom keras.layers import Input, Dropout, Bidirectional, GRU, Dense, TimeDistributed\nfrom keras.callbacks import EarlyStopping\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder\nimport os\nimport layers\nimport numpy as np\nimport tensorflow as tf\nimport data_generator\nimport math\n\ndef data():\n cwd = os.getcwd()\n val_data = np.load(cwd + \"/dataset/val.npz\")\n valX = val_data['a']\n valY = val_data['b']\n\n pTrainX, pTestX, pTrainY, pTestY = train_test_split(valX, valY, test_size=0.2)\n encoder = LabelEncoder()\n encoder.classes_ = np.load(cwd + \"/dataset/labelEncoder.npz\")['arr_0']\n return pTrainX, pTrainY, pTestX, pTestY, encoder\n\n\ndef create_model(pTrainX, pTrainY, pTestX, pTestY, encoder):\n\n batchSize = 111\n seq = 25\n inputs = Input(shape=(seq,), name='input', dtype=tf.string)\n elmo = layers.ElmoLayer(seq, batchSize)(inputs)\n gru1 = Bidirectional(GRU({{choice([25, 50, 100])}}, dropout={{choice([0.0, 0.2, 0.5])}}, recurrent_dropout={{choice([0.0, 0.2, 0.5])}}, return_sequences=True, name='gru1'))(elmo)\n\n if {{choice(['two', 'three'])}} == \"three\":\n gru2 = Bidirectional(GRU({{choice([25, 50, 100])}}, dropout={{choice([0.0, 0.2, 0.5])}}, recurrent_dropout={{choice([0.0, 0.2, 0.5])}}, return_sequences=True, name='lstm2'))(gru1)\n output = TimeDistributed(Dense(len(encoder.classes_), activation='softmax'))(gru2)\n else:\n output = TimeDistributed(Dense(len(encoder.classes_), activation='softmax'))(gru1)\n\n model = Model(inputs=inputs, outputs=output)\n\n model.compile(\n optimizer='adam',\n loss='categorical_crossentropy',\n metrics=['categorical_accuracy'])\n\n train_gen = data_generator.data_stream([pTrainX, pTrainY], batchSize, len(encoder.classes_))\n dev_gen = data_generator.data_stream([pTestX, pTestY], batchSize, len(encoder.classes_))\n\n stopper = EarlyStopping(monitor='val_loss',\n min_delta=0, patience=2,\n verbose=0, mode='auto',\n restore_best_weights=True)\n\n model.fit_generator(\n generator=train_gen,\n steps_per_epoch=math.ceil(len(pTrainX) / batchSize),\n validation_data=dev_gen,\n validation_steps=math.ceil(len(pTestX) / batchSize),\n callbacks=[stopper, ],\n epochs=8,\n verbose=1,\n max_queue_size=100,\n workers=1,\n use_multiprocessing=False, )\n\n score, acc = model.evaluate_generator(\n generator=dev_gen,\n steps=math.ceil(len(pTestX) / batchSize),\n verbose=0)\n\n print('Best val acc of epoch:', acc)\n return {'loss': -acc, 'status': STATUS_OK, 'model': model}\n\ndef main():\n best_run, best_model = optim.minimize(model=create_model,\n data=data,\n algo=tpe.suggest,\n max_evals=10,\n trials=Trials())\n\n print(\"Best performing model chosen hyper-parameters:\")\n print(best_run)\n\n print(best_model.summary())\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.5789867043495178,
"alphanum_fraction": 0.6013942360877991,
"avg_line_length": 29.43181800842285,
"blob_id": "d87c38f1072c14b6b86b43ceaa0ec6d15d6addc1",
"content_id": "a998b6fcfcad8eb02cd96cd07218b6f557d06d05",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8033,
"license_type": "no_license",
"max_line_length": 150,
"num_lines": 264,
"path": "/project_3/metrics.py",
"repo_name": "athanasios-gkikas/nlp_projects",
"src_encoding": "UTF-8",
"text": "from keras.callbacks import Callback\nfrom sklearn.metrics import confusion_matrix, f1_score, precision_score, recall_score, accuracy_score, precision_recall_curve, average_precision_score\nfrom keras import backend as K\nfrom gensim.models import KeyedVectors\n\nimport sklearn.metrics as sk_metrics\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport csv\nimport data_loaders\nimport data_generators\n\nclass Metrics(Callback) :\n\n def __init__(self, pNumClasses, pValSet, pLabelEnc, **kwargs):\n\n super(Metrics, self).__init__(**kwargs)\n\n self.valX = pValSet[0]\n self.valY = pValSet[1]\n self.numClasses = pNumClasses\n\n self.labelEnc = pLabelEnc\n\n self.per_class_history = np.zeros([100, pNumClasses, 3])\n self.macro_history = np.zeros([100, 4])\n\n def getMetrics(self, pModel) :\n\n prediction = pModel.predict(self.valX)\n gt = np.argmax(self.valY, axis=1)\n pred = np.argmax(prediction, axis=1)\n\n per_class_metrics = np.zeros([self.numClasses, 3])\n macro_metrics = np.zeros([1, 4])\n\n for i in range(0, self.numClasses) :\n trueY = gt == i\n predY = pred == i\n\n per_class_metrics[i, 0] = precision_score(trueY, predY)\n per_class_metrics[i, 1] = recall_score(trueY, predY)\n per_class_metrics[i, 2] = f1_score(trueY, predY)\n\n macro_metrics[0, 0] = np.mean(per_class_metrics[:, 0])\n macro_metrics[0, 1] = np.mean(per_class_metrics[:, 1])\n macro_metrics[0, 2] = np.mean(per_class_metrics[:, 2])\n\n index = self.labelEnc.transform(['UNK',])[0]\n\n arr1 = per_class_metrics[0:index,2]\n arr2 = per_class_metrics[index + 1:,2]\n\n macro_metrics[0, 3] = np.mean(np.concatenate((arr1, arr2)))\n\n return per_class_metrics, macro_metrics\n\n def on_epoch_begin(self, epoch, logs=None) :\n pass\n\n def on_epoch_end(self, epoch, logs=None) :\n\n per_class_metrics, macro_metrics = self.getMetrics(self.model)\n\n self.per_class_history[epoch, :,:] = per_class_metrics\n self.macro_history[epoch, :] = macro_metrics\n\n for i in range(0, self.numClasses) :\n\n label = self.labelEnc.inverse_transform([i])\n\n print(i, \"class \", label,\n \" precision: {0:.4f}\".format(self.per_class_history[epoch,i,0]),\n \" recall: {0:.4f}\".format(self.per_class_history[epoch,i,1]),\n \" f1: {0:.4f}\".format(self.per_class_history[epoch,i,2]),)\n\n print(\"With <UNK> macro f1: {0:.4f}\".format(self.macro_history[epoch, 2]))\n print(\"Without <UNK> macro f1: {0:.4f}\".format(self.macro_history[epoch, 3]))\n\n return\n\n def on_batch_end(self, batch, logs=None) :\n pass\n\n def on_train_begin(self, logs=None) :\n pass\n\n def on_train_end(self, logs=None) :\n cwd = os.getcwd()\n np.savez_compressed(cwd + \"/dataset/per_class_history\", self.per_class_history)\n np.savez_compressed(cwd + \"/dataset/macro_history\", self.macro_history)\n return\n\ndef evaluate_model(pModel, pTest, pEncoder) :\n\n cwd = os.getcwd()\n\n metric = Metrics(\n len(pEncoder.classes_),\n (pTest[0], pTest[1]), pEncoder)\n\n metric.getMetrics(pModel)\n\n prediction = pModel.predict(pTest[0])\n plot_score(pTest[1], prediction, len(pEncoder.classes_))\n\n loss, acc = pModel.evaluate(pTest[0], pTest[1], verbose=1)\n\n with open(cwd + '/dataset/' + pModel.name + '.log', 'r') as log :\n rows = csv.DictReader(log)\n history = {}\n for i, row in enumerate(rows) :\n for key,value in row.items() :\n if key in history :\n history[key] += [float(value),]\n else :\n history[key] = [float(value),]\n\n epochs = len(history['epoch'])\n\n per_class_history = np.load(cwd + \"/dataset/per_class_history.npz\")['arr_0']\n macro_history = 
np.load(cwd + \"/dataset/macro_history.npz\")['arr_0']\n\n per_class_history = per_class_history[:epochs - 1, :, :]\n\n plt.figure()\n\n print(len(pEncoder.classes_))\n plt.subplot(2,2,1)\n\n for i in range(0, 5) :\n label = pEncoder.inverse_transform([i])[0]\n plt.plot(per_class_history[:epochs - 1, i, 2], label=label)\n\n plt.xlabel('epoch')\n plt.ylabel('F1 macro')\n plt.grid()\n plt.legend()\n plt.subplot(2,2,2)\n\n for i in range(5, 10) :\n label = pEncoder.inverse_transform([i])[0]\n plt.plot(per_class_history[:epochs - 1, i, 2], label=label)\n\n plt.grid()\n plt.legend()\n plt.subplot(2,2,3)\n\n for i in range(10, 15) :\n label = pEncoder.inverse_transform([i])[0]\n plt.plot(per_class_history[:epochs - 1, i, 2], label=label)\n\n plt.grid()\n plt.legend()\n plt.subplot(2,2,4)\n for i in range(15, 18) :\n label = pEncoder.inverse_transform([i])[0]\n plt.plot(per_class_history[:epochs - 1, i, 2], label=label)\n\n plt.grid()\n plt.legend()\n plt.show()\n\n plt.figure()\n plt.plot(macro_history[:epochs - 1, 2], label='macro F1 with <UNK> token')\n plt.plot(macro_history[:epochs - 1, 3], label='macro F1 without <UNK> token')\n plt.xlabel('epoch')\n plt.ylabel('score')\n plt.legend(loc=\"best\")\n plt.legend()\n plt.grid()\n plt.show()\n\n print(\"Train Loss : {0:.5f}\".format(history['loss'][-1]))\n print(\"Dev Loss: {0:.5f}\".format(history['val_loss'][-1]))\n print(\"Test Loss : {0:.5f}\".format(loss))\n print(\"---\")\n print(\"Train Accuracy : {0:.5f}\".format(history['categorical_accuracy'][-1]))\n print(\"Dev Accuracy: {0:.5f}\".format(history['val_categorical_accuracy'][-1]))\n print(\"Test Accuracy : {0:.5f}\".format(acc))\n\n fig = plt.figure(figsize=(10, 10))\n plt.subplots_adjust(wspace=0.3, hspace=0.3)\n\n plt.subplot(1, 2, 1)\n plot_history(hs={'Init model': history}, epochs=epochs, metric='categorical_accuracy')\n plt.grid()\n\n plt.subplot(1, 2, 2)\n plot_history(hs={'Init model': history}, epochs=epochs, metric='loss')\n\n plt.grid()\n plt.show()\n\n return\n\ndef plot_score(pTest, pPred, pNumClasses) :\n\n y_tets_arg = np.argmax(pTest, axis=1)\n y_pred_arg = np.argmax(pPred, axis=1)\n\n print('accuracy:', accuracy_score(y_tets_arg, y_pred_arg))\n print('macro-f1-score:', f1_score(y_tets_arg, y_pred_arg, average='macro'))\n print('micro-f1-score:', f1_score(y_tets_arg, y_pred_arg, average='micro')) \n print(sk_metrics.classification_report( y_tets_arg, y_pred_arg))\n\n y_test = pTest.ravel()\n y_pred = pPred.ravel()\n\n precision = {}\n recall = {}\n average_precision = {}\n\n precision[\"micro\"], recall[\"micro\"], _ = precision_recall_curve(\n y_test, y_pred)\n\n average_precision[\"micro\"] = average_precision_score(\n y_test, y_pred, average=\"micro\")\n\n plt.figure()\n\n plt.plot(recall[\"micro\"], precision[\"micro\"], color='navy',\n label='average AUC= {0:0.2f}'.format(average_precision['micro']))\n\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('Recall')\n plt.ylabel('Precision')\n plt.legend(loc=\"lower left\")\n\n plt.grid()\n plt.show()\n\n return\n\ndef plot_history(hs, epochs, metric):\n plt.rcParams['figure.figsize'] = [10, 5]\n plt.rcParams['font.size'] = 16\n\n for label in hs:\n plt.plot(hs[label][metric], label='train {0:s}'.format(metric))\n plt.plot(hs[label]['val_{0:s}'.format(metric)], label='val {0:s}'.format(metric))\n\n x_ticks = np.arange(0, epochs + 1)\n x_ticks [0] += 1\n plt.xticks(x_ticks)\n plt.ylim((0, 1))\n plt.xlabel('Epochs')\n plt.ylabel('Loss' if metric=='loss' else 'Accuracy')\n plt.legend()\n\ndef test_model(pModel, 
pEncoder, embeddings, pSentence) :\n\n x, y = data_generators.buildData_word2vec(pSentence, 5, embeddings, pEncoder)\n\n predict = pModel.predict(x)\n\n y_tets_arg = np.argmax(y, axis=1)\n y_pred_arg = np.argmax(predict, axis=1)\n\n print(pEncoder.inverse_transform(list(y_tets_arg)))\n print(pEncoder.inverse_transform(list(y_pred_arg)))\n return"
},
{
"alpha_fraction": 0.6087149977684021,
"alphanum_fraction": 0.6287238597869873,
"avg_line_length": 31.128570556640625,
"blob_id": "af65cd001f7aab1396ca3060165a1cb81bcaf2db",
"content_id": "e47cb365f099f0a8daf1730fb3d47e16adef144e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2249,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 70,
"path": "/project_3/data_generators.py",
"repo_name": "athanasios-gkikas/nlp_projects",
"src_encoding": "UTF-8",
"text": "\nfrom gensim.models import KeyedVectors\nfrom keras.utils import to_categorical, np_utils\nfrom sklearn.preprocessing import LabelEncoder\n\nimport data_loaders\nimport gensim\nimport numpy as np\nimport os\n\ndef export_word2vec_dataset() :\n\n data_loaders.exportCorpus()\n\n train, val, dev, test = data_loaders.importCorpus()\n encoder = build_labels(train, val, dev, test)\n\n cwd = os.getcwd()\n\n np.savez_compressed(cwd + \"/dataset/labelEncoder\", encoder.classes_)\n\n print(\"Loading pre-trained word2vec embeddings...\")\n\n embeddings = KeyedVectors.load_word2vec_format(\n cwd + \"/dataset/GoogleNews-vectors-negative300.bin\", binary=True)\n\n print(\"Extracting embeddings...\")\n\n trainX, trainY = buildData_word2vec(train, 5, embeddings, encoder)\n testX, testY = buildData_word2vec(test, 5, embeddings, encoder)\n valX, valY = buildData_word2vec(val, 5, embeddings, encoder)\n devX, devY = buildData_word2vec(dev, 5, embeddings, encoder)\n data_loaders.export_embeddings(trainX, trainY, \"train\")\n data_loaders.export_embeddings(testX, testY, \"test\")\n data_loaders.export_embeddings(valX, valY, \"val\")\n data_loaders.export_embeddings(devX, devY, \"dev\")\n\n return\n\ndef buildData_word2vec(pData, pWindow, pEmbeddings, pLabelEncoder) :\n\n x = np.zeros((len(pData), 300 * pWindow), dtype=np.float32)\n\n for i, pair in enumerate(pData) :\n for j, token in enumerate(pair[0]) :\n if token != \"__PAD__\" :\n if token in pEmbeddings.vocab :\n x[i, j * 300 : j * 300 + 300] = pEmbeddings[token]\n else :\n x[i, j * 300 : j * 300 + 300] = pEmbeddings[\"UNK\"]\n if j == int(pWindow / 2) :\n pair[0][j] = \"UNK\"\n pair[1] = \"UNK\"\n\n return x, to_categorical(pLabelEncoder.transform([tag[1] for tag in pData]))\n\ndef build_labels(pTrain, pVal, pTest, pDev) :\n\n labels = {}\n labels[\"UNK\"] = \"UNK\"\n\n for dataset in [pTrain, pVal, pTest, pDev] :\n for token in dataset :\n labels[token[1]] = token[1]\n\n print(\"Num labels: \", len(labels.keys()))\n\n label_encoder = LabelEncoder()\n label_encoder.fit([l for l in labels.keys()])\n\n return label_encoder"
},
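buildData_word2vec above lays one 300-dimensional vector per token of a fixed window into a single feature row, falling back to the "UNK" vector for out-of-vocabulary tokens. A toy sketch of the same layout with a made-up 4-dimensional embedding table (the dict below is an assumption, not the GoogleNews model):

```python
import numpy as np

DIM, WINDOW = 4, 3
emb = {"the": np.ones(DIM), "cat": np.full(DIM, 2.0), "UNK": np.zeros(DIM)}

def window_features(tokens):
    # one row: WINDOW slots of DIM values; unknown tokens map to "UNK"
    x = np.zeros(DIM * WINDOW, dtype=np.float32)
    for j, tok in enumerate(tokens[:WINDOW]):
        x[j * DIM:(j + 1) * DIM] = emb.get(tok, emb["UNK"])
    return x

print(window_features(["the", "cat", "sat"]))  # "sat" falls back to UNK
```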
{
"alpha_fraction": 0.6011139750480652,
"alphanum_fraction": 0.6285346746444702,
"avg_line_length": 24.380434036254883,
"blob_id": "530541f26881f8db069ee443d06da7e81cbe6db8",
"content_id": "234bab39d62ed5f76a791551343c2dd6085cefcb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2334,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 92,
"path": "/project_3/models.py",
"repo_name": "athanasios-gkikas/nlp_projects",
"src_encoding": "UTF-8",
"text": "from keras.models import Sequential\nfrom keras.layers import Dense, Activation, Dropout, LeakyReLU\nfrom keras.optimizers import Adam\nfrom keras.callbacks import CSVLogger, EarlyStopping\nfrom sklearn.utils import class_weight\n\nimport data_generators\nimport data_loaders\nimport metrics\nimport csv\nimport os\n\nimport numpy as np\n\ndef build_mlp(pInputDim, pNumClasses) :\n\n model = Sequential(name='mlp1')\n\n model.add(Dense(512, input_dim=pInputDim))\n model.add(LeakyReLU())\n model.add(Dropout(0.5))\n\n model.add(Dense(256))\n model.add(LeakyReLU())\n model.add(Dropout(0.5))\n\n model.add(Dense(128))\n model.add(LeakyReLU())\n model.add(Dropout(0.2))\n\n model.add(Dense(128))\n model.add(LeakyReLU())\n model.add(Dropout(0.5))\n\n model.add(Dense(256))\n model.add(LeakyReLU())\n model.add(Dropout(0.5))\n\n model.add(Dense(512))\n model.add(LeakyReLU())\n\n model.add(Dense(pNumClasses))\n model.add(Activation('softmax'))\n\n model.compile(\n optimizer='adam',\n loss='categorical_crossentropy',\n metrics=['categorical_accuracy'])\n\n return model\n\ndef train_model(pModel, pTrain, pVal, pLabelEnc) :\n\n y = np.argmax(pTrain[1], axis=1)\n\n freq = np.zeros([1, len(pLabelEnc.classes_)])\n\n for i in range(0, len(pLabelEnc.classes_)) :\n mask = np.count_nonzero(y == i)\n freq[0,i] = mask / pTrain[1].shape[0]\n\n weight = np.median(freq) / freq\n\n for i in range(0, len(pLabelEnc.classes_)) :\n mask = np.count_nonzero(y == i)\n percentage = mask / pTrain[0].shape[0]\n\n print(i, \"\\t\", pLabelEnc.inverse_transform([i])[0], \"\\t\",\n mask,\"\\t{0:.2}%\".format(percentage * 100.0),\n \"\\tweight: {0:.2}\".format(weight[0,i]))\n\n pModel.summary()\n\n csv_logger = CSVLogger('dataset/' + pModel.name + '.log')\n\n epochs = 20\n\n stopper = EarlyStopping(monitor='val_loss',\n min_delta=0, patience=2, verbose=0, mode='auto')\n\n history = pModel.fit(pTrain[0], pTrain[1],\n callbacks=[\n metrics.Metrics(len(pLabelEnc.classes_), (pVal[0], pVal[1]), pLabelEnc),\n csv_logger,\n stopper],\n validation_data=(pVal[0], pVal[1]),\n epochs=epochs, batch_size=128, verbose=1,)\n\n print(stopper.stopped_epoch)\n pModel.save('dataset/' + pModel.name)\n\n return pModel"
},
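train_model above derives per-class weights as median(frequency) / frequency, so rare labels get weights above 1 and frequent ones below. A standalone numeric check of that formula:

```python
import numpy as np

y = np.array([0, 0, 0, 0, 1, 1, 2])  # toy class ids
freq = np.bincount(y) / y.size       # per-class frequency: [0.571, 0.286, 0.143]
weight = np.median(freq) / freq      # -> [0.5, 1.0, 2.0]; rarest class upweighted
print(freq, weight)
```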
{
"alpha_fraction": 0.5688118934631348,
"alphanum_fraction": 0.5811881422996521,
"avg_line_length": 27.05555534362793,
"blob_id": "4d2f5c4ac585a46db6f1b882c4db03f4adee183c",
"content_id": "86e93939463a68030d09826ee00a5489aa6ba938",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2020,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 72,
"path": "/project_1/main.py",
"repo_name": "athanasios-gkikas/nlp_projects",
"src_encoding": "UTF-8",
"text": "import os\nimport model\nimport random\n\ndef test_case_custom_corpus() :\n\n m = model.model()\n\n tokens = (\"*start1*\", \"*start2*\", \"a\", \"b\", \"*end*\",) * 10\n\n unigram = m.get_ngram(tokens, 1)\n bigram = m.get_ngram(tokens, 2)\n trigram = m.get_ngram(tokens, 3)\n voc = m.compute_counts([unigram, bigram, trigram])\n m.set_voc(voc, 3)\n\n query = [[\"a\",],]\n\n print(m.probs(query, 3))\n print(m.log_probs(query, 3))\n\n return\n\ndef test_case_full_corpus() :\n\n cwd = os.getcwd()\n root = cwd + \"//dataset//\"\n corpus = root + \"europarl.en\"\n\n m = model.model(root)\n #m.build(corpus) # do not enable this\n m.load_counts()\n #print(\"Tuning result: \", m.tune())\n\n queries = [\n ['i', 'think', 'that', 'the', 'honourable', 'member', 'raises', 'an', 'important', 'point'],\n ['member', 'the', 'think', 'that', 'raises', 'i', 'important', 'an', 'honourable', 'point'],\n ['it', 'possesses', 'political', 'economic', 'and', 'diplomatic', 'leverage'],\n ['economic', 'it', 'leverage', 'and', 'diplomatic', 'possesses', 'political'],\n ]\n\n #queries = m.import_queries()\n #for q in queries : print(q)\n\n #print(m.get_kn_smoothing(queries[0], 10))\n\n #print(\"Perplexity Bigram: \", m.perplexity(queries, 2))\n #print(\"Unigram: \", m.language_cross_entropy(queries, 1))\n #print(\"Bigram: \", m.language_cross_entropy(queries, 2))\n #print(\"Bigram: \", m.perplexity(queries, 2))\n #print(\"Trigram: \", m.language_cross_entropy(queries, 3))\n #print(\"Trigram: \", m.perplexity(queries, 3))\n #print(\"Interpolated: \", m.language_cross_entropy(queries, 3))\n #print(\"Interpolated: \", m.perplexity(queries, 3))\n\n #print(m.log_probs(queries, 1))\n #print(m.log_probs(queries, 2))\n print(m.log_probs(queries, 3))\n #for q in queries :\n # random.shuffle(q)\n # print(q)\n\n #print(m.log_probs(queries, 2))\n\n #print(m.evaluate(queries))\n\n return\n\nif __name__ == \"__main__\" :\n\n #test_case_custom_corpus()\n test_case_full_corpus()\n"
},
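The model class used by this driver (get_ngram, compute_counts, log_probs) is defined elsewhere in the repo; a hedged guess at the n-gram extraction step it implies, sketched with collections.Counter:

```python
from collections import Counter

def get_ngram(tokens, n):
    # slide a window of size n over the token sequence
    return [tuple(tokens[i:i + n]) for i in range(len(tokens) - n + 1)]

tokens = ("*start1*", "*start2*", "a", "b", "*end*") * 2
print(Counter(get_ngram(tokens, 2)).most_common(3))
```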
{
"alpha_fraction": 0.6570372581481934,
"alphanum_fraction": 0.6812227368354797,
"avg_line_length": 23.409835815429688,
"blob_id": "4c0d520521c9eb57b19434501876e5fe2fcf1c90",
"content_id": "257cb9c5cce863d0620dba2a545ed3a447caa0fd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2977,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 122,
"path": "/project_3/hyperas_tuning.py",
"repo_name": "athanasios-gkikas/nlp_projects",
"src_encoding": "UTF-8",
"text": "from __future__ import print_function\nfrom sklearn.preprocessing import LabelEncoder\nfrom hyperopt import Trials, STATUS_OK, tpe\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation, Dropout, BatchNormalization, LeakyReLU, GaussianNoise\nfrom keras.optimizers import Adam\nfrom keras.callbacks import EarlyStopping\nfrom hyperas import optim\nfrom hyperas.distributions import choice, uniform\nfrom sklearn.model_selection import train_test_split\n\nimport keras.layers\nimport data_loaders\nimport os\nimport numpy as np\nimport metrics\n\ndef data():\n\tcwd = os.getcwd()\n\tvalX, valY = data_loaders.import_embeddings(\"val\")\n\n\tencoder = LabelEncoder()\n\tencoder.classes_ = np.load(cwd + \"/dataset/labelEncoder.npz\")['arr_0']\n\n\tpTrainX, pTestX, pTrainY, pTestY = train_test_split(\n\t\tvalX, valY, test_size=0.2, random_state=11)\n\n\treturn np.array(pTrainX), np.array(pTrainY), np.array(pTestX), np.array(pTestY), encoder\n\ndef create_model(pTrainX, pTrainY, pTestX, pTestY, encoder) :\n\n\tactivation = {{choice(['relu', 'leakyrelu'])}}\n\tmodel = Sequential()\n\n\tmodel.add(Dense(512, input_dim=pTrainX.shape[1]))\n\n\tif activation == 'relu' :\n\t\tmodel.add(Activation('relu'))\n\telse :\n\t\tmodel.add(LeakyReLU())\n\n\tmodel.add(Dropout({{choice([0.0, 0.2, 0.5])}}))\n\n\tmodel.add(Dense(256))\n\t\n\tif activation == 'relu' :\n\t\tmodel.add(Activation('relu'))\n\telse :\n\t\tmodel.add(LeakyReLU())\n\n\tmodel.add(Dropout({{choice([0.0, 0.2, 0.5])}}))\n\n\tmodel.add(Dense(128))\n\t\n\tif activation == 'relu' :\n\t\tmodel.add(Activation('relu'))\n\telse :\n\t\tmodel.add(LeakyReLU())\n\n\tmodel.add(Dropout({{choice([0.0, 0.2, 0.5])}}))\n\n\tmodel.add(Dense(128))\n\n\tif activation == 'relu' :\n\t\tmodel.add(Activation('relu'))\n\telse :\n\t\tmodel.add(LeakyReLU())\n\n\tmodel.add(Dropout({{choice([0.0, 0.2, 0.5])}}))\n\n\tmodel.add(Dense(256))\n\t\n\tif activation == 'relu' :\n\t\tmodel.add(Activation('relu'))\n\telse :\n\t\tmodel.add(LeakyReLU())\n\n\tmodel.add(Dropout({{choice([0.0, 0.2, 0.5])}}))\n\n\tmodel.add(Dense(512))\n\t\n\tif activation == 'relu' :\n\t\tmodel.add(Activation('relu'))\n\telse :\n\t\tmodel.add(LeakyReLU())\n\n\tmodel.add(Dropout({{choice([0.0, 0.2, 0.5])}}))\n\n\tmodel.add(Dense(len(encoder.classes_)))\n\tmodel.add(Activation('softmax'))\n\n\tmodel.compile(\n\t\toptimizer='adam',\n\t\tloss='categorical_crossentropy',\n\t\tmetrics=['categorical_accuracy'])\n\n\tresult = model.fit(pTrainX, pTrainY,\n\t\tepochs=20,\n\t\tbatch_size= 128,\n\t\tvalidation_data=(pTestX, pTestY),\n\t\tcallbacks=[EarlyStopping(monitor='val_loss',\n\t\t\tmin_delta=0, patience=2, verbose=0, mode='auto')],\n\t\tverbose=1)\n\n\tscore, acc = model.evaluate(pTestX, pTestY, verbose=0)\n\tprint('Best val acc of epoch:', acc)\n\treturn {'loss': -acc, 'status': STATUS_OK, 'model': model}\n\ndef main():\n\tbest_run, best_model = optim.minimize(model=create_model, \n\t\t\t\t\t\t\t\t\t\t\tdata=data, \n\t\t\t\t\t\t\t\t\t\t\talgo=tpe.suggest, \n\t\t\t\t\t\t\t\t\t\t\tmax_evals=50, \n\t\t\t\t\t\t\t\t\t\t\ttrials=Trials())\n\n\tprint(\"Best performing model chosen hyper-parameters:\")\n\tprint(best_run)\n\n\tprint(best_model.summary())\n\nif __name__ == '__main__' :\n\tmain()"
},
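Hyperas expands each {{choice([...])}} template above into a hyperopt search space and drives it with TPE. Stripped of Keras, the underlying hyperopt loop looks roughly like this (the quadratic objective is a placeholder for model training):

```python
from hyperopt import fmin, tpe, hp, Trials, STATUS_OK

space = {"dropout": hp.choice("dropout", [0.0, 0.2, 0.5])}

def objective(params):
    # stand-in for fitting a model and returning its validation loss
    loss = (params["dropout"] - 0.2) ** 2
    return {"loss": loss, "status": STATUS_OK}

best = fmin(objective, space, algo=tpe.suggest, max_evals=10, trials=Trials())
print(best)  # {'dropout': <index of the chosen value in the choice list>}
```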
{
"alpha_fraction": 0.5246305465698242,
"alphanum_fraction": 0.5252463221549988,
"avg_line_length": 55.034481048583984,
"blob_id": "d91e2a0ddee25f1ca38cbe152b54acb7f7f44de6",
"content_id": "f796d45e2b7a32186356541db772a5af2c7f5b1b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1624,
"license_type": "no_license",
"max_line_length": 162,
"num_lines": 29,
"path": "/project_4/main.py",
"repo_name": "athanasios-gkikas/nlp_projects",
"src_encoding": "UTF-8",
"text": "import pos_model\nimport metrics\n\ndef main() :\n model = pos_model.PoStagger()\n model.compile_dataset()\n model.compile_model()\n model.train_model()\n model.load_model()\n metrics.evaluate_model(model)\n model.test_model()\n\n print(model.predict(['Feels', 'like', 'you', 'are', 'in', 'Brooklyn', ',', 'but', 'people', 'watching', 'is', 'entertaining', '.']))\n print(model.predict(['We', 'honestly', 'can', 'not', 'think', 'of', 'even', '1', 'thing', 'we', 'did', 'n\\'t', 'like', '!']))\n print(model.predict(['Bland', 'and', 'over', 'cooked', '.']))\n print(model.predict(['Great', 'meets', 'that', 'are', 'already', 'cooked', ',', 'easy', 'to', 'take', 'home', 'for', 'dinner', '.']))\n print(model.predict(['The', 'talk', 'of', 'the', 'day', 'besides', 'a', 'more', 'level', 'playing', 'field', 'with', 'China', 'was', 'North', 'Korea', '.']))\n print(model.predict(['The', 'workers', 'sped', 'up', 'and', 'down', 'the', 'street', 'with', 'no', 'mind', 'to', 'the', 'small', 'children', 'playing', '.']))\n print(model.predict(['They', 're', 'probably', 'just', 'drawn', 'for', 'the', 'show', 'anyways', '.']))\n print(model.predict(['One', 'of', 'the', 'pictures', 'shows', 'a', 'flag', 'that', 'was', 'found', 'in', 'Fallujah', '.']))\n print(model.predict([ \"I\", \"love\", \"you\", \".\"]))\n print(model.predict([\"I\", \"am\", \"in\", \"love\", \"with\", \"the\", \"giant\", \"plate\", \"of\", \"nachos\" \"!\"]))\n print(model.predict([\"Would\", \"love\", \"for\", \"you\", \"to\", \"join\", \"us\", \".\"]))\n print(model.predict([\"Let\", \"'s\", \"make\", \"love\", \".\"]))\n\n return\n\nif __name__ == \"__main__\" :\n main()"
}
] | 19 |
espinolaramd/Exam2_DiegoEspinola
|
https://github.com/espinolaramd/Exam2_DiegoEspinola
|
853924913e1f1d9b7cd8a966e4e4dd376dfff22d
|
e54db66deba88d8b298be6dc11cda1ff53a72e01
|
e03d25626ad34e3bbddae16688425601ad8c209a
|
refs/heads/master
| 2022-04-17T19:09:06.358487 | 2020-04-13T09:25:00 | 2020-04-13T09:25:00 | 255,210,276 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5226053595542908,
"alphanum_fraction": 0.5310344696044922,
"avg_line_length": 35.74647903442383,
"blob_id": "0d2ffbbf8e325bb31a218ea49fe10a8fa13e2c63",
"content_id": "4ba2f538569cefd91f642648408e6144093cfcc7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2610,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 71,
"path": "/Exam2.py",
"repo_name": "espinolaramd/Exam2_DiegoEspinola",
"src_encoding": "UTF-8",
"text": "#Diego Espinola\n#04.12.2020\n#second part of the exam, the drive.\nfrom Seacrh_Methods import bubble_sort\nfrom Seacrh_Methods import selection_sort\nfrom Seacrh_Methods import insertion_sort\nfrom Seacrh_Methods import merge_sort\nfrom Seacrh_Methods import quick_sort\n\nlist = []\nmethod = 0\nprint(\"Welcome to this program, this program will let you type any random list of numbers you have and it will\"\n \" organized the list for you\")\n\nwhile method != 6:\n print(\"Lets start by asking you what method do you want yo use? Here are you options please type \"\n \"the number of the method you want to use\")\n print(\"1 = bubble sort\")\n print(\"2 = selection sort\")\n print(\"3 = insertion sort\")\n print(\"4 = merge sort\")\n print(\"5 = quick sort\")\n print(\"6 = Exit the program\")\n method = int(input(\">\"))\n\n if method == 1:\n num = int(input(\"How many numbers do you want to add? \\n>\"))\n for n in range(num):\n numbers = int(input(\"Enter number:\"))\n list.append(numbers)\n bubble_sort(list)\n print(f\"Here is your list sorted:{list}\")\n list.clear()\n\n\n if method == 2:\n num = int(input(\"How many numbers do you want to add? \\n>\"))\n for n in range(num):\n numbers = int(input(\"Enter number:\"))\n list.append(numbers)\n selection_sort(list)\n print(f\"Here is your list sorted:{list}\")\n list.clear()\n\n if method == 3:\n num = int(input(\"How many numbers do you want to add? \\n>\"))\n for n in range(num):\n numbers = int(input(\"Enter number:\"))\n list.append(numbers)\n insertion_sort(list)\n print(f\"Here is your list sorted:{list}\")\n list.clear()\n\n if method == 4:\n num = int(input(\"How many numbers do you want to add? \\n>\"))\n for n in range(num):\n numbers = int(input(\"Enter number:\"))\n list.append(numbers)\n merge_sort(list)\n print(f\"Here is your list sorted:{list}\")\n list.clear()\n if method == 5:\n num = int(input(\"How many numbers do you want to add? \\n>\"))\n for n in range(num):\n numbers = int(input(\"Enter number:\"))\n list.append(numbers)\n quick_sort(list)\n print(f\"Here is your list sorted:{list}\")\n list.clear()\n if method == 6:\n print(\"Bye bye\")\n\n"
}
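The Seacrh_Methods module imported by this driver is not part of the record; since the driver sorts the list in place and then prints it, a minimal compatible bubble_sort might look like:

```python
def bubble_sort(nums):
    # repeatedly swap adjacent out-of-order items; sorts the list in place
    for end in range(len(nums) - 1, 0, -1):
        for i in range(end):
            if nums[i] > nums[i + 1]:
                nums[i], nums[i + 1] = nums[i + 1], nums[i]

data = [5, 1, 4, 2]
bubble_sort(data)
print(data)  # [1, 2, 4, 5]
```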
] | 1 |
komprogram/Accuracy-comparison-of-ML-classification-techniques
|
https://github.com/komprogram/Accuracy-comparison-of-ML-classification-techniques
|
e05ad85a10284753ad638391b7fdf44d5fe228a1
|
57145612fb4242a0cf075dc9aa28f11bdcb62e7b
|
0ced9267bea94caf0086c81b7ebe910d13d8f14e
|
refs/heads/master
| 2021-06-18T23:24:16.044411 | 2017-07-03T08:01:00 | 2017-07-03T08:01:00 | 91,300,639 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7116022109985352,
"alphanum_fraction": 0.7414364814758301,
"avg_line_length": 29.200000762939453,
"blob_id": "dd569bf3772c6414793ab9d8462cb3c1e88257d7",
"content_id": "0791718ad2d93849b6be7e256d25f009230b2d01",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 905,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 30,
"path": "/Documents/AIProject/decision.py",
"repo_name": "komprogram/Accuracy-comparison-of-ML-classification-techniques",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport csv\nfrom sklearn import preprocessing, cross_validation, neighbors\nfrom sklearn.tree import DecisionTreeClassifier\nimport pandas as pd\nfrom sklearn.cross_validation import train_test_split\nimport random\ndf= pd.read_csv('breast-cancer-wisconsin.data.txt')\ndf.replace('?',-99999, inplace=True)\n\ndf.drop(df.columns[0], axis=1,inplace=True)\n\nX = np.array(df.drop(df.columns[9], axis=1))\ny = np.array(df.iloc[:, 9])\nprint(X)\nprint(y)\t\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)\nclf = DecisionTreeClassifier(random_state=0)\n\nclf.fit(X_train, y_train)\nconfidence = clf.score(X_test, y_test)\nprint(confidence)\n\nexample_measures = np.array([[4,2,1,1,1,2,3,2,1]])\nexample_measures = example_measures.reshape(len(example_measures), -1)\nprediction = clf.predict(example_measures)\nif(prediction==[2]):\n\tprint(\"malignant\")\nelse:\n\tprint(\"benign\")"
},
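A single random split like the one above can give a noisy accuracy estimate; k-fold cross-validation averages over several splits. A sketch with the current sklearn.model_selection API (the sklearn.cross_validation module imported above was removed in scikit-learn 0.20), using the bundled breast cancer data as a stand-in for the CSV:

```python
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeClassifier

X, y = load_breast_cancer(return_X_y=True)  # bundled stand-in dataset
scores = cross_val_score(DecisionTreeClassifier(random_state=0), X, y, cv=5)
print(scores.mean(), scores.std())          # mean accuracy across 5 folds
```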
{
"alpha_fraction": 0.8510638475418091,
"alphanum_fraction": 0.8510638475418091,
"avg_line_length": 140,
"blob_id": "f004af91c7c706577dbd6275852f393e716d50b7",
"content_id": "c8f4008ea5af2e4c6382b76f3a15e7283eb00605",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 282,
"license_type": "no_license",
"max_line_length": 227,
"num_lines": 2,
"path": "/README.md",
"repo_name": "komprogram/Accuracy-comparison-of-ML-classification-techniques",
"src_encoding": "UTF-8",
"text": "# Accuracy-comparison-of-ML-classification-techniques\nWe used four classification techniques of Machine Learning,namely Naive Bayes ,Support Vector Machine,Decision Tree Classifier,K nearest neighbors.Algorithms were implemented in python on the Wisconsin Dataset of Breast Cancer.\n"
},
{
"alpha_fraction": 0.6790000200271606,
"alphanum_fraction": 0.6869999766349792,
"avg_line_length": 28,
"blob_id": "71db1ac69df42016da7f1e7e9b6ae2f6f95e6159",
"content_id": "27fb1e2f7baf8524346073f7f8795add8676ff00",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3000,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 100,
"path": "/Documents/AIProject/naive_bayes_classification.py",
"repo_name": "komprogram/Accuracy-comparison-of-ML-classification-techniques",
"src_encoding": "UTF-8",
"text": "\r\nimport csv\r\nimport random\r\nimport math\r\n\r\ndef loadCsv(filename):\r\n\tlines = csv.reader(open(filename, \"rb\"))\r\n\tdataset = list(lines)\r\n\tfor i in range(len(dataset)):\r\n\t\tdataset[i] = [float(x) for x in dataset[i]]\r\n\treturn dataset\r\n\r\ndef trainTestSplit(dataset, splitRatio):\r\n\ttrainLength = int(len(dataset) * splitRatio)\r\n\ttrainData = []\r\n\tcopy = list(dataset)\r\n\twhile len(trainData) < trainLength:\r\n\t\tindex = random.randrange(len(copy))\r\n\t\ttrainData.append(copy.pop(index))\r\n\treturn [trainData, copy]\r\n\r\ndef separateByClass(dataset):\r\n\tseperate = {}\r\n\tfor i in range(len(dataset)):\r\n\t\tvec = dataset[i]\r\n\t\tif (vec[-1] not in seperate):\r\n\t\t\tseperate[vec[-1]] = []\r\n\t\tseperate[vec[-1]].append(vec)\r\n\treturn seperate\r\n\r\ndef mean(values):\r\n\treturn sum(values)/float(len(values))\r\n\r\ndef stdev(values):\r\n\tavg = mean(values)\r\n\tvariance = sum([pow(x-avg,2) for x in values])/float(len(values)-1)\r\n\treturn math.sqrt(variance)\r\n\r\ndef summarize(dataset):\r\n\tsummaries = [(mean(attribute), stdev(attribute)) for attribute in zip(*dataset)]\r\n\tdel summaries[-1]\r\n\treturn summaries\r\n\r\ndef summarizeByClass(dataset):\r\n\tseperate = separateByClass(dataset)\r\n\t#print seperate\r\n\tsummaries = {}\r\n\tfor classValue, instances in seperate.iteritems(): \r\n\t\tsummaries[classValue] = summarize(instances)\r\n\treturn summaries\r\n\r\ndef findProbability(x, mean, stdev):\r\n\texponent = math.exp(-(math.pow(x-mean,2)/(2*math.pow(stdev,2))))\r\n\treturn (1 / (math.sqrt(2*math.pi) * stdev)) * exponent\r\n\r\ndef findClassProbabilities(summaries, inputvec):\r\n\tprobabilities = {}\r\n\tfor classValue, classSummaries in summaries.iteritems():\r\n\t\tprobabilities[classValue] = 1\r\n\t\tfor i in range(len(classSummaries)):\r\n\t\t\tmean, stdev = classSummaries[i]\r\n\t\t\tx = inputvec[i]\r\n\t\t\tprobabilities[classValue] *= findProbability(x, mean, stdev)\r\n\treturn probabilities\r\n\t\t\t\r\ndef predict(summaries, inputvec):\r\n\tprobabilities = findClassProbabilities(summaries, inputvec)\r\n\tbestLabel, bestProb = None, -1\r\n\tfor classValue, probability in probabilities.iteritems():\r\n\t\tif bestLabel is None or probability > bestProb:\r\n\t\t\tbestProb = probability\r\n\t\t\tbestLabel = classValue\r\n\treturn bestLabel\r\n\r\ndef getPredictions(summaries, testData):\r\n\tpredictions = []\r\n\tfor i in range(len(testData)):\r\n\t\tresult = predict(summaries, testData[i])\r\n\t\tpredictions.append(result)\r\n\treturn predictions\r\n\r\ndef getAccuracy(testData, predictions):\r\n\tcorrect = 0\r\n\tfor i in range(len(testData)):\r\n\t\tif testData[i][-1] == predictions[i]:\r\n\t\t\tcorrect += 1\r\n\treturn (correct/float(len(testData))) * 100.0\r\n\r\ndef main():\r\n\t #filename = 'pima-indians-diabetes.data.csv'\r\n\t i=0.67\r\n\t filename='data/cancer.data.csv'\r\n\t splitRatio = i\r\n\t dataset = loadCsv(filename)\r\n\t trainingSet, testData = trainTestSplit(dataset, splitRatio)\r\n\t summaries = summarizeByClass(trainingSet)\r\n predictions = getPredictions(summaries, testData)\r\n\t accuracy = getAccuracy(testData, predictions)\r\n\t print('Accuracy: {0}%').format(accuracy)\r\n\r\nmain()"
},
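findProbability above evaluates the Gaussian density this Naive Bayes assigns to each attribute value. A quick numeric check of the same formula (the inputs are illustrative):

```python
import math

def gaussian(x, mean, stdev):
    # density of a normal distribution N(mean, stdev) evaluated at x
    exponent = math.exp(-((x - mean) ** 2) / (2 * stdev ** 2))
    return exponent / (math.sqrt(2 * math.pi) * stdev)

print(gaussian(71.5, 73.0, 6.2))  # ~0.0625
```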
{
"alpha_fraction": 0.6416916251182556,
"alphanum_fraction": 0.6550116539001465,
"avg_line_length": 25.072072982788086,
"blob_id": "45766d94ae90a63f9d0d9927f18106f9e02a99b3",
"content_id": "92834eeedb452c360f4e14d2fb25c3f248708398",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3003,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 111,
"path": "/Documents/AIProject/kn.py",
"repo_name": "komprogram/Accuracy-comparison-of-ML-classification-techniques",
"src_encoding": "UTF-8",
"text": "import csv\r\nimport random\r\nimport math\r\nimport operator\r\nimport numpy as np\r\n#from sklearn import preprocessing, cross_validation, neighbors, svm\r\n#import pandas as pd\r\n#from sklearn.cluster import KMeans\r\n\r\ndef loadDataset(filename, split, trainingSet=[] , testSet=[]):\r\n\twith open(filename, newline='') as csvfile:\r\n\t lines = csv.reader(csvfile)\r\n\t dataset = list(lines)\r\n\t for x in range(len(dataset)-1):\r\n\t for y in range(4):\r\n\t dataset[x][y] = float(dataset[x][y])\r\n\t if random.random() < split:\r\n\t trainingSet.append(dataset[x])\r\n\t else:\r\n\t testSet.append(dataset[x])\r\n\r\n\r\ndef euclideanDistance(instance1, instance2, length):\r\n\tdistance = 0\r\n\tfor x in range(length):\r\n\t\t#print instance1[x] - instance2[x]\r\n\t\ta=instance1[x]\r\n\t\tb=instance2[x]\r\n\t\ta=int(a)\r\n\t\tb=int(b)\r\n\t\tdistance += math.pow((a - b), 2)\r\n\t\tprint (distance)\r\n\t\tprint (math.sqrt(distance))\r\n\treturn math.sqrt(distance)\r\n\r\n\r\ndef getNeighbors(trainingSet, testInstance, k):\r\n\tdistances = []\r\n\tlength = len(testInstance)-1\r\n\tfor x in range(len(trainingSet)):\r\n\t\tdist = euclideanDistance(testInstance, trainingSet[x], length)\r\n\t\tdistances.append((trainingSet[x], dist))\r\n\tdistances.sort(key=operator.itemgetter(1))\r\n\tneighbors = []\r\n\tfor x in range(k):\r\n\t\tneighbors.append(distances[x][0])\r\n\treturn neighbors\r\n\r\ndef getResponse(neighbors):\r\n\tclassVotes = {}\r\n\tfor x in range(len(neighbors)):\r\n\t\tresponse = neighbors[x][-1]\r\n\t\tif response in classVotes:\r\n\t\t\tclassVotes[response] += 1\r\n\t\telse:\r\n\t\t\tclassVotes[response] = 1\r\n\tsortedVotes = sorted(classVotes.iteritems(), key=operator.itemgetter(1), reverse=True)\r\n\treturn sortedVotes[0][0]\r\n\r\ndef getAccuracy(testSet, predictions):\r\n\tcorrect = 0\r\n\tfor x in range(len(testSet)):\r\n\t\tif testSet[x][-1] == predictions[x]:\r\n\t\t\tcorrect += 1\r\n\treturn (correct/float(len(testSet))) * 100.0\r\n\t\r\ndef main():\r\n\t# prepare data\r\n\ttrainingSet=[]\r\n\ttestSet=[]\r\n\tsplit = 0.67\r\n\tloadDataset('breast-cancer-wisconsin.data', split, trainingSet, testSet)\r\n\r\n\tprint ('Train set: ' + repr(len(trainingSet)))\r\n\tprint ('Test set: ' + repr(len(testSet)))\r\n\t# generate predictions\r\n\tpredictions=[]\r\n\tk = 3\r\n\tfor x in range(len(testSet)):\r\n\t\tneighbors = getNeighbors(trainingSet, testSet[x], k)\r\n\t\tresult = getResponse(neighbors)\r\n\t\tpredictions.append(result)\r\n\t\tprint('> predicted=' + repr(result) + ', actual=' + repr(testSet[x][-1]))\r\n\taccuracy = getAccuracy(testSet, predictions)\r\n\tprint('Accuracy: ' + repr(accuracy) + '%')\r\n\t\r\nmain()\r\n\"\"\"\r\n\tdf = pd.read_csv('data/breast-cancer-wisconsin.data.txt')\r\n\tdf.replace('?',-99999, inplace=True)\r\n\tdf.drop(['id'], 1, inplace=True)\r\n\r\n\tX = np.array(df.drop(['class'], 1))\r\n\ty = np.array(df['class'])\r\n\tX_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.2)\r\n\"\"\"\r\n\r\n\"\"\"\r\n\r\n\r\n\r\n\r\n\r\nclf_knn = neighbors.KNeighborsClassifier()\r\nclf_knn.fit(X_train, y_train)\r\naccuracy = clf_knn.score(X_test, y_test)\r\nprint(accuracy)\r\n\r\n# Example of kNN implemented from Scratch in Python\r\n\r\n\"\"\""
}
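The heart of the loop above is a Euclidean distance plus a majority vote over the k nearest training rows; a compact Python 3 sketch of both steps:

```python
import math
from collections import Counter

def euclidean(a, b):
    return math.sqrt(sum((x - y) ** 2 for x, y in zip(a, b)))

train = [([1, 1], "benign"), ([8, 9], "malignant"), ([1, 2], "benign")]
query = [2, 1]
nearest = sorted(train, key=lambda row: euclidean(row[0], query))[:3]
print(Counter(label for _, label in nearest).most_common(1)[0][0])  # benign
```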
] | 4 |
GoodLuck-09/meiduo_github
|
https://github.com/GoodLuck-09/meiduo_github
|
37a6ffdb4fd8ec188d62ed03b5f071bd97ba5267
|
a0c07b00bffabbe12a78fcee14d04e18fc28a9d9
|
2848778dd981be4952389c43addd3313aa3bf33f
|
refs/heads/master
| 2021-09-21T03:21:05.867393 | 2018-08-20T08:03:53 | 2018-08-20T08:03:53 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6632026433944702,
"alphanum_fraction": 0.6693434119224548,
"avg_line_length": 30.073530197143555,
"blob_id": "b01fda3eb9a34652022968cbac0424ed782a148b",
"content_id": "7a1405bc749e782e92f957f1381fa3f43ad4a7ed",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2285,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 68,
"path": "/meiduo_mall/meiduo_mall/apps/verifications/views.py",
"repo_name": "GoodLuck-09/meiduo_github",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\n\n# Create your views here.\nfrom django_redis import get_redis_connection\nfrom rest_framework.views import APIView\nfrom meiduo_mall.libs import constants\nfrom meiduo_mall.libs.captcha.captcha import captcha\nfrom django.http.response import HttpResponse\nfrom rest_framework.generics import GenericAPIView\nfrom . import serializers\nimport random\nfrom meiduo_mall.libs.yuntongxun.sms import CCP\nfrom rest_framework.response import Response\nfrom celery_tasks.sms.tasks import send_sms_code\n\n\nclass ImageCodeView(APIView):\n \"\"\"\n 图片验证码\n\n \"\"\"\n def get(self, request, image_code_id):\n \"\"\"\n 获取图片验证码\n \"\"\"\n\n # 生成验证码图片\n text, image = captcha.generate_captcha()\n\n redis_conn = get_redis_connection(\"verify_codes\")\n redis_conn.setex(\"img_%s\" % image_code_id, constants.IMAGE_CODE_REDIS_EXPIRES, text)\n\n return HttpResponse(image, content_type=\"images/jpg\")\n\n\nclass SMSCodeView(GenericAPIView):\n serializer_class = serializers.CheckImageCodeSerializer\n\n def get(self, request, mobile):\n serializer = self.get_serializer(data=request.query_params)\n serializer.is_valid(raise_exception=True)\n\n # 校验通过,生成短信验证码\n sms_code = '%06d' % random.randint(0, 999999)\n\n # 保存验证码及发送记录\n redis_conn = get_redis_connection('verify_codes')\n # redis_conn.setex(\"sms_%s\" % mobile, constants.SMS_CODE_REDIS_EXPIRES, sms_code)\n # redis_conn.setex(\"send_flag_%s\" % mobile, constants.SMS_SEND_INTERVAL, 1)\n\n # 使用redis管道代替之前的直接执行命令,可以一次执行多条命令\n pl = redis_conn.pipeline()\n pl.setex(\"sms_%s\" % mobile, constants.SMS_CODE_REDIS_EXPIRES, sms_code)\n pl.setex(\"send_flag_%s\" % mobile, constants.SMS_SEND_INTERVAL, 1)\n\n # 让管道执行命令\n pl.execute()\n\n # 发送短信\n # ccp = CCP()\n\n # sms_time = str(constants.SMS_CODE_REDIS_EXPIRES / 60)\n # ccp.send_template_sms(mobile, [sms_code, sms_time], constants.SMS_CODE_TEMPLATE_ID)\n\n # 使用celery异步实现\n send_sms_code.delay(mobile, sms_code)\n\n return Response({'message': 'ok'})\n\n\n\n\n"
},
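The SMS view batches its two SETEX commands through a redis pipeline so they travel in one round trip. A standalone sketch of the same pattern with the plain redis-py client (host, db, phone number and code are illustrative assumptions):

```python
import redis

conn = redis.Redis(host="127.0.0.1", port=6379, db=0)
pl = conn.pipeline()
pl.setex("sms_13800000000", 300, "123456")  # code with a 5 minute TTL
pl.setex("send_flag_13800000000", 60, 1)    # resend guard with a 60 second TTL
pl.execute()                                # both commands sent together
```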
{
"alpha_fraction": 0.6620689630508423,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 21.947368621826172,
"blob_id": "3bd4f825e84a9fbd0fcd81bc0bc956a526fd84be",
"content_id": "6f54617b3ca3dcd14cc611513e7b024528efee2c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 461,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 19,
"path": "/meiduo_mall/celery_tasks/sms/tasks.py",
"repo_name": "GoodLuck-09/meiduo_github",
"src_encoding": "UTF-8",
"text": "from .yuntongxun.sms import CCP\nfrom . import constants\nfrom celery_tasks.main import celery_app\n\n\n@celery_app.task(name='send_sms_code')\ndef send_sms_code(mobile, sms_code):\n \"\"\"\n\n :param mobile: 手机号码\n :param sms_code: 短信验证码\n :return: None\n\n \"\"\"\n # 发送短信\n ccp = CCP()\n\n sms_time = str(constants.SMS_CODE_REDIS_EXPIRES / 60)\n ccp.send_template_sms(mobile, [sms_code, sms_time], constants.SMS_CODE_TEMPLATE_ID)"
},
{
"alpha_fraction": 0.7454954981803894,
"alphanum_fraction": 0.7454954981803894,
"avg_line_length": 23.61111068725586,
"blob_id": "c6131b3bb99d8a8b0e0f1a05f11b76b02caca987",
"content_id": "bb3c3fe7e53ce6b4847b525cfe407bf1c16ba799",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 516,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 18,
"path": "/meiduo_mall/celery_tasks/main.py",
"repo_name": "GoodLuck-09/meiduo_github",
"src_encoding": "UTF-8",
"text": "from celery import Celery\n# 为celery使用django配置文件进行设置\nimport os\nif not os.getenv('DJANGO_SETTINGS_MODULE'):\n os.environ['DJANGO_SETTINGS_MODULE'] = 'meiduo_mall.settings.dev'\n\n# 创建celery应用\ncelery_app = Celery('meiduo')\n\n# 导入celery配置\ncelery_app.config_from_object('celery_tasks.config')\n\n# 自动注册celery任务\ncelery_app.autodiscover_tasks(['celery_tasks.sms'])\n\n# 启动celery\n# celery -A(应用) (应用的路径)-l info\n# celery -A celery_tasks.main worker -l info\n\n"
},
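For local testing without a broker or a separate worker process, Celery's task_always_eager switch runs tasks in-process; a hedged sketch, assuming the project package is importable and using a placeholder number and code:

```python
from celery_tasks.main import celery_app
from celery_tasks.sms.tasks import send_sms_code

celery_app.conf.task_always_eager = True  # execute in-process, skip the broker
send_sms_code.delay("13800000000", "123456")
```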
{
"alpha_fraction": 0.6894409656524658,
"alphanum_fraction": 0.7453415989875793,
"avg_line_length": 13.636363983154297,
"blob_id": "f7e2b43b95f22b3a588f6fe172f64c0f30bf2ed6",
"content_id": "4d8f7b376c2177bf00883d93d35ff500bf5ace0b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 239,
"license_type": "no_license",
"max_line_length": 30,
"num_lines": 11,
"path": "/meiduo_mall/meiduo_mall/libs/constants.py",
"repo_name": "GoodLuck-09/meiduo_github",
"src_encoding": "UTF-8",
"text": "# 图片验证码的有效期\nIMAGE_CODE_REDIS_EXPIRES = 300\n\n# 短信验证码的有效期\nSMS_CODE_REDIS_EXPIRES= 300\n\n# 短信验证码的发送间隔时间\nSMS_SEND_INTERVAL = 60\n\n# 短信验证码模版编号\nSMS_CODE_TEMPLATE_ID = 1\n"
},
{
"alpha_fraction": 0.4324324429035187,
"alphanum_fraction": 0.6216216087341309,
"avg_line_length": 36,
"blob_id": "8c0e10e64e3f90c607a698231164cb896170f6e9",
"content_id": "3422f2a0f7ad426bfb94bef03202e41fc33f13fa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 74,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 2,
"path": "/meiduo_mall/celery_tasks/config.py",
"repo_name": "GoodLuck-09/meiduo_github",
"src_encoding": "UTF-8",
"text": "broker_url = \"redis://127.0.0.1/3\"\nresult_backend = \"redis://127.0.0.1/4\"\n"
},
{
"alpha_fraction": 0.782608687877655,
"alphanum_fraction": 0.782608687877655,
"avg_line_length": 10.5,
"blob_id": "4dd3009bae3d42d36e49d3fbd428fa54221ecf8d",
"content_id": "713545236cf5a036919629e41d39be3f50bf9f41",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 23,
"license_type": "no_license",
"max_line_length": 15,
"num_lines": 2,
"path": "/README.md",
"repo_name": "GoodLuck-09/meiduo_github",
"src_encoding": "UTF-8",
"text": "# meiduo_github\nmeiduo\n"
}
] | 6 |
smehan/App-data-reqs
|
https://github.com/smehan/App-data-reqs
|
042a661fb1d00f7685bda1b97de0d6e6ade6dc5e
|
64e31ed2b2ea9616f7266fbf8e112e4ee9740564
|
ee9b1a754bf0d996a4d7865f9f5f3ad0236e8793
|
refs/heads/master
| 2020-12-12T03:56:30.590261 | 2015-08-31T17:22:21 | 2015-08-31T17:22:21 | 35,231,958 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7822878360748291,
"alphanum_fraction": 0.7822878360748291,
"avg_line_length": 44.16666793823242,
"blob_id": "7389d1990cd57444d8dbbcc467b92efc824aaeb3",
"content_id": "c7ed36fc318a442e1dda17443f2bbff029c167c8",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 271,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 6,
"path": "/README.md",
"repo_name": "smehan/App-data-reqs",
"src_encoding": "UTF-8",
"text": "# App-data-reqs\nR to analyze cp app data requests\n\nThe data is being pulled from JIRA and now has multiple empty columns\ndue to lack of data quality control on entry. These are being cleansed\nin a pre-processor in log-preprocessor.py. The first R class to run is Load.R.\n"
},
{
"alpha_fraction": 0.40116846561431885,
"alphanum_fraction": 0.40116846561431885,
"avg_line_length": 59.411766052246094,
"blob_id": "7b408ca1c1dd8b602828d49f6f7f28f6480fd87f",
"content_id": "95a5989658894385bd1958ac5b0077defea08add",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 2054,
"license_type": "permissive",
"max_line_length": 87,
"num_lines": 34,
"path": "/Fishbone.R",
"repo_name": "smehan/App-data-reqs",
"src_encoding": "UTF-8",
"text": "###########################################################\n### Fishbone.R is a class to make fishbone diagrams.\n### It leverages the qcc package.\n###########################################################\n\nlibrary(qcc)\n\n# This is the constructor for the fishbone diagram. There are two sets of inputs.\n# Causes have major and minor branches. Effects has a single value.\n\ncause.and.effect(cause=list(Technology=c(\"Revised Form Confusing\",\n \"Sits in email inbox\\n if approver out\",\n \"manual reminders\",\n \"form outdated\",\n \"third party\"),\n Policy=c(\"Confusion of authority\\n to enforce policy\",\n \"Who can say no\",\n \"Lose control of data\\n once released\"),\n People=c(\"Don't understand single\\n source of truth\",\n \"Turnover of requestors\",\n \"No training\",\n \"Don't understand data levels\",\n \"Clarification of roles\\n and responsibilities\",\n \"Requestor doesn't know\\n what to ask for\"), \n Process=c(\"Don't understand what\\n requestor trying to do\",\n \"Unlcear on why form received\",\n \"Level of data requested (sensitive)\",\n \"Director level approval\",\n \"Why population is necessary\",\n \"Unknown priority\",\n \"No direct tie to XXXX process\",\n \"Can't see big pic of data use / need\",\n \"No single place to go to start process\")), \n effect=\"Delay in Approvals\")\n"
},
{
"alpha_fraction": 0.6299113035202026,
"alphanum_fraction": 0.6539924144744873,
"avg_line_length": 45.411766052246094,
"blob_id": "23d97a2e04c04c7000c9b9be2129d77b82204fda",
"content_id": "c7c34f0eeb0dd21b5646854efaa3b1be5ecc1e4a",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 789,
"license_type": "permissive",
"max_line_length": 108,
"num_lines": 17,
"path": "/log-preprocessor.py",
"repo_name": "smehan/App-data-reqs",
"src_encoding": "UTF-8",
"text": "__author__ = 'shawnmehan'\n\nimport csv, re\n\n# first lets open the file, which is tab delimited because of , in description field and others\nwith open('./data/AppDataRequest2010-2015.tsv', 'rb') as csvfile:\n outfile = open('./data/AppDataRequest2010-2015-clean.tsv', 'wb') # TODO get proper line endings, not ^M\n records = csv.reader(csvfile, delimiter='\\t')\n outwriter = csv.writer(outfile, delimiter='\\t')\n count = 0\n for row in records: # iterate through each row to clear out errant newlines\n for i, s in enumerate(row): # i is list index, s is string\n row[i] = re.sub(r'\\n+', '', s)\n outwriter.writerow(row) # write the clean row\n count += 1\n print u\"There were {0:d} lines processed in this run.\".format(count)\n outfile.close()\n"
},
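The script above is Python 2 (binary-mode csv plus print statements); a hedged Python 3 port of the same newline-stripping pass, opening both files with newline='' so the csv writer controls line endings (the ^M issue flagged in the TODO):

```python
import csv
import re

count = 0
with open('./data/AppDataRequest2010-2015.tsv', newline='') as src, \
     open('./data/AppDataRequest2010-2015-clean.tsv', 'w', newline='') as dst:
    writer = csv.writer(dst, delimiter='\t')
    for row in csv.reader(src, delimiter='\t'):
        # strip errant embedded newlines from every field
        writer.writerow([re.sub(r'\n+', '', field) for field in row])
        count += 1
print("There were {0:d} lines processed in this run.".format(count))
```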
{
"alpha_fraction": 0.5985983610153198,
"alphanum_fraction": 0.6058482527732849,
"avg_line_length": 49.14545440673828,
"blob_id": "058014411f013b768e1d5a6c473b553bd6e08db4",
"content_id": "7ac41ddcd2f50f83de37a576b477a187b6e7182c",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 8276,
"license_type": "permissive",
"max_line_length": 120,
"num_lines": 165,
"path": "/Approver.R",
"repo_name": "smehan/App-data-reqs",
"src_encoding": "UTF-8",
"text": "###########################################################\n### Approver.R is a class to make operations on Approver Data\n### including durations, numbers of approvers, etc.\n### Works off of dataset already cleansed from Load.R\n###########################################################\n\nlibrary(qcc)\n\n# First read in data set\nmyData <- readRDS(file=\"data/App_Data_Reqs.rds\")\n\n# if we need to identify and remove Key dups\ndups <- anyDuplicated(approverDF$Key)\napproverDF <- approverDF[-dups, ]\n\n# CONSTANT used to convert time deltas to days\nSECINDAY <- (60*60*24)\n\n# Subset out everything not an approval task\n## TODO haven't taken care of three bad rows of times yet!, so they are lost next function\n\nmyData <- myData[grep(\"Get Approval\", myData$Summary, perl = TRUE), ]\n\n# Create the approver DF as a subset of myData\napproverDF <- data.frame(Key = factor(myData$Key),\n Creator = myData$Creator,\n Assignee = myData$Assignee,\n Created = myData$Created,\n Resolved = myData$Resolved,\n # Issue.Type = myData$Issue.Type,\n Date.of.First.Response = myData$Date.of.First.Response,\n PA.AT = as.integer(myData$Preston.Allen.End - myData$Preston.Allen.Start)/(SECINDAY),\n DA.AT = as.integer(myData$Donna.Amos.End - myData$Donna.Amos.Start)/(SECINDAY),\n AB.AT = as.integer(myData$Alison.Beug.End - myData$Alison.Beug.Start)/(SECINDAY),\n MB.AT = as.integer(myData$Martin.Bragg.End - myData$Martin.Bragg.End)/(SECINDAY),\n KC.AT = as.integer(myData$Kacey.Chun.End - myData$Kacey.Chun.Start)/(SECINDAY),\n AC.AT = as.integer(myData$Anthony.Colvard.End - myData$Anthony.Colvard.Start)/(SECINDAY),\n MC.AT = as.integer(myData$Margie.Coolidge.End - myData$Margie.Coolidge.Start)/(SECINDAY),\n BG.AT = as.integer(myData$Beth.Gallagher.End - myData$Beth.Gallagher.Start)/(SECINDAY),\n KI.AT = as.integer(myData$Kimi.Ikeda.End - myData$Kimi.Ikeda.Start)/(SECINDAY),\n AL.AT = as.integer(myData$Al.Liddicoat.End - myData$Al.Liddicoat.Start)/(SECINDAY),\n JL.AT = as.integer(myData$John.Lyons.End - myData$John.Lyons.Start)/(SECINDAY),\n NO.AT = as.integer(myData$Nelda.Olvera.End - myData$Nelda.Olvera.Start)/(SECINDAY),\n JM.AT = as.integer(myData$Jim.Maraviglia.End - myData$Jim.Maraviglia.Start)/(SECINDAY),\n BM.AT = as.integer(myData$Barbara.Martinez.End - myData$Barbara.Martinez.Start)/(SECINDAY),\n TM.AT = as.integer(myData$Theresa.May.End - myData$Theresa.May.Start)/(SECINDAY),\n CN.AT = as.integer(myData$Craig.Nelson.End - myData$Craig.Nelson.Start)/(SECINDAY),\n DR.AT = as.integer(myData$Dave.Ross.End - myData$Dave.Ross.Start)/(SECINDAY),\n LS.AT = as.integer(myData$Lori.Serna.End - myData$Lori.Serna.Start)/(SECINDAY),\n MS.AT = as.integer(myData$Mary.Shaffer.End - myData$Mary.Shaffer.Start)/(SECINDAY),\n SS.AT = as.integer(myData$Sharif.Sharifi.End - myData$Sharif.Sharifi.Start)/(SECINDAY),\n CS.AT = as.integer(myData$Craig.Schultz..End - myData$Craig.Schultz.Start)/(SECINDAY),\n PS.AT = as.integer(myData$Patricia.Stoneman.End - myData$Patricia.Stoneman.Start)/(SECINDAY),\n SUESS.AT = as.integer(myData$Mike.Suess.End - myData$Mike.Suess.Start)/(SECINDAY),\n CEM.AT = as.integer(myData$Cem.Sunata.End - myData$Cem.Sunata.Start)/(SECINDAY),\n TV.AT = as.integer(myData$Terry.Vahey.End - myData$Terry.Vahey.Start)/(SECINDAY))\n\n\n# TODO Calculate the longest interval and call that the approval duration\n# TODO Remove timestamps hhmm\n# Remove duplicates (3 of) and add all times into the one row\ndups <- anyDuplicated(approverDF$Key)\n\nwhile (dups != 0){\n target <- which(approverDF$Key == 
approverDF$Key[dups])\n for (i in 7:ncol(approverDF)){\n approverDF[target, i] <- (approverDF[target, i] + approverDF[dups, i])\n }\n approverDF <- approverDF[-dups, ]\n dups <- anyDuplicated(approverDF$Key)\n}\n\n# Now take same form and clean up small and 0 days intervals.\nfor (r in 1:nrow(approverDF)){\n for (i in 7:ncol(approverDF)){\n if (!is.na(approverDF[r,i]) && approverDF[r,i] == 0){\n approverDF[r,i] <- 0.5\n } else if (!is.na(approverDF[r,i]) && approverDF[r,i] < 0.1){\n approverDF[r,i] <- 0.1\n } \n } \n}\n\n# Retrieve max of the ATs\napproverDF$Duration.AT <- apply(approverDF[sapply(approverDF,is.numeric)],1,max,na.rm=TRUE)\n\n# Now clean up -Inf in Duration.AT\n# TODO\n# This simply removes the -Inf, but still need to decide what to do with these\n# missing values.\napproverDF <- approverDF[-approverDF$Duration.AT < 0, ]\n\n# convert to long form\nmeltApproverDF <- melt(approverDF, \n# id.vars = c(\"Key, Creator, Assignee\"), #TODO melt chokes on Creator,Assignee\n id.vars = c(\"Key\"),\n measure.vars = c(\"Duration.AT\"),\n# measured.vars = c(\"PA.AT, DA.AT, AB.AT, MB.AT, KC.AT,\n# AC.AT, MC.AT, BG.AT, KI.AT, AL.AT, JL.AT, NO.AT,\n# JM.AT, BM.AT, TM.AT, CN.AT, DR.AT, LS.AT, MS.AT, \n# SS.AT, CS.AT, PS.AT, SUESS.AT, CEM.AT, TV.AT, Duration.AT\"),\n na.rm = TRUE,\n variable.name = \"stuff\",\n value.name = \"values\",\n factorsAsStrings = T)\n\n\n##########################################################\n### Add some plots\n##########################################################\nggplot(approverDF) +\n aes(x=Key, y=as.numeric(project_duration)) +\n geom_point(aes(color=factor(Assignee))) +\n facet_wrap(~year_created) +\n ggtitle(\"Project duration by \\nmonth project created by year created\")\n\n\nggplot(approverDF) +\n aes(x=Key, y=Duration.AT) +\n geom_point(aes(color=factor(Assignee))) +\n geom_hline(yintercept=mean(approverDF$Duration.AT), color=\"red\") +\n geom_hline(yintercept=mean(approverDF$Duration.AT)+2.66*sd(approverDF$Duration.AT), linetype=2, color=\"Navy Blue\") +\n geom_hline(yintercept=mean(approverDF$Duration.AT)-2.66*sd(approverDF$Duration.AT), linetype=2, color=\"Navy Blue\") +\n theme_stata() +\n ggtitle(\"Approval Duration of \\nApp Data Requests\") +\n labs(x=\"Requests\", y=\"Duration (days)\") +\n theme(axis.text.x = element_text(angle = 90)) +\n theme(strip.text.x = element_text(colour = \"red\", angle = 45, size = 10, hjust = 0.5, vjust = 0.5)) +\n theme(axis.title.y = element_text(vjust=1.0)) +\n theme(axis.title.x = element_text(vjust=-0.1)) +\n theme(plot.title = element_text(size=20, face=\"bold\", vjust=2))\n \n# Using qcc to get a simple XmR\n# Get the vector of observations in which are interested\nmy.xmr.raw <- approverDF$Duration.AT\n\n# Create the individuals chart and qcc object\nmy.xmr.x <- qcc(my.xmr.raw, type = \"xbar.one\", plot = T)\n\n# Create a process capability analysis of the xbar.one\nprocess.capability(my.xmr.x, spec.limits = c(-25.02,53.69))\n\n# Create a moving range chart as a qcc object. 
This takes a 2-col matrix that is used\n# to calculate the moving range.\nmy.xmr.raw.r <- matrix(cbind(my.xmr.raw[1:length(my.xmr.raw)-1],\n my.xmr.raw[2:length(my.xmr.raw)]),\n ncol = 2)\n# Make the XmR plot\nmy.xmr.mr <- qcc(my.xmr.raw.r, type=\"R\", plot = T, \n add.stats = T,\n title = \"Approval Process XmR Chart\",\n xlab = \"Approval Tasks\",\n ylab = \"Duration (days)\",\n axes.las = 0)\n\n# And now identify which observations are violating runs and which are out of control\nbeyond.limits(my.xmr.mr, limits = my.xmr.mr$limits)\nviolating.runs(my.xmr.mr, run.length = qcc.options(\"run.length\"))\n\n# density plot for the approval time\nggplot(approverDF, aes(x=approverDF$Duration.AT)) +\n geom_density(na.rm=T) +\n geom_vline(xintercept=mean(approverDF$Duration.AT), color=\"red\") +\n ggtitle(\"Density Plot for Approval Process Duration Times\") +\n labs(x=\"Duration (days)\", y=\"Kernel Density\")\n\n\n"
},
{
"alpha_fraction": 0.5157657861709595,
"alphanum_fraction": 0.5157657861709595,
"avg_line_length": 36,
"blob_id": "332522f115465e75cdde42042a123acf09914812",
"content_id": "9dce7e14e7dcd04c47e3e5c7f266fe7a33d76118",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 444,
"license_type": "permissive",
"max_line_length": 74,
"num_lines": 12,
"path": "/models.r",
"repo_name": "smehan/App-data-reqs",
"src_encoding": "UTF-8",
"text": "###########################################################\n### Models for durations\n###########################################################\n\n# First read in data set\nreadRDS(file=\"data/App_Data_Reqs.rds\")\n\nggplot(myData) +\n aes(x=month_num_created, y=as.numeric(project_duration)) +\n geom_point(aes(color=factor(Assignee))) +\n facet_wrap(~year_created) +\n ggtitle(\"Project duration by \\nmonth project created by year created\")\n"
}
] | 5 |
decolector/kill.me
|
https://github.com/decolector/kill.me
|
e932bd3de3ed6dc2cce322f9de3da74e923d5c55
|
4742c525848391b80ed780720f7a751ec84df4f5
|
c705ec8ee0d34948cab1c781b21378262b51e078
|
refs/heads/master
| 2021-01-10T21:30:43.803911 | 2014-03-19T22:15:09 | 2014-03-19T22:15:09 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7185184955596924,
"alphanum_fraction": 0.7185184955596924,
"avg_line_length": 18.285715103149414,
"blob_id": "552ae2ca770bc15f4927b93f97f2e9e2c2b3638c",
"content_id": "81d627d0ad699007a36eb91711bd03e9e2def4b1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 137,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 7,
"path": "/README.md",
"repo_name": "decolector/kill.me",
"src_encoding": "UTF-8",
"text": "kill.me\n=======\n\nCódigo y documentación del proyecto kill.me de Ana Maria Montenegro y Camilo Martínez\n\nhttp://montenegrojaramillo.com\nhttp://decolector.net\n"
},
{
"alpha_fraction": 0.6222639083862305,
"alphanum_fraction": 0.6460287570953369,
"avg_line_length": 25.649999618530273,
"blob_id": "cd07f3e2433a25b8bc4ee9ad78f52e92e425d78e",
"content_id": "1408219271f6cecc7b7b57ebc90d3f79694e2a19",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1599,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 60,
"path": "/main.py",
"repo_name": "decolector/kill.me",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n\nimport os, sys\nimport BaseHTTPServer\nimport serial \nimport time\n\ntry:\n from serial.tools.list_ports import comports\nexcept ImportError:\n print \"comports not present, check serial library\"\n comports = None\n\n\nPORT = 8000 \n\n\nclass myHandler(BaseHTTPServer.BaseHTTPRequestHandler):\n \"\"\"\n Clase heredada de BaseHTTPRequestHandler, para gestionar las acciones en cada request.\n \"\"\"\n\n\n\n def do_GET(self):\n print \"Acaba de entrar una peticion\"\n #envia un codigo 200 como respuesta http, es decir OK\n self.send_response(200)\n\n #envia los headers de respuesta \n self.protocol_version='HTTP/1.1'\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n\n # envia el html en el body de la respuesta\n #TODO servir un archivo del directorio raiz o custom dir\n self.wfile.write(bytes(\"<html> <head><title> Kill Me </title> </head> <body> <h1> Hola Mundo </h1><body></html>\"))\n\n\n #Abre puerto serial, espera 3 secs, escribe la data y cierra puerto\n #TODO implementar callbacks y threading\n micro = serial.Serial(\"/dev/tty.usbmodem621\", 115200)\n time.sleep(2)\n micro.write('a')\n micro.close()\n\n\nif __name__ == '__main__':\n\n httpd_class = BaseHTTPServer.HTTPServer \n httpd = httpd_class((\"192.168.1.29\", 8000), myHandler)\n\n # gestion de escepciones y errores con try - except\n try:\n print \"Starting Server\"\n httpd.serve_forever()\n\n except KeyboardInterrupt:\n print \"Stoping Server\"\n httpd.server_close()\n"
}
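Any GET against this server triggers the serial write; a minimal Python 3 client sketch for exercising it (the host and port mirror the values hard-coded in __main__):

```python
from urllib.request import urlopen

# one request == one 'a' byte written to the microcontroller
with urlopen("http://192.168.1.29:8000/") as resp:
    print(resp.status, resp.read()[:40])
```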
] | 2 |
denimboy/red-fab-deploy
|
https://github.com/denimboy/red-fab-deploy
|
493a6a114e8b885ff0ec5130a2addd4dfde49eab
|
8055664152439b7905824fb7433adcb367deb371
|
c19ffe3d8f0ba7e2fb6f6ab436d98c8a88ce319c
|
refs/heads/master
| 2021-01-17T01:12:29.206733 | 2012-12-28T19:45:18 | 2012-12-28T19:45:18 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5457668900489807,
"alphanum_fraction": 0.5504397749900818,
"avg_line_length": 36.99477767944336,
"blob_id": "52303eb5c24fd0c725327fc049bf570ffcc72b82",
"content_id": "ac280f1b892f9fb7376f3d0f48d77a798922ba16",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14552,
"license_type": "permissive",
"max_line_length": 97,
"num_lines": 383,
"path": "/fab_deploy/amazon/postgres.py",
"repo_name": "denimboy/red-fab-deploy",
"src_encoding": "UTF-8",
"text": "import os\nimport sys\nimport tempfile\n\nfrom fabric.api import run, sudo, env, local, hide, settings\nfrom fabric.contrib.files import append, exists\nfrom fabric.operations import put\n\nfrom fabric.tasks import Task\n\nfrom fab_deploy.functions import random_password\n\nimport utils\n\n\nclass PostgresInstall(Task):\n \"\"\"\n Install postgresql on server.\n\n This task gets executed inside other tasks, including\n setup.db_server, setup.slave_db and setup.dev_server\n\n install postgresql package, and set up access policy in pg_hba.conf.\n enable postgres access from localhost without password;\n enable all other user access from other machines with password;\n setup a few parameters related with streaming replication;\n database server listen to all machines '*';\n create a user for database with password.\n \"\"\"\n\n name = 'master_setup'\n db_version = '9.1'\n\n encrypt = 'md5'\n hba_txts = ('local all postgres ident\\n'\n 'host replication replicator 0.0.0.0/0 md5\\n'\n 'local all all password\\n'\n '# # IPv4 local connections:\\n'\n 'host all all 127.0.0.1/32 %(encrypt)s\\n'\n '# # IPv6 local connections:\\n'\n 'host all all ::1/128 %(encrypt)s\\n'\n '# # IPv4 external\\n'\n 'host all all 0.0.0.0/0 %(encrypt)s\\n')\n\n postgres_config = {\n 'listen_addresses': \"'*'\",\n 'wal_level': \"hot_standby\",\n 'wal_keep_segments': \"32\",\n 'max_wal_senders': \"5\",\n 'archive_mode': \"on\"}\n\n def _get_data_dir(self, db_version):\n return os.path.join('/var/lib/postgresql', '%s' % db_version, 'main')\n\n def _get_config_dir(self, db_version):\n return os.path.join('/etc/postgresql', '%s' % db_version, 'main')\n\n def _setup_parameter(self, file, **kwargs):\n for key, value in kwargs.items():\n origin = \"#%s =\" % key\n new = \"%s = %s\" % (key, value)\n sudo('sed -i \"/%s/ c\\%s\" %s' % (origin, new, file))\n\n\n def _setup_hba_config(self, config_dir=None, encrypt=None):\n \"\"\"\n enable postgres access without password from localhost\n \"\"\"\n hba_conf = os.path.join(config_dir, 'pg_hba.conf')\n kwargs = {'config_dir': config_dir, 'encrypt': encrypt}\n hba_txts = self.hba_txts % kwargs\n\n if exists(hba_conf, use_sudo=True):\n sudo(\"echo '%s' > %s\" % (hba_txts, hba_conf))\n else:\n print ('Could not find file %s. Please make sure postgresql was '\n 'installed and data dir was created correctly.'% hba_conf)\n sys.exit(1)\n\n def _setup_postgres_config(self, config_dir=None, config=None):\n postgres_conf = os.path.join(config_dir, 'postgresql.conf')\n\n if exists(postgres_conf, use_sudo=True):\n self._setup_parameter(postgres_conf, **config)\n else:\n print ('Could not find file %s. Please make sure postgresql was '\n 'installed and data dir was created correctly.' 
% postgres_conf)\n sys.exit(1)\n\n def _setup_archive_dir(self, data_dir):\n archive_dir = os.path.join(data_dir, 'wal_archive')\n sudo(\"mkdir -p %s\" % archive_dir)\n sudo(\"chown postgres:postgres %s\" % archive_dir)\n\n return archive_dir\n\n def _setup_ssh_key(self):\n ssh_dir = '/var/lib/postgresql/.ssh'\n\n sudo('mkdir -p %s' % ssh_dir)\n sudo('chown -R postgres:postgres %s' % ssh_dir)\n sudo('chmod -R og-rwx %s' % ssh_dir)\n rsa = os.path.join(ssh_dir, 'id_rsa')\n run('sudo su postgres -c \"ssh-keygen -t rsa -f %s -N \\'\\'\"' % rsa)\n\n def _create_user(self, section):\n username = raw_input(\"Now we are creating the database user, please \"\n \"specify a username: \")\n # 'postgres' is postgresql superuser\n while username == 'postgres':\n username = raw_input(\"Sorry, you are not allowed to use postgres \"\n \"as username, please choose another one: \")\n db_out = run('echo \"select usename from pg_shadow where usename=\\'%s\\'\" |'\n 'sudo su postgres -c psql' % username)\n if username in db_out:\n print 'user %s already exists, skipping creating user.' % username\n else:\n run(\"sudo su postgres -c 'createuser -D -S -R -P %s'\" % username)\n\n env.config_object.set(section, env.config_object.USERNAME, username)\n\n return username\n\n def _create_replicator(self, db_version, section):\n db_out = run(\"echo '\\du replicator' | sudo su postgres -c 'psql'\")\n if 'replicator' not in db_out:\n replicator_pass = random_password(12)\n\n c1 = ('CREATE USER replicator REPLICATION LOGIN ENCRYPTED '\n 'PASSWORD \\\"\\'%s\\'\\\"' % replicator_pass)\n run(\"echo %s | sudo su postgres -c \\'psql\\'\" % c1)\n history_file = os.path.join('/var', 'pgsql', '.psql_history')\n if exists(history_file):\n sudo('rm %s' % history_file)\n\n env.config_object.set(section, env.config_object.REPLICATOR,\n 'replicator')\n env.config_object.set(section,\n env.config_object.REPLICATOR_PASS,\n replicator_pass)\n return replicator_pass\n else:\n print \"user replicator already exists, skipping creating user.\"\n\n\n def run(self, db_version=None, encrypt=None, save_config=True,\n section=None, **kwargs):\n \"\"\"\n \"\"\"\n if not section:\n section = 'db-server'\n if not db_version:\n db_version = self.db_version\n db_version = '.'.join(db_version.split('.')[:2])\n data_dir = self._get_data_dir(db_version)\n config_dir = self._get_config_dir(db_version)\n\n if not encrypt:\n encrypt = self.encrypt\n\n sudo(\"apt-get -y install postgresql\")\n sudo(\"apt-get -y install postgresql-contrib\")\n sudo(\"service postgresql start\")\n archive_dir = self._setup_archive_dir(data_dir)\n self.postgres_config['archive_command'] = (\"'cp %s %s/wal_archive/%s'\"\n % ('%p', data_dir, '%f'))\n\n self._setup_hba_config(config_dir, encrypt)\n self._setup_postgres_config(config_dir=config_dir,\n config=self.postgres_config)\n sudo('service postgresql restart')\n self._setup_ssh_key()\n self._create_user(section)\n self._create_replicator(db_version, section)\n\n if save_config:\n env.config_object.save(env.conf_filename)\n\n\nclass SlaveSetup(PostgresInstall):\n \"\"\"\n Set up master-slave streaming replication: slave node\n \"\"\"\n\n name = 'slave_setup'\n\n postgres_config = {\n 'listen_addresses': \"'*'\",\n 'wal_level': \"hot_standby\",\n 'hot_standby': \"on\"}\n\n def _get_master_db_version(self, master):\n command = (\"ssh %s psql --version | head -1 | awk '{print $3}'\" % master)\n version_string = local(command, capture=True)\n version = '.'.join(version_string.split('.')[:2])\n\n return version\n\n def 
_get_replicator_pass(self, section='db-server'):\n password = env.config_object.get_list(section,\n env.config_object.REPLICATOR_PASS)\n return password[0]\n\n def _setup_recovery_conf(self, master_ip, password, data_dir, psql_bin):\n wal_dir = os.path.join(data_dir, 'wal_archive')\n recovery_conf = os.path.join(data_dir, 'recovery.conf')\n\n txts = ((\"standby_mode = 'on'\\n\") +\n (\"primary_conninfo = 'host=%s \" % master_ip) +\n (\"port=5432 user=replicator password=%s'\\n\" % password) +\n (\"trigger_file = '/tmp/pgsql.trigger'\\n\") +\n (\"restore_command = 'cp -f %s/%s </dev/null'\\n\"\n % (wal_dir, '%f %p')) +\n (\"archive_cleanup_command = '%s/pg_archivecleanup %s %s'\\n\"\n % (psql_bin, wal_dir, \"%r\")))\n\n sudo('touch %s' % recovery_conf)\n append(recovery_conf, txts, use_sudo=True)\n sudo('chown postgres:postgres %s' % recovery_conf)\n\n def _ssh_key_exchange(self, master, slave):\n \"\"\"\n copy ssh key(pub) from master to slave, so that master can access slave\n without password via ssh\n \"\"\"\n ssh_dir = '/var/lib/postgresql/.ssh'\n\n with settings(host_string=master):\n rsa_pub = os.path.join(ssh_dir, 'id_rsa.pub')\n with hide('output'):\n pub_key = sudo('cat %s' % rsa_pub)\n\n with settings(host_string=slave):\n authorized_keys = os.path.join(ssh_dir, 'authorized_keys')\n with hide('output', 'running'):\n run('sudo su postgres -c \"echo %s >> %s\"'\n % (pub_key, authorized_keys))\n\n def run(self, master=None, encrypt=None, **kwargs):\n \"\"\"\n \"\"\"\n if not master:\n print \"Hey, a master is required for slave.\"\n sys.exit(1)\n\n master_ip = master.split('@')[-1]\n db_version = self._get_master_db_version(master=master)\n data_dir = self._get_data_dir(db_version)\n config_dir = self._get_config_dir(db_version)\n psql_bin = os.path.join('/usr/lib/postgresql', '%s' %db_version, 'bin')\n slave = env.host_string\n slave_ip = slave.split('@')[-1]\n\n sudo(\"apt-get -y install postgresql\")\n sudo(\"apt-get -y install postgresql-contrib\")\n sudo('service postgresql stop')\n\n self._setup_ssh_key()\n self._ssh_key_exchange(master, slave)\n\n with settings(host_string=master):\n master_internal_ip = run(utils.get_ip_command('eth0'))\n\n run('echo \"select pg_start_backup(\\'backup\\', true)\" | sudo su postgres -c \\'psql\\'')\n run('sudo su postgres -c \"rsync -av --exclude postmaster.pid '\n '--exclude server.crt --exclude server.key '\n '--exclude pg_xlog %s/ postgres@%s:%s/\"'\n % (data_dir, slave_ip, data_dir))\n run('echo \"select pg_stop_backup()\" | sudo su postgres -c \\'psql\\'')\n\n self._setup_postgres_config(config_dir=config_dir,\n config=self.postgres_config)\n self._setup_archive_dir(data_dir)\n\n replicator_pass = self._get_replicator_pass()\n self._setup_recovery_conf(master_ip=master_internal_ip,\n password=replicator_pass,\n data_dir=data_dir,\n psql_bin=psql_bin)\n\n if not encrypt:\n encrypt = self.encrypt\n self._setup_hba_config(config_dir, encrypt)\n\n sudo('service postgresql start')\n print('password for replicator on master node is %s' % replicator_pass)\n\n log_dir = '/var/log/postgresql/postgresql-%s-main.log' %db_version\n log = run('tail -5 %s' %log_dir)\n if ('streaming replication successfully connected' in log and\n 'database system is ready to accept read only connections' in log):\n print \"streaming replication set up is successful\"\n else:\n print (\"something unexpected occurred. streaming replication is not\"\n \" successful. 
please check all configuration and fix it.\")\n\n\n\n\nclass PGBouncerInstall(Task):\n \"\"\"\n Set up PGBouncer on a database server\n \"\"\"\n\n name = 'setup_pgbouncer'\n\n config_dir = '/etc/pgbouncer'\n\n config = {\n '*': 'host=127.0.0.1',\n 'logfile': '/var/log/pgbouncer/pgbouncer.log',\n 'pidfile': '/var/run/pgbouncer/pgbouncer.pid',\n 'listen_addr': '*',\n 'listen_port': '6432',\n 'unix_socket_dir': '/var/run/postgresql',\n 'auth_type': 'md5',\n 'auth_file': '%s/pgbouncer.userlist' % config_dir,\n 'pool_mode': 'session',\n 'admin_users': 'postgres',\n 'stats_users': 'postgres',\n }\n\n def _setup_parameter(self, file, **kwargs):\n for key, value in kwargs.items():\n origin = \"%s =\" % key\n new = \"%s = %s\" % (key, value)\n sudo('sed -i \"/%s/ c\\%s\" %s' % (origin, new, file))\n\n def _get_passwd(self, username):\n with hide('output'):\n string = run('echo \"select usename, passwd from pg_shadow where '\n 'usename=\\'%s\\' order by 1\" | sudo su postgres -c '\n '\"psql\"' % username)\n\n user, passwd = string.split('\\n')[2].split('|')\n user = user.strip()\n passwd = passwd.strip()\n\n __, tmp_name = tempfile.mkstemp()\n fn = open(tmp_name, 'w')\n fn.write('\"%s\" \"%s\" \"\"\\n' % (user, passwd))\n fn.close()\n put(tmp_name, '%s/pgbouncer.userlist' % self.config_dir, use_sudo=True)\n local('rm %s' % tmp_name)\n\n def _get_username(self, section=None):\n try:\n names = env.config_object.get_list(section, env.config_object.USERNAME)\n username = names[0]\n except:\n print ('You must first set up a database server on this machine, '\n 'and create a database user')\n raise\n return username\n\n def run(self, section=None):\n \"\"\"\n \"\"\"\n sudo('apt-get -y install pgbouncer')\n\n self._setup_parameter('%s/pgbouncer.ini' % self.config_dir, **self.config)\n\n if not section:\n section = 'db-server'\n username = self._get_username(section)\n self._get_passwd(username)\n # postgres should be the owner of these config files\n sudo('chown -R postgres:postgres %s' % self.config_dir)\n\n # pgbouncer won't run smoothly without these directories\n sudo('mkdir -p /var/run/pgbouncer')\n sudo('mkdir -p /var/log/pgbouncer')\n sudo('chown postgres:postgres /var/run/pgbouncer')\n sudo('chown postgres:postgres /var/log/pgbouncer')\n\n # start pgbouncer\n pgbouncer_control_file = '/etc/default/pgbouncer'\n sudo(\"sed -i 's/START=0/START=1/' %s\" %pgbouncer_control_file)\n sudo('service pgbouncer start')\n\nsetup = PostgresInstall()\nslave_setup = SlaveSetup()\nsetup_pgbouncer = PGBouncerInstall()\n"
},
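Both Postgres setup tasks in this dump edit postgresql.conf by shelling out to sed -i "/#key =/ c\key = value". For anyone untangling that one-liner, here is a minimal pure-Python sketch of the same substitution; the file path and parameters are made-up examples, not part of the repo:

import re

def set_parameters(path, **params):
    # Uncomment and rewrite 'key = value' lines, postgresql.conf style.
    with open(path) as fh:
        lines = fh.read().splitlines()
    for key, value in params.items():
        pattern = re.compile(r'^#?\s*%s\s*=.*$' % re.escape(key))
        lines = [pattern.sub('%s = %s' % (key, value), line) for line in lines]
    with open(path, 'w') as fh:
        fh.write('\n'.join(lines) + '\n')

# Hypothetical usage:
# set_parameters('/tmp/postgresql.conf', wal_level='hot_standby', archive_mode='on')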
{
"alpha_fraction": 0.5488757491111755,
"alphanum_fraction": 0.5541941523551941,
"avg_line_length": 36.510101318359375,
"blob_id": "93da2e76a461e0bf414e66babadac039eaf884dd",
"content_id": "77ea839b188f1b657c6bfdf3e9046ad52ae4f911",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14854,
"license_type": "permissive",
"max_line_length": 101,
"num_lines": 396,
"path": "/fab_deploy/joyent/postgres.py",
"repo_name": "denimboy/red-fab-deploy",
"src_encoding": "UTF-8",
"text": "import os\nimport sys\nimport sgmllib\nimport urllib2\nimport tempfile\n\nfrom fabric.api import run, sudo, env, local, hide, settings\nfrom fabric.contrib.files import append, sed, exists, contains\nfrom fabric.context_managers import prefix\nfrom fabric.operations import get, put\nfrom fabric.context_managers import cd\n\nfrom fabric.tasks import Task\n\nfrom fab_deploy.functions import random_password\nimport utils\n\nclass PostgresInstall(Task):\n \"\"\"\n Install postgresql on server\n\n install postgresql package;\n enable postgres access from localhost without password;\n enable all other user access from other machines with password;\n setup a few parameters related with streaming replication;\n database server listen to all machines '*';\n create a user for database with password.\n \"\"\"\n\n name = 'master_setup'\n db_version = '9.1'\n\n encrypt = 'md5'\n hba_txts = ('local all postgres ident\\n'\n 'host replication replicator 0.0.0.0/0 md5\\n'\n 'local all all password\\n'\n '# # IPv4 local connections:\\n'\n 'host all all 127.0.0.1/32 %(encrypt)s\\n'\n '# # IPv6 local connections:\\n'\n 'host all all ::1/128 %(encrypt)s\\n'\n '# # IPv4 external\\n'\n 'host all all 0.0.0.0/0 %(encrypt)s\\n')\n\n postgres_config = {\n 'listen_addresses': \"'*'\",\n 'wal_level': \"hot_standby\",\n 'wal_keep_segments': \"32\",\n 'max_wal_senders': \"5\",\n 'archive_mode': \"on\" }\n\n def _get_data_dir(self, db_version):\n # Try to get from svc first\n output = run('svcprop -p config/data postgresql')\n if output.stdout and exists(output.stdout, use_sudo=True):\n return output.stdout\n\n data_path = os.path.join('/var', 'pgsql')\n data_version_path = os.path.join(data_path, 'data%s' %db_version)\n if exists(data_version_path, use_sudo=True):\n return data_version_path\n else:\n return os.path.join(data_path, 'data')\n\n def _setup_parameter(self, file, **kwargs):\n for key, value in kwargs.items():\n origin = \"#%s =\" %key\n new = \"%s = %s\" %(key, value)\n sudo('sed -i \"/%s/ c\\%s\" %s' %(origin, new, file))\n\n def _install_package(self, db_version=None):\n sudo(\"pkg_add postgresql%s-server\" %db_version)\n sudo(\"pkg_add postgresql%s-replicationtools\" %db_version)\n sudo(\"svcadm enable postgresql\")\n\n def _setup_hba_config(self, data_dir=None, encrypt=None):\n \"\"\"\n enable postgres access without password from localhost\n \"\"\"\n hba_conf = os.path.join(data_dir, 'pg_hba.conf')\n kwargs = {'data_dir':data_dir, 'encrypt':encrypt}\n hba_txts = self.hba_txts % kwargs\n\n if exists(hba_conf, use_sudo=True):\n sudo(\"echo '%s' > %s\" %(hba_txts, hba_conf))\n else:\n print ('Could not find file %s. Please make sure postgresql was '\n 'installed and data dir was created correctly.'%hba_conf)\n sys.exit(1)\n\n def _setup_postgres_config(self, data_dir=None, config=None):\n postgres_conf = os.path.join(data_dir, 'postgresql.conf')\n\n if exists(postgres_conf, use_sudo=True):\n self._setup_parameter(postgres_conf, **config)\n else:\n print ('Could not find file %s. Please make sure postgresql was '\n 'installed and data dir was created correctly.' 
%postgres_conf)\n sys.exit(1)\n\n def _setup_archive_dir(self, data_dir):\n archive_dir = os.path.join(data_dir, 'wal_archive')\n sudo(\"mkdir -p %s\" %archive_dir)\n sudo(\"chown postgres:postgres %s\" %archive_dir)\n\n return archive_dir\n\n def _setup_ssh_key(self):\n ssh_dir = '/var/pgsql/.ssh'\n\n rsa = os.path.join(ssh_dir, 'id_rsa')\n if exists(rsa):\n print \"rsa key exists, skipping creating\"\n else:\n sudo('mkdir -p %s' %ssh_dir)\n sudo('chown -R postgres:postgres %s' %ssh_dir)\n sudo('chmod -R og-rwx %s' %ssh_dir)\n run('sudo su postgres -c \"ssh-keygen -t rsa -f %s -N \\'\\'\"' %rsa)\n\n def _restart_db_server(self, db_version):\n sudo('svcadm restart postgresql')\n\n def _create_user(self, section):\n username = raw_input(\"Now we are creating the database user, please \"\n \"specify a username: \")\n # 'postgres' is postgresql superuser\n while username == 'postgres':\n username = raw_input(\"Sorry, you are not allowed to use postgres \"\n \"as username, please choose another one: \")\n db_out = run('echo \"select usename from pg_shadow where usename=\\'%s\\'\" |'\n 'sudo su postgres -c psql' %username)\n if username in db_out:\n print 'user %s already exists, skipping creating user.' %username\n else:\n run(\"sudo su postgres -c 'createuser -D -S -R -P %s'\" %username)\n\n env.config_object.set(section, env.config_object.USERNAME, username)\n\n return username\n\n def _create_replicator(self, db_version, section):\n db_out = run(\"echo '\\du replicator' | sudo su postgres -c 'psql'\")\n if 'replicator' not in db_out:\n replicator_pass = random_password(12)\n\n c1 = ('CREATE USER replicator REPLICATION LOGIN ENCRYPTED '\n 'PASSWORD \\\"\\'%s\\'\\\"' %replicator_pass)\n run(\"echo %s | sudo su postgres -c \\'psql\\'\" %c1)\n history_file = os.path.join('/var', 'pgsql', '.psql_history')\n if exists(history_file):\n sudo('rm %s' %history_file)\n env.config_object.set(section, env.config_object.REPLICATOR,\n 'replicator')\n env.config_object.set(section, env.config_object.REPLICATOR_PASS,\n replicator_pass)\n return replicator_pass\n else:\n print \"user replicator already exists, skipping creating user.\"\n return None\n\n def run(self, db_version=None, encrypt=None, save_config=True,\n section='db-server', **kwargs):\n \"\"\"\n \"\"\"\n if not db_version:\n db_version = self.db_version\n db_version = ''.join(db_version.split('.')[:2])\n\n if not encrypt:\n encrypt = self.encrypt\n\n self._install_package(db_version=db_version)\n data_dir = self._get_data_dir(db_version)\n archive_dir = self._setup_archive_dir(data_dir)\n\n config = dict(self.postgres_config)\n config['archive_command'] = (\"'cp %s %s/wal_archive/%s'\"\n %('%p', data_dir, '%f'))\n\n self._setup_hba_config(data_dir, encrypt)\n self._setup_postgres_config(data_dir=data_dir,\n config=config)\n self._restart_db_server(db_version)\n self._setup_ssh_key()\n self._create_user(section)\n self._create_replicator(db_version, section)\n\n if save_config:\n env.config_object.save(env.conf_filename)\n\n\nclass SlaveSetup(PostgresInstall):\n \"\"\"\n Set up master-slave streaming replication: slave node\n \"\"\"\n\n name = 'slave_setup'\n\n postgres_config = {\n 'listen_addresses': \"'*'\",\n 'wal_level': \"hot_standby\",\n 'hot_standby': \"on\"}\n\n def _get_master_db_version(self, master):\n command = (\"ssh %s psql --version | head -1 | awk '{print $3}'\" %master)\n version_string = local(command, capture=True)\n version = ''.join(version_string.split('.')[:2])\n\n return version\n\n def _get_replicator_pass(self):\n 
try:\n password = env.config_object.get_list('db-server',\n env.config_object.REPLICATOR_PASS)\n return password[0]\n except:\n print (\"I can't find replicator-password from db-server section \"\n \"of your server.ini file.\\n Please set up replicator user \"\n \"in your db-server, and register its info in server.ini\")\n sys.exit(1)\n\n def _setup_recovery_conf(self, master_ip, password, data_dir):\n wal_dir = os.path.join(data_dir, 'wal_archive')\n recovery_conf = os.path.join(data_dir, 'recovery.conf')\n\n txts = ((\"standby_mode = 'on'\\n\") +\n (\"primary_conninfo = 'host=%s \" %master_ip) +\n (\"port=5432 user=replicator password=%s'\\n\" %password) +\n (\"trigger_file = '/tmp/pgsql.trigger'\\n\") +\n (\"restore_command = 'cp -f %s/%s </dev/null'\\n\"\n %(wal_dir, '%f %p')) +\n (\"archive_cleanup_command = 'pg_archivecleanup %s %s'\\n\"\n %(wal_dir, \"%r\")))\n\n sudo('touch %s' %recovery_conf)\n append(recovery_conf, txts, use_sudo=True)\n sudo('chown postgres:postgres %s' %recovery_conf)\n\n def _ssh_key_exchange(self, master, slave):\n \"\"\"\n copy ssh key(pub) from master to slave, so that master can access slave\n without password via ssh\n \"\"\"\n ssh_dir = '/var/pgsql/.ssh'\n\n with settings(host_string=master):\n rsa_pub = os.path.join(ssh_dir, 'id_rsa.pub')\n with hide('output'):\n pub_key = sudo('cat %s' %rsa_pub)\n\n with settings(host_string=slave):\n authorized_keys = os.path.join(ssh_dir, 'authorized_keys')\n with hide('output', 'running'):\n run('sudo su postgres -c \"echo %s >> %s\"'\n %(pub_key, authorized_keys))\n\n def run(self, master=None, encrypt=None, section=None, **kwargs):\n \"\"\"\n \"\"\"\n if not master:\n print \"Hey, a master is required for slave.\"\n sys.exit(1)\n\n replicator_pass = self._get_replicator_pass()\n\n db_version = self._get_master_db_version(master=master)\n slave = env.host_string\n slave_ip = slave.split('@')[1]\n\n self._install_package(db_version=db_version)\n data_dir = self._get_data_dir(db_version)\n sudo('svcadm disable postgresql')\n\n self._setup_ssh_key()\n self._ssh_key_exchange(master, slave)\n\n with settings(host_string=master):\n master_ip = run(utils.get_ip_command(None)) # internal ip\n\n run('echo \"select pg_start_backup(\\'backup\\', true)\" | sudo su postgres -c \\'psql\\'')\n run('sudo su postgres -c \"rsync -av --exclude postmaster.pid '\n '--exclude pg_xlog %s/ postgres@%s:%s/\"'%(data_dir, slave_ip, data_dir))\n run('echo \"select pg_stop_backup()\" | sudo su postgres -c \\'psql\\'')\n\n self._setup_postgres_config(data_dir=data_dir,\n config=self.postgres_config)\n self._setup_archive_dir(data_dir)\n\n self._setup_recovery_conf(master_ip=master_ip,\n password=replicator_pass, data_dir=data_dir)\n\n if not encrypt:\n encrypt = self.encrypt\n self._setup_hba_config(data_dir, encrypt)\n\n sudo('svcadm enable postgresql')\n print('password for replicator on master node is %s' %replicator_pass)\n\nclass PGBouncerInstall(Task):\n \"\"\"\n Set up PGBouncer on a database server\n \"\"\"\n\n name = 'setup_pgbouncer'\n\n pgbouncer_src = 'http://pkgsrc.smartos.org/packages/SmartOS/2012Q2/databases/pgbouncer-1.4.2.tgz'\n pkg_name = 'pgbouncer-1.4.2.tgz'\n config_dir = '/etc/opt/pkg'\n\n config = {\n '*': 'host=127.0.0.1',\n 'logfile': '/var/log/pgbouncer/pgbouncer.log',\n 'pidfile': '/var/pgsql/pgbouncer/pgbouncer.pid',\n 'listen_addr': '*',\n 'listen_port': '6432',\n 'unix_socket_dir': '/tmp',\n 'auth_type': 'md5',\n 'auth_file': '%s/pgbouncer.userlist' %config_dir,\n 'pool_mode': 'session',\n 'admin_users': 
'postgres',\n 'stats_users': 'postgres',\n }\n\n def _setup_parameter(self, file, **kwargs):\n for key, value in kwargs.items():\n origin = \"%s =\" %key\n new = \"%s = %s\" %(key, value)\n sudo('sed -i \"/%s/ c\\%s\" %s' %(origin, new, file))\n\n def _get_passwd(self, username):\n with hide('output'):\n string = run('echo \"select usename, passwd from pg_shadow where '\n 'usename=\\'%s\\' order by 1\" | sudo su postgres -c '\n '\"psql\"' %username)\n\n user, passwd = string.split('\\n')[2].split('|')\n user = user.strip()\n passwd = passwd.strip()\n\n __, tmp_name = tempfile.mkstemp()\n fn = open(tmp_name, 'w')\n fn.write('\"%s\" \"%s\" \"\"\\n' %(user, passwd))\n fn.close()\n put(tmp_name, '%s/pgbouncer.userlist'%self.config_dir, use_sudo=True)\n local('rm %s' %tmp_name)\n\n def _get_username(self, section=None):\n try:\n names = env.config_object.get_list(section, env.config_object.USERNAME)\n username = names[0]\n except:\n print ('You must first set up a database server on this machine, '\n 'and create a database user')\n raise\n return username\n\n def run(self, section=None):\n \"\"\"\n \"\"\"\n\n sudo('pkg_add libevent')\n sudo('mkdir -p /opt/pkg/bin')\n sudo(\"ln -sf /opt/local/bin/awk /opt/pkg/bin/nawk\")\n sudo(\"ln -sf /opt/local/bin/sed /opt/pkg/bin/nbsed\")\n\n with cd('/tmp'):\n run('wget %s' %self.pgbouncer_src)\n sudo('pkg_add %s' %self.pkg_name)\n\n svc_method = os.path.join(env.configs_dir, 'pgbouncer.xml')\n put(svc_method, self.config_dir, use_sudo=True)\n\n self._setup_parameter('%s/pgbouncer.ini' %self.config_dir, **self.config)\n\n if not section:\n section = 'db-server'\n username = self._get_username(section)\n self._get_passwd(username)\n # postgres should be the owner of these config files\n sudo('chown -R postgres:postgres %s' %self.config_dir)\n\n # pgbouncer won't run smoothly without these directories\n sudo('mkdir -p /var/pgsql/pgbouncer')\n sudo('mkdir -p /var/log/pgbouncer')\n sudo('chown postgres:postgres /var/pgsql/pgbouncer')\n sudo('chown postgres:postgres /var/log/pgbouncer')\n\n # set up log\n sudo('logadm -C 3 -p1d -c -w /var/log/pgbouncer/pgbouncer.log -z 1')\n run('svccfg import %s/pgbouncer.xml' %self.config_dir)\n\n # start pgbouncer\n sudo('svcadm enable pgbouncer')\n\nsetup = PostgresInstall()\nslave_setup = SlaveSetup()\nsetup_pgbouncer = PGBouncerInstall()\n"
},
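One subtle point shared by both _setup_recovery_conf implementations above: the restore_command template mixes Python %-formatting with PostgreSQL's own %f/%p placeholders, smuggling the latter through as a formatted-in argument. A tiny self-checking sketch of that trick (the data dir path is invented):

wal_dir = '/var/pgsql/data91/wal_archive'  # hypothetical standby wal archive
restore = "restore_command = 'cp -f %s/%s </dev/null'\n" % (wal_dir, '%f %p')
assert restore == ("restore_command = 'cp -f "
                   "/var/pgsql/data91/wal_archive/%f %p </dev/null'\n")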
{
"alpha_fraction": 0.5884707570075989,
"alphanum_fraction": 0.5884707570075989,
"avg_line_length": 24.489795684814453,
"blob_id": "9d173dc075daf48b0ee1477d61bce12f64f9b220",
"content_id": "155f725b04a20fca8ffe994d27e4b26ed29160da",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1249,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 49,
"path": "/fab_deploy/amazon/nginx.py",
"repo_name": "denimboy/red-fab-deploy",
"src_encoding": "UTF-8",
"text": "import os\n\nfrom fabric.api import sudo, env, local\nfrom fabric.tasks import Task\n\nDEFAULT_NGINX_CONF = \"nginx/nginx.conf\"\n\n\nclass NginxInstall(Task):\n \"\"\"\n Install nginx\n\n Takes one optional argument:\n\n * **nginx_conf**: the relative path of the nginx config file\n (that is part of your repo) that you want use\n as your nginx config. If not provided it will\n default to nginx/nginx.conf\n\n Also sets up log rotation\n \"\"\"\n\n name = 'setup'\n\n def run(self, nginx_conf=None, hosts=[]):\n \"\"\"\n \"\"\"\n if not nginx_conf:\n nginx_conf = DEFAULT_NGINX_CONF\n\n self._install_package()\n # self._setup_logging()\n self._setup_dirs()\n self._setup_config(nginx_conf=nginx_conf)\n\n def _install_package(self):\n sudo(\"apt-get -y install nginx\")\n\n def _setup_dirs(self):\n sudo('mkdir -p /var/www/cache-tmp')\n sudo('mkdir -p /var/www/cache')\n sudo('chown -R www-data:www-data /var/www')\n\n def _setup_config(self, nginx_conf=None):\n remote_conv = os.path.join(env.git_working_dir, 'deploy', nginx_conf)\n sudo('ln -sf %s /etc/nginx/nginx.conf' % remote_conv)\n\n\nsetup = NginxInstall()\n"
},
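A hedged usage note: elsewhere in this dump (AppSetup._setup_services in the amazon setup module) tasks like this one are chained through Fabric's execute, so a caller would drive the setup roughly as below; the config path here is a hypothetical example:

from fabric.api import execute

# 'nginx.setup' is the task name this module registers; the path is invented.
execute('nginx.setup', nginx_conf='nginx/production.conf')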
{
"alpha_fraction": 0.5637653470039368,
"alphanum_fraction": 0.5673604607582092,
"avg_line_length": 31.42331314086914,
"blob_id": "4f37ee4ec40c1687a75d88236419bb06a9160ae2",
"content_id": "995ce907d7fffd720b5b7b10abdb8f43377dfa53",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10570,
"license_type": "permissive",
"max_line_length": 102,
"num_lines": 326,
"path": "/fab_deploy/amazon/setup.py",
"repo_name": "denimboy/red-fab-deploy",
"src_encoding": "UTF-8",
"text": "import sys\nfrom fabric.api import run, sudo, execute, env\nfrom fabric.tasks import Task\n\nfrom fab_deploy import functions\n\nfrom api import get_ec2_connection\nimport utils\n\n\nclass BaseSetup(Task):\n \"\"\"\n Base server setup.\n\n Sets up ssh so root cannot login and other logins must\n be key based.\n \"\"\"\n\n # Because setup tasks modify the config file\n # they should always be run serially.\n serial = True\n\n def _update_config(self, config_section):\n if not env.host_string:\n print \"env.host_string is None, please specify a host by -H \"\n sys.exit(1)\n added = False\n cons = env.config_object.get_list(config_section,\n env.config_object.CONNECTIONS)\n if not env.host_string in cons:\n added = True\n cons.append(env.host_string)\n env.config_object.set_list(config_section,\n env.config_object.CONNECTIONS, cons)\n\n ips = env.config_object.get_list(config_section,\n env.config_object.INTERNAL_IPS)\n internal_ip = run(utils.get_ip_command('eth0'))\n ips.append(internal_ip)\n\n env.config_object.set_list(config_section,\n env.config_object.INTERNAL_IPS, ips)\n return added\n\n def _save_config(self):\n env.config_object.save(env.conf_filename)\n\n def _secure_ssh(self):\n # Change disable root and password\n # logins in /etc/ssh/sshd_config\n sudo('sed -ie \"s/^PermitRootLogin.*/PermitRootLogin no/g\" /etc/ssh/sshd_config')\n sudo('sed -ie \"s/^PasswordAuthentication.*/PasswordAuthentication no/g\" /etc/ssh/sshd_config')\n sudo('service ssh restart')\n\n def _update_apt(self):\n #update apt repository so installation of packages can be smooth\n sudo('apt-get update')\n\n\nclass AppSetup(BaseSetup):\n \"\"\"\n Setup an app-server\n\n After base setup installs nginx setups a git repo. Then\n calls the deploy task.\n\n Also installs gunicorn, python, and other base packages.\n Runs the scripts/setup.sh script.\n\n Once finished it add the new instance into load balancer\n\n This is a serial task as it modifies local config files.\n \"\"\"\n\n name = 'app_server'\n\n config_section = 'app-server'\n settings_host = config_section\n\n nginx_conf = 'nginx/nginx.conf'\n\n git_branch = 'master'\n git_hook = None\n\n def _add_remote(self, name=None):\n if not env.host_string in env.git_reverse:\n name = functions.get_remote_name(env.host_string,\n self.config_section, name=name)\n execute('local.git.add_remote', remote_name=name,\n user_and_host=env.host_string)\n return name\n\n def _set_profile(self):\n super(AppSetup, self)._set_profile()\n if self.settings_host and env.project_env_var:\n data = {'env_name': env.project_env_var,\n 'value' : self.settings_host}\n line = '%(env_name)s=\"%(value)s\"; export %(env_name)s' % data\n append('/etc/profile', line, use_sudo=True)\n\n def _transfer_files(self):\n execute('git.setup', branch=self.git_branch, hook=self.git_hook)\n execute('local.git.push', branch=self.git_branch)\n execute('local.git.reset_remote')\n\n def _modify_others(self):\n execute('setup.lb_server', section=self.config_section)\n\n def _install_packages(self):\n sudo('apt-get -y install python-psycopg2')\n sudo('apt-get -y install python-setuptools')\n sudo('apt-get -y install python-imaging')\n sudo('apt-get -y install python-pip')\n self._install_venv()\n\n def _install_venv(self):\n sudo('pip install virtualenv')\n run('sh %s/scripts/setup.sh production' % env.git_working_dir)\n\n def _setup_services(self):\n execute('nginx.setup', nginx_conf=self.nginx_conf)\n sudo('service nginx restart')\n execute('gunicorn.setup')\n sudo('supervisorctl start 
gunicorn')\n\n def run(self, name=None):\n self._update_apt()\n self._update_config(self.config_section)\n\n self._add_remote(name=name)\n\n # Transfer files first so all configs are in place.\n self._transfer_files()\n\n self._secure_ssh()\n self._install_packages()\n self._setup_services()\n self._save_config()\n\n execute('deploy', branch=self.git_branch)\n\n self._modify_others()\n\n\nclass DBSetup(BaseSetup):\n \"\"\"\n Setup a database server\n \"\"\"\n name = 'db_server'\n config_section = 'db-server'\n\n def run(self, name=None):\n self._update_apt()\n self._update_config(self.config_section)\n self._secure_ssh()\n execute('postgres.master_setup', save_config=False,\n section=self.config_section)\n self._save_config()\n\n\nclass SlaveSetup(DBSetup):\n \"\"\"\n Set up a slave database server with streaming replication\n \"\"\"\n name = 'slave_db'\n config_section = 'slave-db'\n\n def _get_master(self):\n cons = env.config_object.get_list('db-server',\n env.config_object.CONNECTIONS)\n n = len(cons)\n if n == 0:\n print ('I could not find db server in server.ini.'\n 'Did you set up a master server?')\n sys.exit(1)\n else:\n for i in range(1, n + 1):\n print \"[%2d ]: %s\" % (i, cons[i - 1])\n while True:\n choice = raw_input('I found %d servers in server.ini. Which '\n 'one do you want to use as master? ' % n)\n try:\n choice = int(choice)\n master = cons[choice - 1]\n break\n except:\n print \"please input a number between 1 and %d\" % n - 1\n\n return master\n\n def run(self, name=None):\n \"\"\"\n \"\"\"\n master = self._get_master()\n self._update_apt()\n if not env.config_object.has_section(self.config_section):\n env.config_object.add_section(self.config_section)\n self._update_config(self.config_section)\n self._secure_ssh()\n execute('postgres.slave_setup', master=master,\n section=self.config_section)\n self._save_config()\n\n\nclass DevSetup(AppSetup):\n \"\"\"\n Setup a development server\n \"\"\"\n name = 'dev_server'\n config_section = 'dev-server'\n settings_host = config_section\n\n def _modify_others(self):\n pass\n\n def _install_venv(self):\n sudo('pip install virtualenv')\n run('sh %s/scripts/setup.sh production development'\n % env.git_working_dir)\n\n def _setup_services(self):\n super(DevSetup, self)._setup_services()\n execute('postgres.master_setup', section=self.config_section)\n\n\nclass LBSetup(Task):\n \"\"\"\n Set up load balancer\n\n Create an elastic load balancer, read connections info from server.ini,\n get ip address and look for corresponding ec2 instances, and register\n the instances with load balancer.\n\n you may define the following optional arguments in env:\n * **lb_name**: name of load_balancer. 
If not defined, load balancer will\n be named after the name of your project directory.\n * **listeners**: listeners of load balancer, a list of tuple\n (lb port, instance port, protocol).\n If not provided, only port 80 will be registered.\n * **hc_policy**: a dictionary defining the health check policy, keys can be\n interval, target, healthy_threshold, timeout\n and unhealthy_threshold\n\n default value is\n hc_policy = {\n 'interval': 30,\n 'target': 'HTTP:80/index.html', }\n \"\"\"\n\n name = 'lb_server'\n config_section = 'load-balancer'\n\n hc_policy = {\n 'interval': 30,\n 'target': 'HTTP:80/index.html', }\n\n listeners = [(80, 80, 'http',)]\n\n def get_instance_id_by_ip(self, ip, **kwargs):\n \"\"\"\n get ec2 instance id based on ip address\n \"\"\"\n instances = []\n conn = get_ec2_connection(server_type='ec2', **kwargs)\n reservations = conn.get_all_instances()\n for resv in reservations:\n for instance in resv.instances:\n if instance.ip_address == ip:\n instances.append(instance.id)\n return instances\n\n def _get_elb(self, conn, lb_name):\n lbs = conn.get_all_load_balancers()\n for lb in lbs:\n if lb.name == lb_name:\n return lb\n return None\n\n def run(self, section, **kwargs):\n conn = get_ec2_connection(server_type='ec2', **kwargs)\n elb_conn = get_ec2_connection(server_type='elb', **kwargs)\n\n zones = [ z.name for z in conn.get_all_zones()]\n\n lb_name = env.get('lb_name')\n if not lb_name:\n lb_name = env.project_name\n\n listeners = env.get('listeners')\n if not listeners:\n listeners = self.listeners\n\n connections = env.config_object.get_list(section,\n env.config_object.CONNECTIONS)\n ips = [ ip.split('@')[-1] for ip in connections]\n for ip in ips:\n instances = self.get_instance_id_by_ip(ip, **kwargs)\n if len(instances) == 0:\n print \"Cannot find any ec2 instances match your connections\"\n sys.exit(1)\n\n elb = self._get_elb(elb_conn, lb_name)\n print \"find load balancer %s\" %lb_name\n if not elb:\n elb = elb_conn.create_load_balancer(lb_name, zones, listeners,\n security_groups=['lb_sg'])\n print \"load balancer %s successfully created\" %lb_name\n\n elb.register_instances(instances)\n print \"register instances into load balancer\"\n print instances\n\n hc_policy = env.get('hc_policy')\n if not hc_policy:\n hc_policy = self.hc_policy\n print \"Configure load balancer health check policy\"\n print hc\n hc = HealthCheck(**hc_policy)\n elb.configure_health_check(hc)\n\n\napp_server = AppSetup()\ndev_server = DevSetup()\ndb_server = DBSetup()\nslave_db = SlaveSetup()\nlb_server = LBSetup()\n"
},
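LBSetup.run above creates the ELB and its health check through whatever connection get_ec2_connection returns. Assuming that wrapper hands back boto 2.x objects (an assumption; api.py is not part of this dump), the equivalent standalone calls look roughly like the sketch below. Region, LB name, thresholds and instance id are all made up:

import boto.ec2.elb
from boto.ec2.elb import HealthCheck

conn = boto.ec2.elb.connect_to_region('us-east-1')  # hypothetical region
lb = conn.get_all_load_balancers(load_balancer_names=['myproject'])[0]
hc = HealthCheck(interval=30, target='HTTP:80/index.html',
                 healthy_threshold=3, unhealthy_threshold=5, timeout=5)
lb.configure_health_check(hc)
lb.register_instances(['i-0123456789abcdef0'])  # hypothetical instance id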
{
"alpha_fraction": 0.8504672646522522,
"alphanum_fraction": 0.8504672646522522,
"avg_line_length": 12.375,
"blob_id": "17fde6b6c45b16a7f3ff2dc9687074f7d8fe8e8f",
"content_id": "c7cc270a3bf940d7de5f75a5a82f16f7027d3501",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 107,
"license_type": "permissive",
"max_line_length": 15,
"num_lines": 8,
"path": "/fab_deploy/amazon/__init__.py",
"repo_name": "denimboy/red-fab-deploy",
"src_encoding": "UTF-8",
"text": "import utils\nimport setup\nimport git\nimport gunicorn\nimport nginx\nimport postgres\nimport manage\nimport api\n"
},
{
"alpha_fraction": 0.5932539701461792,
"alphanum_fraction": 0.5967261791229248,
"avg_line_length": 29.77862548828125,
"blob_id": "c70697a1b5da7b4b41eb6b2cd312a2b6c9695c5d",
"content_id": "79c72da497b0f920320145333f670d17411b78f5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4032,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 131,
"path": "/fab_deploy/joyent/nginx.py",
"repo_name": "denimboy/red-fab-deploy",
"src_encoding": "UTF-8",
"text": "import os\n\nfrom fabric.api import run, sudo, env, local\nfrom fabric.tasks import Task\n\nDEFAULT_NGINX_CONF = \"nginx/nginx.conf\"\n\nclass NginxInstall(Task):\n \"\"\"\n Install nginx\n\n Takes one optional argument:\n\n * **nginx_conf**: the relative path of the nginx config file\n (that is part of your repo) that you want use\n as your nginx config. If not provided it will\n default to nginx/nginx.conf\n\n Also sets up log rotation\n \"\"\"\n\n name = 'setup'\n\n def run(self, nginx_conf=None, hosts=[]):\n \"\"\"\n \"\"\"\n if not nginx_conf:\n nginx_conf = DEFAULT_NGINX_CONF\n\n self._install_package()\n self._setup_logging()\n self._setup_dirs()\n self._setup_config(nginx_conf=nginx_conf)\n\n def _install_package(self):\n sudo(\"pkg_add nginx\")\n\n def _setup_logging(self):\n sudo('sed -ie \"s/^#nginx\\(.*\\)/nginx\\\\1/g\" /etc/logadm.conf')\n sudo('logadm')\n\n def _setup_dirs(self):\n sudo('mkdir -p /var/www/cache-tmp')\n sudo('mkdir -p /var/www/cache')\n sudo('chown -R www:www /var/www')\n\n def _setup_config(self, nginx_conf=None):\n remote_conv = os.path.join(env.git_working_dir, 'deploy', nginx_conf)\n sudo('ln -sf %s /opt/local/etc/nginx/nginx.conf' % remote_conv)\n\nclass UpdateAppServers(Task):\n \"\"\"\n Build app servers list in your load balancer nginx config.\n\n Finds your load banlancer nginx config by looking up\n the attribute on the task and rebuilds the list of\n app servers.\n\n Changes made by this task are not commited to your repo, or deployed\n anywhere automatically. You should review any changes and commit and\n deploy as appropriate.\n\n This is a serial task, that should not be called directly\n with any remote hosts as it performs no remote actions.\n \"\"\"\n\n START_DELM = \"## Start App Servers ##\"\n END_DELM = \"## End App Servers ##\"\n LINE = \"server %s:8000 max_fails=5 fail_timeout=60s;\"\n START = None\n END = None\n\n name = 'update_app_servers'\n serial = True\n\n def _update_file(self, nginx_conf, section):\n file_path = os.path.join(env.deploy_path, nginx_conf)\n text = [self.START_DELM]\n if self.START:\n text.append(self.START)\n\n for ip in env.config_object.get_list(section, env.config_object.INTERNAL_IPS):\n text.append(self.LINE % ip)\n\n if self.END:\n text.append(self.END)\n text.append(self.END_DELM)\n\n txt = \"\\\\n\".join(text)\n new_path = file_path + '.bak'\n cmd = \"awk '{\\\n tmp = match($0, \\\"%s\\\"); \\\n if (tmp) { \\\n print \\\"%s\\\"; \\\n while(getline>0){tmp2 = match($0, \\\"%s\\\"); if (tmp2) break;} \\\n next;} \\\n {print $0}}' %s > %s\" %(self.START_DELM, txt, self.END_DELM,\n file_path, new_path)\n local(cmd)\n local('mv %s %s' %(new_path, file_path))\n\n def run(self, section=None, nginx_conf=None):\n assert section and nginx_conf\n self._update_file(nginx_conf, section)\n\nclass UpdateAllowedIPs(UpdateAppServers):\n \"\"\"\n Build allowed servers list in your app server nginx config.\n\n Finds your app server nginx config by looking up\n the attribute on the task and rebuilds the list of\n app servers.\n\n Changes made by this task are not commited to your repo, or deployed\n anywhere automatically. 
You should review any changes and commit and\n deploy as appropriate.\n\n This is a serial task, that should not be called directly\n with any remote hosts as it performs no remote actions.\n \"\"\"\n\n START_DELM = \"## Start Allowed IPs ##\"\n END_DELM = \"## End Allowed IPs ##\"\n LINE = \"set_real_ip_from %s;\"\n END = \"real_ip_header X-Cluster-Client-Ip;\"\n\n name = 'update_allowed_ips'\n\nupdate_app_servers = UpdateAppServers()\nupdate_allowed_ips = UpdateAllowedIPs()\nsetup = NginxInstall()\n"
},
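The awk one-liner in UpdateAppServers._update_file splices a fresh server list between the two delimiter comments. The same splice in plain Python, for readers decoding the awk; the config text and IPs below are invented:

def replace_section(text, start, end, new_lines):
    # Swap the lines between the start and end markers, keeping both markers.
    lines = text.splitlines()
    i = lines.index(start)
    j = lines.index(end, i)
    return '\n'.join(lines[:i + 1] + new_lines + lines[j:])

conf = ("upstream app {\n## Start App Servers ##\nserver 10.0.0.2:8000;\n"
        "## End App Servers ##\n}")
print(replace_section(conf, '## Start App Servers ##', '## End App Servers ##',
                      ['server 10.0.0.7:8000 max_fails=5 fail_timeout=60s;']))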
{
"alpha_fraction": 0.6249294877052307,
"alphanum_fraction": 0.6249294877052307,
"avg_line_length": 26.703125,
"blob_id": "1d044557dad9f150767cc0ea8dfa1cb72bfe00dc",
"content_id": "df665c0d5b41a9612599930c132630ec21d7ea13",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1773,
"license_type": "permissive",
"max_line_length": 62,
"num_lines": 64,
"path": "/fab_deploy/joyent/manage.py",
"repo_name": "denimboy/red-fab-deploy",
"src_encoding": "UTF-8",
"text": "from fabric.api import execute, env\nfrom fabric.tasks import Task\n\nfrom fab_deploy import functions\n\nclass FirewallSync(Task):\n \"\"\"\n Updates the firewall on a live server.\n\n Calls ``firewall.update_files`` and then updates the\n remote servers using 'firewall.sync_single'\n\n Takes the same arguments as ``firewall.update_files``\n\n While this task will deploy any changes it makes they\n are not commited to your repo. You should review any\n changes and commit as appropriate.\n \"\"\"\n\n name = 'firewall_sync'\n serial = True\n task_group = 'firewall'\n\n def run(self, section=None):\n \"\"\"\n \"\"\"\n update = '%s.update_files' % self.task_group\n single = '%s.sync_single' % self.task_group\n\n execute(update, section=section, hosts=[])\n if section:\n sections = [section]\n else:\n sections = env.config_object.server_sections()\n\n task = functions.get_task_instance(update)\n for s in sections:\n hosts = env.config_object.get_list(s,\n env.config_object.CONNECTIONS)\n if hosts:\n filename = task.get_section_path(s)\n execute(single, filename=filename,\n hosts=hosts)\n\n\nclass SNMPSync(FirewallSync):\n \"\"\"\n Updates the firewall on a live server.\n\n Calls ``snmp.update_files`` and then updates the\n remote servers using 'snmp.sync_single'\n\n Takes the same arguments as ``snmp.update_files``\n\n While this task will deploy any changes it makes they\n are not commited to your repo. You should review any\n changes and commit as appropriate.\n \"\"\"\n name = 'snmp_sync'\n task_group = 'snmp'\n\n\nfirewall_sync = FirewallSync()\nsnmp_sync = SNMPSync()\n"
},
{
"alpha_fraction": 0.5446686148643494,
"alphanum_fraction": 0.5489913821220398,
"avg_line_length": 25.69230842590332,
"blob_id": "5dd6beda9511ad91f7943a46ca255b6fc3491ebc",
"content_id": "a41832f1fb873c01d4bb326502fd3bed387647e7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 694,
"license_type": "permissive",
"max_line_length": 73,
"num_lines": 26,
"path": "/fab_deploy/joyent/gunicorn.py",
"repo_name": "denimboy/red-fab-deploy",
"src_encoding": "UTF-8",
"text": "from fabric.api import run, sudo, env\nfrom fabric.tasks import Task\n\nclass GunicornInstall(Task):\n \"\"\"\n Install gunicorn and set it up with svcadm.\n \"\"\"\n\n name = 'setup'\n\n def run(self, env_value=None):\n \"\"\"\n \"\"\"\n\n sudo('mkdir -p /var/log/gunicorn')\n sudo('chown -R www:www /var/log/gunicorn')\n\n # Add django log\n sudo('logadm -C 3 -p1d -c -w /var/log/gunicorn/django.log -z 1')\n run('svccfg import /srv/active/deploy/gunicorn/gunicorn.xml')\n\n if env_value:\n run('svccfg -s gunicorn setenv %s %s' % (env.project_env_var,\n env_value))\n\nsetup = GunicornInstall()\n"
},
{
"alpha_fraction": 0.5992865562438965,
"alphanum_fraction": 0.5992865562438965,
"avg_line_length": 23.735294342041016,
"blob_id": "274955c461d3bbf815db79a16f5489712754427d",
"content_id": "e4ce71b59ab88f0d870986f056d3c710c2475815",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 841,
"license_type": "permissive",
"max_line_length": 76,
"num_lines": 34,
"path": "/fab_deploy/amazon/gunicorn.py",
"repo_name": "denimboy/red-fab-deploy",
"src_encoding": "UTF-8",
"text": "import os\n\nfrom fabric.api import sudo, env\nfrom fabric.contrib.files import append\nfrom fabric.tasks import Task\n\n\nclass GunicornInstall(Task):\n \"\"\"\n Set up gunicorn, and set up supervisor to control it.\n \"\"\"\n\n name = 'setup'\n\n def run(self):\n \"\"\"\n \"\"\"\n\n sudo('mkdir -p /var/log/gunicorn')\n sudo('chown -R www-data:www-data /var/log/gunicorn')\n\n # we use supervisor to control gunicorn\n sudo('apt-get -y install supervisor')\n\n conf_file = '/etc/supervisor/supervisord.conf'\n\n gunicorn_conf = os.path.join(env.git_working_dir, 'deploy',\n 'gunicorn', 'supervisor_gunicorn.conf')\n text = 'files = %s' % gunicorn_conf\n\n append(conf_file, text, use_sudo=True)\n sudo('supervisorctl update')\n\nsetup = GunicornInstall()\n"
}
] | 9 |
ibarral18/demo-flask
|
https://github.com/ibarral18/demo-flask
|
b1eb8f2e808d83158b5d650b005d163ec8590063
|
ce677a3d84e2c60d867f1ef9b37cb9ebe39984bb
|
434288dd7644610175cb34bc7c1abe07f731a005
|
refs/heads/main
| 2023-08-15T02:16:18.654442 | 2021-09-22T22:59:16 | 2021-09-22T22:59:16 | 409,060,256 | 1 | 2 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5021038055419922,
"alphanum_fraction": 0.5021038055419922,
"avg_line_length": 20.935483932495117,
"blob_id": "348dc3e865d36b0ce6d4ab3a288602b8d3f2c989",
"content_id": "a32c0b0798c893810063f8a38731375dcfbb30c7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 713,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 31,
"path": "/middleware/context.py",
"repo_name": "ibarral18/demo-flask",
"src_encoding": "UTF-8",
"text": "\r\nimport os\r\n\r\n# This is a bad place for this import\r\nimport pymysql\r\n\r\ndef get_db_info():\r\n \"\"\"\r\n This is crappy code.\r\n\r\n :return: A dictionary with connect info for MySQL\r\n \"\"\"\r\n db_host = os.environ.get(\"DBHOST\", None)\r\n\r\n if db_host is None:\r\n db_info = {\r\n \"host\": \"localhost\",\r\n \"user\": \"dbuser\",\r\n \"password\": \"dbuserdbuser\",\r\n \"cursorclass\": pymysql.cursors.DictCursor\r\n }\r\n\r\n else:\r\n\r\n db_info = {\r\n \"host\": db_host,\r\n \"user\": os.environ.get(\"DBUSER\"),\r\n \"password\": os.environ.get(\"DBPASSWORD\"),\r\n \"cursorclass\": pymysql.cursors.DictCursor\r\n }\r\n\r\n return db_info\r\n"
},
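Since get_db_info returns a dict shaped exactly like pymysql.connect's keyword arguments, callers can splat it straight into a connection. A quick hedged sketch; the query is illustrative only:

import pymysql
from middleware.context import get_db_info

conn = pymysql.connect(**get_db_info())
try:
    with conn.cursor() as cur:
        cur.execute("select 1")
        print(cur.fetchone())
finally:
    conn.close()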
{
"alpha_fraction": 0.6592427492141724,
"alphanum_fraction": 0.6592427492141724,
"avg_line_length": 31.14285659790039,
"blob_id": "8959def2997b4e94cd7d973e2b48aaf39183cab7",
"content_id": "bd4aeb16a6b0fcc0674fdb7d45cf3e72875d9c9f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 449,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 14,
"path": "/application_services/imdb_artists_resource.py",
"repo_name": "ibarral18/demo-flask",
"src_encoding": "UTF-8",
"text": "from application_services.BaseApplicationResource import BaseApplicationResource\nimport database_services.RDBService as d_service\n\n\nclass IMDBArtistResource(BaseApplicationResource):\n\n def __init__(self):\n super().__init__()\n\n @classmethod\n def get_by_name_prefix(cls, name_prefix):\n res = d_service.get_by_prefix(\"IMDBFixed\", \"name_basics\",\n \"primaryName\", name_prefix)\n return res"
},
{
"alpha_fraction": 0.554707407951355,
"alphanum_fraction": 0.6183205842971802,
"avg_line_length": 26.14285659790039,
"blob_id": "9492b01b883514e098e844d6870fe0c9d23e8c18",
"content_id": "82426fe65ae05131e2537057c650f2896afb9c26",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 786,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 28,
"path": "/SQLScripts/create_user.sql",
"repo_name": "ibarral18/demo-flask",
"src_encoding": "UTF-8",
"text": "-- MySQL dump 10.13 Distrib 8.0.17, for macos10.14 (x86_64)\r\n--\r\n-- Host: localhost Database: imdbnew\r\n-- ------------------------------------------------------\r\n-- Server version\t8.0.19\r\n\r\n--\r\n-- Table structure for table `user`\r\n--\r\n\r\nDROP TABLE IF EXISTS `user`;\r\nCREATE TABLE `user` (\r\n `user_no` int NOT NULL AUTO_INCREMENT,\r\n `username` varchar(128) NOT NULL,\r\n `first_name` text,\r\n `last_name` text,\r\n `email` text NOT NULL,\r\n `birth_year` varchar(45) DEFAULT NULL,\r\n `middle_name` text,\r\n `status` varchar(45) DEFAULT NULL,\r\n `created_time` datetime DEFAULT NULL,\r\n `updated_time` datetime DEFAULT NULL,\r\n PRIMARY KEY (`user_no`)\r\n) ENGINE=InnoDB AUTO_INCREMENT=4 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci;\r\n\r\n\r\n\r\n-- Dump completed on 2020-11-29 10:25:16"
},
{
"alpha_fraction": 0.6195820569992065,
"alphanum_fraction": 0.6892414689064026,
"avg_line_length": 33.231788635253906,
"blob_id": "85b528068eb698a40d05283bbca1ddbd5969ea52",
"content_id": "27cf5387f3ccc54d3cd9042acc790ba02e66d76c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 5168,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 151,
"path": "/SQLScripts/create_imdb.sql",
"repo_name": "ibarral18/demo-flask",
"src_encoding": "UTF-8",
"text": "-- MySQL dump 10.13 Distrib 8.0.17, for macos10.14 (x86_64)\n--\n-- Host: localhost Database: imdbnew\n-- ------------------------------------------------------\n-- Server version\t8.0.19\n\n\n/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;\n/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;\n/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;\n/*!50503 SET NAMES utf8 */;\n/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;\n/*!40103 SET TIME_ZONE='+00:00' */;\n/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;\n/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;\n/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;\n/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;\n\n--\n-- Table structure for table `name_basics`\n--\n\nDROP TABLE IF EXISTS `name_basics`;\n/*!40101 SET @saved_cs_client = @@character_set_client */;\n/*!50503 SET character_set_client = utf8mb4 */;\nCREATE TABLE `name_basics` (\n `nconst` varchar(128) DEFAULT NULL,\n `primary_name` varchar(128) DEFAULT NULL,\n `birth_year` varchar(45) DEFAULT NULL,\n `death_year` varchar(45) DEFAULT NULL,\n `primary_profession` varchar(128) DEFAULT NULL,\n `known_for_titles` varchar(128) DEFAULT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4;\n/*!40101 SET character_set_client = @saved_cs_client */;\n\n--\n-- Table structure for table `title_akas`\n--\n\nDROP TABLE IF EXISTS `title_akas`;\n/*!40101 SET @saved_cs_client = @@character_set_client */;\n/*!50503 SET character_set_client = utf8mb4 */;\nCREATE TABLE `title_akas` (\n `titleid` varchar(128) DEFAULT NULL,\n `ordering` varchar(128) DEFAULT NULL,\n `title` text,\n `region` varchar(128) DEFAULT NULL,\n `language` varchar(128) DEFAULT NULL,\n `types` varchar(512) DEFAULT NULL,\n `attributes` varchar(512) DEFAULT NULL,\n `is_original` varchar(45) DEFAULT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4;\n/*!40101 SET character_set_client = @saved_cs_client */;\n\n--\n-- Table structure for table `title_basics`\n--\n\nDROP TABLE IF EXISTS `title_basics`;\n/*!40101 SET @saved_cs_client = @@character_set_client */;\n/*!50503 SET character_set_client = utf8mb4 */;\nCREATE TABLE `title_basics` (\n `tconst` varchar(64) NOT NULL,\n `ttype` varchar(64) DEFAULT NULL,\n `primary_title` varchar(512) DEFAULT NULL,\n `original_title` varchar(512) DEFAULT NULL,\n `is_adult` varchar(32) DEFAULT NULL,\n `start_year` varchar(45) DEFAULT NULL,\n `end_year` varchar(45) DEFAULT NULL,\n `runtime` varchar(45) DEFAULT NULL,\n `genres` varchar(256) DEFAULT NULL,\n PRIMARY KEY (`tconst`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4;\n/*!40101 SET character_set_client = @saved_cs_client */;\n\n--\n-- Table structure for table `title_crew`\n--\n\nDROP TABLE IF EXISTS `title_crew`;\n/*!40101 SET @saved_cs_client = @@character_set_client */;\n/*!50503 SET character_set_client = utf8mb4 */;\nCREATE TABLE `title_crew` (\n `tconst` varchar(128) DEFAULT NULL,\n `directors` text,\n `writer` text\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4;\n/*!40101 SET character_set_client = @saved_cs_client */;\n\n--\n-- Table structure for table `title_episode`\n--\n\nDROP TABLE IF EXISTS `title_episode`;\n/*!40101 SET @saved_cs_client = @@character_set_client */;\n/*!50503 SET character_set_client = utf8mb4 */;\nCREATE TABLE `title_episode` (\n `tconst` varchar(128) NOT NULL,\n `parent` varchar(128) DEFAULT NULL,\n `season_number` text,\n `episode_number` 
text,\n PRIMARY KEY (`tconst`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4;\n/*!40101 SET character_set_client = @saved_cs_client */;\n\n--\n-- Table structure for table `title_principals`\n--\n\nDROP TABLE IF EXISTS `title_principals`;\n/*!40101 SET @saved_cs_client = @@character_set_client */;\n/*!50503 SET character_set_client = utf8mb4 */;\nCREATE TABLE `title_principals` (\n `tconst` varchar(128) DEFAULT NULL,\n `ordering` varchar(45) DEFAULT NULL,\n `nconst` varchar(128) DEFAULT NULL,\n `category` text,\n `job` text,\n `characters` text\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4;\n/*!40101 SET character_set_client = @saved_cs_client */;\n\n--\n-- Table structure for table `title_ratings`\n--\n\nDROP TABLE IF EXISTS `title_ratings`;\n/*!40101 SET @saved_cs_client = @@character_set_client */;\n/*!50503 SET character_set_client = utf8mb4 */;\nCREATE TABLE `title_ratings` (\n `tconst` varchar(128) NOT NULL,\n `rating` varchar(45) DEFAULT NULL,\n `votes` varchar(45) DEFAULT NULL,\n PRIMARY KEY (`tconst`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4;\n/*!40101 SET character_set_client = @saved_cs_client */;\n\n--\n-- Dumping routines for database 'imdbnew'\n--\n/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;\n\n/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;\n/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;\n/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;\n/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;\n/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;\n/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;\n/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;\n\n-- Dump completed on 2020-11-29 10:25:16"
},
{
"alpha_fraction": 0.6730769276618958,
"alphanum_fraction": 0.6840659379959106,
"avg_line_length": 26.153846740722656,
"blob_id": "ab543b707a22893e6f14cf033e0076853ea5dd4f",
"content_id": "a336575f77d5a3690c6b9fc7a124b1829b9daa64",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 364,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 13,
"path": "/application_services/user_service.py",
"repo_name": "ibarral18/demo-flask",
"src_encoding": "UTF-8",
"text": "from application_services.BaseApplicationResource import BaseApplicationResource\r\nimport database_services.RDBService as d_service\r\n\r\n\r\nclass UserResource(BaseApplicationResource):\r\n\r\n def __init__(self):\r\n super().__init__()\r\n\r\n @classmethod\r\n def get_by_template(cls):\r\n res = d_service.get_by_template(\"w6156\", \"user\")\r\n return res"
},
{
"alpha_fraction": 0.585457980632782,
"alphanum_fraction": 0.5864022374153137,
"avg_line_length": 21.488889694213867,
"blob_id": "055051c05cbf8fbdd58a32a4d4aac26b5bf2af96",
"content_id": "b801ac94190d11f0900e3146230cb8ba5dc7f591",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1059,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 45,
"path": "/database_services/RDBService.py",
"repo_name": "ibarral18/demo-flask",
"src_encoding": "UTF-8",
"text": "import pymysql\r\nimport json\r\nimport middleware.context as context\r\n\r\n\r\ndef _get_db_connection():\r\n\r\n db_connect_info = context.get_db_info()\r\n\r\n print(\"Connection info = \\n\", json.dumps(db_connect_info, indent=2, default=str))\r\n\r\n db_connection = pymysql.connect(**db_connect_info)\r\n return db_connection\r\n\r\n\r\ndef get_by_prefix(db_schema, table_name, column_name, value_prefix):\r\n\r\n conn = _get_db_connection()\r\n cur = conn.cursor()\r\n\r\n sql = \"select * from \" + db_schema + \".\" + table_name + \" where \" + \\\r\n column_name + \" like \" + \"'\" + value_prefix + \"%'\"\r\n print(\"SQL Statement = \" + cur.mogrify(sql, None))\r\n\r\n res = cur.execute(sql)\r\n res = cur.fetchall()\r\n\r\n conn.close()\r\n\r\n return res\r\n\r\ndef get_by_template(db_schema, table_name):\r\n\r\n conn = _get_db_connection()\r\n cur = conn.cursor()\r\n\r\n sql = \"select * from \" + db_schema + \".\" + table_name\r\n print(\"SQL Statement = \" + cur.mogrify(sql, None))\r\n\r\n res = cur.execute(sql)\r\n res = cur.fetchall()\r\n\r\n conn.close()\r\n\r\n return res\r\n\r\n"
},
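The queries above are assembled by raw string concatenation, which the module's own comments all but admit is unsafe. A hedged sketch of the same prefix lookup using pymysql's parameter binding; note the schema, table and column names still need whitelisting, since placeholders cannot substitute identifiers:

def get_by_prefix_safe(conn, db_schema, table_name, column_name, value_prefix):
    # Identifiers must come from a trusted whitelist; %s only protects values.
    sql = "select * from {}.{} where {} like %s".format(
        db_schema, table_name, column_name)
    with conn.cursor() as cur:
        cur.execute(sql, (value_prefix + "%",))
        return cur.fetchall()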
{
"alpha_fraction": 0.557692289352417,
"alphanum_fraction": 0.5714285969734192,
"avg_line_length": 16.238094329833984,
"blob_id": "f0d79d65e562c1006a23c3d49ea33274a19cda54",
"content_id": "a36947ba543c952cea04e2239f4e93b7f9ab884b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 364,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 21,
"path": "/database_services/rdb_tests.py",
"repo_name": "ibarral18/demo-flask",
"src_encoding": "UTF-8",
"text": "import database_services.RDBService as db_service\n\n\ndef t1():\n\n res = db_service.get_by_prefix(\n \"imdbfixed\", \"names_basic\", \"primary_name\", \"Tom H\"\n )\n print(\"t1 resule = \", res)\n\n\ndef t2():\n\n res = db_service.find_by_template(\n \"imdbfixed\", \"name_basics\", {\"primaryName\": \"Tom Hanks\"}, None\n )\n print(\"t2 resuls = \", res)\n\n\n\nt2()\n\n\n"
},
{
"alpha_fraction": 0.6628383994102478,
"alphanum_fraction": 0.6874487400054932,
"avg_line_length": 29.256410598754883,
"blob_id": "3760a9111e8df484dc2c08c950b17e9bdf4bf70f",
"content_id": "cfe68b0581b2e96d56b8f338952e66e03cb6c2b7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1219,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 39,
"path": "/app.py",
"repo_name": "ibarral18/demo-flask",
"src_encoding": "UTF-8",
"text": "from flask import Flask, Response\r\nimport database_services.RDBService as d_service\r\nfrom flask_cors import CORS\r\nimport json\r\n\r\nfrom application_services.imdb_artists_resource import IMDBArtistResource\r\nfrom application_services.user_service import UserResource\r\napp = Flask(__name__)\r\nCORS(app)\r\n\r\n\r\[email protected]('/')\r\ndef hello_world():\r\n return 'Hello World!'\r\n\r\n\r\[email protected]('/imdb/artists/<prefix>')\r\ndef get_artists_by_prefix(prefix):\r\n res = IMDBArtistResource.get_by_name_prefix(prefix)\r\n rsp = Response(json.dumps(res), status=200, content_type=\"application/json\")\r\n return rsp\r\n\r\[email protected]('/users')\r\ndef get_users():\r\n res = UserResource.get_by_template()\r\n rsp = Response(json.dumps(res, default = str), status=200, content_type=\"application/json\")\r\n return rsp\r\n\r\n# http://160.39.188.227:5000/w6156/user/first_name/iv\r\n\r\[email protected]('/<db_schema>/<table_name>/<column_name>/<prefix>')\r\ndef get_by_prefix(db_schema, table_name, column_name, prefix):\r\n res = d_service.get_by_prefix(db_schema, table_name, column_name, prefix)\r\n rsp = Response(json.dumps(res), status=200, content_type=\"application/json\")\r\n return rsp\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run(host=\"0.0.0.0\")\r\n"
},
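For completeness, the routes above can be exercised with any HTTP client; a quick sketch with requests against a hypothetical local instance (host, port and lookup values are examples only):

import requests

base = "http://localhost:5000"  # hypothetical host/port
print(requests.get(base + "/imdb/artists/Tom%20H").json())
print(requests.get(base + "/users").json())
print(requests.get(base + "/w6156/user/first_name/iv").json())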
{
"alpha_fraction": 0.6381322741508484,
"alphanum_fraction": 0.6381322741508484,
"avg_line_length": 14.117647171020508,
"blob_id": "cbd45581c0d8d766454d785ab177df8bdd445a99",
"content_id": "571df6287d8fa4ca3ae86c9be41592bb4246c5e0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 257,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 17,
"path": "/application_services/BaseApplicationResource.py",
"repo_name": "ibarral18/demo-flask",
"src_encoding": "UTF-8",
"text": "from abc import ABC, abstractmethod\n\n\nclass BaseApplicationExcetion:\n\n def __init__(self):\n pass\n\n\nclass BaseApplicationResource(ABC):\n\n def __init__(self):\n pass\n\n @abstractmethod\n def get_links(self, resource_data):\n pass\n"
},
{
"alpha_fraction": 0.6437054872512817,
"alphanum_fraction": 0.6484560370445251,
"avg_line_length": 29.14285659790039,
"blob_id": "d6e5f46cd7f435431dcc83354e10641475e5550a",
"content_id": "aa104f9ec2cce0c4be58836b2b3680ea4bed85db",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 421,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 14,
"path": "/application_services/UsersResource/user_service.py",
"repo_name": "ibarral18/demo-flask",
"src_encoding": "UTF-8",
"text": "from application_services.BaseApplicationResource import BaseApplicationResource\nimport database_services.RDBService as d_service\n\n\nclass UserResource(BaseApplicationResource):\n\n def __init__(self):\n super().__init__()\n\n @classmethod\n def get_by_template(cls, template):\n res = d_service.find_by_template(\"aaaaF21\", \"users\",\n template, None)\n return res"
}
] | 10 |
buoyad/CommRobot
|
https://github.com/buoyad/CommRobot
|
a8fe8346dff7f086e95a48ce84d03ed46f1bdc5f
|
d177ec51efa253eb5eadd6be4affe9ea3cc11110
|
f9484f0deff0e0810129d557168201d8e9aa8285
|
refs/heads/master
| 2016-08-11T19:15:38.799662 | 2016-04-15T01:42:02 | 2016-04-15T01:42:02 | 52,050,488 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.637499988079071,
"alphanum_fraction": 0.675000011920929,
"avg_line_length": 11.307692527770996,
"blob_id": "7da0eb9931bda32a463b7a7bf9fa03cba083910d",
"content_id": "ec421ce639d16e7b4e295c2e7569a62726eb33a7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 160,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 13,
"path": "/modemTest.py",
"repo_name": "buoyad/CommRobot",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python\nimport serial\n\nm = serial.Serial()\nm.port = '/dev/ttyUSB2'\nm.baudrate = 19200\n\nm.open()\n\nm.flushInput()\n\nwhile True:\n\tprint m.readline()\n"
},
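modemTest.py opens the port with no timeout, so readline() blocks indefinitely whenever the modem goes quiet. A sketch of the same loop with a read timeout, which lets the program notice silence and clean up (port name and baud rate are the script's values, not verified here):

import serial

m = serial.Serial()
m.port = '/dev/ttyUSB2'
m.baudrate = 19200
m.timeout = 2            # seconds; readline() returns empty on expiry
m.open()
m.flushInput()
try:
    while True:
        line = m.readline()
        if not line:     # timed out with no data
            print('no data, still listening...')
            continue
        print(line)
finally:
    m.close()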
{
"alpha_fraction": 0.37272727489471436,
"alphanum_fraction": 0.7090908885002136,
"avg_line_length": 54,
"blob_id": "06e7cd3dcd9490cee6dbe961747188ab49cc1ef3",
"content_id": "f554af66c81807d3dea5360dae0a633adb1dc31a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 110,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 2,
"path": "/demo/mavproxy.sh",
"repo_name": "buoyad/CommRobot",
"src_encoding": "UTF-8",
"text": "#! /bin/bash\nmavproxy.py --master tcp:169.254.24.153:5760 --out /dev/ttyUSB0,57600 --out 169.254.24.153:14550\n"
},
{
"alpha_fraction": 0.6265822649002075,
"alphanum_fraction": 0.6708860993385315,
"avg_line_length": 13.363636016845703,
"blob_id": "c42ab9c82650fb7490ae06495b8f2503b39b2d38",
"content_id": "a828856461a8475e8bfea8c2345c7e191ade8549",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 158,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 11,
"path": "/test.py",
"repo_name": "buoyad/CommRobot",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python\nimport serial\n\ns = serial.Serial()\ns.port = '/dev/ttyUSB0'\ns.baudrate = 19200\ns.timeout = .5\ns.open()\n\nwhile True:\n\tprint s.readline()\n"
},
{
"alpha_fraction": 0.2985938787460327,
"alphanum_fraction": 0.31927213072776794,
"avg_line_length": 34.5,
"blob_id": "7b7e993620c3c0fc3df82c5bf20df86ae44ee378",
"content_id": "a9051de239637fe088d62c64e9c580223baaf524",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1209,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 34,
"path": "/demo/testSend.py",
"repo_name": "buoyad/CommRobot",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python\nimport serial, time, sys\n\nwhoi = serial.Serial(timeout=None)\nwhoi.port = '/dev/ttyUSB0'\nwhoi.baudrate = 19200\n\nwhoi.open();\nwhoi.flushInput()\nwhoi.flushOutput()\n \nwhoi.write('$CCCFG,REV,0\\r\\n'); \nwhoi.write('$CCCFG,ASD,0\\r\\n'); \nwhoi.write('$CCCFG,SRC,1\\r\\n'); \nwhoi.write('$CCCFG,RXA,0\\r\\n'); \nwhoi.write('$CCCFG,RXD,1\\r\\n'); \nwhoi.write('$CCCFG,DTO,5\\r\\n'); \nwhoi.write('$CCCFG,AGN,0\\r\\n'); \nwhoi.write('$CCCFG,XST,0\\r\\n'); \n\ntime.sleep(1);\n\nwhoi.write('$CCMUC,0,1,1FFF'+'\\r\\n')\n\nx=0\nwhile True: \n\tprint(whoi.readline())\n\ttime.sleep(.05)\n\tx=x+1\n\tif (x >= 200): \n\t\tbreak\n\t \n \nser.close(); \n"
},
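testSend.py writes each $CCCFG sentence by hand; the same sequence can be driven from a table of (key, value) pairs, which also mirrors configModem() in WHOI.py further down. A sketch under that assumption (config_modem is a made-up helper; the pairs are the script's own values, and .encode() is added because pyserial on Python 3 expects bytes):

import serial, time

CONFIG = [('REV', 0), ('ASD', 0), ('SRC', 1), ('RXA', 0),
          ('RXD', 1), ('DTO', 5), ('AGN', 0), ('XST', 0)]

def config_modem(port, pairs):
    # one $CCCFG sentence per (key, value) pair
    for key, val in pairs:
        port.write(('$CCCFG,%s,%s\r\n' % (key, val)).encode('ascii'))

whoi = serial.Serial()
whoi.port = '/dev/ttyUSB0'
whoi.baudrate = 19200
whoi.open()
config_modem(whoi, CONFIG)
time.sleep(1)
whoi.close()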
{
"alpha_fraction": 0.5480109453201294,
"alphanum_fraction": 0.554183840751648,
"avg_line_length": 23.644067764282227,
"blob_id": "0ce24a9d86966910cd355a9efe6f63fd99651b66",
"content_id": "4fa0e6473e1c9f2fa839d375ef9f198aae7a0dd6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1458,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 59,
"path": "/RFLink.py",
"repo_name": "buoyad/CommRobot",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python\nimport serial, threading\n#from dronekit import connect, VehicleMode\nfrom pymavlink import mavlinkv10 as mavlink\n\n\nclass fifo(object):\n def __init__(self):\n self.buf = []\n def write(self, data):\n self.buf += data\n return len(data)\n def read(self):\n return self.buf.pop(0)\n\nclass rfLink (threading.Thread):\n\tsrc = None\n\tdest = None\n\tname = None\n\tmav = None\n\tdef __init__(self, src, dest, name, **kwds):\n\t\tthreading.Thread.__init__(self)\n\t\tself.src = src\n\t\tself.dest = dest\n\t\tself.name = name\n\t\tself.f = fifo()\n\t\tself.mav = mavlink.MAVLink(self.f)\n\t\ttry:\n\t\t\tif(kwds['daemon']): \n\t\t\t\tself.daemon = True\n\t\t\telse:\n\t\t\t\tself.daemon = False\n\t\texcept KeyError:\n\t\t\tpass\n\t\n\tdef run(self):\n\t\tself.src.open()\n\t\tself.dest.open()\n\t\tself.src.flushInput()\n\t\tself.dest.flushOutput()\n\t\tself.handover()\n\n\tdef handover(self):\n\t\tprint 'Started ' + self.name\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\tg = self.mav.parse_buffer(self.src.read())\n\t\t\texcept mavlink.MAVError:\n\t\t\t\tpass\n\t\t\tif (g!=None):\n\t\t\t\t#self.dest.write(str(bytearray(self.f)))\n\t\t\t\tprint(g)\n print(g[0].type)\n print(g[0].autopilot)\n print(g[0].base_mode)\n print(g[0].custom_mode)\n print(g[0].system_status)\n print(g[0].mavlink_version)\n\t\t\t\tself.src.write(str(bytearray(self.f.buf)))\n\t\t\t\t"
},
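The fifo class above is the small in-memory stream that pymavlink writes packed bytes into; read() hands them back one element at a time, and rfLink later reassembles the remaining buffer with bytearray(self.f.buf). The buffering mechanics in isolation, with no pymavlink dependency:

class Fifo(object):
    # file-like byte buffer: write() appends, read() pops in FIFO order
    def __init__(self):
        self.buf = []
    def write(self, data):
        self.buf += data        # extending a list with bytes yields ints on Python 3
        return len(data)
    def read(self):
        return self.buf.pop(0)

f = Fifo()
f.write(b'\xfe\x09\x00')        # stand-in for packed MAVLink bytes
print(bytes(bytearray(f.buf)))  # reassemble without consuming: b'\xfe\t\x00'
while f.buf:
    print(f.read())             # 254, 9, 0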
{
"alpha_fraction": 0.5874758958816528,
"alphanum_fraction": 0.6242774724960327,
"avg_line_length": 31.037036895751953,
"blob_id": "ba93a7da7cf9b4783729bded604aca20880d8769",
"content_id": "2a4c945cf6775069f02dbae7c751939d8cee7827",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5190,
"license_type": "no_license",
"max_line_length": 205,
"num_lines": 162,
"path": "/WHOI.py",
"repo_name": "buoyad/CommRobot",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python\nimport serial, time, threading, Queue, math\nfrom pymavlink import mavlinkv10 as mavlink\nfrom pymavlink import mavutil\nfrom dronekit import connect, VehicleMode\nRxQueue = Queue.Queue()\n\n\nclass fifo(object):\n\tdef __init__(self):\n\t\tself.buf = []\n\tdef write(self, data):\n\t\tself.buf += data\n\t\treturn len(data)\n\tdef read(self):\n\t\treturn self.buf.pop(0)\n\n\nclass serialRead(threading.Thread):\n\ts = serial.Serial()\n\tport = None\n\trate = 19200 # Probably\n\n\tdef __init__(self, port):\n\t\tthreading.Thread.__init__(self)\n\t\tself.port = port\n\t\tself.initSerial()\n\n\tdef initSerial(self):\n\t\tself.s.port = self.port\n\t\tself.s.baudrate = self.rate\n\t\tself.s.timeout = 0.5\n\t\tself.s.open()\n\t\tself.s.flushInput()\n\t\tself.s.flushOutput()\n\n\tdef run(self):\n\t\t# Only run AFTER modem is connected and turned on\n\t\tself.initSerial()\n\t\tself.configModem()\n\t\twhile True:\n\t\t\tmsg = self.s.readline()\n\t\t\tif (len(msg) != 0):\n\t\t\t\tRxQueue.put(msg)\n\n\tdef configModem(self): # From uModem.py, by Mike Puntolillo, 2011\n\t\tself.s.write('$CCCFG,REV,0\\r\\n');\n\t\tself.s.write('$CCCFG,ASD,0\\r\\n');\n\t\tself.s.write('$CCCFG,RXA,0\\r\\n');\n\t\tself.s.write('$CCCFG,RXD,1\\r\\n');\n\t\tself.s.write('$CCCFG,DTO,5\\r\\n');\n\t\tself.s.write('$CCCFG,AGN,200\\r\\n');\n\t\tself.s.write('$CCCFG,XST,0\\r\\n');\n\t\tself.s.write('$CCCFG,DOP,1\\r\\n');\n\t\tprint('Modem configured\\n')\n\nclass Beats(threading.Thread):\n\tmav = None\n\ts = None\n\n\tdef __init__(self):\n\t\tthreading.Thread.__init__(self)\n\t\tMAVBuffer = fifo()\n\t\tself.mav = mavlink.MAVLink(MAVBuffer)\n\t\tself.mav.srcSystem = 255\n\t\tself.mav.srcComponent = 190\n\t\tself.s = serial.Serial()\n\t\tself.s.port = '/dev/ttyUSB0' # Check validity\n\t\tself.s.baudrate = 57600\n\n\tdef run(self):\n\t\tself.s.open\n\t\tself.s.flushOutput()\n\t\tself.s.flushInput()\n\t\tprint('Starting listener...\\n')\n\t\twhile True:\n\t\t\t# Type, APM, base_mode, custom_mode, system_status, mavlink version\n\t\t\tmsg = self.mac.MAVLink_heartbeat_message(6, 3, x, y, z, 10) # x = base_mode, y = custom_mode, z = system_status\n\t\t\tself.s.write(str((bytearray(msg.pack()))))\n\t\t\ttime.sleep(30)\n\n\nclass WHOI(threading.Thread):\n\ts = None # Modem reader\n\tpacketQueue = Queue.Queue() # Parsed MAVLINK to be sent to APM\n\tret = 65536 # 2^16, to retain old channel value\n\tmav = None\n\tmavsink = None\n\n\tdef __init__(self, port, mavsink):\n\t\tthreading.Thread.__init__(self)\n\t\tself.s = serialRead(port)\n\t\tMAVBuffer = fifo()\n\t\tself.mav = mavlink.MAVLink(MAVBuffer)\n\t\tself.mav.srcSystem = 255\n\t\tself.mav.srcComponent = 190\n\t\tself.mavsink = mavsink\n\n\tdef packCMD(self, xv, yv):\n\t\treturn self.mavsink.message_factory.set_position_target_global_int_encode(0, 0, 0, mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT_INT, 0b0000111111000111, 0,0,0, xv,yv,0, 0,0,0,0,0)\n\n\tdef run(self):\n\t\tself.s.start()\n\t\t#hBeats = Beats()\n\t\t#hBeats.start()\n\t\twhile True:\n\t\t\tif not RxQueue.empty():\n\t\t\t\tmsg = self.receive()\n\t\t\t\tprint(msg);\n\t\t\t\ttime.sleep(.05);\n\t\t\t\t#self.mavsink.send_mavlink(self.mavsink.message_factory.rc_channel_override_encode(\n\t\t\t\tif msg.startswith('$CAMUA'):\t\t# Mini packet received\n\t\t\t\t\ttheta = self.mavsink.heading*math.pi/180\n\t\t\t\t\txv = -1*math.sin(theta)\n\t\t\t\t\tyv = -1*math.cos(theta)\n\t\t\t\t\t# FORWARD: (xv, yv)\n\t\t\t\t\t# BACKWARD: (-xv, -yv)\n\t\t\t\t\t# LEFT: (yv, -xv)\n\t\t\t\t\t# RIGHT: (-yv, 
xv)\n\t\t\t\t\t# msg = self.mavsink.message_factory.set_position_target_global_int_encode(0,0,0,mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT_INT, 0b0000111111000111, 0,0,0, xvelocity,yvelocity,zvelocity, 0,0,0,0,0)\n\t\t\t\t\t# self.mavsink.send_mavlink(msg)\n\t\t\t\t\tv = msg.split(',')\t\t\t\t# Delineate message values\n\t\t\t\t\tdata = (v[3].split('*'))[0]\t\t# Strip XOR value off data\n\t\t\t\t\tdata = int(data, 16)\t\t\t# Cast data as int\n\t\t\t\t\tbnd = bin(data)\t\t\t\t# Convert to binary string\n\t\t\t\t\tcmd = bnd[2:5]\t\t\t\t\t# Format \"0bXXXXXXXXXXXXX\" strip \"ob\"\n\t\t\t\t\tprint('bnd: ' + bnd + ' cmd: ' + cmd)\n\t\t\t\t\tval = bnd[5:]\t\t\t\t\t\n\t\t\t\t\tch_raw = int(val, 2) + 1000\t\t# Convert rest of value to string\n\t\t\t\t\t# Pack appropriate MAVLINK message, send along.\n\t\t\t\t\tif cmd == '001':\t# RC Override Channel 1\n\t\t\t\t#\t\tmav_pack = self.mav.MAVLink_rc_channels_override_message(\n\t\t\t\t\t\tprint('RC Override 001 received');\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t100, 100, ch_raw, ret, \n\t\t\t\t#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tret, ret, ret, ret, ret, ret)\n\t\t\t\t#\t\tself.packetQueue.put(mav_pack.pack(mav))\n\t\t\t\t\telif cmd == '010':\t# RC Override Channel 2\n\t\t\t\t\t\tprint('RC Override 010 received')\n\t\t\t\t#\t\tmav_pack = self.mav.MAVLink_rc_channels_override_message(\n\t\t\t\t#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t100, 100, ret, ch_raw, \n\t\t\t\t#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tret, ret, ret, ret, ret, ret)\n\t\t\t\t#\t\tself.packetQueue.put(mav_pack.pack(mav))\n\t\t\t\t#\telif cmd == '010':\t# RC Override Channel 3\n\t\t\t\t#\t\tmav_pack = self.mav.MAVLink_rc_channels_override_message(\n\t\t\t\t#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t100, 100, ret, ret, \n\t\t\t\t#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tch_raw, ret, ret, ret, ret, ret)\n\t\t\t\t#\t\tself.packetQueue.put(mav_pack.pack(mav))\n\t\t\t\t#\telif cmd == '011':\t# RC Override Channel \n\t\t\t\t#\t\tmav_pack = self.mav.MAVLink_rc_channels_override_message(\n\t\t\t\t#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t100, 100, ret, ret, \n\t\t\t\t#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tret, ch_raw, ret, ret, ret, ret)\n\t\t\t\t#\t\tself.packetQueue.put(mav_pack.pack(mav))\n\t\t\t\t#\telif cmd == '100':\n\t\t\t\t#\t\tpass\n\t\t\t\t#\telif cmd == '101':\n\t\t\t\t#\t\tpass\n\t\t\t\t#\telif cmd == '110':\n\t\t\t\t#\t\tpass\n\t\t\t\t#\telif cmd == '111':\n\t\t\t\t#\t\tpass\n\tdef receive(self):\n\t\tmsg = RxQueue.get()\n\t\treturn msg\n"
}
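The $CAMUA branch in WHOI.run packs a 3-bit command and a channel offset into a single hex field. A worked decode of the exact payload testSend.py transmits ($CCMUC,0,1,1FFF); the checksum digits below are placeholders:

msg = '$CAMUA,0,1,1FFF*1D'           # field layout from the code; checksum invented
v = msg.split(',')                    # ['$CAMUA', '0', '1', '1FFF*1D']
data = int(v[3].split('*')[0], 16)    # strip the XOR checksum, parse hex -> 8191
bnd = bin(data)                       # '0b1111111111111'
cmd = bnd[2:5]                        # first three bits select the command: '111'
ch_raw = int(bnd[5:], 2) + 1000       # remaining ten bits + 1000 -> 2023
print(cmd, ch_raw)

One caveat worth noting: bin() drops leading zeros, so a payload whose top bit is 0 (e.g. a '001' command) would slice incorrectly here; zero-padding the binary string to a fixed width would make the decode robust.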
] | 6 |
tommyli3318/desktop-pet
|
https://github.com/tommyli3318/desktop-pet
|
0ec34ddb654e499810a809fafeac725001cf8644
|
89ec03a6574cf53128edaeb031003bb26940a48b
|
5c0a886e9d1cb833f2ce38216454ae57b5cbd36f
|
refs/heads/master
| 2023-04-01T10:15:11.253919 | 2021-03-28T04:55:14 | 2021-03-28T04:55:14 | 351,604,752 | 2 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.573943018913269,
"alphanum_fraction": 0.5828343033790588,
"avg_line_length": 41.393699645996094,
"blob_id": "e402c05faee9face3e13d46d967af3e99f0aa585",
"content_id": "0b35f2b5a8a4dd12c36d46ec5df5eea93c3ce735",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5511,
"license_type": "no_license",
"max_line_length": 145,
"num_lines": 127,
"path": "/main.py",
"repo_name": "tommyli3318/desktop-pet",
"src_encoding": "UTF-8",
"text": "import tkinter\r\nimport os\r\nimport random\r\nfrom platform import system\r\n\r\nclass Pet:\r\n def __init__(self):\r\n self.root = tkinter.Tk() # create window\r\n self.delay = 200 # delay in ms\r\n self.pixels_from_right = 200 # change to move the pet's starting position\r\n self.pixels_from_bottom = 200 # change to move the pet's starting position\r\n self.move_speed = 6 # change how fast the pet moves in pixels\r\n\r\n # initialize frame arrays\r\n self.animation = dict(\r\n idle = [tkinter.PhotoImage(file=os.path.abspath('gifs/idle.gif'), format = 'gif -index %i' % i) for i in range(5)],\r\n idle_to_sleep = [tkinter.PhotoImage(file=os.path.abspath('gifs/idle-to-sleep.gif'), format = 'gif -index %i' % i) for i in range(8)],\r\n sleep = [tkinter.PhotoImage(file=os.path.abspath('gifs/sleep.gif'), format = 'gif -index %i' % i) for i in range(3)]*3,\r\n sleep_to_idle = [tkinter.PhotoImage(file=os.path.abspath('gifs/sleep-to-idle.gif'), format = 'gif -index %i' % i) for i in range(8)],\r\n walk_left = [tkinter.PhotoImage(file=os.path.abspath('gifs/walk-left.gif'), format = 'gif -index %i' % i) for i in range(8)],\r\n walk_right = [tkinter.PhotoImage(file=os.path.abspath('gifs/walk-right.gif'),format = 'gif -index %i' % i) for i in range(8)]\r\n )\r\n\r\n # window configuration\r\n self.root.overrideredirect(True) # remove UI\r\n if system() == 'Windows':\r\n self.root.wm_attributes('-transparent','black')\r\n else: # platform is Mac/Linux\r\n # https://stackoverflow.com/questions/19080499/transparent-background-in-a-tkinter-window\r\n self.root.wm_attributes('-transparent', True) # do this for mac, but the bg stays black\r\n self.root.config(bg='systemTransparent')\r\n \r\n self.root.attributes('-topmost', True) # put window on top\r\n self.root.bind(\"<Button-1>\", self.onLeftClick)\r\n self.root.bind(\"<Button-2>\", self.onRightClick)\r\n self.root.bind(\"<Button-3>\", self.onRightClick)\r\n self.root.bind(\"<Key>\", self.onKeyPress)\r\n self.label = tkinter.Label(self.root,bd=0,bg='black') # borderless window\r\n if system() != 'Windows':\r\n self.label.config(bg='systemTransparent')\r\n self.label.pack()\r\n \r\n screen_width = self.root.winfo_screenwidth() # width of the entire screen\r\n screen_height = self.root.winfo_screenheight() # height of the entire screen\r\n self.min_width = 10 # do not let the pet move beyond this point\r\n self.max_width = screen_width-110 # do not let the pet move beyond this point\r\n \r\n # change starting properties of the window\r\n self.curr_width = screen_width-self.pixels_from_right\r\n self.curr_height = screen_height-self.pixels_from_bottom\r\n self.root.geometry('%dx%d+%d+%d' % (100, 100, self.curr_width, self.curr_height))\r\n \r\n\r\n def update(self, i, curr_animation):\r\n # print(\"Curently: %s\" % curr_animation)\r\n self.root.attributes('-topmost', True) # put window on top\r\n animation_arr = self.animation[curr_animation]\r\n frame = animation_arr[i]\r\n self.label.configure(image=frame)\r\n \r\n # move the pet if needed\r\n if curr_animation in ('walk_left', 'walk_right'):\r\n self.move_window(curr_animation)\r\n \r\n i += 1\r\n if i == len(animation_arr):\r\n # reached end of this animation, decide on the next animation\r\n next_animation = self.getNextAnimation(curr_animation)\r\n self.root.after(self.delay, self.update, 0, next_animation)\r\n else:\r\n self.root.after(self.delay, self.update, i, curr_animation)\r\n\r\n\r\n def onLeftClick(self, event):\r\n print(\"detected left click\")\r\n \r\n \r\n def onRightClick(self, 
event):\r\n self.quit()\r\n\r\n\r\n def onKeyPress(self, event):\r\n if event.char in ('q', 'Q'):\r\n self.quit()\r\n \r\n \r\n def move_window(self, curr_animation):\r\n if curr_animation == 'walk_left':\r\n if self.curr_width > self.min_width:\r\n self.curr_width -= self.move_speed\r\n \r\n elif curr_animation == 'walk_right':\r\n if self.curr_width < self.max_width:\r\n self.curr_width += self.move_speed\r\n\r\n self.root.geometry('%dx%d+%d+%d' % (100, 100, self.curr_width, self.curr_height))\r\n \r\n\r\n def getNextAnimation(self, curr_animation):\r\n if curr_animation == 'idle':\r\n return random.choice(['idle', 'idle_to_sleep', 'walk_left', 'walk_right'])\r\n elif curr_animation == 'idle_to_sleep':\r\n return 'sleep'\r\n elif curr_animation == 'sleep':\r\n return random.choice(['sleep', 'sleep_to_idle'])\r\n elif curr_animation == 'sleep_to_idle':\r\n return 'idle'\r\n elif curr_animation == 'walk_left':\r\n return random.choice(['idle', 'walk_left', 'walk_right'])\r\n elif curr_animation == 'walk_right':\r\n return random.choice(['idle', 'walk_left', 'walk_right'])\r\n \r\n \r\n def run(self):\r\n self.root.after(self.delay, self.update, 0, 'idle') # start on idle\r\n self.root.mainloop()\r\n \r\n \r\n def quit(self):\r\n self.root.destroy()\r\n\r\n\r\nif __name__ == '__main__':\r\n print('Initializing your desktop pet...')\r\n print('To quit, right click on the pet')\r\n pet = Pet()\r\n pet.run()\r\n"
},
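update() re-arms itself with root.after, which is the standard way to animate tkinter without blocking mainloop(). A stripped-down sketch of the same scheduling loop (text frames stand in for the PhotoImage GIF frames):

import tkinter

root = tkinter.Tk()
label = tkinter.Label(root, font=('Courier', 32))
label.pack()
frames = ['|', '/', '-', '\\']     # stand-ins for GIF animation frames

def update(i):
    label.configure(text=frames[i])
    # schedule the next frame 200 ms out, wrapping at the end of the cycle
    root.after(200, update, (i + 1) % len(frames))

root.after(200, update, 0)
root.mainloop()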
{
"alpha_fraction": 0.6976127028465271,
"alphanum_fraction": 0.73209547996521,
"avg_line_length": 25.071428298950195,
"blob_id": "79352af26dc761348358eb33f4c4226ab47f0636",
"content_id": "644bdbfe3a6b68f2b904648900ce36c1d5de2973",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 377,
"license_type": "no_license",
"max_line_length": 158,
"num_lines": 14,
"path": "/readme.md",
"repo_name": "tommyli3318/desktop-pet",
"src_encoding": "UTF-8",
"text": "# About\r\nAn animated pet that moves around on your desktop, using tkinter in Python.\r\n\r\n\r\n# Credits\r\nGifs and idea came from [this blog](https://medium.com/analytics-vidhya/create-your-own-desktop-pet-with-python-5b369be18868), but all of my code is original.\r\n\r\n\r\n# How to run\r\n```\r\ngit clone https://github.com/tommyli3318/desktop-pet.git\r\ncd desktop-pet\r\npython main.py\r\n```"
}
] | 2 |
pshushereba/Data-Structures
|
https://github.com/pshushereba/Data-Structures
|
bf3f5eb21f0d135cc26eaff0f5958078c9b62f2d
|
865b7809daf8856e59930eedd51af3dd42b8b191
|
0fe842734103f10b8400c16d71ed5e6c9dd4a400
|
refs/heads/master
| 2022-12-23T22:17:14.713037 | 2020-10-07T01:37:23 | 2020-10-07T01:37:23 | 265,428,046 | 0 | 0 | null | 2020-05-20T02:33:39 | 2020-05-20T02:33:40 | 2020-10-07T01:37:24 | null |
[
{
"alpha_fraction": 0.572452962398529,
"alphanum_fraction": 0.574544370174408,
"avg_line_length": 34.2315788269043,
"blob_id": "7a4ca55ed66c88f6a53bd4749f1e08d0ce798a75",
"content_id": "3b24636c95bac4bd7f0cbb878c7a1d0e50c7a5e2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3347,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 95,
"path": "/lru_cache/lru_cache.py",
"repo_name": "pshushereba/Data-Structures",
"src_encoding": "UTF-8",
"text": "import sys\nsys.path.append('/doubly_linked_list.py')\nfrom doubly_linked_list import *\n\nfrom collections import OrderedDict\n\nclass LRUCache:\n \"\"\"\n Our LRUCache class keeps track of the max number of nodes it\n can hold, the current number of nodes it is holding, a doubly-\n linked list that holds the key-value entries in the correct\n order, as well as a storage dict that provides fast access\n to every node stored in the cache.\n \"\"\"\n def __init__(self, limit=10):\n self.limit = limit\n self.storage = DoublyLinkedList()\n self.cache_order = OrderedDict()\n self.size = self.storage.length\n\n\n \"\"\"\n Retrieves the value associated with the given key. Also\n needs to move the key-value pair to the end of the order\n such that the pair is considered most-recently used.\n Returns the value associated with the key or None if the\n key-value pair doesn't exist in the cache.\n \"\"\"\n def get(self, key):\n if key not in self.cache_order:\n return None\n \n node = self.storage.head\n while node is not None:\n if key == node.value[0]:\n self.storage.move_to_front(node)\n break\n node = node.next\n return self.cache_order[key]\n \n \n # Look up value by key\n # if key not in self.cache_order:\n # return None\n # else:\n # self.cache_order.move_to_end(key)\n # return self.cache_order[key]\n #node = self.storage # add to dictionary; don't use ListNode\n # move node to head of list\n # node.add_to_head()\n \n\n \"\"\"\n Adds the given key-value pair to the cache. The newly-\n added pair should be considered the most-recently used\n entry in the cache. If the cache is already at max capacity\n before this entry is added, then the oldest entry in the\n cache needs to be removed to make room. Additionally, in the\n case that the key already exists in the cache, we simply\n want to overwrite the old value associated with the key with\n the newly-specified value.\n \"\"\"\n def set(self, key, val):\n # if key is already stored, overwrite old value\n if key in self.cache_order:\n # overwrite in dictionary\n self.cache_order[key] = val\n # overwrite in DLL.\n # iterate across and find node to be updated.\n node = self.storage.head\n while node is not None:\n # check key equality\n if key == node.value[0]:\n # and update the value\n node.value[1] = val\n # move to head of DLL.\n self.storage.move_to_front(node)\n break\n node = node.next\n else:\n # handle case where the cache is already full.\n if self.size == self.limit:\n # delete something\n node = self.storage.tail\n old_key = node.value[0]\n self.storage.remove_from_tail()\n\n del self.cache_order[old_key]\n else:\n self.size += 1 \n\n # if key isn't stored, and we are not full, just add to cache\n \n self.cache_order[key] = val\n self.storage.add_to_head([key, val])\n"
},
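The commented-out lines in get() point at the simpler design this class deliberately avoids: an OrderedDict can carry both the storage and the recency ordering by itself, with move_to_end() on every access and popitem(last=False) for eviction. A minimal sketch of that alternative (not the class above, which keeps the DLL for the exercise):

from collections import OrderedDict

class SimpleLRU:
    def __init__(self, limit=10):
        self.limit = limit
        self.cache = OrderedDict()

    def get(self, key):
        if key not in self.cache:
            return None
        self.cache.move_to_end(key)          # mark as most recently used
        return self.cache[key]

    def set(self, key, val):
        if key in self.cache:
            self.cache.move_to_end(key)
        self.cache[key] = val
        if len(self.cache) > self.limit:
            self.cache.popitem(last=False)   # evict the least recently used

lru = SimpleLRU(limit=2)
lru.set('a', 1); lru.set('b', 2); lru.get('a'); lru.set('c', 3)
print(list(lru.cache))                       # ['a', 'c'] -- 'b' was evicted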
{
"alpha_fraction": 0.5637627840042114,
"alphanum_fraction": 0.5674560070037842,
"avg_line_length": 28.139240264892578,
"blob_id": "f1c77a4bd69411084e2cf97a657ef50b5a58e5b0",
"content_id": "bdf8add58e43a642f06be94e6cd32af2dcc3d1db",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4603,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 158,
"path": "/binary_search_tree/binary_search_tree.py",
"repo_name": "pshushereba/Data-Structures",
"src_encoding": "UTF-8",
"text": "\"\"\"\nBinary search trees are a data structure that enforce an ordering over \nthe data they store. That ordering in turn makes it a lot more efficient \nat searching for a particular piece of data in the tree. \n\nThis part of the project comprises two days:\n1. Implement the methods `insert`, `contains`, `get_max`, and `for_each`\n on the BSTNode class.\n2. Implement the `in_order_print`, `bft_print`, and `dft_print` methods\n on the BSTNode class.\n\"\"\"\nimport sys\n\nsys.path.append('/stack.py')\nfrom stack import Stack\n\nsys.path.append('/queue.py')\nfrom queue import Queue\n\nclass BSTNode:\n def __init__(self, value):\n self.value = value\n self.left = None\n self.right = None\n\n # Insert the given value into the tree\n def insert(self, value):\n if value >= self.value:\n if self.right is None:\n self.right = BSTNode(value)\n else:\n self.right.insert(value)\n else:\n if self.left is None:\n self.left = BSTNode(value)\n else:\n self.left.insert(value)\n\n # Return True if the tree contains the value\n # False if it does not\n # def contains(self, target):\n # node = self\n # while node.value != target:\n # if node.value < target:\n # node = node.right\n # else:\n # node = node.left\n # if node is None:\n # return False\n # return True\n\n def contains(self, target):\n if self.value == target:\n return True\n\n elif target >= self.value:\n if self.right is not None:\n return self.right.contains(target)\n\n # Return the maximum value found in the tree\n def get_max(self):\n max_value = 0\n node = self\n while True:\n if node.value > max_value:\n max_value = node.value\n elif node.right == None:\n return max_value\n else:\n node = node.right\n return max_value\n # Call the function `fn` on the value of each node\n def for_each(self, fn):\n fn(self.value)\n\n if self.left:\n self.left.for_each(fn)\n\n if self.right:\n self.right.for_each(fn)\n\n return None\n\n # Part 2 -----------------------\n\n # Print all the values in order from low to high\n # Hint: Use a recursive, depth first traversal\n def in_order_print(self, node):\n if node.left:\n node.left.in_order_print(node.left)\n\n print(node.value)\n\n if node.right:\n node.right.in_order_print(node.right)\n\n # Print the value of every node, starting with the given node,\n # in an iterative breadth first traversal\n def bft_print(self, node):\n bft_queue = Queue()\n bft_queue.enqueue(node)\n \n while bft_queue.__len__ != 0:\n current_node = bft_queue.dequeue()\n if current_node.left is not None:\n bft_queue.enqueue(current_node.left)\n if current_node.right is not None:\n bft_queue.enqueue(current_node.right)\n print(current_node.value)\n # make a queue\n # enqueue the node\n # as long as the queue is not empty\n # dequeue from the front of the queue, this is our current node\n # enqueue the kids of the current node on the queue\n\n #queue = Queue(12, 8)\n\n # Print the value of every node, starting with the given node,\n # in an iterative depth first traversal\n def dft_print(self, node):\n dft_stack = Stack()\n dft_stack.push(node)\n\n while dft_stack.__len__ != 0:\n current_node = dft_stack.pop()\n if current_node.left is not None:\n dft_stack.push(current_node.left)\n elif current_node.right is not None:\n dft_stack.push(current_node.right)\n print(current_node)\n\n # make a stack\n # push the node on the stack\n # as long as the stack is not empty\n # pop off the stack, this is our current node\n # put the kids of the current node on the stack\n # (check that they are not None, then put them on the stack)\n\n # 
Stretch Goals -------------------------\n # Note: Research may be required\n\n # Print Pre-order recursive DFT\n def pre_order_dft(self, node):\n pass\n\n # Print Post-order recursive DFT\n def post_order_dft(self, node):\n pass\n\nt = BSTNode(1)\nt.insert(8)\nt.insert(5)\nt.insert(7)\nt.insert(6)\nt.insert(3)\nt.insert(4)\nt.insert(2)\nt.bft_print(t)"
},
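bft_print follows the queue recipe its comments spell out; collections.deque gives the identical level-order traversal without the custom Queue class. A self-contained sketch (the tiny Node stands in for BSTNode):

from collections import deque

class Node:
    def __init__(self, value, left=None, right=None):
        self.value, self.left, self.right = value, left, right

def bft_print(node):
    q = deque([node])
    while q:
        current = q.popleft()        # dequeue from the front
        print(current.value)
        if current.left:
            q.append(current.left)   # enqueue the children
        if current.right:
            q.append(current.right)

bft_print(Node(2, Node(1), Node(3)))  # prints 2, 1, 3 -- one level at a time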
{
"alpha_fraction": 0.52326500415802,
"alphanum_fraction": 0.5264195799827576,
"avg_line_length": 28.84705924987793,
"blob_id": "095c60da97bb1967ab80b83b71db16acada6980e",
"content_id": "3848a9db785a0aa559aa2847801a85858e5bbc66",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2536,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 85,
"path": "/singly_linked_list/singly_linked_list.py",
"repo_name": "pshushereba/Data-Structures",
"src_encoding": "UTF-8",
"text": "class ListNode:\n def __init__(self, value, next=None):\n self.value = value\n self.next = next\n\n\nclass LinkedList:\n def __init__(self, node=None):\n self.head = node\n self.tail = node\n self.length = 1 if node is not None else 0\n\n def add_to_tail(self, value):\n node = ListNode(value)\n self.length += 1\n # if head does not exist, set both head and tail to the node that you created.\n if self.head is None:\n self.head = node\n self.tail = node\n # if head exists\n else:\n # set previous tail next pointer from None to node that you created.\n self.tail.next = node\n # set the list tail to the node you created.\n self.tail = node\n\n def add_to_head(self, value):\n node = ListNode(value)\n self.length += 1\n # check to see if there is a head (Is the list empty right now?)\n if self.head is None:\n self.head = node\n self.tail = node\n else:\n # take node that I'm creating and point the next pointer to the current head\n node.next = self.head\n # tell the linked list that the inserted node is the new head\n self.head = node\n\n def contains(self, value):\n current_node = self.head\n \n while current_node is not None:\n if current_node.value == value:\n return True\n \n current_node = current_node.next\n\n return False\n\n def remove_head(self):\n # update self.head pointer\n if self.head is not None:\n cur_val = self.head\n self.head = self.head.next\n if self.head is None: # checking to see if we have removed the last node\n self.tail = None\n self.length -= 1\n return cur_val.value\n else:\n return None\n\n def get_max(self):\n if self.head is None:\n return None\n\n node = self.head\n incr = 0\n value = 0\n\n while incr < self.length:\n incr += 1\n if value < node.value:\n value = node.value\n node = node.next\n return value\n\n # def reverse(self, node, prev):\n # # will need to call reverse(node.next, node) again\n # if prev is not None:\n # prev.next = None\n # if self.head != node:\n # self.add_to_head(node)\n # if node is not None:\n # self.reverse(node.next, node)"
},
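The commented-out recursive reverse at the bottom can also be done iteratively with the usual three-pointer idiom. A sketch against a bare node (head/tail/length bookkeeping from the class above is omitted):

class Node:
    def __init__(self, value, next=None):
        self.value, self.next = value, next

def reverse(head):
    prev = None
    while head:
        nxt = head.next       # remember the rest of the list
        head.next = prev      # flip this node's pointer backwards
        prev, head = head, nxt
    return prev               # prev ends up as the new head

head = reverse(Node(1, Node(2, Node(3))))
while head:
    print(head.value)         # 3, 2, 1
    head = head.next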
{
"alpha_fraction": 0.6287058591842651,
"alphanum_fraction": 0.6320000290870667,
"avg_line_length": 24.011764526367188,
"blob_id": "683ed1056fd0b656b10fabad4deb3fe9ff0d6340",
"content_id": "88c541bd0d4db2eb515ad43d46bf76aa70548c13",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2175,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 85,
"path": "/text_buffer.py",
"repo_name": "pshushereba/Data-Structures",
"src_encoding": "UTF-8",
"text": "# file I/O - for another day\n\n# add to the back of a text buffer\n# add to the front of a text buffer\n# delete from the back of a text buffer\n# delete from the front of a text buffer\n\n# join text buffers together\n\n# add to the middle\n\n# array vs DLL\n# array: add to back. O(1)\n# array: add to front: O(n)\n# array: delete from back: O(1)\n# array: delete from front: O(n)\n# array: join text buffers together: O(n)\n\n# DLL: add to back: O(1)\n# DLL: add to front: O(1)\n# DLL: delete from front: O(1)\n# DLL: delete from front: O(1)\n# DLL: join text buffers together: O(1)\n\n# __str__, to print out what's inside\n# array, O(n)\n# DLL, O(n)\n\nimport sys\n\nsys.path.append('./doubly_linked_list')\n\nfrom doubly_linked_list import DoublyLinkedList\n\nclass TextBuffer:\n def __init__(self):\n self.storage = DoublyLinkedList()\n\n def __str__(self):\n string_to_return = \"\"\n\n node = self.storage.head\n while node is not None:\n string_to_return += node.value\n node = node.next\n\n return string_to_return\n\n def join(self, other_buffer):\n # link tail of this buffer to the head of the other_buffer\n self.storage.tail.next = other_buffer.storage.head\n other_buffer.storage.head.prev = self.storage.tail\n\n # point our tail to the new tail\n self.storage.tail = other_buffer.storage.tail\n\n\n def append(self, string_to_add):\n for char in string_to_add:\n self.storage.add_to_tail(char)\n\n def prepend(self, string_to_add):\n for char in string_to_add:\n self.storage.add_to_head(char)\n\n def delete_from_front(self, number_of_chars_to_remove):\n for _ in range(number_of_chars_to_remove):\n self.storage.remove_from_head()\n\n def delete_from_back(self, number_of_chars_to_remove):\n for _ in range(number_of_chars_to_remove):\n self.storage.remove_from_tail()\n\n def find_text(self, text_to_find):\n pass\n\ntext = TextBuffer()\ntext.append('hello')\n\nother_buffer = TextBuffer()\nother_buffer.append(' how are you')\n\ntext.join(other_buffer)\n\nprint(text)"
},
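find_text is left as a stub. Since __str__ already flattens the buffer in O(n), one workable first cut is to search the flattened string; the sketch below is a method body for TextBuffer under that assumption (the return semantics -- character index or -1 -- are invented, since the stub defines none):

    def find_text(self, text_to_find):
        # flatten the DLL to a string, then reuse str.find -- O(n) either way
        return str(self).find(text_to_find)

With the buffers joined as in the demo above, find_text('how') would return 6 and find_text('xyz') would return -1.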
{
"alpha_fraction": 0.49603697657585144,
"alphanum_fraction": 0.5019814968109131,
"avg_line_length": 20.600000381469727,
"blob_id": "73656f54ea9ade4b13a1d1322187e04c887bbd8e",
"content_id": "4f37e785fc3cf0ecf0e1cd90e6dcfb90c8f8918e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1514,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 70,
"path": "/singly_linked_list/alt_singly_linked_list.py",
"repo_name": "pshushereba/Data-Structures",
"src_encoding": "UTF-8",
"text": "class ListNode:\n def __init__(self, value, next=None):\n self.value = value\n self.next = next\n \n def insert_after(self, value):\n current_next = self.next\n node = ListNode(value)\n \n if current_next:\n self.next = node.next\n node.next = current_next\n\n def insert_before(self, value):\n pass \n \n def delete(self):\n pass\n\n\nclass LinkedList:\n def __init__(self, node=None):\n self.head = node\n self.tail = node\n self.length = 1 if node is not None else 0\n\n def __len__(self):\n return self.length\n\n def add_to_head(self, value):\n node = ListNode(value)\n self.length += 1\n self.head = node\n\n def add_to_tail(self, value):\n node = ListNode(value)\n self.length += 1\n\n # if there is a tail\n if self.tail:\n self.tail.next = node\n self.tail = node\n else:\n self.tail = node\n self.head = node\n\n def remove_head(self):\n temp = self.head.value\n self.head = self.head.next\n self.next = None\n return temp\n# head\n# 1 -> 2 -> 3\n\n def get_max(self):\n pass\n\n def contains(self, value):\n if self.head == self.tail:\n return self.head.value\n \n node = self.head\n incr = 0\n\n while incr < self.length:\n incr += 1\n if value == node.value:\n return True\n else:\n node = node.next\n\n\n"
}
] | 5 |
ewilson/testWS
|
https://github.com/ewilson/testWS
|
33496c56765399178b7fd54d98828aeba81d33f5
|
b6d042a355566b366128af503dcafbb125c93a71
|
31416cffba333400568686a11178b80c32bc2ed3
|
refs/heads/master
| 2020-05-19T15:05:56.926701 | 2013-12-17T18:22:19 | 2013-12-17T18:22:19 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5610765814781189,
"alphanum_fraction": 0.5610765814781189,
"avg_line_length": 22,
"blob_id": "0dfacda3c25a0357e54de8bca62d32906a4f6387",
"content_id": "07200ff1481bdb41927623085d8a4583a07220a3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 483,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 21,
"path": "/app.py",
"repo_name": "ewilson/testWS",
"src_encoding": "UTF-8",
"text": "from flask import Flask, request, jsonify\n\nimport json\n\napp = Flask(__name__)\n\[email protected](\"/\", methods=['GET', 'POST'])\ndef echo():\n data = {'headers':dict(request.headers),\n 'method':request.method,\n 'url':request.url,\n 'args':request.args,\n 'query_string':request.query_string,\n 'form':request.form,\n }\n print dir(request)\n print data\n return jsonify(**data)\n\nif __name__ == '__main__':\n app.run(debug=True)\n"
},
{
"alpha_fraction": 0.47037914395332336,
"alphanum_fraction": 0.5189573168754578,
"avg_line_length": 27.133333206176758,
"blob_id": "605932d7b473b1fd85d146c4829f33cb83981f79",
"content_id": "b4389b98bfcfa17f6f9a8ba7936fbcdbc7b0b591",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 844,
"license_type": "no_license",
"max_line_length": 128,
"num_lines": 30,
"path": "/README.md",
"repo_name": "ewilson/testWS",
"src_encoding": "UTF-8",
"text": "testWS\n======\n\nA Flask web app for echoing requests. Sometimes useful for testing.\n\n###Output###\n\n {\n \"args\":{\n\t \"baz\":\"true\",\n\t \"foo\":\"bar\"\n },\n \"form\":{\n\n },\n \"headers\":{\n\t \"Accept\":\"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8\",\n\t \"Accept-Encoding\":\"gzip,deflate,sdch\",\n \"Accept-Language\":\"en-US,en;q=0.8\",\n\t \"Connection\":\"keep-alive\",\n\t \"Content-Length\":\"\",\n\t \"Content-Type\":\"\",\n\t \"Cookie\":\"session=eyJjc3JmX3Rva2VuIjp7IiBiIjoiWTJWbU5qTmpaamc0TTJZMFl6bGtNelUwT1dJMFlXRTRPRFV6WVdFd09ESTNZMlpqTUROaE53PT0ifX0.BZIikg.ZoasTYWdaZ7dLAqy5tfyzfOkeG8\",\n\t \"Host\":\"localhost:5000\",\n\t \"User-Agent\":\"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.63 Safari/537.36\"\n },\n \"method\":\"GET\",\n \"query_string\":\"foo=bar&baz=true\",\n \"url\":\"http://localhost:5000/?foo=bar&baz=true\"\n }\n"
}
] | 2 |
Sargazi77/Project1
|
https://github.com/Sargazi77/Project1
|
1423b55d520cdb80446bfa2eb3e98fad1b5eb663
|
215f5114fd3bc316f7dddd1917c4f8a71971a990
|
6e069c20cc59f93e5d17cc21aa18e2b9fbd4f6c7
|
refs/heads/master
| 2023-02-25T22:51:45.143093 | 2021-01-27T19:12:27 | 2021-01-27T19:12:27 | 333,528,793 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.655952513217926,
"alphanum_fraction": 0.6648129224777222,
"avg_line_length": 54.81578826904297,
"blob_id": "467e0c8138c6359a06c886f73c549a294cd8f471",
"content_id": "17374687fc2a338907f2320dbe80f00977c2cdc9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10609,
"license_type": "no_license",
"max_line_length": 256,
"num_lines": 190,
"path": "/project1.py",
"repo_name": "Sargazi77/Project1",
"src_encoding": "UTF-8",
"text": "'''\nAdvance Rock paper sciecer game \nGame featurs:\n1- Being able to choose to play against computer or multiple player\n2- Bieng able to monitor resutls in both 1 player or 2 player mode\n3- Writing data into excell in 2 player mode\n\nRule of the game is just like the traditional rock paper sciecer game \nRock vs sciecer: Rock wins\nRock vs Paper : Paper wins\nPaper vs Sciecer : Sciecer wins \n\nTODO: Coming soon: Game resutls will be stored in a database.\n'''\n\nimport xlsxwriter #import module to write excell file\nimport random\nimport getpass #in 2 player mode players shouldn't be able to see eachother's input\ndef main():\n computer_score = 0\n user_score = 0\n second_player_score = 0\n print('Please choose if you want to play againt computer or another friend?\\n Enter 1 for computer \\ an Enter 2 to play with a friend ')\n cof = int(input()) #this game has 2 modes 1- to play with computer or 2- to play with another human (real player)\n \n if cof == 1:\n while True:\n user_name = input ('please enter your name')\n #TODO: create a function to play against computer\n print('Please enter the number assosiated with your choice \\n 1- ROCK \\n 2-PAPER \\n 3- SCISSOR')\n user_choices = getpass.getpass(f'{user_name}\\'s turn: ') # this method will keep the player 1 input hidden so the player 2 will not be able to cheat\n computer_choice, computer_choice_name, user_choices_name = game(user_choices) # game function is in charge of the game against computer it will return the choices computer made\n computer_score,user_score = results(user_choices,computer_choice,user_choices_name,computer_choice_name,computer_score,user_score,user_name) # this function takes all the data in play against computer mode to desplay results\n #this function does not write data into excell\n print('Do you want to play again?')\n play_again = input('please answer using y or n')\n if play_again == 'n' or play_again == 'N':\n break\n elif cof == 2:\n while True:\n user_name = input ('please enter your name')\n print('Please enter the number assosiated with your choice \\n 1- ROCK \\n 2-PAPER \\n 3- SCISSOR')\n user_choices = getpass.getpass(f'{user_name}\\'s turn: ') #keeps the user input hidden\n # TODO: creat a function to play against another player \n second_player_name,second_player_choices,user_choices_name,second_player_choices_name = two_player_game(user_choices,user_name) # works just like game function but for 2 player mode\n two_player_results(second_player_name,second_player_choices,second_player_choices_name,user_name,user_choices_name,user_choices,second_player_score,user_score) # just like result function but for two player mode and also writes data into excell\n play_again = input('please answer using y or n')\n if play_again == 'n' or play_again == 'N':\n break\n else:\n print(\"Invalid Input, please enter 1 or 2\")\n return main()\n \n \n\n\ndef game(user_choices):\n #setup and identify user input \n user_choices_name = ''\n if int(user_choices) == 1 : # conver the user input into int\n user_choices_name = 'rock' \n print('Your choice is rock')\n elif int(user_choices) == 2:\n user_choices_name = 'paper'\n print('Your choice is paper')\n elif int(user_choices) == 3:\n user_choices_name = 'sciecer'\n print('Your choice is sicssor') \n else:\n print('your input is not valid')\n return\n \n\n computer_choice = random.randint(1,3) # computer must select a random number between 1 to 3\n computer_choice_name = ''\n if computer_choice == 1 :\n computer_choice_name = 'rock'\n print('Computer 
choice is rock')\n elif computer_choice == 2:\n computer_choice_name = 'paper'\n print('Computer choice is paper')\n elif computer_choice == 3:\n computer_choice_name = 'sicssor'\n print('Computer choice is sicssor') \n else:\n print('Computer input is not valid')\n return computer_choice,computer_choice_name,user_choices_name\n\ndef results(user_choices, computer_choice, user_choices_name,computer_choice_name,computer_score,user_score,user_name):\n #TODO: possible ways to win in this game\n if(int(user_choices) == computer_choice):\n print(f'Your choice was {user_choices_name} and the computer choice was {computer_choice_name}, the choices are equal so no one gets a point ')\n elif int(user_choices) == 1 and computer_choice == 2:\n print(f'Your choice was {user_choices_name} and the computer choice was {computer_choice_name}, Computer wins! ')\n computer_score+=1\n elif int(user_choices) == 2 and computer_choice == 1:\n print(f'Your choice was {user_choices_name} and the computer choice was {computer_choice_name}, You win! ')\n user_score+=1\n elif int(user_choices) == 3 and computer_choice == 1 :\n print(f'Your choice was {user_choices_name} and the computer choice was {computer_choice_name}, Computer wins! ')\n computer_score+=1\n elif int(user_choices) == 3 and computer_choice == 2 :\n print(f'Your choice was {user_choices_name} and the computer choice was {computer_choice_name}, You win! ') \n user_score+=1 \n elif computer_choice == 3 and int(user_choices) == 1 :\n print(f'Your choice was {user_choices_name} and the computer choice was {computer_choice_name}, You win! ') \n user_score+=1 \n elif computer_choice == 3 and int(user_choices) == 2 :\n print(f'Your choice was {user_choices_name} and the computer choice was {computer_choice_name}, Computer wins! 
') \n computer_score+=1 \n print(f' Your score is: {str(user_score)} \\n Computer score is: {str(computer_score)}')\n\n return computer_score,user_score\ndef two_player_game(user_choices,user_name): # this function produce second player choice\n # and must identify 1st player choices too\n second_player_name = input('What is the second player\\'s name?')\n print('Please enter the number assosiated with your choice \\n 1- ROCK \\n 2-PAPER \\n 3- SCISSOR')\n second_player_choices = int(input(f'{second_player_name} turn: '))\n if int(user_choices) == 1 :\n user_choices_name = 'rock'\n print('Your choice is rock')\n elif int(user_choices) == 2:\n user_choices_name = 'paper'\n print('Your choice is paper')\n elif int(user_choices) == 3:\n user_choices_name = 'sciecer'\n print('Your choice is sicssor') \n else:\n print('your input is not valid')\n \n if second_player_choices == 1 :\n second_player_choices_name = 'rock'\n print(f'{second_player_name} choice is rock')\n elif second_player_choices == 2:\n second_player_choices_name = 'paper'\n print(f'{second_player_name} choice is paper')\n elif second_player_choices == 3:\n second_player_choices_name = 'sciecer'\n print(f'{second_player_name} choice is sicssor') \n else:\n print('your input is not valid')\n return second_player_name,second_player_choices,user_choices_name,second_player_choices_name\n\ndef two_player_results(second_player_name,second_player_choices,second_player_choices_name,user_name,user_choices_name,user_choices,second_player_score,user_score):\n workbook = xlsxwriter.Workbook('results.xlsx') # create the file and use results as file name\n worksheet = workbook.add_worksheet('Two player Results') # add a new sheet to the excell file\n #in order to tell the program ro start writing in excell from the first row and colum\n row = 0\n col = 0\n if(int(user_choices) == second_player_choices):\n print(f'second_player_name is: {second_player_name} , second_player_choices_name is: {second_player_choices_name}')\n print(f'{user_name}\\'s choice was {user_choices_name} and the {second_player_name}\\'s choice was {second_player_choices_name}, the choices are equal so no one gets a point ')\n elif int(user_choices) == 1 and second_player_choices == 2:\n print(f'second_player_name is: {second_player_name} , second_player_choices_name is: {second_player_choices_name}')\n print(f'{user_name}\\'s choice was {user_choices_name} and the {second_player_name}\\'s choice was {second_player_choices_name}, {second_player_name} Wins! ')\n second_player_score+=1\n elif int(user_choices) == 2 and second_player_choices == 1:\n print(f'second_player_name is: {second_player_name} , second_player_choices_name is: {second_player_choices_name}')\n print(f'{user_name}\\'s choice was {user_choices_name} and the {second_player_name}\\'s choice was {second_player_choices_name}, {user_name} Wins! ')\n user_score+=1\n elif int(user_choices) == 3 and second_player_choices == 1 :\n print(f'{user_name}\\'s choice was {user_choices_name} and the {second_player_name}\\'s choice was {second_player_choices_name}, {second_player_name} Wins! ')\n second_player_score+=1\n elif int(user_choices) == 3 and second_player_choices == 2 :\n print(f'{user_name}\\'s choice was {user_choices_name} and the {second_player_name}\\'s choice was {second_player_choices_name}, {user_name} Wins! 
') \n user_score+=1 \n elif second_player_choices == 3 and int(user_choices) == 1 :\n print(f'{user_name}\\'s choice was {user_choices_name} and the {second_player_name}\\'s choice was {second_player_choices_name}, {user_name} Wins! ') \n user_score+=1 \n elif second_player_choices == 3 and int(user_choices) == 2 :\n print(f'{user_name}\\'s choice was {user_choices_name} and the {second_player_name}\\'s choice was {second_player_choices_name}, {second_player_name} Wins! ') \n second_player_score+=1 \n worksheet.write('A1', 'Player Name') # setting up the header of the excell\n worksheet.write('B1', 'Scores') \n worksheet.write(row,col, user_name) # write the first player name\n worksheet.write(row+1,col, second_player_name) # write the sound player name\n worksheet.write(row,col +1, user_score) # write the current score for player 1\n worksheet.write(row+1,col +1, second_player_score) # # write the current score for player 1\n\n workbook.close() # must close the file to save\n \n \n #TODO: FINAL: display the results\n print(f'{user_name} score is: {str(user_score)} \\n {second_player_name} score is: {str(second_player_score)}')\n\n\n \n\nif __name__ == \"__main__\":\n main()\n "
}
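two_player_results rebuilds results.xlsx from scratch on every call, which matches how xlsxwriter works: a workbook is write-once, rows are addressed either by 'A1' strings or (row, col) tuples, and nothing lands on disk until close(). Just the spreadsheet part in isolation (the player names and scores below are sample data):

import xlsxwriter

workbook = xlsxwriter.Workbook('results.xlsx')    # created fresh; xlsxwriter cannot append
worksheet = workbook.add_worksheet('Two player Results')

worksheet.write('A1', 'Player Name')              # header row, 'A1'-style addressing
worksheet.write('B1', 'Scores')
for row, (name, score) in enumerate([('alice', 2), ('bob', 1)], start=1):
    worksheet.write(row, 0, name)                 # (row, col)-style addressing
    worksheet.write(row, 1, score)

workbook.close()                                  # the file is only written here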
] | 1 |
jaketanda/NFLArgumentTwitterBot
|
https://github.com/jaketanda/NFLArgumentTwitterBot
|
e2a5620d98e9df88a29462b670fdc90e2b45fd4f
|
9769ec72dcc1048dca32a0ca937c787672dc3b6e
|
5572d2857937674dfbf9a08179e090a2f7785c8b
|
refs/heads/main
| 2023-05-09T10:14:46.606020 | 2021-05-28T01:23:10 | 2021-05-28T01:23:10 | 371,538,578 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7605633735656738,
"alphanum_fraction": 0.7613458633422852,
"avg_line_length": 66.31578826904297,
"blob_id": "63712495dd1169df1cc0e51c48432ae75d461109",
"content_id": "94bf73e725a5734333d27ebd90999187236be1aa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1278,
"license_type": "no_license",
"max_line_length": 330,
"num_lines": 19,
"path": "/README.md",
"repo_name": "jaketanda/NFLArgumentTwitterBot",
"src_encoding": "UTF-8",
"text": "# NFL Argument Twitter Bot\nA bot to have civil discussions with heated NFL fans on Twitter.\n\nIf you use this bot, it will probably be banned for spam... :(\n But hey, it's pretty funny while it lasts\n\n## How it works\n\nThis bot listens for posts by Twitter accounts determined in `twitterIds.json`. Usually, I would have it listen for sports media accounts like @NFL, or @PFF. To see the id of a Twitter account use [this tool](https://tweeterid.com/). Simply add that ID to the `twitterIds.json` file to start listening for tweets from that account\n\nIf ANY of their posts contain a player name in `players.json`, then the bot will respond with a phrase from `phrases.json`, replacing `PLAYERNAME` with the player's actual name.\n\nIf someone responds to our bot, the bot will wait a little bit, and then respond to the user with a response from `responses.json`\n\nAdd whatever phrases and replies to the json, and have fun!\n\nNote: I made this when I was pretty new to Python3, so it might not work and it might suck. I'm working on a new version of this, so hopefully that should work better. Until then... this.\n\nAlso note: This bot was made for fun and not to cause serious harm or trouble with anyone it interacts with. Don't be too mean to these passionate fans on Twitter :)"
},
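The bot is configured entirely through the JSON files the README names. A sketch of plausible file shapes and the one substitution the code performs (the ID, name, and phrase below are invented examples, not repo contents):

import json

# twitterIds.json: {"twitterIds": ["19426551"]}             -- account IDs as strings
# players.json:    {"players": ["Some Quarterback"]}        -- names to watch for
# phrases.json:    {"phrases": ["PLAYERNAME is overrated"]} -- PLAYERNAME is replaced

with open('phrases.json') as f:
    phrases = json.load(f)['phrases']
print(phrases[0].replace('PLAYERNAME', 'Some Quarterback'))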
{
"alpha_fraction": 0.6270492076873779,
"alphanum_fraction": 0.6270492076873779,
"avg_line_length": 39.83333206176758,
"blob_id": "6d84fdf86b98f7354e065467743af571ac2e8e16",
"content_id": "02ed522acabfeb730edabeeba8ec144e989dbed6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 244,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 6,
"path": "/keys.py",
"repo_name": "jaketanda/NFLArgumentTwitterBot",
"src_encoding": "UTF-8",
"text": "keys = dict(\n\tconsumer_key = '[Enter consumer key here]',\n\tconsumer_secret = '[Enter secret consumer key here]',\n\taccess_token = '[Enter access token here]',\n\taccess_token_secret = '[Enter secret access token here]',\n)"
},
{
"alpha_fraction": 0.5633108615875244,
"alphanum_fraction": 0.568500280380249,
"avg_line_length": 30.842975616455078,
"blob_id": "087a3c085e250483587fb214cd3cfa322b503ecb",
"content_id": "9996c2e8cb778807378037b03334136a40d76315",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3854,
"license_type": "no_license",
"max_line_length": 149,
"num_lines": 121,
"path": "/twitterBot.py",
"repo_name": "jaketanda/NFLArgumentTwitterBot",
"src_encoding": "UTF-8",
"text": "import tweepy\nimport time\nimport json\nimport random\nfrom keys import keys\nfrom tweepy import Stream\nfrom tweepy.streaming import StreamListener\n\nstatusList = []\n\nclass StdOutListener(StreamListener):\n def on_status(self, status):\n try:\n status.retweeted_status\n return True\n except:\n with open('twitterIds.json') as json_file:\n data = json.load(json_file)\n \n if str(status.user.id) in data['twitterIds']:\n print(f\"Adding tweet by {status.user.name} to statusList\")\n statusList.append(status)\n return True\n\n def on_error(self, status):\n return False\n\ndef doesReply(pctChance):\n if (pctChance > random.randint(0,99)):\n return True\n return False\n\ndef createApi():\n CONSUMER_KEY = keys['consumer_key']\n CONSUMER_SECRET = keys['consumer_secret']\n ACCESS_TOKEN = keys['access_token']\n ACCESS_TOKEN_SECRET = keys['access_token_secret']\n\n auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)\n\n api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)\n\n try:\n api.verify_credentials()\n print(\"Authentication OK\")\n except:\n print(\"Error during authentication\")\n\n return api\n\ndef createStream(api, stream):\n with open('twitterIds.json') as json_file:\n data = json.load(json_file)\n twitterHandlesStr = \"\"\n for p in data['twitterIds']:\n twitterHandlesStr += p + \",\"\n\n twitterHandlesStr = twitterHandlesStr[:-1]\n\n stream.filter(follow=[twitterHandlesStr], is_async=True, stall_warnings=True)\n\ndef check_mentions(api, since_id):\n new_since_id = since_id\n for tweet in tweepy.Cursor(api.mentions_timeline,\n since_id=since_id).items():\n new_since_id = max(tweet.id, new_since_id)\n \n if (doesReply(80)):\n time.sleep(60)\n print(f\"Replying to {tweet.user.name}\")\n\n with open('replies.json') as reply_file:\n replies = json.load(reply_file)[\"replies\"]\n api.update_status(\n status=f\"@{tweet.user.screen_name} \" + replies[random.randint(0, len(replies)-1)],\n in_reply_to_status_id=tweet.id,\n )\n\n return new_since_id\n\ndef checkNewTweets(api):\n for status in statusList:\n with open('players.json') as json_players:\n playerNames = json.load(json_players)[\"players\"]\n\n for playerName in playerNames:\n if playerName.lower() in status.text.lower():\n if doesReply(30):\n time.sleep(60) \n print(f\"Commenting on post by {status.user.name}\")\n\n with open('phrases.json') as phrases_json:\n phrases = json.load(phrases_json)[\"phrases\"]\n api.update_status(\n status=f\"@{status.user.screen_name} \" + phrases[random.randint(0, len(phrases)-1)].replace(\"PLAYERNAME\", playerName),\n in_reply_to_status_id=status.id,\n )\n \n time.sleep(30)\n\n break\n\n statusList.clear()\n\ndef main():\n api = createApi()\n stream = Stream(auth=api.auth, listener=StdOutListener())\n createStream(api, stream)\n while True:\n sinceIdFile = open(r\"Id.txt\", \"r\")\n sinceId = int(sinceIdFile.read())\n sinceIdFile.close()\n sinceId = check_mentions(api, sinceId)\n sinceIdFile = open(r\"Id.txt\", \"w\")\n sinceIdFile.write(str(sinceId))\n sinceIdFile.close()\n checkNewTweets(api)\n time.sleep(60)\n\nmain()\n\n"
}
] | 3 |
11lookpig23/NLRL_v2
|
https://github.com/11lookpig23/NLRL_v2
|
19b9c5fc5a852b0999f2522b5a62084433b5bc92
|
b075c902dd0924aa2f234949eff35affc7d95efb
|
df13013c75a8a3cccaa7bd6bcb28f0e46254752f
|
refs/heads/master
| 2021-03-20T10:00:22.683556 | 2020-03-28T11:07:19 | 2020-03-28T11:07:19 | 247,200,011 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.4872205853462219,
"alphanum_fraction": 0.5159041881561279,
"avg_line_length": 47.94117736816406,
"blob_id": "aa1dfd64508e3999cf71ae33fc9111bce212aaed",
"content_id": "9ad7d39b93463a3e2a52494795437edb1f39cfa8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12481,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 255,
"path": "/NLRL-master-yuan/core/setup.py",
"repo_name": "11lookpig23/NLRL_v2",
"src_encoding": "UTF-8",
"text": "from __future__ import print_function, division, absolute_import\nfrom core.clause import *\nfrom core.ilp import *\nimport ray\nfrom core.rules import *\nfrom core.induction import *\nfrom core.rl import *\nfrom core.clause import str2atom,str2clause\nfrom core.symbolicEnvironment import *\n\ndef setup_predecessor():\n constants = [str(i) for i in range(10)]\n background = [Atom(Predicate(\"succ\", 2), [constants[i], constants[i + 1]]) for i in range(9)]\n positive = [Atom(Predicate(\"predecessor\", 2), [constants[i], constants[i+2]]) for i in range(8)]\n all_atom = [Atom(Predicate(\"predecessor\", 2), [constants[i], constants[j]]) for i in range(10) for j in range(10)]\n negative = list(set(all_atom) - set(positive))\n\n language = LanguageFrame(Predicate(\"predecessor\",2), [Predicate(\"succ\",2)], constants)\n ilp = ILP(language, background, positive, negative)\n program_temp = ProgramTemplate([], {Predicate(\"predecessor\", 2): [RuleTemplate(1, False), RuleTemplate(0, False)]},\n 4)\n man = RulesManager(language, program_temp)\n return man, ilp\n\ndef setup_fizz():\n constants = [str(i) for i in range(10)]\n succ = Predicate(\"succ\", 2)\n zero = Predicate(\"zero\", 1)\n fizz = Predicate(\"fizz\", 1)\n pred1 = Predicate(\"pred1\", 2)\n pred2 = Predicate(\"pred2\", 2)\n\n background = [Atom(succ, [constants[i], constants[i + 1]]) for i in range(9)]\n background.append(Atom(zero, \"0\"))\n positive = [Atom(fizz, [constants[i]]) for i in range(0, 10, 3)]\n all_atom = [Atom(fizz, [constants[i]]) for i in range(10)]\n negative = list(set(all_atom) - set(positive))\n language = LanguageFrame(fizz, [zero, succ], constants)\n ilp = ILP(language, background, positive, negative)\n program_temp = ProgramTemplate([pred1, pred2], {fizz: [RuleTemplate(1, True), RuleTemplate(1, False)],\n pred1: [RuleTemplate(1, True),],\n pred2: [RuleTemplate(1, True),],},\n 10)\n man = RulesManager(language, program_temp)\n return man, ilp\n\ndef setup_even():\n constants = [str(i) for i in range(10)]\n succ = Predicate(\"succ\", 2)\n zero = Predicate(\"zero\", 1)\n target = Predicate(\"even\", 1)\n pred = Predicate(\"pred\", 2)\n background = [Atom(succ, [constants[i], constants[i + 1]]) for i in range(9)]\n background.append(Atom(zero, \"0\"))\n positive = [Atom(target, [constants[i]]) for i in range(0, 10, 2)]\n all_atom = [Atom(target, [constants[i]]) for i in range(10)]\n negative = list(set(all_atom) - set(positive))\n language = LanguageFrame(target, [zero, succ], constants)\n ilp = ILP(language, background, positive, negative)\n program_temp = ProgramTemplate([pred], {target: [RuleTemplate(1, True), RuleTemplate(1, False)],\n pred: [RuleTemplate(1, True),RuleTemplate(1, False)],\n },\n 10)\n man = RulesManager(language, program_temp)\n return man, ilp\n\ndef setup_cliffwalking(variation=None, invented=True):\n env = CliffWalking()\n if variation:\n env = env.vary(variation)\n temp1 = [RuleTemplate(1, False)]\n temp1_main = [RuleTemplate(2, False)]\n temp2_main = [RuleTemplate(3, True)]\n temp2_invent = [RuleTemplate(1, True)]\n temp_2extential = [RuleTemplate(2, False)]\n if invented:\n invented = Predicate(\"invented\", 1)\n invented2 = Predicate(\"invented2\", 2)\n invented3 = Predicate(\"invented3\", 1)\n invented4 = Predicate(\"invented4\", 2)\n program_temp = ProgramTemplate([invented, invented2, invented3, invented4], {\n invented2: temp2_invent,\n invented: temp_2extential,\n invented3: temp2_invent,\n invented4: temp2_invent,\n UP: temp2_main,\n DOWN: temp2_main,\n LEFT: temp2_main,\n 
RIGHT: temp2_main},\n 4)\n else:\n program_temp = ProgramTemplate([], {UP: temp1, DOWN: temp1, LEFT: temp1, RIGHT: temp1}, 1)\n man = RulesManager(env.language, program_temp)\n return man, env\n\ndef setup_windycliffwalking(variation=None, invented=True):\n env = WindyCliffWalking()\n if variation:\n env = env.vary(variation)\n temp1 = [RuleTemplate(1, False)]\n temp1_main = [RuleTemplate(2, False)]\n temp2_main = [RuleTemplate(3, True)]\n temp2_invent = [RuleTemplate(1, True)]\n temp_2extential = [RuleTemplate(2, False)]\n if invented:\n invented = Predicate(\"invented\", 1)\n invented2 = Predicate(\"invented2\", 2)\n invented3 = Predicate(\"invented3\", 1)\n invented4 = Predicate(\"invented4\", 2)\n program_temp = ProgramTemplate([invented, invented2, invented3, invented4], {\n invented2: temp2_invent,\n invented: temp_2extential,\n invented3: temp2_invent,\n invented4: temp2_invent,\n UP: temp2_main,\n DOWN: temp2_main,\n LEFT: temp2_main,\n RIGHT: temp2_main},\n 4)\n else:\n program_temp = ProgramTemplate([], {UP: temp1, DOWN: temp1, LEFT: temp1, RIGHT: temp1}, 1)\n man = RulesManager(env.language, program_temp)\n return man, env\n\ndef setup_unstack(variation=None, templete=\"reduced\", all_block=False):\n env = Unstack(all_block=all_block)\n if variation:\n env = env.vary(variation)\n if templete==\"full\":\n maintemp = [RuleTemplate(1, False), RuleTemplate(1, True)]\n inventedtemp = [RuleTemplate(1, False), RuleTemplate(1, True)]\n if templete==\"reduced\":\n maintemp = [RuleTemplate(1, True)]\n inventedtemp = [RuleTemplate(1, True)]\n inventedtemp2 = [RuleTemplate(1, True)]\n inventedtemp_2extential = [RuleTemplate(2, False)]\n invented = Predicate(\"invented\", 2)\n invented4 = Predicate(\"invented4\", 2)\n invented2 = Predicate(\"invented2\", 1)\n invented3 = Predicate(\"invented3\", 1)\n\n program_temp = ProgramTemplate([invented, invented3, invented2, invented4],\n {\n invented3: inventedtemp2,\n invented: inventedtemp_2extential,\n invented4: inventedtemp2,\n invented2: inventedtemp_2extential,\n MOVE: maintemp,\n }, 4)\n man = RulesManager(env.language, program_temp)\n return man, env\n\ndef setup_stack(variation=None, templete=\"reduced\", all_block=False):\n env = Stack(initial_state=INI_STATE2, all_block=all_block)\n if variation:\n env = env.vary(variation)\n if templete==\"full\":\n maintemp = [RuleTemplate(1, False), RuleTemplate(1, True)]\n inventedtemp = [RuleTemplate(1, False), RuleTemplate(1, True)]\n if templete==\"reduced\":\n maintemp = [RuleTemplate(1, True)]\n inventedtemp = [RuleTemplate(1, True)]\n inventedtemp2 = [RuleTemplate(1, True)]\n inventedtemp_2extential = [RuleTemplate(2, False)]\n invented = Predicate(\"invented\", 2)\n invented4 = Predicate(\"invented4\", 2)\n invented2 = Predicate(\"invented2\", 1)\n invented3 = Predicate(\"invented3\", 1)\n\n program_temp = ProgramTemplate([invented, invented3, invented2, invented4],\n {\n invented3: inventedtemp2,\n invented: inventedtemp2,\n invented4: inventedtemp2,\n invented2: inventedtemp_2extential,\n MOVE: maintemp,\n }, 4)\n man = RulesManager(env.language, program_temp)\n return man, env\n\ndef setup_on(variation=None, templete=\"reduced\", all_block=False):\n env = On(all_block=all_block)\n if variation:\n env = env.vary(variation)\n if templete==\"full\":\n maintemp = [RuleTemplate(1, False), RuleTemplate(1, True)]\n inventedtemp = [RuleTemplate(1, False), RuleTemplate(1, True)]\n if templete==\"reduced\":\n maintemp = [RuleTemplate(1, True), RuleTemplate(0, True)]\n inventedtemp = [RuleTemplate(1, 
True)]\n inventedtemp2 = [RuleTemplate(1, True)]\n inventedtemp_2extential = [RuleTemplate(2, False)]\n invented = Predicate(\"invented\", 2)\n invented4 = Predicate(\"invented4\", 2)\n invented2 = Predicate(\"invented2\", 1)\n invented3 = Predicate(\"invented3\", 1)\n invented5 = Predicate(\"invented5\", 2)\n invented6 = Predicate(\"invented6\", 1)\n\n program_temp = ProgramTemplate([invented, invented3, invented2, invented4],\n {\n invented3: inventedtemp2,\n invented: inventedtemp2,\n invented4: inventedtemp2,\n invented2: inventedtemp_2extential,\n MOVE: maintemp,\n }, 4)\n man = RulesManager(env.language, program_temp)\n return man, env\n\ndef setup_tictacteo(variation=None):\n #from core.clause import *\n #from core.ilp import LanguageFrame\n case1 = {1:[(0,1),(1,0)],-1:[(2,1),(2,2)]}\n case2 = {1:[(0,1),(0,0)],-1:[(2,1),(2,2)]}\n case3 = {1:[(1,1),(2,0)],-1:[(2,1),(0,2)]}\n case4 = {1:[(0,0)],-1:[(2,0),(0,2)]}\n case5 = {1:[(1,1)],-1:[(2,0),(2,2)]}\n case6 = {1:[(2,0)],-1:[(0,0),(2,2)]}\n\n env = TicTacTeo(case=case6)#case6)\n if variation:\n env = env.vary(variation)\n maintemp = [RuleTemplate(1, True)]\n inventedtemp2 = [RuleTemplate(1, True)]\n inventedtemp_2extential = [RuleTemplate(2, False)]\n invented = Predicate(\"invented\", 2)\n invented2 = Predicate(\"invented2\", 2)\n invented3 = Predicate(\"invented3\", 1)\n invented4 = Predicate(\"invented4\", 1)\n program_temp = ProgramTemplate([invented, invented2, invented3, invented4],\n {invented:inventedtemp2,\n PLACE:maintemp,\n invented2: inventedtemp2,\n invented3:inventedtemp2,\n invented4: inventedtemp_2extential\n }, 4)\n man = RulesManager(env.language, program_temp)\n cla = man.all_clauses\n Pla = cla[Predicate(name='place', arity=2)]\n def cond(cla):\n a = (cla.body[0].predicate == Predicate(name='mine', arity=2)) and ( cla.body[0].terms == cla.head.terms )\n b = (cla.body[1].predicate == Predicate(name='mine', arity=2)) and ( cla.body[1].terms == cla.head.terms )\n c = (cla.body[0].predicate == Predicate(name='opponent', arity=2)) and ( cla.body[0].terms == cla.head.terms )\n d = (cla.body[1].predicate == Predicate(name='opponent', arity=2)) and ( cla.body[1].terms == cla.head.terms )\n if ( a or b or c or d) == True:\n print(\"invalid----\", cla)\n return True\n return False\n for cl in Pla[0]:\n k = cond(cl)\n if k:\n Pla[0].remove(cl)\n man.all_clauses[Predicate(name='place', arity=2)] = [Pla[0]]\n return man, env\n\n"
},
{
"alpha_fraction": 0.5892857313156128,
"alphanum_fraction": 0.6085164546966553,
"avg_line_length": 32.1363639831543,
"blob_id": "66bca30879237f65bcb9e751ade9b171a775ab53",
"content_id": "f012f414115e5245523f44a63e93faf56c1a4ee2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 728,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 22,
"path": "/NLRL-master-yuan/core/plotana.py",
"repo_name": "11lookpig23/NLRL_v2",
"src_encoding": "UTF-8",
"text": "from collections import Counter\nimport numpy as np\nimport matplotlib.pyplot as plt\ndef plottrain(res,repeat,total_step,expname):\n gap = int(total_step/repeat)\n res = res[:len(res)-(len(res)%repeat)]\n out = res.reshape(gap,repeat)\n dicfreq = {-1:[],0:[],1:[]}\n for k in out:\n freq = Counter(k)\n dicfreq[-1].append(freq[-1]/repeat)\n dicfreq[1].append(freq[1]/repeat)\n dicfreq[0].append(freq[0]/repeat)\n plt.ion()\n plt.plot(range(gap),dicfreq[1],color = 'r')\n plt.plot(range(gap),dicfreq[-1],color = 'blue')\n plt.plot(range(gap),dicfreq[0],color = 'yellow')\n plt.savefig(expname+\"trainres.png\")\n plt.show()\n plt.pause(25)\n plt.close()\n print(\"over------------\")"
}
] | 2 |
williae2/01-IntroductionToPython | https://github.com/williae2/01-IntroductionToPython | 437af6a0fdd7f1679cb7196fcf96cd10bc24233d | a07e227cffccecd4b17dafed63cf60d9ce0d8e11 | e2623d480cee0773819dad73a80f997b09a91ea9 | refs/heads/master | 2020-03-27T16:46:26.185785 | 2018-09-02T20:14:41 | 2018-09-02T20:14:41 | 146,805,366 | 0 | 0 | MIT | 2018-08-30T20:51:33 | 2018-08-29T19:44:13 | 2018-08-29T19:44:11 | null |
[
{
"alpha_fraction": 0.5222355723381042,
"alphanum_fraction": 0.5390625,
"avg_line_length": 33.64583206176758,
"blob_id": "8e80da98bd6db132a4f6351eecc54c8ad786a6f0",
"content_id": "9e316536a99ab0c851d8682f5a3402af48eb8d99",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1664,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 48,
"path": "/src/m6_your_turtles.py",
"repo_name": "williae2/01-IntroductionToPython",
"src_encoding": "UTF-8",
"text": "\"\"\"\nYour chance to explore Loops and Turtles!\n\nAuthors: David Mutchler, Dave Fisher, Vibha Alangar, Amanda Stouder,\n their colleagues and Elijah Williams.\n\"\"\"\n###############################################################################\n# DONE: 1.\n# On Line 5 above, replace PUT_YOUR_NAME_HERE with your own name.\n###############################################################################\n\n###############################################################################\n# DONE: 2.\n# You should have RUN the m5e_loopy_turtles module and READ its code.\n# (Do so now if you have not already done so.)\n#\n# Below this comment, add ANY CODE THAT YOU WANT, as long as:\n# 1. You construct at least 2 rg.SimpleTurtle objects.\n# 2. Each rg.SimpleTurtle object draws something\n# (by moving, using its rg.Pen). ANYTHING is fine!\n# 3. Each rg.SimpleTurtle moves inside a LOOP.\n#\n# Be creative! Strive for way-cool pictures! Abstract pictures rule!\n#\n# If you make syntax (notational) errors, no worries -- get help\n# fixing them at either this session OR at the NEXT session.\n#\n# Don't forget to COMMIT-and-PUSH when you are done with this module.\n###############################################################################\nimport rosegraphics as rg\n\nlarry = rg.SimpleTurtle('classic')\nlarry.pen = rg.Pen('violet', 3)\nlarry.speed = 20\nterry = rg.SimpleTurtle('turtle')\nterry.pen = rg.Pen('red', 3)\nterry.speed = 20\nfor k in range(26):\n larry.draw_circle(13)\n larry.pen_up()\n larry.right(15)\n larry.forward(10)\n larry.pen_down()\n terry.draw_square(13)\n terry.pen_up()\n terry.left(15)\n terry.forward(10)\n terry.pen_down()\n\n"
}
] | 1 |
kcozzone3/AtlantaBeltline | https://github.com/kcozzone3/AtlantaBeltline | fce67d271d1100a5392c2e7ae56fb871b89f9304 | 72e93c66f1596acc88fca328b8296dc3223cdabf | 0e4f896ad440d54d1b1a944c08995c5c2044b8ef | refs/heads/master | 2021-06-25T22:02:52.037621 | 2020-12-13T19:37:22 | 2020-12-13T19:37:22 | 179,183,500 | 1 | 0 | null | 2019-04-03T01:02:40 | 2019-04-22T00:46:44 | 2019-04-22T00:56:11 | Python |
[
{
"alpha_fraction": 0.5650495290756226,
"alphanum_fraction": 0.5725496411323547,
"avg_line_length": 47.895355224609375,
"blob_id": "55c16f913ea573be9b50dfdc1800701b15c7e962",
"content_id": "d63b94e33f92539a4ba1735f29b4f12e26fdc30b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 71999,
"license_type": "no_license",
"max_line_length": 897,
"num_lines": 1443,
"path": "/Queries.py",
"repo_name": "kcozzone3/AtlantaBeltline",
"src_encoding": "UTF-8",
"text": "import pymysql\r\nfrom datetime import datetime\r\nfrom pprint import pprint\r\n\r\n\"\"\"\r\n __ ___\r\n \\ \\ / (_)\r\n \\ \\ / / _ _____ _____\r\n \\ \\/ / | |/ _ \\ \\ /\\ / / __|\r\n \\ / | | __/\\ V V /\\__ \\\r\n \\/ |_|\\___| \\_/\\_/ |___/\r\n\r\nSome views that will make later queries easier. Put these at the bottom of buildDB.sql\r\n\"\"\"\r\n\r\n\"\"\"\r\nThis looks like a mess, admittedly. And there's probably a better way to do it.\r\nThis view (transit_connect) basically holds all of the transits that a user may take.\r\nIt joins the transit table and connect table, and then joins to a temporary table that\r\ncalculates the number of connected sites (which is necessary according to the PDF).\r\n\r\nAlso, I rename some of the columns in this view because it might save us a few lines of Python later when we have to\r\ndisplay each column.\r\n\r\nCREATE VIEW transit_connect AS\r\nSELECT T.TransportType, T.Route, T.Price, C.SiteName, tmp.num_sites as NumSites\r\n FROM transit AS T JOIN connect AS C\r\n ON (T.TransportType, T.Route) = (C.TransportType, C.Route)\r\n JOIN (SELECT TransportType, Route, count(*) AS num_sites FROM connect GROUP BY TransportType, Route) AS tmp\r\n ON (T.TransportType, T.Route) = (tmp.TransportType, tmp.Route);\r\n\r\nCREATE VIEW emp_profile AS\r\nSELECT E.EmpUsername, E.EmployeeID, E.Phone, Concat(E.Address, ', ', E.City, ' ', E.State, ', ', E.Zipcode) as Address\r\n\tFROM Employee as E;\r\n\r\nCREATE VIEW user_type AS\r\nSELECT Username, CASE WHEN EXISTS(SELECT * FROM manager WHERE ManUsername = u.Username) = 1 THEN 'Manager' collate utf8mb4_general_ci\r\n\t\t\t\t WHEN EXISTS(SELECT * FROM staff WHERE StaffUsername = u.Username) = 1 THEN 'Staff' collate utf8mb4_general_ci\r\n\t\t\t\t WHEN EXISTS(SELECT * FROM visitor WHERE VisUsername = u.Username) = 1 THEN 'Visitor' collate utf8mb4_general_ci\r\n ELSE 'User' collate utf8mb4_general_ci\r\n END AS UserType\r\nFROM User AS u WHERE NOT EXISTS(SELECT * FROM administrator WHERE AdminUsername = u.Username);\r\n\r\n\"\"\"\r\n\r\n\"\"\"\r\n _ _ ______ _ _ _ _ _\r\n | | | | | ____| | | (_) | (_) |\r\n | | | |___ ___ _ __ | |__ _ _ _ __ ___| |_ _ ___ _ __ __ _| |_| |_ _ _\r\n | | | / __|/ _ \\ '__| | __| | | | '_ \\ / __| __| |/ _ \\| '_ \\ / _` | | | __| | | |\r\n | |__| \\__ \\ __/ | | | | |_| | | | | (__| |_| | (_) | | | | (_| | | | |_| |_| |\r\n \\____/|___/\\___|_| |_| \\__,_|_| |_|\\___|\\__|_|\\___/|_| |_|\\__,_|_|_|\\__|\\__, |\r\n __/ |\r\n |___/\r\n\"\"\" \"\"\"SCREENS 15-16\"\"\"\r\n\r\n\r\nclass TakeTransit:\r\n \"\"\"(15) USER TAKE TRANSIT\"\"\"\r\n def __init__(self, connection):\r\n self.connection = connection\r\n\r\n def load(self):\r\n \"\"\"Returns a dict for col names, and a list of sites (for the Contain Site filter\r\n dropdown.\"\"\"\r\n with self.connection.cursor() as cursor:\r\n cursor.execute(\"SELECT Route, TransportType, Price, NumSites as '# Connected Sites' \"\r\n \"FROM transit_connect GROUP BY TransportType\") # Yes, inefficient since we're only\r\n transits = cursor.fetchall() # displaying a blank table on loadup. 
But,\r\n # it still gets the col names :/\r\n for i in transits:\r\n for key in i:\r\n i[key] = \"\"\r\n\r\n transits = {1: transits[1]} # Returns just col names, as we have to load a blank table to start with.\r\n\r\n cursor.execute(\"SELECT Name FROM site\")\r\n sites = [d['Name'] for d in cursor.fetchall()]\r\n\r\n return transits, sites\r\n\r\n def filter(self, p1=None, p2=None, site=None, transport_type=None, sort='TransportType'):\r\n \"\"\"Given two prices, a transport type and site, return a list of tuples that represent the possible transits.\"\"\"\r\n\r\n query = \"SELECT Route, TransportType, Price, NumSites as '# Connected Sites' FROM transit_connect WHERE 1=1 \"\r\n # 1=1 is there so I can add AND to every if statement and not have to check if there should be an\r\n # AND statement there\r\n if p1 and p2:\r\n query += f\"AND Price BETWEEN {p1} AND {p2} \"\r\n elif p1:\r\n query += f\"AND Price >= {p1} \"\r\n elif p2:\r\n query += f\"AND Price <= {p2} \"\r\n\r\n if transport_type:\r\n query += f\"AND TransportType = '{transport_type}' \"\r\n\r\n if site:\r\n query += f\"AND SiteName = '{site}' \"\r\n\r\n query += f'GROUP BY TransportType, Route ORDER BY {sort} DESC'\r\n\r\n with self.connection.cursor() as cursor:\r\n cursor.execute(query)\r\n transits = cursor.fetchall()\r\n\r\n for i in transits:\r\n for key in i:\r\n i[key] = str(i[key])\r\n transits = {i+1: transits[i] for i in range(len(transits))}\r\n\r\n if transits == {}:\r\n transits = self.load()[0]\r\n # Why does .fetchall() return an empty tuple if there are no results? Why not an empty dict like any reasonable person would want\r\n\r\n return transits\r\n\r\n\r\n def submit(self, route, transport_type, date, username):\r\n \"\"\"Given a route, transport_type, and date, submits an entry into the database. Returns 0 for a successful\r\n submission, -1 if the User attempts to take the same transport on the same day twice, and -2 if the inputted\r\n date is incorrect. 
\"\"\"\r\n\r\n with self.connection.cursor() as cursor:\r\n cursor.execute(f\"SELECT * FROM take WHERE Username = '{username}' AND Date = '{date}' AND Route = '{route}' \"\r\n f\"AND TransportType = '{transport_type}'\")\r\n if len(cursor.fetchall()) >= 1:\r\n # Create a window/popup alerting the user they cannot take the same transit twice\r\n return -1\r\n\r\n else:\r\n cursor.execute(f\"INSERT INTO take VALUES ('{username}', '{transport_type}', '{route}', '{date}')\")\r\n self.connection.commit()\r\n\r\n return 0\r\n\r\n\r\nclass TransitHistory:\r\n \"\"\"(16) USER TRANSIT HISTORY\"\"\"\r\n def __init__(self, connection):\r\n self.connection = connection\r\n\r\n def load(self):\r\n \"\"\"Given a username, returns a list of tuples that represents all of the transits a User has taken, and a list\r\n of sites for the Contain Site filter dropdown.\"\"\"\r\n\r\n # DO NOT DELETE THIS COMMENT\r\n # SELECT COLUMN_NAME FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = N'take';\r\n # Just in case the TAs get mean and delete every transit and ask us to load TransitHistory, this is the fix ^\r\n with self.connection.cursor() as cursor:\r\n cursor.execute(f\"SELECT Date, Route, TransportType, Price FROM transit_connect NATURAL JOIN take\")\r\n transits = cursor.fetchall()\r\n\r\n\r\n for i in transits:\r\n for key in i:\r\n i[key] = \"\"\r\n\r\n transits = {1: transits[0]} # Returns just col names, as we have to load a blank table to start with.\r\n\r\n cursor.execute(\"SELECT Name FROM site\")\r\n sites = [d['Name'] for d in cursor.fetchall()]\r\n\r\n return transits, sites\r\n\r\n\r\n def filter(self, username, d1=None, d2=None, transport_type=None, site=None, route=None, sort='Date'):\r\n \"\"\"Given two days (as strings or datetime objects), a transport type, site, and route, return a list of tuples\r\n that represents all of the transits a User has taken.\"\"\"\r\n\r\n query = f\"SELECT Date, Route, TransportType, Price FROM transit_connect NATURAL JOIN take WHERE \" \\\r\n f\"Username = '{username}' \"\r\n if d1 and d2:\r\n query += f\"AND Date BETWEEN '{d1}' AND '{d2}' \"\r\n elif d1:\r\n query += f\"AND Date >= '{d1}' \"\r\n elif d2:\r\n query += f\"AND Date <= '{d2}' \"\r\n\r\n if transport_type:\r\n query += f\"AND TransportType = '{transport_type}' \"\r\n\r\n if site:\r\n query += f\"AND SiteName = '{site}' \"\r\n\r\n if route:\r\n query += f\"AND Route = '{route}' \"\r\n\r\n query += f'GROUP BY TransportType, Route, Date ORDER BY {sort} DESC'\r\n\r\n with self.connection.cursor() as cursor:\r\n cursor.execute(query)\r\n transits = cursor.fetchall()\r\n\r\n for i in transits:\r\n for key in i:\r\n i[key] = str(i[key])\r\n transits = {i+1: transits[i] for i in range(len(transits))}\r\n\r\n if transits == {}:\r\n transits = self.load()[0] # Why does .fetchall() return an empty tuple if there are no results?\r\n print(query)\r\n return transits\r\n\r\n\r\n\r\n\r\n\"\"\"\r\n ______ _ ______ _ _ _ _ _\r\n | ____| | | | ____| | | (_) | (_) |\r\n | |__ _ __ ___ _ __ | | ___ _ _ ___ ___ | |__ _ _ _ __ ___| |_ _ ___ _ __ __ _| |_| |_ _ _\r\n | __| | '_ ` _ \\| '_ \\| |/ _ \\| | | |/ _ \\/ _ \\ | __| | | | '_ \\ / __| __| |/ _ \\| '_ \\ / _` | | | __| | | |\r\n | |____| | | | | | |_) | | (_) | |_| | __/ __/ | | | |_| | | | | (__| |_| | (_) | | | | (_| | | | |_| |_| |\r\n |______|_| |_| |_| .__/|_|\\___/ \\__, |\\___|\\___| |_| \\__,_|_| |_|\\___|\\__|_|\\___/|_| |_|\\__,_|_|_|\\__|\\__, |\r\n | | __/ | __/ |\r\n |_| |___/ |___/\r\n\"\"\"\r\n\r\n\r\nclass ManageProfile:\r\n \"\"\"(17) EMPLOYEE 
MANAGE PROFILE\"\"\"\r\n def __init__(self, connection):\r\n self.connection = connection\r\n\r\n def load(self, username):\r\n \"\"\"Returns a first name, last name, site name, emp ID, phone number, address (as a concatenated string), and\r\n a list of emails.\"\"\"\r\n with self.connection.cursor() as cursor:\r\n cursor.execute(f\"SELECT EmpUsername as 'Username', EmployeeID, Phone, Address, FirstName, LastName FROM \"\r\n f\"emp_profile JOIN user ON EmpUsername = Username WHERE EmpUsername = '{username}'\")\r\n username, empid, phone, address, fname, lname= cursor.fetchall()[0].values()\r\n\r\n cursor.execute(f\"SELECT Email FROM emails WHERE Username = '{username}'\")\r\n emails = [d['Email'] for d in cursor.fetchall()]\r\n\r\n cursor.execute(f\"SELECT Name FROM Site WHERE ManUsername = '{username}'\")\r\n site = cursor.fetchone()\r\n if site:\r\n site = site['Name']\r\n\r\n cursor.execute(f\"SELECT Exists(SELECT VisUsername FROM visitor WHERE VisUsername = '{username}') as Vis\")\r\n vis = cursor.fetchone()['Vis']\r\n if vis == 1:\r\n vis = True\r\n\r\n else:\r\n vis = False\r\n\r\n return fname, lname, empid, phone, address, emails, site, vis\r\n\r\n def submit(self, username, fname, lname, phone, emails, vis):\r\n\r\n with self.connection.cursor() as cursor:\r\n\r\n cursor.execute(f\"SELECT Email FROM emails WHERE Username != '{username}'\")\r\n all_emails = [d['Email'] for d in cursor.fetchall()]\r\n if any(i in all_emails for i in emails):\r\n return -1\r\n\r\n cursor.execute(f\"DELETE FROM emails WHERE Username = '{username}'\")\r\n self.connection.commit()\r\n\r\n for email in emails:\r\n cursor.execute(f\"INSERT INTO emails VALUES ('{username}', '{email}')\")\r\n self.connection.commit()\r\n\r\n cursor.execute(f\"UPDATE user SET FirstName = '{fname}', LastName = '{lname}' WHERE Username = '{username}'\")\r\n self.connection.commit()\r\n\r\n cursor.execute(f\"UPDATE employee SET phone = '{phone}' WHERE EmpUsername = '{username}'\")\r\n self.connection.commit()\r\n\r\n if vis and vis != self.get_vis(username):\r\n cursor.execute(f\"INSERT INTO visitor VALUES ('{username}')\")\r\n self.connection.commit()\r\n\r\n elif not vis and vis != self.get_vis(username):\r\n cursor.execute(f\"DELETE FROM visitor WHERE VisUsername = '{username}'\")\r\n self.connection.commit()\r\n\r\n return 0\r\n\r\n def get_vis(self, username):\r\n with self.connection.cursor() as cursor:\r\n cursor.execute(f\"SELECT Exists(SELECT VisUsername FROM visitor WHERE VisUsername = '{username}') as Vis\")\r\n vis = cursor.fetchone()['Vis']\r\n if vis == 1:\r\n vis = True\r\n\r\n else:\r\n vis = False\r\n\r\n return vis\r\n\r\n\r\nclass ManageUser:\r\n \"\"\"(18) ADMIN MANAGE USER\"\"\"\r\n def __init__(self, connection):\r\n self.connection = connection\r\n\r\n def load(self):\r\n with self.connection.cursor() as cursor:\r\n cursor.execute(\"SELECT Username, COUNT(Email) AS 'Email Count', UserType, Status FROM user NATURAL JOIN emails \"\r\n \"NATURAL JOIN user_type GROUP BY Username\")\r\n users = cursor.fetchall()\r\n\r\n for i in users:\r\n for key in i:\r\n i[key] = \"\"\r\n\r\n users = {1: users[1]} # Returns just col names, as we have to load a blank table to start with.\r\n\r\n return users\r\n\r\n def filter(self, username=None, user_type=None, status=None, sort='Username'):\r\n\r\n query = \"SELECT Username, COUNT(Email) AS 'Email Count', UserType, Status FROM user NATURAL JOIN emails \" \\\r\n \"NATURAL JOIN user_type WHERE 1=1 \"\r\n if username:\r\n query += f\"AND Username = '{username}' \"\r\n\r\n if 
user_type:\r\n query += f\"AND UserType = '{user_type}' \"\r\n\r\n if status:\r\n query += f\"AND Status = '{status}' \"\r\n\r\n query += f'GROUP BY Username ORDER BY {sort} DESC'\r\n\r\n with self.connection.cursor() as cursor:\r\n print(query)\r\n cursor.execute(query)\r\n users = cursor.fetchall()\r\n\r\n for i in users:\r\n for key in i:\r\n i[key] = str(i[key])\r\n users = {i+1: users[i] for i in range(len(users))}\r\n\r\n if users == {}:\r\n users = self.load()\r\n print(users)\r\n return users\r\n\r\n\r\n def submit(self, username, status):\r\n with self.connection.cursor() as cursor:\r\n cursor.execute(f\"UPDATE user SET Status = '{status}' WHERE Username = '{username}'\")\r\n self.connection.commit()\r\n\r\n\r\nclass ManageSite:\r\n \"\"\"(20) ADMIN EDIT SITE\"\"\"\r\n def __init__(self, connection):\r\n self.connection = connection\r\n\r\n def load(self):\r\n with self.connection.cursor() as cursor:\r\n cursor.execute(\"SELECT Name as SiteName, Manager, OpenEveryday FROM site AS s JOIN \"\r\n \"(SELECT ManUsername, Concat(FirstName, ' ', LastName) as Manager FROM manager \"\r\n \"JOIN user ON ManUsername = Username) as tmp ON tmp.ManUsername = s.ManUsername\")\r\n sites = cursor.fetchall()\r\n\r\n for i in sites:\r\n for key in i:\r\n i[key] = \"\"\r\n\r\n sites = {1: sites[1]} # Returns just col names, as we have to load a blank table to start with.\r\n\r\n cursor.execute(\"SELECT DISTINCT ManUsername, FirstName, LastName FROM user JOIN manager ON Username = ManUsername\")\r\n managers = [f\"{d['FirstName']} {d['LastName']}\" for d in cursor.fetchall()]\r\n\r\n cursor.execute(\"SELECT Name FROM site\")\r\n sitenames = [d['Name'] for d in cursor.fetchall()]\r\n\r\n return sites, sitenames, managers\r\n\r\n def filter(self, site=None, manager=None, everyday=None, sort='SiteName'):\r\n\r\n query = \"SELECT Name as SiteName, Manager, OpenEveryday FROM site AS s JOIN \" \\\r\n \"(SELECT ManUsername, Concat(FirstName, ' ', LastName) as Manager FROM manager \" \\\r\n \"JOIN user ON ManUsername = Username) as tmp ON tmp.ManUsername = s.ManUsername \" \\\r\n \"WHERE 1=1 \"\r\n if site:\r\n query += f\"AND Name = '{site}' \"\r\n\r\n if manager:\r\n query += f\"AND Manager = '{manager}' \"\r\n\r\n if everyday is not None:\r\n query += f\"AND OpenEveryday = {everyday} \"\r\n\r\n query += f'ORDER BY {sort} DESC'\r\n\r\n with self.connection.cursor() as cursor:\r\n print(query)\r\n cursor.execute(query)\r\n sites = cursor.fetchall()\r\n\r\n for i in sites:\r\n for key in i:\r\n i[key] = str(i[key])\r\n sites = {i+1: sites[i] for i in range(len(sites))}\r\n print(sites)\r\n for d in sites.values():\r\n d['OpenEveryday'] = 'false' if d['OpenEveryday'] == '0' else 'true'\r\n\r\n if sites == {}:\r\n return self.load()[0]\r\n else:\r\n return sites\r\n\r\n def delete(self, sitename):\r\n with self.connection.cursor() as cursor:\r\n cursor.execute(f\"DELETE FROM site WHERE Name = '{sitename}'\")\r\n self.connection.commit()\r\n\r\n\r\nclass EditSite:\r\n \"\"\"(19) ADMIN MANAGE SITE\"\"\"\r\n def __init__(self, connection):\r\n self.connection = connection\r\n\r\n def load(self, sitename):\r\n with self.connection.cursor() as cursor:\r\n cursor.execute(\"SELECT Manager, Zipcode, OpenEveryday, Address FROM site AS s JOIN \"\r\n \"(SELECT ManUsername, Concat(FirstName, ' ', LastName) as Manager FROM manager \"\r\n \"JOIN user ON ManUsername = Username) as tmp ON tmp.ManUsername = s.ManUsername \"\r\n f\"WHERE Name = '{sitename}'\")\r\n site = cursor.fetchone()\r\n manager, zipcode, everyday, address 
= site['Manager'], site['Zipcode'], site['OpenEveryday'], site['Address']\r\n everyday = True if everyday == 1 else False\r\n\r\n cursor.execute(\"SELECT ManUsername, FirstName, LastName FROM user JOIN manager ON Username = ManUsername \"\r\n \"WHERE ManUsername NOT IN (SELECT ManUsername FROM site)\")\r\n managers = [f\"{d['FirstName']} {d['LastName']}\" for d in cursor.fetchall()]\r\n\r\n cursor.execute(\"SELECT Name FROM site\")\r\n sitenames = [d['Name'] for d in cursor.fetchall()]\r\n\r\n return manager, managers, zipcode, address, everyday\r\n\r\n def update(self, sitename, address, zipcode, manager, everyday, original):\r\n with self.connection.cursor() as cursor:\r\n cursor.execute(\"SELECT ManUsername FROM user JOIN manager ON Username = ManUsername \"\r\n f\"WHERE Concat(FirstName, ' ', LastName) = '{manager}'\")\r\n manager = cursor.fetchone()['ManUsername']\r\n if sitename == original:\r\n cursor.execute(f\"UPDATE Site SET Address = '{address}', Zipcode = {zipcode}, ManUsername = '{manager}', \"\r\n f\"OpenEveryday = {'true' if everyday else 'false'} WHERE Name = '{sitename}'\")\r\n self.connection.commit()\r\n\r\n else:\r\n cursor.execute(\"SELECT Name FROM site\")\r\n sites = [d['Name'] for d in cursor.fetchall()]\r\n\r\n if sitename in sites:\r\n return -1\r\n\r\n else:\r\n cursor.execute(f\"UPDATE Site SET Address = '{address}', Zipcode = {zipcode}, \"\r\n f\"ManUsername = '{manager}', OpenEveryday = {'true' if everyday else 'false'}, \"\r\n f\"Name = '{sitename}' WHERE Name = '{original}'\")\r\n self.connection.commit()\r\n\r\n\r\nclass CreateSite:\r\n \"\"\"(21) ADMIN CREATE SITE\"\"\"\r\n def __init__(self, connection):\r\n self.connection = connection\r\n\r\n def load(self):\r\n with self.connection.cursor() as cursor:\r\n cursor.execute(\"SELECT FirstName, LastName FROM user JOIN manager ON Username = ManUsername \"\r\n \"WHERE ManUsername NOT IN (SELECT ManUsername FROM site)\")\r\n return [f\"{d['FirstName']} {d['LastName']}\" for d in cursor.fetchall()]\r\n\r\n def create(self, sitename, address, zipcode, manager, everyday):\r\n with self.connection.cursor() as cursor:\r\n cursor.execute(\"SELECT ManUsername FROM user JOIN manager ON Username = ManUsername \"\r\n f\"WHERE Concat(FirstName, ' ', LastName) = '{manager}'\")\r\n manager = cursor.fetchone()['ManUsername']\r\n\r\n cursor.execute(\"SELECT Name FROM site\")\r\n sites = [d['Name'] for d in cursor.fetchall()]\r\n\r\n if sitename in sites:\r\n return -1\r\n\r\n else:\r\n cursor.execute(f\"INSERT INTO Site VALUES ('{sitename}', '{address}', {zipcode}, \"\r\n f\"{'true' if everyday else 'false'}, '{manager}')\")\r\n self.connection.commit()\r\n\r\n\r\nclass ManageTransit:\r\n \"\"\"(20) ADMIN MANAGE TRANSIT\"\"\"\r\n def __init__(self, connection):\r\n self.connection = connection\r\n\r\n def load(self):\r\n with self.connection.cursor() as cursor:\r\n cursor.execute(\"SELECT Route, TransportType, Price, NumSites AS '# Connected Sites', \"\r\n \"NumTaken as '# Transits Logged' FROM transit_connect NATURAL JOIN \"\r\n \"(SELECT TransportType, Route, COUNT(*) as NumTaken FROM take GROUP BY TransportType, Route) \"\r\n \"as tmp GROUP BY Route, TransportType\")\r\n transits = cursor.fetchall()\r\n\r\n for i in transits:\r\n for key in i:\r\n i[key] = \"\"\r\n\r\n transits = {1: transits[1]} # Returns just col names, as we have to load a blank table to start with.\r\n\r\n cursor.execute(\"SELECT Name FROM site\")\r\n sitenames = [d['Name'] for d in cursor.fetchall()]\r\n\r\n return transits, sitenames\r\n\r\n def 
filter(self, sitename=None, ttype=None, route=None, p1=None, p2=None, sort='TransportType'):\r\n\r\n query = \"SELECT Route, TransportType, Price, NumSites AS '# Connected Sites', NumTaken as '# Transits Logged' \" \\\r\n \"FROM transit_connect NATURAL JOIN ((SELECT TransportType, Route, COUNT(*) as NumTaken FROM take \" \\\r\n \"NATURAL JOIN transit Group By TransportType, Route) UNION (SELECT TransportType, Route, 0 \" \\\r\n \"FROM transit WHERE (TransportType, Route) NOT IN (SELECT TransportType, Route FROM take) \" \\\r\n \"GROUP BY TransportType, Route)) as tmp WHERE 1=1 \" # What a monster query.\r\n\r\n if sitename:\r\n query += f\"AND SiteName = '{sitename}' \"\r\n\r\n if ttype:\r\n query += f\"AND TransportType = '{ttype}' \"\r\n\r\n if route is not None:\r\n query += f\"AND Route = '{route}' \"\r\n\r\n if p1 and p2:\r\n query += f\"AND Price BETWEEN {p1} AND {p2} \"\r\n elif p1:\r\n query += f\"AND Price >= {p1} \"\r\n elif p2:\r\n query += f\"AND Price <= {p2} \"\r\n\r\n query += f'GROUP BY TransportType, Route ORDER BY {sort} DESC'\r\n\r\n with self.connection.cursor() as cursor:\r\n print(query)\r\n cursor.execute(query)\r\n transits = cursor.fetchall()\r\n\r\n for i in transits:\r\n for key in i:\r\n i[key] = str(i[key])\r\n transits = {i+1: transits[i] for i in range(len(transits))}\r\n\r\n if transits == {}:\r\n return self.load()[0]\r\n else:\r\n return transits\r\n\r\n def delete(self, ttype, route):\r\n with self.connection.cursor() as cursor:\r\n cursor.execute(f\"DELETE FROM transit WHERE TransportType = '{ttype}' AND Route = '{route}'\")\r\n self.connection.commit()\r\n\r\n\r\nclass EditTransit:\r\n \"\"\"(23) ADMIN EDIT SITE\"\"\"\r\n def __init__(self, connection):\r\n self.connection = connection\r\n\r\n def load(self, ttype, route):\r\n with self.connection.cursor() as cursor:\r\n cursor.execute(f\"SELECT Price, SiteName FROM transit_connect WHERE TransportType = '{ttype}' AND Route = '{route}'\")\r\n sites = cursor.fetchall()\r\n\r\n price = sites[0]['Price']\r\n connected_sites = [d['SiteName'] for d in sites]\r\n\r\n cursor.execute(f\"SELECT SiteName FROM transit_connect WHERE SiteName NOT IN (SELECT SiteName \"\r\n f\"FROM transit_connect WHERE TransportType = '{ttype}' AND Route = '{route}')\")\r\n other_sites = [d['SiteName'] for d in cursor.fetchall()]\r\n\r\n\r\n\r\n return price, connected_sites, other_sites\r\n\r\n def submit(self, ttype, route, price, sites, original):\r\n with self.connection.cursor() as cursor:\r\n try:\r\n cursor.execute(f\"DELETE FROM connect WHERE Route='{original}' AND TransportType='{ttype}'\")\r\n for site in sites:\r\n cursor.execute(f\"INSERT INTO connect VALUES ('{site}', '{ttype}', '{original}')\")\r\n\r\n cursor.execute(f\"UPDATE transit SET Route='{route}', Price='{price}' WHERE TransportType='{ttype}' AND Route='{original}'\")\r\n self.connection.commit()\r\n\r\n except Exception as e:\r\n print(e)\r\n return -1\r\n\r\n\r\nclass CreateTransit:\r\n \"\"\"(24) ADMIN CREATE TRANSIT\"\"\"\r\n def __init__(self, connection):\r\n self.connection = connection\r\n\r\n def load(self):\r\n with self.connection.cursor() as cursor:\r\n cursor.execute(\"SELECT Name FROM site\")\r\n\r\n sites = [d['Name'] for d in cursor.fetchall()]\r\n return sites\r\n\r\n def create(self, ttype, route, price, sites):\r\n with self.connection.cursor() as cursor:\r\n cursor.execute(\"SELECT TransportType, Route FROM transit\")\r\n transits = [(d['TransportType'], d['Route']) for d in cursor.fetchall()]\r\n\r\n if (ttype, route) in transits:\r\n return 
-1\r\n\r\n else:\r\n cursor.execute(f\"INSERT INTO transit VALUES ('{ttype}', '{route}', {price})\")\r\n self.connection.commit()\r\n for site in sites:\r\n cursor.execute(f\"INSERT INTO connect VALUES ('{site}', '{ttype}', '{route}')\")\r\n self.connection.commit()\r\n\r\n\r\nclass ManageEvent:\r\n \"\"\"(25) MANAGER MANAGE EVENT\"\"\"\r\n\r\n def __init__(self, connection):\r\n self.connection = connection\r\n\r\n def load(self):\r\n with self.connection.cursor() as cursor:\r\n cursor.execute(\"SELECT EventName, SiteName, StartDate, StaffCount, Duration, Visits, Revenue FROM manage_event\")\r\n events = cursor.fetchall()\r\n\r\n for i in events:\r\n for key in i:\r\n i[key] = \"\"\r\n\r\n events = {1: events[1]} # Returns just col names, as we have to load a blank table to start with.\r\n\r\n return events\r\n\r\n def filter(self, manager, name=None, keyword=None, d1=None, d2=None, dur1=None, dur2=None, vis1=None, vis2=None, rev1=None, rev2=None, sort='EventName'):\r\n\r\n query = f\"SELECT EventName, SiteName, StartDate, StaffCount, Duration, Visits, Revenue FROM manage_event WHERE ManUsername = '{manager}' \"\r\n\r\n if name:\r\n query += f\"AND EventName LIKE '%{name}%' \"\r\n\r\n if keyword:\r\n query += f\"AND Description LIKE '%{keyword}%' \"\r\n\r\n if d1 and d2:\r\n query += f\"AND StartDate IN (SELECT StartDate FROM event WHERE StartDate BETWEEN {d1} AND {d2}) \"\r\n elif d1:\r\n query += f\"AND StartDate IN (SELECT StartDate FROM event WHERE StartDate >= {d1}) \"\r\n elif d2:\r\n f\"AND StartDate IN (SELECT StartDate FROM event WHERE StartDate <= {d2}) \"\r\n\r\n if dur1 and dur2:\r\n query += f\"AND Duration BETWEEN {dur1} AND {dur2} \"\r\n elif dur1:\r\n query += f\"AND Duration >= {dur1} \"\r\n elif dur2:\r\n f\"AND Duration <= {dur2} \"\r\n\r\n if vis1 and vis2:\r\n query += f\"AND Visits BETWEEN {vis1} AND {vis2} \"\r\n elif vis1:\r\n query += f\"AND Visits >= {vis1} \"\r\n elif vis2:\r\n f\"AND Visits <= {vis2} \"\r\n\r\n if rev1 and rev2:\r\n query += f\"AND Revenue BETWEEN {rev1} AND {rev2} \"\r\n elif rev1:\r\n query += f\"AND Revenue >= {rev1} \"\r\n elif rev2:\r\n f\"AND Revenue <= {rev2} \"\r\n\r\n query += f'GROUP BY EventName, SiteName, StartDate ORDER BY {sort} DESC'\r\n\r\n with self.connection.cursor() as cursor:\r\n print(query)\r\n cursor.execute(query)\r\n events = cursor.fetchall()\r\n\r\n for i in events:\r\n for key in i:\r\n i[key] = str(i[key])\r\n events = {i + 1: events[i] for i in range(len(events))}\r\n\r\n if events == {}:\r\n return self.load()\r\n else:\r\n return events\r\n\r\n def delete(self, eventname, sitename, startdate):\r\n with self.connection.cursor() as cursor:\r\n cursor.execute(f\"DELETE FROM event WHERE EventName = '{eventname}' AND SiteName = '{sitename}' AND StartDate = '{startdate}'\")\r\n\r\n\r\nclass EditEvent:\r\n \"\"\"(26) MANAGER EDIT EVENT\"\"\"\r\n\r\n def __init__(self, connection):\r\n self.connection = connection\r\n\r\n def load(self, manager, eventname, sitename, startdate):\r\n with self.connection.cursor() as cursor:\r\n\r\n cursor.execute(\"SELECT Price, EndDate, MinStaffReq, Capacity, Description FROM manage_event WHERE \"\r\n f\"ManUsername = '{manager}' AND EventName = '{eventname}' AND StartDate = '{startdate}'\")\r\n event = cursor.fetchone()\r\n price, enddate, minstaffreq, cap, desc = event['Price'], event['EndDate'], event['MinStaffReq'], event['Capacity'], event['Description']\r\n\r\n cursor.execute(f\"Select DISTINCT(CONCAT(FirstName, ' ', LastName, ' (', StaffUsername, ')')) AS StaffName FROM assignto 
JOIN user ON StaffUsername = Username \"\r\n f\"WHERE SiteName = '{sitename}' AND EventName = '{eventname}' AND StartDate = '{startdate}'\")\r\n cur_staff = [d['StaffName'] for d in cursor.fetchall()]\r\n\r\n cursor.execute(f\"\"\"\r\n SELECT Distinct(CONCAT(FirstName, ' ', Lastname, ' (', StaffUsername, ')')) AS StaffName FROM assignto JOIN user ON StaffUsername = Username\r\n WHERE StaffUsername NOT IN (SELECT StaffUsername from assignto NATURAL JOIN event WHERE SiteName = '{sitename}' AND EventName = '{eventname}' AND StartDate = '{startdate}')\r\n AND StaffUsername NOT IN (SELECT StaffUsername FROM assignto NATURAL JOIN event WHERE StaffUsername = ANY(SELECT StaffUsername FROM assignto WHERE StartDate BETWEEN '{startdate}' AND '{enddate}'))\r\n AND StaffUsername NOT IN (SELECT StaffUsername FROM assignto NATURAL JOIN event WHERE StaffUsername = ANY(SELECT StaffUsername FROM assignto NATURAL JOIN event WHERE EndDate BETWEEN '{startdate}' AND '{enddate}'));\r\n \"\"\")\r\n avail_staff = [d['StaffName'] for d in cursor.fetchall()]\r\n\r\n cursor.execute(f\"\"\"\r\n SELECT gen_date AS Date, IFNULL(DailyVisits, 0) AS DailyVisits, IFNULL(DailyRevenue, 0) AS DailyRevenue FROM\r\n (SELECT Date, COUNT(VisUsername) AS DailyVisits, COUNT(VisUsername) * Price AS DailyRevenue\r\n FROM visitevent\r\n NATURAL JOIN\r\n event\r\n WHERE EventName = '{eventname}' AND SiteName = '{sitename}' AND StartDate = '{startdate}' GROUP BY Date) AS calc\r\n\r\n RIGHT JOIN\r\n dates_view\r\n ON gen_date = Date\r\n WHERE gen_date BETWEEN '{startdate}' AND '{enddate}'\r\n \"\"\")\r\n dailies = cursor.fetchall()\r\n\r\n for i in dailies:\r\n for key in i:\r\n i[key] = \"\"\r\n\r\n dailies = {1: dailies[0]} # Returns just col names, as we have to load a blank table to start with.\r\n\r\n return price, enddate, minstaffreq, cap, cur_staff, avail_staff, desc, dailies\r\n\r\n def filter(self, manager, eventname, sitename, startdate, rev1=None, rev2=None, vis1=None, vis2=None, sort='Date'):\r\n print(rev2)\r\n with self.connection.cursor() as cursor:\r\n cursor.execute(f\"SELECT EndDate FROM event WHERE EventName = '{eventname}' AND SiteName = '{sitename}' AND StartDate = '{startdate}'\")\r\n enddate = cursor.fetchone()['EndDate']\r\n\r\n query = f\"\"\"\r\n SELECT gen_date AS Date, IFNULL(DailyVisits, 0) AS DailyVisits, IFNULL(DailyRevenue, 0) AS DailyRevenue FROM\r\n (SELECT Date, COUNT(VisUsername) AS DailyVisits, COUNT(VisUsername) * Price AS DailyRevenue\r\n FROM visitevent\r\n NATURAL JOIN\r\n event\r\n WHERE EventName = '{eventname}' AND SiteName = '{sitename}' AND StartDate = '{startdate}' GROUP BY Date) AS calc\r\n\r\n RIGHT JOIN\r\n dates_view\r\n ON gen_date = Date\r\n WHERE gen_date BETWEEN '{startdate}' AND '{enddate} '\r\n \"\"\"\r\n\r\n if vis1 and vis2:\r\n query += f\"AND IFNULL(DailyVisits, 0) BETWEEN {vis1} AND {vis2} \"\r\n elif vis1:\r\n query += f\"AND IFNULL(DailyVisits, 0) >= {vis1} \"\r\n elif vis2:\r\n query += f\"AND IFNULL(DailyVisits, 0) <= {vis2} \"\r\n\r\n if rev1 and rev2:\r\n query += f\"AND IFNULL(DailyRevenue, 0) BETWEEN {rev1} AND {rev2} \"\r\n elif rev1:\r\n query += f\"AND IFNULL(DailyRevenue, 0) >= {rev1} \"\r\n elif rev2:\r\n query += f\"AND IFNULL(DailyRevenue, 0) <= {rev2} \"\r\n\r\n query += f\"ORDER BY {sort} DESC\"\r\n\r\n with self.connection.cursor() as cursor:\r\n print(query)\r\n cursor.execute(query)\r\n dailies = cursor.fetchall()\r\n\r\n for i in dailies:\r\n for key in i:\r\n i[key] = str(i[key])\r\n dailies = {i+1: dailies[i] for i in range(len(dailies))}\r\n\r\n if 
dailies == {}:\r\n return self.load(manager, eventname, sitename, startdate)[-1]\r\n else:\r\n return dailies\r\n\r\n def submit(self, eventname, sitename, startdate, desc, staff):\r\n with self.connection.cursor() as cursor:\r\n cursor.execute(f\"DELETE FROM assignto WHERE EventName = '{eventname}' AND SiteName = '{sitename}' AND StartDate = '{startdate}'\")\r\n self.connection.commit()\r\n for i in staff:\r\n print(i[i.find('(')+1:i.find(')')])\r\n cursor.execute(f\"INSERT INTO assignto VALUES ('{i[i.find('(')+1:i.find('}')]}', '{sitename}', '{eventname}', '{startdate}')\")\r\n self.connection.commit()\r\n\r\n cursor.execute(f'UPDATE event SET Description = \"{desc}\" WHERE EventName = \"{eventname}\" AND SiteName = \"{sitename}\" AND StartDate = \"{startdate}\"')\r\n self.connection.commit()\r\n\r\n\r\nclass CreateEvent:\r\n \"\"\"(27) MANAGER CREATE EVENT\"\"\"\r\n\r\n def __init__(self, connection):\r\n self.connection = connection\r\n\r\n def load(self):\r\n pass\r\n\r\n def get_staff(self, d1, d2):\r\n with self.connection.cursor() as cursor:\r\n cursor.execute(f\"SELECT Distinct(CONCAT(FirstName, ' ', Lastname, ' (', StaffUsername, ')')) AS StaffName FROM assignto JOIN user ON StaffUsername = Username \"\r\n f\"WHERE StaffUsername NOT IN (SELECT StaffUsername FROM assignto NATURAL JOIN event WHERE StaffUsername = ANY(SELECT StaffUsername FROM assignto WHERE StartDate BETWEEN '{d1}' AND '{d2}')) \"\r\n f\"AND StaffUsername NOT IN (SELECT StaffUsername FROM assignto NATURAL JOIN event WHERE StaffUsername = ANY(SELECT StaffUsername FROM assignto NATURAL JOIN event WHERE EndDate BETWEEN '{d1}' AND '{d2}'))\")\r\n return [d['StaffName'] for d in cursor.fetchall()]\r\n\r\n def create(self, manager, eventname, price, cap, minstaff, d1, d2, desc, staff):\r\n with self.connection.cursor() as cursor:\r\n try:\r\n cursor.execute(f\"SELECT Name FROM site WHERE ManUsername = '{manager}'\")\r\n sitename = cursor.fetchone()['Name']\r\n\r\n cursor.execute(f\"INSERT INTO event VALUES ('{sitename}', '{eventname}', '{d1}', '{d2}', {price}, {cap}, {minstaff}, '{desc}')\")\r\n\r\n for i in staff:\r\n cursor.execute(f\"INSERT INTO assignto VALUES ('{i[i.find('(')+1:i.find('}')]}', '{sitename}', '{eventname}', '{d1}')\")\r\n\r\n self.connection.commit()\r\n\r\n except:\r\n return -1\r\n\r\n\r\nclass ManageStaff:\r\n \"\"\"(28) MANAGER MANAGE STAFF\"\"\"\r\n def __init__(self, connection):\r\n self.connection = connection\r\n\r\n def load(self):\r\n with self.connection.cursor() as cursor:\r\n cursor.execute(f\"SELECT CONCAT(FirstName, ' ', LastName) AS Name, COUNT(*) As NumShifts FROM \"\r\n f\"assignto JOIN user on assignto.StaffUsername = user.Username GROUP BY Name\")\r\n staff = cursor.fetchall()\r\n\r\n for i in staff:\r\n for key in i:\r\n i[key] = \"\"\r\n\r\n staff = {1: staff[1]} # Returns just col names, as we have to load a blank table to start with.\r\n\r\n cursor.execute(f\"SELECT DISTINCT(Name) FROM site\")\r\n sites = cursor.fetchall()\r\n sites = [d['Name'] for d in sites]\r\n\r\n return staff, sites\r\n\r\n def filter(self, site=None, fname=None, lname=None, d1=None, d2=None, sort='Name'):\r\n query = \"SELECT CONCAT(FirstName, ' ', LastName) AS Name, COUNT(*) As NumShifts FROM \" \\\r\n \"assignto JOIN user on assignto.StaffUsername = user.Username WHERE 1=1 \"\r\n\r\n if site:\r\n query += f\"AND SiteName = '{site}'\"\r\n if fname:\r\n query += f\"AND FirstName LIKE '%{fname}%' \"\r\n if lname:\r\n query += f\"AND LastName LIKE '%{lname}%' \"\r\n if d1 and d2:\r\n query += f\"AND 
StartDate BETWEEN '{d1}' AND '{d2}' \"\r\n elif d1:\r\n query += f\"AND StartDate >= '{d1}' \"\r\n elif d2:\r\n query += f\"AND StartDate <= '{d2}' \"\r\n\r\n query += f\"GROUP BY Name ORDER BY {sort}\"\r\n with self.connection.cursor() as cursor:\r\n print(query)\r\n cursor.execute(query)\r\n staff = cursor.fetchall()\r\n\r\n for i in staff:\r\n for key in i:\r\n i[key] = str(i[key])\r\n staff = {i + 1: staff[i] for i in range(len(staff))}\r\n\r\n if staff == {}:\r\n return self.load()[0]\r\n else:\r\n return staff\r\n\r\n\r\nclass SiteReport:\r\n \"\"\"(29) MANAGER SITE REPORT\"\"\"\r\n def __init__(self, connection):\r\n self.connection = connection\r\n\r\n def load(self):\r\n with self.connection.cursor() as cursor:\r\n cursor.execute(\r\nf\"\"\"\r\nSELECT Date AS Date, IFNULL(EventCount, 0) AS EventCount, IFNULL(StaffCount, 0) AS StaffCount, IFNULL(TotalVisits, 0) AS TotalVisits, IFNULL(TotalRevenue, 0) AS TotalRevenue from\r\n(SELECT gen_date AS Date, Count(*) AS EventCount FROM event RIGHT JOIN dates_view ON gen_date BETWEEN StartDate AND EndDate WHERE StartDate BETWEEN '1111-11-11' AND '1111-11-11' AND SiteName = 'Lizard' GROUP BY Date) AS EC\r\nNATURAL JOIN\r\n(SELECT gen_date AS Date, Count(*) AS StaffCount FROM assignto NATURAL JOIN event RIGHT JOIN dates_view ON gen_date BETWEEN StartDate AND EndDate WHERE StartDate BETWEEN '1111-11-11' AND '1111-11-11' AND SiteName = 'Lizard' GROUP BY Date) AS SC\r\nNATURAL JOIN\r\n(SELECT gen_date AS Date, IFNULL(ETotal, 0) + IFNULL(STotal, 0) AS TotalVisits FROM dates_view LEFT JOIN (SELECT Date, COUNT(VisUsername) AS ETotal FROM visitevent WHERE Date BETWEEN '1111-11-11' AND '1111-11-11' AND SiteName = 'Lizard' GROUP BY Date) AS E ON gen_date = Date\r\nNATURAL LEFT JOIN (SELECT Date, COUNT(VisUsername) AS STotal FROM visitsite WHERE Date BETWEEN '1111-11-11' AND '1111-11-11' AND SiteName = 'Lizard' GROUP BY Date) AS S) AS TV\r\nNATURAL LEFT JOIN\r\n(SELECT Date, Q.Total AS TotalRevenue FROM (SELECT Date, Price * Count(VisUsername) AS Total FROM event NATURAL JOIN visitevent WHERE Date BETWEEN '1111-11-11' AND '1111-11-11' AND SiteName = 'Lizard' GROUP BY Date) AS Q) AS TR WHERE 1=1\r\n\"\"\")\r\n dailies = cursor.fetchall()\r\n for i in dailies:\r\n for key in i:\r\n i[key] = \"\"\r\n\r\n return {1: {'Date': '', 'EventCount': '', 'StaffCount': '', 'TotalVisits': '', 'TotalRevenue': ''}}\r\n\r\n def filter(self, manager, startdate, enddate, e1, e2, s1, s2, rev1, rev2, vis1, vis2, sort='Date'):\r\n with self.connection.cursor() as cursor:\r\n cursor.execute(f\"SELECT Name FROM site WHERE ManUsername = '{manager}'\")\r\n try:\r\n sitename = cursor.fetchone()['Name']\r\n except: # If the manager has no site assigned.\r\n return self.load()\r\n\r\n query = f\"\"\"\r\nSELECT gen_date AS Date, EventCount, StaffCount, TotalVisits, TotalRevenue from\r\n(SELECT gen_date, IFNULL(StaffCount, 0) AS StaffCount FROM (SELECT gen_date FROM dates_view WHERE gen_date BETWEEN '{startdate}' AND '{enddate}') AS D2 LEFT JOIN (SELECT gen_date AS Date, Count(*) AS StaffCount FROM assignto NATURAL JOIN event RIGHT JOIN dates_view ON gen_date BETWEEN StartDate AND EndDate WHERE StartDate BETWEEN '{startdate}' AND '{enddate}' AND SiteName = '{sitename}' GROUP BY Date) AS SC1 ON Date = gen_date) AS SC\r\nNATURAL JOIN\r\n(SELECT gen_date, IFNULL(EventCount, 0) AS EventCount FROM (SELECT gen_date FROM dates_view WHERE gen_date BETWEEN '{startdate}' AND '{enddate}') AS D1 LEFT JOIN (SELECT gen_date AS Date, Count(*) AS EventCount FROM event RIGHT JOIN dates_view ON 
gen_date BETWEEN StartDate AND EndDate WHERE StartDate BETWEEN '{startdate}' AND '{enddate}' AND SiteName = '{sitename}' GROUP BY Date) AS EC1 On Date = gen_date) AS EC\r\nNATURAL JOIN\r\n(SELECT gen_date, ETot + STot AS TotalVisits FROM\r\n\t(SELECT gen_date, IFNULL(ETotal, 0) AS ETot FROM dates_view LEFT JOIN (SELECT Date, COUNT(VisUsername) AS ETotal FROM visitevent WHERE Date BETWEEN '{startdate}' AND '{enddate}' AND SiteName = '{sitename}' GROUP BY Date) AS E ON gen_date = Date WHERE gen_date BETWEEN '{startdate}' AND '{enddate}') AS E\r\n\tNATURAL JOIN\r\n\t(SELECT gen_date, IFNULL(STotal, 0) AS STot FROM dates_view LEFT JOIN (SELECT Date, COUNT(VisUsername) AS STotal FROM visitsite WHERE Date BETWEEN '{startdate}' AND '{enddate}' AND SiteName = '{sitename}' GROUP BY Date) AS S ON gen_date = Date WHERE gen_date BETWEEN '{startdate}' AND '{enddate}') AS S) AS VTot\r\nNATURAL JOIN\r\n(SELECT gen_date, IFNULL(TotalRevenue, 0) AS TotalRevenue FROM (SELECT gen_date FROM dates_view WHERE gen_date BETWEEN '{startdate}' AND '{enddate}') AS D4 LEFT JOIN (SELECT Date, Price * Count(VisUsername) AS TotalRevenue FROM event NATURAL JOIN visitevent WHERE Date BETWEEN '{startdate}' AND '{enddate}' AND SiteName = '{sitename}' GROUP BY Date) AS TR1 ON Date = gen_date) AS RTot\r\nWHERE 1=1\r\n\"\"\"\r\n if e1 and e2:\r\n query += f\"AND EventCount BETWEEN {e1} AND {e2} \"\r\n elif e1:\r\n query += f\"AND EventCount >= {e1} \"\r\n elif e2:\r\n query += f\"AND EventCount <= {e2} \"\r\n\r\n if s1 and s2:\r\n query += f\"AND StaffCount BETWEEN {s1} AND {s2} \"\r\n elif s1:\r\n query += f\"AND StaffCount >= {s1} \"\r\n elif s2:\r\n query += f\"AND StaffCount <= {s2} \"\r\n\r\n if vis1 and vis2:\r\n query += f\"AND TotalVisits BETWEEN {vis1} AND {vis2} \"\r\n elif vis1:\r\n query += f\"AND TotalVisits >= {vis1} \"\r\n elif vis2:\r\n f\"AND Visits <= {vis2} \"\r\n\r\n if rev1 and rev2:\r\n query += f\"AND TotalRevenue BETWEEN {rev1} AND {rev2} \"\r\n elif rev1:\r\n query += f\"AND TotalRevenue >= {rev1} \"\r\n elif rev2:\r\n f\"AND TotalRevenue <= {rev2} \"\r\n\r\n query += f\"ORDER BY {sort}\"\r\n with self.connection.cursor() as cursor:\r\n print(query)\r\n cursor.execute(query)\r\n dailies = cursor.fetchall()\r\n\r\n for i in dailies:\r\n for key in i:\r\n i[key] = str(i[key])\r\n dailies = {i + 1: dailies[i] for i in range(len(dailies))}\r\n\r\n if dailies == {}:\r\n return self.load()\r\n else:\r\n return dailies\r\n\r\n\r\nclass DailyDetail:\r\n \"\"\"(30) MANAGER DAILY DETAIl\"\"\"\r\n def __init__(self, connection):\r\n self.connection = connection\r\n\r\n def filter(self, manager, date, sort='EventName'):\r\n with self.connection.cursor() as cursor:\r\n cursor.execute(f\"SELECT Name FROM site WHERE ManUsername = '{manager}'\")\r\n try:\r\n sitename = cursor.fetchone()['Name']\r\n except: # If the manager has no site assigned.\r\n return {1: {'EventName': '', 'StaffNames': '', 'NumVisits': '', 'Revenue': ''}}\r\n\r\n query = f\"\"\"\r\n SELECT EventName, StaffNames, NumVisits, Revenue FROM(\r\n (SELECT EventName, IFNULL(NumVisits, 0) AS NumVisits, IFNULL(NumVisits * Price, 0) AS Revenue FROM (SELECT EventName, StartDate, Price FROM event WHERE '2019-10-10' BETWEEN StartDate and EndDate AND SiteName = 'Piedmont Park') A NATURAL LEFT JOIN (SELECT EventName, COUNT(VisUsername) AS NumVisits FROM visitevent WHERE Date = '2019-10-10' AND SiteName = 'Piedmont Park' GROUP BY EventName) B) AS C\r\n NATURAL JOIN\r\n (SELECT EventName, GROUP_CONCAT(CONCAT(FirstName, ' ', LastName) ORDER BY FirstName ASC) AS 
StaffNames FROM event NATURAL JOIN assignto JOIN user ON Username = StaffUsername WHERE '{date}' BETWEEN StartDate AND EndDate AND SiteName = '{sitename}' GROUP BY EventName) AS D)\r\n ORDER BY {sort}\r\n\r\n \"\"\"\r\n with self.connection.cursor() as cursor:\r\n print(query)\r\n cursor.execute(query)\r\n events = cursor.fetchall()\r\n\r\n for i in events:\r\n for key in i:\r\n i[key] = str(i[key])\r\n events = {i + 1: events[i] for i in range(len(events))}\r\n\r\n if events == {}:\r\n return {1: {'EventName': '', 'StaffNames': '', 'NumVisits': '', 'Revenue': '', 'TotalRevenue': ''}}\r\n else:\r\n return events\r\n\r\n\r\nclass ViewSchedule:\r\n \"\"\"(31) STAFF VIEW SCHEDULE\"\"\"\r\n def __init__(self, connection):\r\n self.connection = connection\r\n\r\n def filter(self, staff, eventname=None, keyword=None, startdate=None, enddate=None, sort='EventName'):\r\n query = f\"SELECT EventName, SiteName, StartDate, EndDate, Count(StaffUsername) AS StaffCount FROM event NATURAL JOIN assignto \" \\\r\n f\"WHERE StaffUsername = '{staff}' \"\r\n\r\n if eventname:\r\n query += f\"AND EventName LIKE '%{eventname}%' \"\r\n if keyword:\r\n query += f\"AND Description LIKE '%{keyword}%' \"\r\n if startdate:\r\n query += f\"AND EndDate >= '{startdate}' \"\r\n if enddate:\r\n query += f\"AND StartDate <= '{enddate}' \"\r\n\r\n query += f\"GROUP BY EventName, SiteName, StartDate ORDER BY {sort}\"\r\n\r\n with self.connection.cursor() as cursor:\r\n print(query)\r\n cursor.execute(query)\r\n events = cursor.fetchall()\r\n\r\n for i in events:\r\n for key in i:\r\n i[key] = str(i[key])\r\n events = {i + 1: events[i] for i in range(len(events))}\r\n\r\n if events == {}:\r\n return {1: {'EventName': '', 'SiteName': '', 'StartDate': '', 'EndDate': '', 'StaffCount': ''}}\r\n else:\r\n return events\r\n\r\n\r\nclass StaffEventDetail:\r\n \"\"\"(30) MANAGER DAILY DETAIl\"\"\"\r\n def __init__(self, connection):\r\n self.connection = connection\r\n\r\n def load(self, eventname, sitename, startdate):\r\n with self.connection.cursor() as cursor:\r\n cursor.execute(f\"SELECT EndDate, DateDiff(EndDate, StartDate) + 1 AS Duration, Capacity, Price, Description \"\r\n f\"FROM event WHERE EventName = '{eventname}' AND SiteName = '{sitename}' AND StartDate = '{startdate}'\")\r\n details = cursor.fetchone()\r\n\r\n enddate, duration, cap, price, desc = details['EndDate'], details['Duration'], details['Capacity'], details['Price'], details['Description']\r\n\r\n cursor.execute(f\"SELECT GROUP_CONCAT(CONCAT(FirstName, ' ', LastName) ORDER BY FirstName ASC) AS StaffNames FROM assignto JOIN user ON StaffUsername = Username WHERE \"\r\n f\"EventName = '{eventname}' AND SiteName = '{sitename}' AND StartDate = '{startdate}'\")\r\n staffnames = cursor.fetchone()['StaffNames']\r\n\r\n return enddate, duration, cap, price, desc, staffnames\r\n\r\n\r\nclass visitorExploreEvent:\r\n \"\"\"(33) Visitor Explore Event\"\"\"\r\n def __init__(self, connection):\r\n self.connection = connection\r\n\r\n def load(self, identifier):\r\n with self.connection.cursor() as cursor:\r\n cursor.execute(\"SELECT EventName, SiteName, StartDate, Price, (Capacity - IFNULL(TotalVisits, 0)) AS TicketsRemaining, IFNULL(TotalVisits, 0) AS TotalNumVisits, IFNULL(MyCount, 0) AS MyVisits FROM ((SELECT EventName, SiteName, StartDate, Price, Capacity FROM event) AS e LEFT JOIN (SELECT EventName AS veEventName, SiteName AS veSiteName, StartDate AS veStartDate, COUNT(*) AS TotalVisits FROM visitevent GROUP BY EventName, SiteName, StartDate) AS ve ON (e.EventName = 
ve.veEventName AND e.SiteName = ve.veSiteName AND e.StartDate = ve.veStartDate) LEFT JOIN (SELECT EventName AS MYEventName, SiteName AS MYSiteName, StartDate AS MYStartDate, COUNT(*) AS MyCount FROM visitevent WHERE visUsername = \\\"\"+identifier+\"\\\" GROUP BY EventName, SiteName, StartDate) AS myve ON (e.EventName = myve.MYEventName AND e.SiteName = myve.MYSiteName AND e.StartDate = myve.MYStartDate))\")\r\n events = cursor.fetchall()\r\n\r\n for i in events:\r\n for key in i:\r\n i[key] = \"\"\r\n\r\n events = {1: events[1]} # Returns just col names, as we have to load a blank table to start with.\r\n\r\n cursor.execute(\"SELECT EventName, SiteName, Price, (Capacity - IFNULL(TotalVisits, 0)) AS TicketsRemaining, IFNULL(TotalVisits, 0) AS TotalNumVisits, IFNULL(MyCount, 0) AS MyVisits FROM ((SELECT EventName, SiteName, StartDate, Price, Capacity FROM event) AS e LEFT JOIN (SELECT EventName AS veEventName, SiteName AS veSiteName, StartDate AS veStartDate, COUNT(*) AS TotalVisits FROM visitevent GROUP BY EventName, SiteName, StartDate) AS ve ON (e.EventName = ve.veEventName AND e.SiteName = ve.veSiteName AND e.StartDate = ve.veStartDate) LEFT JOIN (SELECT EventName AS MYEventName, SiteName AS MYSiteName, StartDate AS MYStartDate, COUNT(*) AS MyCount FROM visitevent WHERE visUsername = \\\"\"+identifier+\"\\\" GROUP BY EventName, SiteName, StartDate) AS myve ON (e.EventName = myve.MYEventName AND e.SiteName = myve.MYSiteName AND e.StartDate = myve.MYStartDate)) WHERE 1=1 \")\r\n eventNames = [f\"{d['EventName']}\" for d in cursor.fetchall()]\r\n\r\n cursor.execute(\"SELECT DISTINCT Name FROM site WHERE 1=1 \")\r\n siteNames = [f\"{d['Name']}\" for d in cursor.fetchall()]\r\n\r\n cursor.execute(\"SELECT EventName, SiteName, StartDate, Price, (Capacity - IFNULL(TotalVisits, 0)) AS TicketsRemaining, IFNULL(TotalVisits, 0) AS TotalNumVisits, IFNULL(MyCount, 0) AS MyVisits FROM ((SELECT EventName, SiteName, StartDate, Price, Capacity FROM event) AS e LEFT JOIN (SELECT EventName AS veEventName, SiteName AS veSiteName, StartDate AS veStartDate, COUNT(*) AS TotalVisits FROM visitevent GROUP BY EventName, SiteName, StartDate) AS ve ON (e.EventName = ve.veEventName AND e.SiteName = ve.veSiteName AND e.StartDate = ve.veStartDate) LEFT JOIN (SELECT EventName AS MYEventName, SiteName AS MYSiteName, StartDate AS MYStartDate, COUNT(*) AS MyCount FROM visitevent WHERE visUsername = \\\"\"+identifier+\"\\\" GROUP BY EventName, SiteName, StartDate) AS myve ON (e.EventName = myve.MYEventName AND e.SiteName = myve.MYSiteName AND e.StartDate = myve.MYStartDate)) WHERE 1=1 \")\r\n startDates = [f\"{d['StartDate']}\" for d in cursor.fetchall()]\r\n\r\n cursor.execute(\"SELECT EventName, SiteName, Price, (Capacity - IFNULL(TotalVisits, 0)) AS TicketsRemaining, IFNULL(TotalVisits, 0) AS TotalNumVisits, IFNULL(MyCount, 0) AS MyVisits FROM ((SELECT EventName, SiteName, StartDate, Price, Capacity FROM event) AS e LEFT JOIN (SELECT EventName AS veEventName, SiteName AS veSiteName, StartDate AS veStartDate, COUNT(*) AS TotalVisits FROM visitevent GROUP BY EventName, SiteName, StartDate) AS ve ON (e.EventName = ve.veEventName AND e.SiteName = ve.veSiteName AND e.StartDate = ve.veStartDate) LEFT JOIN (SELECT EventName AS MYEventName, SiteName AS MYSiteName, StartDate AS MYStartDate, COUNT(*) AS MyCount FROM visitevent WHERE visUsername = \\\"\"+identifier+\"\\\" GROUP BY EventName, SiteName, StartDate) AS myve ON (e.EventName = myve.MYEventName AND e.SiteName = myve.MYSiteName AND e.StartDate = myve.MYStartDate)) WHERE 
1=1 \")\r\n ticketPrices = [f\"{d['Price']}\" for d in cursor.fetchall()]\r\n\r\n cursor.execute(\"SELECT EventName, SiteName, Price, (Capacity - IFNULL(TotalVisits, 0)) AS TicketsRemaining, IFNULL(TotalVisits, 0) AS TotalNumVisits, IFNULL(MyCount, 0) AS MyVisits FROM ((SELECT EventName, SiteName, StartDate, Price, Capacity FROM event) AS e LEFT JOIN (SELECT EventName AS veEventName, SiteName AS veSiteName, StartDate AS veStartDate, COUNT(*) AS TotalVisits FROM visitevent GROUP BY EventName, SiteName, StartDate) AS ve ON (e.EventName = ve.veEventName AND e.SiteName = ve.veSiteName AND e.StartDate = ve.veStartDate) LEFT JOIN (SELECT EventName AS MYEventName, SiteName AS MYSiteName, StartDate AS MYStartDate, COUNT(*) AS MyCount FROM visitevent WHERE visUsername = \\\"\"+identifier+\"\\\" GROUP BY EventName, SiteName, StartDate) AS myve ON (e.EventName = myve.MYEventName AND e.SiteName = myve.MYSiteName AND e.StartDate = myve.MYStartDate)) WHERE 1=1 \")\r\n ticketRemainings = [f\"{d['TicketsRemaining']}\" for d in cursor.fetchall()]\r\n\r\n cursor.execute(\"SELECT EventName, SiteName, Price, (Capacity - IFNULL(TotalVisits, 0)) AS TicketsRemaining, IFNULL(TotalVisits, 0) AS TotalNumVisits, IFNULL(MyCount, 0) AS MyVisits FROM ((SELECT EventName, SiteName, StartDate, Price, Capacity FROM event) AS e LEFT JOIN (SELECT EventName AS veEventName, SiteName AS veSiteName, StartDate AS veStartDate, COUNT(*) AS TotalVisits FROM visitevent GROUP BY EventName, SiteName, StartDate) AS ve ON (e.EventName = ve.veEventName AND e.SiteName = ve.veSiteName AND e.StartDate = ve.veStartDate) LEFT JOIN (SELECT EventName AS MYEventName, SiteName AS MYSiteName, StartDate AS MYStartDate, COUNT(*) AS MyCount FROM visitevent WHERE visUsername = \\\"\"+identifier+\"\\\" GROUP BY EventName, SiteName, StartDate) AS myve ON (e.EventName = myve.MYEventName AND e.SiteName = myve.MYSiteName AND e.StartDate = myve.MYStartDate)) WHERE 1=1 \")\r\n totalVisits = [f\"{d['TotalNumVisits']}\" for d in cursor.fetchall()]\r\n\r\n cursor.execute(\"SELECT EventName, SiteName, Price, (Capacity - IFNULL(TotalVisits, 0)) AS TicketsRemaining, IFNULL(TotalVisits, 0) AS TotalNumVisits, IFNULL(MyCount, 0) AS MyVisits FROM ((SELECT EventName, SiteName, StartDate, Price, Capacity FROM event) AS e LEFT JOIN (SELECT EventName AS veEventName, SiteName AS veSiteName, StartDate AS veStartDate, COUNT(*) AS TotalVisits FROM visitevent GROUP BY EventName, SiteName, StartDate) AS ve ON (e.EventName = ve.veEventName AND e.SiteName = ve.veSiteName AND e.StartDate = ve.veStartDate) LEFT JOIN (SELECT EventName AS MYEventName, SiteName AS MYSiteName, StartDate AS MYStartDate, COUNT(*) AS MyCount FROM visitevent WHERE visUsername = \\\"\"+identifier+\"\\\" GROUP BY EventName, SiteName, StartDate) AS myve ON (e.EventName = myve.MYEventName AND e.SiteName = myve.MYSiteName AND e.StartDate = myve.MYStartDate)) WHERE 1=1 \")\r\n myVisits = [f\"{d['MyVisits']}\" for d in cursor.fetchall()]\r\n\r\n return events, eventNames, siteNames, startDates, ticketPrices, ticketRemainings, totalVisits, myVisits\r\n\r\n def filter(self, identifier, event=None, site=None, keyword=None, startDate=None, endDate=None, TVR1=None, TVR2=None, TPR1=None, TPR2=None, includeVisited=None, includeSoldOut=None, sort='EventName'):\r\n\r\n query = \"SELECT EventName, SiteName, StartDate, Price, (Capacity - IFNULL(TotalVisits, 0)) AS TicketsRemaining, IFNULL(TotalVisits, 0) AS TotalNumVisits, IFNULL(MyCount, 0) AS MyVisits FROM ((SELECT * FROM event) AS e LEFT JOIN (SELECT EventName AS 
veEventName, SiteName AS veSiteName, StartDate AS veStartDate, COUNT(*) AS TotalVisits FROM visitevent GROUP BY EventName, SiteName, StartDate) AS ve ON (e.EventName = ve.veEventName AND e.SiteName = ve.veSiteName AND e.StartDate = ve.veStartDate) LEFT JOIN (SELECT EventName AS MYEventName, SiteName AS MYSiteName, StartDate AS MYStartDate, COUNT(*) AS MyCount FROM visitevent WHERE visUsername = \\\"\"+identifier+\"\\\" GROUP BY EventName, SiteName, StartDate) AS myve ON (e.EventName = myve.MYEventName AND e.SiteName = myve.MYSiteName AND e.StartDate = myve.MYStartDate)) WHERE 1=1\"\r\n\r\n if event is not None:\r\n query += f\" AND EventName = '{event}' \"\r\n\r\n if site is not None:\r\n query += f\" AND SiteName = '{site}' \"\r\n\r\n if keyword is not None:\r\n query += f\" AND Description LIKE '%{keyword}%' \"\r\n\r\n if startDate is not None and endDate is not None:\r\n query += f\" AND startDate BETWEEN '{startDate}' AND '{endDate}' \"\r\n query += f\" AND endDate BETWEEN '{startDate}' AND '{endDate}' \"\r\n elif startDate is not None:\r\n query += f\" AND startDate >= '{startDate}' \"\r\n elif endDate is not None:\r\n query += f\" AND endDate <= '{endDate}' \"\r\n\r\n if TVR1 is not None:\r\n query += f\" AND IFNULL(TotalVisits, 0) >= '{TVR1}' \"\r\n\r\n if TVR2 is not None:\r\n query += f\" AND IFNULL(TotalVisits, 0) <= '{TVR2}' \"\r\n\r\n if TPR1 is not None:\r\n query += f\" AND Price >= '{TPR1}' \"\r\n\r\n if TPR2 is not None:\r\n query += f\" AND Price <= '{TPR2}' \"\r\n\r\n if includeVisited != '1':\r\n query += f\" AND IFNULL(MyCount, 0) = 0 \"\r\n\r\n if includeSoldOut != '1':\r\n query += f\" AND (Capacity - IFNULL(TotalVisits, 0)) > 0 \"\r\n\r\n query += f' ORDER BY {sort} DESC'\r\n\r\n with self.connection.cursor() as cursor:\r\n print(query)\r\n cursor.execute(query)\r\n events = cursor.fetchall()\r\n\r\n for i in events:\r\n for key in i:\r\n i[key] = str(i[key])\r\n events = {i+1: events[i] for i in range(len(events))}\r\n print(events)\r\n # for d in events.values():\r\n # d['OpenEveryday'] = 'false' if d['OpenEveryday'] == '0' else 'true'\r\n\r\n if events == {}:\r\n return self.load(identifier)[0]\r\n else:\r\n return events\r\n\r\n\r\nclass visitorEventDetail:\r\n \"\"\"(34) Visitor Event Detail\"\"\"\r\n def __init__(self, connection):\r\n self.connection = connection\r\n\r\n def load(self, identifier, eventname, sitename, startdate):\r\n with self.connection.cursor() as cursor:\r\n cursor.execute(\"SELECT EventName, SiteName, Price, Description, StartDate, EndDate, (Capacity - IFNULL(TotalVisits, 0)) AS TicketsRemaining, IFNULL(TotalVisits, 0) AS TotalNumVisits, IFNULL(MyCount, 0) AS MyVisits FROM (\"\r\n \"(SELECT * FROM event) AS e \"\r\n \"LEFT JOIN (SELECT EventName AS veEventName, SiteName AS veSiteName, StartDate AS veStartDate, COUNT(*) AS TotalVisits \"\r\n \"FROM visitevent GROUP BY EventName, SiteName, StartDate) AS ve \"\r\n \"ON (e.EventName = ve.veEventName AND e.SiteName = ve.veSiteName AND e.StartDate = ve.veStartDate) \"\r\n \"LEFT JOIN (SELECT EventName AS MYEventName, SiteName AS MYSiteName, StartDate AS MYStartDate, COUNT(*) AS MyCount FROM visitevent WHERE visUsername = \\\"\" + identifier + \"\\\" \"\r\n \"GROUP BY EventName, SiteName, StartDate) AS myve \"\r\n \"ON (e.EventName = myve.MYEventName AND e.SiteName = myve.MYSiteName AND e.StartDate = myve.MYStartDate)\"\r\n \")\"\r\n f\"WHERE EventName = '{eventname}' AND SiteName = '{sitename}' AND StartDate = '{startdate}'\")\r\n event = cursor.fetchone()\r\n eventName, siteName, 
startDate, endDate, ticketPrice, ticketsRemaining, description = event['EventName'], event['SiteName'], event['StartDate'], event['EndDate'], event['Price'], event['TicketsRemaining'], event['Description']\r\n return eventName, siteName, startDate, endDate, ticketPrice, ticketsRemaining, description\r\n\r\nclass visitorTransitDetail:\r\n def __init__(self, connection):\r\n self.connection = connection\r\n\r\n def load(self, sitename):\r\n with self.connection.cursor() as cursor:\r\n cursor.execute(\"SELECT c.Route, c.TransportType, Price, cc.NumConnectedSites FROM (\"\r\n \"(SELECT Route, TransportType FROM connect WHERE SiteName = \\'\" +sitename+ \"\\') AS c \"\r\n \"JOIN (SELECT * FROM transit) AS t \"\r\n \"ON (t.Route = c.Route AND t.TransportType = c.TransportType) \"\r\n \"JOIN (SELECT COUNT(*) AS NumConnectedSites, Route, TransportType FROM connect GROUP BY Route, TransportType) AS cc \"\r\n \"ON (c.Route = cc.Route AND c.TransportType = cc.TransportType)\"\r\n \")\")\r\n routes = cursor.fetchall()\r\n\r\n for i in routes:\r\n for key in i:\r\n i[key] = \"\"\r\n\r\n print(routes)\r\n routes = {0: routes[0]}\r\n\r\n cursor.execute(\"SELECT c.Route, c.TransportType, Price, cc.NumConnectedSites FROM (\"\r\n \"(SELECT Route, TransportType FROM connect WHERE SiteName = \\'\" +sitename+ \"\\') AS c \"\r\n \"JOIN (SELECT * FROM transit) AS t \"\r\n \"ON (t.Route = c.Route AND t.TransportType = c.TransportType) \"\r\n \"JOIN (SELECT COUNT(*) AS NumConnectedSites, Route, TransportType FROM connect GROUP BY Route, TransportType) AS cc \"\r\n \"ON (c.Route = cc.Route AND c.TransportType = cc.TransportType)\"\r\n \")\")\r\n transportTypes = [f\"{d['TransportType']}\" for d in cursor.fetchall()]\r\n return routes, transportTypes\r\n\r\n def filter(self, sitename, transporttype):\r\n query = (\"SELECT c.Route, c.TransportType, Price, cc.NumConnectedSites FROM (\"\r\n \"(SELECT Route, TransportType FROM connect WHERE SiteName = \\'\" +sitename+ \"\\') AS c \"\r\n \"JOIN (SELECT * FROM transit) AS t \"\r\n \"ON (t.Route = c.Route AND t.TransportType = c.TransportType) \"\r\n \"JOIN (SELECT COUNT(*) AS NumConnectedSites, Route, TransportType FROM connect GROUP BY Route, TransportType) AS cc \"\r\n \"ON (c.Route = cc.Route AND c.TransportType = cc.TransportType)\"\r\n \")\")\r\n if(transporttype != 'Any'):\r\n query += \"WHERE c.TransportType = \\'\" +transporttype+ \"\\'\"\r\n\r\n\r\n\r\n with self.connection.cursor() as cursor:\r\n print(query)\r\n cursor.execute(query)\r\n routes = cursor.fetchall()\r\n\r\n for i in routes:\r\n for key in i:\r\n i[key] = str(i[key])\r\n routes = {i+1: routes[i] for i in range(len(routes))}\r\n\r\n return routes\r\n\r\nclass VisitorExploreSite:\r\n \"\"\"(35) VISITOR EXPLORE SITE\"\"\"\r\n def __init__(self, connection):\r\n self.connection = connection\r\n\r\n def load(self, username):\r\n \"\"\"Given username, create all the views\"\"\"\r\n with self.connection.cursor() as cursor:\r\n cursor.execute(f\"CREATE OR REPLACE VIEW `SiteTotal_view` AS select SiteName, EventName, \"\\\r\n f\"Date, TotalVisits, OpenEveryday from ((select SiteName, ' ' as EventName, \"\\\r\n f\"Date, Count(*) as TotalVisits from visitSite group by SiteName, EventName, \"\\\r\n f\"Date union all select SiteName, EventName, Date, Count(*) as TotalVisits from \"\\\r\n f\"visitEvent group by SiteName, EventName, Date) as T join (select Name, OpenEveryday \"\\\r\n f\"from Site) as s on s.Name = T.SiteName);\")\r\n self.connection.commit()\r\n\r\n cursor.execute(f\"CREATE OR REPLACE VIEW 
`SiteVis_view` AS select SiteName, EventName, Date, MyVisits, \"\\\r\n f\"OpenEveryday from ((select SiteName, ' ' as EventName, Date, Count(*) as MyVisits from \"\\\r\n f\"visitSite where VisUsername = '{username}' group by SiteName, EventName, Date union all \"\\\r\n f\"select SiteName, EventName, Date, Count(*) as MyVisits from visitEvent where VisUsername \"\r\n f\"= '{username}' group by SiteName, EventName, Date) as T1 join (select Name, OpenEveryday \"\\\r\n \"from Site) as s1 on s1.Name = T1.SiteName );\")\r\n self.connection.commit()\r\n\r\n cursor.execute(f\"CREATE OR REPLACE VIEW `OMG_view` AS select m1.SiteName, m1.Date, m1.TotalVisits, \"\\\r\n f\"m1.MyVisits, m1.OpenEveryday, m2.EventCount from (SELECT f1.SiteName, f1.EventName, f1.Date, \"\\\r\n f\"f1.TotalVisits, f1.OpenEveryday, IFNULL(f2.MyVisits, 0) as MyVisits FROM SiteTotal_View as f1 \"\\\r\n f\"LEFT JOIN SiteVis_View as f2 ON f1.SiteName = f2.SiteName and f1.EventName = f2.EventName and \"\\\r\n f\"f1.Date = f2.Date) as m1 left join (select SiteName, count(EventName) as EventCount from (SELECT \"\\\r\n f\"f1.SiteName, f1.EventName, f1.Date, f1.TotalVisits, f1.OpenEveryday, IFNULL(f2.MyVisits, 0) as \"\\\r\n f\"MyVisits FROM SiteTotal_View as f1 LEFT JOIN SiteVis_View as f2 ON f1.SiteName = f2.SiteName and \"\\\r\n \"f1.EventName = f2.EventName and f1.Date = f2.Date) as blah where EventName <> ' ' group by SiteName) \"\\\r\n f\"as m2 on m1.SiteName = m2.SiteName;\")\r\n self.connection.commit()\r\n\r\n cursor.execute(f\"select Name from Site;\")\r\n sites = cursor.fetchall()\r\n sites = [i['Name']for i in sites]\r\n sites = ['Any'] + sites\r\n return sites\r\n\r\n def filter(self, username, name=None, openEveryday=None, startDate=None, endDate=None, visitRangea=None, visitRangeb=None, countRangea=None, countRangeb=None, includeVisited=None, sort=\"SiteName\"):\r\n \"\"\"Given all the filter requirement, return dict of all site details\"\"\"\r\n query = f\"select SiteName, EventCount, sum(TotalVisits) as TotalVisits, sum(MyVisits) as MyVisits from OMG_view \"\r\n\r\n if includeVisited == '0':\r\n query += f\"where SiteName NOT IN (SELECT SiteName from visitsite WHERE VisUsername = '{username}') \"\r\n else:\r\n query += f\"where 1=1 \"\r\n\r\n if name:\r\n query += f\"and SiteName = '{name}' \"\r\n\r\n if openEveryday:\r\n query += f\"and OpenEveryday = {openEveryday} \"\r\n\r\n if startDate:\r\n query += f\"and Date >= '{startDate}' \"\r\n\r\n if endDate:\r\n query += f\"and Date <= '{endDate}' \"\r\n\r\n query += f\"group by SiteName HAVING SUM(EventCount) >= 0 \"\r\n\r\n if visitRangea:\r\n query += f\"and SUM(TotalVisits) >= {visitRangea} \"\r\n\r\n if visitRangeb:\r\n query += f\"and SUM(TotalVisits) <= {visitRangeb} \"\r\n\r\n if countRangea:\r\n query += f\"and SUM(EventCount) >= {countRangea} \"\r\n\r\n if countRangeb:\r\n query += f\"and SUM(EventCount) <= {countRangeb} \"\r\n\r\n query += f\"order by {sort}\"\r\n\r\n with self.connection.cursor() as cursor:\r\n print(query)\r\n cursor.execute(query)\r\n siteDetails = cursor.fetchall()\r\n\r\n\r\n if siteDetails:\r\n for i in siteDetails:\r\n for key in i:\r\n i[key] = str(i[key])\r\n siteDetails = {i+1: siteDetails[i] for i in range(len(siteDetails))}\r\n else:\r\n siteDetails = {1:{\"SiteName\":\"\",\"EventCount\":\"\",\"TotalVisits\":\"\",\"MyVisits\":\"\"}}\r\n\r\n return siteDetails\r\n\r\n\r\n\r\nclass visitorSiteDetail:\r\n \"\"\"(37) Visitor Site Detail\"\"\"\r\n def __init__(self, connection):\r\n self.connection = connection\r\n\r\n def load(self, 
sitename):\r\n with self.connection.cursor() as cursor:\r\n cursor.execute(\"SELECT Name, OpenEveryday, Address FROM site WHERE Name = \\'\" +sitename+ \"\\'\")\r\n site = cursor.fetchone()\r\n siteName, openEveryday, address = site[\"Name\"], site[\"OpenEveryday\"], site[\"Address\"]\r\n if(openEveryday == \"0\"):\r\n openEveryday = \"No\"\r\n else:\r\n openEveryday = \"Yes\"\r\n return siteName, openEveryday, address\r\n\r\n\r\nclass VisitHistory:\r\n \"\"\"(38) VISTOR VISIT HISTORY\"\"\"\r\n\r\n def __init__(self, connection):\r\n self.connection = connection\r\n\r\n def load(self, username):\r\n \"\"\"Given a username, return a list of sites and visit history\"\"\"\r\n with self.connection.cursor() as cursor:\r\n cursor.execute(\r\n f\"SELECT Name AS SiteName FROM site\")\r\n sites = cursor.fetchall()\r\n sites = [i['SiteName'] for i in sites]\r\n\r\n history = {1: {'Date': '', 'EventName': '', 'SiteName': '', 'Price': ''}}\r\n return sites, history\r\n\r\n def filter(self, username, startDate=None, endDate=None, event=None, site=None, sort='Date'):\r\n \"\"\"Given username or other filter requirements, return a dict represents visit history\"\"\"\r\n query = f\"select Date, EventName, SiteName, Price from (select v.VisUsername, v.SiteName, v.EventName, v.Date, e.Price from VisitEvent as v join Event as e \" \\\r\n f\"where v.SiteName = e.SiteName and v.EventName=e.EventName and v.StartDate = e.StartDate \" \\\r\n f\"union all select VisUsername, Sitename, ' ' as EventName, Date, 0 as Price from visitSite) as fullTable \" \\\r\n f\"where VisUsername = '{username}' \"\r\n\r\n if startDate and endDate:\r\n query += f\"and Date >= '{startDate}' and Date <= '{endDate}' \"\r\n elif startDate:\r\n query += f\"and Date >= '{startDate}' \"\r\n elif endDate:\r\n query += f\"and Date <= '{endDate}' \"\r\n\r\n if site:\r\n query += f\"and SiteName = '{site}' \"\r\n\r\n if event:\r\n query += f\"and EventName LIKE '%{event}%' \"\r\n\r\n # or order by EventName, SiteName, Price\r\n query += f\"ORDER BY {sort};\"\r\n\r\n with self.connection.cursor() as cursor:\r\n print(query)\r\n cursor.execute(query)\r\n history = cursor.fetchall()\r\n pprint(history)\r\n for i in history:\r\n for key in i:\r\n i[key] = str(i[key])\r\n pprint(history)\r\n history = {i + 1: history[i] for i in range(len(history))}\r\n pprint(history)\r\n if history == {}:\r\n return self.load(username)[1]\r\n else:\r\n return history\r\n"
},
{
"alpha_fraction": 0.8656716346740723,
"alphanum_fraction": 0.8656716346740723,
"avg_line_length": 32.5,
"blob_id": "68fa5a8730933aa326a3f9684f2691c33f558adf",
"content_id": "31bf9fdf3d993ef27a60f62c6ffa7f5f580491dd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 67,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 2,
"path": "/README.md",
"repo_name": "kcozzone3/AtlantaBeltline",
"src_encoding": "UTF-8",
"text": "# AtlantaBeltline\nAtlanta Beltline GUI and Database Implementation\n"
},
{
"alpha_fraction": 0.5795502066612244,
"alphanum_fraction": 0.6091699004173279,
"avg_line_length": 48.53129959106445,
"blob_id": "32ab81865c1de4fb4bc0225f1b7f33e7aff688e2",
"content_id": "5dc10ed7a7ad3ec3298f03a2c3077a49f3743dc5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 223601,
"license_type": "no_license",
"max_line_length": 413,
"num_lines": 4425,
"path": "/Beltline.py",
"repo_name": "kcozzone3/AtlantaBeltline",
"src_encoding": "UTF-8",
"text": "from tkinter import *\r\nfrom tkinter import messagebox\r\n\r\nimport pymysql\r\nimport Queries\r\n\r\nimport hashlib\r\nimport random\r\nfrom datetime import datetime\r\nfrom functools import partial\r\nimport re\r\n\r\nfrom tkintertable import TableModel, TableCanvas\r\n\r\n\r\n# PUT PASSWORD HERE\r\n#######################################\r\nMYSQL_PASSWORD = 'Gwhiteley99'\r\n#######################################\r\n\r\n\r\nclass Beltline(Frame):\r\n def __init__(self, root):\r\n Frame.__init__(self, root)\r\n self.root = root\r\n self.root.title(\"Atlanta Beltine DB Application\")\r\n self.root.withdraw()\r\n\r\n loginWindow = Login(self.root)\r\n loginWindow.display()\r\n\r\n\r\nclass Login(Toplevel):\r\n def __init__(self, master_window):\r\n Toplevel.__init__(self)\r\n self.master = master_window\r\n self.title('Beltline Login')\r\n self.config(background='#ffffff')\r\n\r\n def display(self):\r\n self.loginEmail = StringVar()\r\n self.loginPassword = StringVar()\r\n\r\n # create a label (text) on the login window with the text of login with certain other properties\r\n loginLabel = Label(self, text=\"Login\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n\r\n # we place this on the grid in 1,3 with some padding to make it look nice. Sticky determines where in the cell\r\n # it is placed\r\n loginLabel.grid(row=1, column=2, pady=(2, 6), sticky=W)\r\n\r\n # create a username label and place in the grid\r\n emailLabel = Label(self, text=\"Email\", foreground='#000000', background='#ffffff')\r\n emailLabel.grid(row=2, column=1, padx=(2, 5), pady=(0, 4), sticky=W)\r\n\r\n # create a username entry box, accounting for the inputted text to be the login username. We also set a width\r\n # for how many characters can be easily displayed\r\n emailBox = Entry(self, textvariable=self.loginEmail, width=20)\r\n emailBox.grid(row=2, column=2, padx=(0, 2), pady=(0, 4), sticky=E)\r\n\r\n # Password Label creation\r\n passwordLabel = Label(self, text=\"Password\", foreground='#000000', background='#ffffff')\r\n passwordLabel.grid(row=4, column=1, padx=(2, 5), pady=(0, 4), sticky=W)\r\n\r\n # Password Entry Box creation: difference to username is the show='*', which displays *** instead of abc\r\n passwordBox = Entry(self, show='*', textvariable=self.loginPassword, width=20)\r\n passwordBox.grid(row=4, column=2, padx=(0, 2), pady=(0, 4), sticky=E)\r\n\r\n # create buttons that as of now, do no logic checking, but simply move screens\r\n loginButton = Button(self, command=self.onLoginButtonClicked, text=\"Login\", background='#4286f4')\r\n loginButton.grid(row=5, column=1, padx=(2, 2), pady=(2, 2), sticky=E)\r\n registerButton = Button(self, command=self.onRegisterButtonClicked, text=\"Register\",\r\n background='#4286f4')\r\n registerButton.grid(row=5, column=2, padx=(2, 2), pady=(2, 2))\r\n\r\n def onRegisterButtonClicked(self):\r\n registerWindow = RegistrationNav(self)\r\n self.withdraw()\r\n registerWindow.display()\r\n\r\n def onLoginButtonClicked(self):\r\n self.email = self.loginEmail.get()\r\n self.password = self.loginPassword.get()\r\n\r\n if not self.email:\r\n messagebox.showwarning(\"Email Field Empty\", \"The email field is empty. Please try again.\")\r\n return\r\n\r\n if not self.password:\r\n messagebox.showwarning(\"Password Field Empty\", \"The password field is empty. 
Please try again.\")\r\n return\r\n\r\n hashedPassword = encrypt(self.password)\r\n usernameValid = cursor.execute(\"SELECT Username FROM user where Username = (SELECT Username FROM emails where Email=%s)\",\r\n self.email)\r\n\r\n if usernameValid == 0:\r\n messagebox.showwarning(\"Email Invalid\", \"This email isn't registered in the system.\")\r\n return\r\n else:\r\n results = cursor.fetchone()\r\n username = results['Username']\r\n print(results['Username'])\r\n\r\n\r\n passwordMatching = cursor.execute(\r\n \"SELECT * FROM user where EXISTS (SELECT * FROM user where (Username=%s and Password=%s))\",\r\n (username, hashedPassword))\r\n\r\n if passwordMatching == 0:\r\n messagebox.showwarning(\"Invalid Login\",\r\n \"This email and password combination is not registered in the system.\")\r\n return\r\n\r\n cursor.execute(\"SELECT status FROM user where Username=%s\", username)\r\n accountStatus = cursor.fetchone()\r\n accountStatus = accountStatus.get('status').lower()\r\n\r\n global identifier\r\n identifier = username\r\n\r\n if accountStatus == \"declined\":\r\n messagebox.showwarning(\"Banned Account\", \"Your account has been banned. Please contact an administrator.\")\r\n return\r\n elif accountStatus == \"pending\":\r\n messagebox.showwarning(\"Pending Approval\", \"Your account is pending approval. Please be patient.\")\r\n return\r\n\r\n isVisitor = cursor.execute(\"SELECT * FROM visitor where EXISTS (SELECT * FROM visitor where VisUsername=%s)\",\r\n username)\r\n isEmployee = cursor.execute(\r\n \"SELECT * FROM employee where EXISTS (SELECT * FROM employee where EmpUsername=%s)\", username)\r\n if isEmployee:\r\n isAdmin = cursor.execute(\r\n \"SELECT * FROM administrator where EXISTS (SELECT * FROM administrator where AdminUsername=%s)\",\r\n username)\r\n isManager = cursor.execute(\r\n \"SELECT * FROM manager where EXISTS (SELECT * FROM manager where ManUsername=%s)\", username)\r\n isStaff = cursor.execute(\"SELECT * FROM staff where EXISTS (SELECT * FROM staff where StaffUsername=%s)\",\r\n username)\r\n\r\n if isVisitor:\r\n if isEmployee:\r\n if isAdmin:\r\n administratorVisitorFunctionalityWindow = AdministratorVisitorFunctionality(self)\r\n self.withdraw()\r\n administratorVisitorFunctionalityWindow.display()\r\n elif isManager:\r\n managerVisitorFunctionalityWindow = ManagerVisitorFunctionality(self)\r\n self.withdraw()\r\n managerVisitorFunctionalityWindow.display()\r\n elif isStaff:\r\n staffVisitorFunctionalityWindow = StaffVisitorFunctionality(self)\r\n self.withdraw()\r\n staffVisitorFunctionalityWindow.display()\r\n else:\r\n messagebox.showwarning(\"Uhhh\", \"You shouldn't be here (employee-visitor).\")\r\n else:\r\n # Just a visitor\r\n visitorFunctionalityWindow = VisitorFunctionality(self)\r\n self.withdraw()\r\n visitorFunctionalityWindow.display()\r\n\r\n elif isEmployee:\r\n if isAdmin:\r\n administratorFunctionalityWindow = AdministratorFunctionality(self)\r\n self.withdraw()\r\n administratorFunctionalityWindow.display()\r\n elif isManager:\r\n managerFunctionalityWindow = ManagerFunctionality(self)\r\n self.withdraw()\r\n managerFunctionalityWindow.display()\r\n elif isStaff:\r\n staffFunctionalityWindow = StaffFunctionality(self)\r\n self.withdraw()\r\n staffFunctionalityWindow.display()\r\n else:\r\n messagebox.showwarning(\"Uhhh\", \"You shouldn't be here (employee).\")\r\n else:\r\n # Just a user\r\n userFunctionalityWindow = UserFunctionality(self)\r\n self.withdraw()\r\n userFunctionalityWindow.display()\r\n\r\n\r\nclass 
RegistrationNav(Toplevel):\r\n def __init__(self, master):\r\n Toplevel.__init__(self)\r\n self.master = master\r\n self.title('Registration Navigation')\r\n self.config(background='#ffffff')\r\n\r\n def display(self):\r\n registerLabel = Label(self, text=\"Register Navigation\", font=\"Helvetica\", foreground='#000000',\r\n background='#ffffff')\r\n registerLabel.grid(row=1, column=1, padx=(4, 4), pady=(2, 2), sticky=W + E)\r\n\r\n userOnlyButton = Button(self, command=self.onUserOnlyButtonClicked, text=\"User Only\", background='#4286f4')\r\n userOnlyButton.grid(row=2, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n visitorOnlyButton = Button(self, command=self.onVisitorOnlyButtonClicked, text=\"Visitor Only\",\r\n background='#4286f4')\r\n visitorOnlyButton.grid(row=3, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n employeeOnlyButton = Button(self, command=self.onEmployeeOnlyButtonClicked, text=\"Employee Only\",\r\n background='#4286f4')\r\n employeeOnlyButton.grid(row=4, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n employeeVisitorButton = Button(self, command=self.onEmployeeVisitorButtonClicked, text=\"Employee-Visitor\",\r\n background='#4286f4')\r\n employeeVisitorButton.grid(row=5, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n backButton = Button(self, command=self.onRegistrationBackButtonClicked, text=\"Back\", background='#4286f4')\r\n backButton.grid(row=6, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n def onUserOnlyButtonClicked(self):\r\n userRegistrationWindow = UserRegistration(self)\r\n self.withdraw()\r\n userRegistrationWindow.display()\r\n\r\n def onVisitorOnlyButtonClicked(self):\r\n visitorRegistrationWindow = VisitorRegistration(self)\r\n self.withdraw()\r\n visitorRegistrationWindow.display()\r\n\r\n def onEmployeeOnlyButtonClicked(self):\r\n employeeRegistrationWindow = EmployeeRegistration(self)\r\n self.withdraw()\r\n employeeRegistrationWindow.display()\r\n\r\n def onEmployeeVisitorButtonClicked(self):\r\n employeeVisitorRegistration = EmployeeVisitorRegistration(self)\r\n self.withdraw()\r\n employeeVisitorRegistration.display()\r\n\r\n def onRegistrationBackButtonClicked(self):\r\n self.destroy()\r\n self.master.deiconify()\r\n\r\n\r\nclass UserRegistration(Toplevel):\r\n def __init__(self, master):\r\n Toplevel.__init__(self)\r\n self.master = master\r\n self.title('Registration -- User')\r\n self.config(background='#ffffff')\r\n\r\n def display(self):\r\n self.registrationFirstName = StringVar()\r\n self.registrationLastName = StringVar()\r\n self.registrationUserName = StringVar()\r\n self.registrationPassword = StringVar()\r\n self.registrationConfirmPassword = StringVar()\r\n\r\n registerLabel = Label(self, text=\"User Only Registration\", font=\"Helvetica\",\r\n foreground='#000000', background='#ffffff')\r\n registerLabel.grid(row=1, column=1, padx=(4, 4), pady=(2, 2), sticky=W + E, columnspan=2)\r\n\r\n firstNameLabel = Label(self, text=\"First Name\", background='#ffffff')\r\n firstNameLabel.grid(row=2, column=1, padx=(2, 5), pady=(0, 4), sticky=W)\r\n\r\n firstNameBox = Entry(self, textvariable=self.registrationFirstName, width=20)\r\n firstNameBox.grid(row=2, column=2, padx=(0, 2), pady=(0, 4), sticky=E)\r\n\r\n lastNameLabel = Label(self, text=\"Last Name\", background='#ffffff')\r\n lastNameLabel.grid(row=3, column=1, padx=(2, 5), pady=(0, 4), sticky=W)\r\n\r\n lastNameBox = Entry(self, textvariable=self.registrationLastName, width=20)\r\n lastNameBox.grid(row=3, column=2, padx=(0, 2), 
pady=(0, 4), sticky=E)\r\n\r\n usernameLabel = Label(self, text=\"Username\", background='#ffffff')\r\n usernameLabel.grid(row=4, column=1, padx=(2, 5), pady=(0, 4), sticky=W)\r\n\r\n usernameBox = Entry(self, textvariable=self.registrationUserName, width=20)\r\n usernameBox.grid(row=4, column=2, padx=(0, 2), pady=(0, 4), sticky=E)\r\n\r\n passwordLabel = Label(self, text=\"Password\", background='#ffffff')\r\n passwordLabel.grid(row=5, column=1, padx=(2, 5), pady=(0, 4), sticky=W)\r\n\r\n passwordBox = Entry(self, textvariable=self.registrationPassword, width=20)\r\n passwordBox.grid(row=5, column=2, padx=(0, 2), pady=(0, 4), sticky=E)\r\n\r\n confirmPasswordLabel = Label(self, text=\"Confirm Password\", background='#ffffff')\r\n confirmPasswordLabel.grid(row=6, column=1, padx=(2, 5), pady=(0, 4), sticky=W)\r\n\r\n confirmPasswordBox = Entry(self, textvariable=self.registrationConfirmPassword, width=20)\r\n confirmPasswordBox.grid(row=6, column=2, padx=(0, 2), pady=(0, 4), sticky=E)\r\n\r\n emailLabel = Label(self, text=\"Email(s)\", background='#ffffff')\r\n emailLabel.grid(row=7, column=1, padx=(2, 5), pady=(0, 4), sticky=W)\r\n\r\n self.emailBox = Text(self, height=4, width=15, wrap=WORD)\r\n self.emailBox.grid(row=7, column=2, padx=(0, 2), pady=(0, 4), sticky=E)\r\n self.emailBox.insert(\"1.0\",\r\n \"Enter emails with 1 comma in between.\\nEx: [email protected],[email protected]\")\r\n\r\n # EMAIL NOT CURRENTLY IMPLEMENTED\r\n\r\n backButton = Button(self, command=self.onUserOnlyRegistrationBackButtonClicked, text=\"Back\",\r\n background='#4286f4')\r\n backButton.grid(row=8, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n registerButton = Button(self, command=self.onUserOnlyRegistrationRegisterButtonClicked, text=\"Register\",\r\n background='#4286f4')\r\n registerButton.grid(row=8, column=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n def onUserOnlyRegistrationBackButtonClicked(self):\r\n self.master.deiconify()\r\n self.destroy()\r\n\r\n def onUserOnlyRegistrationRegisterButtonClicked(self):\r\n firstName = self.registrationFirstName.get()\r\n lastName = self.registrationLastName.get()\r\n username = self.registrationUserName.get()\r\n password = self.registrationPassword.get()\r\n confirmPassword = self.registrationConfirmPassword.get()\r\n emailString = self.emailBox.get(\"1.0\", \"end-1c\")\r\n\r\n if not firstName:\r\n messagebox.showwarning(\"Missing First Name\", \"The first name field is empty. Please try again.\")\r\n return\r\n if not lastName:\r\n messagebox.showwarning(\"Missing Last Name\", \"The last name field is empty. Please try again.\")\r\n return\r\n\r\n if not username:\r\n messagebox.showwarning(\"Missing Username\", \"The username field is empty. Please try again.\")\r\n return\r\n if not password:\r\n messagebox.showwarning(\"Missing Password\", \"The password field is empty. 
Please try again.\")\r\n return\r\n if not confirmPassword:\r\n confirmPassword = \"\"\r\n\r\n if len(username) > 16:\r\n messagebox.showwarning(\"Username too long\", \"Usernames can have at maximum 16 letters.\")\r\n return\r\n\r\n usernameExists = cursor.execute(\"SELECT * from user where Username=%s\", username)\r\n if usernameExists:\r\n messagebox.showwarning(\"Username Already Taken\", \"This username already exists within the database.\")\r\n return\r\n\r\n if len(password) < 8:\r\n messagebox.showwarning(\"Password Too Short\", \"Passwords must have at least 8 characters.\")\r\n return\r\n\r\n if password != confirmPassword:\r\n messagebox.showwarning(\"Password Mismatch\", \"The password and the confirmed Password do not match.\")\r\n return\r\n\r\n if len(firstName) > 32:\r\n messagebox.showwarning(\"First Name too long\", \"First names can only be 32 characters. Please abbreviate.\")\r\n return\r\n if len(lastName) > 32:\r\n messagebox.showwarning(\"Last Name too long\", \"Last names can only be 32 characters. Please abbreviate.\")\r\n return\r\n\r\n hasValidEmail=False\r\n\r\n emailList = []\r\n while len(emailString) > 0:\r\n commaIndex = emailString.find(',')\r\n if commaIndex > -1:\r\n emailList.append(emailString[0:commaIndex])\r\n emailString = emailString[commaIndex + 1:]\r\n else:\r\n emailList.append(emailString[0:])\r\n emailString = \"\"\r\n for email in emailList:\r\n curEmail = email\r\n if curEmail.find('\\n') > -1:\r\n messagebox.showwarning(\"Email Error\",\r\n \"The format of your email(s) is wrong. You have an enter character somewhere.\")\r\n return\r\n atLocation = curEmail.find('@')\r\n if atLocation < 0:\r\n messagebox.showwarning(\"Email Error\",\r\n \"The format of your email(s) is wrong. Some email(s) is missing the @ character.\")\r\n return\r\n beforeAt = email[0:atLocation]\r\n afterAt = email[atLocation + 1:]\r\n periodLocation = afterAt.find('.')\r\n if periodLocation < 0:\r\n messagebox.showwarning(\"Email Error\",\r\n \"The format of your email(s) is wrong. Some email(s) is missing the . character.\")\r\n return\r\n beforePeriodAfterAt = afterAt[0:periodLocation]\r\n afterPeriod = afterAt[periodLocation + 1:]\r\n if not beforeAt.isalnum():\r\n messagebox.showwarning(\"Email Error\", \"The format of your email(s) is wrong.\")\r\n return\r\n if not beforePeriodAfterAt.isalnum():\r\n messagebox.showwarning(\"Email Error\", \"The format of your email(s) is wrong.\")\r\n return\r\n if not afterPeriod.isalnum():\r\n messagebox.showwarning(\"Email Error\", \"The format of your email(s) is wrong.\")\r\n return\r\n emailExists = cursor.execute(\"SELECT * from emails where Email=%s\", curEmail)\r\n if emailExists:\r\n messagebox.showwarning(\"Email Already Taken\",\r\n \"An email you entered already exists within the database.\")\r\n return\r\n hasValidEmail=True\r\n\r\n if hasValidEmail == False:\r\n messagebox.showwarning(\"Email not entered\", \"Please enter at least 1 email.\")\r\n return\r\n\r\n hashedPassword = encrypt(password)\r\n cursor.execute(\"INSERT into user values (%s, %s, %s, %s, %s)\",\r\n (username, hashedPassword, firstName, lastName, \"Pending\"))\r\n for email in emailList:\r\n cursor.execute(\"INSERT into emails values (%s, %s)\", (username, email))\r\n db.commit()\r\n messagebox.showwarning(\"Registration Successful\",\r\n \"You are now registered. 
You will need to wait for administrator approval to login.\")\r\n\r\n self.destroy()\r\n self.master.deiconify()\r\n\r\n\r\nclass VisitorRegistration(Toplevel):\r\n def __init__(self, master):\r\n Toplevel.__init__(self)\r\n self.master = master\r\n self.title('Registration -- Visitor')\r\n self.config(background='#ffffff')\r\n\r\n def display(self):\r\n self.registrationFirstName = StringVar()\r\n self.registrationLastName = StringVar()\r\n self.registrationUserName = StringVar()\r\n self.registrationPassword = StringVar()\r\n self.registrationConfirmPassword = StringVar()\r\n\r\n registerLabel = Label(self, text=\"Visitor Only Registration\", font=\"Helvetica\",\r\n foreground='#000000', background='#ffffff')\r\n registerLabel.grid(row=1, column=1, padx=(4, 4), pady=(2, 2), sticky=W + E, columnspan=2)\r\n\r\n firstNameLabel = Label(self, text=\"First Name\", background='#ffffff')\r\n firstNameLabel.grid(row=2, column=1, padx=(2, 5), pady=(0, 4), sticky=W)\r\n\r\n firstNameBox = Entry(self, textvariable=self.registrationFirstName, width=20)\r\n firstNameBox.grid(row=2, column=2, padx=(0, 2), pady=(0, 4), sticky=E)\r\n\r\n lastNameLabel = Label(self, text=\"Last Name\", background='#ffffff')\r\n lastNameLabel.grid(row=3, column=1, padx=(2, 5), pady=(0, 4), sticky=W)\r\n\r\n lastNameBox = Entry(self, textvariable=self.registrationLastName, width=20)\r\n lastNameBox.grid(row=3, column=2, padx=(0, 2), pady=(0, 4), sticky=E)\r\n\r\n usernameLabel = Label(self, text=\"Username\", background='#ffffff')\r\n usernameLabel.grid(row=4, column=1, padx=(2, 5), pady=(0, 4), sticky=W)\r\n\r\n usernameBox = Entry(self, textvariable=self.registrationUserName, width=20)\r\n usernameBox.grid(row=4, column=2, padx=(0, 2), pady=(0, 4), sticky=E)\r\n\r\n passwordLabel = Label(self, text=\"Password\", background='#ffffff')\r\n passwordLabel.grid(row=5, column=1, padx=(2, 5), pady=(0, 4), sticky=W)\r\n\r\n passwordBox = Entry(self, textvariable=self.registrationPassword, width=20)\r\n passwordBox.grid(row=5, column=2, padx=(0, 2), pady=(0, 4), sticky=E)\r\n\r\n confirmPasswordLabel = Label(self, text=\"Confirm Password\", background='#ffffff')\r\n confirmPasswordLabel.grid(row=6, column=1, padx=(2, 5), pady=(0, 4), sticky=W)\r\n\r\n confirmPasswordBox = Entry(self, textvariable=self.registrationConfirmPassword, width=20)\r\n confirmPasswordBox.grid(row=6, column=2, padx=(0, 2), pady=(0, 4), sticky=E)\r\n\r\n emailLabel = Label(self, text=\"Email(s)\", background='#ffffff')\r\n emailLabel.grid(row=7, column=1, padx=(2, 5), pady=(0, 4), sticky=W)\r\n\r\n self.emailBox = Text(self, height=4, width=15, wrap=WORD)\r\n self.emailBox.grid(row=7, column=2, padx=(0, 2), pady=(0, 4), sticky=E)\r\n self.emailBox.insert(\"1.0\",\r\n \"Enter emails with 1 comma in between.\\nEx: [email protected],[email protected]\")\r\n\r\n # EMAIL NOT CURRENTLY IMPLEMENTED\r\n\r\n backButton = Button(self, command=self.onVisitorOnlyRegistrationBackButtonClicked, text=\"Back\",\r\n background='#4286f4')\r\n backButton.grid(row=8, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n registerButton = Button(self, command=self.onVisitorOnlyRegistrationRegisterButtonClicked, text=\"Register\",\r\n background='#4286f4')\r\n registerButton.grid(row=8, column=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n def onVisitorOnlyRegistrationBackButtonClicked(self):\r\n self.master.deiconify()\r\n self.destroy()\r\n\r\n def onVisitorOnlyRegistrationRegisterButtonClicked(self):\r\n firstName = self.registrationFirstName.get()\r\n lastName = 
self.registrationLastName.get()\r\n username = self.registrationUserName.get()\r\n password = self.registrationPassword.get()\r\n confirmPassword = self.registrationConfirmPassword.get()\r\n emailString = self.emailBox.get(\"1.0\", \"end-1c\")\r\n\r\n if not firstName:\r\n messagebox.showwarning(\"Missing First Name\", \"The first name field is empty. Please try again.\")\r\n return\r\n if not lastName:\r\n messagebox.showwarning(\"Missing Last Name\", \"The last name field is empty. Please try again.\")\r\n return\r\n\r\n if not username:\r\n messagebox.showwarning(\"Missing Username\", \"The username field is empty. Please try again.\")\r\n return\r\n if not password:\r\n messagebox.showwarning(\"Missing Password\", \"The password field is empty. Please try again.\")\r\n return\r\n if not confirmPassword:\r\n confirmPassword = \"\"\r\n\r\n if len(username) > 16:\r\n messagebox.showwarning(\"Username too long\", \"Usernames can have at maximum 16 letters.\")\r\n return\r\n\r\n usernameExists = cursor.execute(\"SELECT * from user where Username=%s\", username)\r\n if usernameExists:\r\n messagebox.showwarning(\"Username Already Taken\", \"This username already exists within the database.\")\r\n return\r\n\r\n if len(password) < 8:\r\n messagebox.showwarning(\"Password Too Short\", \"Passwords must have at least 8 characters.\")\r\n return\r\n\r\n if password != confirmPassword:\r\n messagebox.showwarning(\"Password Mismatch\", \"The password and the confirmed Password do not match.\")\r\n return\r\n\r\n if len(firstName) > 32:\r\n messagebox.showwarning(\"First Name too long\", \"First names can only be 32 characters. Please abbreviate.\")\r\n return\r\n if len(lastName) > 32:\r\n messagebox.showwarning(\"Last Name too long\", \"Last names can only be 32 characters. Please abbreviate.\")\r\n return\r\n\r\n hasValidEmail=False\r\n\r\n emailList = []\r\n while len(emailString) > 0:\r\n commaIndex = emailString.find(',')\r\n if commaIndex > -1:\r\n emailList.append(emailString[0:commaIndex])\r\n emailString = emailString[commaIndex + 1:]\r\n else:\r\n emailList.append(emailString[0:])\r\n emailString = \"\"\r\n for email in emailList:\r\n curEmail = email\r\n if curEmail.find('\\n') > -1:\r\n messagebox.showwarning(\"Email Error\",\r\n \"The format of your email(s) is wrong. You have an enter character somewhere.\")\r\n return\r\n atLocation = curEmail.find('@')\r\n if atLocation < 0:\r\n messagebox.showwarning(\"Email Error\",\r\n \"The format of your email(s) is wrong. Some email(s) is missing the @ character.\")\r\n return\r\n beforeAt = email[0:atLocation]\r\n afterAt = email[atLocation + 1:]\r\n periodLocation = afterAt.find('.')\r\n if periodLocation < 0:\r\n messagebox.showwarning(\"Email Error\",\r\n \"The format of your email(s) is wrong. Some email(s) is missing the . 
character.\")\r\n return\r\n beforePeriodAfterAt = afterAt[0:periodLocation]\r\n afterPeriod = afterAt[periodLocation + 1:]\r\n if not beforeAt.isalnum():\r\n messagebox.showwarning(\"Email Error\", \"The format of your email(s) is wrong.\")\r\n return\r\n if not beforePeriodAfterAt.isalnum():\r\n messagebox.showwarning(\"Email Error\", \"The format of your email(s) is wrong.\")\r\n return\r\n if not afterPeriod.isalnum():\r\n messagebox.showwarning(\"Email Error\", \"The format of your email(s) is wrong.\")\r\n return\r\n emailExists = cursor.execute(\"SELECT * from emails where Email=%s\", curEmail)\r\n if emailExists:\r\n messagebox.showwarning(\"Email Already Taken\",\r\n \"An email you entered already exists within the database.\")\r\n return\r\n hasValidEmail=True\r\n\r\n if hasValidEmail == False:\r\n messagebox.showwarning(\"Email not entered\", \"Please enter at least 1 email.\")\r\n return\r\n\r\n hashedPassword = encrypt(password)\r\n cursor.execute(\"INSERT into user values (%s, %s, %s, %s, %s)\",\r\n (username, hashedPassword, firstName, lastName, \"Pending\"))\r\n cursor.execute(\"INSERT into visitor values (%s)\", username)\r\n for email in emailList:\r\n cursor.execute(\"INSERT into emails values (%s, %s)\", (username, email))\r\n db.commit()\r\n messagebox.showwarning(\"Registration Successful\",\r\n \"You are now registered. You will need to wait for administrator approval to login.\")\r\n\r\n self.self.destroy()\r\n self.loginWindow.deiconify()\r\n\r\n\r\nclass EmployeeRegistration(Toplevel):\r\n def __init__(self, master):\r\n Toplevel.__init__(self)\r\n self.master = master\r\n self.title('Registration -- Employee')\r\n self.config(background='#ffffff')\r\n\r\n def display(self):\r\n self.registrationFirstName = StringVar()\r\n self.registrationLastName = StringVar()\r\n self.registrationUserName = StringVar()\r\n self.registrationPassword = StringVar()\r\n self.registrationConfirmPassword = StringVar()\r\n self.registrationEmployeeType = StringVar()\r\n self.registrationEmployeeType.set(\"\")\r\n self.registrationState = StringVar()\r\n self.registrationState.set(\"\")\r\n self.registrationPhone = StringVar()\r\n self.registrationAddress = StringVar()\r\n self.registrationCity = StringVar()\r\n self.registrationZIP = StringVar()\r\n\r\n self.states = [\"AL\", \"AK\", \"AR\", \"AZ\", \"CA\", \"CO\", \"CT\", \"DE\", \"FL\", \"GA\", \"HI\", \"ID\", \"IL\", \"IN\", \"IA\",\r\n \"KS\", \"KY\", \"LA\", \"MA\", \"MD\", \"ME\", \"MI\", \"MN\", \"MO\", \"MS\", \"MT\", \"NC\", \"ND\", \"NE\", \"NH\",\r\n \"NJ\", \"NM\", \"NV\", \"NY\", \"OH\", \"OK\", \"OR\", \"PA\", \"RI\", \"SC\", \"SD\", \"TN\", \"TX\", \"UT\", \"VA\",\r\n \"VT\", \"WA\", \"WI\", \"WV\", \"WY\", \"other\"]\r\n\r\n self.employeeType = [\"Manager\", \"Staff\"]\r\n\r\n registerLabel = Label(self, text=\"Employee Only Registration\", font=\"Helvetica\",\r\n foreground='#000000', background='#ffffff')\r\n registerLabel.grid(row=1, column=1, padx=(4, 4), pady=(2, 2), sticky=W + E, columnspan=2)\r\n\r\n firstNameLabel = Label(self, text=\"First Name\", background='#ffffff')\r\n firstNameLabel.grid(row=2, column=1, padx=(2, 5), pady=(0, 4), sticky=W)\r\n\r\n firstNameBox = Entry(self, textvariable=self.registrationFirstName, width=20)\r\n firstNameBox.grid(row=2, column=2, padx=(0, 2), pady=(0, 4), sticky=E)\r\n\r\n lastNameLabel = Label(self, text=\"Last Name\", background='#ffffff')\r\n lastNameLabel.grid(row=3, column=1, padx=(2, 5), pady=(0, 4), sticky=W)\r\n\r\n lastNameBox = Entry(self, 
textvariable=self.registrationLastName, width=20)\r\n lastNameBox.grid(row=3, column=2, padx=(0, 2), pady=(0, 4), sticky=E)\r\n\r\n usernameLabel = Label(self, text=\"Username\", background='#ffffff')\r\n usernameLabel.grid(row=4, column=1, padx=(2, 5), pady=(0, 4), sticky=W)\r\n\r\n usernameBox = Entry(self, textvariable=self.registrationUserName, width=20)\r\n usernameBox.grid(row=4, column=2, padx=(0, 2), pady=(0, 4), sticky=E)\r\n\r\n passwordLabel = Label(self, text=\"Password\", background='#ffffff')\r\n passwordLabel.grid(row=5, column=1, padx=(2, 5), pady=(0, 4), sticky=W)\r\n\r\n passwordBox = Entry(self, textvariable=self.registrationPassword, width=20)\r\n passwordBox.grid(row=5, column=2, padx=(0, 2), pady=(0, 4), sticky=E)\r\n\r\n confirmPasswordLabel = Label(self, text=\"Confirm Password\", background='#ffffff')\r\n confirmPasswordLabel.grid(row=6, column=1, padx=(2, 5), pady=(0, 4), sticky=W)\r\n\r\n confirmPasswordBox = Entry(self, textvariable=self.registrationConfirmPassword,\r\n width=20)\r\n confirmPasswordBox.grid(row=6, column=2, padx=(0, 2), pady=(0, 4), sticky=E)\r\n\r\n userTypeLabel = Label(self, text=\"Employee Type\", background='#ffffff')\r\n userTypeLabel.grid(row=7, column=1, padx=(2, 5), pady=(0, 4), sticky=W)\r\n\r\n userTypeDropdown = OptionMenu(self, self.registrationEmployeeType, *self.employeeType)\r\n userTypeDropdown.grid(row=7, column=2, padx=(8, 5), pady=(0, 4), sticky=W)\r\n\r\n phoneLabel = Label(self, text=\"Phone\", background='#ffffff')\r\n phoneLabel.grid(row=8, column=1, padx=(2, 5), pady=(0, 4), sticky=W)\r\n\r\n phoneBox = Entry(self, textvariable=self.registrationPhone, width=20)\r\n phoneBox.grid(row=8, column=2, padx=(0, 2), pady=(0, 4), sticky=E)\r\n\r\n addressLabel = Label(self, text=\"Address\", background='#ffffff')\r\n addressLabel.grid(row=9, column=1, padx=(2, 5), pady=(0, 4), sticky=W)\r\n\r\n addressBox = Entry(self, textvariable=self.registrationAddress, width=20)\r\n addressBox.grid(row=9, column=2, padx=(0, 2), pady=(0, 4), sticky=E)\r\n\r\n cityLabel = Label(self, text=\"City\", background='#ffffff')\r\n cityLabel.grid(row=10, column=1, padx=(2, 5), pady=(0, 4), sticky=W)\r\n\r\n cityBox = Entry(self, textvariable=self.registrationCity, width=20)\r\n cityBox.grid(row=10, column=2, padx=(0, 2), pady=(0, 4), sticky=E)\r\n\r\n stateLabel = Label(self, text=\"State\", background='#ffffff')\r\n stateLabel.grid(row=11, column=1, padx=(2, 5), pady=(0, 4), sticky=W)\r\n\r\n stateDropdown = OptionMenu(self, self.registrationState, *self.states)\r\n stateDropdown.grid(row=11, column=2, padx=(8, 5), pady=(0, 4), sticky=W)\r\n\r\n zipLabel = Label(self, text=\"Zipcode\", background='#ffffff')\r\n zipLabel.grid(row=12, column=1, padx=(2, 5), pady=(0, 4), sticky=W)\r\n\r\n zipBox = Entry(self, textvariable=self.registrationZIP, width=20)\r\n zipBox.grid(row=12, column=2, padx=(0, 2), pady=(0, 4), sticky=E)\r\n\r\n emailLabel = Label(self, text=\"Email(s)\", background='#ffffff')\r\n emailLabel.grid(row=13, column=1, padx=(2, 5), pady=(0, 4), sticky=W)\r\n\r\n self.emailBox = Text(self, height=4, width=15, wrap=WORD)\r\n self.emailBox.grid(row=13, column=2, padx=(0, 2), pady=(0, 4), sticky=E)\r\n self.emailBox.insert(\"1.0\",\r\n \"Enter emails with 1 comma in between.\\nEx: [email protected],[email protected]\")\r\n\r\n # EMAIL NOT CURRENTLY IMPLEMENTED\r\n\r\n backButton = Button(self, command=self.onEmployeeOnlyRegistrationBackButtonClicked, text=\"Back\",\r\n background='#4286f4')\r\n backButton.grid(row=14, column=1, padx=(2, 2), 
pady=(2, 2), sticky=W + E)\r\n\r\n registerButton = Button(self, command=self.onEmployeeOnlyRegistrationRegisterButtonClicked, text=\"Register\",\r\n background='#4286f4')\r\n registerButton.grid(row=14, column=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n def onEmployeeOnlyRegistrationBackButtonClicked(self):\r\n self.master.deiconify()\r\n self.destroy()\r\n\r\n def onEmployeeOnlyRegistrationRegisterButtonClicked(self):\r\n firstName = self.registrationFirstName.get()\r\n lastName = self.registrationLastName.get()\r\n username = self.registrationUserName.get()\r\n password = self.registrationPassword.get()\r\n confirmPassword = self.registrationConfirmPassword.get()\r\n emailString = self.emailBox.get(\"1.0\", \"end-1c\")\r\n\r\n employeeType = self.registrationEmployeeType.get()\r\n state = self.registrationState.get()\r\n phone = self.registrationPhone.get()\r\n address = self.registrationAddress.get()\r\n city = self.registrationCity.get()\r\n zipcode = self.registrationZIP.get()\r\n\r\n if not firstName:\r\n messagebox.showwarning(\"Missing First Name\", \"The first name field is empty. Please try again.\")\r\n return\r\n if not lastName:\r\n messagebox.showwarning(\"Missing Last Name\", \"The last name field is empty. Please try again.\")\r\n return\r\n if not state:\r\n messagebox.showwarning(\"Missing State\", \"The state field is empty. Please try again.\")\r\n return\r\n if not address:\r\n messagebox.showwarning(\"Missing Address\", \"The address field is empty. Please try again.\")\r\n return\r\n if not city:\r\n messagebox.showwarning(\"Missing City\", \"The city field is empty. Please try again.\")\r\n return\r\n if not zipcode:\r\n messagebox.showwarning(\"Missing Zipcode\", \"The zipcode field is empty. Please try again.\")\r\n return\r\n if not confirmPassword:\r\n confirmPassword = \"\"\r\n\r\n if not username:\r\n messagebox.showwarning(\"Missing Username\", \"The username field is empty. Please try again.\")\r\n return\r\n if not password:\r\n messagebox.showwarning(\"Missing Password\", \"The password field is empty. Please try again.\")\r\n return\r\n if not employeeType:\r\n messagebox.showwarning(\"Missing Employee Type\", \"Please select an employee type.\")\r\n return\r\n if not phone:\r\n messagebox.showwarning(\"Missing Phone Number\", \"Please enter a phone number in format xxxxxxxxxx\")\r\n return\r\n\r\n if len(username) > 16:\r\n messagebox.showwarning(\"Username too long\", \"Usernames can have at maximum 16 letters.\")\r\n return\r\n\r\n usernameExists = cursor.execute(\"SELECT * from user where Username=%s\", username)\r\n if usernameExists:\r\n messagebox.showwarning(\"Username Already Taken\", \"This username already exists within the database.\")\r\n return\r\n\r\n if len(password) < 8:\r\n messagebox.showwarning(\"Password Too Short\", \"Passwords must have at least 8 characters.\")\r\n return\r\n\r\n if password != confirmPassword:\r\n messagebox.showwarning(\"Password Mismatch\", \"The password and the confirmed Password do not match.\")\r\n return\r\n\r\n if len(firstName) > 32:\r\n messagebox.showwarning(\"First Name too long\", \"First names can only be 32 characters. Please abbreviate.\")\r\n return\r\n if len(lastName) > 32:\r\n messagebox.showwarning(\"Last Name too long\", \"Last names can only be 32 characters. Please abbreviate.\")\r\n return\r\n if len(address) > 64:\r\n messagebox.showwarning(\"Address too long\", \"Addresses are limited to 64 characters. 
Please abbreviate.\")\r\n return\r\n if len(phone) > 10 or len(phone) < 10:\r\n messagebox.showwarning(\"Phone number incorrect\", \"Please enter a phone number in format xxxxxxxxxx\")\r\n return\r\n if len(zipcode) > 5:\r\n messagebox.showwarning(\"Zipcode too long\", \"Please enter a zipcode in format xxxxx\")\r\n return\r\n if zipcode != \"\":\r\n if len(zipcode) < 5:\r\n messagebox.showwarning(\"Zipcode too short\", \"Please enter a zipcode in format xxxxxx\")\r\n if len(city) > 32:\r\n messagebox.showwarning(\"City name too long\",\r\n \"The city name is limited to 32 characters. Please abbreviate.\")\r\n\r\n phoneExists = cursor.execute(\"SELECT * from employee where Phone=%s\", phone)\r\n if phoneExists:\r\n messagebox.showwarning(\"Phone Already Registered\", \"This phone number is already registered.\")\r\n return\r\n\r\n empId = random.randint(1, 999999999)\r\n while cursor.execute(\"SELECT * from employee where EmployeeID=%s\", empId):\r\n empId = random.randint(1, 999999999)\r\n\r\n hasValidEmail=False\r\n\r\n emailList = []\r\n while len(emailString) > 0:\r\n commaIndex = emailString.find(',')\r\n if commaIndex > -1:\r\n emailList.append(emailString[0:commaIndex])\r\n emailString = emailString[commaIndex + 1:]\r\n else:\r\n emailList.append(emailString[0:])\r\n emailString = \"\"\r\n for email in emailList:\r\n curEmail = email\r\n if curEmail.find('\\n') > -1:\r\n messagebox.showwarning(\"Email Error\",\r\n \"The format of your email(s) is wrong. You have an enter character somewhere.\")\r\n return\r\n atLocation = curEmail.find('@')\r\n if atLocation < 0:\r\n messagebox.showwarning(\"Email Error\",\r\n \"The format of your email(s) is wrong. Some email(s) is missing the @ character.\")\r\n return\r\n beforeAt = email[0:atLocation]\r\n afterAt = email[atLocation + 1:]\r\n periodLocation = afterAt.find('.')\r\n if periodLocation < 0:\r\n messagebox.showwarning(\"Email Error\",\r\n \"The format of your email(s) is wrong. Some email(s) is missing the . 
character.\")\r\n return\r\n beforePeriodAfterAt = afterAt[0:periodLocation]\r\n afterPeriod = afterAt[periodLocation + 1:]\r\n if not beforeAt.isalnum():\r\n messagebox.showwarning(\"Email Error\", \"The format of your email(s) is wrong.\")\r\n return\r\n if not beforePeriodAfterAt.isalnum():\r\n messagebox.showwarning(\"Email Error\", \"The format of your email(s) is wrong.\")\r\n return\r\n if not afterPeriod.isalnum():\r\n messagebox.showwarning(\"Email Error\", \"The format of your email(s) is wrong.\")\r\n return\r\n emailExists = cursor.execute(\"SELECT * from emails where Email=%s\", curEmail)\r\n if emailExists:\r\n messagebox.showwarning(\"Email Already Taken\",\r\n \"An email you entered already exists within the database.\")\r\n return\r\n hasValidEmail=True\r\n\r\n if hasValidEmail == False:\r\n messagebox.showwarning(\"Email not entered\", \"Please enter at least 1 email.\")\r\n return\r\n\r\n hashedPassword = encrypt(password)\r\n cursor.execute(\"INSERT into user values (%s, %s, %s, %s, %s)\",\r\n (username, hashedPassword, firstName, lastName, \"Pending\"))\r\n cursor.execute(\"INSERT into employee values (%s, %s, %s, %s, %s, %s, %s)\",\r\n (username, empId, phone, address, city, state, zipcode))\r\n\r\n if employeeType == \"Manager\":\r\n cursor.execute(\"INSERT into manager values (%s)\", username)\r\n elif employeeType == \"Staff\":\r\n cursor.execute(\"INSERT into staff values (%s)\", username)\r\n else:\r\n messagebox.showwarning(\"Uhh\", \"You shouldn't be here: employee\")\r\n\r\n for email in emailList:\r\n cursor.execute(\"INSERT into emails values (%s, %s)\", (username, email))\r\n db.commit()\r\n\r\n messagebox.showwarning(\"Registration Successful\",\r\n \"You are now registered. You will need to wait for administrator approval to login.\")\r\n\r\n self.master.deiconify()\r\n self.destroy()\r\n\r\n\r\nclass EmployeeVisitorRegistration(Toplevel):\r\n def __init__(self, master):\r\n Toplevel.__init__(self)\r\n self.master = master\r\n self.title('Registration -- Employee Visitor')\r\n self.config(background='#ffffff')\r\n\r\n def display(self):\r\n self.registrationFirstName = StringVar()\r\n self.registrationLastName = StringVar()\r\n self.registrationUserName = StringVar()\r\n self.registrationPassword = StringVar()\r\n self.registrationConfirmPassword = StringVar()\r\n self.registrationEmployeeType = StringVar()\r\n self.registrationState = StringVar()\r\n self.registrationPhone = StringVar()\r\n self.registrationAddress = StringVar()\r\n self.registrationCity = StringVar()\r\n self.registrationZIP = StringVar()\r\n\r\n self.states = [\"AL\", \"AK\", \"AR\", \"AZ\", \"CA\", \"CO\", \"CT\", \"DE\", \"FL\", \"GA\", \"HI\", \"ID\", \"IL\", \"IN\", \"IA\",\r\n \"KS\", \"KY\", \"LA\", \"MA\", \"MD\", \"ME\", \"MI\", \"MN\", \"MO\", \"MS\", \"MT\", \"NC\", \"ND\", \"NE\", \"NH\",\r\n \"NJ\", \"NM\", \"NV\", \"NY\", \"OH\", \"OK\", \"OR\", \"PA\", \"RI\", \"SC\", \"SD\", \"TN\", \"TX\", \"UT\", \"VA\",\r\n \"VT\", \"WA\", \"WI\", \"WV\", \"WY\", \"other\"]\r\n\r\n self.employeeType = [\"Manager\", \"Staff\"]\r\n\r\n registerLabel = Label(self, text=\"Employee-Visitor Registration\", font=\"Helvetica\",\r\n foreground='#000000', background='#ffffff')\r\n registerLabel.grid(row=1, column=1, padx=(4, 4), pady=(2, 2), sticky=W + E, columnspan=2)\r\n\r\n firstNameLabel = Label(self, text=\"First Name\", background='#ffffff')\r\n firstNameLabel.grid(row=2, column=1, padx=(2, 5), pady=(0, 4), sticky=W)\r\n\r\n firstNameBox = Entry(self, 
textvariable=self.registrationFirstName, width=20)\r\n firstNameBox.grid(row=2, column=2, padx=(0, 2), pady=(0, 4), sticky=E)\r\n\r\n lastNameLabel = Label(self, text=\"Last Name\", background='#ffffff')\r\n lastNameLabel.grid(row=3, column=1, padx=(2, 5), pady=(0, 4), sticky=W)\r\n\r\n lastNameBox = Entry(self, textvariable=self.registrationLastName, width=20)\r\n lastNameBox.grid(row=3, column=2, padx=(0, 2), pady=(0, 4), sticky=E)\r\n\r\n usernameLabel = Label(self, text=\"Username\", background='#ffffff')\r\n usernameLabel.grid(row=4, column=1, padx=(2, 5), pady=(0, 4), sticky=W)\r\n\r\n usernameBox = Entry(self, textvariable=self.registrationUserName, width=20)\r\n usernameBox.grid(row=4, column=2, padx=(0, 2), pady=(0, 4), sticky=E)\r\n\r\n passwordLabel = Label(self, text=\"Password\", background='#ffffff')\r\n passwordLabel.grid(row=5, column=1, padx=(2, 5), pady=(0, 4), sticky=W)\r\n\r\n passwordBox = Entry(self, textvariable=self.registrationPassword, width=20)\r\n passwordBox.grid(row=5, column=2, padx=(0, 2), pady=(0, 4), sticky=E)\r\n\r\n confirmPasswordLabel = Label(self, text=\"Confirm Password\", background='#ffffff')\r\n confirmPasswordLabel.grid(row=6, column=1, padx=(2, 5), pady=(0, 4), sticky=W)\r\n\r\n confirmPasswordBox = Entry(self, textvariable=self.registrationConfirmPassword,\r\n width=20)\r\n confirmPasswordBox.grid(row=6, column=2, padx=(0, 2), pady=(0, 4), sticky=E)\r\n\r\n userTypeLabel = Label(self, text=\"Employee Type\", background='#ffffff')\r\n userTypeLabel.grid(row=7, column=1, padx=(2, 5), pady=(0, 4), sticky=W)\r\n\r\n userTypeDropdown = OptionMenu(self, self.registrationEmployeeType, *self.employeeType)\r\n userTypeDropdown.grid(row=7, column=2, padx=(16, 5), pady=(0, 4), sticky=W)\r\n\r\n phoneLabel = Label(self, text=\"Phone\", background='#ffffff')\r\n phoneLabel.grid(row=8, column=1, padx=(2, 5), pady=(0, 4), sticky=W)\r\n\r\n phoneBox = Entry(self, textvariable=self.registrationPhone, width=20)\r\n phoneBox.grid(row=8, column=2, padx=(0, 2), pady=(0, 4), sticky=E)\r\n\r\n addressLabel = Label(self, text=\"Address\", background='#ffffff')\r\n addressLabel.grid(row=9, column=1, padx=(2, 5), pady=(0, 4), sticky=W)\r\n\r\n addressBox = Entry(self, textvariable=self.registrationAddress, width=20)\r\n addressBox.grid(row=9, column=2, padx=(0, 2), pady=(0, 4), sticky=E)\r\n\r\n cityLabel = Label(self, text=\"City\", background='#ffffff')\r\n cityLabel.grid(row=10, column=1, padx=(2, 5), pady=(0, 4), sticky=W)\r\n\r\n cityBox = Entry(self, textvariable=self.registrationCity, width=20)\r\n cityBox.grid(row=10, column=2, padx=(0, 2), pady=(0, 4), sticky=E)\r\n\r\n stateLabel = Label(self, text=\"State\", background='#ffffff')\r\n stateLabel.grid(row=11, column=1, padx=(2, 5), pady=(0, 4), sticky=W)\r\n\r\n stateDropdown = OptionMenu(self, self.registrationState, *self.states)\r\n stateDropdown.grid(row=11, column=2, padx=(16, 5), pady=(0, 4), sticky=W)\r\n\r\n zipLabel = Label(self, text=\"Zipcode\", background='#ffffff')\r\n zipLabel.grid(row=12, column=1, padx=(2, 5), pady=(0, 4), sticky=W)\r\n\r\n zipBox = Entry(self, textvariable=self.registrationZIP, width=20)\r\n zipBox.grid(row=12, column=2, padx=(0, 2), pady=(0, 4), sticky=E)\r\n\r\n emailLabel = Label(self, text=\"Email(s)\", background='#ffffff')\r\n emailLabel.grid(row=13, column=1, padx=(2, 5), pady=(0, 4), sticky=W)\r\n\r\n self.emailBox = Text(self, height=4, width=15, wrap=WORD)\r\n self.emailBox.grid(row=13, column=2, padx=(0, 2), pady=(0, 4), sticky=E)\r\n 
self.emailBox.insert(\"1.0\",\r\n \"Enter emails with 1 comma in between.\\nEx: [email protected],[email protected]\")\r\n\r\n # EMAIL NOT CURRENTLY IMPLEMENTED\r\n\r\n backButton = Button(self, command=self.onEmployeeVisitorRegistrationBackButtonClicked, text=\"Back\",\r\n background='#4286f4')\r\n backButton.grid(row=14, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n registerButton = Button(self, command=self.onEmployeeVisitorRegistrationRegisterButtonClicked, text=\"Register\",\r\n background='#4286f4')\r\n registerButton.grid(row=14, column=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n def onEmployeeVisitorRegistrationBackButtonClicked(self):\r\n self.master.deiconify()\r\n self.destroy()\r\n\r\n def onEmployeeVisitorRegistrationRegisterButtonClicked(self):\r\n firstName = self.registrationFirstName.get()\r\n lastName = self.registrationLastName.get()\r\n username = self.registrationUserName.get()\r\n password = self.registrationPassword.get()\r\n confirmPassword = self.registrationConfirmPassword.get()\r\n emailString = self.emailBox.get(\"1.0\", \"end-1c\")\r\n\r\n employeeType = self.registrationEmployeeType.get()\r\n state = self.registrationState.get()\r\n phone = self.registrationPhone.get()\r\n address = self.registrationAddress.get()\r\n city = self.registrationCity.get()\r\n zipcode = self.registrationZIP.get()\r\n\r\n if not firstName:\r\n messagebox.showwarning(\"Missing First Name\", \"The first name field is empty. Please try again.\")\r\n return\r\n if not lastName:\r\n messagebox.showwarning(\"Missing Last Name\", \"The last name field is empty. Please try again.\")\r\n return\r\n if not state:\r\n messagebox.showwarning(\"Missing State\", \"The state field is empty. Please try again.\")\r\n return\r\n if not address:\r\n messagebox.showwarning(\"Missing Address\", \"The address field is empty. Please try again.\")\r\n return\r\n if not city:\r\n messagebox.showwarning(\"Missing City\", \"The city field is empty. Please try again.\")\r\n return\r\n if not zipcode:\r\n messagebox.showwarning(\"Missing Zipcode\", \"The zipcode field is empty. Please try again.\")\r\n return\r\n if not confirmPassword:\r\n confirmPassword = \"\"\r\n\r\n if not username:\r\n messagebox.showwarning(\"Missing Username\", \"The username field is empty. Please try again.\")\r\n return\r\n if not password:\r\n messagebox.showwarning(\"Missing Password\", \"The password field is empty. Please try again.\")\r\n return\r\n if not employeeType:\r\n messagebox.showwarning(\"Missing Employee Type\", \"Please select an employee type.\")\r\n return\r\n if not phone:\r\n messagebox.showwarning(\"Missing Phone Number\", \"Please enter a phone number in format xxxxxxxxxx\")\r\n return\r\n\r\n if len(username) > 16:\r\n messagebox.showwarning(\"Username too long\", \"Usernames can have at maximum 16 letters.\")\r\n return\r\n\r\n usernameExists = cursor.execute(\"SELECT * from user where Username=%s\", username)\r\n if usernameExists:\r\n messagebox.showwarning(\"Username Already Taken\", \"This username already exists within the database.\")\r\n return\r\n\r\n if len(password) < 8:\r\n messagebox.showwarning(\"Password Too Short\", \"Passwords must have at least 8 characters.\")\r\n return\r\n\r\n if password != confirmPassword:\r\n messagebox.showwarning(\"Password Mismatch\", \"The password and the confirmed Password do not match.\")\r\n return\r\n\r\n if len(firstName) > 32:\r\n messagebox.showwarning(\"First Name too long\", \"First names can only be 32 characters. 
Please abbreviate.\")\r\n return\r\n if len(lastName) > 32:\r\n messagebox.showwarning(\"Last Name too long\", \"Last names can only be 32 characters. Please abbreviate.\")\r\n return\r\n if len(address) > 64:\r\n messagebox.showwarning(\"Address too long\", \"Addresses are limited to 64 characters. Please abbreviate.\")\r\n return\r\n if len(phone) > 10 or len(phone) < 10:\r\n messagebox.showwarning(\"Phone number incorrect\", \"Please enter a phone number in format xxxxxxxxxx\")\r\n return\r\n if len(zipcode) > 5:\r\n messagebox.showwarning(\"Zipcode too long\", \"Please enter a zipcode in format xxxxx\")\r\n return\r\n if zipcode != \"\":\r\n if len(zipcode) < 5:\r\n messagebox.showwarning(\"Zipcode too short\", \"Please enter a zipcode in format xxxxxx\")\r\n if len(city) > 32:\r\n messagebox.showwarning(\"City name too long\",\r\n \"The city name is limited to 32 characters. Please abbreviate.\")\r\n\r\n phoneExists = cursor.execute(\"SELECT * from employee where Phone=%s\", phone)\r\n if phoneExists:\r\n messagebox.showwarning(\"Phone Already Registered\", \"This phone number is already registered.\")\r\n return\r\n\r\n empId = random.randint(1, 999999999)\r\n while cursor.execute(\"SELECT * from employee where EmployeeID=%s\", empId):\r\n empId = random.randint(1, 999999999)\r\n\r\n hasValidEmail=False\r\n\r\n emailList = []\r\n while len(emailString) > 0:\r\n commaIndex = emailString.find(',')\r\n if commaIndex > -1:\r\n emailList.append(emailString[0:commaIndex])\r\n emailString = emailString[commaIndex + 1:]\r\n else:\r\n emailList.append(emailString[0:])\r\n emailString = \"\"\r\n for email in emailList:\r\n curEmail = email\r\n if curEmail.find('\\n') > -1:\r\n messagebox.showwarning(\"Email Error\",\r\n \"The format of your email(s) is wrong. You have an enter character somewhere.\")\r\n return\r\n atLocation = curEmail.find('@')\r\n if atLocation < 0:\r\n messagebox.showwarning(\"Email Error\",\r\n \"The format of your email(s) is wrong. Some email(s) is missing the @ character.\")\r\n return\r\n beforeAt = email[0:atLocation]\r\n afterAt = email[atLocation + 1:]\r\n periodLocation = afterAt.find('.')\r\n if periodLocation < 0:\r\n messagebox.showwarning(\"Email Error\",\r\n \"The format of your email(s) is wrong. Some email(s) is missing the . 
character.\")\r\n return\r\n beforePeriodAfterAt = afterAt[0:periodLocation]\r\n afterPeriod = afterAt[periodLocation + 1:]\r\n if not beforeAt.isalnum():\r\n messagebox.showwarning(\"Email Error\", \"The format of your email(s) is wrong.\")\r\n return\r\n if not beforePeriodAfterAt.isalnum():\r\n messagebox.showwarning(\"Email Error\", \"The format of your email(s) is wrong.\")\r\n return\r\n if not afterPeriod.isalnum():\r\n messagebox.showwarning(\"Email Error\", \"The format of your email(s) is wrong.\")\r\n return\r\n emailExists = cursor.execute(\"SELECT * from emails where Email=%s\", curEmail)\r\n if emailExists:\r\n messagebox.showwarning(\"Email Already Taken\",\r\n \"An email you entered already exists within the database.\")\r\n return\r\n hasValidEmail=True\r\n\r\n if hasValidEmail == False:\r\n messagebox.showwarning(\"Email not entered\", \"Please enter at least 1 email.\")\r\n return\r\n\r\n hashedPassword = encrypt(password)\r\n cursor.execute(\"INSERT into user values (%s, %s, %s, %s, %s)\",\r\n (username, hashedPassword, firstName, lastName, \"Pending\"))\r\n cursor.execute(\"INSERT into employee values (%s, %s, %s, %s, %s, %s, %s)\",\r\n (username, empId, phone, address, city, state, zipcode))\r\n cursor.execute(\"INSERT into visitor values (%s)\", username)\r\n if employeeType == \"Manager\":\r\n cursor.execute(\"INSERT into manager values (%s)\", username)\r\n elif employeeType == \"Staff\":\r\n cursor.execute(\"INSERT into staff values (%s)\", username)\r\n else:\r\n messagebox.showwarning(\"Uhh\", \"You shouldn't be here: employee-visitor\")\r\n\r\n for email in emailList:\r\n cursor.execute(\"INSERT into emails values (%s, %s)\", (username, email))\r\n db.commit()\r\n\r\n messagebox.showwarning(\"Registration Successful\",\r\n \"You are now registered. 
You will need to wait for administrator approval to login.\")\r\n\r\n self.master.deiconify()\r\n self.destroy()\r\n\r\n\r\nclass UserFunctionality(Toplevel):\r\n def __init__(self, master):\r\n Toplevel.__init__(self)\r\n self.master = master\r\n self.title('Functionality -- User')\r\n self.config(background='#ffffff')\r\n\r\n def display(self):\r\n userLabel = Label(self, text=\"User Functionality\", font=\"Helvetica\",\r\n foreground='#000000', background='#ffffff')\r\n userLabel.grid(row=1, column=1, padx=(4, 4), pady=(2, 2), sticky=W + E)\r\n\r\n takeTransitButton = Button(self, command=self.onTakeTransitButtonClicked, text=\"Take Transit\",\r\n background='#4286f4')\r\n takeTransitButton.grid(row=2, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n transitHistoryButton = Button(self, command=self.onTransitHistoryButtonClicked, text=\"Transit History\",\r\n background='#4286f4')\r\n transitHistoryButton.grid(row=3, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n userNavBackButton = Button(self, command=self.onUserFunctionalityBackButtonClicked, text=\"Back\",\r\n background='#4286f4')\r\n userNavBackButton.grid(row=4, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n def onUserFunctionalityBackButtonClicked(self):\r\n self.master.deiconify()\r\n self.destroy()\r\n\r\n def onTakeTransitButtonClicked(self):\r\n TakeTransitWindow = TakeTransit(self)\r\n self.withdraw()\r\n TakeTransitWindow.display()\r\n\r\n def onTransitHistoryButtonClicked(self):\r\n TransitHistoryWindow = TransitHistory(self)\r\n self.withdraw()\r\n TransitHistoryWindow.display()\r\n\r\n\r\nclass VisitorFunctionality(Toplevel):\r\n def __init__(self, master):\r\n Toplevel.__init__(self)\r\n self.master = master\r\n self.title('Functionality -- Visitor')\r\n self.config(background='#ffffff')\r\n\r\n def display(self):\r\n visitorFunctionalityLabel = Label(self, text=\"Visitor Functionality\", font=\"Helvetica\",\r\n foreground='#000000', background='#ffffff')\r\n visitorFunctionalityLabel.grid(row=1, column=1, padx=(4, 4), pady=(2, 2), sticky=W + E)\r\n\r\n exploreEventButton = Button(self,\r\n command=self.onVisitorFunctionalityExploreEventButtonClicked, text=\"Explore Event\",\r\n background='#4286f4')\r\n exploreEventButton.grid(row=2, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n exploreSiteButton = Button(self,\r\n command=self.onVisitorFunctionalityExploreSiteButtonClicked,\r\n text=\"Explore Site\", background='#4286f4')\r\n exploreSiteButton.grid(row=3, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n viewVisitHistoryButton = Button(self,\r\n command=self.onVisitorFunctionalityVisitHistoryButtonButtonClicked,\r\n text=\"View Visit History\", background='#4286f4')\r\n viewVisitHistoryButton.grid(row=4, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n takeTransitButton = Button(self, command=self.onTakeTransitButtonClicked,\r\n text=\"Take Transit\", background='#4286f4')\r\n takeTransitButton.grid(row=5, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n viewTransitHistoryButton = Button(self,\r\n command=self.onTransitHistoryButtonClicked,\r\n text=\"View Transit History\", background='#4286f4')\r\n viewTransitHistoryButton.grid(row=6, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n backButton = Button(self, command=self.onVisitorFunctionalityBackButtonClicked,\r\n text=\"Back\",\r\n background='#4286f4')\r\n backButton.grid(row=7, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n def 
onVisitorFunctionalityBackButtonClicked(self):\r\n self.master.deiconify()\r\n self.destroy()\r\n\r\n
 def onVisitorFunctionalityExploreEventButtonClicked(self):\r\n exploreEventWindow = visitorExploreEvent(self)\r\n exploreEventWindow.display()\r\n self.withdraw()\r\n\r\n
 def onVisitorFunctionalityExploreSiteButtonClicked(self):\r\n ExploreSiteWindow = visitorExploreSite(self)\r\n ExploreSiteWindow.display()\r\n self.withdraw()\r\n\r\n
 def onVisitorFunctionalityVisitHistoryButtonButtonClicked(self):\r\n visitHistoryWindow = VisitHistory(self)\r\n self.withdraw()\r\n visitHistoryWindow.display()\r\n\r\n
 def onTakeTransitButtonClicked(self):\r\n TakeTransitWindow = TakeTransit(self)\r\n self.withdraw()\r\n TakeTransitWindow.display()\r\n\r\n
 def onTransitHistoryButtonClicked(self):\r\n TransitHistoryWindow = TransitHistory(self)\r\n self.withdraw()\r\n TransitHistoryWindow.display()\r\n\r\n\r\nclass AdministratorFunctionality(Toplevel):\r\n def __init__(self, master):\r\n Toplevel.__init__(self)\r\n self.master = master\r\n self.title('Functionality -- Administrator-Only')\r\n self.config(background='#ffffff')\r\n\r\n
 def display(self):\r\n administratorFunctionalityLabel = Label(self, text=\"Administrator Functionality\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n administratorFunctionalityLabel.grid(row=1, column=1, padx=(4, 4), pady=(2, 2), sticky=W + E)\r\n\r\n
 adminmanageProfileButton = Button(self, command=self.onAdminManageProfileButtonClicked, text=\"Manage Profile\", background='#4286f4')\r\n adminmanageProfileButton.grid(row=2, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n
 adminmanageUserButton = Button(self, command=self.onAdminManageUserButtonClicked, text=\"Manage User\", background='#4286f4')\r\n adminmanageUserButton.grid(row=3, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n
 adminmanageTransitButton = Button(self, command=self.onAdminManageTransitButtonClicked, text=\"Manage Transit\", background='#4286f4')\r\n adminmanageTransitButton.grid(row=4, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n
 adminmanageSiteButton = Button(self, command=self.onAdminManageSiteButtonClicked, text=\"Manage Site\", background='#4286f4')\r\n adminmanageSiteButton.grid(row=5, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n
 admintakeTransitButton = Button(self, command=self.onAdminTakeTransitButtonClicked, text=\"Take Transit\", background='#4286f4')\r\n admintakeTransitButton.grid(row=6, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n
 adminviewTransitHistoryButton = Button(self, command=self.onAdminViewTransitHistoryButtonClicked, text=\"View Transit History\", background='#4286f4')\r\n adminviewTransitHistoryButton.grid(row=7, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n
 administratorBackButton = Button(self, command=self.onAdministratorFunctionalityBackButtonClicked, text=\"Back\", background='#4286f4')\r\n administratorBackButton.grid(row=8, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n
 def onAdminManageProfileButtonClicked(self):\r\n profileWindow = ManageProfile(self)\r\n profileWindow.display()\r\n self.withdraw()\r\n\r\n
 def onAdminManageUserButtonClicked(self):\r\n manageUserWindow = ManageUser(self)\r\n manageUserWindow.display()\r\n self.withdraw()\r\n\r\n
 def onAdminManageTransitButtonClicked(self):\r\n manageTransitWindow = ManageTransit(self)\r\n manageTransitWindow.display()\r\n self.withdraw()\r\n\r\n
 def onAdminManageSiteButtonClicked(self):\r\n manageSiteWindow = ManageSite(self)\r\n manageSiteWindow.display()\r\n self.withdraw()\r\n\r\n
 def onAdminTakeTransitButtonClicked(self):\r\n takeTransitWindow = TakeTransit(self)\r\n takeTransitWindow.display()\r\n self.withdraw()\r\n\r\n
 def onAdminViewTransitHistoryButtonClicked(self):\r\n transitHistoryWindow = TransitHistory(self)\r\n transitHistoryWindow.display()\r\n self.withdraw()\r\n\r\n
 def onAdministratorFunctionalityBackButtonClicked(self):\r\n self.master.deiconify()\r\n self.destroy()\r\n
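\r\n\r\n# Every screen in this file navigates the same way: opening a child window calls its
# display() and then withdraw() on the opener, while every \"Back\" handler calls
# master.deiconify() followed by self.destroy(). The helper below is only an
# illustrative sketch of that shared pattern; the name navigate_to is hypothetical
# and nothing in the application calls it.\r\ndef navigate_to(parent, window_class):\r\n # window_class is any of the Toplevel screens in this module; each takes its opener as master.\r\n child = window_class(parent)\r\n child.display()\r\n parent.withdraw()\r\n return child\r\n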
\r\n\r\nclass AdministratorVisitorFunctionality(Toplevel):\r\n def __init__(self, master):\r\n Toplevel.__init__(self)\r\n self.master = master\r\n self.title('Functionality -- Administrator-Visitor')\r\n self.config(background='#ffffff')\r\n\r\n
 def display(self):\r\n administratorVisitorFunctionalityLabel = Label(self, text=\"Administrator Visitor Functionality\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n administratorVisitorFunctionalityLabel.grid(row=1, column=1, padx=(4, 4), pady=(2, 2), sticky=W + E)\r\n\r\n
 adminVisitorManageProfileButton = Button(self, command=self.onAdminVisitorManageProfileButtonClicked, text=\"Manage Profile\", background='#4286f4')\r\n adminVisitorManageProfileButton.grid(row=2, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n
 adminVisitorManageUserButton = Button(self, command=self.onAdminVisitorManageUserButtonClicked, text=\"Manage User\", background='#4286f4')\r\n adminVisitorManageUserButton.grid(row=2, column=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n
 adminVisitorManageTransitButton = Button(self, command=self.onAdminVisitorManageTransitButtonClicked, text=\"Manage Transit\", background='#4286f4')\r\n adminVisitorManageTransitButton.grid(row=3, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n
 adminVisitorTakeTransitButton = Button(self, command=self.onAdminVisitorTakeTransitButtonClicked, text=\"Take Transit\", background='#4286f4')\r\n adminVisitorTakeTransitButton.grid(row=3, column=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n
 adminVisitorManageSiteButton = Button(self, command=self.onAdminVisitorManageSiteButtonClicked, text=\"Manage Site\", background='#4286f4')\r\n adminVisitorManageSiteButton.grid(row=4, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n
 adminVisitorExploreSiteButton = Button(self, command=self.onAdminVisitorExploreSiteButtonClicked, text=\"Explore Site\", background='#4286f4')\r\n adminVisitorExploreSiteButton.grid(row=4, column=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n
 adminVisitorExploreEventButton = Button(self, command=self.onAdminVisitorExploreEventButtonClicked, text=\"Explore Event\", background='#4286f4')\r\n adminVisitorExploreEventButton.grid(row=5, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n
 adminVisitorViewVisitHistoryButton = Button(self, command=self.onAdminVisitorViewVisitHistoryButtonClicked, text=\"View Visit History\", background='#4286f4')\r\n adminVisitorViewVisitHistoryButton.grid(row=5, column=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n
 adminVisitorViewTransitHistoryButton = Button(self, command=self.onAdminVisitorViewTransitHistoryButtonClicked, text=\"View Transit History\", background='#4286f4')\r\n adminVisitorViewTransitHistoryButton.grid(row=6, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n
 administratorBackButton = Button(self, command=self.onAdministratorFunctionalityBackButtonClicked, text=\"Back\", background='#4286f4')\r\n administratorBackButton.grid(row=6, column=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n def 
onAdminVisitorManageProfileButtonClicked(self):\r\n profileWindow = ManageProfile(self)\r\n profileWindow.display()\r\n self.withdraw()\r\n\r\n
 def onAdminVisitorManageUserButtonClicked(self):\r\n manageUserWindow = ManageUser(self)\r\n manageUserWindow.display()\r\n self.withdraw()\r\n\r\n
 def onAdminVisitorTakeTransitButtonClicked(self):\r\n takeTransitWindow = TakeTransit(self)\r\n takeTransitWindow.display()\r\n self.withdraw()\r\n\r\n
 def onAdminVisitorManageTransitButtonClicked(self):\r\n manageTransitWindow = ManageTransit(self)\r\n manageTransitWindow.display()\r\n self.withdraw()\r\n\r\n
 def onAdminVisitorManageSiteButtonClicked(self):\r\n manageSiteWindow = ManageSite(self)\r\n manageSiteWindow.display()\r\n self.withdraw()\r\n\r\n
 def onAdminVisitorExploreSiteButtonClicked(self):\r\n ExploreSiteWindow = visitorExploreSite(self)\r\n ExploreSiteWindow.display()\r\n self.withdraw()\r\n\r\n
 def onAdminVisitorExploreEventButtonClicked(self):\r\n exploreEventWindow = visitorExploreEvent(self)\r\n exploreEventWindow.display()\r\n self.withdraw()\r\n\r\n
 def onAdminVisitorViewVisitHistoryButtonClicked(self):\r\n visitHistoryWindow = VisitHistory(self)\r\n self.withdraw()\r\n visitHistoryWindow.display()\r\n\r\n
 def onAdminVisitorViewTransitHistoryButtonClicked(self):\r\n transitHistoryWindow = TransitHistory(self)\r\n transitHistoryWindow.display()\r\n self.withdraw()\r\n\r\n
 def onAdministratorFunctionalityBackButtonClicked(self):\r\n self.master.deiconify()\r\n self.destroy()\r\n\r\n\r\nclass ManagerVisitorFunctionality(Toplevel):\r\n def __init__(self, master):\r\n Toplevel.__init__(self)\r\n self.master = master\r\n self.title('Functionality -- Manager-Visitor')\r\n self.config(background='#ffffff')\r\n\r\n
 def display(self):\r\n managerVisitorFunctionalityLabel = Label(self, text=\"Manager-Visitor Functionality\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n managerVisitorFunctionalityLabel.grid(row=1, column=1, padx=(4, 4), pady=(2, 2), sticky=W + E)\r\n\r\n
 managerVisitorManageProfileButton = Button(self, command=self.onManagerVisitorManageProfileButtonClicked, text=\"Manage Profile\", background='#4286f4')\r\n managerVisitorManageProfileButton.grid(row=2, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n
 managerVisitorManageEventButton = Button(self, command=self.onManagerVisitorManageEventButtonClicked, text=\"Manage Event\", background='#4286f4')\r\n managerVisitorManageEventButton.grid(row=2, column=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n
 managerVisitorViewStaffButton = Button(self, command=self.onManagerVisitorViewStaffButtonClicked, text=\"View Staff\", background='#4286f4')\r\n managerVisitorViewStaffButton.grid(row=3, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n
 managerVisitorViewSiteReportButton = Button(self, command=self.onManagerVisitorViewSiteReportButtonClicked, text=\"View Site Report\", background='#4286f4')\r\n managerVisitorViewSiteReportButton.grid(row=3, column=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n
 managerVisitorExploreSiteButton = Button(self, command=self.onManagerVisitorExploreSiteButtonClicked, text=\"Explore Site\", background='#4286f4')\r\n managerVisitorExploreSiteButton.grid(row=4, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n
 managerVisitorExploreEventButton = Button(self, command=self.onManagerVisitorExploreEventButtonClicked, text=\"Explore Event\", background='#4286f4')\r\n managerVisitorExploreEventButton.grid(row=4, column=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n 
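# The remaining buttons reuse the shared TakeTransit, TransitHistory and VisitHistory screens; each command is bound to the onManagerVisitor... handler of the same name defined below.\r\n 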
managerVisitorTakeTransitButton = Button(self, command=self.onManagerVisitorTakeTransitButtonClicked, text=\"Take Transit\", background='#4286f4')\r\n managerVisitorTakeTransitButton.grid(row=5, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n
 managerVisitorViewTransitHistoryButton = Button(self, command=self.onManagerVisitorViewTransitHistoryButtonClicked, text=\"View Transit History\", background='#4286f4')\r\n managerVisitorViewTransitHistoryButton.grid(row=5, column=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n
 managerVisitorViewVisitHistoryButton = Button(self, command=self.onManagerVisitorViewVisitHistoryButtonClicked, text=\"View Visit History\", background='#4286f4')\r\n managerVisitorViewVisitHistoryButton.grid(row=6, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n
 managerVisitorBackButton = Button(self, command=self.onManagerVisitorFunctionalityBackButtonClicked, text=\"Back\", background='#4286f4')\r\n managerVisitorBackButton.grid(row=6, column=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n
 def onManagerVisitorManageProfileButtonClicked(self):\r\n profileWindow = ManageProfile(self)\r\n profileWindow.display()\r\n self.withdraw()\r\n\r\n
 def onManagerVisitorManageEventButtonClicked(self):\r\n manageEventWindow = ManageEvent(self)\r\n manageEventWindow.display()\r\n self.withdraw()\r\n\r\n
 def onManagerVisitorViewStaffButtonClicked(self):\r\n viewStaffWindow = ManageStaff(self)\r\n viewStaffWindow.display()\r\n self.withdraw()\r\n\r\n
 def onManagerVisitorViewSiteReportButtonClicked(self):\r\n siteReportWindow = SiteReport(self)\r\n siteReportWindow.display()\r\n self.withdraw()\r\n\r\n
 def onManagerVisitorExploreSiteButtonClicked(self):\r\n ExploreSiteWindow = visitorExploreSite(self)\r\n ExploreSiteWindow.display()\r\n self.withdraw()\r\n\r\n
 def onManagerVisitorExploreEventButtonClicked(self):\r\n exploreEventWindow = visitorExploreEvent(self)\r\n exploreEventWindow.display()\r\n self.withdraw()\r\n\r\n
 def onManagerVisitorTakeTransitButtonClicked(self):\r\n takeTransitWindow = TakeTransit(self)\r\n takeTransitWindow.display()\r\n self.withdraw()\r\n\r\n
 def onManagerVisitorViewTransitHistoryButtonClicked(self):\r\n transitHistoryWindow = TransitHistory(self)\r\n transitHistoryWindow.display()\r\n self.withdraw()\r\n\r\n
 def onManagerVisitorViewVisitHistoryButtonClicked(self):\r\n visitHistoryWindow = VisitHistory(self)\r\n self.withdraw()\r\n visitHistoryWindow.display()\r\n\r\n
 def onManagerVisitorFunctionalityBackButtonClicked(self):\r\n self.master.deiconify()\r\n self.destroy()\r\n\r\n\r\nclass StaffVisitorFunctionality(Toplevel):\r\n def __init__(self, master):\r\n Toplevel.__init__(self)\r\n self.master = master\r\n self.title('Functionality -- Staff-Visitor')\r\n self.config(background='#ffffff')\r\n\r\n
 def display(self):\r\n staffVisitorFunctionalityLabel = Label(self, text=\"Staff-Visitor Functionality\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n staffVisitorFunctionalityLabel.grid(row=1, column=1, padx=(4, 4), pady=(2, 2), sticky=W + E)\r\n\r\n
 staffVisitorManageProfileButton = Button(self, command=self.onStaffVisitorManageProfileButtonClicked, text=\"Manage Profile\", background='#4286f4')\r\n staffVisitorManageProfileButton.grid(row=2, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n
 staffVisitorExploreEventButton = Button(self, command=self.onStaffVisitorExploreEventButtonClicked, text=\"Explore Event\", background='#4286f4')\r\n staffVisitorExploreEventButton.grid(row=2, column=2, padx=(2, 2), pady=(2, 2), sticky=W + 
E)\r\n\r\n staffVisitorViewScheduleButton = Button(self,command=self.onStaffVisitorViewScheduleButtonClicked,text=\"View Schedule\", background='#4286f4')\r\n staffVisitorViewScheduleButton.grid(row=3, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n staffVisitorExploreSiteButton = Button(self,command=self.onStaffVisitorExploreSiteButtonClicked,text=\"Explore Site\", background='#4286f4')\r\n staffVisitorExploreSiteButton.grid(row=3, column=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n staffVisitorTakeTransitButton = Button(self,command=self.onStaffVisitorTakeTransitButtonClicked,text=\"Take Transit\", background='#4286f4')\r\n staffVisitorTakeTransitButton.grid(row=4, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n staffVisitorViewVisitHistoryButton = Button(self,command=self.onStaffVisitorViewVisitHistoryButtonClicked,text=\"View Visit History\", background='#4286f4')\r\n staffVisitorViewVisitHistoryButton.grid(row=4, column=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n staffVisitorViewTransitHistoryButton = Button(self,command=self.onStaffVisitorViewTransitHistoryButtonClicked,text=\"View Transit History\", background='#4286f4')\r\n staffVisitorViewTransitHistoryButton.grid(row=5, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n staffVisitorBackButton = Button(self, command=self.onStaffVisitorFunctionalityBackButtonClicked,text=\"Back\",background='#4286f4')\r\n staffVisitorBackButton.grid(row=5, column=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n def onStaffVisitorManageProfileButtonClicked(self):\r\n profileWindow = ManageProfile(self)\r\n profileWindow.display()\r\n self.withdraw()\r\n\r\n def onStaffVisitorExploreEventButtonClicked(self):\r\n exploreEventWindow = visitorExploreEvent(self)\r\n exploreEventWindow.display()\r\n self.withdraw()\r\n\r\n def onStaffVisitorViewScheduleButtonClicked(self):\r\n viewScheduleWindow = StaffViewSchedule(self)\r\n viewScheduleWindow.display()\r\n self.withdraw()\r\n\r\n def onStaffVisitorExploreSiteButtonClicked(self):\r\n ExploreSiteWindow = visitorExploreSite(self)\r\n ExploreSiteWindow.display()\r\n self.withdraw()\r\n\r\n def onStaffVisitorTakeTransitButtonClicked(self):\r\n takeTransitWindow = TakeTransit(self)\r\n takeTransitWindow.display()\r\n self.withdraw()\r\n\r\n def onStaffVisitorViewVisitHistoryButtonClicked(self):\r\n visitHistoryWindow = VisitHistory(self)\r\n self.withdraw()\r\n visitHistoryWindow.display()\r\n\r\n def onStaffVisitorViewTransitHistoryButtonClicked(self):\r\n transitHistoryWindow = TransitHistory(self)\r\n transitHistoryWindow.display()\r\n self.withdraw()\r\n\r\n def onStaffVisitorFunctionalityBackButtonClicked(self):\r\n self.master.deiconify()\r\n self.destroy()\r\n\r\n\r\nclass ManagerFunctionality(Toplevel):\r\n def __init__(self, master):\r\n Toplevel.__init__(self)\r\n self.master = master\r\n self.title('Functionality -- Manager-Only')\r\n self.config(background='#ffffff')\r\n\r\n def display(self):\r\n managerFunctionalityLabel = Label(self, text=\"Manager Functionality\", font=\"Helvetica\",foreground='#000000', background='#ffffff')\r\n managerFunctionalityLabel.grid(row=1, column=1, padx=(4, 4), pady=(2, 2), sticky=W + E)\r\n\r\n managerManageProfileButton = Button(self,command=self.onManagerManageProfileButtonClicked, text=\"Manage Profile\",background='#4286f4')\r\n managerManageProfileButton.grid(row=2, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n managerManageEventButton = 
Button(self,command=self.onManagerManageEventButtonClicked,text=\"Manage Event\", background='#4286f4')\r\n managerManageEventButton.grid(row=3, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n managerViewStaffButton = Button(self,command=self.onManagerViewStaffButtonClicked,text=\"View Staff\", background='#4286f4')\r\n managerViewStaffButton.grid(row=4, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n managerViewSiteReportButton = Button(self, command=self.onManagerViewSiteReportButtonClicked,text=\"View Site Report\", background='#4286f4')\r\n managerViewSiteReportButton.grid(row=5, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n managerTakeTransitButton = Button(self,command=self.onManagerTakeTransitButtonClicked,text=\"Take Transit\", background='#4286f4')\r\n managerTakeTransitButton.grid(row=6, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n managerViewTransitHistoryButton = Button(self,command=self.onManagerViewTransitHistoryButtonClicked,text=\"View Transit History\", background='#4286f4')\r\n managerViewTransitHistoryButton.grid(row=7, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n managerBackButton = Button(self, command=self.onManagerFunctionalityBackButtonClicked,text=\"Back\",background='#4286f4')\r\n managerBackButton.grid(row=8, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n def onManagerManageProfileButtonClicked(self):\r\n profileWindow = ManageProfile(self)\r\n profileWindow.display()\r\n self.withdraw()\r\n\r\n def onManagerManageEventButtonClicked(self):\r\n manageEventWindow = ManageEvent(self)\r\n manageEventWindow.display()\r\n self.withdraw()\r\n\r\n def onManagerViewStaffButtonClicked(self):\r\n viewStaffWindow = ManageStaff(self)\r\n viewStaffWindow.display()\r\n self.withdraw()\r\n\r\n def onManagerViewSiteReportButtonClicked(self):\r\n siteReportWindow = SiteReport(self)\r\n siteReportWindow.display()\r\n self.withdraw()\r\n\r\n def onManagerTakeTransitButtonClicked(self):\r\n takeTransitWindow = TakeTransit(self)\r\n takeTransitWindow.display()\r\n self.withdraw()\r\n\r\n def onManagerViewTransitHistoryButtonClicked(self):\r\n transitHistoryWindow = TransitHistory(self)\r\n transitHistoryWindow.display()\r\n self.withdraw()\r\n\r\n def onManagerFunctionalityBackButtonClicked(self):\r\n self.master.deiconify()\r\n self.destroy()\r\n\r\n\r\nclass StaffFunctionality(Toplevel):\r\n def __init__(self, master):\r\n Toplevel.__init__(self)\r\n self.master = master\r\n self.title('Functionality -- Staff-Only')\r\n self.config(background='#ffffff')\r\n\r\n def display(self):\r\n staffFunctionalityLabel = Label(self, text=\"Staff Functionality\", font=\"Helvetica\",foreground='#000000', background='#ffffff')\r\n staffFunctionalityLabel.grid(row=1, column=1, padx=(4, 4), pady=(2, 2), sticky=W + E)\r\n\r\n staffManageProfileButton = Button(self,command=self.onStaffManageProfileButtonClicked, text=\"Manage Profile\",background='#4286f4')\r\n staffManageProfileButton.grid(row=2, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n staffViewScheduleButton = Button(self, command=self.onStaffViewScheduleButtonClicked,text=\"View Schedule\", background='#4286f4')\r\n staffViewScheduleButton.grid(row=3, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n staffTakeTransitButton = Button(self,command=self.onStaffTakeTransitButtonClicked,text=\"Take Transit\", background='#4286f4')\r\n staffTakeTransitButton.grid(row=4, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n staffViewTransitHistoryButton = 
Button(self,command=self.onStaffViewTransitHistoryButtonClicked,text=\"View Transit History\", background='#4286f4')\r\n staffViewTransitHistoryButton.grid(row=5, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n staffBackButton = Button(self, command=self.onStaffFunctionalityBackButtonClicked, text=\"Back\",background='#4286f4')\r\n staffBackButton.grid(row=6, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n def onStaffManageProfileButtonClicked(self):\r\n profileWindow = ManageProfile(self)\r\n self.withdraw()\r\n profileWindow.display()\r\n\r\n def onStaffViewScheduleButtonClicked(self):\r\n viewScheduleWindow = StaffViewSchedule(self)\r\n viewScheduleWindow.display()\r\n self.withdraw()\r\n\r\n def onStaffTakeTransitButtonClicked(self):\r\n takeTransitWindow = TakeTransit(self)\r\n takeTransitWindow.display()\r\n self.withdraw()\r\n\r\n def onStaffViewTransitHistoryButtonClicked(self):\r\n transitHistoryWindow = TransitHistory(self)\r\n transitHistoryWindow.display()\r\n self.withdraw()\r\n\r\n def onStaffFunctionalityBackButtonClicked(self):\r\n self.master.deiconify()\r\n self.destroy()\r\n\r\n\r\nclass TakeTransit(Toplevel):\r\n def __init__(self, master):\r\n Toplevel.__init__(self)\r\n self.master = master\r\n self.title('Take Transit')\r\n self.config(background='#ffffff')\r\n self.SQL = Queries.TakeTransit(db)\r\n\r\n def display(self):\r\n transits, sitelist = self.SQL.load()\r\n\r\n self.route, self.p1, self.p2, self.tdate = StringVar(), StringVar(), StringVar(), StringVar()\r\n self.sites, self.ttype = StringVar(), StringVar()\r\n\r\n self.sites.set('Any')\r\n self.ttype.set('Any')\r\n\r\n self.resultTable = TableCanvas(self, editable=True, data=transits,\r\n read_only=True, rowheaderwidth=15, maxcellwidth=200, cellwidth=150,\r\n rows=len(transits), thefont=('Helvetica', 10), autoresizecols=1,\r\n width=150*len(list(transits.values())[0]), height=25*7)\r\n self.resultTable.show()\r\n\r\n backButton = Button(self, command=self.back, text=\"Back\", background='#4286f4')\r\n backButton.grid(row=10, column=0, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n siteLabel = Label(self, text=\"Contains Site\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n siteLabel.grid(row=0, column=2, padx=(4, 4), pady=(2, 2), sticky=W)\r\n siteDropdown = OptionMenu(self, self.sites, *sitelist + ['Any'])\r\n siteDropdown.grid(row=0, column=3, padx=(2, 5), pady=(0, 4))\r\n\r\n ttypeLabel = Label(self, text=\"Transport Type\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n ttypeLabel.grid(row=1, column=2, padx=(4, 4), pady=(2, 2), sticky=W)\r\n ttypeDropdown = OptionMenu(self, self.ttype, *['MARTA', 'Bus', 'Bike', 'Any'])\r\n ttypeDropdown.grid(row=1, column=3, padx=(2, 5), pady=(0, 4))\r\n\r\n priceLabel = Label(self, text=\"Price Range\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n priceLabel.grid(row=2, column=2, padx=(2, 2), pady=(2, 2), sticky=W)\r\n p1Box = Entry(self, textvariable=self.p1, width=5)\r\n p1Box.grid(row=2, column=3, padx=(2, 2), pady=(2, 2), sticky=W)\r\n p2Box = Entry(self, textvariable=self.p2, width=5)\r\n p2Box.grid(row=2, column=3, padx=(2, 2), pady=(2, 2), sticky=E)\r\n\r\n filterButton = Button(self, command=self.filter, text=\"Filter\", background='#4286f4')\r\n filterButton.grid(row=3, column=2, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n sortType = partial(self.filter, 'TransportType')\r\n sortTypeButton = Button(self, command=sortType, text=\"Sort by Transit Type\", 
background='#4286f4')\r\n sortTypeButton.grid(row=4, column=2, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n sortPrice = partial(self.filter, 'Price')\r\n sortPriceButton = Button(self, command=sortPrice, text=\"Sort by Price\", background='#4286f4')\r\n sortPriceButton.grid(row=5, column=2, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n sortSites = partial(self.filter, 'NumSites')\r\n sortSitesButton = Button(self, command=sortSites, text=\"Sort by Number of Sites\", background='#4286f4')\r\n sortSitesButton.grid(row=6, column=2, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n dateLabel = Label(self, text=\"Transit Date: \", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n dateLabel.grid(row=8, column=2, padx=(4, 4), pady=(2, 2), sticky=E)\r\n dateBox = Entry(self, textvariable=self.tdate, width=5)\r\n dateBox.grid(row=8, column=3, padx=(2, 2), pady=(2, 2), sticky=W+E)\r\n\r\n logButton = Button(self, command=self.take, text=\"Log Transit\", background='#4286f4')\r\n logButton.grid(row=9, column=3, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W+E)\r\n\r\n def filter(self, sort=None):\r\n if sort and self.resultTable.model.getData()[1]['Route'] == '':\r\n messagebox.showwarning('Error', 'You must have data in order to sort')\r\n return\r\n\r\n p1, p2, site, ttype = self.p1.get(), self.p2.get(), self.sites.get(), self.ttype.get()\r\n\r\n conv = {'': None, 'Any': None}\r\n p1, p2, site, ttype = conv.get(p1, p1), conv.get(p2, p2), conv.get(site, site), conv.get(ttype, ttype)\r\n\r\n if sort is None:\r\n sort = 'TransportType'\r\n transits = self.SQL.filter(p1, p2, site, ttype, sort)\r\n\r\n self.resultTable.model.deleteRows(range(0, self.resultTable.model.getRowCount()))\r\n self.resultTable.model.importDict(transits)\r\n self.resultTable.redraw()\r\n\r\n def back(self):\r\n self.master.deiconify()\r\n self.destroy()\r\n\r\n def take(self):\r\n row = self.resultTable.model.getData()[self.resultTable.getSelectedRow() + 1]\r\n route, ttype, date = row['Route'], row['TransportType'], self.tdate.get()\r\n\r\n if any([route == '', ttype == '']):\r\n messagebox.showwarning('Error', 'No transit selected. Make sure to click on the non-empty '\r\n 'row number to select which transit you are taking.')\r\n return\r\n\r\n try:\r\n datetime.strptime(date, '%Y-%m-%d')\r\n except Exception as e:\r\n print(e)\r\n messagebox.showwarning('Error', 'Incorrect date format. 
Please enter YYYY-MM-DD')\r\n\r\n return\r\n\r\n if self.SQL.submit(route, ttype, date, identifier) == -1:\r\n messagebox.showwarning('Error', 'You may not take the same transit twice in one day.')\r\n\r\n return\r\n\r\n else:\r\n messagebox.showwarning('Success', 'Transit successfully logged.')\r\n\r\n\r\nclass TransitHistory(Toplevel):\r\n def __init__(self, master):\r\n Toplevel.__init__(self)\r\n self.master = master\r\n self.title('Transit History')\r\n self.config(background='#ffffff')\r\n self.SQL = Queries.TransitHistory(db)\r\n\r\n def display(self):\r\n transits, sitelist = self.SQL.load()\r\n\r\n self.ttype, self.sites, self.d1, self.d2, self.route = StringVar(), StringVar(), StringVar(), StringVar(), StringVar()\r\n\r\n self.sites.set('Any')\r\n self.ttype.set('Any')\r\n\r\n self.resultTable = TableCanvas(self, editable=True, data=transits,\r\n read_only=True, rowheaderwidth=15, maxcellwidth=200, cellwidth=150,\r\n rows=len(transits), thefont=('Helvetica', 10), autoresizecols=1,\r\n width=150*len(list(transits.values())[0]), height=25*7)\r\n #self.resultTable.grid(row=0, column=0, rowspan=10, sticky=W + E)\r\n self.resultTable.show()\r\n\r\n backButton = Button(self, command=self.back, text=\"Back\", background='#4286f4')\r\n backButton.grid(row=20, column=0, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n siteLabel = Label(self, text=\"Contains Site\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n siteLabel.grid(row=1, column=2, padx=(4, 4), pady=(2, 2), sticky=W)\r\n siteDropdown = OptionMenu(self, self.sites, *sitelist + ['Any'])\r\n siteDropdown.grid(row=1, column=3, padx=(2, 5), pady=(0, 4))\r\n\r\n ttypeLabel = Label(self, text=\"Transport Type\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n ttypeLabel.grid(row=2, column=2, padx=(4, 4), pady=(2, 2), sticky=W)\r\n ttypeDropdown = OptionMenu(self, self.ttype, *['MARTA', 'Bus', 'Bike', 'Any'])\r\n ttypeDropdown.grid(row=2, column=3, padx=(2, 5), pady=(0, 4))\r\n\r\n routeLabel = Label(self, text=\"Route\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n routeLabel.grid(row=3, column=2, padx=(4, 4), pady=(2, 2), sticky=W)\r\n routeDropdown = Entry(self, textvariable=self.route, width=10)\r\n routeDropdown.grid(row=3, column=3, padx=(2, 5), pady=(0, 4))\r\n\r\n\r\n dateLabel = Label(self, text=\"Date Range\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n dateLabel.grid(row=4, column=2, padx=(2, 2), pady=(2, 2), sticky=W)\r\n d1Box = Entry(self, textvariable=self.d1, width=5)\r\n d1Box.grid(row=4, column=3, padx=(2, 2), pady=(2, 2), sticky=W)\r\n d2Box = Entry(self, textvariable=self.d2, width=5)\r\n d2Box.grid(row=4, column=3, padx=(2, 2), pady=(2, 2), sticky=E)\r\n\r\n filterButton = Button(self, command=self.filter, text=\"Filter\", background='#4286f4')\r\n filterButton.grid(row=5, column=2, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n sortType = partial(self.filter, 'TransportType')\r\n sortTypeButton = Button(self, command=sortType, text=\"Sort by Transit Type\", background='#4286f4')\r\n sortTypeButton.grid(row=6, column=2, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n sortRoute = partial(self.filter, 'Route')\r\n sortRouteButton = Button(self, command=sortRoute, text=\"Sort by Route\", background='#4286f4')\r\n sortRouteButton.grid(row=7, column=2, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n sortDate = partial(self.filter, 'Date')\r\n sortDateButton = Button(self, command=sortDate, 
text=\"Sort by Date\", background='#4286f4')\r\n sortDateButton.grid(row=8, column=2, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n sortPrice = partial(self.filter, 'Price')\r\n sortPriceButton = Button(self, command=sortPrice, text=\"Sort by Price\", background='#4286f4')\r\n sortPriceButton.grid(row=9, column=2, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n\r\n def filter(self, sort=None):\r\n if sort and self.resultTable.model.getData()[1]['Route'] == '':\r\n messagebox.showwarning('Error', 'You must have data in order to sort')\r\n return\r\n\r\n d1, d2, site, ttype, route = self.d1.get(), self.d2.get(), self.sites.get(), self.ttype.get(), self.route.get()\r\n\r\n conv = {'': None, 'Any': None}\r\n p1, p2, site, ttype = conv.get(d1, d1), conv.get(d2, d2), conv.get(site, site), conv.get(ttype, ttype)\r\n\r\n for d in [d1, d2]:\r\n if d:\r\n try:\r\n datetime.strptime(d, '%Y-%m-%d')\r\n except Exception as e:\r\n print(e)\r\n messagebox.showwarning('Error', 'Incorrect date format. Please enter YYYY-MM-DD')\r\n\r\n if sort is None:\r\n sort = 'Date'\r\n transits = self.SQL.filter(identifier, d1, d2, ttype, site, route, sort)\r\n\r\n self.resultTable.model.deleteRows(range(0, self.resultTable.model.getRowCount()))\r\n self.resultTable.model.importDict(transits)\r\n self.resultTable.redraw()\r\n\r\n def back(self):\r\n self.master.deiconify()\r\n self.destroy()\r\n\r\n\r\nclass ManageProfile(Toplevel):\r\n def __init__(self, master):\r\n Toplevel.__init__(self)\r\n self.master = master\r\n self.title('Manage Profile')\r\n self.config(background='#ffffff')\r\n self.SQL = Queries.ManageProfile(db)\r\n self.email_pattern = re.compile(r'\\w+@\\w+\\.\\w+')\r\n\r\n def display(self):\r\n fname, lname, empid, phone, address, emails, site, vis = self.SQL.load(identifier)\r\n\r\n self.fname, self.lname, self.phone, self.emails, self.vis = StringVar(), StringVar(), StringVar(), StringVar(), BooleanVar()\r\n self.fname.set(fname)\r\n self.lname.set(lname)\r\n self.phone.set(phone)\r\n self.emails.set(' '.join(emails))\r\n self.vis.set(vis)\r\n\r\n\r\n backButton = Button(self, command=self.back, text=\"Back\", background='#4286f4')\r\n backButton.grid(row=10, column=0, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n titleLabel = Label(self, text='Manage Profile', font='Helvetica 15', foreground='#000000', background='#ffffff')\r\n titleLabel.grid(row=0, column=3, padx=(4,4), pady=(2,2), sticky=W+E)\r\n\r\n fnameLabel = Label(self, text=\"First Name\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n fnameLabel.grid(row=1, column=1, padx=(4, 4), pady=(2, 2), sticky=E)\r\n fnameBox = Entry(self, textvariable=self.fname, width=10)\r\n fnameBox.grid(row=1, column=2, padx=(4, 4), pady=(0, 4), sticky=W)\r\n\r\n lnameLabel = Label(self, text=\"Last Name\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n lnameLabel.grid(row=1, column=4, padx=(4, 4), pady=(2, 2), sticky=E)\r\n lnameBox = Entry(self, textvariable=self.lname, width=10)\r\n lnameBox.grid(row=1, column=5, padx=(4, 4), pady=(0, 4))\r\n\r\n usernameLabel = Label(self, text=\"Username\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n usernameLabel.grid(row=2, column=1, padx=(4, 4), pady=(2, 2), sticky=E)\r\n usernameLabel2 = Label(self, text=identifier, font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n usernameLabel2.grid(row=2, column=2, padx=(4, 4), pady=(0, 4), sticky=W)\r\n\r\n siteLabel = Label(self, text=\"Site Name\", 
font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n siteLabel.grid(row=2, column=4, padx=(4, 4), pady=(2, 2), sticky=W)\r\n siteLabel2 = Label(self, text=site, font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n siteLabel2.grid(row=2, column=5, padx=(4, 4), pady=(0, 4))\r\n\r\n empidLabel = Label(self, text=\"Employee ID\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n empidLabel.grid(row=3, column=1, padx=(4, 4), pady=(2, 2), sticky=E)\r\n empidLabel2 = Label(self, text=empid, font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n empidLabel2.grid(row=3, column=2, padx=(4, 4), pady=(0, 4), sticky=W)\r\n\r\n phoneLabel = Label(self, text=\"Phone\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n phoneLabel.grid(row=3, column=4, padx=(4, 4), pady=(2, 2), sticky=W)\r\n phoneBox = Entry(self, textvariable=self.phone, width=10)\r\n phoneBox.grid(row=3, column=5, padx=(4, 4), pady=(0, 4))\r\n\r\n addrLabel = Label(self, text=\"Address\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n addrLabel.grid(row=4, column=1, padx=(4, 4), pady=(2, 2), sticky=E)\r\n addrLabel2 = Label(self, text=address, font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n addrLabel2.grid(row=4, column=2, padx=(4, 4), pady=(0, 4), sticky=W)\r\n\r\n emailLabel = Label(self, text=\"Emails (Space Separated)\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n emailLabel.grid(row=5, column=1, padx=(4, 4), pady=(2, 2), sticky=W)\r\n emailBox = Entry(self, textvariable=self.emails, width=100)\r\n emailBox.grid(row=5, column=2, padx=(4, 4), pady=(0, 4))\r\n\r\n visCheckButton = Checkbutton(self, text='Visitor', variable=self.vis)\r\n visCheckButton.grid(row=6, column=3, padx=(4,4), pady=(4,4))\r\n\r\n updateButton = Button(self, command=self.update, text=\"Update\", background='#4286f4')\r\n updateButton.grid(row=10, column=3, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n\r\n def update(self):\r\n fname, lname, phone, emails, vis = self.fname.get(), self.lname.get(), self.phone.get(), self.emails.get(), self.vis.get()\r\n\r\n if len(fname) > 32:\r\n messagebox.showwarning('Error', 'First name is too long.')\r\n return\r\n elif len(lname) > 32:\r\n messagebox.showwarning('Error', 'Last name is too long.')\r\n return\r\n elif len(phone) != 10 or phone.isdigit() is False:\r\n messagebox.showwarning('Error', 'Phone number is invalid. Make sure to input a ten digit integer.')\r\n return\r\n\r\n try:\r\n emails = emails.split(' ')\r\n for email in emails:\r\n if self.email_pattern.fullmatch(email) is None:\r\n messagebox.showwarning('Error', 'One or more emails is not in a valid format.')\r\n return\r\n except Exception as e:\r\n print(e)\r\n messagebox.showwarning('Error', 'The format of emails input is incorrect. 
Make sure to separate each email with a space.')\r\n return\r\n\r\n if any(len(email) > 32 for email in emails):\r\n messagebox.showwarning('Error', 'One or more of your emails is too long.')\r\n return\r\n\r\n if self.SQL.submit(identifier, fname, lname, phone, emails, vis) == -1:\r\n messagebox.showwarning('Error', 'One or more of your emails already exists in the database under other users.')\r\n return\r\n\r\n else:\r\n messagebox.showwarning('Success', 'Profile successfully updated.')\r\n return\r\n\r\n\r\n def back(self):\r\n if 'admin' in self.master.title().lower(): # If you uncheck Visitor, then you lose visitor functionality and vice versa\r\n if self.SQL.get_vis(identifier):\r\n adminVisFuncWin = AdministratorVisitorFunctionality(self.master.master)\r\n adminVisFuncWin.display()\r\n self.destroy()\r\n self.master.destroy()\r\n else:\r\n adminFuncWin = AdministratorFunctionality(self.master.master)\r\n adminFuncWin.display()\r\n self.master.destroy()\r\n self.destroy()\r\n\r\n elif 'man' in self.master.title().lower():\r\n if self.SQL.get_vis(identifier):\r\n manVisFuncWin = ManagerVisitorFunctionality(self.master.master)\r\n manVisFuncWin.display()\r\n self.master.destroy()\r\n self.destroy()\r\n else:\r\n manFuncWin = ManagerFunctionality(self.master.master)\r\n manFuncWin.display()\r\n self.master.destroy()\r\n self.destroy()\r\n\r\n else:\r\n if self.SQL.get_vis(identifier):\r\n staffVisFuncWin = StaffVisitorFunctionality(self.master.master)\r\n staffVisFuncWin.display()\r\n self.master.destroy()\r\n self.destroy()\r\n else:\r\n staffFuncWin = StaffFunctionality(self.master.master)\r\n staffFuncWin.display()\r\n self.master.destroy()\r\n self.destroy()\r\n\r\n\r\nclass ManageUser(Toplevel):\r\n def __init__(self, master):\r\n Toplevel.__init__(self)\r\n self.master = master\r\n self.title('Manage User')\r\n self.config(background='#ffffff')\r\n self.SQL = Queries.ManageUser(db)\r\n\r\n def display(self):\r\n users = self.SQL.load()\r\n\r\n self.user_name, self.user_type, self.status, = StringVar(), StringVar(), StringVar()\r\n\r\n self.user_type.set('Any')\r\n self.status.set('Any')\r\n\r\n self.resultTable = TableCanvas(self, editable=True, data=users,\r\n read_only=True, rowheaderwidth=15, maxcellwidth=200, cellwidth=150,\r\n rows=len(users), thefont=('Helvetica', 10), autoresizecols=1,\r\n width=150*len(list(users.values())[0]), height=25*7)\r\n #self.resultTable.grid(row=0, column=0, rowspan=10, sticky=W + E)\r\n self.resultTable.show()\r\n\r\n backButton = Button(self, command=self.back, text=\"Back\", background='#4286f4')\r\n backButton.grid(row=13, column=0, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n unameLabel = Label(self, text=\"Username\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n unameLabel.grid(row=2, column=0, padx=(4, 4), pady=(2, 2), sticky=E)\r\n unameBox = Entry(self, textvariable=self.user_name, width=10)\r\n unameBox.grid(row=2, column=1, padx=(2, 5), pady=(0, 4), sticky=W)\r\n\r\n utypeLabel = Label(self, text=\"User Type\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n utypeLabel.grid(row=3, column=0, padx=(4, 4), pady=(2, 2), sticky=E)\r\n utypeDropdown = OptionMenu(self, self.user_type, *['User', 'Manager', 'Visitor', 'Staff', 'Any'])\r\n utypeDropdown.grid(row=3, column=1, padx=(2, 5), pady=(0, 4), sticky=W)\r\n\r\n statusLabel = Label(self, text=\"Status\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n statusLabel.grid(row=4, column=0, padx=(4, 4), pady=(2, 2), sticky=E)\r\n 
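# Status mirrors the approval workflow used at registration: accounts start as 'Pending' and an administrator moves them to 'Approved' or 'Declined' from this screen.\r\n 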
statusDropdown = OptionMenu(self, self.status, *['Approved', 'Pending', 'Declined', 'Any'])\r\n statusDropdown.grid(row=4, column=1, padx=(2, 5), pady=(0, 4), sticky=W)\r\n\r\n
 filterButton = Button(self, command=self.filter, text=\"Filter\", background='#4286f4')\r\n filterButton.grid(row=5, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n
 sortType = partial(self.filter, 'UserType')\r\n sortTypeButton = Button(self, command=sortType, text=\"Sort by User Type\", background='#4286f4')\r\n sortTypeButton.grid(row=6, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n
 sortName = partial(self.filter, 'Username')\r\n sortNameButton = Button(self, command=sortName, text=\"Sort by Username\", background='#4286f4')\r\n sortNameButton.grid(row=7, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n
 sortStatus = partial(self.filter, 'Status')\r\n sortStatusButton = Button(self, command=sortStatus, text=\"Sort by User Status\", background='#4286f4')\r\n sortStatusButton.grid(row=8, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n
 sortEmail = partial(self.filter, 'Count(Email)')\r\n sortEmailButton = Button(self, command=sortEmail, text=\"Sort by Email\", background='#4286f4')\r\n sortEmailButton.grid(row=9, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n
 approveButton = Button(self, command=self.approve, text=\"Approve\", background='#4286f4')\r\n approveButton.grid(row=10, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W+E)\r\n\r\n
 denyButton = Button(self, command=self.deny, text=\"Deny\", background='#4286f4')\r\n denyButton.grid(row=11, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W+E)\r\n\r\n
 def filter(self, sort=None):\r\n if sort and self.resultTable.model.getData()[1]['Username'] == '':\r\n messagebox.showwarning('Error', 'You must have data in order to sort')\r\n return\r\n\r\n
 user_name, user_type, status = self.user_name.get(), self.user_type.get(), self.status.get()\r\n\r\n
 conv = {'': None, 'Any': None}\r\n user_name, user_type, status = conv.get(user_name, user_name), conv.get(user_type, user_type), conv.get(status, status)\r\n\r\n
 if sort is None:\r\n sort = 'Username'\r\n users = self.SQL.filter(user_name, user_type, status, sort)\r\n\r\n
 self.resultTable.model.deleteRows(range(0, self.resultTable.model.getRowCount()))\r\n self.resultTable.model.importDict(users)\r\n self.resultTable.redraw()\r\n\r\n
 def back(self):\r\n self.master.deiconify()\r\n self.destroy()\r\n\r\n
 def approve(self):\r\n row = self.resultTable.model.getData()[self.resultTable.getSelectedRow() + 1]\r\n user_name, user_type, status = row['Username'], row['UserType'], row['Status']\r\n\r\n
 if any([user_name == '', user_type == '', status == '']):\r\n messagebox.showwarning('Error', 'No user selected. Make sure to click on the non-empty '\r\n 'row number to select which user you want to approve.')\r\n return\r\n\r\n
 self.SQL.submit(user_name, 'Approved')\r\n self.resultTable.model.setValueAt('Approved', self.resultTable.getSelectedRow(), 3)\r\n self.resultTable.redraw()\r\n messagebox.showwarning('Success', 'Status successfully updated.')\r\n\r\n
 def deny(self):\r\n row = self.resultTable.model.getData()[self.resultTable.getSelectedRow() + 1]\r\n user_name, user_type, status = row['Username'], row['UserType'], row['Status']\r\n\r\n
 if any([user_name == '', user_type == '', status == '']):\r\n messagebox.showwarning('Error', 'No user selected. Make sure to click on the non-empty '\r\n 'row number to select which user you want to deny.')\r\n return\r\n elif status == 'Approved':\r\n messagebox.showwarning('Error', 'Cannot decline an already approved user.')\r\n return\r\n\r\n
 self.SQL.submit(user_name, 'Declined')\r\n self.resultTable.model.setValueAt('Declined', self.resultTable.getSelectedRow(), 3)\r\n self.resultTable.redraw()\r\n\r\n
 messagebox.showwarning('Success', 'Status successfully updated.')\r\n
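\r\n\r\n# Queries.ManageUser (imported from the separate Queries module) hides the SQL behind
# load()/filter()/submit(). The class below is only a sketch of the assumed shape of
# submit() -- a single parameterized UPDATE in the same %s placeholder style used
# throughout this file. The name _ManageUserQuerySketch and the exact table/column
# names are assumptions, not the project's real implementation.\r\nclass _ManageUserQuerySketch:\r\n def __init__(self, db):\r\n self.db = db\r\n\r\n def submit(self, username, status):\r\n # Parameterized query: the driver escapes both values, so no manual string formatting.\r\n cur = self.db.cursor()\r\n cur.execute('UPDATE user SET Status=%s WHERE Username=%s', (status, username))\r\n self.db.commit()\r\n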
\r\n\r\nclass ManageSite(Toplevel):\r\n def __init__(self, master):\r\n Toplevel.__init__(self)\r\n self.master = master\r\n self.title('Manage Site')\r\n self.config(background='#ffffff')\r\n self.SQL = Queries.ManageSite(db)\r\n\r\n
 def display(self):\r\n sites, sitenames, managers = self.SQL.load()\r\n\r\n
 self.site, self.manager, self.everyday = StringVar(), StringVar(), StringVar()\r\n\r\n
 self.site.set('Any')\r\n self.manager.set('Any')\r\n self.everyday.set('Any')\r\n\r\n
 self.resultTable = TableCanvas(self, editable=True, data=sites,\r\n read_only=True, rowheaderwidth=15, maxcellwidth=200, cellwidth=150,\r\n rows=len(sites), thefont=('Helvetica', 10), autoresizecols=1,\r\n width=150*len(list(sites.values())[0]), height=25*7)\r\n self.resultTable.show()\r\n\r\n
 backButton = Button(self, command=self.back, text=\"Back\", background='#4286f4')\r\n backButton.grid(row=13, column=0, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n
 siteLabel = Label(self, text=\"Site\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n siteLabel.grid(row=2, column=0, padx=(4, 4), pady=(2, 2), sticky=E)\r\n siteDropdown = OptionMenu(self, self.site, *sitenames + ['Any'])\r\n siteDropdown.grid(row=2, column=1, padx=(2, 5), pady=(0, 4), sticky=W)\r\n\r\n
 manLabel = Label(self, text=\"Manager\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n manLabel.grid(row=3, column=0, padx=(4, 4), pady=(2, 2), sticky=E)\r\n manDropdown = OptionMenu(self, self.manager, *managers + ['Any'])\r\n manDropdown.grid(row=3, column=1, padx=(2, 5), pady=(0, 4), sticky=W)\r\n\r\n
 everydayLabel = Label(self, text=\"Open Everyday\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n everydayLabel.grid(row=4, column=0, padx=(4, 4), pady=(2, 2), sticky=E)\r\n everydayDropdown = OptionMenu(self, self.everyday, *['True', 'False', 'Any'])\r\n everydayDropdown.grid(row=4, column=1, padx=(2, 5), pady=(0, 4), sticky=W)\r\n\r\n
 filterButton = Button(self, command=self.filter, text=\"Filter\", background='#4286f4')\r\n filterButton.grid(row=5, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n
 sortName = partial(self.filter, 'Name')\r\n sortNameButton = Button(self, command=sortName, text=\"Sort by Name\", background='#4286f4')\r\n sortNameButton.grid(row=6, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n
 sortMan = partial(self.filter, 'Manager')\r\n sortManButton = Button(self, command=sortMan, text=\"Sort by Manager\", background='#4286f4')\r\n sortManButton.grid(row=7, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n
 sortEveryday = partial(self.filter, 'OpenEveryday')\r\n sortEverydayButton = Button(self, command=sortEveryday, text=\"Sort by Availability per Day\", background='#4286f4')\r\n sortEverydayButton.grid(row=8, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n
background='#4286f4')\r\n        createButton.grid(row=9, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W+E)\r\n\r\n        deleteButton = Button(self, command=self.delete, text=\"Delete\", background='#4286f4')\r\n        deleteButton.grid(row=10, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W+E)\r\n\r\n        editButton = Button(self, command=self.edit, text=\"Edit\", background='#4286f4')\r\n        editButton.grid(row=11, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W+E)\r\n\r\n\r\n    def filter(self, sort=None):\r\n        if sort and self.resultTable.model.getData()[1]['SiteName'] == '':\r\n            messagebox.showwarning('Error', 'You must have data in order to sort')\r\n            return\r\n\r\n        site, manager, everyday = self.site.get(), self.manager.get(), self.everyday.get()\r\n\r\n        conv = {'': None, 'Any': None}\r\n        site, manager, everyday = conv.get(site, site), conv.get(manager, manager), conv.get(everyday, everyday)\r\n\r\n        if sort is None:\r\n            sort = 'SiteName'\r\n        sites = self.SQL.filter(site, manager, everyday, sort)\r\n\r\n        self.resultTable.model.deleteRows(range(0, self.resultTable.model.getRowCount()))\r\n        self.resultTable.model.importDict(sites)\r\n        self.resultTable.redraw()\r\n\r\n    def back(self):\r\n        self.master.deiconify()\r\n        self.destroy()\r\n\r\n    def edit(self):\r\n        row = self.resultTable.model.getRecordAtRow(self.resultTable.getSelectedRow())\r\n        sitename = row['SiteName']\r\n\r\n        if sitename == '':\r\n            messagebox.showwarning('Error', 'No site selected. Make sure to click on the non-empty '\r\n                                            'row number to select which site you are editing.')\r\n            return\r\n\r\n        editSiteWindow = EditSite(self)\r\n        editSiteWindow.display(sitename)\r\n        self.withdraw()\r\n\r\n    def create(self):\r\n        createSiteWindow = CreateSite(self)\r\n        createSiteWindow.display()\r\n        self.withdraw()\r\n\r\n    def delete(self):\r\n        row = self.resultTable.model.getRecordAtRow(self.resultTable.getSelectedRow())\r\n        sitename = row['SiteName']\r\n\r\n        if sitename == '':\r\n
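            # Note: these screens read the clicked row via getRecordAtRow(), while ManageUser indexes\r\n            # getData() directly; both resolve to the currently selected grid row. Deleting only needs\r\n            # the primary key (SiteName); the grid row itself is removed client-side afterwards.\r\n            messagebox.showwarning('Error', 'No site selected. 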
Make sure to click on the non-empty '\r\n                                            'row number to select which site you are deleting.')\r\n            return\r\n\r\n\r\n        self.SQL.delete(sitename)\r\n        self.resultTable.deleteRow()\r\n        self.resultTable.redrawTable()\r\n        messagebox.showwarning('Success', 'Site successfully deleted.')\r\n\r\n\r\nclass EditSite(Toplevel):\r\n    def __init__(self, master):\r\n        Toplevel.__init__(self)\r\n        self.master = master\r\n        self.title('Edit Site')\r\n        self.config(background='#ffffff')\r\n        self.SQL = Queries.EditSite(db)\r\n\r\n    def display(self, sitename):\r\n        manager, managers, zipcode, address, everyday = self.SQL.load(sitename)\r\n        self.original_sitename = sitename\r\n\r\n        self.sitename, self.zipcode, self.address, self.managers, self.everyday = StringVar(), StringVar(), StringVar(), StringVar(), BooleanVar()\r\n        self.sitename.set(sitename)\r\n        self.managers.set(manager)\r\n        self.zipcode.set(zipcode)\r\n        self.address.set(address)\r\n        self.everyday.set(everyday)\r\n\r\n        backButton = Button(self, command=self.back, text=\"Back\", background='#4286f4')\r\n        backButton.grid(row=10, column=0, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n        titleLabel = Label(self, text='Edit Site', font='Helvetica 15', foreground='#000000', background='#ffffff')\r\n        titleLabel.grid(row=0, column=3, padx=(4,4), pady=(2,2), sticky=W+E)\r\n\r\n        sitenameLabel = Label(self, text=\"Name\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n        sitenameLabel.grid(row=1, column=1, padx=(4, 4), pady=(2, 2), sticky=E)\r\n        sitenameBox = Entry(self, textvariable=self.sitename, width=50)\r\n        sitenameBox.grid(row=1, column=2, padx=(4, 4), pady=(0, 4), sticky=W)\r\n\r\n        zipLabel = Label(self, text=\"Zip Code\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n        zipLabel.grid(row=1, column=4, padx=(4, 4), pady=(2, 2), sticky=E)\r\n        zipBox = Entry(self, textvariable=self.zipcode, width=7)\r\n        zipBox.grid(row=1, column=5, padx=(4, 4), pady=(0, 4))\r\n\r\n        addressLabel = Label(self, text=\"Address\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n        addressLabel.grid(row=2, column=1, padx=(4, 4), pady=(2, 2), sticky=E)\r\n        addressBox = Entry(self, textvariable=self.address, width=50)\r\n        addressBox.grid(row=2, column=2, padx=(4, 4), pady=(0, 4))\r\n\r\n        manLabel = Label(self, text=\"Manager\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n        manLabel.grid(row=2, column=4, padx=(4, 4), pady=(2, 2), sticky=E)\r\n        manDropdown = OptionMenu(self, self.managers, *managers)\r\n        manDropdown.grid(row=2, column=5, padx=(2, 5), pady=(0, 4), sticky=W)\r\n\r\n        everydayCheckButton = Checkbutton(self, text='Open Everyday', variable=self.everyday)\r\n        everydayCheckButton.grid(row=3, column=3, padx=(4,4), pady=(4,4))\r\n\r\n        updateButton = Button(self, command=self.submit, text=\"Update\", background='#4286f4')\r\n        updateButton.grid(row=10, column=3, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n\r\n    def submit(self):\r\n        sitename, manager, zipcode, address, everyday = self.sitename.get(), self.managers.get(), self.zipcode.get(), self.address.get(), self.everyday.get()\r\n\r\n        if sitename == '':\r\n            messagebox.showwarning('Error', 'A site must have a name.')\r\n            return\r\n        if len(sitename) > 64:\r\n            messagebox.showwarning('Error', 'Site name is too long.')\r\n            return\r\n        elif len(address) > 64:\r\n            messagebox.showwarning('Error', 'Address is too long.')\r\n            return\r\n        elif len(zipcode) != 5 or zipcode.isdigit() is False:\r\n
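            # Accepts exactly five ASCII digits; extended ZIP+4 codes (e.g. 30332-0250) are rejected,\r\n            # presumably because the schema stores a 5-character zip column.\r\n            messagebox.showwarning('Error', 'Zipcode is invalid. 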
Make sure to input a 5 digit integer.')\r\n            return\r\n\r\n        if sitename != self.original_sitename:\r\n            if self.SQL.update(sitename, address, zipcode, manager, everyday, original=self.original_sitename) == -1:\r\n                messagebox.showwarning('Error', 'The sitename already exists in the database.')\r\n                return\r\n\r\n            else:\r\n                messagebox.showwarning('Success', 'Site successfully updated.')\r\n                return\r\n        else:\r\n            self.SQL.update(sitename, address, zipcode, manager, everyday, original=self.original_sitename)\r\n            messagebox.showwarning('Success', 'Site successfully updated.')\r\n            return\r\n\r\n\r\n\r\n    def back(self):\r\n        for widget in self.master.winfo_children():\r\n            widget.destroy()\r\n        self.master.display() # Refreshes\r\n        self.master.deiconify()\r\n        self.destroy()\r\n\r\n\r\nclass CreateSite(Toplevel):\r\n    def __init__(self, master):\r\n        Toplevel.__init__(self)\r\n        self.master = master\r\n        self.title('Create Site')\r\n        self.config(background='#ffffff')\r\n        self.SQL = Queries.CreateSite(db)\r\n\r\n    def display(self):\r\n        managers = self.SQL.load()\r\n\r\n        self.sitename, self.zipcode, self.address, self.managers, self.everyday = StringVar(), StringVar(), StringVar(), StringVar(), BooleanVar()\r\n\r\n        backButton = Button(self, command=self.back, text=\"Back\", background='#4286f4')\r\n        backButton.grid(row=10, column=0, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n        titleLabel = Label(self, text='Create Site', font='Helvetica 15', foreground='#000000', background='#ffffff')\r\n        titleLabel.grid(row=0, column=3, padx=(4,4), pady=(2,2), sticky=W+E)\r\n\r\n        sitenameLabel = Label(self, text=\"Name\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n        sitenameLabel.grid(row=1, column=1, padx=(4, 4), pady=(2, 2), sticky=E)\r\n        sitenameBox = Entry(self, textvariable=self.sitename, width=50)\r\n        sitenameBox.grid(row=1, column=2, padx=(4, 4), pady=(0, 4), sticky=W)\r\n\r\n        zipLabel = Label(self, text=\"Zip Code\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n        zipLabel.grid(row=1, column=4, padx=(4, 4), pady=(2, 2), sticky=E)\r\n        zipBox = Entry(self, textvariable=self.zipcode, width=7)\r\n        zipBox.grid(row=1, column=5, padx=(4, 4), pady=(0, 4))\r\n\r\n        addressLabel = Label(self, text=\"Address\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n        addressLabel.grid(row=2, column=1, padx=(4, 4), pady=(2, 2), sticky=E)\r\n        addressBox = Entry(self, textvariable=self.address, width=50)\r\n        addressBox.grid(row=2, column=2, padx=(4, 4), pady=(0, 4))\r\n\r\n        manLabel = Label(self, text=\"Manager\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n        manLabel.grid(row=2, column=4, padx=(4, 4), pady=(2, 2), sticky=E)\r\n        manDropdown = OptionMenu(self, self.managers, *managers)\r\n        manDropdown.grid(row=2, column=5, padx=(2, 5), pady=(0, 4), sticky=W)\r\n\r\n        everydayCheckButton = Checkbutton(self, text='Open Everyday', variable=self.everyday)\r\n        everydayCheckButton.grid(row=3, column=3, padx=(4,4), pady=(4,4))\r\n\r\n        createButton = Button(self, command=self.create, text=\"Create\", background='#4286f4')\r\n        createButton.grid(row=10, column=3, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n    def back(self):\r\n        for widget in self.master.winfo_children():\r\n            widget.destroy()\r\n        self.master.display() # Refreshes\r\n        self.master.deiconify()\r\n        self.destroy()\r\n\r\n    def create(self):\r\n        sitename, manager, zipcode, address, everyday = self.sitename.get(), self.managers.get(), self.zipcode.get(), self.address.get(), self.everyday.get()\r\n\r\n        
if sitename == '':\r\n            messagebox.showwarning('Error', 'A site must have a name.')\r\n            return\r\n        if len(sitename) > 64:\r\n            messagebox.showwarning('Error', 'Site name is too long.')\r\n            return\r\n        elif len(address) > 64:\r\n            messagebox.showwarning('Error', 'Address is too long.')\r\n            return\r\n        elif len(zipcode) != 5 or zipcode.isdigit() is False:\r\n            messagebox.showwarning('Error', 'Zipcode is invalid. Make sure to input a 5 digit integer.')\r\n            return\r\n        elif manager is None or manager == '':\r\n            messagebox.showwarning('Error', 'You must input a manager.')\r\n            return\r\n\r\n        if self.SQL.create(sitename, address, zipcode, manager, everyday) == -1:\r\n            messagebox.showwarning('Error', 'The sitename already exists in the database.')\r\n            return\r\n\r\n        else:\r\n            messagebox.showwarning('Success', 'Site successfully created.')\r\n            return\r\n\r\n\r\nclass ManageTransit(Toplevel):\r\n    def __init__(self, master):\r\n        Toplevel.__init__(self)\r\n        self.master = master\r\n        self.title('Manage Transit')\r\n        self.config(background='#ffffff')\r\n        self.SQL = Queries.ManageTransit(db)\r\n\r\n    def display(self):\r\n        transits, sitenames = self.SQL.load()\r\n\r\n        self.site, self.ttype, self.route, self.p1, self.p2 = StringVar(), StringVar(), StringVar(), StringVar(), StringVar()\r\n\r\n        self.site.set('Any')\r\n        self.ttype.set('Any')\r\n\r\n        self.resultTable = TableCanvas(self, editable=True, data=transits,\r\n                                       read_only=True, rowheaderwidth=15, maxcellwidth=200, cellwidth=150,\r\n                                       rows=len(transits), thefont=('Helvetica', 10), autoresizecols=1,\r\n                                       width=150*len(list(transits.values())[0]), height=25*7)\r\n        self.resultTable.show()\r\n\r\n        backButton = Button(self, command=self.back, text=\"Back\", background='#4286f4')\r\n        backButton.grid(row=13, column=0, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n        ttypeLabel = Label(self, text=\"Transport Type\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n        ttypeLabel.grid(row=2, column=0, padx=(4, 4), pady=(2, 2), sticky=E)\r\n        ttypeDropdown = OptionMenu(self, self.ttype, *['Any', 'MARTA', 'Bus', 'Bike'])\r\n        ttypeDropdown.grid(row=2, column=1, padx=(2, 5), pady=(0, 4), sticky=W)\r\n\r\n        routeLabel = Label(self, text=\"Route\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n        routeLabel.grid(row=3, column=0, padx=(4, 4), pady=(2, 2))\r\n        routeBox = Entry(self, textvariable=self.route, width=10)\r\n        routeBox.grid(row=3, column=1, padx=(4, 4), pady=(0, 4), sticky=W)\r\n\r\n        siteLabel = Label(self, text=\"Contain Site\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n        siteLabel.grid(row=4, column=0, padx=(4, 4), pady=(2, 2), sticky=E)\r\n        siteDropdown = OptionMenu(self, self.site, *sitenames + ['Any'])\r\n        siteDropdown.grid(row=4, column=1, padx=(2, 5), pady=(0, 4), sticky=W)\r\n\r\n        priceLabel = Label(self, text=\"Price Range\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n        priceLabel.grid(row=5, column=0, padx=(2, 2), pady=(2, 2), sticky=W)\r\n        p1Box = Entry(self, textvariable=self.p1, width=5)\r\n        p1Box.grid(row=5, column=0, padx=(4, 4), pady=(4, 4), sticky=E)\r\n        p2Box = Entry(self, textvariable=self.p2, width=5)\r\n        p2Box.grid(row=5, column=1, padx=(4, 4), pady=(4, 4), sticky=W)\r\n\r\n        filterButton = Button(self, command=self.filter, text=\"Filter\", background='#4286f4')\r\n        filterButton.grid(row=6, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n
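        # Every sort button is just filter() re-bound to a column name with functools.partial,\r\n        # so sorting re-runs the current filter with what is presumably the ORDER BY key.\r\n        sortTtype = partial(self.filter, 'TransportType')\r\n        sortTtypeButton = Button(self, command=sortTtype, 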
text=\"Sort by Transport Type\", background='#4286f4')\r\n sortTtypeButton.grid(row=7, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n sortPrice = partial(self.filter, 'Price')\r\n sortPriceButton = Button(self, command=sortPrice, text=\"Sort by Price\", background='#4286f4')\r\n sortPriceButton.grid(row=8, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n createButton = Button(self, command=self.create, text=\"Create\", background='#4286f4')\r\n createButton.grid(row=10, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W+E)\r\n\r\n deleteButton = Button(self, command=self.delete, text=\"Delete\", background='#4286f4')\r\n deleteButton.grid(row=11, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W+E)\r\n\r\n editButton = Button(self, command=self.edit, text=\"Edit\", background='#4286f4')\r\n editButton.grid(row=12, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W+E)\r\n\r\n\r\n def filter(self, sort=None):\r\n if sort and self.resultTable.model.getData()[1]['Route'] == '':\r\n messagebox.showwarning('Error', 'You must have data in order to sort')\r\n return\r\n\r\n site, ttype, p1, p2, route = self.site.get(), self.ttype.get(), self.p1.get(), self.p2.get(), self.route.get()\r\n\r\n conv = {'': None, 'Any': None}\r\n site, ttype, p1, p2, route = conv.get(site, site), conv.get(ttype, ttype), conv.get(p1, p1), conv.get(p2, p2), conv.get(route, route)\r\n\r\n if sort is None:\r\n sort = 'TransportType'\r\n\r\n sites = self.SQL.filter(site, ttype, route, p1, p2, sort)\r\n\r\n self.resultTable.model.deleteRows(range(0, self.resultTable.model.getRowCount()))\r\n self.resultTable.model.importDict(sites)\r\n self.resultTable.redraw()\r\n\r\n def back(self):\r\n self.master.deiconify()\r\n self.destroy()\r\n\r\n def edit(self):\r\n row = self.resultTable.model.getRecordAtRow(self.resultTable.getSelectedRow())\r\n ttype, route = row['TransportType'], row['Route']\r\n\r\n if any([ttype == '', route == '']):\r\n messagebox.showwarning('Error', 'No transit selected. Make sure to click on the non-empty '\r\n 'row number to select which transit you are taking.')\r\n return\r\n\r\n\r\n editTransitWindow = EditTransit(self)\r\n editTransitWindow.display(ttype, route)\r\n self.withdraw()\r\n\r\n def create(self):\r\n createTransitWindow = CreateTransit(self)\r\n createTransitWindow.display()\r\n self.withdraw()\r\n\r\n def delete(self):\r\n row = self.resultTable.model.getRecordAtRow(self.resultTable.getSelectedRow())\r\n ttype, route = row['TransportType'], row['Route']\r\n\r\n if route == '':\r\n messagebox.showwarning('Error', 'No transit selected. 
Make sure to click on the non-empty '\r\n 'row number to select which transit you are taking.')\r\n return\r\n\r\n self.SQL.delete(ttype, route)\r\n self.resultTable.deleteRow()\r\n self.resultTable.redrawTable()\r\n messagebox.showwarning('Success', 'Transit successfully deleted.')\r\n\r\n\r\nclass EditTransit(Toplevel):\r\n def __init__(self, master):\r\n Toplevel.__init__(self)\r\n self.master = master\r\n self.title('Edit Transit')\r\n self.config(background='#ffffff')\r\n self.SQL = Queries.EditTransit(db)\r\n\r\n def display(self, ttype, route):\r\n price, connected_sites, other_sites = self.SQL.load(ttype, route)\r\n self.ttype = ttype\r\n self.original_route = route\r\n self.route, self.price = StringVar(), StringVar()\r\n self.route.set(route)\r\n self.price.set(price)\r\n\r\n backButton = Button(self, command=self.back, text=\"Back\", background='#4286f4')\r\n backButton.grid(row=10, column=0, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n titleLabel = Label(self, text='Edit Transit', font='Helvetica 15', foreground='#000000', background='#ffffff')\r\n titleLabel.grid(row=0, column=0, padx=(4,4), pady=(2,2), sticky=W+E)\r\n\r\n ttypeLabel = Label(self, text=f\"Transport Type: {ttype}\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n ttypeLabel.grid(row=1, column=0, padx=(4, 4), pady=(2, 2), sticky=E)\r\n\r\n routeLabel = Label(self, text=\"Route\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n routeLabel.grid(row=2, column=0, padx=(4, 4), pady=(2, 2), sticky=E)\r\n routeBox = Entry(self, textvariable=self.route, width=7)\r\n routeBox.grid(row=2, column=1, padx=(4, 4), pady=(0, 4), sticky=W)\r\n\r\n priceLabel = Label(self, text=\"Price\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n priceLabel.grid(row=3, column=0, padx=(4, 4), pady=(2, 2), sticky=E)\r\n priceBox = Entry(self, textvariable=self.price, width=15)\r\n priceBox.grid(row=3, column=1, padx=(4, 4), pady=(0, 4), sticky=W)\r\n\r\n self.sitesList = Listbox(self, selectmode=MULTIPLE) # Multiple means you may select multiple sites.\r\n self.sitesList.grid(row=4, column=0, padx=(4,4), pady=(0,4), columnspan=2, sticky=W+E)\r\n\r\n for i, site in enumerate(connected_sites + other_sites):\r\n self.sitesList.insert(i, site)\r\n if site in connected_sites:\r\n self.sitesList.selection_set(i)\r\n\r\n updateButton = Button(self, command=self.submit, text=\"Update\", background='#4286f4')\r\n updateButton.grid(row=10, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n\r\n def submit(self):\r\n route, price, sites = self.route.get(), self.price.get(), self.sitesList.curselection()\r\n sites = [self.sitesList.get(i) for i in sites]\r\n if price == '':\r\n messagebox.showwarning('Error', 'You must have a price.')\r\n return\r\n elif route == '':\r\n messagebox.showwarning('Error', 'You must have a route.')\r\n return\r\n elif len(sites) < 2:\r\n messagebox.showwarning('Error', 'Each transit must be connected to at least two sites.')\r\n return\r\n try:\r\n price = float(price)\r\n if price > 9999999.99 or price < 0:\r\n messagebox.showwarning('Error', 'Price is too high, negative, or has too many decimals. 
'\r\n 'It must be a 7 digit float, with two extra decimal places.')\r\n return\r\n except:\r\n messagebox.showwarning('Error', 'Price must be a float.')\r\n return\r\n\r\n if self.SQL.submit(self.ttype, route, price, sites, self.original_route) == -1:\r\n messagebox.showwarning('Error', 'That Route/Transit combination already exist')\r\n return\r\n\r\n else:\r\n messagebox.showwarning('Success', 'Transit successfully updated')\r\n return\r\n\r\n\r\n\r\n def back(self):\r\n for widget in self.master.winfo_children():\r\n widget.destroy()\r\n self.master.display() # Refreshes\r\n self.master.deiconify()\r\n self.destroy()\r\n\r\n\r\nclass CreateTransit(Toplevel):\r\n def __init__(self, master):\r\n Toplevel.__init__(self)\r\n self.master = master\r\n self.title('Create Transit')\r\n self.config(background='#ffffff')\r\n self.SQL = Queries.CreateTransit(db)\r\n\r\n def display(self):\r\n sites = self.SQL.load()\r\n self.ttype, self.route, self.price, self.connected_sites = StringVar(), StringVar(), StringVar(), StringVar()\r\n\r\n backButton = Button(self, command=self.back, text=\"Back\", background='#4286f4')\r\n backButton.grid(row=10, column=0, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n titleLabel = Label(self, text='Create Transit', font='Helvetica 15', foreground='#000000', background='#ffffff')\r\n titleLabel.grid(row=0, column=0, padx=(4,4), pady=(2,2), sticky=W+E)\r\n\r\n ttypeLabel = Label(self, text=f\"Transport Type\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n ttypeLabel.grid(row=1, column=0, padx=(4, 4), pady=(2, 2), sticky=E)\r\n utypeDropdown = OptionMenu(self, self.ttype, *['MARTA', 'Bus', 'Bike'])\r\n utypeDropdown.grid(row=1, column=1, padx=(2, 5), pady=(0, 4), sticky=W)\r\n\r\n routeLabel = Label(self, text=\"Route\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n routeLabel.grid(row=2, column=0, padx=(4, 4), pady=(2, 2), sticky=E)\r\n routeBox = Entry(self, textvariable=self.route, width=7)\r\n routeBox.grid(row=2, column=1, padx=(4, 4), pady=(0, 4), sticky=W)\r\n\r\n priceLabel = Label(self, text=\"Price\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n priceLabel.grid(row=3, column=0, padx=(4, 4), pady=(2, 2), sticky=E)\r\n priceBox = Entry(self, textvariable=self.price, width=15)\r\n priceBox.grid(row=3, column=1, padx=(4, 4), pady=(0, 4), sticky=W)\r\n\r\n self.sitesList = Listbox(self, selectmode=MULTIPLE) # Multiple means you may select multiple sites.\r\n self.sitesList.grid(row=4, column=0, padx=(4,4), pady=(0,4), columnspan=2, sticky=W+E)\r\n\r\n for i, site in enumerate(sites):\r\n self.sitesList.insert(i, site)\r\n\r\n createButton = Button(self, command=self.submit, text=\"Create\", background='#4286f4')\r\n createButton.grid(row=10, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n\r\n def submit(self):\r\n ttype, route, price, sites = self.ttype.get(), self.route.get(), self.price.get(), self.sitesList.curselection()\r\n sites = [self.sitesList.get(i) for i in sites]\r\n\r\n if price == '':\r\n messagebox.showwarning('Error', 'You must have a price.')\r\n return\r\n elif route == '':\r\n messagebox.showwarning('Error', 'You must have a route.')\r\n return\r\n elif len(sites) < 2:\r\n messagebox.showwarning('Error', 'Each transit must be connected to at least two sites.')\r\n return\r\n try:\r\n price = float(price)\r\n if price > 9999999.99 or price < 0:\r\n messagebox.showwarning('Error', 'Price is too long, negative, or has too many decimals. 
'\r\n 'It must be a 7 digit float, with two extra decimal places.')\r\n return\r\n except:\r\n messagebox.showwarning('Error', 'Price must be a float.')\r\n return\r\n\r\n if self.SQL.create(ttype, route, price, sites) == -1:\r\n messagebox.showwarning('Error', 'That transit type/route combination already exists.')\r\n return\r\n\r\n else:\r\n messagebox.showwarning('Success', 'Transit successfully created')\r\n return\r\n\r\n\r\n\r\n def back(self):\r\n for widget in self.master.winfo_children():\r\n widget.destroy() # Refreshes by removing all widgets and then reloading them.\r\n self.master.display()\r\n self.master.deiconify()\r\n self.destroy()\r\n\r\n\r\nclass ManageEvent(Toplevel):\r\n def __init__(self, master):\r\n Toplevel.__init__(self)\r\n self.master = master\r\n self.title('Manage Event')\r\n self.config(background='#ffffff')\r\n self.SQL = Queries.ManageEvent(db)\r\n\r\n def display(self):\r\n events = self.SQL.load()\r\n\r\n self.name, self.keyword, self.d1, self.d2, self.dur1 = StringVar(), StringVar(), StringVar(), StringVar(), StringVar()\r\n self.dur2, self.vis1, self.vis2, self.rev1, self.rev2 = StringVar(), StringVar(), StringVar(), StringVar(), StringVar()\r\n\r\n self.resultTable = TableCanvas(self, editable=True, data=events,\r\n read_only=True, rowheaderwidth=15, maxcellwidth=200, cellwidth=150,\r\n rows=len(events), thefont=('Helvetica', 10), autoresizecols=1,\r\n width=150*len(list(events.values())[0]), height=25*7)\r\n self.resultTable.show()\r\n\r\n backButton = Button(self, command=self.back, text=\"Back\", background='#4286f4')\r\n backButton.grid(row=20, column=0, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n nameLabel = Label(self, text=\"Name\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n nameLabel.grid(row=2, column=0, padx=(4, 4), pady=(2, 2), sticky=E)\r\n nameBox = Entry(self, textvariable=self.name, width=10)\r\n nameBox.grid(row=2, column=1, padx=(4, 4), pady=(0, 4), sticky=W)\r\n\r\n keywordLabel = Label(self, text=\"Description Keyword\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n keywordLabel.grid(row=3, column=0, padx=(4, 4), pady=(2, 2))\r\n keywordBox = Entry(self, textvariable=self.keyword, width=10)\r\n keywordBox.grid(row=3, column=1, padx=(4, 4), pady=(0, 4), sticky=W)\r\n\r\n d1Label = Label(self, text=\"Start Date\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n d1Label.grid(row=4, column=0, padx=(4, 4), pady=(2, 2))\r\n d1Box = Entry(self, textvariable=self.d1, width=10)\r\n d1Box.grid(row=4, column=1, padx=(4, 4), pady=(0, 4), sticky=W)\r\n\r\n d2Label = Label(self, text=\"End Date\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n d2Label.grid(row=5, column=0, padx=(4, 4), pady=(2, 2))\r\n d2Box = Entry(self, textvariable=self.d2, width=10)\r\n d2Box.grid(row=5, column=1, padx=(4, 4), pady=(0, 4), sticky=W)\r\n\r\n durLabel = Label(self, text=\"Duration Range\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n durLabel.grid(row=6, column=0, padx=(2, 2), pady=(2, 2), sticky=W)\r\n dur1Box = Entry(self, textvariable=self.dur1, width=5)\r\n dur1Box.grid(row=6, column=0, padx=(4, 4), pady=(4, 4), sticky=E)\r\n dur2Box = Entry(self, textvariable=self.dur2, width=5)\r\n dur2Box.grid(row=6, column=1, padx=(4, 4), pady=(4, 4), sticky=W)\r\n\r\n revLabel = Label(self, text=\"Revenue Range\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n revLabel.grid(row=7, column=0, padx=(2, 2), pady=(2, 2), sticky=W)\r\n 
rev1Box = Entry(self, textvariable=self.rev1, width=5)\r\n rev1Box.grid(row=7, column=0, padx=(4, 4), pady=(4, 4), sticky=E)\r\n rev2Box = Entry(self, textvariable=self.rev2, width=5)\r\n rev2Box.grid(row=7, column=1, padx=(4, 4), pady=(4, 4), sticky=W)\r\n\r\n visLabel = Label(self, text=\"Visit Range\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n visLabel.grid(row=8, column=0, padx=(2, 2), pady=(2, 2), sticky=W)\r\n vis1Box = Entry(self, textvariable=self.vis1, width=5)\r\n vis1Box.grid(row=8, column=0, padx=(4, 4), pady=(4, 4), sticky=E)\r\n vis2Box = Entry(self, textvariable=self.vis2, width=5)\r\n vis2Box.grid(row=8, column=1, padx=(4, 4), pady=(4, 4), sticky=W)\r\n\r\n filterButton = Button(self, command=self.filter, text=\"Filter\", background='#4286f4')\r\n filterButton.grid(row=9, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n sortName = partial(self.filter, 'EventName')\r\n sortTtypeButton = Button(self, command=sortName, text=\"Sort by Name\", background='#4286f4')\r\n sortTtypeButton.grid(row=10, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n sortStaff = partial(self.filter, 'StaffCount')\r\n sortStaffButton = Button(self, command=sortStaff, text=\"Sort by Staff Count\", background='#4286f4')\r\n sortStaffButton.grid(row=11, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n sortDur = partial(self.filter, 'Duration')\r\n sortDurButton = Button(self, command=sortDur, text=\"Sort by Duration\", background='#4286f4')\r\n sortDurButton.grid(row=12, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n sortVisits = partial(self.filter, 'Visits')\r\n sortVisitsButton = Button(self, command=sortVisits, text=\"Sort by Visits\", background='#4286f4')\r\n sortVisitsButton.grid(row=13, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n sortRev = partial(self.filter, 'Revenue')\r\n sortRevButton = Button(self, command=sortRev, text=\"Sort by Revenue\", background='#4286f4')\r\n sortRevButton.grid(row=14, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n createButton = Button(self, command=self.create, text=\"Create\", background='#4286f4')\r\n createButton.grid(row=15, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W+E)\r\n\r\n deleteButton = Button(self, command=self.delete, text=\"Delete\", background='#4286f4')\r\n deleteButton.grid(row=16, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W+E)\r\n\r\n editButton = Button(self, command=self.edit, text=\"Edit/View\", background='#4286f4')\r\n editButton.grid(row=17, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W+E)\r\n\r\n def filter(self, sort=None):\r\n if sort and self.resultTable.model.getData()[1]['EventName'] == '':\r\n messagebox.showwarning('Error', 'You must have data in order to sort')\r\n return\r\n\r\n name, keyword, d1, d2, dur1 = self.name.get(), self.keyword.get(), self.d1.get(), self.d2.get(), self.dur1.get()\r\n dur2, vis1, vis2, rev1, rev2 = self.dur2.get(), self.vis1.get(), self.vis2.get(), self.rev1.get(), self.rev2.get()\r\n\r\n conv = {'': None, 'Any': None}\r\n filts = [conv.get(v,v) for v in [name, keyword, d1, d2, dur1, dur2, vis1, vis2, rev1, rev2]]\r\n\r\n if sort is None:\r\n sort = 'EventName'\r\n\r\n if d1:\r\n try:\r\n datetime.strptime(d1, '%Y-%m-%d')\r\n except Exception as e:\r\n print(e)\r\n messagebox.showwarning('Error', 'Incorrect date format. 
Please enter YYYY-MM-DD')\r\n                return\r\n\r\n        if d2:\r\n            try:\r\n                datetime.strptime(d2, '%Y-%m-%d')\r\n            except Exception as e:\r\n                print(e)\r\n                messagebox.showwarning('Error', 'Incorrect date format. Please enter YYYY-MM-DD')\r\n                return\r\n\r\n        # One pass over all six numeric bounds instead of six copy-pasted checks.\r\n        for bound in (dur1, dur2, rev1, rev2, vis1, vis2):\r\n            if bound and not bound.isdigit():\r\n                messagebox.showwarning('Error', 'Duration, Visits, and Revenue must be numbers')\r\n                return\r\n\r\n        events = self.SQL.filter(identifier, *filts + [sort])\r\n\r\n        self.resultTable.model.deleteRows(range(0, self.resultTable.model.getRowCount()))\r\n        self.resultTable.model.importDict(events)\r\n        self.resultTable.redraw()\r\n\r\n    def back(self):\r\n        self.master.deiconify()\r\n        self.destroy()\r\n\r\n    def edit(self):\r\n        row = self.resultTable.model.getRecordAtRow(self.resultTable.getSelectedRow())\r\n        eventname, sitename, startdate = row['EventName'], row['SiteName'], row['StartDate']\r\n\r\n        if eventname == '':\r\n            messagebox.showwarning('Error', 'No event selected. Make sure to click on the non-empty '\r\n                                            'row number to select which event you are editing.')\r\n            return\r\n\r\n        editEventWindow = EditEvent(self)\r\n        editEventWindow.display(eventname, sitename, startdate)\r\n        self.withdraw()\r\n\r\n    def create(self):\r\n        createEventWindow = CreateEvent(self)\r\n        createEventWindow.display()\r\n        self.withdraw()\r\n\r\n    def delete(self):\r\n        row = self.resultTable.model.getRecordAtRow(self.resultTable.getSelectedRow())\r\n        eventname, sitename, startdate = row['EventName'], row['SiteName'], row['StartDate']\r\n\r\n        if eventname == '':\r\n
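            # Events are keyed by the composite (EventName, SiteName, StartDate); all three are\r\n            # pulled from the selected row so the delete below hits exactly one event.\r\n            messagebox.showwarning('Error', 'No event selected. 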
Make sure to click on the non-empty '\r\n                                            'row number to select which event you are deleting.')\r\n            return\r\n\r\n        self.SQL.delete(eventname, sitename, startdate)\r\n        self.resultTable.deleteRow()\r\n        self.resultTable.redrawTable()\r\n        messagebox.showwarning('Success', 'Event successfully deleted.')\r\n\r\n\r\nclass EditEvent(Toplevel):\r\n    def __init__(self, master):\r\n        Toplevel.__init__(self)\r\n        self.master = master\r\n        self.title('Edit Event')\r\n        self.config(background='#ffffff')\r\n        self.SQL = Queries.EditEvent(db)\r\n\r\n    def display(self, eventname, sitename, startdate):\r\n        price, enddate, minstaffreq, cap, cur_staff, avail_staff, desc, dailies = self.SQL.load(identifier, eventname, sitename, startdate)\r\n        self.staff, self.desc, self.vis1, self.vis2, self.rev1, self.rev2 = StringVar(), StringVar(), StringVar(), StringVar(), StringVar(), StringVar()\r\n        self.desc.set(desc)\r\n        self.eventname, self.sitename, self.startdate = eventname, sitename, startdate\r\n\r\n        self.resultTable = TableCanvas(self, editable=True, data=dailies,\r\n                                       read_only=True, rowheaderwidth=15, maxcellwidth=200, cellwidth=150,\r\n                                       rows=len(dailies), thefont=('Helvetica', 10), autoresizecols=1,\r\n                                       width=150 * len(list(dailies.values())[0]), height=25 * 7)\r\n        self.resultTable.show()\r\n\r\n        backButton = Button(self, command=self.back, text=\"Back\", background='#4286f4')\r\n        backButton.grid(row=25, column=0, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n        eventnameLabel = Label(self, text=f\"Name: {eventname}\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n        eventnameLabel.grid(row=2, column=0, padx=(4, 4), pady=(2, 2), sticky=E)\r\n\r\n        priceLabel = Label(self, text=f\"Price: ${price}\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n        priceLabel.grid(row=3, column=0, padx=(4, 4), pady=(2, 2), sticky=E)\r\n\r\n        startdateLabel = Label(self, text=f\"Start Date: {startdate}\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n        startdateLabel.grid(row=4, column=0, padx=(4, 4), pady=(2, 2), sticky=E)\r\n\r\n        enddateLabel = Label(self, text=f\"End Date: {enddate}\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n        enddateLabel.grid(row=5, column=0, padx=(4, 4), pady=(2, 2), sticky=E)\r\n\r\n        minStaffLabel = Label(self, text=f\"Min Staff Required: {minstaffreq}\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n        minStaffLabel.grid(row=6, column=0, padx=(4, 4), pady=(2, 2), sticky=E)\r\n\r\n        capacityLabel = Label(self, text=f\"Capacity: {cap}\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n        capacityLabel.grid(row=7, column=0, padx=(4, 4), pady=(2, 2), sticky=E)\r\n\r\n        descLabel = Label(self, text=\"Description\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n        descLabel.grid(row=8, column=0, padx=(4, 4), pady=(2, 2), sticky=E)\r\n        descBox = Entry(self, textvariable=self.desc, width=100)\r\n        descBox.grid(row=8, column=1, padx=(4, 4), pady=(0, 4), sticky=W)\r\n\r\n        visLabel = Label(self, text=\"Daily Visit Range\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n        visLabel.grid(row=9, column=0, padx=(2, 2), pady=(2, 2), sticky=W)\r\n        vis1Box = Entry(self, textvariable=self.vis1, width=5)\r\n        vis1Box.grid(row=9, column=0, padx=(4, 4), pady=(4, 4), sticky=E)\r\n        vis2Box = Entry(self, textvariable=self.vis2, width=5)\r\n        vis2Box.grid(row=9, column=1, padx=(4, 4), pady=(4, 4), sticky=W)\r\n\r\n        revLabel = Label(self, text=\"Daily Revenue Range\", font=\"Helvetica\", 
foreground='#000000', background='#ffffff')\r\n        revLabel.grid(row=10, column=0, padx=(2, 2), pady=(2, 2), sticky=W)\r\n        rev1Box = Entry(self, textvariable=self.rev1, width=5)\r\n        rev1Box.grid(row=10, column=0, padx=(4, 4), pady=(4, 4), sticky=E)\r\n        rev2Box = Entry(self, textvariable=self.rev2, width=5)\r\n        rev2Box.grid(row=10, column=1, padx=(4, 4), pady=(4, 4), sticky=W)\r\n\r\n        self.staffList = Listbox(self, selectmode=MULTIPLE) # MULTIPLE means you may select multiple staff members.\r\n        self.staffList.grid(row=11, column=0, padx=(4,4), pady=(0,4), columnspan=2, sticky=W+E)\r\n\r\n        filterButton = Button(self, command=self.filter, text=\"Filter\", background='#4286f4')\r\n        filterButton.grid(row=12, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n        sortVisits = partial(self.filter, 'DailyVisits')\r\n        sortVisitsButton = Button(self, command=sortVisits, text=\"Sort by Visits\", background='#4286f4')\r\n        sortVisitsButton.grid(row=13, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n        sortRev = partial(self.filter, 'DailyRevenue')\r\n        sortRevButton = Button(self, command=sortRev, text=\"Sort by Revenue\", background='#4286f4')\r\n        sortRevButton.grid(row=14, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n        sortDate = partial(self.filter, 'Date')\r\n        sortDateButton = Button(self, command=sortDate, text=\"Sort by Date\", background='#4286f4')\r\n        sortDateButton.grid(row=15, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n        updateButton = Button(self, command=partial(self.submit, minstaffreq), text=\"Update\", background='#4286f4')\r\n        updateButton.grid(row=16, column=0, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n        for i, staffmember in enumerate(cur_staff + avail_staff):\r\n            self.staffList.insert(i, staffmember)\r\n            if staffmember in cur_staff:\r\n                self.staffList.selection_set(i)\r\n\r\n    def filter(self, sort=None):\r\n        if sort and self.resultTable.model.getData()[1]['Date'] == '':\r\n            messagebox.showwarning('Error', 'You must have data in order to sort')\r\n            return\r\n\r\n        rev1, rev2, vis1, vis2 = self.rev1.get(), self.rev2.get(), self.vis1.get(), self.vis2.get()\r\n\r\n        conv = {'': None, 'Any': None}\r\n        rev1, rev2, vis1, vis2 = conv.get(rev1, rev1), conv.get(rev2, rev2), conv.get(vis1, vis1), conv.get(vis2, vis2)\r\n\r\n        if sort is None:\r\n            sort = 'Date'\r\n\r\n        for bound in (rev1, rev2, vis1, vis2):\r\n            if bound and not bound.isdigit():\r\n                messagebox.showwarning('Error', 'Duration, Visits, and Revenue must be numbers')\r\n                return\r\n\r\n        dailies = self.SQL.filter(identifier, self.eventname, self.sitename, self.startdate, rev1, rev2, vis1, vis2, sort)\r\n\r\n        self.resultTable.model.deleteRows(range(0, self.resultTable.model.getRowCount()))\r\n        self.resultTable.model.importDict(dailies)\r\n        self.resultTable.redraw()\r\n\r\n    def submit(self, minstaff):\r\n        desc, staff = self.desc.get(), self.staffList.curselection()\r\n        staff = [self.staffList.get(i) for i in staff]\r\n        if desc == '':\r\n            messagebox.showwarning('Error', 'You must have a description.')\r\n            return\r\n        elif len(staff) < minstaff:\r\n            messagebox.showwarning('Error', 'You need more staff')\r\n            
return\r\n\r\n        self.SQL.submit(self.eventname, self.sitename, self.startdate, desc, staff)\r\n        messagebox.showwarning('Success', 'Event successfully updated.')\r\n\r\n    def back(self):\r\n        for widget in self.master.winfo_children():\r\n            widget.destroy()\r\n        self.master.display() # Refreshes\r\n        self.master.deiconify()\r\n        self.destroy()\r\n\r\n\r\nclass CreateEvent(Toplevel):\r\n    def __init__(self, master):\r\n        Toplevel.__init__(self)\r\n        self.master = master\r\n        self.title('Create Event')  # was 'Create Transit', a copy-paste leftover\r\n        self.config(background='#ffffff')\r\n        self.SQL = Queries.CreateEvent(db)\r\n\r\n    def display(self):\r\n        self.eventname, self.price, self.cap, self.minstaff, self.d1, self.d2, self.desc = StringVar(), StringVar(), StringVar(), StringVar(), StringVar(), StringVar(), StringVar()\r\n\r\n        backButton = Button(self, command=self.back, text=\"Back\", background='#4286f4')\r\n        backButton.grid(row=25, column=0, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n        eventnameLabel = Label(self, text=\"Name\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n        eventnameLabel.grid(row=1, column=0, padx=(4, 4), pady=(2, 2), sticky=E)\r\n        eventnameBox = Entry(self, textvariable=self.eventname, width=100)\r\n        eventnameBox.grid(row=1, column=1, padx=(4, 4), pady=(0, 4), sticky=W)\r\n\r\n        priceLabel = Label(self, text=\"Price\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n        priceLabel.grid(row=2, column=0, padx=(4, 4), pady=(2, 2), sticky=E)\r\n        priceBox = Entry(self, textvariable=self.price, width=100)\r\n        priceBox.grid(row=2, column=1, padx=(4, 4), pady=(0, 4), sticky=W)\r\n\r\n        capLabel = Label(self, text=\"Capacity\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n        capLabel.grid(row=3, column=0, padx=(4, 4), pady=(2, 2), sticky=E)\r\n        capBox = Entry(self, textvariable=self.cap, width=100)\r\n        capBox.grid(row=3, column=1, padx=(4, 4), pady=(0, 4), sticky=W)\r\n\r\n        minStaffLabel = Label(self, text=\"Min Staff Required\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n        minStaffLabel.grid(row=4, column=0, padx=(4, 4), pady=(2, 2), sticky=E)\r\n        minstaffBox = Entry(self, textvariable=self.minstaff, width=100)\r\n        minstaffBox.grid(row=4, column=1, padx=(4, 4), pady=(0, 4), sticky=W)\r\n\r\n        d1Label = Label(self, text=\"Start Date\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n        d1Label.grid(row=5, column=0, padx=(4, 4), pady=(2, 2), sticky=E)\r\n        d1Box = Entry(self, textvariable=self.d1, width=100)\r\n        d1Box.grid(row=5, column=1, padx=(4, 4), pady=(0, 4), sticky=W)\r\n\r\n        d2Label = Label(self, text=\"End Date\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n        d2Label.grid(row=6, column=0, padx=(4, 4), pady=(2, 2), sticky=E)\r\n        d2Box = Entry(self, textvariable=self.d2, width=100)\r\n        d2Box.grid(row=6, column=1, padx=(4, 4), pady=(0, 4), sticky=W)\r\n\r\n        descLabel = Label(self, text=\"Description\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n        descLabel.grid(row=7, column=0, padx=(4, 4), pady=(2, 2), sticky=E)\r\n        descBox = Entry(self, textvariable=self.desc, width=100)\r\n        descBox.grid(row=7, column=1, padx=(4, 4), pady=(0, 4), sticky=W)\r\n\r\n        self.staffList = Listbox(self, selectmode=MULTIPLE) # MULTIPLE means you may select multiple staff members.\r\n        self.staffList.grid(row=8, column=0, padx=(4, 4), pady=(0, 4), columnspan=2, sticky=W + E)\r\n\r\n
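        # Staff availability depends on the chosen date range, so the list is filled on demand:\r\n        # getstaff() below asks Queries.CreateEvent.get_staff(d1, d2) for staff free on those dates.\r\n        getStaffButton = Button(self, command=self.getstaff, text=\"Get possible staff members\", background='#4286f4')\r\n        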
getStaffButton.grid(row=10, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n        createButton = Button(self, command=self.submit, text=\"Create\", background='#4286f4')\r\n        createButton.grid(row=11, column=0, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n    def getstaff(self):\r\n        d1, d2 = self.d1.get(), self.d2.get()\r\n\r\n        if any([d1 == '', d2 == '']):\r\n            messagebox.showwarning('Error', 'You must input both dates first.')\r\n            return\r\n\r\n        try:\r\n            datetime.strptime(d1, '%Y-%m-%d')\r\n            datetime.strptime(d2, '%Y-%m-%d')\r\n        except Exception as e:\r\n            print(e)\r\n            messagebox.showwarning('Error', 'Incorrect date format. Please enter YYYY-MM-DD')\r\n            return  # this return was missing, so malformed dates still hit the query below\r\n\r\n        staff = self.SQL.get_staff(d1, d2)\r\n        self.staffList.delete(0, END)\r\n        for i, staffmember in enumerate(staff):\r\n            self.staffList.insert(i, staffmember)\r\n\r\n\r\n    def submit(self):\r\n        eventname, price, cap, minstaff, d1, d2, desc, staff = self.eventname.get(), self.price.get(), self.cap.get(), self.minstaff.get(), self.d1.get(), self.d2.get(), self.desc.get(), self.staffList.curselection()\r\n        staff = [self.staffList.get(i) for i in staff]\r\n\r\n        if any([eventname == '', price == '', cap == '', minstaff == '', d1 == '', d2 == '', desc == '', len(staff) == 0]):  # staff is a list, so the old staff == '' check never fired; eventname was not checked at all\r\n            messagebox.showwarning('Error', 'All fields are required.')\r\n            return\r\n        elif any([not cap.isdigit(), not minstaff.isdigit()]):\r\n            messagebox.showwarning('Error', 'Capacity and Min Staff must be ints')\r\n            return\r\n        elif len(staff) < int(minstaff):\r\n            messagebox.showwarning('Error', 'You need more staff.')\r\n            return\r\n\r\n        try:\r\n            price = float(price)\r\n            if price > 9999999.99 or price < 0:\r\n                messagebox.showwarning('Error', 'Price is too high, negative, or has too many decimals. '\r\n                                                'It must be a 7 digit float, with two extra decimal places.')\r\n                return\r\n        except:\r\n            messagebox.showwarning('Error', 'Price must be a float.')\r\n            return\r\n\r\n        if self.SQL.create(identifier, eventname, price, cap, minstaff, d1, d2, desc, staff) == -1:\r\n            messagebox.showwarning('Error', 'That event already exists.')\r\n            return\r\n\r\n        else:\r\n            messagebox.showwarning('Success', 'Event successfully created')\r\n            return\r\n\r\n\r\n\r\n    def back(self):\r\n        for widget in self.master.winfo_children():\r\n            widget.destroy() # Refreshes by removing all widgets and then reloading them.\r\n        self.master.display()\r\n        self.master.deiconify()\r\n        self.destroy()\r\n\r\n\r\nclass ManageStaff(Toplevel):\r\n    def __init__(self, master):\r\n        Toplevel.__init__(self)\r\n        self.master = master\r\n        self.title('Manage Staff')\r\n        self.config(background='#ffffff')\r\n        self.SQL = Queries.ManageStaff(db)\r\n\r\n    def display(self):\r\n        staff, sites = self.SQL.load()\r\n\r\n        self.site, self.fname, self.lname, self.d1, self.d2 = StringVar(), StringVar(), StringVar(), StringVar(), StringVar()\r\n\r\n        self.site.set('Any')\r\n\r\n        self.resultTable = TableCanvas(self, editable=True, data=staff,\r\n                                       read_only=True, rowheaderwidth=15, maxcellwidth=200, cellwidth=150,\r\n                                       rows=len(staff), thefont=('Helvetica', 10), autoresizecols=1,\r\n                                       width=150*len(list(staff.values())[0]), height=25*7)\r\n        self.resultTable.show()\r\n\r\n        backButton = Button(self, command=self.back, text=\"Back\", background='#4286f4')\r\n        backButton.grid(row=13, column=0, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n        siteLabel = Label(self, text=\"Site\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n        siteLabel.grid(row=2, column=0, padx=(4, 4), pady=(2, 2), sticky=E)\r\n        siteDropdown = OptionMenu(self, self.site, *sites + 
['Any'])\r\n siteDropdown.grid(row=2, column=1, padx=(2, 5), pady=(0, 4), sticky=W)\r\n\r\n fnameLabel = Label(self, text=\"First Name\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n fnameLabel.grid(row=3, column=0, padx=(4, 4), pady=(2, 2))\r\n fnameBox = Entry(self, textvariable=self.fname, width=10)\r\n fnameBox.grid(row=3, column=1, padx=(4, 4), pady=(0, 4), sticky=W)\r\n\r\n lnameLabel = Label(self, text=\"Last Name\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n lnameLabel.grid(row=4, column=0, padx=(4, 4), pady=(2, 2))\r\n lnameBox = Entry(self, textvariable=self.lname, width=10)\r\n lnameBox.grid(row=4, column=1, padx=(4, 4), pady=(0, 4), sticky=W)\r\n\r\n d1Label = Label(self, text=\"Start Date\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n d1Label.grid(row=5, column=0, padx=(4, 4), pady=(2, 2))\r\n d1Box = Entry(self, textvariable=self.d1, width=10)\r\n d1Box.grid(row=5, column=1, padx=(4, 4), pady=(0, 4), sticky=W)\r\n\r\n d2Label = Label(self, text=\"End Date\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n d2Label.grid(row=6, column=0, padx=(4, 4), pady=(2, 2))\r\n d2Box = Entry(self, textvariable=self.d2, width=10)\r\n d2Box.grid(row=6, column=1, padx=(4, 4), pady=(0, 4), sticky=W)\r\n\r\n filterButton = Button(self, command=self.filter, text=\"Filter\", background='#4286f4')\r\n filterButton.grid(row=7, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n sortType = partial(self.filter, 'Name')\r\n sortTypeButton = Button(self, command=sortType, text=\"Sort by Name\", background='#4286f4')\r\n sortTypeButton.grid(row=8, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n sortName = partial(self.filter, 'NumShifts')\r\n sortNameButton = Button(self, command=sortName, text=\"Sort by # Of Event Shifts\", background='#4286f4')\r\n sortNameButton.grid(row=9, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n def filter(self, sort=None):\r\n if sort and self.resultTable.model.getData()[1]['Name'] == '':\r\n messagebox.showwarning('Error', 'You must have data in order to sort')\r\n return\r\n\r\n site, fname, lname, d1, d2 = self.site.get(), self.fname.get(), self.lname.get(), self.d1.get(), self.d2.get()\r\n\r\n conv = {'': None, 'Any': None}\r\n site, fname, lname, d1, d2 = conv.get(site, site), conv.get(fname, fname), conv.get(lname, lname), conv.get(d1, d1), conv.get(d2, d2)\r\n\r\n if sort is None:\r\n sort = 'Name'\r\n staff = self.SQL.filter(site, fname, lname, d1, d2, sort)\r\n\r\n self.resultTable.model.deleteRows(range(0, self.resultTable.model.getRowCount()))\r\n self.resultTable.model.importDict(staff)\r\n self.resultTable.redraw()\r\n\r\n def back(self):\r\n self.master.deiconify()\r\n self.destroy()\r\n\r\n\r\nclass SiteReport(Toplevel):\r\n def __init__(self, master):\r\n Toplevel.__init__(self)\r\n self.master = master\r\n self.title('Site Report')\r\n self.config(background='#ffffff')\r\n self.SQL = Queries.SiteReport(db)\r\n\r\n def display(self):\r\n dailies = self.SQL.load()\r\n\r\n self.startdate, self.enddate, self.e1, self.e2, self.s1, self.s2, self.vis1, self.vis2, self.rev1, self.rev2 = StringVar(), StringVar(), StringVar(), StringVar(), StringVar(), StringVar(), StringVar(), StringVar(), StringVar(), StringVar()\r\n\r\n self.resultTable = TableCanvas(self, editable=True, data=dailies,\r\n read_only=True, rowheaderwidth=15, maxcellwidth=200, cellwidth=150,\r\n rows=len(dailies), thefont=('Helvetica', 10), 
autoresizecols=1,\r\n width=150*len(list(dailies.values())[0]), height=25*7)\r\n self.resultTable.show()\r\n\r\n backButton = Button(self, command=self.back, text=\"Back\", background='#4286f4')\r\n backButton.grid(row=20, column=0, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n startLabel = Label(self, text=\"StartDate\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n startLabel.grid(row=2, column=0, padx=(2, 2), pady=(2, 2), sticky=W)\r\n startBox = Entry(self, textvariable=self.startdate, width=5)\r\n startBox.grid(row=2, column=0, padx=(4, 4), pady=(4, 4), sticky=E)\r\n\r\n endLabel = Label(self, text=\"End Date\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n endLabel.grid(row=3, column=0, padx=(2, 2), pady=(2, 2), sticky=W)\r\n endBox = Entry(self, textvariable=self.enddate, width=5)\r\n endBox.grid(row=3, column=0, padx=(4, 4), pady=(4, 4), sticky=E)\r\n\r\n e1Label = Label(self, text=\"Event Count\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n e1Label.grid(row=4, column=0, padx=(2, 2), pady=(2, 2), sticky=W)\r\n e1Box = Entry(self, textvariable=self.e1, width=5)\r\n e1Box.grid(row=4, column=0, padx=(4, 4), pady=(4, 4), sticky=E)\r\n e2Box = Entry(self, textvariable=self.e2, width=5)\r\n e2Box.grid(row=4, column=1, padx=(4, 4), pady=(4, 4), sticky=W)\r\n\r\n s1Label = Label(self, text=\"Staff Count\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n s1Label.grid(row=5, column=0, padx=(2, 2), pady=(2, 2), sticky=W)\r\n s1Box = Entry(self, textvariable=self.s1, width=5)\r\n s1Box.grid(row=5, column=0, padx=(4, 4), pady=(4, 4), sticky=E)\r\n s2Box = Entry(self, textvariable=self.s2, width=5)\r\n s2Box.grid(row=5, column=1, padx=(4, 4), pady=(4, 4), sticky=W)\r\n\r\n revLabel = Label(self, text=\"Revenue Range\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n revLabel.grid(row=6, column=0, padx=(2, 2), pady=(2, 2), sticky=W)\r\n rev1Box = Entry(self, textvariable=self.rev1, width=5)\r\n rev1Box.grid(row=6, column=0, padx=(4, 4), pady=(4, 4), sticky=E)\r\n rev2Box = Entry(self, textvariable=self.rev2, width=5)\r\n rev2Box.grid(row=6, column=1, padx=(4, 4), pady=(4, 4), sticky=W)\r\n\r\n visLabel = Label(self, text=\"Visit Range\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n visLabel.grid(row=7, column=0, padx=(2, 2), pady=(2, 2), sticky=W)\r\n vis1Box = Entry(self, textvariable=self.vis1, width=5)\r\n vis1Box.grid(row=7, column=0, padx=(4, 4), pady=(4, 4), sticky=E)\r\n vis2Box = Entry(self, textvariable=self.vis2, width=5)\r\n vis2Box.grid(row=7, column=1, padx=(4, 4), pady=(4, 4), sticky=W)\r\n\r\n filterButton = Button(self, command=self.filter, text=\"Filter\", background='#4286f4')\r\n filterButton.grid(row=8, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n sortDate = partial(self.filter, 'Date')\r\n sortDateButton = Button(self, command=sortDate, text=\"Sort by Date\", background='#4286f4')\r\n sortDateButton.grid(row=9, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n sortEvents = partial(self.filter, 'EventCount')\r\n sortNameButton = Button(self, command=sortEvents, text=\"Sort by Event Count\", background='#4286f4')\r\n sortNameButton.grid(row=10, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n sortStaff = partial(self.filter, 'StaffCount')\r\n sortManButton = Button(self, command=sortStaff, text=\"Sort by StaffCount\", background='#4286f4')\r\n sortManButton.grid(row=11, 
column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n        sortVisits = partial(self.filter, 'TotalVisits')\r\n        sortVisitsButton = Button(self, command=sortVisits, text=\"Sort by Visits\", background='#4286f4')\r\n        sortVisitsButton.grid(row=12, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n        sortRev = partial(self.filter, 'TotalRevenue')\r\n        sortRevButton = Button(self, command=sortRev, text=\"Sort by Revenue\", background='#4286f4')\r\n        sortRevButton.grid(row=13, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n        detailButton = Button(self, command=self.detail, text=\"Daily Detail\", background='#4286f4')\r\n        detailButton.grid(row=14, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W+E)\r\n\r\n    def detail(self):\r\n        row = self.resultTable.model.getRecordAtRow(self.resultTable.getSelectedRow())\r\n        date = row['Date']\r\n\r\n        if date == '':\r\n            messagebox.showwarning('Error', 'No day selected. Make sure to click on the non-empty '\r\n                                            'row number to select which date you are detailing.')\r\n            return\r\n\r\n        dailyDetailWindow = DailyDetail(self)\r\n        dailyDetailWindow.display(date)\r\n        self.withdraw()\r\n\r\n    def filter(self, sort=None):\r\n        if sort and self.resultTable.model.getData()[1]['Date'] == '':\r\n            messagebox.showwarning('Error', 'You must have data in order to sort')\r\n            return\r\n\r\n        startdate, enddate, e1, e2, s1, s2, rev1, rev2, vis1, vis2 = self.startdate.get(), self.enddate.get(), self.e1.get(), self.e2.get(), self.s1.get(), self.s2.get(), self.rev1.get(), self.rev2.get(), self.vis1.get(), self.vis2.get()\r\n\r\n        try:\r\n            datetime.strptime(startdate, '%Y-%m-%d')\r\n            datetime.strptime(enddate, '%Y-%m-%d')\r\n        except Exception as e:\r\n            print(e)\r\n            messagebox.showwarning('Error', 'You must input a start and end date, please enter YYYY-MM-DD')\r\n            return\r\n\r\n        converted = []\r\n        conv = {'': None, 'Any': None}\r\n        for i in [e1, e2, s1, s2, rev1, rev2, vis1, vis2]:\r\n            converted.append(conv.get(i, i))\r\n\r\n        e1, e2, s1, s2, rev1, rev2, vis1, vis2 = converted\r\n\r\n        # One pass over all eight numeric bounds instead of eight copy-pasted checks.\r\n        for bound in (e1, e2, s1, s2, vis1, vis2, rev1, rev2):\r\n            if bound and not bound.isdigit():\r\n                messagebox.showwarning('Error', 'All ranges must be numbers')\r\n                return\r\n\r\n        if sort is None:\r\n            sort = 'Date'\r\n\r\n        dailies = self.SQL.filter(identifier, startdate, enddate, e1, e2, s1, s2, rev1, rev2, vis1, vis2, sort)\r\n\r\n        self.resultTable.model.deleteRows(range(0, self.resultTable.model.getRowCount()))\r\n        self.resultTable.model.importDict(dailies)\r\n        self.resultTable.redraw()\r\n\r\n    def back(self):\r\n        self.master.deiconify()\r\n        self.destroy()\r\n\r\n\r\n
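# DailyDetail drills into a single Date picked on the SiteReport screen, listing that day's\r\n# events with their revenue, visits, and assigned staff (all fetched via Queries.DailyDetail).\r\n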
class DailyDetail(Toplevel):\r\n    def __init__(self, master):\r\n        Toplevel.__init__(self)\r\n        self.master = master\r\n        self.title('Daily Detail')\r\n        self.config(background='#ffffff')\r\n        self.SQL = Queries.DailyDetail(db)\r\n\r\n    def display(self, date):\r\n        self.date = date\r\n        events = self.SQL.filter(identifier, date)\r\n\r\n        self.resultTable = TableCanvas(self, editable=True, data=events,\r\n                                       read_only=True, rowheaderwidth=15, maxcellwidth=200, cellwidth=150,\r\n                                       rows=len(events), thefont=('Helvetica', 10), autoresizecols=1,\r\n                                       width=150 * len(list(events.values())[0]), height=25 * 7)\r\n        self.resultTable.show()\r\n\r\n        backButton = Button(self, command=self.back, text=\"Back\", background='#4286f4')\r\n        backButton.grid(row=25, column=0, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n        sortEvent = partial(self.filter, 'EventName')\r\n        sortEventButton = Button(self, command=sortEvent, text=\"Sort by Event\", background='#4286f4')\r\n        sortEventButton.grid(row=13, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n        sortRev = partial(self.filter, 'Revenue')\r\n        sortRevButton = Button(self, command=sortRev, text=\"Sort by Revenue\", background='#4286f4')\r\n        sortRevButton.grid(row=14, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n        sortVisits = partial(self.filter, 'NumVisits')\r\n        sortVisitsButton = Button(self, command=sortVisits, text=\"Sort by Visits\", background='#4286f4')\r\n        sortVisitsButton.grid(row=15, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n        sortStaff = partial(self.filter, 'StaffNames')\r\n        sortVisitsButton = Button(self, command=sortStaff, text=\"Sort by Staff\", background='#4286f4')\r\n        sortVisitsButton.grid(row=16, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n    def filter(self, sort):\r\n\r\n        dailies = self.SQL.filter(identifier, self.date, sort)\r\n\r\n        self.resultTable.model.deleteRows(range(0, self.resultTable.model.getRowCount()))\r\n        self.resultTable.model.importDict(dailies)\r\n        self.resultTable.redraw()\r\n\r\n\r\n    def back(self):\r\n        for widget in self.master.winfo_children():\r\n            widget.destroy()\r\n        self.master.display() # Refreshes\r\n        self.master.deiconify()\r\n        self.destroy()\r\n\r\n\r\nclass StaffViewSchedule(Toplevel):\r\n    def __init__(self, master):\r\n        Toplevel.__init__(self)\r\n        self.master = master\r\n        self.title('Staff View Schedule')\r\n        self.config(background='#ffffff')\r\n        self.SQL = Queries.ViewSchedule(db)\r\n\r\n    def display(self):\r\n        self.eventname, self.keyword, self.startdate, self.enddate = StringVar(), StringVar(), StringVar(), StringVar()\r\n\r\n        self.resultTable = 
TableCanvas(self, editable=True, data={1: {'EventName': '', 'SiteName': '', 'StartDate': '', 'EndDate': '', 'StaffCount': ''}},\r\n read_only=True, rowheaderwidth=15, maxcellwidth=200, cellwidth=150,\r\n thefont=('Helvetica', 10), autoresizecols=1, rows=5,\r\n width=150*5, height=25*7)\r\n self.resultTable.show()\r\n\r\n backButton = Button(self, command=self.back, text=\"Back\", background='#4286f4')\r\n backButton.grid(row=25, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W+E)\r\n\r\n eventNameLabel = Label(self, text=\"Event Name\", foreground='#000000', background='#ffffff')\r\n eventNameLabel.grid(row=2, column=0, padx=(4, 4), pady=(2, 2), sticky=E)\r\n eventNameBox = Entry(self, textvariable=self.eventname, width=20)\r\n eventNameBox.grid(row=2, column=1, padx=(0, 2), pady=(0, 4), sticky=W)\r\n\r\n keywordLabel = Label(self, text=\"Description Keyword\", foreground='#000000', background='#ffffff')\r\n keywordLabel.grid(row=4, column=0, padx=(4, 4), pady=(2, 2), sticky=E)\r\n keywordBox = Entry(self, textvariable=self.keyword, width=20)\r\n keywordBox.grid(row=4, column=1, padx=(0, 2), pady=(0, 4), sticky=W)\r\n\r\n startDateLabel = Label(self, text=\"Start Date\", foreground='#000000', background='#ffffff')\r\n startDateLabel.grid(row=5, column=0, padx=(4, 4), pady=(2, 2), sticky=E)\r\n startDateBox = Entry(self, textvariable=self.startdate, width=20)\r\n startDateBox.grid(row=5, column=1, padx=(0, 2), pady=(0, 4), sticky=W)\r\n\r\n endDateLabel = Label(self, text=\"End Date\", foreground='#000000', background='#ffffff')\r\n endDateLabel.grid(row=6, column=0, padx=(4, 4), pady=(2, 2), sticky=E)\r\n endDateBox = Entry(self, textvariable=self.enddate, width=20)\r\n endDateBox.grid(row=6, column=1, padx=(0, 2), pady=(0, 4), sticky=W)\r\n\r\n filterButton = Button(self, command=self.filter, text=\"Filter\", background='#4286f4')\r\n filterButton.grid(row=7, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W+E)\r\n\r\n sortEvent = partial(self.filter, 'EventName')\r\n sortDateButton = Button(self, command=sortEvent, text=\"Sort Event\", background='#4286f4')\r\n sortDateButton.grid(row=8, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W+E)\r\n\r\n sortSite = partial(self.filter, 'SiteName')\r\n sortSiteButton = Button(self, command=sortSite, text=\"Sort Site\", background='#4286f4')\r\n sortSiteButton.grid(row=9, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W+E)\r\n\r\n sortStart = partial(self.filter, 'StartDate')\r\n sortDateButton = Button(self, command=sortStart, text=\"Sort Start Date\", background='#4286f4')\r\n sortDateButton.grid(row=10, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W+E)\r\n\r\n sortEnd = partial(self.filter, 'EndDate')\r\n sortEndButton = Button(self, command=sortEnd, text=\"Sort End Date\", background='#4286f4')\r\n sortEndButton.grid(row=11, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W+E)\r\n\r\n sortStaff = partial(self.filter, 'StaffCount')\r\n sortStaffButton = Button(self, command=sortStaff, text=\"Sort Staff Count\", background='#4286f4')\r\n sortStaffButton.grid(row=12, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W+E)\r\n\r\n viewEventButton = Button(self, command=self.view_event, text=\"View Event\", background='#4286f4')\r\n viewEventButton.grid(row=13, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W+E)\r\n\r\n def filter(self, sort='EventName'):\r\n eventname, keyword, startdate, enddate = self.eventname.get(), self.keyword.get(), self.startdate.get(), 
self.enddate.get()\r\n\r\n        converted = []\r\n        conv = {'': None, 'Any': None}\r\n        for i in [eventname, keyword, startdate, enddate]:\r\n            converted.append(conv.get(i, i))\r\n        eventname, keyword, startdate, enddate = converted\r\n\r\n        if startdate:\r\n            try:\r\n                datetime.strptime(startdate, '%Y-%m-%d')\r\n            except Exception as e:\r\n                print(e)\r\n                messagebox.showwarning('Error', 'Please enter YYYY-MM-DD')\r\n                return\r\n        if enddate:\r\n            try:\r\n                datetime.strptime(enddate, '%Y-%m-%d')\r\n            except Exception as e:\r\n                print(e)\r\n                messagebox.showwarning('Error', 'Please enter YYYY-MM-DD')\r\n                return\r\n\r\n        events = self.SQL.filter(identifier, eventname, keyword, startdate, enddate, sort)\r\n\r\n        self.resultTable.model.deleteRows(range(0, self.resultTable.model.getRowCount()))\r\n        self.resultTable.model.importDict(events)\r\n        self.resultTable.redraw()\r\n\r\n    def view_event(self):\r\n        row = self.resultTable.model.getRecordAtRow(self.resultTable.getSelectedRow())\r\n        eventname, sitename, startdate = row['EventName'], row['SiteName'], row['StartDate']\r\n\r\n        if eventname == '':\r\n            messagebox.showwarning('Error', 'No event selected. Make sure to click on the non-empty '\r\n                                            'row number to select which event you are detailing.')\r\n            return\r\n\r\n        staffEventDetailWindow = StaffEventDetail(self)\r\n        staffEventDetailWindow.display(eventname, sitename, startdate)\r\n        self.withdraw()\r\n\r\n    def back(self):\r\n        self.master.deiconify()\r\n        self.destroy()\r\n\r\n\r\nclass StaffEventDetail(Toplevel):\r\n    def __init__(self, master):\r\n        Toplevel.__init__(self)\r\n        self.master = master\r\n        self.title('Event Detail')\r\n        self.config(background='#ffffff')\r\n        self.SQL = Queries.StaffEventDetail(db)\r\n\r\n    def display(self, eventname, sitename, startdate):\r\n        enddate, duration, cap, price, desc, staffnames = self.SQL.load(eventname, sitename, startdate)\r\n\r\n        backButton = Button(self, command=self.back, text=\"Back\", background='#4286f4')\r\n        backButton.grid(row=25, column=0, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n        eventNameLabel = Label(self, text=f\"Event: {eventname}\", foreground='#000000', background='#ffffff')\r\n        eventNameLabel.grid(row=1, column=0, padx=(4, 4), pady=(2, 2), sticky=E)\r\n\r\n        siteNameLabel = Label(self, text=f\"Site: {sitename}\", foreground='#000000', background='#ffffff')\r\n        siteNameLabel.grid(row=2, column=0, padx=(4, 4), pady=(2, 2), sticky=E)\r\n\r\n        startLabel = Label(self, text=f\"Startdate: {startdate}\", foreground='#000000', background='#ffffff')\r\n        startLabel.grid(row=3, column=0, padx=(4, 4), pady=(2, 2), sticky=E)\r\n\r\n        endLabel = Label(self, text=f\"Enddate: {enddate}\", foreground='#000000', background='#ffffff')\r\n        endLabel.grid(row=4, column=0, padx=(4, 4), pady=(2, 2), sticky=E)\r\n\r\n        pLabel = Label(self, text=f\"Price: {price}\", foreground='#000000', background='#ffffff')\r\n        pLabel.grid(row=5, column=0, padx=(4, 4), pady=(2, 2), sticky=E)\r\n\r\n        durLabel = Label(self, text=f\"Duration: {duration}\", foreground='#000000', background='#ffffff')\r\n        durLabel.grid(row=6, column=0, padx=(4, 4), pady=(2, 2), sticky=E)\r\n\r\n        capLabel = Label(self, text=f\"Capacity: {cap}\", foreground='#000000', background='#ffffff')\r\n        capLabel.grid(row=7, column=0, padx=(4, 4), pady=(2, 2), sticky=E)\r\n\r\n        staffLabel = Label(self, text=f\"Staff: {staffnames}\", foreground='#000000', background='#ffffff')\r\n        staffLabel.grid(row=8, column=0, padx=(4, 4), pady=(2, 2), sticky=E)\r\n\r\n        descLabel = Label(self, text=f\"Description: {desc}\", foreground='#000000', 
background='#ffffff', wraplength=500)\r\n        descLabel.grid(row=9, column=0, columnspan=1, rowspan=5, padx=(4, 4), pady=(2, 2))\r\n\r\n    def back(self):\r\n        for widget in self.master.winfo_children():\r\n            widget.destroy()\r\n        self.master.display() # Refreshes\r\n        self.master.deiconify()\r\n        self.destroy()\r\n\r\n\r\nclass visitorExploreEvent(Toplevel):\r\n    def __init__(self,master):\r\n        Toplevel.__init__(self)\r\n        self.master = master\r\n        self.title('Explore Event')\r\n        self.config(background='#ffffff')\r\n        self.SQL = Queries.visitorExploreEvent(db)\r\n\r\n    def display(self):\r\n        events, eventNames, siteNames, startDates, ticketPrices, ticketRemainings, totalVisits, myVisits = self.SQL.load(identifier)\r\n\r\n        self.eventName = StringVar()\r\n        self.descriptionKeyword = StringVar()\r\n        self.siteName = StringVar()\r\n        self.siteName.set(\"Any\")\r\n        self.startDate = StringVar()\r\n        self.endDate = StringVar()\r\n        self.TVR1 = StringVar()\r\n        self.TVR2 = StringVar()\r\n        self.TPR1 = StringVar()\r\n        self.TPR2 = StringVar()\r\n        self.includeVisited = StringVar()\r\n        self.includeVisited.set(\"0\")\r\n        self.includeSoldOut = StringVar()\r\n        self.includeSoldOut.set(\"0\")\r\n\r\n        self.resultTable = TableCanvas(self, editable=True, data=events,\r\n                                       read_only=True, rowheaderwidth=15, maxcellwidth=200, cellwidth=150,\r\n                                       rows=len(events), thefont=('Helvetica', 10), autoresizecols=1,\r\n                                       width=150*len(list(events.values())[0]), height=25*7)\r\n        self.resultTable.grid(row=1, column=1, rowspan=10, sticky=W + E)\r\n        self.resultTable.show()\r\n\r\n        sortEventName = partial(self.filter, 'EventName')\r\n        sortEventNameButton = Button(self, command=sortEventName, text=\"Sort by Event Name\", background='#4286f4')\r\n        sortEventNameButton.grid(row=16, column=1, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n        sortSiteName = partial(self.filter, 'SiteName')\r\n        sortSiteNameButton = Button(self, command=sortSiteName, text=\"Sort by Site Name\", background='#4286f4')\r\n        sortSiteNameButton.grid(row=17, column=1, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n        sortTicketPrice = partial(self.filter, 'Price')\r\n        sortTicketPriceButton = Button(self, command=sortTicketPrice, text=\"Sort by Ticket Price\", background='#4286f4')\r\n        sortTicketPriceButton.grid(row=18, column=1, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n        sortTicketRemaining = partial(self.filter, 'TicketsRemaining')\r\n        sortTicketRemainingButton = Button(self, command=sortTicketRemaining, text=\"Sort by Ticket Remaining\", background='#4286f4')\r\n        sortTicketRemainingButton.grid(row=19, column=1, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n        sortTotalVisits = partial(self.filter, 'TotalNumVisits')\r\n        sortTotalVisitsButton = Button(self, command=sortTotalVisits, text=\"Sort by Total Visits\", background='#4286f4')\r\n        sortTotalVisitsButton.grid(row=20, column=1, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n        sortMyVisits = partial(self.filter, 'MyVisits')\r\n        sortMyVisitsButton = Button(self, command=sortMyVisits, text=\"Sort by My Visits\", background='#4286f4')\r\n        sortMyVisitsButton.grid(row=21, column=1, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n        eventNameLabel = Label(self, text=\"Event Name\", foreground='#000000', background='#ffffff')\r\n        eventNameLabel.grid(row=5, column=1, padx=(4, 4), pady=(2, 2), sticky=W, columnspan = 2)\r\n        descriptionKeywordLabel = Label(self, text=\"Description Keyword\", foreground='#000000', background='#ffffff')\r\n        
descriptionKeywordLabel.grid(row=6, column=1, padx=(4, 4), pady=(2, 2), sticky=W, columnspan = 2)\r\n siteNameLabel = Label(self, text=\"Site Name\", foreground='#000000', background='#ffffff')\r\n siteNameLabel.grid(row=7, column=1, padx=(4, 4), pady=(2, 2), sticky=W, columnspan = 2)\r\n startDateLabel = Label(self, text=\"Start Date\", foreground='#000000', background='#ffffff')\r\n startDateLabel.grid(row=8, column=1, padx=(4, 4), pady=(2, 2), sticky=W, columnspan = 2)\r\n endDateLabel = Label(self, text=\"End Date\", foreground='#000000', background='#ffffff')\r\n endDateLabel.grid(row=9, column=1, padx=(4, 4), pady=(2, 2), sticky=W, columnspan = 2)\r\n TVRLabel = Label(self, text=\"Total Visits Range\", foreground='#000000', background='#ffffff')\r\n TVRLabel.grid(row=10, column=1, padx=(4, 4), pady=(2, 2), sticky=W, columnspan = 2)\r\n TPRLabel = Label(self, text=\"Ticket Price Range\", foreground='#000000', background='#ffffff')\r\n TPRLabel.grid(row=11, column=1, padx=(4, 4), pady=(2, 2), sticky=W, columnspan = 2)\r\n includeVisitedCheckbutton = Checkbutton(self, variable=self.includeVisited, text=\"Include Visited\", foreground='#000000', background='#ffffff')\r\n includeVisitedCheckbutton.grid(row=12, column=1, padx=(4, 4), pady=(2, 2), sticky=W, columnspan = 2)\r\n includeSoldOutCheckbutton = Checkbutton(self, variable=self.includeSoldOut, text=\"Include Sold Out\", foreground='#000000', background='#ffffff')\r\n includeSoldOutCheckbutton.grid(row=13, column=1, padx=(4, 4), pady=(2, 2), sticky=W, columnspan = 2)\r\n filterButton = Button(self,command=self.filter, text=\"Filter\", background='#4286f4')\r\n filterButton.grid(row=14, column=1, padx=(2, 2), pady=(2, 2), sticky=W)\r\n eventDetailButton = Button(self, command=self.onEventDetailClicked, text=\"Event Detail\", background='#4286f4')\r\n eventDetailButton.grid(row=15, column=1, padx=(2, 2), pady=(2, 2), sticky=W)\r\n\r\n backButton = Button(self, command=self.back, text=\"Back\", background='#4286f4')\r\n backButton.grid(row=22, column=1, padx=(2, 2), pady=(2, 2), sticky=W)\r\n eventNameBox = Entry(self, textvariable=self.eventName, width=20)\r\n eventNameBox.grid(row=5, column=3, padx=(0, 2), pady=(0, 4), sticky=E)\r\n descriptionKeywordBox = Entry(self, textvariable=self.descriptionKeyword, width=20)\r\n descriptionKeywordBox.grid(row=6, column=3, padx=(0, 2), pady=(0, 4), sticky=E)\r\n\r\n siteNameDropdown = OptionMenu(self, self.siteName, *siteNames + ['Any'])\r\n siteNameDropdown.grid(row=7, column=3, padx=(2, 5), pady=(0, 4), sticky=W)\r\n startDateBox = Entry(self, textvariable=self.startDate, width=20)\r\n startDateBox.grid(row=8, column=3, padx=(0, 2), pady=(0, 4), sticky=E)\r\n endDateBox = Entry(self, textvariable=self.endDate, width=20)\r\n endDateBox.grid(row=9, column=3, padx=(0, 2), pady=(0, 4), sticky=E)\r\n TVR1Box = Entry(self, textvariable=self.TVR1, width=20)\r\n TVR1Box.grid(row=10, column=3, padx=(0, 2), pady=(0, 4), sticky=E)\r\n TVR2Box = Entry(self, textvariable=self.TVR2, width=20)\r\n TVR2Box.grid(row=10, column=4, padx=(0, 2), pady=(0, 4), sticky=E)\r\n TPR1Box = Entry(self, textvariable=self.TPR1, width=20)\r\n TPR1Box.grid(row=11, column=3, padx=(0, 2), pady=(0, 4), sticky=E)\r\n TPR2Box = Entry(self, textvariable=self.TPR2, width=20)\r\n TPR2Box.grid(row=11, column=4, padx=(0, 2), pady=(0, 4), sticky=E)\r\n\r\n def filter(self, sort=None):\r\n # if sort and self.resultTable.model.getData()[1]['SiteName'] == '':\r\n # messagebox.showwarning('Error', 'You must have data in order to sort')\r\n 
# return\r\n\r\n        event, site, keyword, startDate, endDate, TVR1, TVR2, TPR1, TPR2, includeVisited, includeSoldOut = self.eventName.get(), self.siteName.get(), self.descriptionKeyword.get(), self.startDate.get(), self.endDate.get(), self.TVR1.get(), self.TVR2.get(), self.TPR1.get(), self.TPR2.get(), self.includeVisited.get(), self.includeSoldOut.get()\r\n\r\n        # collapse the '' / 'Any' placeholders to None, matching the converted-list pattern used by the other filters\r\n        converted = []\r\n        conv = {'': None, 'Any': None}\r\n        for i in [event, site, keyword, startDate, endDate, TVR1, TVR2, TPR1, TPR2, includeVisited, includeSoldOut]:\r\n            converted.append(conv.get(i, i))\r\n        event, site, keyword, startDate, endDate, TVR1, TVR2, TPR1, TPR2, includeVisited, includeSoldOut = converted\r\n\r\n        if sort is None:\r\n            sort = 'EventName'\r\n        sites = self.SQL.filter(identifier, event, site, keyword, startDate, endDate, TVR1, TVR2, TPR1, TPR2, includeVisited, includeSoldOut, sort)\r\n\r\n        self.resultTable.model.deleteRows(range(0, self.resultTable.model.getRowCount()))\r\n        self.resultTable.model.importDict(sites)\r\n        self.resultTable.redraw()\r\n\r\n    def back(self):\r\n        self.master.deiconify()\r\n        self.destroy()\r\n\r\n    def onEventDetailClicked(self):\r\n        row = self.resultTable.model.getRecordAtRow(self.resultTable.getSelectedRow())\r\n        eventName = row['EventName']\r\n        siteName = row['SiteName']\r\n        startDate = row['StartDate']\r\n\r\n        if eventName == '':\r\n            messagebox.showwarning('Error', 'No event selected. Make sure to click on the non-empty '\r\n                                            'row number to select which event you are viewing.')\r\n            return\r\n\r\n        visitorEventDetailWindow = visitorEventDetail(self)\r\n        self.withdraw()\r\n        visitorEventDetailWindow.display(eventName, siteName, startDate)\r\n\r\n\r\nclass visitorEventDetail(Toplevel):\r\n    def __init__(self, master):\r\n        Toplevel.__init__(self)\r\n        self.master = master\r\n        self.title('Event Detail')\r\n        self.config(background='#ffffff')\r\n        self.SQL = Queries.visitorEventDetail(db)\r\n\r\n    def display(self, eventname, sitename, startdate):\r\n        eventName, siteName, startDate, endDate, ticketPrice, ticketsRemaining, description = self.SQL.load(identifier, eventname, sitename, startdate)\r\n\r\n        self.eventName = StringVar()\r\n        self.siteName = StringVar()\r\n        self.startDate = StringVar()\r\n        self.endDate = StringVar()\r\n        self.ticketPrice = StringVar()\r\n        self.ticketsRemaining = StringVar()\r\n        self.description = StringVar()\r\n        self.visitDate = StringVar()\r\n\r\n        self.eventName.set(eventName)\r\n        self.siteName.set(siteName)\r\n        self.startDate.set(startDate)\r\n        self.endDate.set(endDate)\r\n        self.ticketPrice.set(ticketPrice)\r\n        self.ticketsRemaining.set(ticketsRemaining)\r\n        self.description.set(description)\r\n\r\n        eventNameLabel = Label(self, text='Event Name', foreground='#000000', background='#ffffff')\r\n        eventNameLabel.grid(row=1, column=1, padx=(4,4), pady=(2,2), sticky=W)\r\n        eventNameDataLabel = Label(self, text=self.eventName.get(), foreground='#000000', background='#ffffff')\r\n        eventNameDataLabel.grid(row=1, column=2, padx=(4,4), pady=(2,2), sticky=W)\r\n\r\n        siteNameLabel = Label(self, text='Site Name', foreground='#000000', background='#ffffff')\r\n        siteNameLabel.grid(row=2, column=1, padx=(4,4), pady=(2,2), sticky=W)\r\n        siteNameDataLabel = Label(self, text=self.siteName.get(), foreground='#000000', background='#ffffff')\r\n        siteNameDataLabel.grid(row=2, column=2, padx=(4,4), pady=(2,2), sticky=W)\r\n\r\n        startDateLabel = Label(self, text='Start Date', 
foreground='#000000', background='#ffffff')\r\n startDateLabel.grid(row=3, column=1, padx=(4,4), pady=(2,2), sticky=W)\r\n startDateDataLabel = Label(self, text=self.startDate.get(), foreground='#000000', background='#ffffff')\r\n startDateDataLabel.grid(row=3, column=2, padx=(4,4), pady=(2,2), sticky=W)\r\n\r\n endDateLabel = Label(self, text='End Date', foreground='#000000', background='#ffffff')\r\n endDateLabel.grid(row=4, column=1, padx=(4,4), pady=(2,2), sticky=W)\r\n endDateDataLabel = Label(self, text=self.endDate.get(), foreground='#000000', background='#ffffff')\r\n endDateDataLabel.grid(row=4, column=2, padx=(4,4), pady=(2,2), sticky=W)\r\n\r\n ticketPriceLabel = Label(self, text='Ticket Price($)', foreground='#000000', background='#ffffff')\r\n ticketPriceLabel.grid(row=5, column=1, padx=(4,4), pady=(2,2), sticky=W)\r\n ticketPriceDataLabel = Label(self, text=self.ticketPrice.get(), foreground='#000000', background='#ffffff')\r\n ticketPriceDataLabel.grid(row=5, column=2, padx=(4,4), pady=(2,2), sticky=W)\r\n\r\n ticketsRemainingLabel = Label(self, text='Tickets Remaining', foreground='#000000', background='#ffffff')\r\n ticketsRemainingLabel.grid(row=6, column=1, padx=(4,4), pady=(2,2), sticky=W)\r\n ticketsRemainingDataLabel = Label(self, text=self.ticketsRemaining.get(), foreground='#000000', background='#ffffff')\r\n ticketsRemainingDataLabel.grid(row=6, column=2, padx=(4,4), pady=(2,2), sticky=W)\r\n\r\n descriptionLabel = Label(self, text='Description', foreground='#000000', background='#ffffff')\r\n descriptionLabel.grid(row=7, column=1, padx=(4,4), pady=(2,2), sticky=W)\r\n descriptionLabelData = Text(self, height=4, width=15, wrap=WORD)\r\n descriptionLabelData.insert(\"1.0\", self.description.get())\r\n descriptionLabelData.grid(row=7, column=2, padx=(4, 4), pady=(2, 2), sticky=W)\r\n\r\n visitDateLabel = Label(self, text='Visit Date', foreground='#000000', background='#ffffff')\r\n visitDateLabel.grid(row=11, column=1, padx=(4,4), pady=(2,2), sticky=W)\r\n visitDateDataBox = Entry(self, textvariable=self.visitDate, width=20)\r\n visitDateDataBox.grid(row=11, column=2, padx=(0, 2), pady=(0, 4), sticky=W)\r\n\r\n logVisitButton = Button(self, command=self.logVisit, text=\"Log Visit\", background='#4286f4')\r\n logVisitButton.grid(row=12, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n backButton = Button(self, command=self.back, text=\"Back\", background='#4286f4')\r\n backButton.grid(row=13, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n def logVisit(self):\r\n if((self.visitDate.get() < self.startDate.get()) or (self.visitDate.get() > self.endDate.get())):\r\n messagebox.showwarning(\"Invalid Date\",\r\n \"Date must be within the time of the event.\")\r\n elif(self.ticketsRemaining.get() == \"0\"):\r\n messagebox.showwarning(\"No Tickets Remaining\",\r\n \"There are no tickets remaining for this event.\")\r\n else:\r\n cursor.execute(\"SELECT EventName FROM visitevent WHERE EventName = \\'\" +self.eventName.get()+ \"\\' AND SiteName = \\'\" +self.siteName.get()+ \"\\' AND StartDate = \\'\" +self.startDate.get()+ \"\\' AND Date = \\'\" +self.visitDate.get()+ \"\\' AND visUsername = \\'\" +identifier+ \"\\'\")\r\n event = cursor.fetchone()\r\n if(event is not None):\r\n messagebox.showwarning(\"Already Logged\",\r\n \"There is already a visit logged for this event at this time.\")\r\n else:\r\n cursor.execute(\"INSERT into visitevent values (%s, %s, %s, %s, %s)\",\r\n (identifier, self.siteName.get(), self.eventName.get(), self.startDate.get(), 
self.visitDate.get()))\r\n                db.commit()  # persist the insert; the pymysql connection is not opened with autocommit\r\n                messagebox.showinfo(\"Success\",\r\n                                    \"Your visit has been logged.\")\r\n\r\n    def back(self):\r\n        self.master.deiconify()\r\n        self.destroy()\r\n\r\nclass visitorTransitDetail(Toplevel):\r\n    def __init__(self,master):\r\n        Toplevel.__init__(self)\r\n        self.master = master\r\n        self.title('Transit Detail')\r\n        self.config(background='#ffffff')\r\n        self.SQL = Queries.visitorTransitDetail(db)\r\n\r\n    def display(self, sitename):\r\n        routes, transportTypes = self.SQL.load(sitename)\r\n\r\n        self.siteName = StringVar()\r\n        self.siteName.set(sitename)\r\n        self.transportType = StringVar()\r\n        #self.transportTypes = transportTypes\r\n        self.transitDate = StringVar()\r\n        self.routeName = StringVar()\r\n\r\n        self.resultTable = TableCanvas(self, editable=True, data=routes,\r\n                                       read_only=True, rowheaderwidth=15, maxcellwidth=200, cellwidth=150,\r\n                                       rows=len(routes), thefont=('Helvetica', 10), autoresizecols=1,\r\n                                       width=150*len(list(routes.values())[0]), height=25*7)\r\n        self.resultTable.grid(row=1, column=1, rowspan=10, sticky=W + E)\r\n        self.resultTable.show()\r\n\r\n        siteNameLabel = Label(self, text=\"Site Name\", foreground='#000000', background='#ffffff')\r\n        siteNameLabel.grid(row=11, column=1, padx=(4, 4), pady=(2, 2), sticky=W, columnspan = 2)\r\n        siteNameDataLabel = Label(self, text=self.siteName.get(), foreground='#000000', background='#ffffff')\r\n        siteNameDataLabel.grid(row=11, column=3, padx=(4, 4), pady=(2, 2), sticky=W, columnspan = 2)\r\n\r\n        transportTypeLabel = Label(self, text=\"Transport Type\", foreground='#000000', background='#ffffff')\r\n        transportTypeLabel.grid(row=12, column=1, padx=(4, 4), pady=(2, 2), sticky=W, columnspan = 2)\r\n        transportTypeDropdown = OptionMenu(self, self.transportType, *transportTypes + ['Any'])\r\n        transportTypeDropdown.grid(row=12, column=3, padx=(2, 5), pady=(0, 4), sticky=W)\r\n\r\n        filterButton = Button(self,command=self.filter, text=\"Filter\", background='#4286f4')\r\n        filterButton.grid(row=12, column=4, padx=(2, 2), pady=(2, 2), sticky=W)\r\n\r\n        transitDateLabel = Label(self, text=\"Transit Date\", foreground='#000000', background='#ffffff')\r\n        transitDateLabel.grid(row=13, column=1, padx=(4, 4), pady=(2, 2), sticky=W, columnspan = 2)\r\n        transitDateBox = Entry(self, textvariable=self.transitDate, width=20)\r\n        transitDateBox.grid(row=13, column=3, padx=(0, 2), pady=(0, 4), sticky=E)\r\n\r\n        logVisitButton = Button(self, command=self.logVisit, text=\"Log Visit\", background='#4286f4')\r\n        logVisitButton.grid(row=13, column=4, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n        backButton = Button(self, command=self.back, text=\"Back\", background='#4286f4')\r\n        backButton.grid(row=14, column=1, padx=(2, 2), pady=(2, 2), sticky=W)\r\n\r\n    def logVisit(self):\r\n        row = self.resultTable.model.getRecordAtRow(self.resultTable.getSelectedRow())\r\n        routeName = row['Route']\r\n        cursor.execute(\"Select Route From take WHERE Username = \\'\" +identifier+ \"\\' AND Date = \\'\" +self.transitDate.get()+ \"\\' AND Route = \\'\" +routeName+ \"\\' AND TransportType = \\'\" +self.transportType.get()+ \"\\'\")\r\n        route = cursor.fetchone()\r\n        if(route is not None):\r\n            messagebox.showwarning(\"Already Logged\",\r\n                                   \"There is already a visit logged for you at this site and date.\")\r\n        else:\r\n            cursor.execute(\"INSERT into take values (%s, %s, %s, %s)\",\r\n                           (identifier, self.transportType.get(), routeName, self.transitDate.get()))\r\n            db.commit()  # persist the insert; the pymysql connection is not opened with autocommit\r\n            messagebox.showinfo(\"Success\",\r\n                                \"Your visit has been logged.\")\r\n\r\n    def filter(self):\r\n        # 
event, site, keyword, startDate, endDate, TVR1, TVR2, TPR1, TPR2, includeVisited, includeSoldOut = self.eventName.get(), self.siteName.get(), self.descriptionKeyword.get(), self.startDate.get(), self.endDate.get(), self.TVR1.get(), self.TVR2.get(), self.TPR1.get(), self.TPR2.get(), self.includeVisited.get(), self.includeSoldOut.get()\r\n\r\n # conv = {'': None, 'Any': None}\r\n # event, site, keyword, startDate, endDate, TVR1, TVR2, TPR1, TPR2, includeVisited, includeSoldOut = conv.get(event, event), conv.get(site, site), conv.get(keyword, keyword), conv.get(startDate, startDate), conv.get(endDate, endDate), conv.get(TVR1, TVR1), conv.get(TVR2, TVR2), conv.get(TPR1, TPR1), conv.get(TPR2, TPR2), conv.get(includeVisited, includeVisited), conv.get(includeSoldOut, includeSoldOut)\r\n\r\n # if sort is None:\r\n # sort = 'EventName'\r\n routes = self.SQL.filter(self.siteName.get(), self.transportType.get())\r\n\r\n self.resultTable.model.deleteRows(range(0, self.resultTable.model.getRowCount()))\r\n self.resultTable.model.importDict(routes)\r\n self.resultTable.redraw()\r\n\r\n def back(self):\r\n self.master.deiconify()\r\n self.destroy()\r\n\r\n\r\nclass visitorExploreSite(Toplevel):\r\n def __init__(self,master):\r\n Toplevel.__init__(self)\r\n self.master = master\r\n self.title('Explore Site')\r\n self.config(background='#ffffff')\r\n self.SQL = Queries.VisitorExploreSite(db)\r\n\r\n def display(self):\r\n sitedict = {1:{\"SiteName\":\"\",\"EventCount\":\"\",\"TotalVisits\":\"\",\"MyVisits\":\"\"}}\r\n sites = self.SQL.load(identifier)\r\n\r\n\r\n self.includeVisited = StringVar()\r\n self.includeVisited.set(\"0\")\r\n self.siteName = StringVar()\r\n self.siteName.set(\"Any\")\r\n self.openEveryday = StringVar()\r\n self.openEveryday.set(\"Any\")\r\n self.openEverydayList = [\"0\", \"1\"]\r\n self.startDate = StringVar()\r\n self.endDate = StringVar()\r\n self.totalVisitsRange1 = StringVar()\r\n self.totalVisitsRange2 = StringVar()\r\n self.eventCountRange1 = StringVar()\r\n self.eventCountRange2 = StringVar()\r\n self.resultTable = TableCanvas(self, editable=True, data=sitedict,\r\n read_only=True, rowheaderwidth=15, maxcellwidth=200, cellwidth=150,\r\n rows=len(sitedict), thefont=('Helvetica', 10), autoresizecols=1,\r\n width=150*len(list(sitedict.values())[0]), height=25*7)\r\n\r\n self.resultTable.show()\r\n\r\n siteLabel = Label(self, text=\"Site\", foreground='#000000', background='#ffffff')\r\n siteLabel.grid(row=2, column=1, padx=(4, 4), pady=(2, 2), sticky=W, columnspan = 2)\r\n\r\n siteNameDropdown = OptionMenu(self, self.siteName, *sites)\r\n siteNameDropdown.grid(row=2,column=3,padx=(8,5),pady=(0,4),sticky = W)\r\n\r\n openEverydayLabel = Label(self, text=\"Open Everyday\", foreground='#000000', background='#ffffff')\r\n openEverydayLabel.grid(row=3, column=1, padx=(4, 4), pady=(2, 2), sticky=W, columnspan = 2)\r\n\r\n startDateLabel = Label(self, text=\"Start Date\", foreground='#000000', background='#ffffff')\r\n startDateLabel.grid(row=4, column=1, padx=(4, 4), pady=(2, 2), sticky=W, columnspan = 2)\r\n\r\n endDateLabel = Label(self, text=\"End Date\", foreground='#000000', background='#ffffff')\r\n endDateLabel.grid(row=5, column=1, padx=(4, 4), pady=(2, 2), sticky=W, columnspan = 2)\r\n\r\n totalVisitsRangeLabel = Label(self, text=\"Total Visits Range\", foreground='#000000', background='#ffffff')\r\n totalVisitsRangeLabel.grid(row=6, column=1, padx=(4, 4), pady=(2, 2), sticky=W, columnspan = 2)\r\n\r\n eventCountRangeLabel = Label(self, text=\"Event Count Range\", 
foreground='#000000', background='#ffffff')\r\n eventCountRangeLabel.grid(row=7, column=1, padx=(4, 4), pady=(2, 2), sticky=W, columnspan = 2)\r\n\r\n includeVisitedCheckbutton = Checkbutton(self, variable=self.includeVisited, text=\"Include Visited\", foreground='#000000', background='#ffffff')\r\n includeVisitedCheckbutton.grid(row=8, column=1, padx=(4, 4), pady=(2, 2), sticky=W, columnspan=2)\r\n\r\n filterButton = Button(self, command =self.filter, text=\"Filter\", background='#4286f4')\r\n filterButton.grid(row=9, column=1, padx=(2, 2), pady=(2, 2), sticky=W)\r\n\r\n siteDetailButton = Button(self, command = self.onSiteDetailButtonClicked,text=\"Site Detail\", background='#4286f4')\r\n siteDetailButton.grid(row=9, column=2, padx=(2, 2), pady=(2, 2), sticky=W)\r\n\r\n transitDetailButton = Button(self, command = self.onTransitDetailButtonClicked,text=\"Transit Detail\", background='#4286f4')\r\n transitDetailButton.grid(row=9, column=3, padx=(2, 2), pady=(2, 2), sticky=W)\r\n\r\n backButton = Button(self, command=self.back, text=\"Back\", background='#4286f4')\r\n backButton.grid(row=10, column=1, padx=(2, 2), pady=(2, 2), sticky=W)\r\n\r\n sortSite = partial(self.filter,'SiteName')\r\n sortSiteButton = Button(self,command=sortSite,text='Sort by Site Name', background= '#4286f4')\r\n sortSiteButton.grid(row=11, column=1,columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n eventCount = partial(self.filter,'EventCount')\r\n eventCountButton = Button(self,command=eventCount, text = 'Sort by Event Count', background= '#4286f4')\r\n eventCountButton.grid(row = 12,column=1,columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n totalVisits = partial(self.filter,'TotalVisits')\r\n totalVisitsButton = Button(self,command=totalVisits, text='Sort by Total Visits', background= '#4286f4')\r\n totalVisitsButton.grid(row = 13,column=1,columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n myVisits = partial(self.filter, 'MyVisits')\r\n myVisitsButton = Button(self,command=myVisits, text = 'Sort by My Visits', background= '#4286f4')\r\n myVisitsButton.grid(row = 14,column=1,columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n openEverydayDropdown = OptionMenu(self, self.openEveryday, *self.openEverydayList + ['Any'])\r\n openEverydayDropdown.grid(row=3, column=3, padx=(8, 5), pady=(0, 4), sticky=W)\r\n startDateBox = Entry(self, textvariable=self.startDate, width=20)\r\n startDateBox.grid(row=4, column=3, padx=(0, 2), pady=(0, 4), sticky=E)\r\n endDateBox = Entry(self, textvariable=self.endDate, width=20)\r\n endDateBox.grid(row=5, column=3, padx=(0, 2), pady=(0, 4), sticky=E)\r\n totalVisitsRange1Box = Entry(self, textvariable=self.totalVisitsRange1, width=20)\r\n totalVisitsRange1Box.grid(row=6, column=3, padx=(0, 2), pady=(0, 4), sticky=E)\r\n totalVisitsRange2Box = Entry(self, textvariable=self.totalVisitsRange2, width=20)\r\n totalVisitsRange2Box.grid(row=6, column=4, padx=(0, 2), pady=(0, 4), sticky=E)\r\n eventCountRange1Box = Entry(self, textvariable=self.eventCountRange1, width=20)\r\n eventCountRange1Box.grid(row=7, column=3, padx=(0, 2), pady=(0, 4), sticky=E)\r\n eventCountRange2Box = Entry(self, textvariable=self.eventCountRange2, width=20)\r\n eventCountRange2Box.grid(row=7, column=4, padx=(0, 2), pady=(0, 4), sticky=E)\r\n\r\n\r\n def onSiteDetailButtonClicked(self):\r\n row = self.resultTable.model.getRecordAtRow(self.resultTable.getSelectedRow())\r\n siteName = row['SiteName']\r\n\r\n if siteName == '':\r\n messagebox.showwarning('Error', 'No site 
selected. Make sure to click on the non-empty '\r\n                                            'row number to select which site you are viewing.')\r\n            return\r\n\r\n        siteDetailWindow = visitorSiteDetail(self)\r\n        siteDetailWindow.display(siteName)\r\n        self.withdraw()\r\n\r\n    def onTransitDetailButtonClicked(self):\r\n        row = self.resultTable.model.getRecordAtRow(self.resultTable.getSelectedRow())\r\n        siteName = row['SiteName']\r\n\r\n        if siteName == '':\r\n            messagebox.showwarning('Error', 'No site selected. Make sure to click on the non-empty '\r\n                                            'row number to select which transit you are taking.')\r\n            return\r\n\r\n        transitDetailWindow = visitorTransitDetail(self)\r\n        transitDetailWindow.display(siteName)\r\n        self.withdraw()\r\n\r\n    def filter(self, sort = None):\r\n        if sort and self.resultTable.model.getData()[1]['SiteName'] == '':\r\n            messagebox.showwarning('Error', 'You must have data in order to sort')\r\n            return\r\n\r\n        includeVisited, siteName, openEveryday, startDate, endDate, totalVisitsRange1, totalVisitsRange2 = self.includeVisited.get(), self.siteName.get(), self.openEveryday.get(), self.startDate.get(), self.endDate.get(), self.totalVisitsRange1.get(), self.totalVisitsRange2.get()\r\n        eventCountRange1, eventCountRange2 = self.eventCountRange1.get(), self.eventCountRange2.get()\r\n        converted = []\r\n        conv = {'': None, 'Any': None}\r\n        for i in [includeVisited, siteName, openEveryday, startDate, endDate, totalVisitsRange1, totalVisitsRange2, eventCountRange1, eventCountRange2]:\r\n            converted.append(conv.get(i, i))\r\n        includeVisited, siteName, openEveryday, startDate, endDate, totalVisitsRange1, totalVisitsRange2, eventCountRange1, eventCountRange2 = converted\r\n\r\n        if sort is None:\r\n            sort = 'SiteName'\r\n        sitedetail = self.SQL.filter(identifier, siteName, openEveryday, startDate, endDate, totalVisitsRange1, totalVisitsRange2, eventCountRange1, eventCountRange2, includeVisited, sort)\r\n\r\n        self.resultTable.model.deleteRows(range(0, self.resultTable.model.getRowCount()))\r\n        self.resultTable.model.importDict(sitedetail)\r\n        self.resultTable.redraw()\r\n\r\n    def back(self):\r\n        self.master.deiconify()\r\n        self.destroy()\r\n\r\n\r\nclass visitorSiteDetail(Toplevel):\r\n    def __init__(self, master):\r\n        Toplevel.__init__(self)\r\n        self.master = master\r\n        self.title('Site Detail')\r\n        self.config(background='#ffffff')\r\n        self.SQL = Queries.visitorSiteDetail(db)\r\n\r\n    def display(self, sitename):\r\n        siteName, openEveryday, address = self.SQL.load(sitename)\r\n\r\n        self.siteName = StringVar()\r\n        self.openEveryday = StringVar()\r\n        self.address = StringVar()\r\n        self.visitDate = StringVar()\r\n\r\n        self.siteName.set(siteName)\r\n        self.openEveryday.set(openEveryday)\r\n        self.address.set(address)\r\n\r\n        siteNameLabel = Label(self, text='Site Name', foreground='#000000', background='#ffffff')\r\n        siteNameLabel.grid(row=1, column=1, padx=(4,4), pady=(2,2), sticky=W)\r\n        siteNameDataLabel = Label(self, text=self.siteName.get(), foreground='#000000', background='#ffffff')\r\n        siteNameDataLabel.grid(row=1, column=2, padx=(4,4), pady=(2,2), sticky=W)\r\n\r\n        openEverydayLabel = Label(self, text='Open Everyday', foreground='#000000', background='#ffffff')\r\n        openEverydayLabel.grid(row=2, column=1, padx=(4,4), pady=(2,2), sticky=W)\r\n        openEverydayDataLabel = Label(self, text=self.openEveryday.get(), foreground='#000000', background='#ffffff')\r\n        openEverydayDataLabel.grid(row=2, column=2, padx=(4,4), pady=(2,2), sticky=W)\r\n\r\n        addressLabel = Label(self, text='Address', foreground='#000000', background='#ffffff')\r\n        addressLabel.grid(row=3, column=1, padx=(4,4), pady=(2,2), sticky=W)\r\n        addressDataLabel = Label(self, text=self.address.get(), foreground='#000000', background='#ffffff')\r\n        addressDataLabel.grid(row=3, column=2, padx=(4,4), pady=(2,2), sticky=W)\r\n\r\n        visitDateLabel = Label(self, text='Visit Date', foreground='#000000', background='#ffffff')\r\n        visitDateLabel.grid(row=4, column=1, padx=(4,4), pady=(2,2), sticky=W)\r\n        visitDateDataBox = Entry(self, textvariable=self.visitDate, width=20)\r\n        visitDateDataBox.grid(row=4, column=2, padx=(0, 2), pady=(0, 4), sticky=W)\r\n\r\n        logVisitButton = Button(self, command=self.logVisit, text=\"Log Visit\", background='#4286f4')\r\n        logVisitButton.grid(row=5, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n        backButton = Button(self, command=self.back, text=\"Back\", background='#4286f4')\r\n        backButton.grid(row=6, column=1, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n    def logVisit(self):\r\n        cursor.execute(\"SELECT SiteName FROM visitsite WHERE SiteName = \\'\" +self.siteName.get()+ \"\\'\" +\" AND VisUsername = \\'\" +identifier+ \"\\'\"+ \" AND Date = \\'\" +self.visitDate.get()+ \"\\'\")\r\n        site = cursor.fetchone()\r\n        if(site is not None):\r\n            messagebox.showwarning(\"Already Logged\",\r\n                                   \"There is already a visit logged for you at this site and date.\")\r\n        else:\r\n            cursor.execute(\"INSERT into visitsite values (%s, %s, %s)\",\r\n                           (identifier, self.siteName.get(), self.visitDate.get()))\r\n            db.commit()  # persist the logged visit; the connection is not opened with autocommit\r\n            messagebox.showinfo(\"Success\",\r\n                                \"Your visit has been logged.\")\r\n\r\n    def back(self):\r\n        self.master.deiconify()\r\n        self.destroy()\r\n\r\n\r\nclass VisitHistory(Toplevel):\r\n    def __init__(self, master):\r\n        Toplevel.__init__(self)\r\n        self.master = master\r\n        self.title('Visit History')\r\n        self.config(background='#ffffff')\r\n        self.SQL = Queries.VisitHistory(db)\r\n\r\n    def display(self):\r\n        sites, history = self.SQL.load(identifier)\r\n\r\n        self.event, self.site, self.d1, self.d2 = StringVar(), StringVar(), StringVar(), StringVar()\r\n\r\n        self.site.set('Any')\r\n\r\n        self.resultTable = TableCanvas(self, editable=True, data=history,\r\n                                       read_only=True, rowheaderwidth=15, maxcellwidth=200, cellwidth=150,\r\n                                       rows=len(history), thefont=('Helvetica', 10), autoresizecols=1,\r\n                                       width=150*len(list(history.values())[0]), height=25*7)\r\n        self.resultTable.show()\r\n\r\n        backButton = Button(self, command=self.back, text=\"Back\", background='#4286f4')\r\n        backButton.grid(row=20, column=0, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n        eventLabel = Label(self, text=\"Event\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n        eventLabel.grid(row=2, column=0, padx=(4, 4), pady=(2, 2), sticky=W)\r\n        eventBox = Entry(self, textvariable=self.event, width=10)\r\n        eventBox.grid(row=2, column=1, padx=(2, 5), pady=(0, 4), sticky=W+E)\r\n\r\n        siteLabel = Label(self, text=\"Site\", font=\"Helvetica\", foreground='#000000', background='#ffffff')\r\n        siteLabel.grid(row=3, column=0, padx=(4, 4), pady=(2, 2), sticky=W)\r\n        siteDropdown = OptionMenu(self, self.site, *sites + ['Any'])\r\n        siteDropdown.grid(row=3, column=1, padx=(2, 5), pady=(0, 4), sticky=W)\r\n\r\n        startDateLabel = Label(self, text=\"Start Date\", foreground='#000000', background='#ffffff')\r\n        startDateLabel.grid(row=4, column=0, padx=(4, 4), pady=(2, 2), sticky=E)\r\n        startDateBox = Entry(self, textvariable=self.d1, width=20)\r\n        startDateBox.grid(row=4, column=1, padx=(0, 2), pady=(0, 4), sticky=W)\r\n\r\n        endDateLabel = Label(self, 
text=\"End Date\", foreground='#000000', background='#ffffff')\r\n endDateLabel.grid(row=5, column=0, padx=(4, 4), pady=(2, 2), sticky=E)\r\n endDateBox = Entry(self, textvariable=self.d2, width=20)\r\n endDateBox.grid(row=5, column=1, padx=(0, 2), pady=(0, 4), sticky=W)\r\n\r\n filterButton = Button(self, command=self.filter, text=\"Filter\", background='#4286f4')\r\n filterButton.grid(row=6, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n sortDate = partial(self.filter, 'Date')\r\n sortTypeButton = Button(self, command=sortDate, text=\"Sort by Date\", background='#4286f4')\r\n sortTypeButton.grid(row=7, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n sortEvent = partial(self.filter, 'EventName')\r\n sortEventButton = Button(self, command=sortEvent, text=\"Sort by Event\", background='#4286f4')\r\n sortEventButton.grid(row=8, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n sortSite = partial(self.filter, 'SiteName')\r\n sortSiteButton = Button(self, command=sortSite, text=\"Sort by Site\", background='#4286f4')\r\n sortSiteButton.grid(row=9, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n sortPrice = partial(self.filter, 'Price')\r\n sortPriceButton = Button(self, command=sortPrice, text=\"Sort by Price\", background='#4286f4')\r\n sortPriceButton.grid(row=10, column=0, columnspan=2, padx=(2, 2), pady=(2, 2), sticky=W + E)\r\n\r\n def filter(self, sort=None):\r\n if sort and self.resultTable.model.getData()[1]['Date'] == '':\r\n messagebox.showwarning('Error', 'You must have data in order to sort')\r\n return\r\n\r\n d1, d2, site, event = self.d1.get(), self.d2.get(), self.site.get(), self.event.get()\r\n\r\n conv = {'': None, 'Any': None}\r\n d1, d2, event, site = conv.get(d1, d1), conv.get(d2, d2), conv.get(event, event), conv.get(site, site),\r\n\r\n for d in [d1, d2]:\r\n if d:\r\n try:\r\n datetime.strptime(d, '%Y-%m-%d')\r\n except Exception as e:\r\n print(e)\r\n messagebox.showwarning('Error', 'Incorrect date format. Please enter YYYY-MM-DD')\r\n\r\n if sort is None:\r\n sort = 'Date'\r\n\r\n history = self.SQL.filter(identifier, d1, d2, event, site, sort)\r\n\r\n self.resultTable.model.deleteRows(range(0, self.resultTable.model.getRowCount()))\r\n self.resultTable.model.importDict(history)\r\n self.resultTable.redraw()\r\n\r\n def back(self):\r\n self.master.deiconify()\r\n self.destroy()\r\n\r\n\r\ndef encrypt(unhashed_string):\r\n return hashlib.sha256(unhashed_string.encode()).hexdigest()\r\n\r\n\r\nif __name__ == '__main__':\r\n try:\r\n print(\"Connecting to DB\")\r\n global db # Makes the connection accessible from all scopes\r\n\r\n db = pymysql.connect(host='localhost',\r\n user='root',\r\n password=MYSQL_PASSWORD,\r\n db='beltline',\r\n\r\n cursorclass=pymysql.cursors.DictCursor)\r\n\r\n global cursor\r\n cursor = db.cursor()\r\n\r\n except Exception as e:\r\n print(e)\r\n print('Error! Cannot connect. Please double check the password variable to your MySQL server at the top of '\r\n 'this file.')\r\n sys.exit()\r\n\r\n print(\"Connected!\")\r\n\r\n root = Tk()\r\n app = Beltline(root)\r\n root.mainloop()\r\n\r\n db.close()\r\n sys.exit()\r\n"
},
{
"alpha_fraction": 0.6220851540565491,
"alphanum_fraction": 0.7244170308113098,
"avg_line_length": 89.55440521240234,
"blob_id": "7ecf7a7e3ae3ce2f940c7d8653d3913549fe75ab",
"content_id": "cf0e4e1ea17edc09b76e4e5ef7ca7454eef05d4f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 17682,
"license_type": "no_license",
"max_line_length": 810,
"num_lines": 193,
"path": "/insertValues.sql",
"repo_name": "kcozzone3/AtlantaBeltline",
"src_encoding": "UTF-8",
"text": "-- pw jsmith123\r\ninsert into user values ('james.smith', '7cf2e5f72d3e144cad58f95214f2dd20ad8f9979f34d561433a31dacbc16071b', 'James', 'Smith', 'Approved');\r\n-- pw msmith456\r\ninsert into user values ('michael.smith', '6d3a26d88ea77a9b07d79a48307644cd88976173f49f279fed04b681d713a541', 'Michael', 'Smith', 'Approved');\r\n-- pw rsmith789\r\ninsert into user values ('robert.smith', '232c98d6f01474e874341b78d28064ac6c318763dbf80b057e0ea116905c7fcc', 'Robert', 'Smith', 'Approved');\r\n-- pw mgarcia123\r\ninsert into user values ('maria.garcia', 'ddbdea14aecce91cd12172bce09e9b402a29ea0c2813dc35935095ead340cc35', 'Maria', 'Garcia', 'Approved');\r\n-- pw dsmith456\r\ninsert into user values ('david.smith', 'f79704e124b997b32bd83c014b05c20413c6a3e928ec8083bf1872c82c025672', 'David', 'Smith', 'Approved');\r\n-- pw manager1\r\ninsert into user values ('manager1', '380f9771d2df8566ce2bd5b8ed772b0bb74fd6457fb803ab2d267c394d89c750', 'Manager', 'One', 'Pending');\r\n-- pw manager2\r\ninsert into user values ('manager2', '9d05b6092d975b0884c6ba7fadb283ced03da9822ebbd13cc6b6d1855a6495ec', 'Manager', 'Two', 'Approved');\r\n-- pw manager3\r\ninsert into user values ('manager3', '42385b24804a6609a2744d414e0bf945704427b256ab79144b9ba93f278dbea7', 'Manager', 'Three', 'Approved');\r\n-- pw manager4\r\ninsert into user values ('manager4', 'e3c0f6e574f2e758a4d9d271fea62894230126062d74fd6d474e2046837f9bce', 'Manager', 'Four', 'Approved');\r\n-- pw manager5\r\ninsert into user values ('manager5', '60c6fc387428b43201be7da60da59934acb080b254e4eebead657b54154fbeb1', 'Manager', 'Five', 'Approved');\r\n-- pw mrodriguez\r\ninsert into user values ('maria.rodriguez', 'c50218388d572cbe6aac09b33ceb5189608d5b9ede429b5a17562a17fdd547c4', 'Maria', 'Rodriguez', 'Declined');\r\n-- pw msmith789\r\ninsert into user values ('mary.smith', '9ddbd60268ae6987437511066a2000f1f0017c23728700f9794628a9d3d33034', 'Mary', 'Smith', 'Approved');\r\n-- pw mhernandez\r\ninsert into user values ('maria.hernandez', '600d2690306308866676b4229d51e04857876021705362bf3b26b08a1f78f9cb', 'Maria', 'Hernandez', 'Approved');\r\n-- pw staff1234\r\ninsert into user values ('staff1', '02defbfb8190f9d0719ef7a23da2049bd2e61442bc14021a6d8a4ae35ca334b7', 'Staff', 'One', 'Approved');\r\n-- pw staff4567\r\ninsert into user values ('staff2', '6bd0987c664d5e7551004d30656ae1d12b9d262e2d128ba4200934b4116d96cd', 'Staff', 'Two', 'Approved');\r\n-- pw staff7890\r\ninsert into user values ('staff3', '8857a879cbea64f2d20c6c1bfab505f4b23c06d28decb3b9ddc5426b75f469f1', 'Staff', 'Three', 'Approved');\r\n-- pw user123456\r\ninsert into user values ('user1', '90aae915da86d3b3a4da7a996bc264bfbaf50a953cbbe8cd3478a2a6ccc7b900', 'User', 'One', 'Pending');\r\n-- pw visitor123\r\ninsert into user values ('visitor1', '5c1e1b5c8936669bfe844210fb7ae7d3411dd9f41614d09ce9732dfc17c266bc', 'Visitor', 'One', 'Approved');\r\n\r\n\r\ninsert into visitor values ('michael.smith');\r\ninsert into visitor values ('maria.garcia');\r\ninsert into visitor values ('manager2');\r\ninsert into visitor values ('manager4');\r\ninsert into visitor values ('manager5');\r\ninsert into visitor values ('maria.rodriguez');\r\ninsert into visitor values ('mary.smith');\r\ninsert into visitor values ('staff2');\r\ninsert into visitor values ('staff3');\r\ninsert into visitor values ('visitor1');\r\n\r\ninsert into emails values ('james.smith', '[email protected]');\r\ninsert into emails values ('james.smith', '[email protected]');\r\ninsert into emails values ('james.smith', '[email 
protected]');\r\ninsert into emails values ('james.smith', '[email protected]');\r\ninsert into emails values ('michael.smith', '[email protected]');\r\ninsert into emails values ('robert.smith', '[email protected]');\r\ninsert into emails values ('maria.garcia', '[email protected]');\r\ninsert into emails values ('maria.garcia', '[email protected]');\r\ninsert into emails values ('david.smith', '[email protected]');\r\ninsert into emails values ('maria.rodriguez', '[email protected]');\r\ninsert into emails values ('mary.smith', '[email protected]');\r\ninsert into emails values ('maria.hernandez', '[email protected]');\r\ninsert into emails values ('maria.hernandez', '[email protected]');\r\ninsert into emails values ('manager1', '[email protected]');\r\ninsert into emails values ('manager2', '[email protected]');\r\ninsert into emails values ('manager3', '[email protected]');\r\ninsert into emails values ('manager4', '[email protected]');\r\ninsert into emails values ('manager5', '[email protected]');\r\ninsert into emails values ('staff1', '[email protected]');\r\ninsert into emails values ('staff2', '[email protected]');\r\ninsert into emails values ('staff3', '[email protected]');\r\ninsert into emails values ('user1', '[email protected]');\r\ninsert into emails values ('visitor1', '[email protected]');\r\n\r\ninsert into employee values ('james.smith', 000000001, '4043721234', '123 East Main Street', 'Rochester', 'NY', '14604');\r\ninsert into employee values ('michael.smith', 000000002, '4043726789', '350 Ferst Drive', 'Atlanta', 'GA', '30332');\r\ninsert into employee values ('robert.smith', 000000003, '1234567890', '123 East Main Street', 'Columbus', 'OH', '43215');\r\ninsert into employee values ('maria.garcia', 000000004, '7890123456', '123 East Main Street', 'Richland', 'PA', '17987');\r\ninsert into employee values ('david.smith', 000000005, '5124776435', '350 Ferst Drive', 'Atlanta', 'GA', '30332');\r\ninsert into employee values ('manager1', 000000006, '8045126767', '123 East Main Street', 'Rochester', 'NY', '14604');\r\ninsert into employee values ('manager2', 000000007, '9876543210', '123 East Main Street', 'Rochester', 'NY', '14604');\r\ninsert into employee values ('manager3', 000000008, '5432167890', '350 Ferst Drive', 'Atlanta', 'GA', '30332');\r\ninsert into employee values ('manager4', 000000009, '8053467565', '123 East Main Street', 'Columbus', 'OH', '43215');\r\ninsert into employee values ('manager5', 000000010, '8031446782', '801 Atlantic Drive', 'Atlanta', 'GA', '30332');\r\ninsert into employee values ('staff1', 000000011, '8024456765', '266 Ferst Drive Northwest', 'Atlanta', 'GA', '30332');\r\ninsert into employee values ('staff2', 000000012, '8888888888', '266 Ferst Drive Northwest', 'Atlanta', 'GA', '30332');\r\ninsert into employee values ('staff3', 000000013, '3333333333', '801 Atlantic Drive', 'Atlanta', 'GA', '30332');\r\n\r\ninsert into administrator values ('james.smith');\r\n\r\ninsert into staff values ('michael.smith');\r\ninsert into staff values ('robert.smith');\r\ninsert into staff values ('staff1');\r\ninsert into staff values ('staff2');\r\ninsert into staff values ('staff3');\r\n\r\ninsert into manager values ('maria.garcia');\r\ninsert into manager values ('david.smith');\r\ninsert into manager values ('manager1');\r\ninsert into manager values ('manager2');\r\ninsert into manager values ('manager3');\r\ninsert into manager values ('manager4');\r\ninsert into manager values ('manager5');\r\n\r\ninsert into site values ('Piedmont Park', '400 
Park Drive Northeast', '30306', true, 'manager2');\r\ninsert into site values ('Atlanta Beltline Center', '112 Krog Street Northeast', '30307', false, 'manager3');\r\ninsert into site values ('Historic Fourth Ward Park', '680 Dallas Street Northeast', '30308', true, 'manager4');\r\ninsert into site values ('Westview Cemetery', '1680 Westview Drive Southwest', '30310', false, 'manager5');\r\ninsert into site values ('Inman Park', '', '30307', true, 'david.smith');\r\n\r\ninsert into event values ('Piedmont Park', 'Eastside Trail', '2019-02-04', '2019-02-05', 0, 99999, 1, 'A combination of multi-use trail and linear green space, the Eastside Trail was the first finished section of the Atlanta BeltLine trail in the old rail corridor. The Eastside Trail, which was funded by a combination of public and private philanthropic sources, runs from the tip of Piedmont Park to Reynoldstown. More details at https://beltline.org/explore-atlanta-beltline-trails/eastside-trail/');\r\ninsert into event values ('Inman Park', 'Eastside Trail', '2019-02-04', '2019-02-05', 0, 99999, 1, 'A combination of multi-use trail and linear green space, the Eastside Trail was the first finished section of the Atlanta BeltLine trail in the old rail corridor. The Eastside Trail, which was funded by a combination of public and private philanthropic sources, runs from the tip of Piedmont Park to Reynoldstown. More details at https://beltline.org/explore-atlanta-beltline-trails/eastside-trail/');\r\ninsert into event values ('Inman Park', 'Eastside Trail', '2019-03-01', '2019-03-02', 0, 99999, 1, 'A combination of multi-use trail and linear green space, the Eastside Trail was the first finished section of the Atlanta BeltLine trail in the old rail corridor. The Eastside Trail, which was funded by a combination of public and private philanthropic sources, runs from the tip of Piedmont Park to Reynoldstown. More details at https://beltline.org/explore-atlanta-beltline-trails/eastside-trail/');\r\ninsert into event values ('Historic Fourth Ward Park', 'Eastside Trail', '2019-02-13', '2019-02-14', 0, 99999, 1, 'A combination of multi-use trail and linear green space, the Eastside Trail was the first finished section of the Atlanta BeltLine trail in the old rail corridor. The Eastside Trail, which was funded by a combination of public and private philanthropic sources, runs from the tip of Piedmont Park to Reynoldstown. More details at https://beltline.org/explore-atlanta-beltline-trails/eastside-trail/');\r\ninsert into event values ('Westview Cemetery', 'Westside Trail', '2019-02-18', '2019-02-21', 0, 99999, 1, 'The Westside Trail is a free amenity that offers a bicycle and pedestrian-safe corridor with a 14-foot-wide multi-use trail surrounded by mature trees and grasses thanks to Trees Atlanta’s Arboretum. With 16 points of entry, 14 of which will be ADA-accessible with ramp and stair systems, the trail provides numerous access points for people of all abilities. More details at: https://beltline.org/explore-atlanta-beltline-trails/westside-trail/');\r\ninsert into event values ('Inman Park', 'Bus Tour', '2019-02-01', '2019-02-02', 25, 6, 2, 'The Atlanta BeltLine Partnership’s tour program operates with a natural gas-powered, ADA accessible tour bus funded through contributions from 10th & Monroe, LLC, SunTrust Bank Trusteed Foundations–Florence C. and Harry L. 
English Memorial Fund and Thomas Guy Woolford Charitable Trust, and AGL Resources');\r\ninsert into event values ('Inman Park', 'Bus Tour', '2019-02-08', '2019-02-10', 25, 6, 2, 'The Atlanta BeltLine Partnership’s tour program operates with a natural gas-powered, ADA accessible tour bus funded through contributions from 10th & Monroe, LLC, SunTrust Bank Trusteed Foundations–Florence C. and Harry L. English Memorial Fund and Thomas Guy Woolford Charitable Trust, and AGL Resources');\r\ninsert into event values ('Inman Park', 'Private Bus Tour', '2019-02-01', '2019-02-02', 40, 4, 1, 'Private tours are available most days, pending bus and tour guide availability. Private tours can accommodate up to 4 guests per tour, and are subject to a tour fee (nonprofit rates are available). As a nonprofit organization with limited resources, we are unable to offer free private tours. We thank you for your support and your understanding as we try to provide as many private group tours as possible. The Atlanta BeltLine Partnership’s tour program operates with a natural gas-powered, ADA accessible tour bus funded through contributions from 10th & Monroe, LLC, SunTrust Bank Trusteed Foundations–Florence C. and Harry L. English Memorial Fund and Thomas Guy Woolford Charitable Trust, and AGL Resources');\r\ninsert into event values ('Inman Park', 'Arboretum Walking Tour', '2019-02-08', '2019-02-11', 5, 5, 1, 'Official Atlanta BeltLine Arboretum Walking Tours provide an up-close view of the Westside Trail and the Atlanta BeltLine Arboretum led by Trees Atlanta Docents. The one and a half hour tours step off at at 10am (Oct thru May), and 9am (June thru September). Departure for all tours is from Rose Circle Park near Brown Middle School. More details at: https://beltline.org/visit/atlanta-beltline-tours/#arboretum-walking');\r\ninsert into event values ('Atlanta Beltline Center', 'Official Atlanta BeltLine Bike Tour', '2019-02-09', '2019-02-14', 5, 5, 1, 'These tours will include rest stops highlighting assets and points of interest along the Atlanta BeltLine. 
Staff will lead the rides, and each group will have a ride sweep to help with any unexpected mechanical difficulties.');\r\n\r\ninsert into transit values ('MARTA', 'Blue', 2.00);\r\ninsert into transit values ('Bus', '152', 2.00);\r\ninsert into transit values ('Bike', 'Relay', 1.00);\r\n\r\ninsert into connect values ('Inman Park', 'MARTA', 'Blue');\r\ninsert into connect values ('Piedmont Park', 'MARTA', 'Blue');\r\ninsert into connect values ('Historic Fourth Ward Park', 'MARTA', 'Blue');\r\ninsert into connect values ('Westview Cemetery', 'MARTA', 'Blue');\r\ninsert into connect values ('Inman Park', 'Bus', '152');\r\ninsert into connect values ('Piedmont Park', 'Bus', '152');\r\ninsert into connect values ('Historic Fourth Ward Park', 'Bus', '152');\r\ninsert into connect values ('Piedmont Park', 'Bike', 'Relay');\r\ninsert into connect values ('Historic Fourth Ward Park', 'Bike', 'Relay');\r\n\r\ninsert into take values ('manager2', 'MARTA', 'Blue', '2019-03-20');\r\ninsert into take values ('manager2', 'Bus', '152', '2019-03-20');\r\ninsert into take values ('manager3', 'Bike', 'Relay', '2019-03-20');\r\ninsert into take values ('manager2', 'MARTA', 'Blue', '2019-03-21');\r\ninsert into take values ('maria.hernandez', 'Bus', '152', '2019-03-20');\r\ninsert into take values ('maria.hernandez', 'Bike', 'Relay', '2019-03-20');\r\ninsert into take values ('manager2', 'MARTA', 'Blue', '2019-03-22');\r\ninsert into take values ('maria.hernandez', 'Bus', '152', '2019-03-22');\r\ninsert into take values ('mary.smith', 'Bike', 'Relay', '2019-03-23');\r\ninsert into take values ('visitor1', 'MARTA', 'Blue', '2019-03-21');\r\n\r\ninsert into assignto values ('michael.smith', 'Piedmont Park', 'Eastside Trail', '2019-02-04');\r\ninsert into assignto values ('staff1', 'Piedmont Park', 'Eastside Trail', '2019-02-04');\r\ninsert into assignto values ('robert.smith', 'Inman Park', 'Eastside Trail', '2019-02-04');\r\ninsert into assignto values ('staff2', 'Inman Park', 'Eastside Trail', '2019-02-04');\r\ninsert into assignto values ('staff1', 'Inman Park', 'Eastside Trail', '2019-03-01');\r\ninsert into assignto values ('michael.smith', 'Historic Fourth Ward Park', 'Eastside Trail', '2019-02-13');\r\ninsert into assignto values ('staff1', 'Westview Cemetery', 'Westside Trail', '2019-02-18');\r\ninsert into assignto values ('staff3', 'Westview Cemetery', 'Westside Trail', '2019-02-18');\r\ninsert into assignto values ('michael.smith', 'Inman Park', 'Bus Tour', '2019-02-01');\r\ninsert into assignto values ('staff2', 'Inman Park', 'Bus Tour', '2019-02-01');\r\ninsert into assignto values ('robert.smith', 'Inman Park', 'Bus Tour', '2019-02-08');\r\ninsert into assignto values ('michael.smith', 'Inman Park', 'Bus Tour', '2019-02-08');\r\ninsert into assignto values ('robert.smith', 'Inman Park', 'Private Bus Tour', '2019-02-01');\r\ninsert into assignto values ('staff3', 'Inman Park', 'Arboretum Walking Tour', '2019-02-08');\r\ninsert into assignto values ('staff1', 'Atlanta BeltLine Center', 'Official Atlanta BeltLine Bike Tour', '2019-02-09');\r\n\r\ninsert into visitevent values ('mary.smith', 'Inman Park', 'Bus Tour', '2019-02-01', '2019-02-01');\r\ninsert into visitevent values ('maria.garcia', 'Inman Park', 'Bus Tour', '2019-02-01', '2019-02-02');\r\ninsert into visitevent values ('manager2', 'Inman Park', 'Bus Tour', '2019-02-01', '2019-02-02');\r\ninsert into visitevent values ('manager4', 'Inman Park', 'Bus Tour', '2019-02-01', '2019-02-01');\r\ninsert into visitevent values ('manager5', 'Inman Park', 'Bus Tour', '2019-02-01', '2019-02-02');\r\ninsert into visitevent values ('staff2', 'Inman Park', 'Bus Tour', '2019-02-01', '2019-02-02');\r\ninsert into visitevent values ('mary.smith', 'Westview Cemetery', 'Westside Trail', '2019-02-18', '2019-02-19');\r\ninsert into visitevent values ('mary.smith', 'Inman Park', 'Private Bus Tour', '2019-02-01', '2019-02-01');\r\ninsert into visitevent values ('mary.smith', 'Inman Park', 'Private Bus Tour', '2019-02-01', '2019-02-02');\r\ninsert into visitevent values ('mary.smith', 'Atlanta BeltLine Center', 'Official Atlanta BeltLine Bike Tour', '2019-02-09', '2019-02-10');\r\ninsert into visitevent values ('mary.smith', 'Inman Park', 'Arboretum Walking Tour', '2019-02-08', '2019-02-10');\r\ninsert into visitevent values ('mary.smith', 'Piedmont Park', 'Eastside Trail', '2019-02-04', '2019-02-04');\r\ninsert into visitevent values ('mary.smith', 'Historic Fourth Ward Park', 'Eastside Trail', '2019-02-13', '2019-02-13');\r\ninsert into visitevent values ('mary.smith', 'Historic Fourth Ward Park', 'Eastside Trail', '2019-02-13', '2019-02-14');\r\ninsert into visitevent values ('visitor1', 'Historic Fourth Ward Park', 'Eastside Trail', '2019-02-13', '2019-02-14');\r\ninsert into visitevent values ('visitor1', 'Atlanta BeltLine Center', 'Official Atlanta BeltLine Bike Tour', '2019-02-09', '2019-02-10');\r\ninsert into visitevent values ('visitor1', 'Westview Cemetery', 'Westside Trail', '2019-02-18', '2019-02-19');\r\n\r\ninsert into visitsite values ('mary.smith', 'Inman Park', '2019-02-01');\r\ninsert into visitsite values ('mary.smith', 'Inman Park', '2019-02-02');\r\ninsert into visitsite values ('mary.smith', 'Inman Park', '2019-02-03');\r\ninsert into visitsite values ('mary.smith', 'Atlanta BeltLine Center', '2019-02-01');\r\ninsert into visitsite values ('mary.smith', 'Atlanta BeltLine Center', '2019-02-10');\r\ninsert into visitsite values ('mary.smith', 'Historic Fourth Ward Park', '2019-02-02');\r\ninsert into visitsite values ('mary.smith', 'Piedmont Park', '2019-02-02');\r\ninsert into visitsite values ('visitor1', 'Piedmont Park', '2019-02-11');\r\ninsert into visitsite values ('visitor1', 'Atlanta BeltLine Center', '2019-02-13');\r\ninsert into visitsite values ('visitor1', 'Historic Fourth Ward Park', '2019-02-11');\r\ninsert into visitsite values ('visitor1', 'Westview Cemetery', '2019-02-06');\r\ninsert into visitsite values ('visitor1', 'Inman Park', '2019-02-01');\r\ninsert into visitsite values ('visitor1', 'Piedmont Park', '2019-02-01');\r\ninsert into visitsite values ('visitor1', 'Atlanta BeltLine Center', '2019-02-09');"
},
{
"alpha_fraction": 0.629054844379425,
"alphanum_fraction": 0.6509918570518494,
"avg_line_length": 32.54838562011719,
"blob_id": "d2244234bdb197b4d2ec1e981fad92edef49ea58",
"content_id": "073173aa16010dcae9b2a1cc17d0e895aae40de0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 8570,
"license_type": "no_license",
"max_line_length": 195,
"num_lines": 248,
"path": "/buildDB.sql",
"repo_name": "kcozzone3/AtlantaBeltline",
"src_encoding": "UTF-8",
"text": "\r\nDROP DATABASE IF EXISTS Beltline;\r\nCREATE DATABASE Beltline;\r\nUSE Beltline;\r\n\r\nCREATE TABLE User\r\n \t ( Username varchar(16) NOT NULL,\r\n \t Password varchar(64) NOT NULL, \r\n \t \r\nFirstName varchar(32),\r\n \tLastName varchar(32),\r\n \tStatus varchar(16), \r\n \tPRIMARY KEY (Username)\r\n\t\r\n \t );\r\n\r\nCREATE TABLE Emails\r\n \t ( Username varchar(16) NOT NULL,\r\n \tEmail varchar(32) NOT NULL,\r\n \t \r\n \tPRIMARY KEY (Username, Email),\r\n \tFOREIGN KEY (Username) REFERENCES User(Username)\r\n \t\r\n\tON UPDATE CASCADE\r\n\tON DELETE CASCADE\r\n);\r\n\r\nCREATE TABLE Visitor\r\n \t ( VisUsername varchar(16) NOT NULL,\r\n \t \r\n \tPRIMARY KEY (VisUsername),\r\n \tFOREIGN KEY (VisUsername) REFERENCES User(Username)\r\n\t\r\n\tON UPDATE CASCADE\r\n\tON DELETE CASCADE \r\n \t);\r\n \t \r\nCREATE TABLE Employee\r\n \t ( EmpUsername varchar(16) NOT NULL,\r\n \tEmployeeID char(9) NOT NULL,\r\n \tPhone char(10) NOT NULL,\r\n \tAddress varchar(64),\r\n \tCity varchar(32),\r\n \tState varchar(32),\r\n \tZipcode char(5),\r\n \r\n\t UNIQUE EmployeeID (EmployeeID), \r\n\t UNIQUE Phone (Phone),\r\n \tPRIMARY KEY (EmpUsername),\r\n \tFOREIGN KEY (EmpUsername) REFERENCES User(Username)\r\n\r\n\tON UPDATE CASCADE\r\n ON DELETE CASCADE\r\n\r\n \t);\r\n\r\n\r\nCREATE TABLE Administrator\r\n \t ( AdminUsername varchar(16) NOT NULL,\r\n \t \r\n \tPRIMARY KEY (AdminUsername),\r\n \tFOREIGN KEY (AdminUsername) REFERENCES Employee(EmpUsername)\r\n\r\n\tON UPDATE CASCADE\r\n\tON DELETE CASCADE\r\n \t);\r\n\t\r\n\r\nCREATE TABLE Manager\r\n \t ( ManUsername varchar(16) NOT NULL,\r\n \t \r\n \tPRIMARY KEY (ManUsername),\r\n \tFOREIGN KEY (ManUsername) REFERENCES Employee(EmpUsername)\r\n\r\nON UPDATE CASCADE\r\n ON DELETE CASCADE \r\n \t);\r\n \t \r\nCREATE TABLE Staff\r\n \t ( StaffUsername varchar(16) NOT NULL,\r\n \t \r\n \t PRIMARY KEY (StaffUsername),\r\n \tFOREIGN KEY (StaffUsername) REFERENCES Employee(EmpUsername)\r\n\r\n\tON UPDATE CASCADE\r\n ON DELETE CASCADE\r\n \t);\r\n \t \r\nCREATE TABLE Transit\r\n \t ( TransportType varchar(16) NOT NULL,\r\n \tRoute varchar(32) NOT NULL,\r\n \tPrice decimal(9,2) NOT NULL,\r\n\r\n \tPRIMARY KEY (TransportType, Route)\r\n \t);\r\n \t \r\nCREATE TABLE Site\r\n \t ( Name varchar(64) NOT NULL,\r\n \tAddress varchar(64) NOT NULL,\r\n \tZipcode int(5),\r\n \tOpenEveryday bool NOT NULL,\r\n \tManUsername varchar(16) NOT NULL,\r\n \t \r\n \tPRIMARY KEY (Name),\r\n \tFOREIGN KEY (ManUsername) REFERENCES Manager(ManUsername)\r\n\t\r\n\tON DELETE RESTRICT \r\n\tON UPDATE CASCADE\r\n \t);\r\n\r\nCREATE TABLE Event \r\n \t ( SiteName varchar(64) NOT NULL,\r\n \tEventName varchar(64) NOT NULL, \r\n \tStartDate date NOT NULL,\r\n \tEndDate date NOT NULL,\r\n \tPrice decimal(9,2) NOT NULL,\r\n \tCapacity int NOT NULL,\r\n \tMinStaffReq int NOT NULL,\r\n \tDescription varchar(800) NOT NULL,\r\n\r\n \tPRIMARY KEY (SiteName, EventName, StartDate),\r\n \tFOREIGN KEY (SiteName) REFERENCES Site(Name)\r\n \t \r\n \tON DELETE CASCADE\r\n\tON UPDATE CASCADE\r\n \t);\r\n\r\nCREATE TABLE Take\r\n \t ( Username varchar(16) NOT NULL,\r\n \tTransportType varchar(16) NOT NULL,\r\n \tRoute varchar(32) NOT NULL,\r\n \tDate date NOT NULL,\r\n \t \r\n \tPRIMARY KEY (Username, TransportType, Route, Date),\r\n \tFOREIGN KEY (Username) REFERENCES User(Username)\r\n\t\tON UPDATE CASCADE\r\n\t\tON DELETE CASCADE,\r\n \tFOREIGN KEY (TransportType, Route) REFERENCES Transit(TransportType, Route)\r\n\t\tON UPDATE CASCADE\r\n\t\tON DELETE CASCADE\r\n \t);\r\n \t 
\r\n\r\nCREATE TABLE AssignTo\r\n \t ( StaffUsername varchar(16) NOT NULL,\r\n \tSiteName varchar(64) NOT NULL,\r\n \tEventName varchar(64) NOT NULL,\r\n \tStartDate date NOT NULL,\r\n \t \r\n \tPRIMARY KEY (StaffUsername, SiteName, EventName, StartDate),\r\n \tFOREIGN KEY (StaffUsername) REFERENCES Staff(StaffUsername)\r\n\tON UPDATE CASCADE\r\n\tON DELETE CASCADE,\r\n \tFOREIGN KEY (SiteName, EventName, StartDate) REFERENCES Event(SiteName, EventName, StartDate)\r\n\tON UPDATE CASCADE\r\n\tON DELETE CASCADE\r\n \t);\r\n \t \r\nCREATE TABLE VisitSite\r\n \t ( VisUsername varchar(16) NOT NULL,\r\n \tSiteName varchar(64) NOT NULL,\r\n \tDate date NOT NULL,\r\n \t\r\n \tPRIMARY KEY (VisUsername, SiteName, Date),\r\n \tFOREIGN KEY (VisUsername) REFERENCES Visitor(VisUsername)\r\n\tON UPDATE CASCADE\r\n\tON DELETE CASCADE,\r\n \tFOREIGN KEY (SiteName) REFERENCES Site(Name)\r\n\tON UPDATE CASCADE\r\n\tON DELETE CASCADE\r\n \t);\r\n\r\nCREATE TABLE VisitEvent\r\n \t ( VisUsername varchar(16) NOT NULL,\r\n \tSiteName varchar(64) NOT NULL,\r\n \tEventName varchar(64) NOT NULL,\r\n \tStartDate date NOT NULL,\r\n \tDate date NOT NULL,\r\n \t \r\n \tPRIMARY KEY (VisUsername, SiteName, EventName, StartDate, Date),\r\n \tFOREIGN KEY (VisUsername) REFERENCES Visitor(VisUsername)\r\n\tON UPDATE CASCADE\r\n\tON DELETE CASCADE,\r\n \tFOREIGN KEY (SiteName, EventName, StartDate) REFERENCES Event(SiteName, EventName, StartDate)\r\n\tON UPDATE CASCADE\r\n\tON DELETE CASCADE\r\n \t);\r\n\r\nCREATE TABLE Connect\r\n \t ( SiteName varchar(64) NOT NULL,\r\n \tTransportType varchar(16) NOT NULL,\r\n \tRoute varchar(32) NOT NULL,\r\n \t \r\n \tPRIMARY KEY (SiteName, TransportType, Route),\r\n \tFOREIGN KEY (SiteName) REFERENCES Site(Name)\r\n\tON DELETE CASCADE\r\n\tON UPDATE CASCADE,\r\n \tFOREIGN KEY (TransportType, Route) REFERENCES Transit(TransportType, Route)\r\n\tON DELETE CASCADE\r\n\tON UPDATE CASCADE\r\n\r\n \t);\r\n\r\nCREATE VIEW transit_connect AS\r\nSELECT T.TransportType, T.Route, T.Price, C.SiteName, tmp.num_sites as NumSites\r\n FROM transit AS T JOIN connect AS C \r\n ON (T.TransportType, T.Route) = (C.TransportType, C.Route) \r\n JOIN (SELECT TransportType, Route, count(*) AS num_sites FROM connect GROUP BY TransportType, Route) AS tmp \r\n ON (T.TransportType, T.Route) = (tmp.TransportType, tmp.Route);\r\n \r\nCREATE VIEW emp_profile AS\r\nSELECT E.EmpUsername, E.EmployeeID, E.Phone, Concat(E.Address, ', ', E.City, ' ', E.State, ', ', E.Zipcode) as Address\r\n\tFROM Employee as E;\r\n\r\nCREATE VIEW user_type AS -- https://stackoverflow.com/questions/63447/how-do-i-perform-an-if-then-in-an-sql-select <--- NEAT!!! 
Also, weird collate errors for some reason :/\r\nSELECT Username, CASE WHEN EXISTS(SELECT * FROM manager WHERE ManUsername = u.Username) = 1 THEN 'Manager' collate utf8mb4_general_ci\r\n\t\t\t\t WHEN EXISTS(SELECT * FROM staff WHERE StaffUsername = u.Username) = 1 THEN 'Staff' collate utf8mb4_general_ci\r\n\t\t\t\t WHEN EXISTS(SELECT * FROM visitor WHERE VisUsername = u.Username) = 1 THEN 'Visitor' collate utf8mb4_general_ci\r\n ELSE 'User' collate utf8mb4_general_ci\r\n END AS UserType\r\nFROM User AS u WHERE NOT EXISTS(SELECT * FROM administrator WHERE AdminUsername = u.Username);\r\n\r\nCREATE VIEW manage_event AS\r\nSELECT event.EventName, \r\n\t\tSiteName,\r\n StartDate,\r\n EndDate,\r\n\t\tManUsername,\r\n\t\tStaffCount,\r\n MinStaffReq,\r\n DATEDIFF(EndDate, StartDate) + 1 AS Duration, \r\n Visits, \r\n Price,\r\n Capacity,\r\n Price * Visits AS Revenue,\r\n Description\r\nFROM event NATURAL JOIN (SELECT COUNT(DISTINCT(StaffUsername)) AS StaffCount, SiteName, EventName, StartDate FROM assignto NATURAL RIGHT JOIN event GROUP BY SiteName, EventName, StartDate) as stf\r\n\t\t NATURAL JOIN (SELECT COUNT(VisUsername) AS Visits, EventName, SiteName, StartDate FROM visitevent NATURAL RIGHT JOIN event GROUP BY EventName, SiteName, StartDate) as vst\r\n JOIN site ON event.SiteName = site.Name\r\nGROUP BY EventName, SiteName, StartDate;\r\n\r\nCREATE VIEW dates_view AS \r\nselect * from \r\n(select adddate('1970-01-01',t4*10000 + t3*1000 + t2*100 + t1*10 + t0) gen_date from\r\n (select 0 t0 union select 1 union select 2 union select 3 union select 4 union select 5 union select 6 union select 7 union select 8 union select 9) t0,\r\n (select 0 t1 union select 1 union select 2 union select 3 union select 4 union select 5 union select 6 union select 7 union select 8 union select 9) t1,\r\n (select 0 t2 union select 1 union select 2 union select 3 union select 4 union select 5 union select 6 union select 7 union select 8 union select 9) t2,\r\n (select 0 t3 union select 1 union select 2 union select 3 union select 4 union select 5 union select 6 union select 7 union select 8 union select 9) t3,\r\n (select 0 t4 union select 1 union select 2 union select 3 union select 4 union select 5 union select 6 union select 7 union select 8 union select 9) t4) v\r\n"
}
] | 5 |
SocosLLC/gae-flask-base
|
https://github.com/SocosLLC/gae-flask-base
|
410e638210480bfb2c414dedf12b45daa3d48e1b
|
ecdde696e294c4fada79774a65c8b9e2186fe94c
|
5351eda6bfd67819cd49ceaa777339cc45fd4201
|
refs/heads/master
| 2021-01-21T16:58:16.812367 | 2017-05-20T23:04:00 | 2017-05-20T23:04:00 | 91,920,775 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6661211252212524,
"alphanum_fraction": 0.6661211252212524,
"avg_line_length": 21.592592239379883,
"blob_id": "201288b1e33bf44504dab7c8df728a64cd9908a5",
"content_id": "16183de8cbb5147536cab725b21b141f78a68f3d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 611,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 27,
"path": "/src/application/models.py",
"repo_name": "SocosLLC/gae-flask-base",
"src_encoding": "UTF-8",
"text": "# models.py\n#\n\"\"\"\nGoogle App Engine NDB Models\n\"\"\"\n\nfrom google.appengine.ext import ndb\n\n\nclass BaseModel(ndb.Model):\n created = ndb.DateTimeProperty(auto_now_add=True)\n last_modified = ndb.DateTimeProperty(auto_now=True)\n\n def refreshed(self):\n \"\"\"Pulls the version of the instance entity from the datastore.\n\n Does not update this instance.\n\n Returns\n -------\n The instance entity pulled fresh from the datastore.\n \"\"\"\n return self.key.get(use_cache=False, use_memcache=False)\n\n\nclass Email(BaseModel):\n email = ndb.StringProperty(required=True)\n\n"
},
{
"alpha_fraction": 0.6073825359344482,
"alphanum_fraction": 0.6476510167121887,
"avg_line_length": 23.135135650634766,
"blob_id": "fdd3cc5f72c1410dbde5f842420eb8b8ae132d12",
"content_id": "c3425e980bec6efcb40179db40f498740653f1eb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 894,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 37,
"path": "/src/application/urls.py",
"repo_name": "SocosLLC/gae-flask-base",
"src_encoding": "UTF-8",
"text": "#\n# urls.py\n#\n\nfrom flask import render_template\n\nfrom application import app, handlers\n\n\n# App Engine warm up handler\n# See http://code.google.com/appengine/docs/python/config/appconfig.html#Warming_Requests\napp.add_url_rule('/_ah/warmup', 'warmup', view_func=handlers.warmup)\n\n# Home page\napp.add_url_rule('/', view_func=handlers.home)\n\n# Ajaxy page\napp.add_url_rule('/ajaxy', view_func=handlers.AjaxyView.as_view('ajaxy'))\n\n\n####################################################################################################\n# Error handlers\n\n# Handle 403 errors\[email protected](403)\ndef forbidden(e):\n return render_template('errors/403.html'), 403\n\n# Handle 404 errors\[email protected](404)\ndef page_not_found(e):\n return render_template('errors/404.html'), 404\n\n# Handle 500 errors\[email protected](500)\ndef server_error(e):\n return render_template('errors/500.html'), 500\n\n"
},
{
"alpha_fraction": 0.6132930517196655,
"alphanum_fraction": 0.6132930517196655,
"avg_line_length": 31.41176414489746,
"blob_id": "9b27a0ddc12757a136ae290634f56516cd04a170",
"content_id": "6590cb118be56b5266ade3ce6acab840c700c47f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1655,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 51,
"path": "/src/application/templating.py",
"repo_name": "SocosLLC/gae-flask-base",
"src_encoding": "UTF-8",
"text": "# templating.py\n#\n\nfrom flask import get_flashed_messages, flash, jsonify, request\nfrom flask import _app_ctx_stack\nfrom flask.templating import _render\nfrom flask.views import MethodView\n\nimport env_conf\n\n\n# AJAX response handling ###########################################################################\n# Pilfered from http://cam.st/ajax-block-rendering-in-flask/\n\ndef render_block(template, ctx, block=None):\n template_block = template.blocks.get(block)\n if not template_block:\n raise ValueError('Block {} does not exist in template {}.'.format(block, template.name))\n new_ctx = template.new_context(ctx)\n return ''.join(template_block(new_ctx))\n\n\nclass RenderView(MethodView):\n template_name = None\n base_template = 'layouts/base.html'\n container = '.view-container'\n block = 'view_container'\n title = None\n url = None\n\n def build_response(self, tmpl, context):\n return {\n 'page': render_block(tmpl, ctx=context, block=self.block),\n 'title': self.title,\n 'url': self.url,\n 'view_container': self.container,\n 'pushState': True,\n }\n\n def render_template(self, **context):\n ctx = _app_ctx_stack.top\n ctx.app.update_template_context(context)\n jinja_env = ctx.app.jinja_env\n context.update({\n 'content_template': self.template_name\n })\n template = jinja_env.get_or_select_template(self.template_name or self.base_template)\n if request.is_xhr:\n return jsonify(self.build_response(template, context))\n else:\n return _render(template, context, ctx.app)\n\n\n"
},
{
"alpha_fraction": 0.6275460720062256,
"alphanum_fraction": 0.6304558515548706,
"avg_line_length": 18.80769157409668,
"blob_id": "7c604aad98cfd7f20e9873e8fbffe5982d3d42f0",
"content_id": "2a3b14120a746174e6398f251fbd7743e8cdfc5f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1031,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 52,
"path": "/setup.sh",
"repo_name": "SocosLLC/gae-flask-base",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env bash\n\necho\necho \"Installing npm dependencies\"\necho \"---------------------------\"\n# postcss/autoprefixer for compiling assets. See src/assets.py\nif ! hash postcss 2>/dev/null; then\n sudo npm -g install postcss-cli autoprefixer\nfi\n# bower for asset management\nif ! hash bower 2>/dev/null; then\n sudo npm install -g bower\nfi\n# phantomjs for Selenium tests\nif ! hash phantomjs 2>/dev/null; then\n sudo npm install -g phantomjs-prebuilt\nfi\n\necho\necho \"Setting up virtualenv\"\necho \"---------------------\"\nvirtualenv env\n./env/bin/easy_install -U pip\n./env/bin/pip install --upgrade -r requirements.txt\nsource env/bin/activate\n\necho\necho \"Creating symlinks\"\necho \"-----------------\"\nif [ ! -L src/libs ]; then\n cd src/\n ln -s ../env libs\n cd ..\nfi\n\necho\necho \"Running bower install\"\necho \"---------------------\"\nbower install\n\nif [ ! -f src/application/secret_keys.py ]; then\n cd src/\n ./generate_keys.py\n cd ../\nfi\n\necho\necho \"Building assets\"\necho \"---------------\"\n./src/assets.py\n\ndeactivate\n\n"
},
{
"alpha_fraction": 0.7115384340286255,
"alphanum_fraction": 0.7211538553237915,
"avg_line_length": 19.799999237060547,
"blob_id": "6b6c2834360102387b56631fc7f33dda9a89936b",
"content_id": "293a0e32c464c7527dca2a7ade4cb5575a7ccef4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 104,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 5,
"path": "/src/run.py",
"repo_name": "SocosLLC/gae-flask-base",
"src_encoding": "UTF-8",
"text": "import os\nimport sys\n\nsys.path.insert(0, os.path.join(os.path.abspath('.'), 'libs'))\nimport application\n"
},
{
"alpha_fraction": 0.558282196521759,
"alphanum_fraction": 0.558282196521759,
"avg_line_length": 26.16666603088379,
"blob_id": "b21817722fb0d2b1b85375675038650190e15480",
"content_id": "d0babaa5d87c07d65d4df042c1b1cbb5c0340bfa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 163,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 6,
"path": "/src/application/assets/src/js/ajaxy.js",
"repo_name": "SocosLLC/gae-flask-base",
"src_encoding": "UTF-8",
"text": "$( document ).ready(function() {\n\n $('#a-btn').click(function() { ajaxGet('ajaxy?state=a') });\n $('#b-btn').click(function() { ajaxGet('ajaxy?state=b') });\n\n});\n"
},
{
"alpha_fraction": 0.5041550993919373,
"alphanum_fraction": 0.5041550993919373,
"avg_line_length": 20.235294342041016,
"blob_id": "9115ed2ab5fe1a7ee9290355a8034632b6d89aff",
"content_id": "6a309dcd009bc1b6ec3d989dc09aa464912002f1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 361,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 17,
"path": "/run_tests.sh",
"repo_name": "SocosLLC/gae-flask-base",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\n\nTESTARGS=${@:-\"src/tests/\"}\n\nset +e # Don't die before we clean up\n./link_env_conf.sh test\nmkdir -p tmp/\n./env/bin/python src/assets.py\n./env/bin/nosetests \\\n --with-gae \\\n --verbose \\\n --gae-application src/ \\\n ${TESTARGS}\nRETVAL=$?\n./link_env_conf.sh dev\n\nexit ${RETVAL}\n"
},
{
"alpha_fraction": 0.594257652759552,
"alphanum_fraction": 0.6086135506629944,
"avg_line_length": 32.93589782714844,
"blob_id": "be323460b7df00da36464ebfbe69db02030cdc2d",
"content_id": "2e98959209b77d28a9aca953b11351e06368962c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 2647,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 78,
"path": "/dev_server.sh",
"repo_name": "SocosLLC/gae-flask-base",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env bash\n\n# Failures\nfunction usage {\n echo \"Usage:\"\n echo \"./dev_server.sh start|reset|kill|clean [datastore_path] [storage_path]\"\n echo \" start: starts dev_appserver.py\"\n echo \" kill: stops all dev_appserver.py processes using \\`kill -9\\`\"\n echo \" reset: equivalent to \\`dev_server.sh kill; dev_server.sh start\\`\"\n echo \" clean: deletes all the data in tmp/dev_server_storage and tmp/dev_server_datastore,\"\n echo \" or <datastore_path> and <storage_path> if they are provided.\"\n echo \"dev_appserver_args are args passed directly to dev_appserver.py\"\n echo \"Server runs on port 8080 and logs to tmp/dev_server.log.\"\n echo \"Logs are not preserved between runs.\"\n exit 1\n}\n\nif [ $# -eq 0 ]; then\n usage\nfi\n\nif [ $1 != 'start' ] && [ $1 != 'reset' ] && [ $1 != 'kill' ] && [ $1 != 'clean' ]; then\n usage\nfi\n\ntrap 'kill_ps' SIGINT SIGTERM EXIT\n\nfunction rebuild {\n assets_dir=\"src/application/assets\"\n search_dirs=\"${assets_dir}/src/js ${assets_dir}/src/css\"\n build_file=\"src/assets.py\"\n command=\"./env/bin/python ${build_file}\"\n ./env/bin/when-changed ${search_dirs} -c ${command} &\n}\n\nfunction kill_ps {\n SERVER_PROCESS=$(ps aux | grep dev_appserver.py | grep -v grep | awk '{print $2}')\n if [[ ${SERVER_PROCESS} ]]; then\n echo \"Killing server process ${SERVER_PROCESS}\"\n kill -9 ${SERVER_PROCESS}\n fi\n ASSETS_PROCESS=$(ps aux | grep env/bin/when-changed | grep -v grep | awk '{print $2}')\n if [[ ${ASSETS_PROCESS} ]]; then\n echo \"Killing asset reloader process ${ASSETS_PROCESS}\"\n kill -9 ${ASSETS_PROCESS}\n fi\n}\n\nif [ \"$1\" = 'kill' ]; then\n kill_ps\n exit 0\nelif [ \"$1\" = 'start' ]; then\n DATASTORE_PATH=${2:-\"tmp/dev_server_datastore\"}\n STORAGE_PATH=${3:-\"tmp/dev_server_storage\"}\n ./link_env_conf.sh dev\n LOG_FILE=\"tmp/dev_server.log\"\n mkdir -p $(dirname ${DATASTORE_PATH})\n mkdir -p ${STORAGE_PATH}\n mkdir -p $(dirname ${LOG_FILE})\n rebuild &\n # It would be great to call dev_appserver.py with --quiet to bypass\n # interactive prompts, but as of 2016-12-10 it doesn't work.\n dev_appserver.py \\\n --application='socos-quickstep-601' \\\n --datastore_path=${DATASTORE_PATH} \\\n --storage_path=${STORAGE_PATH} \\\n --skip_sdk_update_check=1 \\\n src/ \\\n | tee ${LOG_FILE}\nelif [ \"$1\" = 'reset' ]; then\n ./dev_server.sh kill\n ./dev_server.sh start $3 $4\nelif [ \"$1\" = 'clean' ]; then\n DATASTORE_PATH=${2:-\"tmp/dev_server_datastore\"}\n STORAGE_PATH=${3:-\"tmp/dev_server_storage\"}\n rm -r ${STORAGE_PATH}\n rm ${DATASTORE_PATH}\nfi\n"
},
{
"alpha_fraction": 0.5977120995521545,
"alphanum_fraction": 0.6003119945526123,
"avg_line_length": 38.64948272705078,
"blob_id": "9c178cc46b3cc5ded36bc2a8926d4f02017a867c",
"content_id": "6744d6abf628ad9d1a2638fc946e2a717c3808d8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11539,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 291,
"path": "/src/tests/base.py",
"repo_name": "SocosLLC/gae-flask-base",
"src_encoding": "UTF-8",
"text": "# base.py\n#\n\nfrom collections import Counter\nimport inspect\nimport logging\nimport os\nfrom pprint import pprint\nimport unittest\nimport urllib\n\nfrom flask_testing import TestCase\nfrom wsgi_liveserver import LiveServerTestCase\nfrom google.appengine.ext import ndb, testbed\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.remote.webdriver import WebDriver, WebElement\nfrom selenium.webdriver.support.ui import Select, WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import (NoSuchElementException, TimeoutException,\n ElementNotVisibleException)\n\nimport application\n\n# Make sure we don't collide with the dev server\nLiveServerTestCase.port_range = (9080, 9090)\n\n# Nose provides a handler that we want to reduce the verbosity of\nLOG = logging.getLogger('').handlers[0]\nLOG.setFormatter(logging.Formatter(unicode(logging.BASIC_FORMAT)))\nLOG.setLevel(logging.INFO)\n\nQUICK = os.environ.get('QUICK') == '1'\n\nQUIET = os.environ.get('QUIET') == '1'\n\n\n####################################################################################################\n# Test Base for NoseGAE web tests\n\nclass TestBase(TestCase):\n # Remove the ones you don't need\n nosegae_app_identity_service = True\n nosegae_blobstore = True\n nosegae_datastore_v3 = True\n nosegae_mail = True\n nosegae_memcache = True\n nosegae_logservice = True\n nosegae_user = True\n nosegae_urlfetch = True\n\n def create_app(self):\n # Flask apps testing. See: http://flask.pocoo.org/docs/testing/\n application.app.config['TESTING'] = True\n application.app.config['WTF_CSRF_ENABLED'] = False\n return application.app\n\n def setUp(self):\n self.client = application.app.test_client()\n\n ################################################################################################\n # Utility functions\n\n def assert_radio_selection_is(self, soup, expected_value):\n selected_device = soup.find('input', type='radio', checked=True)\n self.assertIsNotNone(selected_device, 'No radio selection has been made')\n if expected_value:\n self.assertEqual(selected_device.get('value'), expected_value)\n else:\n self.assertIsNone(selected_device)\n\n def assert_select_selection_is(self, soup, select_id, expected_value):\n select = soup.find('select', id=select_id)\n selected = select.find('option', selected=True).get('value').strip()\n self.assertEqual(selected, expected_value)\n\n def assert_redirect_path(self, rv, expected_path):\n self.assertEqual(rv.location.split('/')[-1], expected_path)\n\n def client_get(self, path, query_params=(), *args, **kwargs):\n \"\"\"\n A wrapper for self.client.get that parses query_param_data into a\n query_params string.\n \"\"\"\n query_string = urllib.urlencode(query_params)\n return self.client.get(path, *args, query_string=query_string, **kwargs)\n\n def client_post(self, path, query_params=(), *args, **kwargs):\n \"\"\"\n A wrapper for self.client.post that parses query_param_data into a\n query_params string.\n \"\"\"\n query_string = urllib.urlencode(query_params)\n return self.client.post(path, *args, query_string=query_string, **kwargs)\n\n\n####################################################################################################\n# Test Base for Selenium tests\n\nclass LiveServerTestBase(LiveServerTestCase):\n # Remove the ones you don't need\n nosegae_app_identity_service = True\n nosegae_blobstore = True\n 
nosegae_datastore_v3 = True\n nosegae_mail = True\n nosegae_memcache = True\n nosegae_logservice = True\n nosegae_user = True\n nosegae_urlfetch = True\n\n def create_app(self):\n context = ndb.get_context()\n context.set_cache_policy(False)\n context.set_memcache_policy(False)\n return application.app\n\n def setUp(self):\n # self.driver = self._firefox_webdriver() # Uncomment to use firefox\n self.driver = self._phantomjs_webdriver() # Comment out to use firefox\n # self._tear_down() # uncomment to clean up crap\n\n def _firefox_webdriver(self):\n return webdriver.Firefox()\n\n def _phantomjs_webdriver(self):\n driver = webdriver.PhantomJS(service_log_path='tmp/ghostdriver.log',\n service_args=['--load-images=false',\n '--ignore-ssl-errors=true',\n '--ssl-protocol=TLSv1'])\n driver.set_window_size(1920, 1080)\n return driver\n\n def _tear_down(self):\n self.driver.close()\n self.driver.quit()\n\n def tearDown(self):\n self._tear_down()\n\n def output_debug(self):\n if not QUIET:\n print 'CONSOLE'\n pprint(self.console())\n print 'SOURCE'\n print self.driver.page_source\n self.save_screenshot()\n\n def console(self):\n return self.driver.get_log('browser')\n\n # Keep track of what screenshot number we're on for each test, for save_screenshot\n _screenshot_call_counts = Counter()\n\n def save_screenshot(self):\n class_name = self.__class__.__name__\n stack = inspect.stack()\n stack_functions = []\n test_name = None\n for i in range(1, 6):\n fcn_name = stack[i][3]\n if fcn_name.startswith('test'):\n test_name = fcn_name\n break\n else:\n stack_functions.append(fcn_name)\n test_name = test_name or stack[1][3]\n count_key = class_name + '-' + test_name\n count = self.__class__._screenshot_call_counts[count_key]\n self.__class__._screenshot_call_counts.update([count_key])\n file_name = 'tmp/{}-{}-{}{}.png'.format(class_name, test_name, count,\n ''.join(['-' + n for n in stack_functions]))\n self.driver.save_screenshot(file_name)\n\n # Testing Stuff ################################################################################\n\n def assert_element_id_exists(self, id_, driver=None):\n self._assert_element_exists('id', id_, driver=driver)\n\n def assert_element_class_exists(self, class_, driver=None):\n self._assert_element_exists('class_name', class_, driver=driver)\n\n def assert_element_name_exists(self, name, driver=None):\n self._assert_element_exists('name', name, driver=driver)\n\n def _assert_element_exists(self, identifier_type, identifier, driver=None):\n driver = driver or self.driver\n try:\n find_fcn = getattr(driver, 'find_element_by_' + identifier_type)\n find_fcn(identifier)\n except NoSuchElementException:\n if isinstance(driver, WebElement) and not QUIET:\n print 'FAILED ON ELEMENT'\n print driver.get_attribute('innerHTML')\n self.save_screenshot()\n self.fail('No element with {} {} found'.format(identifier_type, identifier))\n\n def assert_element_id_does_not_exist(self, id_, driver=None):\n self._assert_element_does_not_exist('id', id_, driver=driver)\n\n def assert_element_class_does_not_exist(self, class_, driver=None):\n self._assert_element_does_not_exist('class', class_, driver=driver)\n\n def _assert_element_does_not_exist(self, identifier_type, identifier, driver=None):\n driver = driver or self.driver\n try:\n find_fcn = getattr(driver, 'find_element_by_' + identifier_type)\n find_fcn(identifier)\n if not QUIET:\n print 'FAILED ON ELEMENT'\n self.output_debug()\n self.fail('Element with {} {} found'.format(identifier_type, identifier))\n except 
NoSuchElementException:\n pass\n\n def assert_select_is(self, element_id, expected_text=None, expected_value=None, driver=None):\n driver = driver or self.driver\n select = Select(driver.find_element_by_id(element_id))\n selected_option = select.first_selected_option # type: WebElement\n if not QUIET:\n self.output_debug()\n if expected_value:\n self.assertEqual(selected_option.get_attribute('value').strip(), expected_value)\n else:\n self.assertEqual(selected_option.text.strip(), expected_text)\n\n # Driver Support ###############################################################################\n\n def print_source(self, driver_or_element=None):\n driver_or_element = driver_or_element or self.driver\n if isinstance(driver_or_element, WebDriver):\n print driver_or_element.page_source\n elif isinstance(driver_or_element, WebElement):\n print driver_or_element.get_attribute('outerHTML')\n else:\n raise NotImplementedError('Unknown type {}'.format(driver_or_element.__class__.__name__))\n\n def stub_js_confirm(self):\n \"\"\" PhantomJS doesn't support alert boxes. This stubs out the\n window.confirm function to immediately return true. \"\"\"\n script = \"window.confirm = function(msg) { return true; }\"\n self.driver.execute_script(script);\n\n def wait_for(self, element_id, driver=None):\n return self._wait_for(element_id, status='presence', identifier_type='ID', driver=driver)\n\n def wait_for_visibility_of(self, element_id, driver=None):\n return self._wait_for(element_id, driver=driver)\n\n def wait_for_invisibility_of(self, element_id, driver=None):\n return self._wait_for(element_id, status='invisibility',\n identifier_type='ID', driver=driver)\n\n def wait_for_visibility_of_class(self, element_class, driver=None):\n return self._wait_for(element_class, status='visibility',\n identifier_type='CLASS_NAME', driver=driver)\n\n def wait_for_invisibility_of_class(self, element_class, driver=None):\n return self._wait_for(element_class, status='invisibility',\n identifier_type='CLASS_NAME', driver=driver)\n\n def wait_for_visibility_of_tag(self, element_tag, driver=None):\n return self._wait_for(element_tag, status='visibility',\n identifier_type='TAG_NAME', driver=driver)\n\n def _wait_for(self, identifier, status='visibility', identifier_type='ID', driver=None):\n driver = driver or self.driver\n try:\n status_fcn = getattr(EC, status + '_of_element_located')\n by = getattr(By, identifier_type)\n WebDriverWait(driver, 6).until(\n status_fcn((by, identifier))\n )\n except TimeoutException, e:\n if not QUIET:\n print 'FAILED ON ELEMENT'\n self.output_debug()\n e.msg = '{} {} never got {}.'.format(identifier_type, identifier, status)\n raise e\n\n def wait_for_text_in_element(self, element_id, text, driver=None):\n driver = driver or self.driver\n try:\n WebDriverWait(driver, 6).until(\n EC.text_to_be_present_in_element((By.ID, element_id), text)\n )\n except TimeoutException, e:\n print 'FAILED ON ELEMENT'\n self.output_debug()\n e.msg = 'Text {} never showed up.'.format(text)\n raise e\n\n"
},
{
"alpha_fraction": 0.6047903895378113,
"alphanum_fraction": 0.6090675592422485,
"avg_line_length": 26.785715103149414,
"blob_id": "2f95a24e04b7241567ccf1824658905b6343326e",
"content_id": "7cf56c890cd5ef6c43a8ee06f4100bf8e17a987d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1169,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 42,
"path": "/src/tests/test_pages.py",
"repo_name": "SocosLLC/gae-flask-base",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n#\n# test_pages.py\n#\n\nimport unittest\n\nfrom bs4 import BeautifulSoup\n\nimport base\n\n\nclass TestEndpoints(base.TestBase):\n\n def test_thing(self):\n rv = self.client.get('/')\n soup = BeautifulSoup(rv.data, 'lxml')\n self.assert200(rv)\n self.assertIn('Hello', soup.find('body').get_text())\n\n\nclass TestInBrowser(base.LiveServerTestBase):\n\n def test_hello_in_browser(self):\n url = self.url_base()\n self.driver.get(url)\n self.wait_for_visibility_of_tag('body')\n body = self.driver.find_element_by_tag_name('body')\n self.assertIn('Hello', body.text)\n\n def test_ajaxy(self):\n url = self.url_base() + '/ajaxy?state=b'\n self.driver.get(url)\n email_input = self.driver.find_element_by_id('email')\n email_input.send_keys('[email protected]')\n submit_button = self.driver.find_element_by_id('email-form-submit')\n submit_button.click()\n a_btn = self.driver.find_element_by_id('a-btn')\n a_btn.click()\n self.wait_for_visibility_of('a')\n a_panel = self.driver.find_element_by_id('a')\n self.assertIn('[email protected]', a_panel.text)\n\n\n"
},
{
"alpha_fraction": 0.6058865785598755,
"alphanum_fraction": 0.6058865785598755,
"avg_line_length": 24.77777862548828,
"blob_id": "a8e6ecdb1fb1a41298577e203b6760f170992aba",
"content_id": "d384e398e86b4cb0770507f088cccc11395c28c6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1393,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 54,
"path": "/src/application/handlers.py",
"repo_name": "SocosLLC/gae-flask-base",
"src_encoding": "UTF-8",
"text": "#\n# handlers.py\n#\n\nimport logging\n\nfrom flask import flash, render_template, request\n\nfrom application import forms, models, templating\n\nLOG = logging.getLogger(__name__)\n\n\ndef warmup():\n return ''\n\n\ndef home():\n return render_template('home.html')\n\n\nclass AjaxyView(templating.RenderView):\n\n template_name = 'ajaxy/main.html'\n container = '#ajaxy-panel'\n block = 'ajaxy_content'\n title = 'Ajaxy Account'\n url = '/ajaxy'\n\n def get(self):\n LOG.info(request.args)\n state = request.args.get('state') or 'a'\n return self.render_page(state=state)\n\n def post(self):\n form = forms.EmailForm(request.form)\n email = form.email.data\n entry = models.Email.query(models.Email.email == email).get()\n if entry:\n flash('Email already exists!', 'info')\n return self.render_page(state='b', form=form)\n else:\n new_entry = models.Email(email=email)\n new_entry.put()\n return self.render_page(state='b')\n\n def render_page(self, state, form=None):\n if state == 'b':\n form = form or forms.EmailForm()\n return self.render_template(state=state, form=form)\n else:\n email_entities = models.Email.query().fetch()\n emails = [e.email for e in email_entities]\n return self.render_template(state=state, emails=emails)\n\n"
},
{
"alpha_fraction": 0.6120331883430481,
"alphanum_fraction": 0.6120331883430481,
"avg_line_length": 24.35087776184082,
"blob_id": "42035f819ed0c0e1ec8a8593d20b6f52675539e3",
"content_id": "66792f75c946121c45c2020d9271ed679aa91d00",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1446,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 57,
"path": "/src/application/assets/src/js/ajaxHandler.js",
"repo_name": "SocosLLC/gae-flask-base",
"src_encoding": "UTF-8",
"text": "/* ajaxHandler.js\n *\n * Pilfered from http://cam.st/ajax-block-rendering-in-flask/\n */\n\nfunction ajaxGet(url, data) {\n\n var successFcn = function(data) {\n // If pushState is supported, use it for AJAXy response\n if (!!window.history && history.pushState) {\n history.pushState(data.page, data.title, data.url);\n $(data.view_container).html(data.page);\n } else { // If no pushState support, just go to URL\n console.log(\"No pushState. Redirecting to \" + data.url);\n location.href = data.url;\n }\n };\n\n var failureFcn = function(something, error) {\n console.log('GET failure: ' + error);\n };\n\n $.get(url, data)\n .done(successFcn)\n .fail(failureFcn)\n}\n\n\nfunction ajaxPost(url, data, handler /* = null */) {\n\n var successFcn = function(data) {\n // If pushState is supported, use it for AJAXy response\n if (!!window.history && history.pushState) {\n history.pushState(data.page, data.title, data.url);\n $(data.view_container).html(data.page);\n } else { // If no pushState support, just go to URL\n console.log(\"No pushState. Redirecting to \" + data.url);\n location.href = data.url;\n }\n };\n\n var failureFcn = function(_, error) {\n console.log('POST failure: ' + error);\n };\n\n if (handler) {\n if (data) {\n data += '&handler=' + handler;\n } else {\n data = 'handler=' + handler\n }\n }\n\n $.post(url, data)\n .done(successFcn)\n .fail(failureFcn)\n}\n\n"
},
{
"alpha_fraction": 0.7507002949714661,
"alphanum_fraction": 0.7535014152526855,
"avg_line_length": 22.733333587646484,
"blob_id": "7fb51db523c667444376e1edc29cb502c76d3ca9",
"content_id": "dfe305bec91fa3c2f5caaea981b2ee0e8f002f8c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 357,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 15,
"path": "/src/application/forms.py",
"repo_name": "SocosLLC/gae-flask-base",
"src_encoding": "UTF-8",
"text": "# forms.py\n#\n\"\"\"\nWeb forms based on Flask-WTForms\nSee: http://flask.pocoo.org/docs/patterns/wtforms/\n http://wtforms.simplecodes.com/\n\"\"\"\n\nfrom flask_wtf import FlaskForm\nfrom wtforms.fields.html5 import EmailField\nfrom wtforms.validators import InputRequired\n\n\nclass EmailForm(FlaskForm):\n email = EmailField('Email', validators=[InputRequired()])\n\n"
},
{
"alpha_fraction": 0.7638376355171204,
"alphanum_fraction": 0.7822878360748291,
"avg_line_length": 12.550000190734863,
"blob_id": "fd51c7f322fea4af60d30bc326e73c7869d10fa4",
"content_id": "065f3d2ca8c0cd8807f4ea348994c62508ad2b03",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 271,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 20,
"path": "/requirements.txt",
"repo_name": "SocosLLC/gae-flask-base",
"src_encoding": "UTF-8",
"text": "# Application\nflask\nflask-assets\n cssmin\n jsmin\nflask-debugtoolbar\nflask-wtf\nwtforms\n\n# Testing\nbeautifulsoup4\nlxml # for bs4\nflask-testing\nnose\nnosegae>=0.5.8\nselenium\nwsgi-liveserver\n\n# Development\nwhen-changed # for rebuilding assets on changes in development\n"
},
{
"alpha_fraction": 0.5950413346290588,
"alphanum_fraction": 0.6239669322967529,
"avg_line_length": 23.100000381469727,
"blob_id": "5c63dbf89162cc78cd0cb3589147546449d7e506",
"content_id": "7c82db2a48c273f0c66e0c6d5afd4b51c8535546",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 242,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 10,
"path": "/src/application/templates/errors/403.html",
"repo_name": "SocosLLC/gae-flask-base",
"src_encoding": "UTF-8",
"text": "{% extends 'layouts/error.html' %}\n\n{% block error_content %}\n <h1 class=\"center-align\">Forbidden</h1>\n <h2 class=\"center-align\">Error 403</h2>\n <br />\n <p>\n You're not allowed to access the content at this URL.\n </p>\n{% endblock %}\n\n"
},
{
"alpha_fraction": 0.6023896336555481,
"alphanum_fraction": 0.6090275645256042,
"avg_line_length": 26.633028030395508,
"blob_id": "0f6f6b3c0b1a67f5473c9dd6be59dbadb386b07c",
"content_id": "bcad988d621e2772b52514515e346644f022894c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3013,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 109,
"path": "/src/assets.py",
"repo_name": "SocosLLC/gae-flask-base",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# assets.py\n#\n\"\"\"\nMakes assets ready for the browser.\n\"\"\"\n\nimport os\nimport shutil\nfrom flask import Flask\nfrom flask_assets import Bundle, Environment\n\nASSETS_DIR = os.path.join(os.path.dirname(__file__), 'application', 'assets')\nSTATIC_DIR = os.path.join(os.path.dirname(__file__), 'application', 'static')\n\n\ndef init(app=None):\n app = app or Flask(__name__)\n bundles = []\n\n with app.app_context():\n env = Environment(app)\n env.load_path = [ASSETS_DIR]\n env.set_directory(STATIC_DIR)\n # App Engine doesn't support automatic rebuilding.\n env.auto_build = False\n # This file needs to be shipped with your code.\n env.manifest = 'file'\n bundles.extend(_add_base_bundle(env))\n bundles.extend(_add_home_bundle(env))\n bundles.extend(_add_ajaxy_bundle(env))\n\n return bundles\n\n\ndef _add_base_bundle(env):\n css = Bundle(\n \"src/css/base.css\",\n \"src/css/navbar.css\",\n \"src/css/footer.css\",\n filters=[\"autoprefixer6\", \"cssmin\"], output=\"css/base.min.css\")\n env.register('base_css', css)\n\n libs_css = Bundle(\n \"libs/bootstrap/dist/css/bootstrap.css\",\n filters=[\"cssmin\"], output=\"css/base-libs.min.css\")\n env.register('base_libs_css', libs_css)\n\n js = Bundle(\n \"src/js/ajaxHandler.js\",\n filters=\"jsmin\", output=\"js/base.min.js\")\n env.register('base_js', js)\n\n libs_js = Bundle(\n \"libs/jquery/dist/jquery.js\",\n \"libs/bootstrap/dist/js/bootstrap.js\",\n filters=\"jsmin\", output=\"js/base-libs.min.js\")\n env.register('base_libs_js', libs_js)\n\n return css, libs_css, js, libs_js\n\n\ndef _add_home_bundle(env):\n css = Bundle(\n \"src/css/home.css\",\n filters=[\"autoprefixer6\", \"cssmin\"], output=\"css/home.min.css\")\n env.register('home_css', css)\n\n return css,\n\n\ndef _add_ajaxy_bundle(env):\n css = Bundle(\n \"src/css/ajaxy.css\",\n filters=[\"autoprefixer6\", \"cssmin\"], output=\"css/ajaxy.min.css\")\n env.register('ajaxy_css', css)\n\n js = Bundle(\n \"src/js/ajaxy.js\",\n filters=\"jsmin\", output=\"js/ajaxy.min.js\")\n env.register('ajaxy_js', js)\n\n return css, js\n\n\n# From http://stackoverflow.com/a/12514470/1464495\ndef _copytree(src, dst, symlinks=False, ignore=None):\n for item in os.listdir(src):\n s = os.path.join(src, item)\n d = os.path.join(dst, item)\n if os.path.isdir(s):\n shutil.copytree(s, d, symlinks, ignore)\n else:\n shutil.copy2(s, d)\n\n\nif __name__ == '__main__':\n print 'Building asset bundles'\n bundles = init()\n for bundle in bundles:\n bundle.build()\n print 'Copying fonts into static/'\n bootstrap_fonts_dir = os.path.join(ASSETS_DIR, 'libs', 'bootstrap', 'fonts')\n target_fonts_dir = os.path.join(STATIC_DIR, 'fonts')\n if not os.path.exists(target_fonts_dir):\n os.makedirs(target_fonts_dir)\n _copytree(bootstrap_fonts_dir, target_fonts_dir)\n\n"
},
{
"alpha_fraction": 0.7390761375427246,
"alphanum_fraction": 0.7440699338912964,
"avg_line_length": 25.66666603088379,
"blob_id": "5b2031e3de4e94d458aed209f1a1fc6a88c06802",
"content_id": "ed29dc097ef1d7dc452c88d884174ef8cd917083",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 801,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 30,
"path": "/README.md",
"repo_name": "SocosLLC/gae-flask-base",
"src_encoding": "UTF-8",
"text": "# GAE Flask Web Base\n\n## Getting Started\n\nRun `./setup.sh`. It'll install some npm dependencies, create a Python\nvirtualenv, and compile assets.\n\nRun `./dev_server.sh start` to start the dev server at `localhost:8080`.\n\n`./run_tests.sh` does what it says on the tin.\n\n\n## Environments\n\nThere are three environments: TEST, DEV, and PROD. Since GAE doesn't\nsupport environment variables, you switch between them by symlinking\n`src/application/env_conf.py` to the file in `src/application/config/`\nthat you'd like.\n\n\n## Assets\n\nLibs are managed with bower. Put your own JS and CSS in\n`src/application/assets/`. These files get compiled by executing\n`src/assets.py`, putting artifacts in `src/application/static/`.\n\n\n## Credit\n\nMade with love by the eng team at [Socos LLC](http://www.socoslearning.com)\n\n"
},
{
"alpha_fraction": 0.5823293328285217,
"alphanum_fraction": 0.5838353633880615,
"avg_line_length": 23,
"blob_id": "20d45e0bca321e93d40d99493ab4d7a5f16fd6f7",
"content_id": "2c450e84dc4f2cebecaed30e6d1053493209894e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1992,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 83,
"path": "/src/application/__init__.py",
"repo_name": "SocosLLC/gae-flask-base",
"src_encoding": "UTF-8",
"text": "# __init__.py\n#\n\nimport os\nimport sys\nimport logging\n\nimport env_conf\n\n\n# Initialize environment #######################################################\n\nclass Environment(object):\n DEV = False\n TEST = False\n PROD = False\n LOCAL = False # True if DEV or TEST, false if STAGING or PROD\n env_name = None\n\n def __init__(self, mode):\n self.env_name = mode\n if mode == 'DEV':\n self.DEV = True\n elif mode == 'TEST':\n self.TEST = True\n elif mode == 'PROD':\n self.PROD = True\n else:\n raise ValueError('No such mode: {}'.format(mode))\n if self.DEV or self.TEST:\n self.LOCAL = True\n\n_env_name = os.environ.get('ENV') or env_conf.FLASK_CONF\n\nenv = Environment(_env_name)\n\n\n# Generic Initialization #######################################################\n\nlogging.basicConfig(stream=sys.stderr)\n\nfrom flask import Flask\nfrom flask_debugtoolbar import DebugToolbarExtension\nimport assets\n\napp = Flask('application')\napp.env = env\n\nassets.init(app)\n\n# Environment-specific initialization ##########################################\n\nif app.env.DEV:\n print 'Environment: DEV'\n # Development settings\n app.config.from_object('application.settings.Development')\n # Flask-DebugToolbar\n toolbar = DebugToolbarExtension(app)\n\nelif app.env.TEST:\n print 'Environment: TEST'\n app.config.from_object('application.settings.Testing')\n\nelse:\n assert app.env.PROD\n print 'Environment: PROD'\n app.config.from_object('application.settings.Production')\n\n\n# Jinja2 Configuration ########################################################\n\n# Loop controls extension\napp.jinja_env.add_extension('jinja2.ext.loopcontrols')\n\n# Better debugging\nif app.env.LOCAL:\n from google.appengine.tools.devappserver2.python import sandbox\n sandbox._WHITE_LIST_C_MODULES += ['_ctypes', 'gestalt']\n\n\n# Pull in URL dispatch routes #################################################\n\nimport urls\n"
},
{
"alpha_fraction": 0.6105263233184814,
"alphanum_fraction": 0.6263157725334167,
"avg_line_length": 16.272727966308594,
"blob_id": "06b2633d42efa41652c1ceccc92936275cb29a4d",
"content_id": "c93adfdc5665a60ff565391be2a78b9b9db284c3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 380,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 22,
"path": "/link_env_conf.sh",
"repo_name": "SocosLLC/gae-flask-base",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env bash\n\nif [ -z $1 ]; then\n echo \"Usage: $0 ENV_CONF_EXTENSION\"\n exit 1\nfi\n\nFULL_PATH=src/application/config/env_conf.py.$1\nif [ ! -e $FULL_PATH ]; then\n echo \"No such env conf: $FULL_PATH\"\n exit 1\nfi\n\ncd src/application ;\n\n# Remove compiled version\nif [ -e env_conf.pyc ]; then\n rm env_conf.pyc\nfi\n\nln -sf config/env_conf.py.$1 env_conf.py ;\ncd ../..\n"
}
] | 19 |
python-util-type-detection/pyEntrezId
|
https://github.com/python-util-type-detection/pyEntrezId
|
bd8270cf2c4a8075f77398022e0a8a999a97a7d5
|
0417dd11ae2561d9ba9f6845682f62bffa3088c7
|
febd7c49f8282a25404d9bb23a1624b026fb6c9a
|
refs/heads/master
| 2021-01-20T03:04:17.121097 | 2017-08-24T23:06:19 | 2017-08-24T23:06:19 | 101,344,793 | 0 | 0 | null | 2017-08-24T23:05:44 | 2017-05-19T16:31:34 | 2017-02-15T15:29:42 | null |
[
{
"alpha_fraction": 0.8823529481887817,
"alphanum_fraction": 0.8823529481887817,
"avg_line_length": 33,
"blob_id": "0baacab89666f23d3b53aed7b10138a030cca91f",
"content_id": "7f2ab7c74f19d7fd608a796627333781062d7f6b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 34,
"license_type": "permissive",
"max_line_length": 33,
"num_lines": 1,
"path": "/PyEntrezId/__init__.py",
"repo_name": "python-util-type-detection/pyEntrezId",
"src_encoding": "UTF-8",
"text": "from Conversion import Conversion\n"
}
] | 1 |
mormasa/Amazon
|
https://github.com/mormasa/Amazon
|
5cac7a12af433c2e3cdff7ab63874ce6d0ebcc33
|
98134a86c276482215e25840b132026d603764b7
|
5d4cfabbf9e066a634af86f90441448b8e491124
|
refs/heads/master
| 2022-04-30T10:31:45.257688 | 2022-03-25T09:15:48 | 2022-03-25T09:15:48 | 228,085,837 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6104984283447266,
"alphanum_fraction": 0.6197618246078491,
"avg_line_length": 32.835819244384766,
"blob_id": "3d1e5f8c61bacdcb81543255435467d10367d76b",
"content_id": "e5e8ad29e70ba9bceb8370681bb7c68b5bf56eb9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2267,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 67,
"path": "/gb_upload.py",
"repo_name": "mormasa/Amazon",
"src_encoding": "UTF-8",
"text": "from selenium import webdriver\nimport time\n\n\ndef get_all_upload_buttons(browser):\n # all_products = chrome.find_elements_by_xpath('.//div[@class = \"s-result-list s-search-results sg-row\"]//div[@data-asin]')\n all_products = chrome.find_elements_by_xpath('.//a[@class = \"btn btn-sm btn-primary]')\n\n asins = []\n for item in all_products:\n asins.append(item.get_attribute(\"data-asin\"))\n return asins\n return upload_buttons\n\n\ndef get_my_asins_from_inventory_txt_file(inventory_filename):\n parsed_data = []\n with open(inventory_filename) as file:\n data = [line.strip() for line in file.readlines()]\n for line in data:\n parsed_data.append(line.split(\"\\t\"))\n my_asins = [[parsed_data[i][10], parsed_data[i][2]] for i in range(1, len(parsed_data))]\n\n# Remove duplications\n for asin in my_asins:\n orig = True\n for j in range(0, len(my_asins)):\n if asin[0] == my_asins[j][0]:\n if not orig:\n my_asins.remove(asin)\n break\n orig = False\n return my_asins\n\n\ndef look_my_product_in_page(my_asins, page_asins, page_number):\n for asin in my_asins:\n if asin[0] in page_asins:\n print(f\"{asin[1]} ASIN {asin[0]} is in page {page_number}, position: {page_asins.index(asin[0])}\")\n\n\ndef go_to_next_page():\n next_button = chrome.find_elements_by_xpath('.//li[@class = \"a-last\"]')\n next_button[0].click()\n time.sleep(2)\n\n# try:\namazon_url = \"https://www.gearbubble.com/dropship_stores/11005/new_product\"\nchrome = webdriver.Chrome()\nchrome.get(amazon_url)\nupload_buttons = get_all_upload_buttons(chrome)\n\n# for keyword in string_to_check:\n# search_bar = chrome.find_element_by_id(\"twotabsearchtextbox\")\n# search_bar.send_keys(keyword)\n# search_bar.submit()\n# print(f\"---------------- printing ranking for keyword: {keyword}\")\n# for i in range(3):\n# print(f\"Checking page number {i+1}\")\n# page_asins = get_page_asins()\n# look_my_product_in_page(my_asins, page_asins, i+1)\n# go_to_next_page()\n# chrome.find_element_by_id(\"twotabsearchtextbox\").clear()\nprint(\"Done searching. Closing browser..\")\n # chrome.quit()\n# except IndexError as e:\n# chrome.quit()\n"
},
{
"alpha_fraction": 0.4692115783691406,
"alphanum_fraction": 0.49472472071647644,
"avg_line_length": 34.22297286987305,
"blob_id": "817c7bd62756fd0e5fdee7cde50f749c191f16ac",
"content_id": "b573166671497c24f717be964fcc9fe1777c246c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5213,
"license_type": "no_license",
"max_line_length": 139,
"num_lines": 148,
"path": "/FBA_Alert.py",
"repo_name": "mormasa/Amazon",
"src_encoding": "UTF-8",
"text": "import xlsxwriter\nimport pandas as pd\nsales_file = \"Sales.csv\"\ninventory_file = \"FBA_Inventory.csv\"\nreport_file = \"FBA_Inventory_Alert_Report.xlsx\"\nskus_to_ignore = ['1001']\n\ndef get_info_from_csv(filename):\n\n updated_output = []\n\n # if filename == sales_file:\n with open(filename) as f:\n file_out = [line.strip().split('\"') for line in f.readlines()]\n for j in range(len(file_out)):\n if j ==0:\n continue\n new_line = []\n line_to_check = file_out[j]\n for i in range(len(line_to_check)):\n if not (',' in line_to_check[i] and len(line_to_check[i]) == 1):\n new_line.append(line_to_check[i])\n updated_output.append(new_line)\n return updated_output\n\n\ndef update_fba_info_with_sales(fba_info, sales_info, skus_to_ignore):\n report = [['ASIN', 'SKU', 'TITLE', '14 Days Sales', '28 Days Sales', 'Inventory', 'Incoming', 'Inventory/28 Days Sales']]\n\n for i in range(len(fba_info)):\n product_sku = fba_info[i][1]\n if product_sku in skus_to_ignore:\n continue\n product_name = fba_info[i][4]\n fba_report_asin = fba_info[i][3]\n inventory_qty = int(fba_info[i][10])\n incoming = int(fba_info[i][17])\n\n for j in range(len(sales_info)):\n updated = False\n sales_report_asin = sales_info[j][2]\n\n if fba_report_asin == sales_report_asin:\n last_14_days_sales = int(sales_info[j][9])\n last_28_days_sales = last_14_days_sales * 2\n updated = True\n break\n if not updated:\n last_14_days_sales = 0\n last_28_days_sales = 0\n if inventory_qty > 0:\n ratio = 999999\n else:\n ratio = 0\n else:\n if last_14_days_sales == 0:\n ratio = 999999\n elif inventory_qty > 0:\n ratio = round(float(inventory_qty / last_28_days_sales), 2)\n else:\n ratio = 0\n report.append([fba_report_asin, product_sku, product_name, last_14_days_sales, last_28_days_sales, inventory_qty, incoming, ratio])\n return report\n\n\n\ndef build_report(data):\n workbook = xlsxwriter.Workbook(report_file)\n worksheet1 = workbook.add_worksheet()\n red = workbook.add_format({'bg_color': '#FF0000'})\n yellow = workbook.add_format({'bg_color': '#F7FE2E'})\n i = 0\n for row, row_data in enumerate(data):\n worksheet1.write_row(row, 0, row_data)\n i += 1\n\n worksheet1.conditional_format(f'F2:F{len(data)}', {'type': 'cell',\n 'criteria': 'between',\n 'minimum': 0.01,\n 'maximum': 0.5,\n 'format': red})\n worksheet1.conditional_format(f'F2:F{len(data)}', {'type': 'cell',\n 'criteria': 'between',\n 'minimum': 0.501,\n 'maximum': 1,\n 'format': yellow})\n\n workbook.close()\n\n\ndef build_report_panda(data):\n asin = []\n sku = []\n sales14 = []\n sales28 = []\n inventory = []\n incoming = []\n ratio = []\n title = []\n for i in range(len(data)):\n if i == 0:\n continue\n asin.append(data[i][0])\n title.append(data[i][2])\n sku.append(data[i][1])\n sales14.append(data[i][3])\n sales28.append(data[i][4])\n inventory.append(data[i][5])\n incoming.append(data[i][6])\n ratio.append(data[i][7])\n writer = pd.ExcelWriter(report_file, engine='xlsxwriter')\n df = pd.DataFrame({'Asin': asin,\n 'Title': title,\n 'SKU': sku,\n 'Sales 14 days': sales14,\n 'Sales 30 days': sales28,\n 'Inventory': inventory,\n 'Incoming': incoming,\n 'Inventory/Monthly Sales': ratio})\n df = df.sort_values('Inventory/Monthly Sales')\n print(df)\n df.to_excel(writer, sheet_name='Sheet1')\n workbook = writer.book\n worksheet1 = writer.sheets['Sheet1']\n red = workbook.add_format({'bg_color': '#FF0000'})\n yellow = workbook.add_format({'bg_color': '#F7FE2E'})\n\n\n worksheet1.conditional_format(f'I2:I{len(data)}', {'type': 'cell',\n 'criteria': 'between',\n 
'minimum': 0.01,\n 'maximum': 0.5,\n 'format': red})\n worksheet1.conditional_format(f'I2:I{len(data)}', {'type': 'cell',\n 'criteria': 'between',\n 'minimum': 0.501,\n 'maximum': 1,\n 'format': yellow})\n\n writer.save()\n # workbook.close()\n\nsales_info = get_info_from_csv(sales_file)\nfba_info = get_info_from_csv(inventory_file)\nreport = update_fba_info_with_sales(fba_info, sales_info, skus_to_ignore)\nbuild_report_panda(report)\n# print(report)\nprint(\"Done!\")\n"
},
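The record above colors low inventory-cover cells by writing rows with xlsxwriter and then attaching two conditional_format rules to the ratio column. Below is a minimal, runnable sketch of just that coloring technique; the file name, column letter, and sample rows are illustrative assumptions, while the 0.01–0.5 (red) and 0.501–1 (yellow) thresholds are taken from the record itself.

    # Minimal sketch of the xlsxwriter conditional-formatting technique used above.
    # "demo_report.xlsx", column B, and the sample rows are illustrative assumptions.
    import xlsxwriter

    rows = [
        ["SKU", "Inventory/28 Days Sales"],  # header row
        ["A-1", 0.25],   # falls in 0.01-0.5, should turn red
        ["A-2", 0.75],   # falls in 0.501-1, should turn yellow
        ["A-3", 2.00],   # outside both ranges, left uncolored
    ]

    workbook = xlsxwriter.Workbook("demo_report.xlsx")
    worksheet = workbook.add_worksheet()
    red = workbook.add_format({"bg_color": "#FF0000"})
    yellow = workbook.add_format({"bg_color": "#F7FE2E"})

    for row_index, row_data in enumerate(rows):
        worksheet.write_row(row_index, 0, row_data)

    # Rules cover only the data rows (row 2 onward), mirroring the F2:F{n} ranges above.
    last_row = len(rows)
    worksheet.conditional_format(f"B2:B{last_row}", {
        "type": "cell", "criteria": "between",
        "minimum": 0.01, "maximum": 0.5, "format": red,
    })
    worksheet.conditional_format(f"B2:B{last_row}", {
        "type": "cell", "criteria": "between",
        "minimum": 0.501, "maximum": 1, "format": yellow,
    })
    workbook.close()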
{
"alpha_fraction": 0.5640109181404114,
"alphanum_fraction": 0.5764136910438538,
"avg_line_length": 46.0990104675293,
"blob_id": "6bc5bfc2c93e1a7fe06471df80593f7af7296096",
"content_id": "5d17222190b80a52a2ee5c4a96402858c7edf05d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4758,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 101,
"path": "/NFT_Scrapper.py",
"repo_name": "mormasa/Amazon",
"src_encoding": "UTF-8",
"text": "from selenium import webdriver\nimport telegram\nimport logging\nimport time\nfrom pathlib import Path\nfrom datetime import datetime\nURL = \"https://icy.tools/\"\nINTERVAL_REPEAT_TIME_MINUTES = 120\n\n\ndef init_loggers():\n now = str(datetime.now().strftime(\"%d-%m-%Y_%H-%M-%S\"))\n logger = logging.getLogger(__name__)\n logging.basicConfig(\n format=\"%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s\",\n datefmt=\"%Y-%m-%d:%H:%M:%S\",\n level=logging.INFO\n )\n home = str(Path.home())\n logs_folder = home + str(Path(\"/logs\"))\n filename = logs_folder + str(Path(\"/\")) + f\"nft_scrapper_{now}.txt\"\n\n # Log file name\n test_handler = logging.FileHandler(filename)\n test_handler.setFormatter(\n logging.Formatter(\n \"%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s\"\n )\n )\n logger.addHandler(test_handler)\n return logger\n\n\ndef construct_message(chrome):\n all_nfts_elements = chrome.find_elements_by_xpath(\n \"//tbody[@class='bg-white dark:bg-darker divide-y divide-gray-200 dark:divide-dark']/*\")\n message = [\"***LAST 1 HOUR ***\\n\"]\n for nft_elem in all_nfts_elements:\n try:\n name_elements, price_elements, sales_elements, average_elements, volume_elements = \\\n [nft_elem.find_element_by_xpath(f\"./td[{elem_index}]/a[1]\") for elem_index in range(1, 6)]\n name_text, price_text, sales_text, avg_text, vol_text = [elem.get_attribute(\"innerText\")\n for elem in\n [name_elements, price_elements, sales_elements,\n average_elements, volume_elements]]\n [name, quantaty], [sales, sales_change], [price, price_change], [avg, avg_change], [vol, vol_change] = \\\n [elem.split(\"\\n\\n\") for elem in [name_text, sales_text, price_text, avg_text, vol_text]]\n quantaty = quantaty.replace('Circulating supply:', '')\n price_change_elem, sales_chage_elem, avg_change_elem, vol_change_elem = \\\n [elem.find_element_by_xpath(\"./p[1]/div[1]/span\")\n for elem in [price_elements, sales_elements, average_elements, volume_elements]]\n price_change_color, sales_chage_color, avg_change_color, vol_change_color = \\\n [\"green\" if \"green\" in elem.get_attribute(\"class\") else \"red\" if \"red\" in elem.get_attribute(\"class\")\n else \"gray\" for elem in [price_change_elem, sales_chage_elem, avg_change_elem, vol_change_elem]]\n colors = [price_change_color, sales_chage_color, avg_change_color, vol_change_color]\n value_change = [sales_change, price_change, avg_change, vol_change]\n for i in range(0, len(colors)):\n value_change[i] = \"-\" + value_change[i] if colors[i] == \"red\" \\\n else \"+\" + value_change[i] if colors[i] == \"green\" else ''\n sales_change, price_change, avg_change, vol_change = value_change\n data = f\"---\\n{name}\\n---\\nquantity {quantaty}\\nfloor = {price} change {price_change}\\n\" \\\n f\"sales = {sales} change {sales_change}\\navg = {avg} change {avg_change}\\n\" \\\n f\"vol = {vol} change {vol_change}\"\n filtered_data = data.replace('Ξ', '')\n # logger.info(filtered_data)\n message.append(filtered_data)\n except Exception as e:\n logger.warning(f\"{str(e)} + {type(e).__name__}\")\n logger.info(message)\n str_message = \"\\n\".join(e for e in message)\n # logger.info(str_message)\n return str_message\n\n\ndef send_msg_to_telegram_bot(message):\n telegram_bot = telegram.Bot(token=\"5151726262:AAEdX_1ZeYNRV3dsjwxjGsB6V65aB0rvvQo\")\n\n if len(message) > 4096:\n for x in range(0, len(message), 4096):\n telegram_bot.send_message('637189504', message[x:x + 4096])\n else:\n telegram_bot.send_message('637189504', 
message)\n\n\nif __name__ == \"__main__\":\n logger = init_loggers()\n options = webdriver.ChromeOptions()\n options.add_argument(\"--headless\")\n chrome = webdriver.Chrome(options=options)\n chrome.get(URL)\n while True:\n logger.info(\"Starting new iteration\")\n try:\n message_to_send = construct_message(chrome)\n send_msg_to_telegram_bot(message_to_send)\n except Exception as e:\n logger.warning(f\"Exception during the flow. Exception details = {str(e)} + {type(e).__name__}\")\n logger.info('---------------UPDATED DONE -------------------')\n logger.info(\"Waiting 2 hours\")\n time.sleep(INTERVAL_REPEAT_TIME_MINUTES * 60)\n chrome.refresh()\n"
},
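NFT_Scrapper.py above slices long reports at Telegram's 4096-character message limit before calling telegram.Bot.send_message. A self-contained sketch of that chunking step follows; the `send` stub is a hypothetical stand-in for the bot call so the example runs without credentials.

    # Sketch of the 4096-character chunking used before telegram.Bot.send_message.
    # `send` is an illustrative stand-in, not the real Telegram API call.
    TELEGRAM_LIMIT = 4096

    def send(chat_id: str, text: str) -> None:
        print(f"-> {chat_id}: {len(text)} chars")

    def send_long_message(chat_id: str, message: str) -> None:
        # Telegram rejects messages longer than 4096 characters, so slice first.
        for start in range(0, len(message), TELEGRAM_LIMIT):
            send(chat_id, message[start:start + TELEGRAM_LIMIT])

    send_long_message("demo-chat", "x" * 10000)  # three chunks: 4096, 4096, 1808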
{
"alpha_fraction": 0.611717700958252,
"alphanum_fraction": 0.616511344909668,
"avg_line_length": 33.14545440673828,
"blob_id": "2286566d5a2d8d25b546a04f33a2e141ec06ef37",
"content_id": "38f4f678b8cbbb9188087fd69bba424233652045",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3755,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 110,
"path": "/asin_ranking.py",
"repo_name": "mormasa/Amazon",
"src_encoding": "UTF-8",
"text": "from selenium import webdriver\nfrom datetime import datetime\nimport time\n\n\ndef get_keywords(file_name=\"dogs.txt\"):\n keywords_to_search = []\n with open(file_name) as file:\n output = [line.strip() for line in file.readlines()]\n for line in output:\n keywords_to_search.append(f\"{line} coffee mug\")\n keywords_to_search.append(f\"{line} mug\")\n keywords_to_search.append(f\"{line} gifts\")\n return keywords_to_search\n\n\ndef get_page_asins(chrome):\n all_products = chrome.find_elements_by_xpath('.//div[@class = \"s-result-list s-search-results sg-row\"]//div[@data-asin]')\n asins = []\n for item in all_products:\n asins.append(item.get_attribute(\"data-asin\"))\n return asins\n\n\ndef get_my_asins_from_inventory_txt_file(inventory_filename):\n parsed_data = []\n with open(inventory_filename) as file:\n data = [line.strip() for line in file.readlines()]\n for line in data:\n parsed_data.append(line.split(\"\\t\"))\n my_asins = [[parsed_data[i][10], parsed_data[i][2]] for i in range(1, len(parsed_data))]\n\n# Remove duplications\n for asin in my_asins:\n orig = True\n for j in range(0, len(my_asins)):\n if asin[0] == my_asins[j][0]:\n if not orig:\n my_asins.remove(asin)\n break\n orig = False\n return my_asins\n\n\ndef look_my_product_in_page(my_asins, page_asins, page_number):\n for asin in my_asins:\n if asin[0] in page_asins:\n print(f\"{asin[1]} ASIN {asin[0]} is in page {page_number}, position: {page_asins.index(asin[0])}\")\n\n\ndef go_to_next_page(chrome):\n next_button = chrome.find_elements_by_xpath('.//li[@class = \"a-last\"]')\n next_button[0].click()\n time.sleep(2)\n\n\ndef get_rank_for_keyword(chrome, keyword, my_asins):\n try:\n search_bar = chrome.find_element_by_id(\"twotabsearchtextbox\")\n search_bar.send_keys(keyword)\n search_bar.submit()\n print(f\"---------------- printing ranking for keyword: {keyword}\")\n for i in range(3):\n print(f\"Checking page number {i + 1}\")\n page_asins = get_page_asins(chrome)\n look_my_product_in_page(my_asins, page_asins, i + 1)\n go_to_next_page(chrome)\n chrome.find_element_by_id(\"twotabsearchtextbox\").clear()\n # except IndexError as e:\n # print(\"Last page. Done\")\n # print(str(e))\n # chrome.find_element_by_id(\"twotabsearchtextbox\").clear()\n except Exception as e:\n print(f\"Exception occured during search of keyord {keyword} page {i+1}. Exception message: {str(e)}\")\n if \"no such element\" in str(e):\n print(f\"Refreshing page and moving to the next keyword\")\n chrome.refresh()\n chrome.find_element_by_id(\"twotabsearchtextbox\").clear()\n\n\n\n\n\n\n\n\ndef main():\n start_time = datetime.now()\n print(f\"Start time: {start_time}\")\n inventory_filename = \"inventory.txt\"\n amazon_url = \"http://www.amazon.com\"\n results = 0\n # string_to_check = [\"Yorkie Coffee Mug\", \"Chihuahua Coffee Mug\", \"Labrador Coffee Mug\", \"Dachshund Coffee Mug\",\n # \"German Shepherd Coffee Mug\", \"Border Collie Coffee Mug\", \"Pitbull Coffee Mug\", \"Pit Bull Coffee Mug\"]\n string_to_check = get_keywords(\"prof.txt\")\n # string_to_check = [\"Beagle gifts\"]\n my_asins = get_my_asins_from_inventory_txt_file(inventory_filename)\n chrome = webdriver.Chrome()\n chrome.get(amazon_url)\n for keyword in string_to_check:\n get_rank_for_keyword(chrome, keyword, my_asins)\n print(\"Done searching. Closing browser..\")\n print(f\"End time: {datetime.now()}\")\n # print(f\"Test Duration: {datetime.now - start_time}\")\n chrome.quit()\n\n\n\nmain()\n# get_keywords()"
},
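The inventory loader in asin_ranking.py removes duplicate ASINs by deleting from `my_asins` while iterating over it, which can skip neighbouring entries. A sketch of an order-preserving equivalent keyed on the ASIN; `dedupe_by_asin` is an illustrative helper name, not one from the repo.

    # Order-preserving de-duplication of [asin, sku] pairs, keyed on the ASIN.
    # Safer than mutating the list being iterated, as the record above does.
    def dedupe_by_asin(pairs):
        seen = set()
        unique = []
        for asin, sku in pairs:
            if asin not in seen:
                seen.add(asin)
                unique.append([asin, sku])
        return unique

    print(dedupe_by_asin([["B01", "mug-a"], ["B02", "mug-b"], ["B01", "mug-a2"]]))
    # [['B01', 'mug-a'], ['B02', 'mug-b']]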
{
"alpha_fraction": 0.5486559271812439,
"alphanum_fraction": 0.5506048202514648,
"avg_line_length": 47.6274528503418,
"blob_id": "4bf58cd6ce7b87644b01891950368ad114aac08a",
"content_id": "727e3bddd311ad1d14b2d50cf7adb64c0f176272",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14880,
"license_type": "no_license",
"max_line_length": 184,
"num_lines": 306,
"path": "/PL_Helper.py",
"repo_name": "mormasa/Amazon",
"src_encoding": "UTF-8",
"text": "import xlsxwriter\nimport os\nimport pandas as pd\nimport math\n\ndocs_folder = os.path.dirname(os.path.realpath(__file__)) + \"\\Docs\"\nbank_statement_file = docs_folder + \"\\BOA.xlsx\"\npaypal_statement_file = docs_folder + \"\\Paypal.xlsx\"\n\n\ndef boa_analysis(statement_file=bank_statement_file):\n aliexpress_balance = 0\n aliexpress_trans = []\n fb_balance = 0\n fb_trans = []\n shopify_balance = 0\n shopify_trans = []\n shopify_refunds = 0\n amazon_sales = 0\n amazon_trans = []\n gb = 0\n gb_trans = []\n todo_description = []\n todo_amount = []\n etsy_balance = 0\n etsy_trans = []\n klaviyo_balance = 0\n klaviyo_trans = []\n adwords_balance = 0\n adwords_trans = []\n stripe_refunds = 0\n stripe_balance = 0\n stripe_balance_trans = []\n dropified_balance = 0\n dropified_trans = []\n custom_happy = 0\n custom_happy_trans = []\n hired_help_trans = []\n shopify_apps = 0\n bank_fees = 0\n bank_fees_trans = []\n apps_and_platforms = []\n advertising_costs = []\n cogs = []\n refunds = []\n\n\n file = pd.read_excel(statement_file)\n description = file['Description']\n amount = file['Amount']\n\n for i in range(len(file['Description'])):\n if \"aliexpress\" in description[i].lower():\n aliexpress_balance = aliexpress_balance + amount[i]\n aliexpress_trans.append(amount[i])\n cogs.append(amount[i])\n elif \"face\" in description[i].lower():\n fb_balance = fb_balance + amount[i]\n fb_trans.append(amount[i])\n advertising_costs.append(amount[i])\n elif \"upwork\" in description[i].lower():\n hired_help_trans.append(amount[i])\n elif \"shopify\" in description[i].lower():\n if amount[i] < 0:\n if amount[i] < -30:\n shopify_apps = shopify_apps + amount[i]\n apps_and_platforms.append(amount[i])\n else:\n shopify_refunds = shopify_refunds + amount[i]\n refunds.append(amount[i])\n shopify_trans.append(amount[i])\n else:\n shopify_balance = shopify_balance + amount[i]\n shopify_trans.append(amount[i])\n elif \"stripe\" in description[i].lower():\n if \"CUSTOMHAPPY\" not in description[i]:\n if amount[i] < 0:\n stripe_refunds = stripe_refunds + amount[i]\n refunds.append(amount[i])\n else:\n stripe_balance = stripe_balance + amount[i]\n stripe_balance_trans.append(amount[i])\n else:\n custom_happy = custom_happy + amount[i]\n custom_happy_trans.append(amount[i])\n cogs.append(amount[i])\n elif \"amzn\" in description[i].lower() or \"amazon\" in description[i].lower():\n amazon_sales = amazon_sales + amount[i]\n amazon_trans.append(amount[i])\n elif \"gearbubble\" in description[i].lower():\n if amount[i] < 0:\n if \"dropship\" in description[i].lower():\n shopify_apps = shopify_apps + amount[i]\n apps_and_platforms.append(amount[i])\n else:\n cogs.append(amount[i])\n else:\n gb = gb + amount[i]\n gb_trans.append(amount[i])\n stripe_balance = stripe_balance + amount[i]\n elif \"etsy\" in description[i].lower():\n etsy_balance = etsy_balance + amount[i]\n etsy_trans.append(amount[i])\n elif \"klaviyo\" in description[i].lower():\n klaviyo_balance = klaviyo_balance + amount[i]\n klaviyo_trans.append(amount[i])\n apps_and_platforms.append(amount[i])\n elif \"GOOGLE\" in description[i] and \"*ADS\" in description[i]:\n adwords_balance = adwords_balance + amount[i]\n adwords_trans.append(amount[i])\n advertising_costs.append(amount[i])\n elif \"dropified\" in description[i].lower():\n dropified_balance = dropified_balance + amount[i]\n dropified_trans.append(amount[i])\n apps_and_platforms.append(amount[i])\n elif \"Online Banking transfer to SAV\" in description[i] or \"Online Banking transfer 
from SAV\" in description[i] \\\n or \"beginning balance\" in description[i].lower() or \"Online scheduled transfer to CHK\" in description[i]\\\n or \"MASARANO GROUP DES:PAYPAL IAT ID:\" in description[i]:\n continue\n elif \"external transfer fee\" in description[i].lower() or \"Online scheduled transfer to\" in description[i].lower()\\\n or \"OVERDRAFT ITEM FEE\" in description[i] or \"Monthly Fee for Business Advantage\" in description[i] \\\n or \"Monthly Fee Business Adv Relationship\" in description[i]:\n bank_fees = bank_fees + amount[i]\n bank_fees_trans.append(amount[i])\n else:\n todo_description.append(description[i])\n todo_amount.append(amount[i])\n\n\n items_to_print = [\"aliexpress_balance\", \"Aliexpress trans\", \"FB balance\", \"FB trans\", \"Shopify sales\", \"Shopify trans\",\n \"Shopify refunds\", \"Shopify apps\", \"Amazon sales\", \"Amazon trans\", \"GB Sales\", \"GB trans\",\n \"Etsy balance\", \"Etsy trans\", \"Klaviyo balance\",\n \"Adwords balance\", \"Adwords trans\", \"Stripe refunds\", \"stripe balance\",\n \"stripe balance trans\", \"Dropified balance\", \"Bank fees\", \"Bank fees trans\", \"Custom Happy\", \"Custom Happy Trans\",\n \"Hired help balance\", \"Hired help trans\"]\n items_amount = [aliexpress_balance, aliexpress_trans, fb_balance, fb_trans, shopify_balance, shopify_trans,\n shopify_refunds, shopify_apps, amazon_sales, amazon_trans, gb, gb_trans, etsy_balance,\n etsy_trans, klaviyo_balance, adwords_balance, adwords_trans,\n stripe_refunds, stripe_balance, stripe_balance_trans, dropified_balance, bank_fees, bank_fees_trans,\n custom_happy, custom_happy_trans, sum(hired_help_trans), hired_help_trans]\n for i in range(len(items_to_print)):\n print(f\"{items_to_print[i]} = {items_amount[i]}\")\n\n print(\"\\n********************************** TO DO **********************************************************************\\n\")\n for i in range(len(todo_description)):\n print(f\"{todo_description[i]} = {todo_amount[i]} \\n\")\n\n\n print(\"\\n********************************** SUMMARY **********************************************************************\\n\")\n summary_items = [\"Sales Amazon\", \"Sales Shopify\", \"Stripe Sales\", \"Sales GB\", \"Sales Etsy\", \"Refunds (Stripe + Shopify)\", \"COGS\", \"Advertisment\", \"Apps\", \"Bank Fees\", \"Hired Help\"]\n summary_values = [amazon_trans, shopify_trans, stripe_balance_trans, gb_trans, etsy_trans, refunds, cogs, advertising_costs, apps_and_platforms, bank_fees_trans, hired_help_trans]\n\n for i in range(len(summary_items)):\n print(f\"{summary_items[i]} = {sum(summary_values[i])} = {summary_values[i]}\\n\")\n\n\ndef paypal_analysis(statement_file=paypal_statement_file):\n\n paypal_sales = 0\n paypal_sales_trans = []\n paypal_fees = 0\n paypal_fees_trans = []\n ebay_balance = 0\n ebay_trans = []\n mor_payment = 0\n mor_payment_trans = []\n paypal_refunds = 0\n paypal_refunds_trans = []\n staff_payment = 0\n staff_payment_trans = []\n apps_and_platforms_fees = 0\n apps_and_platforms = []\n cogs = []\n professional_fees = []\n todo_description = []\n todo_amount = []\n todo_name = []\n fb_balance = 0\n fb_trans = []\n etsy_trans = []\n\n file = pd.read_excel(statement_file)\n fee = file['Fee']\n gross = file['Gross']\n payment_type = file['Type']\n name = file['Name']\n status = file['Status']\n currency = file['Currency']\n\n for i in range(len(file['Gross'])):\n if currency[i] != \"USD\" and status[i] != \"Denied\":\n todo_name.append(name[i])\n todo_description.append(payment_type[i])\n 
todo_amount.append(gross[i])\n continue\n if fee[i] and not math.isnan(fee[i]):\n paypal_fees = paypal_fees + fee[i]\n paypal_fees_trans.append(fee[i])\n if gross[i] < 0:\n if payment_type[i] == \"eBay Auction Payment\":\n ebay_balance = ebay_balance + gross[i]\n ebay_trans.append(gross[i])\n cogs.append(gross[i])\n elif payment_type[i] == \"General Credit Card Withdrawal\" and status[i] == \"Completed\":\n mor_payment = mor_payment + gross[i]\n mor_payment_trans.append(gross[i])\n professional_fees.append(gross[i])\n elif payment_type[i] == \"General Currency Conversion\" or payment_type[i] == \"Hold on Balance for Dispute Investigation\" \\\n or payment_type[i] == \"Chargeback Fee\":\n paypal_fees = paypal_fees + gross[i]\n paypal_fees_trans.append(gross[i])\n elif payment_type[i] == \"General Payment\":\n staff_payment = staff_payment + gross[i]\n staff_payment_trans.append(gross[i])\n elif payment_type[i] == \"Payment Refund\":\n paypal_refunds = paypal_refunds + gross[i]\n paypal_refunds_trans.append(gross[i])\n elif payment_type[i] == \"PreApproved Payment Bill User Payment\":\n if name[i] == \"USZoom\" or name[i] == \"Golan Telecom Ltd\" or \"skype\" in name[i].lower() or \"spotify\" in name[i].lower() or \"benthos labs\" in name[i].lower():\n apps_and_platforms_fees = apps_and_platforms_fees + gross[i]\n apps_and_platforms.append(gross[i])\n elif \"face\" in name[i].lower():\n fb_balance = fb_balance + gross[i]\n fb_trans.append(gross[i])\n elif \"fiverr\" in name[i].lower():\n staff_payment = staff_payment + gross[i]\n staff_payment_trans.append(gross[i])\n else:\n todo_name.append(name[i])\n todo_description.append(payment_type[i])\n todo_amount.append(gross[i])\n elif payment_type[i] == \"Website Payment\":\n if name[i] == \"Ileen Jasmine Lajo\":\n staff_payment = staff_payment + gross[i]\n staff_payment_trans.append(gross[i])\n else:\n todo_name.append(name[i])\n todo_description.append(payment_type[i])\n todo_amount.append(gross[i])\n elif payment_type[i] == \"Express Checkout Payment\":\n cogs.append(gross[i])\n elif payment_type[i] == \"Reserve Hold\" or payment_type[i] == \"Chargeback\" or payment_type[i] == \"General Authorization\" \\\n or payment_type[i] == \"Account Hold for Open Authorization\" or payment_type[i] == \"Payment Reversal\":\n if \"etsy\" in name[i].lower():\n etsy_trans.append(gross[i])\n else:\n paypal_sales = paypal_sales + gross[i]\n paypal_sales_trans.append(gross[i])\n elif payment_type[i] == \"General Withdrawal\":\n continue\n else:\n todo_name.append(name[i])\n todo_description.append(payment_type[i])\n todo_amount.append(gross[i])\n else:\n if status[i] == \"Denied\" or payment_type[i] == \"General Credit Card Deposit\":\n continue\n if payment_type[i] == \"Express Checkout Payment\" or payment_type[i] == \"Mass Pay Payment\" \\\n or payment_type[i] == \"Void of Authorization\" or payment_type[i] == \"General Authorization\" or \\\n payment_type[i] == \"Mobile Payment\" or payment_type[i] == \"Reserve Release\" or \\\n payment_type[i] == \"Reversal of General Account Hold\" or payment_type[i] == \"General Payment\" or payment_type[i] == \"Chargeback Reversal\":\n paypal_sales = paypal_sales + gross[i]\n paypal_sales_trans.append(gross[i])\n elif payment_type[i] == \"Fee Reversal\" or \"PayPal Protection Bonus\" in payment_type[i]:\n paypal_fees = paypal_fees + gross[i]\n paypal_fees_trans.append(gross[i])\n elif payment_type[i] == \"Payment Refund\":\n ebay_balance = ebay_balance + gross[i]\n ebay_trans.append(gross[i])\n cogs.append(gross[i])\n 
elif payment_type[i] == \"Cancellation of Hold for Dispute Resolution\" or payment_type[i] == \"General Currency Conversion\":\n paypal_fees = paypal_fees + gross[i]\n paypal_fees_trans.append(gross[i])\n elif payment_type[i] == \"Invoice Received\" or payment_type[i] == \"Request Received\":\n continue\n else:\n todo_name.append(name[i])\n todo_description.append(payment_type[i])\n todo_amount.append(gross[i])\n\n\n items_to_print = [\"Paypal sales\", \"Paypal sales trans\", \"Paypal fees\", \"Paypal fees trans\", \"Ebay expense\", \"Ebay expense trans\",\n \"Mor payment\", \"Mor payment trans\", \"Paypal refunds\", \"Paypal refunds trans\", \"Staff payment\",\n \"Staff payment trans\", \"Apps payments\", \"Apps payments trans\", \"FB Ads Spent\", \"FB trans\", \"Etsy Balance\", \"Etsy trans\"]\n items_amount = [paypal_sales, paypal_sales_trans, paypal_fees, paypal_fees_trans, ebay_balance, ebay_trans,\n mor_payment, mor_payment_trans, paypal_refunds, paypal_refunds_trans, staff_payment, staff_payment_trans,\n apps_and_platforms_fees, apps_and_platforms, fb_balance, fb_trans, sum(etsy_trans), etsy_trans]\n\n for i in range(len(items_to_print)):\n print(f\"{items_to_print[i]} = {items_amount[i]}\")\n\n print(\"\\n********************************** TO DO **********************************************************************\\n\")\n for i in range(len(todo_description)):\n print(f\"{todo_name[i]}, {todo_description[i]} = {todo_amount[i]} \\n\")\n\n\n print(\"\\n********************************** SUMMARY **********************************************************************\\n\")\n summary_items = [\"Paypal Sales\", \"Paypal Refunds\", \"COGS\", \"Apps\", \"Paypal Fees\", \"Hired Help\", \"Mor Payment\", \"Advertisment\", \"Etsy\"]\n summary_values = [[paypal_sales], [paypal_refunds], cogs, apps_and_platforms, paypal_fees_trans, staff_payment_trans, mor_payment_trans, fb_trans, etsy_trans]\n\n for i in range(len(summary_items)):\n print(f\"{summary_items[i]} = {sum(summary_values[i])} = {summary_values[i]}\\n\")\n\n\n# boa_analysis()\npaypal_analysis()\n"
}
] | 5 |
jriveramerla/collective.pfg.dexterity | https://github.com/jriveramerla/collective.pfg.dexterity | c642920354ff9039a7b5feb1edd609bdcbfe827a | 2735e630721aab7acf405ebf72d6bf46aa2bebde | 03a56a4d4086aea7b37b95e41c23b23349639fc2 | refs/heads/master | 2021-01-16T23:15:37.091483 | 2013-03-15T17:11:08 | 2013-03-15T17:11:08 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6113409399986267,
"alphanum_fraction": 0.6142709255218506,
"avg_line_length": 36.92156982421875,
"blob_id": "1f8d3eb6eaab66b24c5d690a69e1d09f5ccb6063",
"content_id": "68da950a0f2f95d34068afdd7af3511a1b53c249",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5802,
"license_type": "no_license",
"max_line_length": 130,
"num_lines": 153,
"path": "/src/collective/pfg/dexterity/testing_robot.py",
"repo_name": "jriveramerla/collective.pfg.dexterity",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom OFS.SimpleItem import SimpleItem\nfrom Products.PlonePAS.Extensions.Install import activatePluginInterfaces\nfrom Products.PluggableAuthService.plugins import DomainAuthHelper\n\n\nclass RemoteKeywordsLibrary(SimpleItem):\n \"\"\"Robot Framework Remote Library Tool for Plone\n\n See also: http://robotframework.googlecode.com/hg/doc/userguide/RobotFrameworkUserGuide.html?r=2.7.65#remote-library-interface\n\n \"\"\"\n def get_keyword_names(self):\n \"\"\"Return names of the implemented keywords\n \"\"\"\n blacklist = dir(SimpleItem)\n blacklist.extend(['get_keyword_names', 'run_keyword'])\n names = filter(lambda x: x[0] != '_' and x not in blacklist, dir(self))\n return names\n\n def run_keyword(self, name, args):\n \"\"\"Execute the specified keyword with given arguments.\n \"\"\"\n func = getattr(self, name, None)\n result = {'error': '', 'return': ''}\n try:\n retval = func(*args)\n except Exception, e:\n result['status'] = 'FAIL'\n result['error'] = str(e)\n else:\n result['status'] = 'PASS'\n result['return'] = retval\n result['output'] = retval\n return result\n\n def product_is_activated(self, product_name):\n \"\"\"Assert that given product_name is activated in\n portal_quickinstaller.\n\n \"\"\"\n from Products.CMFCore.utils import getToolByName\n quickinstaller = getToolByName(self, \"portal_quickinstaller\")\n assert quickinstaller.isProductInstalled(product_name),\\\n \"Product '%s' was not installed.\" % product_name\n\n def enable_autologin_as(self, *args):\n \"\"\"Add and configure DomainAuthHelper PAS-plugin to login\n all anonymous users from localhost as a special *Remote User* with\n one or more given roles. Examples of use::\n\n Enable autologin as Manager\n Enable autologin as Site Administrator\n Enable autologin as Member Contributor\n\n \"\"\"\n if \"robot_login\" in self.acl_users.objectIds():\n self.acl_users.robot_login._domain_map.clear()\n else:\n DomainAuthHelper.manage_addDomainAuthHelper(\n self.acl_users, \"robot_login\")\n activatePluginInterfaces(self, \"robot_login\")\n user = \", \".join(sorted(args))\n self.acl_users.robot_login.manage_addMapping(\n match_type=\"regex\", match_string=\".*\", roles=args, username=user)\n\n def set_autologin_username(self, login):\n \"\"\"Update autologin mapping with the given username\n \"\"\"\n if \"robot_login\" not in self.acl_users.objectIds():\n raise Exception(u\"Autologin is not enabled\")\n if len(self.acl_users.robot_login._domain_map) == 0:\n raise Exception(u\"Autologin is not enabled\")\n domain_map_key = self.acl_users.robot_login._domain_map.keys()[0]\n domain_map = self.acl_users.robot_login._domain_map[domain_map_key]\n domain_map[0][\"user_id\"] = domain_map[0][\"username\"] = login\n self.acl_users.robot_login._domain_map[domain_map_key] = domain_map\n\n def disable_autologin(self):\n \"\"\"Clear DomainAuthHelper's map to effectively 'logout' user\n after 'autologin_as'. 
Example of use::\n\n Disable autologin\n\n \"\"\"\n if \"robot_login\" in self.acl_users.objectIds():\n self.acl_users.robot_login._domain_map.clear()\n\n def portal_type_is_installed(self, portal_type):\n ids = self.portal_types.objectIds()\n titles = map(lambda x: x.title, self.portal_types.objectValues())\n assert portal_type in ids + titles,\\\n u\"'%s' was not found in portal types.\" % portal_type\n\n def change_ownership(self, path, user_id):\n from AccessControl.interfaces import IOwned\n obj = self.restrictedTraverse(path)\n\n acl_users = self.get('acl_users')\n if acl_users:\n user = acl_users.getUser(user_id)\n if not user:\n root = self.getPhysicalRoot()\n acl_users = root.get('acl_users')\n if acl_users:\n user = acl_users.getUser(user_id)\n\n IOwned(obj).changeOwnership(user, recursive=1)\n\n def create_type_with_date_field(self, name):\n from plone.dexterity.fti import DexterityFTI\n fti = DexterityFTI(str(name), title=name)\n fti.behaviors = (\"plone.app.dexterity.behaviors.metadata.IBasic\",)\n fti.model_source = u\"\"\"\\\n<model xmlns=\"http://namespaces.plone.org/supermodel/schema\">\n<schema>\n<field name=\"duedate\" type=\"zope.schema.Date\">\n <description />\n <required>False</required>\n <title>Due Date</title>\n</field>\n</schema>\n</model>\"\"\"\n self.portal_types._setObject(str(name), fti)\n\n def report_sauce_status(self, job_id, test_status, test_tags=[]):\n import os\n import httplib\n import base64\n try:\n import json\n json # pyflakes\n except ImportError:\n import simplejson as json\n\n username = os.environ.get('SAUCE_USERNAME')\n access_key = os.environ.get('SAUCE_ACCESS_KEY')\n\n if not job_id:\n return u\"No Sauce job id found. Skipping...\"\n elif not username or not access_key:\n return u\"No Sauce environment variables found. Skipping...\"\n\n token = base64.encodestring('%s:%s' % (username, access_key))[:-1]\n body = json.dumps({'passed': test_status == 'PASS',\n 'tags': test_tags})\n\n connection = httplib.HTTPConnection('saucelabs.com')\n connection.request('PUT', '/rest/v1/%s/jobs/%s' % (\n username, job_id), body,\n headers={'Authorization': 'Basic %s' % token}\n )\n return connection.getresponse().status\n"
},
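testing_robot.py implements the Robot Framework remote-library contract by reflecting its own public method names and dispatching run_keyword calls through getattr, wrapping the outcome in a status dict. A compact Python 3 sketch of that pattern follows (the original uses Python 2's `except Exception, e` spelling); `TinyKeywordLibrary` and its `add` keyword are invented for illustration.

    # Minimal sketch of the remote-keyword dispatch pattern used in testing_robot.py:
    # reflect public methods, call by name, wrap the outcome in a status dict.
    class TinyKeywordLibrary:
        def get_keyword_names(self):
            blacklist = {"get_keyword_names", "run_keyword"}
            return [n for n in dir(self)
                    if not n.startswith("_") and n not in blacklist]

        def run_keyword(self, name, args):
            result = {"error": "", "return": ""}
            try:
                result["return"] = getattr(self, name)(*args)
                result["status"] = "PASS"
            except Exception as exc:  # Python 3 spelling of `except Exception, e`
                result["status"] = "FAIL"
                result["error"] = str(exc)
            return result

        def add(self, a, b):  # example keyword, not part of the original library
            return int(a) + int(b)

    lib = TinyKeywordLibrary()
    print(lib.get_keyword_names())         # ['add']
    print(lib.run_keyword("add", [2, 3]))  # status PASS, return 5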
{
"alpha_fraction": 0.6967850923538208,
"alphanum_fraction": 0.7008460164070129,
"avg_line_length": 33.764705657958984,
"blob_id": "329eef7fce406cc27ab8a5ac20edb84fd1ace65d",
"content_id": "06a3912b68117392b6f7f215776e7ffc70713e61",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2955,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 85,
"path": "/src/collective/pfg/dexterity/testing.py",
"repo_name": "jriveramerla/collective.pfg.dexterity",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom plone.app.testing import (\n FunctionalTesting,\n IntegrationTesting,\n PLONE_FIXTURE,\n PloneSandboxLayer,\n)\nfrom plone.testing import z2\n\n\nclass Layer(PloneSandboxLayer):\n defaultBases = (PLONE_FIXTURE,)\n\n def setUpZope(self, app, configurationContext):\n import plone.app.dexterity\n self.loadZCML(package=plone.app.dexterity)\n\n import Products.PloneFormGen\n self.loadZCML(package=Products.PloneFormGen)\n z2.installProduct(app, \"Products.PloneFormGen\")\n\n import Products.DataGridField\n self.loadZCML(package=Products.DataGridField)\n z2.installProduct(app, \"Products.DataGridField\")\n\n import collective.pfg.dexterity\n self.loadZCML(package=collective.pfg.dexterity)\n z2.installProduct(app, \"collective.pfg.dexterity\")\n\n def setUpPloneSite(self, portal):\n # PLONE_FIXTURE has no default workflow chain set\n portal.portal_workflow.setDefaultChain(\"simple_publication_workflow\")\n\n self.applyProfile(portal, \"plone.app.dexterity:default\")\n self.applyProfile(portal, \"Products.PloneFormGen:default\")\n self.applyProfile(portal, \"Products.DataGridField:default\")\n self.applyProfile(portal, \"collective.pfg.dexterity:default\")\n\n def tearDownZope(self, app):\n z2.uninstallProduct(app, \"collective.pfg.dexterity\")\n z2.uninstallProduct(app, \"Products.DataGridField\")\n z2.uninstallProduct(app, \"Products.PloneFormGen\")\n\n def testSetUp(self):\n # XXX: How should we invalidate Dexterity fti.lookupSchema() cache?\n import plone.dexterity.schema\n for name in dir(plone.dexterity.schema.generated):\n if name.startswith(\"plone\"):\n delattr(plone.dexterity.schema.generated, name)\n plone.dexterity.schema.SCHEMA_CACHE.clear()\n\n\nFIXTURE = Layer()\n\n\nINTEGRATION_TESTING = IntegrationTesting(\n bases=(FIXTURE,), name=\"Integration\")\nFUNCTIONAL_TESTING = FunctionalTesting(\n bases=(FIXTURE,), name=\"Functional\")\nACCEPTANCE_TESTING = FunctionalTesting(\n bases=(FIXTURE, z2.ZSERVER_FIXTURE), name=\"Acceptance\")\n\n\nclass RobotLayer(PloneSandboxLayer):\n defaultBases = (FIXTURE,)\n\n def setUpPloneSite(self, portal):\n # Inject keyword for getting the selenium session id\n import Selenium2Library\n Selenium2Library.keywords._browsermanagement.\\\n _BrowserManagementKeywords.get_session_id = lambda self:\\\n self._cache.current.session_id\n # Inject remote keywords library into site\n from collective.pfg.dexterity import testing_robot\n portal._setObject(\"RemoteKeywordsLibrary\",\n testing_robot.RemoteKeywordsLibrary())\n\n def tearDownPloneSite(self, portal):\n portal._delObject(\"RemoteKeywordsLibrary\")\n\nROBOT_FIXTURE = RobotLayer()\n\n\nROBOT_TESTING = FunctionalTesting(\n bases=(ROBOT_FIXTURE, z2.ZSERVER_FIXTURE), name=\"Robot\")\n"
}
] | 2 |
udikshasingh/VSpeak | https://github.com/udikshasingh/VSpeak | ea17c7b585244a996ff2ebdf05e27dc1b6cec26c | af527291512d9d45295cedf7ce74899cd57b937b | 289ae1c6373a59ab3da7bd8b6f5d79c90ae66c78 | refs/heads/master | 2022-12-06T00:50:55.113764 | 2020-08-22T23:33:11 | 2020-08-22T23:33:11 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5727781057357788,
"alphanum_fraction": 0.5764411091804504,
"avg_line_length": 33.23762512207031,
"blob_id": "59a778fd96a2d0e7c29bfe47ea476bff84b4a513",
"content_id": "1019ccf126437caaefcc10faa5849c2d7320bdc8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "TypeScript",
"length_bytes": 10377,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 303,
"path": "/src/extension.ts",
"repo_name": "udikshasingh/VSpeak",
"src_encoding": "UTF-8",
"text": "import { spawn } from \"child_process\";\nimport { join } from \"path\";\nimport { commands } from \"vscode\";\n\n// The module 'vscode' contains the VS Code extensibility API\n// Import the module and reference it with the alias vscode in your code below\nimport * as vscode from \"vscode\";\nimport COMMANDS from \"./commands\";\nvar MAP = new Map();\n// var HashMap = require(\"hashmap\");\n// var map: {\n// set: (arg0: any, arg1: any) => void;\n// has: (arg0: string) => any;\n// get: (arg0: string) => string;\n// };\n\n// this method is called when your extension is activated\n// your extension is activated the very first time the command is executed\nexport function activate(context: vscode.ExtensionContext) {\n console.log('Congratulations, your extension \"extension\" is now active!');\n\n new SpeechListener(context);\n // Temporary blank command used to activate the extension through the command palette\n let disposable = commands.registerCommand(\"extension.activateVSpeak\", () => {\n commands.executeCommand(\"start_listen\");\n vscode.window.showInformationMessage(\"VSpeak is activated\");\n });\n\n context.subscriptions.push(disposable);\n\n context.subscriptions.push(\n vscode.commands.registerCommand(\"extension.deactivateVSpeak\", () => {\n commands.executeCommand(\"stop_listen\");\n vscode.window.showInformationMessage(\"VSpeak is deactivated\");\n })\n );\n for (var i = 0; i < COMMANDS.length; i++) {\n var item = COMMANDS[i];\n MAP.set(COMMANDS[i].command, COMMANDS[i].exec);\n }\n}\n\nclass SpeechListener {\n private execFile: any;\n private child: any;\n private sttbar: SttBarItem;\n\n constructor(context: vscode.ExtensionContext) {\n this.execFile = spawn;\n this.sttbar = new SttBarItem();\n const d1 = commands.registerCommand(\"toggle\", () => {\n if (this.sttbar.getSttText() === \"on\") {\n this.sttbar.off();\n this.killed();\n } else {\n this.sttbar.on();\n this.run();\n }\n });\n const d2 = commands.registerCommand(\"stop_listen\", () => {\n this.sttbar.off();\n this.killed();\n });\n const d3 = commands.registerCommand(\"start_listen\", () => {\n this.sttbar.on();\n this.run();\n });\n context.subscriptions.concat([d1, d2, d3]);\n this.sttbar.setSttCmd(\"toggle\");\n }\n\n run() {\n print(\"Trying to run speech detection\");\n this.child = this.execFile(\"python3\", [\n join(__dirname, \"tts.py\"),\n ]).on(\"error\", (error: any) => print(error));\n this.child.stdout.on(\"data\", (data: Buffer) => {\n //print(data);\n let commandRunner = new CommandRunner();\n commandRunner.runCommand(data.toString().trim());\n });\n\n this.child.stderr.on(\"data\", (data: any) => print(data));\n }\n\n killed() {\n this.child.kill();\n }\n}\n\nclass SttBarItem {\n private statusBarItem: vscode.StatusBarItem;\n private statusText: string;\n\n constructor() {\n this.statusBarItem = vscode.window.createStatusBarItem(\n vscode.StatusBarAlignment.Left,\n 10\n );\n this.statusText = \"off\";\n this.off();\n }\n\n on() {\n this.statusBarItem.text = \"VSpeak listening!\";\n this.statusBarItem.show();\n this.statusText = \"on\";\n }\n\n off() {\n this.statusBarItem.text = \"VSpeak off 🤐\";\n this.statusBarItem.show();\n this.statusText = \"off\";\n }\n\n getSttText() {\n return this.statusText;\n }\n\n setSttCmd(cmd: string | undefined) {\n this.statusBarItem.command = cmd;\n }\n}\n\nclass CommandRunner {\n runCommand(receivedString: string) {\n print(\"Command received: \" + receivedString);\n let activeTextEditor;\n let lineNumber;\n const words = receivedString.split(\" \");\n const 
status = words[0] === \"success\";\n // var result = receivedString.substr(receivedString.indexOf(\" \") + 1);\n // print(result.trim());\n\n // added vscode.window.state.focused because commands should only run when vs code window is in the foreground\n if (status && vscode.window.state.focused) {\n vscode.window.setStatusBarMessage(\"Success!\", 3000);\n const commandWords = words.slice(1);\n if (MAP.has(commandWords[0])) {\n commands.executeCommand(MAP.get(commandWords[0]));\n } else {\n switch (commandWords[0]) {\n case \"continue\":\n if (vscode.debug.activeDebugSession) {\n print(\"Context aware continue while in debug\");\n commands.executeCommand(\"workbench.action.debug.continue\");\n } else {\n print('Falling back as no context found for \"continue\"');\n }\n break;\n case \"stop\":\n if (vscode.debug.activeDebugSession) {\n print('Context aware \"stop\" while in debug');\n commands.executeCommand(\"workbench.action.debug.stop\");\n } else {\n print('Falling back as no context found for \"stop\"');\n }\n break;\n case \"continue\":\n if (vscode.debug.activeDebugSession) {\n commands.executeCommand(\"workbench.action.debug.continue\");\n }\n break;\n case \"search_google\":\n activeTextEditor = vscode.window.activeTextEditor;\n if (activeTextEditor) {\n const text = activeTextEditor.document.getText(\n activeTextEditor.selection\n );\n vscode.env.openExternal(\n vscode.Uri.parse(\"https://www.google.com/search?q=\" + text)\n );\n }\n break;\n case \"navigate_line\":\n commands.executeCommand(\"workbench.action.focusActiveEditorGroup\");\n lineNumber = parseInt(commandWords[1]);\n activeTextEditor = vscode.window.activeTextEditor;\n if (activeTextEditor) {\n const range = activeTextEditor.document.lineAt(lineNumber - 1)\n .range;\n activeTextEditor.selection = new vscode.Selection(\n range.start,\n range.start\n );\n activeTextEditor.revealRange(range);\n }\n break;\n case \"breakpoint_add\":\n commands.executeCommand(\"workbench.action.focusActiveEditorGroup\");\n lineNumber = parseInt(commandWords[1]);\n activeTextEditor = vscode.window.activeTextEditor;\n if (activeTextEditor) {\n let position = new vscode.Position(lineNumber - 1, 0);\n let location = new vscode.Location(\n activeTextEditor.document.uri,\n position\n );\n let breakpointToAdd = [\n new vscode.SourceBreakpoint(location, true),\n ];\n vscode.debug.addBreakpoints(breakpointToAdd);\n }\n break;\n case \"breakpoint_remove\":\n commands.executeCommand(\"workbench.action.focusActiveEditorGroup\");\n lineNumber = parseInt(commandWords[1]);\n activeTextEditor = vscode.window.activeTextEditor;\n if (activeTextEditor) {\n let existingBreakPoints = vscode.debug.breakpoints;\n for (let breakpoint of existingBreakPoints) {\n if (\n breakpoint instanceof vscode.SourceBreakpoint &&\n breakpoint.location.uri.path ===\n activeTextEditor.document.uri.path &&\n breakpoint.location.range.start.line === lineNumber - 1\n ) {\n vscode.debug.removeBreakpoints([breakpoint]);\n }\n }\n }\n break;\n // case \"navigate_file\":\n // vscode.commands.executeCommand(\"workbench.action.quickOpen\");\n // vscode.window.showQuickPick();\n // console.debug(vscode.workspace.);\n // console.debug(vscode.workspace.asRelativePath(\".\"));\n // vscode.workspace.fs\n // .readDirectory(\n // vscode.Uri.file(vscode.workspace.asRelativePath(\".\"))\n // )\n // .then(files => {\n // let filenames: string[] = files.map(\n // (filename, filetype) => filename[0]\n // );\n // vscode.window.showQuickPick(filenames);\n // });\n // break;\n case \"copy\":\n 
activeTextEditor = vscode.window.activeTextEditor;\n if (activeTextEditor) {\n const text = activeTextEditor.document.getText(\n activeTextEditor.selection\n );\n vscode.env.clipboard.writeText(text);\n }\n break;\n case \"navigate_class\":\n let className = commandWords[1];\n // TODO: implement functionality\n break;\n case \"run_file\":\n activeTextEditor = vscode.window.activeTextEditor;\n if (activeTextEditor) {\n activeTextEditor.document.save(); //should probably save all files\n const currentFileName = activeTextEditor.document.fileName;\n const activeTerminal = vscode.window.activeTerminal;\n if (activeTerminal) {\n if (activeTextEditor.document.languageId === \"python\") {\n // TODO: implement functionality for other languages\n activeTerminal.sendText(\"python \" + currentFileName);\n } else {\n vscode.window.showErrorMessage(\n \"Oops! Unsupported language for run commapnd\"\n );\n }\n }\n }\n break;\n case \"copy_file\":\n // TODO: implement functionality\n break;\n case \"git_status\":\n const activeTerminal = vscode.window.activeTerminal;\n if (\n activeTerminal &&\n vscode.extensions.getExtension(\"vscode.git\")\n ) {\n activeTerminal.sendText(\"git status\");\n }\n break;\n }\n }\n } else {\n vscode.window.setStatusBarMessage(\n \"Recognition Failure (\" +\n receivedString.substr(receivedString.indexOf(\" \") + 1) +\n \")\",\n 3000\n );\n }\n }\n}\n\n// helper method for printing to console\nfunction print(data: any) {\n console.log(\"Vspeak Debug: \" + data.toString());\n}\n\n// this method is called when your extension is deactivated\nexport function deactivate() {\n vscode.window.setStatusBarMessage(\"\");\n}\n"
},
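extension.ts above relies on a whitespace-separated stdout protocol from the Python recognizer: lines beginning with "success" carry a command id plus arguments, and anything else is treated as a fallback and ignored. A sketch of that line format from the parsing side; `parse_recognition_line` is a hypothetical helper, not code from the repo.

    # Sketch of the stdout protocol linking tts.py to extension.ts:
    # "success <command> [args...]" is actionable, "fallback <transcript>" is not.
    def parse_recognition_line(line: str):
        words = line.strip().split(" ")
        if words and words[0] == "success" and len(words) > 1:
            command, args = words[1], words[2:]
            return command, args
        return None  # fallback and malformed lines are ignored by the extension

    print(parse_recognition_line("success navigate_line 42"))  # ('navigate_line', ['42'])
    print(parse_recognition_line("fallback go two lines"))     # None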
{
"alpha_fraction": 0.7996070981025696,
"alphanum_fraction": 0.7996070981025696,
"avg_line_length": 55.38888931274414,
"blob_id": "60726282bd98bd4cd4c487f6b3394834f5522317",
"content_id": "3c898f0ea398e04a5d70d19b658edbdc56441839",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1024,
"license_type": "no_license",
"max_line_length": 258,
"num_lines": 18,
"path": "/README.md",
"repo_name": "udikshasingh/VSpeak",
"src_encoding": "UTF-8",
"text": "\n## Dependency Installation\n\nFollowing command can be used for dependency installation:\n\nnpm install\n\n## System Architecture\n\nVSpeak is a VS Code extension that provides voice-based control of the IDE to the user.\n\nOnce the extension is loaded, user can give speech commands to the focused editor window. For the speech to text conversion, Google Cloud Speech to Text API is used.\n\nThe command interpretation process is an amalgamation of identifying commands on both python and javascript end.\n\n * In the first phase, a command is interpreted from the generated text using the command dictionary on the python end.\n * In the second phase, results from python’s interpreter are then mapped to the extension’s VS Code API functions on the javascript end.\n\nAfter successful recognition of speech and interpretation, it is either executed, or there is a fall back to a failure scenario where no command is identified and the speech gets ignored. The result of the recognition is displayed on the editor’s status bar.\n\n\n"
},
{
"alpha_fraction": 0.5724944472312927,
"alphanum_fraction": 0.5766396522521973,
"avg_line_length": 37.09122848510742,
"blob_id": "c5a2a3230f1f3a07a7dae7f682ffe6918f05c1bd",
"content_id": "2e2ef88c9bf8d5c1f25ec76a420901a9b41f4600",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10856,
"license_type": "no_license",
"max_line_length": 130,
"num_lines": 285,
"path": "/out/tts.py",
"repo_name": "udikshasingh/VSpeak",
"src_encoding": "UTF-8",
"text": "from __future__ import division\n\nimport re\nimport sys\nimport os\n\nfrom google.cloud import speech\nfrom google.cloud.speech import enums\nfrom google.cloud.speech import types\nfrom google.oauth2 import service_account\nimport json\nimport pyaudio\nfrom six.moves import queue\nfrom commands import Commands\n\n# Audio recording parameters\nRATE = 16000\nCHUNK = int(RATE / 10) # 100ms\ncommandObj = Commands()\n\n\nclass MicrophoneStream(object):\n \"\"\"Opens a recording stream as a generator yielding the audio chunks.\"\"\"\n\n def __init__(self, rate, chunk):\n self._rate = rate\n self._chunk = chunk\n\n # Create a thread-safe buffer of audio data\n self._buff = queue.Queue()\n self.closed = True\n self.commands = {}\n\n def __enter__(self):\n self._audio_interface = pyaudio.PyAudio()\n self._audio_stream = self._audio_interface.open(\n format=pyaudio.paInt16,\n # The API currently only supports 1-channel (mono) audio\n # https://goo.gl/z757pE\n channels=1,\n rate=self._rate,\n input=True,\n frames_per_buffer=self._chunk,\n # Run the audio stream asynchronously to fill the buffer object.\n # This is necessary so that the input device's buffer doesn't\n # overflow while the calling thread makes network requests, etc.\n stream_callback=self._fill_buffer,\n )\n\n self.closed = False\n\n return self\n\n def __exit__(self, type, value, traceback):\n self._audio_stream.stop_stream()\n self._audio_stream.close()\n self.closed = True\n # Signal the generator to terminate so that the client's\n # streaming_recognize method will not block the process termination.\n self._buff.put(None)\n self._audio_interface.terminate()\n\n def _fill_buffer(self, in_data, frame_count, time_info, status_flags):\n \"\"\"Continuously collect data from the audio stream, into the buffer.\"\"\"\n self._buff.put(in_data)\n return None, pyaudio.paContinue\n\n def generator(self):\n while not self.closed:\n # Use a blocking get() to ensure there's at least one chunk of\n # data, and stop iteration if the chunk is None, indicating the\n # end of the audio stream.\n chunk = self._buff.get()\n if chunk is None:\n return\n data = [chunk]\n\n # Now consume whatever other data's still buffered.\n while True:\n try:\n chunk = self._buff.get(block=False)\n if chunk is None:\n return\n data.append(chunk)\n except queue.Empty:\n break\n\n yield b\"\".join(data)\n\n\ndef listen_print_loop(responses):\n \"\"\"Iterates through server responses and prints them.\n\n The responses passed is a generator that will block until a response\n is provided by the server.\n\n Each response may contain multiple results, and each result may contain\n multiple alternatives; for details, see https://goo.gl/tjCPAU. Here we\n print only the transcription for the top alternative of the top result.\n\n In this case, responses are provided for interim results as well. If the\n response is an interim one, print a line feed at the end of it, to allow\n the next result to overwrite it, until the response is a final one. For the\n final one, print a newline to preserve the finalized transcription.\n \"\"\"\n for response in responses:\n if not response.results:\n continue\n\n # The `results` list is consecutive. 
For streaming, we only care about\n # the first result being considered, since once it's `is_final`, it\n # moves on to considering the next utterance.\n result = response.results[0]\n if not result.alternatives:\n continue\n\n # Display the transcription of the top alternative.\n transcript = result.alternatives[0].transcript\n\n if result.is_final:\n processTranscript2(transcript)\n\n\ndef processTranscript(transcript):\n sys.stdout.flush()\n transcriptWords = transcript.split()\n transcriptWordsCount = len(transcriptWords)\n if \"go\" in transcriptWords:\n if transcriptWordsCount > 1:\n if \"line\" in transcriptWords:\n # case: commands may be: go to line x, go to line number x, go\n numbers = [int(s) for s in transcript.split() if s.isdigit()]\n if len(numbers) == 1:\n print(\"success\", \"navigate_line\", numbers[0])\n else:\n # case: if the transcript contains two different numbers, it may be noise or the user error\n # or it does not have any number; we can't execute navigate_line command in either case\n print(\"fallback\", transcript)\n elif \"definition\" in transcriptWords:\n if transcriptWordsCount < 5:\n excludedWords = set(transcriptWords) - {\n \"go\",\n \"to\",\n \"definition\",\n \"class\",\n \"function\",\n \"variable\",\n \"symbol\",\n }\n if len(excludedWords) > 0:\n # commands: go (to) (class/function/variable/symbol) definition\n print(\"success\", \"navigate_definition\")\n else:\n # case: likely noise or false positive\n print(\"fallback\", transcript)\n else:\n # case: likely noise or false positive\n print(\"fallback\", transcript)\n elif \"class\" in transcriptWords:\n if transcriptWordsCount > 2:\n # command: 'go (to) file fileName' (assuming implicitly that the last word will be the fileName)\n print(\n \"success\",\n \"navigate_class\",\n transcriptWords[transcriptWordsCount - 1],\n )\n else:\n print(\"fallback\", transcript)\n elif \"terminal\" in transcriptWords and transcriptWordsCount < 4:\n print(\"navigate_terminal\")\n elif \"file\" in transcriptWords:\n if transcriptWordsCount > 2:\n # command: 'go (to) file fileName' (assuming implicitly that the last word will be the fileName)\n print(\n \"success\",\n \"navigate_file\",\n transcriptWords[transcriptWordsCount - 1],\n )\n else:\n print(\"fallback\", transcript)\n else:\n print(\"fallback\")\n else:\n # case: 'go' by itself has no meaning, we need another argument to gather where do we need to navigate\n print(\"fallback\", transcript)\n elif \"copy\" in transcript:\n if transcriptWordsCount == 1:\n # commands: copy\n print(\"success\", \"copy\")\n elif \"file\" in transcriptWords < 4 and (\n len(set(transcriptWords) - {\"copy\", \"current\", \"this\"})\n ):\n # case: commands: copy (current/this) file\n print(\"success\", \"copy_file\")\n else:\n # case: the user may want to copy something else which we don't support yet or the detected 'copy' phrase may be noise\n print(\"fallback\", transcript)\n elif \"format\" in transcriptWords:\n if transcriptWordsCount == 1:\n # commands: format\n print(\"success\", \"format_document\")\n else:\n if \"selection\" in transcriptWords or \"selected\" in transcriptWords:\n # commands: format selected text, format selected, format selection\n print(\"success\", \"format_selection\")\n elif transcriptWordsCount == 2 and (\n \"text\" in transcriptWords or \"document\" in transcriptWords\n ):\n # commands: format text or format document\n print(\"success\", \"format_document\")\n else:\n # case: detected 'format' should be false positive\n print(\"fallback\", 
transcript)\n elif (\n \"terminal\" in transcriptWords\n and \"open\" in transcriptWords\n and transcriptWordsCount < 5\n ):\n # commands: open (a) (new) terminal\n print(\"success\", \"open_terminal\")\n\n elif \"run\" in transcriptWords:\n if transcriptWordsCount == 1:\n # case: user may want to run the entire project, my interpretation is running this file (need to evaluate this)\n print(\"run_file\")\n # case: discarded cases of noise or false positives\n elif (\n transcriptWordsCount < 4\n and len(set(transcriptWords) - {\"this\", \"current\", \"file\", \"project\"}) > 0\n ):\n # case: discarded cases of noise or false positives\n if \"file\" in transcriptWords:\n print(\"run_file\")\n elif \"project\" in transcriptWords:\n print(\"run_project\")\n else:\n print(\"fallback\", transcript)\n else:\n print(\"fallback\", transcript)\n\n else:\n # default case when neither a command is matched nor a fallback is reached\n # but we still output the transcript for debugging and analysing\n print(\"fallback\", transcript)\n\n\ndef processTranscript2(transcript):\n commandObj.getCommand(transcript)\n\n\ndef main():\n # See http://g.co/cloud/speech/docs/languages\n # for a list of supported languages.\n language_code = \"en-IN\" # a BCP-47 language tag\n dialogflow_key = json.load(open(sys.path[0] + \"/chatbot.json\"))\n credentials = service_account.Credentials.from_service_account_info(dialogflow_key)\n client = speech.SpeechClient(credentials=credentials)\n\n config = types.RecognitionConfig(\n encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,\n sample_rate_hertz=RATE,\n language_code=language_code,\n )\n streaming_config = types.StreamingRecognitionConfig(\n config=config, interim_results=True\n )\n\n with MicrophoneStream(RATE, CHUNK) as stream:\n audio_generator = stream.generator()\n requests = (\n types.StreamingRecognizeRequest(audio_content=content)\n for content in audio_generator\n )\n\n responses = client.streaming_recognize(streaming_config, requests)\n\n # Now, put the transcription responses to use.\n try:\n listen_print_loop(responses)\n except:\n main()\n\n\nif __name__ == \"__main__\":\n main()\n"
},
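processTranscript in tts.py above accepts "go to line <n>" only when exactly one integer can be extracted from the transcript; zero or several numbers are treated as noise or user error and fall back. A minimal sketch of that guard as a standalone function; `parse_go_to_line` is an illustrative name.

    # Sketch of the single-number guard tts.py applies to "go to line <n>".
    def parse_go_to_line(transcript: str):
        words = transcript.split()
        if "go" not in words or "line" not in words:
            return ("fallback", transcript)
        numbers = [int(w) for w in words if w.isdigit()]
        if len(numbers) == 1:
            return ("success", "navigate_line", numbers[0])
        # Zero or several numbers is ambiguous, so the speech is ignored.
        return ("fallback", transcript)

    print(parse_go_to_line("go to line 42"))   # ('success', 'navigate_line', 42)
    print(parse_go_to_line("go to line 4 2"))  # ('fallback', 'go to line 4 2')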
{
"alpha_fraction": 0.2552781403064728,
"alphanum_fraction": 0.2663879096508026,
"avg_line_length": 36.07143020629883,
"blob_id": "4af8eff77edb62caa6722e12125ef9e5673a5dae",
"content_id": "7ee3c98508396088483bab5bab24d56a67ea35e9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 24393,
"license_type": "no_license",
"max_line_length": 142,
"num_lines": 658,
"path": "/out/commands.py",
"repo_name": "udikshasingh/VSpeak",
"src_encoding": "UTF-8",
"text": "# CONSTANT = {\n# \"number\": \"$number\",\n# \"string\": \"$string\",\n# \"function\": \"$function\",\n# \"selected\": \"$selected\",\n# }\n\n\nclass Commands:\n def __init__(self):\n self.commanKeyDict = {\n \"open\": {\n \"tags\": [\"open\"],\n \"attributes\": {\n \"name\": [\n \"new file\",\n \"file\",\n \"folder\",\n \"workspace\",\n \"terminal\",\n \"settings\",\n \"new window\",\n \"\",\n ],\n \"parameter\": [\"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\"],\n \"command\": [\n \"new_file\",\n \"open_file\",\n \"open_file\", # we execute the same command on the vscode side for folder/file, so send the same command from here too\n \"open_workspace\",\n \"navigate_terminal\",\n \"open_settings\",\n \"open_window\",\n \"open_file\",\n ],\n \"wordlen\": [\n (3, 4),\n (2, 3),\n (2, 3),\n (2, 3),\n (2, 3),\n (2, 3),\n (3, 4),\n (1, 1),\n ],\n },\n },\n \"run\": {\n \"tags\": [\"run\"],\n \"attributes\": {\n \"name\": [\"file\", \"project\", \"\"],\n \"parameter\": [\"\", \"\", \"\"],\n \"command\": [\n \"run_file\",\n \"run_project\",\n \"run_file\", # could be run_project when nothing is specified, need to understand user's expectation\n ],\n \"wordlen\": [(2, 4), (2, 4), (1, 1)],\n },\n },\n \"debug\": {\n \"tags\": [\"debug\", \"debugging\", \"debugger\"],\n \"attributes\": {\n \"name\": [\"start\", \"stop\", \"project\", \"pause\", \"continue\", \"\"],\n \"parameter\": [\"\", \"\", \"\", \"\", \"\", \"\"],\n \"command\": [\n \"start_debug\",\n \"stop_debug\",\n \"start_debug\",\n \"pause_debug\",\n \"continue_debug\",\n \"start_debug\",\n ],\n \"wordlen\": [(2, 5), (2, 2), (2, 4), (2, 4), (2, 4), (1, 3)],\n },\n },\n \"step\": {\n \"tags\": [\"step\", \"stepover\"],\n \"attributes\": {\n \"name\": [\"over\", \"into\", \"out\", \"stepover\"],\n \"parameter\": [\"\", \"\", \"\"],\n \"command\": [\"step_over\", \"step_into\", \"step_out\", \"step_out\"],\n \"wordlen\": [(2, 3), (2, 3), (2, 3), (1, 2)],\n },\n },\n \"search\": {\n \"tags\": [\"search\", \"find\"],\n \"attributes\": {\n \"name\": [\"file\", \"folder\", \"workspace\", \"google\", \"\"],\n \"parameter\": [\"\", \"\", \"\", \"\", \"\"],\n \"command\": [\n \"search_workspace\",\n \"search_workspace\",\n \"search_workspace\",\n \"search_google\",\n \"search\",\n ],\n \"wordlen\": [(2, 4), (2, 4), (2, 3), (2, 2), (1, 2)],\n },\n },\n \"next\": {\n \"tags\": [\"next\"],\n \"attributes\": {\n \"name\": [\"match\", \"\"],\n \"parameter\": [\"\", \"\"],\n \"command\": [\"next_match\", \"next_match\",],\n \"wordlen\": [(1, 2), (1, 2)],\n },\n },\n \"go\": {\n \"tags\": [\"go to\", \"goto\", \"navigate to\", \"move to\"],\n \"attributes\": {\n \"name\": [\n \"line\",\n \"definition\",\n \"class\",\n \"file\",\n \"terminal\",\n \"editor\",\n ],\n \"parameter\": [\"number\", \"\", \"string\", \"\", \"\", \"\"],\n \"command\": [\n \"navigate_line\",\n \"navigate_definition\",\n \"navigate_class\",\n \"navigate_file\",\n \"navigate_terminal\",\n \"navigate_editor\",\n ],\n \"wordlen\": [(4, 7), (3, 5), (3, 6), (2, 5), (2, 5), (2, 5)],\n },\n },\n \"close\": {\n \"tags\": [\"close\"],\n \"attributes\": {\n \"name\": [\n \"current file\",\n \"this file\",\n \"all files\",\n \"files to the right\",\n \"file to the right\",\n \"files to the left\",\n \"file to the left\",\n \"editors to the right\",\n \"editor to the right\",\n \"editors to the left\",\n \"editor to the left\",\n \"window\",\n \"file\",\n \"editor\",\n \"others\",\n \"other\"\n ],\n \"parameter\": [\"\", \"\", \"\", \"\", \"\", \"\", \"\",\"\", \"\", \"\", \"\", \"\", 
\"\",\"\", \"\", \"\"],\n \"command\": [\n \"close_current_file\",\n \"close_current_file\",\n \"close_all_files\",\n \"close_to_the_right\",\n \"close_to_the_right\",\n \"close_to_the_left\",\n \"close_to_the_left\",\n \"close_to_the_right\",\n \"close_to_the_right\",\n \"close_to_the_left\",\n \"close_to_the_left\",\n \"close_window\",\n \"close_current_file\",\n \"close_current_file\",\n \"close_other\",\n \"close_other\"\n ],\n \"wordlen\": [\n (3, 4),\n (3, 4),\n (3, 5),\n (3, 6),\n (3, 6),\n (3, 6),\n (3, 6),\n (3, 6),\n (3, 6),\n (3, 6),\n (3, 6),\n (2, 3),\n (2, 2),\n (2, 2),\n (2, 4),\n (2, 4),\n ],\n },\n },\n \"cut\": {\n \"tags\": [\"cut\"],\n \"attributes\": {\n \"name\": [\"\"],\n \"parameter\": [\"\"],\n \"command\": [\"cut\"],\n \"wordlen\": [(1, 1)],\n },\n },\n \"copy\": {\n \"tags\": [\"copy\"],\n \"attributes\": {\n \"name\": [\"\"],\n \"parameter\": [\"\"],\n \"command\": [\"copy\"],\n \"wordlen\": [(1, 1)],\n },\n },\n \"paste\": {\n \"tags\": [\"paste\"],\n \"attributes\": {\n \"name\": [\"\"],\n \"parameter\": [\"\"],\n \"command\": [\"paste\"],\n \"wordlen\": [(1, 1)],\n },\n },\n \"show\": {\n \"tags\": [\"show\"],\n \"attributes\": {\n \"name\": [\n \"all commands\",\n \"context menu\",\n \"next change\",\n \"previous change\",\n ],\n \"parameter\": [\"\", \"\", \"\", \"\"],\n \"command\": [\n \"show_commands\",\n \"show_contextMenu\",\n \"show_nextChange\",\n \"show_previousChange\",\n ],\n \"wordlen\": [(3, 4), (3, 4), (3, 4), (3, 4)],\n },\n },\n \"zoom\": {\n \"tags\": [\"zoom\"],\n \"attributes\": {\n \"name\": [\"in\", \"out\"],\n \"parameter\": [\"\", \"\"],\n \"command\": [\"zoom_in\", \"zoom_out\"],\n \"wordlen\": [(2, 2), (2, 2)],\n },\n },\n \"save\": {\n \"tags\": [\"save\"],\n \"attributes\": {\n \"name\": [\"as\", \"all\", \"\"],\n \"parameter\": [\"\", \"\", \"\"],\n \"command\": [\"save_as\", \"save_all\", \"save\"],\n \"wordlen\": [(2, 2), (2, 3), (1, 2)],\n },\n },\n \"scroll\": {\n \"tags\": [\"scroll\"],\n \"attributes\": {\n \"name\": [\"up\", \"down\"],\n \"parameter\": [\"\", \"\"],\n \"command\": [\"scroll_up\", \"scroll_down\"],\n \"wordlen\": [(2, 2), (2, 2)],\n },\n },\n \"comment\": {\n \"tags\": [\"comment\", \"uncomment\"],\n \"attributes\": {\n \"name\": [\"line\", \"lines\", \"selection\", \"\"],\n \"parameter\": [\"\", \"\", \"\", \"\"],\n \"command\": [\n \"toggle_comment\",\n \"toggle_comment\",\n \"toggle_comment\",\n \"toggle_comment\",\n ],\n \"wordlen\": [(2, 3), (2, 3), (2, 3), (1, 1)],\n },\n },\n \"extensions\": {\n \"tags\": [\"install\", \"update\"],\n \"attributes\": {\n \"name\": [\"extension\", \"extensions\"],\n \"parameter\": [\"\", \"\"],\n \"command\": [\"install_extension\", \"update_extension\"],\n \"wordlen\": [(2, 3), (3, 4)],\n },\n },\n \"breakpoint\": {\n \"tags\": [\"breakpoint\", \"breakpoints\"],\n \"attributes\": {\n \"name\": [\n \"add\",\n \"delete all\",\n \"remove all\",\n \"delete\",\n \"remove\",\n \"toggle\",\n \"disable all\",\n \"enable all\",\n ],\n \"parameter\": [\"number\", \"\", \"\", \"number\", \"number\", \"\", \"\", \"\"],\n \"command\": [\n \"breakpoint_add\",\n \"breakpoint_remove_all\",\n \"breakpoint_remove_all\",\n \"breakpoint_remove\",\n \"breakpoint_remove\",\n \"breakpoint_toggle\",\n \"breakpoint_disable_all\",\n \"breakpoint_enable_all\",\n ],\n \"wordlen\": [\n (3, 7),\n (3, 4),\n (3, 4),\n (3, 7),\n (3, 7),\n (2, 2),\n (3, 4),\n (3, 4),\n ],\n },\n },\n \"format\": {\n \"tags\": [\"format\"],\n \"attributes\": {\n \"name\": [\"selection\", \"selected\", \"\"],\n 
\"parameter\": [\"\", \"\", \"\"],\n \"command\": [\n \"format_selection\",\n \"format_selection\",\n \"format_document\",\n ],\n \"wordlen\": [(2, 3), (2, 3), (1, 3)],\n },\n },\n \"compare\": {\n \"tags\": [\"compare\"],\n \"attributes\": {\n \"name\": [\"clipboard\", \"copied\", \"\"],\n \"parameter\": [\"\", \"\", \"\"],\n \"command\": [\n \"compare_clipboard\",\n \"compare_clipboard\",\n \"compare_file\",\n ],\n \"wordlen\": [(2, 4), (2, 4), (1, 3)],\n },\n },\n # general commands created for continue and stop while debugging but may be used as other context-aware commands;\n # make sure to keep this below the original command as it should have lower priority as\n # previous exact command may match completely\n \"conitnue\": {\n \"tags\": [\"continue\"],\n \"attributes\": {\n \"name\": [\"\"],\n \"parameter\": [\"\"],\n \"command\": [\"continue\"],\n \"wordlen\": [(1, 2)],\n },\n },\n \"stop\": {\n \"tags\": [\"stop\"],\n \"attributes\": {\n \"name\": [\"\"],\n \"parameter\": [\"\"],\n \"command\": [\"stop\"],\n \"wordlen\": [(1, 2)],\n },\n },\n \"theme\": {\n \"tags\": [\"icon\", \"theme\"],\n \"attributes\": {\n \"name\": [\"icon theme\", \"\"],\n \"parameter\": [\"\", \"\"],\n \"command\": [\"select_icon_theme\", \"select_theme\"],\n \"wordlen\": [(2, 4), (2, 4)],\n },\n },\n \"git\": {\n \"tags\": [\n \"git\",\n \"push\",\n \"pull\",\n \"revert\",\n \"commit\",\n \"git reset\",\n \"git add\",\n \"unstage\",\n \"branch\",\n \"branches\",\n \"stage\",\n \"clone\",\n ],\n \"init\": {\n \"tags\": [\"git init\", \"init\", \"git new\"],\n \"attributes\": {\n \"name\": [\"\"],\n \"parameter\": [\"\"],\n \"command\": [\"git_init\"],\n \"wordlen\": [(2, 3)],\n },\n },\n \"clone\": {\n \"tags\": [\"git clone\"],\n \"attributes\": {\n \"name\": [\"\"],\n \"parameter\": [\"\"],\n \"command\": [\"git_clone\"],\n \"wordlen\": [(2, 3)],\n },\n },\n \"diff\": {\n \"tags\": [\"git diff\", \"git difference\"],\n \"attributes\": {\n \"name\": [\"\", \"\"],\n \"parameter\": [\"\", \"\"],\n \"command\": [\"git_diff\", \"git_diff\"],\n \"wordlen\": [(2, 3), (2, 3)],\n },\n },\n \"push\": {\n \"tags\": [\"git push\", \"push\"],\n \"attributes\": {\n \"name\": [\"changes\", \"files\", \"branch\", \"change\", \"file\", \"\"],\n \"parameter\": [\"\", \"\", \"\", \"\", \"\", \"\"],\n \"command\": [\n \"git_push\",\n \"git_push\",\n \"git_push\",\n \"git_push\",\n \"git_push\",\n \"git_push\",\n ],\n \"wordlen\": [(2, 4), (2, 4), (2, 4), (2, 4), (2, 4), (2, 3)],\n },\n },\n \"status\": {\n \"tags\": [\"git status\", \"status\"],\n \"attributes\": {\n \"name\": [\"\"],\n \"parameter\": [\"\"],\n \"command\": [\"git_status\"],\n \"wordlen\": [(2, 3)],\n },\n },\n \"pull\": {\n \"tags\": [\"git pull\", \"pull\"],\n \"attributes\": {\n \"name\": [\"changes\", \"files\", \"branch\", \"change\", \"file\", \"\"],\n \"parameter\": [\"\", \"\", \"\", \"\", \"\", \"\"],\n \"command\": [\n \"git_pull\",\n \"git_pull\",\n \"git_pull\",\n \"git_pull\",\n \"git_pull\",\n \"git_pull\",\n ],\n \"wordlen\": [(2, 4), (2, 4), (2, 4), (2, 4), (2, 4), (2, 3)],\n },\n },\n \"reset\": {\n \"tags\": [\"git reset\", \"reset\", \"unstage\"],\n \"attributes\": {\n \"name\": [\n \"changes\",\n \"files\",\n \"change\",\n \"file\",\n \"all\",\n \"everything\",\n \"\",\n ],\n \"parameter\": [\"\", \"\", \"\", \"\", \"\", \"\", \"\"],\n \"command\": [\n \"git_reset\",\n \"git_reset\",\n \"git_reset\",\n \"git_reset\",\n \"git_reset_all\",\n \"git_reset_all\",\n \"git_reset\",\n ],\n \"wordlen\": [\n (2, 4),\n (2, 4),\n (2, 4),\n (2, 4),\n 
(3, 4),\n (2, 3),\n (2, 3),\n ],\n },\n },\n \"add\": {\n \"tags\": [\"git add\", \"add\", \"stage\"],\n \"attributes\": {\n \"name\": [\n \"changes\",\n \"files\",\n \"change\",\n \"file\",\n \"all\",\n \"everything\",\n \"tracked\",\n \"untracked\",\n \"\",\n ],\n \"parameter\": [\"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\"],\n \"command\": [\n \"git_stage_change\",\n \"git_stage\",\n \"git_stage_change\",\n \"git_stage\",\n \"git_stage_all\",\n \"git_stage_all\",\n \"git_stage_all_tracked\",\n \"git_stage_all_untracked\",\n \"git_stage\",\n ],\n \"wordlen\": [\n (2, 4),\n (2, 4),\n (2, 4),\n (2, 4),\n (3, 4),\n (2, 3),\n (3, 5),\n (3, 5),\n (2, 3),\n ],\n },\n },\n \"commit\": {\n \"tags\": [\"git commit\", \"commit\"],\n \"attributes\": {\n \"name\": [\"changes\", \"files\", \"change\", \"file\", \"\"],\n \"parameter\": [\"\", \"\", \"\", \"\", \"\"],\n \"command\": [\n \"git_commit\",\n \"git_commit\",\n \"git_commit\",\n \"git_commit\",\n \"git_commit\",\n ],\n \"wordlen\": [(2, 4), (2, 4), (2, 4), (2, 4), (2, 3)],\n },\n },\n \"branch\": {\n \"tags\": [\"git branch\", \"branch\", \"branches\"],\n \"attributes\": {\n \"name\": [\"delete\", \"rename\", \"\"],\n \"parameter\": [\"\", \"\", \"\"],\n \"command\": [\n \"git_branch_delete\",\n \"git_branch_rename\",\n \"git_branch\",\n ],\n \"wordlen\": [(2, 3), (2, 3), (2, 3)],\n },\n },\n },\n }\n self.transcript = \"\"\n self.transcriptLength = 0\n\n def getParams(self, param, argName):\n if param == \"number\":\n numbers = [int(s) for s in self.transcript if s.isdigit()]\n if len(numbers) == 1:\n return numbers[0]\n else:\n return None\n\n elif param == \"string\":\n argIndex = self.transcript.index(argName)\n if argIndex < (len(self.transcript) - 1):\n nextword = self.transcript[argIndex + 1]\n return nextword\n else:\n return None\n return param\n\n def getCommand(self, transcript):\n\n self.transcript = transcript.lower().split()\n self.transcriptLength = len(self.transcript)\n attributes = self.getCommandKeyAttributes(None)\n response = \"fallback\"\n command = transcript\n paramValue = \"\"\n # print(attributes)\n if attributes is not None:\n idx = -1\n names = attributes.get(\"name\")\n for i in range(len(names)):\n index = (\n self.subfinder(self.transcript, names[i].split())\n if len(names[i]) > 0\n else 0\n )\n if index > -1:\n idx = i\n break\n if idx > -1:\n (minLen, maxLen) = attributes[\"wordlen\"][idx]\n if minLen <= len(self.transcript) <= maxLen:\n paramValue = self.getParams(\n attributes.get(\"parameter\")[idx], attributes.get(\"name\")[idx]\n )\n response = \"success\"\n command = attributes.get(\"command\")[idx]\n if paramValue is None:\n response = \"fallback\"\n command = transcript\n\n print(response, command, paramValue, flush=True)\n return\n # return {\"response\": response, \"command\": command, \"parameter\": paramValue}\n\n def getCommandKeyAttributes(self, subcommand):\n\n commandKeys = (\n list(subcommand.keys())\n if subcommand is not None\n else list(self.commanKeyDict.keys())\n )\n commandObj = subcommand if subcommand is not None else self.commanKeyDict\n l2 = [\"tags\"]\n commandKeys = [x for x in commandKeys if x not in l2]\n for key in commandKeys:\n tags = commandObj[key].get(\"tags\")\n for tag in tags:\n index = self.subfinder(self.transcript, tag.split())\n if index > -1:\n # self.transcript = self.transcript[index:]\n if commandObj[key].get(\"attributes\") is not None:\n return commandObj[key].get(\"attributes\")\n else:\n return self.getCommandKeyAttributes(commandObj[key])\n\n return None
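\n\n # a quick usage sketch (hypothetical driver calls, in the spirit of the commented-out main below):\n # Commands().getCommand(\"go to line 12\") # prints: success navigate_line 12\n # Commands().getCommand(\"close window\") # prints: success close_window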
\n\n def subfinder(self, mylist, pattern):\n for i in range(len(mylist)):\n if mylist[i] == pattern[0] and mylist[i : i + len(pattern)] == pattern:\n return i\n return -1\n\n\n# def main():\n# commandObj = Commands()\n# commandObj.getCommand(\"close window\")\n\n\n# if __name__ == \"__main__\":\n# main()\n"
},
{
"alpha_fraction": 0.47724655270576477,
"alphanum_fraction": 0.4821428656578064,
"avg_line_length": 32.06666564941406,
"blob_id": "66fccdc45e7efaf0612e0827eb427dcd3847bcc2",
"content_id": "19798e611e806d1d8152f75f2502417b5ddad039",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3472,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 105,
"path": "/out/vspeak-test/commands2.py",
"repo_name": "udikshasingh/VSpeak",
"src_encoding": "UTF-8",
"text": "# CONSTANT = {\n# \"number\": \"$number\",\n# \"string\": \"$string\",\n# \"function\": \"$function\",\n# \"selected\": \"$selected\",\n# }\n\n\nclass Commands:\n def __init__(self):\n self.commanKeyDict = {\n \"go\": {\n \"tags\": [\"go to\", \"goto\", \"naviagte to\", \"move to\"],\n \"attributes\": {\n \"name\": [\"line\", \"definition\", \"class\", \"file\"],\n \"parameter\": [\"number\", \"\", \"string\", \"string\"],\n \"command\": [\n \"navigate_line\",\n \"navigate_definition\",\n \"navigate_class\",\n \"navigate_file\",\n ],\n \"wordlen\": [(4, 7), (3, 5), (3, 6)],\n },\n }\n }\n self.original = \"\"\n self.transcript = \"\"\n self.transcriptLength = 0\n\n def getParams(self, param, argName):\n if param == \"number\":\n numbers = [int(s) for s in self.transcript if s.isdigit()]\n if len(numbers) == 1:\n return numbers[0]\n else:\n return None\n\n if param == \"string\":\n argIndex = self.transcript.index(argName)\n if argIndex < (len(self.transcript) - 1):\n nextword = self.transcript[argIndex + 1]\n return nextword\n else:\n return None\n return param\n\n def getCommand(self, transcript):\n self.transcript = transcript.lower().split()\n self.original = self.transcript\n self.transcriptLength = len(self.transcript)\n attributes = self.getCommandKeyAttributes()\n response = \"fallback\"\n command = self.original\n paramValue = \"\"\n if attributes is not None:\n idx = -1\n names = attributes.get(\"name\")\n for i in range(len(names)):\n index = self.subfinder(self.transcript, names[i].split())\n # index = self.transcript.find(names[i])\n if index > -1:\n idx = i\n\n if idx > -1:\n paramValue = self.getParams(\n attributes.get(\"parameter\")[idx], attributes.get(\"name\")[idx]\n )\n response = \"success\"\n command = attributes.get(\"command\")[idx]\n if paramValue is None:\n response = \"fallback\"\n command = self.original\n\n print(response, command, paramValue)\n return\n # return {\"response\": response, \"command\": command, \"parameter\": paramValue}\n\n def getCommandKeyAttributes(self):\n commandKeys = self.commanKeyDict.keys()\n for key in commandKeys:\n tags = self.commanKeyDict[key].get(\"tags\")\n for tag in tags:\n index = self.subfinder(self.transcript, tag.split())\n if index > -1:\n self.transcript = self.transcript[index:]\n return self.commanKeyDict[key].get(\"attributes\")\n\n return None\n\n def subfinder(self, mylist, pattern):\n for i in range(len(mylist)):\n if mylist[i] == pattern[0] and mylist[i : i + len(pattern)] == pattern:\n return i\n return -1\n\n\n# def main():\n# commandObj = Commands(\"Please go to class Rambo chutiya hai\")\n# final = commandObj.getCommand()\n# print(final)\n\n\n# if __name__ == \"__main__\":\n# main()\n"
},
{
"alpha_fraction": 0.6017315983772278,
"alphanum_fraction": 0.6017315983772278,
"avg_line_length": 18.510135650634766,
"blob_id": "9332497bcdc01de3ed43ec54607810981ab0a201",
"content_id": "ab7ed5b920a30a60b059655ace0f8b68222ce070",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "TypeScript",
"length_bytes": 5775,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 296,
"path": "/src/commands.ts",
"repo_name": "udikshasingh/VSpeak",
"src_encoding": "UTF-8",
"text": "export default [\n //OPEN\n {\n command: \"open_file\",\n exec: \"workbench.action.files.openFileFolder\"\n },\n {\n command: \"open_workspace\",\n exec: \"workbench.action.openWorkspace\"\n },\n {\n command: \"navigate_definition\",\n exec: \"editor.action.revealDefinition\"\n },\n {\n command: \"navigate_file\",\n exec: \"workbench.action.quickOpen\"\n },\n {\n command: \"format_document\",\n exec: \"editor.action.formatDocument\"\n },\n {\n command: \"format_selection\",\n exec: \"editor.action.formatSelection\"\n },\n {\n command: \"navigate_terminal\",\n exec: \"workbench.action.terminal.focus\"\n },\n {\n command: \"navigate_editor\",\n exec: \"workbench.action.focusActiveEditorGroup\"\n },\n {\n command: \"open_terminal\",\n exec: \"workbench.action.terminal.new\"\n },\n {\n command: \"close_current_file\",\n exec: \"workbench.action.closeActiveEditor\"\n },\n {\n command: \"close_all_files\",\n exec: \"workbench.action.closeAllEditors\"\n },\n {\n command: \"close_to_the_right\",\n exec: \"workbench.action.closeEditorsToTheRight\"\n },\n {\n command: \"close_to_the_left\",\n exec: \"workbench.action.closeEditorsToTheLeft\"\n },\n {\n command: \"close_window\",\n exec: \"workbench.action.closeWindow\"\n },\n {\n command: \"next_match\",\n exec: \"editor.action.nextMatchFindAction\"\n },\n {\n command: \"search_workspace\",\n exec: \"filesExplorer.findInWorkspace\"\n },\n {\n command: \"run_project\",\n exec: \"workbench.action.debug.run\"\n },\n {\n command: \"run_file\",\n exec: \"code-runner.run\"\n },\n {\n command: \"start_debug\",\n exec: \"workbench.action.debug.start\"\n },\n {\n command: \"pause_debug\",\n exec: \"workbench.action.debug.pause\"\n },\n {\n command: \"continue_debug\",\n exec: \"workbench.action.debug.continue\"\n },\n {\n command: \"stop_debug\",\n exec: \"workbench.action.debug.stop\"\n },\n {\n command: \"step_over\",\n exec: \"workbench.action.debug.stepOver\"\n },\n {\n command: \"step_into\",\n exec: \"workbench.action.debug.stepInto\"\n },\n {\n command: \"step_out\",\n exec: \"workbench.action.debug.stepOut\"\n },\n {\n command: \"cut\",\n exec: \"editor.action.clipboardCutAction\"\n },\n {\n command: \"copy\",\n exec: \"editor.action.clipboardCopyAction\"\n },\n {\n command: \"paste\",\n exec: \"editor.action.clipboardPasteAction\"\n },\n {\n command: \"show_commands\",\n exec: \"workbench.action.showCommands\"\n },\n {\n command: \"show_contextMenu\",\n exec: \"editor.action.showContextMenu\"\n },\n {\n command: \"show_hover\",\n exec: \"editor.action.showHover\"\n },\n {\n command: \"show_nextChange\",\n exec: \"editor.action.dirtydiff.next\"\n },\n {\n command: \"show_previousChange\",\n exec: \"editor.action.dirtydiff.previous\"\n },\n {\n command: \"zoom_in\",\n exec: \"workbench.action.zoomIn\"\n },\n {\n command: \"zoom_out\",\n exec: \"workbench.action.zoomOut\"\n },\n {\n command: \"save\",\n exec: \"workbench.action.files.save\"\n },\n {\n command: \"save_as\",\n exec: \"workbench.action.files.saveAs\"\n },\n {\n command: \"save_all\",\n exec: \"workbench.action.files.saveAll\"\n },\n {\n command: \"scroll_up\",\n exec: \"scrollPageUp\"\n },\n {\n command: \"scroll_down\",\n exec: \"scrollPageDown\"\n },\n {\n command: \"open_settings\",\n exec: \"workbench.action.openSettings\"\n },\n {\n command: \"add_comment\",\n exec: \"editor.action.addCommentLine\"\n },\n {\n command: \"remove_comment\",\n exec: \"editor.action.removeCommentLine\"\n },\n {\n command: \"toggle_comment\",\n exec: \"editor.action.commentLine\"\n },\n {\n 
command: \"search\",\n exec: \"actions.find\"\n },\n {\n command: \"open_window\",\n exec: \"workbench.action.newWindow\"\n },\n {\n command: \"install_extension\",\n exec: \"workbench.extensions.action.installExtensions\"\n },\n {\n command: \"update_extension\",\n exec: \"workbench.extensions.action.updateAllExtensions\"\n },\n {\n command: \"compare_file\",\n exec: \"workbench.files.action.compareFileWith\"\n },\n {\n command: \"compare_clipboard\",\n exec: \"workbench.files.action.compareWithClipboard\"\n },\n {\n command: \"breakpoint_toggle\",\n exec: \"editor.debug.action.toggleBreakpoint\"\n },\n {\n command: \"breakpoint_remove_all\",\n exec: \"workbench.debug.viewlet.action.removeAllBreakpoints\"\n },\n {\n command: \"breakpoint_enable_all\",\n exec: \"workbench.debug.viewlet.action.enableAllBreakpoints\"\n },\n {\n command: \"breakpoint_disable_all\",\n exec: \"workbench.debug.viewlet.action.disableAllBreakpoints\"\n },\n {\n command: \"new_file\",\n exec: \"workbench.action.files.newUntitledFile\"\n },\n {\n command: \"select_icon_theme\",\n exec: \"workbench.action.selectIconTheme\"\n },\n {\n command: \"select_theme\",\n exec: \"workbench.action.selectTheme\"\n },\n //GIT\n {\n command: \"git_init\",\n exec: \"git.init\"\n },\n {\n command: \"git_clone\",\n exec: \"git.clone\"\n },\n {\n command: \"git_branch\",\n exec: \"git.branch\"\n },\n {\n command: \"git_branch_rename\",\n exec: \"git.renameBranch\"\n },\n {\n command: \"git_branch_delete\",\n exec: \"git.deleteBranch\"\n },\n {\n command: \"git_push\",\n exec: \"git.push\"\n },\n {\n command: \"git_pull\",\n exec: \"git.pull\"\n },\n {\n command: \"git_stage\",\n exec: \"git.stage\"\n },\n {\n command: \"git_stage_change\",\n exec: \"git.stageChange\"\n },\n {\n command: \"git_stage_all\",\n exec: \"git.stageAll\"\n },\n {\n command: \"git_stage_all_tracked\",\n exec: \"git.stageAllTracked\"\n },\n {\n command: \"git_stage_all_untracked\",\n exec: \"git.stageAllUntracked\"\n },\n {\n command: \"git_commit\",\n exec: \"git.commit\"\n },\n {\n command: \"git_reset\",\n exec: \"git.unstage\"\n },\n {\n command: \"git_reset_all\",\n exec: \"git.unstageAll\"\n },\n {\n command: \"git_diff\",\n exec: \"git.timeline.openDiff\"\n }\n];\n"
}
] | 6 |
itsmnjn/data-structures
|
https://github.com/itsmnjn/data-structures
|
a3e4e233739e1879d132be02a8b6d051eed83b22
|
3e6343c4e95c465a9ca8f9ac155c3004ea7b8b4b
|
23428b8ce22f2cbf47cf30e617ad756676d03a24
|
refs/heads/master
| 2020-04-07T11:12:18.517391 | 2018-11-20T22:19:33 | 2018-11-20T22:19:33 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.531461775302887,
"alphanum_fraction": 0.5348499417304993,
"avg_line_length": 21.714284896850586,
"blob_id": "dd2d96683304a69f3f7114c5d34f9af78825806a",
"content_id": "e9046aea06d511eabeacdffa05b0aaef095fe27b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2066,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 91,
"path": "/binary_tree.py",
"repo_name": "itsmnjn/data-structures",
"src_encoding": "UTF-8",
"text": "from queue import Queue\n\nclass BinaryTree:\n def __init__(self, value):\n self.value = value\n self.left_child = None\n self.right_child = None\n \n def insert_left(self, value):\n new = BinaryTree(value)\n if self.left_child:\n new.left_child = self.left_child\n self.left_child = new\n else:\n self.left_child = new\n\n def insert_right(self, value):\n new = BinaryTree(value)\n if self.right_child:\n new.right_child = self.right_child\n self.right_child = new\n else:\n self.right_child = new\n\n def pre_order(self):\n print(self.value)\n\n if self.left_child:\n self.left_child.pre_order()\n \n if self.right_child:\n self.right_child.pre_order()\n\n def in_order(self):\n if self.left_child:\n self.left_child.in_order()\n \n print(self.value)\n\n if self.right_child:\n self.right_child.in_order()\n\n def post_order(self):\n if self.left_child:\n self.left_child.post_order()\n\n if self.right_child:\n self.right_child.post_order()\n \n print(self.value)\n\n def bfs(self):\n queue = Queue()\n queue.put(self)\n\n while not queue.empty():\n current_node = queue.get()\n print(current_node.value)\n\n if current_node.left_child:\n queue.put(current_node.left_child)\n \n if current_node.right_child:\n queue.put(current_node.right_child)\n \nif __name__ == \"__main__\":\n root = BinaryTree(1)\n root.insert_left(2)\n root.insert_right(5)\n\n root.left_child.insert_left(3)\n root.left_child.insert_right(4)\n\n root.right_child.insert_left(6)\n root.right_child.insert_right(7)\n\n print(\"Pre-order:\")\n root.pre_order()\n print()\n\n print(\"In-order:\")\n root.in_order()\n print()\n\n print(\"Post-order:\")\n root.post_order()\n print()\n\n print(\"BFS:\")\n root.bfs()\n print()"
},
{
"alpha_fraction": 0.45491182804107666,
"alphanum_fraction": 0.4629722833633423,
"avg_line_length": 22.630952835083008,
"blob_id": "d990ae40093ffd79ab9c4f033c5e8c4e9f77644e",
"content_id": "575d7e5a1b730dea03e64d1dc0b4d6404ba185f8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1985,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 84,
"path": "/linked_list.py",
"repo_name": "itsmnjn/data-structures",
"src_encoding": "UTF-8",
"text": "class LinkedList:\n class Node:\n def __init__(self, data, next = None):\n self.data = data\n self.next = next\n \n def __init__(self, data = None):\n self.head = self.Node(data)\n self.size = 1\n \n def add_to_head(self, data):\n new = self.Node(data, self.head)\n self.head = new\n self.size += 1\n\n def add_to_tail(self, data):\n new = self.Node(data)\n tail = self.end()\n tail.next = new\n self.size += 1\n\n def remove_from_head(self):\n if self.size > 1:\n self.head = self.head.next\n self.size -= 1\n else:\n self.head = None\n self.size = 0\n\n def remove_from_tail(self):\n if self.size > 1:\n i = self.head\n while i.next.next != None:\n i = i.next\n i.next = None\n self.size -= 1\n else:\n self.head = None\n self.size = 0\n \n\n def end(self):\n if self.size > 0:\n i = self.head\n while i.next != None:\n i = i.next \n return i\n else:\n return None\n \n def print(self):\n i = self.head\n while i != None:\n if i.next != None:\n print(str(i.data) + \" -> \", end = \"\")\n else: print(str(i.data))\n i = i.next\n print()\n\nif __name__ == \"__main__\":\n print(\"[+] Creating LinkedList 'list' with head node value of 1 ...\\n\")\n list = LinkedList(1)\n\n list.print()\n\n print(\"[+] Adding node with data = 2 to tail of 'list' ...\\n\")\n list.add_to_tail(2)\n\n list.print()\n\n print(\"[+] Adding node with data = 0 to head of 'list' ...\\n\")\n list.add_to_head(0)\n\n list.print()\n\n print(\"[+] Removing node from head of 'list' ...\\n\")\n list.remove_from_head()\n\n list.print()\n\n print(\"[+] Removing node from tail of 'list' ...\\n\")\n list.remove_from_tail()\n\n list.print()\n"
},
{
"alpha_fraction": 0.7761194109916687,
"alphanum_fraction": 0.7761194109916687,
"avg_line_length": 26,
"blob_id": "9931de2656e6f20eb31ea5c55b7fff064b51b781",
"content_id": "dd3519db3ecae9fc1434a40c74454db91d4f25c5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 134,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 5,
"path": "/README.md",
"repo_name": "itsmnjn/data-structures",
"src_encoding": "UTF-8",
"text": "# Data Structures\n\nThe most commonly used data structures (in interviews), implemented in Python.\n\nC++ implementations coming soon (?)"
}
] | 3 |
nickolas1/ramses_plot_scripts
|
https://github.com/nickolas1/ramses_plot_scripts
|
473685f295ce33581c8123da18e84bea9a4d3989
|
997e5ad2c4ef64ac2489c6b87fa0627f71a73aa5
|
392bb7216edb6516ecb363806bdf0cb232410ed1
|
refs/heads/master
| 2020-12-24T18:03:13.505726 | 2014-06-03T10:10:40 | 2014-06-03T10:10:40 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5881176590919495,
"alphanum_fraction": 0.624470591545105,
"avg_line_length": 34.09504318237305,
"blob_id": "1187861f3a86aaadb472fcf00cbaa6c4e8551fc3",
"content_id": "7b5081343e657a96a1d825adfd863fbde04135af",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8500,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 242,
"path": "/make_both_density_pdfs_from_reduced_data.py",
"repo_name": "nickolas1/ramses_plot_scripts",
"src_encoding": "UTF-8",
"text": "from __future__ import division\n\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\nimport gc\nimport sys\nimport h5py\nfrom astropy.io import ascii\nfrom os.path import expanduser\n\n# import ramses helper functions and get figure directory\nhomedir = expanduser('~')+'/'\n\nsys.path.append(homedir+'pythonhelpers/ramses/')\nfrom ramses_helpers import *\nmpl.rc_file(homedir+'pythonhelpers/ramses/matplotlibrc')\noutdir = get_output_path(homedir)\n\n# set some fonts\nfontdir = homedir+'/Documents/astronomy/macfontsforpython/'\ntfm = fm.FontProperties( # tick font\n fname=fontdir+'Gotham-Book.ttf', size=13)\nlfm = fm.FontProperties( # label font main\n fname=fontdir+'Gotham-BookItalic.ttf', size=12)\n \nfig = plt.figure(figsize = (10,3.5))\n\nax = fig.add_axes([0.1, 0.2, 0.35, 0.75])\nax2 = fig.add_axes([0.6, 0.2, 0.35, 0.75])\n\ntimes = []\nmass1 = []\nmass2 = []\nmass3 = []\nsinkmasses = []\n\n\nprint os.path.basename(os.path.normpath(os.getcwd()))\nif os.path.basename(os.path.normpath(os.getcwd())) == 'turbshock512k4gcl':\n snaps = [15, 82, 95, 109]\n myrstrings = ['1', '6', '7', '8']\nif os.path.basename(os.path.normpath(os.getcwd())) == 'turbshock512k4gcsl':\n snaps = [4, 28, 41, 55]\n myrstrings = ['0.1', '1.0', '1.5', '2.0']\ncolors = [c1, c2, c3, c4]\n\n#snaps=[1]\n#myrstrings=['0.0']\n\nsnapstart = int(sys.argv[1])\nsnapend = int(sys.argv[2])\nsnapiter = int(sys.argv[3])\n\nfor snap in range(snapstart, snapend, snapiter):\n datanamelo = 'reduced_'+str(snap).zfill(5)+'/MassAndVolumeInDensityBins_Low.dat'\n datanamemid = 'reduced_'+str(snap).zfill(5)+'/MassAndVolumeInDensityBins_Mid.dat'\n datanamehi = 'reduced_'+str(snap).zfill(5)+'/MassAndVolumeInDensityBins_High.dat'\n infoname = 'reduced_'+str(snap).zfill(5)+'/info_'+str(snap).zfill(5)+'.txt'\n sinkname = 'reduced_'+str(snap).zfill(5)+'/sink_'+str(snap).zfill(5)+'.out'\n \n framesdir = outdir+'both_pdfs/'\n if not os.path.exists(framesdir):\n os.makedirs(framesdir)\n \n (time, unit_t) = get_time(infoname)\n \n # volume of the sphere the profile comes from\n (boxlen, unit_l) = get_boxsize(infoname)\n if boxlen > 7:\n sphererad = 27.0\n else:\n sphererad = 13.5\n spherevol = 4.0 * np.pi / 3.0 * (sphererad * 3.086e18)**3\n \n timeMyr = time * unit_t / 31557600.0 / 1.e6\n timeMyrRoundString = np.round(timeMyr)\n times.append(timeMyr)\n\n data = ascii.read(datanamelo)\n ax.plot(data['Lowdens'], data['CellVolume']/spherevol, color = colors[1], linewidth = 1.5)\n \n data = ascii.read(datanamemid)\n ax.plot(data['Middens'], data['CellVolume']/spherevol, color = colors[2], linewidth = 1.5)\n \n data = ascii.read(datanamehi)\n ax.plot(data['Highdens'], data['CellVolume']/spherevol, color = colors[3], linewidth = 1.5)\n \n # maximum likelihood fit to the first snap\n if snap == -1:\n dens = np.array(data['Density'])\n vals = np.array(data['CellVolume'])\n # find the maximum value above 1.e-24 (want to avoid the ISM peak)\n cloudsel = (dens > 1.e-24)\n denssub = dens[cloudsel]\n valssub = vals[cloudsel]\n densmax = denssub[valssub.argmax()]\n logdensmax = np.log10(densmax)\n\n # select 3 decades surrounding the density maximum to fit to\n logdens = np.log10(dens)\n fitsel = logdens[np.abs(logdens - logdensmax) <= 1.5]\n print fitsel\n # get the maximum likelihood lognormal to those 3 decades\n muML = fitsel.sum() / len(fitsel)\n print len(fitsel)\n print muML\n\n sys.exit()\n # fit to three orders of magnitude surrounding peak density\n maxpt = 
data['Density'].argmax()\n maxdens = np.max(data['Density'])\n fitvals = np.max\n \n\n ax.set_xscale('log')\n ax.set_yscale('log')\n ax.set_xlim(1.e-24, 2.e-17)\n ax.set_ylim(1.e-9,0.1)\n set_ticks(ax, '0.6')\n\n ax.xaxis.grid(False,which='minor')\n ax.yaxis.grid(False,which='minor')\n\n ax.set_xlabel(r'density / g $\\mathdefault{cm^{-3}}$', fontproperties = tfm, size = 15)\n ax.set_ylabel(r'volume weighted PDF', fontproperties = tfm, size = 15)\n\n for label in ax.get_xticklabels() + ax.get_yticklabels():\n label.set_fontproperties(tfm)\n\n#plt.savefig(outdir+'VolumeDensityPDFs.pdf')\n\n mu = 2.33 # mean molecular weight\n mH = 1.6733e-24\n cdensmax = np.log10(10**2.0 / (mu * mH)) # convert these from g to n\n cdensmin = np.log10(10**-6.0 / (mu * mH))\n bins = 128\n binvals = np.arange(cdensmin, 1.000001*cdensmax, (cdensmax - cdensmin) / (bins))\n binmids = 0.5 * (np.roll(binvals, -1) + binvals)\n binmids = binmids[:len(binmids) - 1]\n \n files = [\n 'surface_density_0.hdf5',\n 'surface_density_1.hdf5',\n 'surface_density_2.hdf5']\n files = ['surface_density_0.hdf5']\n colors = [c1,c2,c3,c4]\n \n f = h5py.File('reduced_'+str(snap).zfill(5)+'/surface_density_0.hdf5', 'r')\n sd = f['surface_density']\n totalhist = np.zeros(bins)\n # totalhistall keeps count of how many pixels are part of the cloud's area\n totalhistall = 0\n print snap\n for j in xrange(sd.shape[0]):\n coldensvals = sd[j]\n coldensvals -= np.log10(mu * mH)\n #if j == 600:\n # print coldensvals\n # print cdensmin,cdensmax\n hist, binedges = np.histogram(coldensvals, range = (cdensmin, cdensmax), bins = binvals)\n # the maximum value of the histogram is the background ISM- don't count that \n # in the calculation of the cloud area\n totalhistall += np.sum(hist) - np.max(hist)\n totalhist += hist \n f.close() \n #totalhistall = np.sum(totalhist)\n ax2.plot(10**binmids, totalhist/totalhistall, color = colors[0], linewidth = 1.5)\n print totalhistall\n \n # since we have three channels contributing to this PDF, need this factor of three\n # to correctly normalize them.\n totalhistall *= 3\n \n f = h5py.File('reduced_'+str(snap).zfill(5)+'/surface_density_low_0.hdf5', 'r')\n sd = f['surface_density']\n totalhist = np.zeros(bins)\n for j in xrange(sd.shape[0]):\n coldensvals = sd[j]\n coldensvals -= np.log10(mu * mH)\n #if j == 600:\n # print coldensvals\n # print cdensmin,cdensmax\n hist, binedges = np.histogram(coldensvals, range = (cdensmin, cdensmax), bins = binvals)\n totalhist += hist \n f.close() \n ax2.plot(10**binmids, totalhist/totalhistall, color = colors[1], linewidth = 1.5)\n print np.sum(totalhist),totalhistall\n \n f = h5py.File('reduced_'+str(snap).zfill(5)+'/surface_density_mid_0.hdf5', 'r')\n sd = f['surface_density']\n totalhist = np.zeros(bins)\n for j in xrange(sd.shape[0]):\n coldensvals = sd[j]\n coldensvals -= np.log10(mu * mH)\n #if j == 600:\n # print coldensvals\n # print cdensmin,cdensmax\n hist, binedges = np.histogram(coldensvals, range = (cdensmin, cdensmax), bins = binvals)\n totalhist += hist \n f.close() \n ax2.plot(10**binmids, totalhist/totalhistall, color = colors[2], linewidth = 1.5)\n print np.sum(totalhist),totalhistall\n \n f = h5py.File('reduced_'+str(snap).zfill(5)+'/surface_density_high_0.hdf5', 'r')\n sd = f['surface_density']\n totalhist = np.zeros(bins)\n for j in xrange(sd.shape[0]):\n coldensvals = sd[j]\n coldensvals -= np.log10(mu * mH)\n #if j == 600:\n # print coldensvals\n # print cdensmin,cdensmax\n hist, binedges = np.histogram(coldensvals, range = (cdensmin, cdensmax), bins 
= binvals)\n totalhist += hist \n f.close() \n ax2.plot(10**binmids, totalhist/totalhistall, color = colors[3], linewidth = 1.5)\n np.sum(totalhist),totalhistall\n print np.sum(totalhist),totalhistall\n \n ax2.set_xlabel(r'surface density / $\\mathdefault{cm^{-2}}$', fontproperties = tfm, size = 15)\n #ax.set_ylabel('d', fontproperties = tfm, size = 15)\n ax2.set_ylabel(r'area weighted PDF', fontproperties = tfm, size = 15)\n \n ax2.set_xlim(0.99e19,1.01e24)\n ax2.set_ylim(0.99e-5,1.01e-1)\n ax2.set_yscale('log',nonposy='clip')\n ax2.set_xscale('log')\n set_ticks(ax2, '0.6')\n \n ax2.xaxis.grid(False,which='minor')\n ax2.yaxis.grid(False,which='minor')\n \n for label in ax2.get_xticklabels() + ax2.get_yticklabels():\n label.set_fontproperties(tfm)\n\n\n\n\n plt.savefig(framesdir+'BothDensityPDFs_'+str(snap).zfill(5)+'.png')\n plt.close()\n\n\n \n"
},
{
"alpha_fraction": 0.6795865893363953,
"alphanum_fraction": 0.7041343450546265,
"avg_line_length": 21.14285659790039,
"blob_id": "b0fdb5e868f578f2b9fc2842b73283285ca6a892",
"content_id": "742e945da681bca8a6321635c94c590f6bbacc0d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 774,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 35,
"path": "/turbgravfilaments2/convert_ppp_hdf5_to_fits.py",
"repo_name": "nickolas1/ramses_plot_scripts",
"src_encoding": "UTF-8",
"text": "from __future__ import division\n\nimport gc\nimport sys\nimport h5py\nfrom os.path import expanduser\nfrom astropy.io import fits\n\n\n# import ramses helper functions and get figure directory\nhomedir = expanduser('~')+'/'\n\n# import ramses helper functions and get figure directory\nsys.path.append(homedir+'pythonhelpers/ramses/')\nfrom ramses_helpers import *\n\n\nsnap = int(sys.argv[1])\n\ninfofile = 'reduced_'+str(snap).zfill(5)+'/info_'+str(snap).zfill(5)+'.txt'\n\n(time, unit_t) = get_time(infofile)\ntime_kyr = time * unit_t / (3.15569e7 * 1000)\n\ntimestr = str(int(round(time_kyr))).zfill(4)\n\n\nf = h5py.File('ppp'+str(snap).zfill(5)+'.hdf5', 'r')\noutcube = f['density']\n\nhdu = fits.PrimaryHDU(outcube)\nhdulist = fits.HDUList([hdu])\nhdulist.writeto('ppp.'+timestr+'.fits')\n\nf.close()"
},
{
"alpha_fraction": 0.542830765247345,
"alphanum_fraction": 0.5744631886482239,
"avg_line_length": 27.615894317626953,
"blob_id": "d519e08a3fe5f2cd1e6496151c657a1c7011e1f8",
"content_id": "7c9cc0daf7ec2783f096e662ac86f8f54ae4749a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4331,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 151,
"path": "/turbgravfilaments2/make_spectra_N2Hplus_json.py",
"repo_name": "nickolas1/ramses_plot_scripts",
"src_encoding": "UTF-8",
"text": "from __future__ import division\n\n\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\nimport gc\nimport sys\nimport h5py\nimport shutil\nimport glob\nfrom astropy.io import ascii\nfrom astropy import constants as const\nfrom astropy import units as u\nfrom os.path import expanduser\nimport os.path\nfrom scipy import special\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\"\"\"\nusage:\npython fooppv.py N A F\nN: number of output to use. reduced_N needs to be here.\nA: axis of the projection (0, 1, 2)\n\"\"\"\n\n# import ramses helper functions and get figure directory\nhomedir = expanduser('~')+'/'\n\n# import ramses helper functions and get figure directory\nsys.path.append(homedir+'pythonhelpers/ramses/')\nfrom ramses_helpers import *\n\n\n# downsample the spectra to a 256 squared grid\ninres = 1024#1024\noutres = 256#256\nstride = int(inres / outres)\nprint stride\n\nsnap = int(sys.argv[1])\naxis = int(sys.argv[2])\nfileprefix = 'reduced_'+str(snap).zfill(5)+'/'\n\nf = h5py.File(fileprefix+'posvel_'+str(axis)+'/spectrumvels.hdf5')\nvels = np.array(f['binmidskms'])\nf.close()\n\nf = open('velocities.json', 'w')\nf.write('[\\n{\"velocity\":[')\nfor v in xrange(len(vels)):\n f.write(str(round(vels[v],3)))\n if v < len(vels) - 1: \n f.write(',')\nf.write(']}\\n]')\nf.close()\n\noutspecs = np.array([[np.zeros(len(vels)) for x in xrange(outres)] for y in xrange(outres)])\n\n#\n# j is image up\n# |\n# | \n# |_______i is image right\n#\n# spectra files march along j\n#\ntotnonzero = 0\nfor inj in xrange(inres):\n specfile = fileprefix+'posvel_'+str(axis)+'/spectra_N2Hplus_'+str(inj).zfill(4)+'.hdf5'\n if os.path.isfile(specfile):\n f = h5py.File(specfile, 'r')\n specs = f['spectraN2Hplus']\n print specfile\n outj = inj//stride\n for ini in xrange(inres):\n outi = ini//stride\n outspecs[outi,outj] += specs[ini]\n totnonzero += len(specs[ini][specs[ini]>0])\n f.close()\nmeannonzero = outspecs.sum() / totnonzero\n#outspecs /= meannonzero\nnonzero = outspecs[outspecs > 0]\n\nprint np.mean(outspecs),' ',np.median(outspecs),' ',np.max(outspecs)\nprint np.mean(nonzero),' ',np.median(nonzero),' ',np.max(nonzero)\n\n\"\"\"\noutspecs /= np.median(nonzero)\nprint np.mean(outspecs),' ',np.median(outspecs),' ',np.max(outspecs)\n\n# if doing this, round to 1 decimal in the write section below\n\n# median is 3.15197674139e-24 for 256\n# median is 1.0888351136e-23 for 128. 
use the same conversion for n2h+?\n\"\"\"\n\n# new strategy: make the highest value = 512, divide by that and round.\n# highest density for snapshot_00018 in C18O is 1.55814587039e-18\noutspecs /= 1.55814587039e-18\noutspecs *= 512\n# if doing this, int the rounded values in the write section below\noutspecs = np.round(outspecs,0)\n\nf = open('spectra_N2Hplus_'+str(snap).zfill(5)+'.json', 'w')\nf.write('[\\n')\nfor j in xrange(outres):\n for i in xrange(outres): \n if outspecs[i,j].sum() > 0.2:\n f.write('{\"c\":'+str(i + j*outres)+',\"s\":[')\n for v in xrange(len(vels)):\n if outspecs[i,j,v] > 0:\n sstr = str(int(outspecs[i,j,v]))\n else:\n sstr = '0'\n f.write(sstr)\n if v < len(vels) - 1: \n f.write(',') \n f.write(']}')\n if i + j*outres < outres**2 - 1:\n f.write(',\\n')\nf.write(']')\nf.close()\n\nf = open('spectra_N2Hplus_'+str(snap).zfill(5)+'_sparse.json', 'w')\nf.write('[\\n')\nfor j in xrange(outres):\n for i in xrange(outres): \n if outspecs[i,j].sum() > 0.2:\n f.write('{\"c\":'+str(i + j*outres)+',\"s\":[')\n zerocount = 0\n for v in xrange(len(vels)):\n val = int(outspecs[i,j,v])\n if val > 0:\n if zerocount > 0:\n f.write(str(-zerocount))\n f.write(',')\n zerocount = 0\n f.write(str(val))\n if v < len(vels) - 1: \n f.write(',') \n else:\n zerocount += 1 \n if zerocount > 0:\n f.write(str(-zerocount))\n f.write(']}')\n if i + j*outres < outres**2 - 1:\n f.write(',\\n')\nf.write(']')\nf.close()\n\n \n\n "
},
{
"alpha_fraction": 0.5941619873046875,
"alphanum_fraction": 0.6353577971458435,
"avg_line_length": 32.22047424316406,
"blob_id": "6adb4875995cbc97ce0ae9795c21cbe9ae5d4778",
"content_id": "056af8d85370590781585d8e738a35e1d65768a2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4248,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 127,
"path": "/plot_PDFplots_from_reduced_data.py",
"repo_name": "nickolas1/ramses_plot_scripts",
"src_encoding": "UTF-8",
"text": "from __future__ import division\n\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\nimport gc\nimport sys\nimport h5py\nfrom os.path import expanduser\nfrom matplotlib.collections import LineCollection\n\n# import ramses helper functions and get figure directory\nhomedir = expanduser('~')+'/'\n\n# import ramses helper functions and get figure directory\nsys.path.append(homedir+'pythonhelpers/ramses/')\nfrom ramses_helpers import *\nmpl.rc_file(homedir+'pythonhelpers/ramses/matplotlibrc')\noutdir = get_output_path(homedir)\n\n# set some fonts\nfontdir = homedir+'Documents/astronomy/macfontsforpython/'\ntfm = fm.FontProperties( # tick font\n fname=fontdir+'Gotham-Book.ttf', size=13)\nlfm = fm.FontProperties( # label font main\n fname=fontdir+'Gotham-BookItalic.ttf', size=12)\n\n# the limits on the surface density color map is set up for the compact clouds. here we\n# see if we're looking at diffuse clouds, and if so we adjust for that.\ninfoname = 'reduced_00001/info_00001.txt'\n(boxlen, unit_l) = get_boxsize(infoname)\nif boxlen > 7:\n sdoff = np.log10(4)\n vdoff = np.log10(8)\nelse:\n sdoff = 0.0\n vdoff = 0.0\n\n\n\"\"\"\nThis first part makes plots of the surface density PDF\n\"\"\"\n\n# override the defaults for this movie plot\nmpl.rc('grid', color='0.15')\nmpl.rc('grid', linewidth='1.0')\nmpl.rc('axes', facecolor='0.0')\nmpl.rc('xtick', color='0.6')\nmpl.rc('ytick', color='0.6')\nmpl.rc('figure', facecolor='0.0')\nmpl.rc('savefig', facecolor='0.0')\n\nmu = 2.33 # mean molecular weight\nmH = 1.6733e-24\n\nsnapstart = int(sys.argv[1])\nsnapend = int(sys.argv[2])\nsnapiter = int(sys.argv[3])\n\nfor snap in range(snapstart, snapend, snapiter):\n \n fileprefix = 'reduced_'+str(snap).zfill(5)+'/'\n framesdir = outdir+'surfacedensity0pdfs/'\n if not os.path.exists(framesdir):\n os.makedirs(framesdir)\n \n fig = plt.figure(figsize = (5, 3.5))\n ax = fig.add_axes([0.2, 0.2, 0.75, 0.75])\n \n ax.set_xlim(0.99e19,1.01e24)\n ax.set_ylim(0.99e-5,1.01e-1)\n ax.set_yscale('log',nonposy='clip')\n ax.set_xscale('log')\n ax.set_axisbelow(False)\n \n cdensmax = np.log10(10**2.0 / (mu * mH)) # convert these from g to n\n cdensmin = np.log10(10**-6.0 / (mu * mH))\n bins = 128\n binvals = np.arange(cdensmin, 1.000001*cdensmax, (cdensmax - cdensmin) / (bins))\n binmids = 0.5 * (np.roll(binvals, -1) + binvals)\n binmids = binmids[:len(binmids) - 1]\n \n files = [\n 'surface_density_0.hdf5',\n 'surface_density_1.hdf5',\n 'surface_density_2.hdf5']\n files = ['surface_density_0.hdf5']\n colors = [c1,c2,c3]\n \n for i in xrange(len(files)):\n f = h5py.File('reduced_'+str(snap).zfill(5)+'/'+files[i], 'r')\n sd = f['surface_density']\n totalhist = np.zeros(bins)\n print snap\n for j in xrange(sd.shape[0]):\n coldensvals = sd[j]\n coldensvals -= np.log10(mu * mH)\n #if j == 600:\n # print coldensvals\n # print cdensmin,cdensmax\n hist, binedges = np.histogram(coldensvals, range = (cdensmin, cdensmax), bins = binvals)\n totalhist += hist \n f.close() \n ax.plot(10**binmids, totalhist/np.sum(totalhist), color = '1.0', linewidth = 1.5, alpha=0.8)\n \n plotlim = plt.xlim() + plt.ylim()\n print plotlim\n ax.imshow([[0,0],[1,1]], cmap='bone_r', interpolation='bicubic', extent=plotlim,alpha=0.4,zorder=-1)\n ax.fill_between(10**binmids, totalhist/np.sum(totalhist), 10, facecolor='0.0')\n\n set_ticks(ax, '0.15')\n ax.xaxis.grid(False,which='minor')\n ax.yaxis.grid(False,which='minor')\n\n ax.set_xlabel(r'surface density / $\\mathdefault{cm^{-2}}$', 
fontproperties = tfm, size = 15, color='0.6')\n #ax.set_ylabel('d', fontproperties = tfm, size = 15)\n ax.set_ylabel(r'volume weighted PDF', fontproperties = tfm, size = 15, color='0.6')\n\n for label in ax.get_xticklabels() + ax.get_yticklabels():\n label.set_fontproperties(tfm)\n \n [line.set_zorder(30) for line in ax.lines]\n [line.set_zorder(30) for line in ax.lines]\n \n plt.savefig(framesdir+'SurfaceDensityPDF_'+str(snap).zfill(5)+'.png', dpi=400) \n plt.close() \n\n\n \n \n\n"
},
{
"alpha_fraction": 0.5642147064208984,
"alphanum_fraction": 0.5958250761032104,
"avg_line_length": 33.18367385864258,
"blob_id": "8d5bbd7cd534071e5299bf69da12dccb4ad6fbf0",
"content_id": "3ffbbf325f82f24f83a9141b2321108784c1f5c2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5030,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 147,
"path": "/turbgravfilaments2/make_reduced_data_all.py",
"repo_name": "nickolas1/ramses_plot_scripts",
"src_encoding": "UTF-8",
"text": "from __future__ import division\n\nfrom yt.mods import *\nfrom yt.config import ytcfg\nimport gc\nimport sys\nimport h5py\nimport shutil\nfrom os.path import expanduser\n\n# import ramses helper functions and get figure directory\nhomedir = expanduser('~')+'/'\n\n# import ramses helper functions and get figure directory\nsys.path.append(homedir+'pythonhelpers/ramses/')\nfrom ramses_helpers import *\n\ndef _C18O(field, data):\n mu = 2.33 # mean molecular weight\n mH = 1.6733e-24\n lolim = 1000.0 * mu * mH # not interested in anything below 10^3 / cm^3\n hilim = 31622.0 * mu * mH # not interested in anything above 10^4.5 / com^3\n newfield = data['Density']\n antiselection = (data['Density'] < lolim) | (data['Density'] >= hilim)\n newfield[antiselection] = 0.0\n return newfield\n \ndef _N2Hplus(field, data):\n mu = 2.33 # mean molecular weight\n mH = 1.6733e-24\n lolim = 31622.0 * mu * mH # not interested in anything below 10^4.5 / cm^3\n newfield = data['Density']\n antiselection = (data['Density'] < lolim)\n newfield[antiselection] = 0.0\n return newfield \n\nMakeDensityPDF = False\n\nfor snap in range(int(sys.argv[1]),int(sys.argv[2]),int(sys.argv[3])):\n infoname = 'output_'+str(snap).zfill(5)+'/info_'+str(snap).zfill(5)+'.txt'\n \n # get time string\n (time, unit_t) = get_time(infofile)\n time_kyr = time * unit_t / (3.15569e7 * 1000)\n timestr = str(int(round(time_kyr))).zfill(4)\n \n sinkname = 'output_'+str(snap).zfill(5)+'/sink_'+str(snap).zfill(5)+'.out'\n sinkname2 = 'output_'+str(snap).zfill(5)+'/sink_'+str(snap).zfill(5)+'.csv'\n \n fileprefix = 'reduced_'+str(snap).zfill(5)+'/'\n sinknametime = fileprefix+'/sink.'+timestr+'.csv'\n\n # copy info files to the reduced directory\n if not os.path.exists(fileprefix):\n os.makedirs(fileprefix) \n # copy the infofile and sinkfile to the reduced directory \n shutil.copy(infoname, fileprefix)\n if os.path.exists(sinkname):\n shutil.copy(sinkname, fileprefix)\n if os.path.exists(sinkname2):\n shutil.copy(sinkname2, fileprefix) \n shutil.copy(sinkname2, sinknametime)\n \n \n # figure out resolution and box size\n (lmin, lmax) = get_level_min_max(infoname)\n (boxlen, unit_l) = get_boxsize(infoname)\n\n ds = load(infoname)\n \n # add new density fields\n add_field('C18O', function=_C18O)\n add_field('N2Hplus', function=_N2Hplus)\n \n # center on original center of cloud\n cntr = [0.5, 0.5, 0.5]\n\n wd = 1.0\n # full res should be base resolution times 2**levels of refinement * wd\n # we're going to reduce to a 1024 squared image\n lmaxplot = min(10, lmax) \n resx = int(wd * 2**lmaxplot)\n res = (resx, resx)\n ht = wd * res[0] / res[1]\n width = (wd, 'unitary')\n height = (ht, 'unitary')\n\n for i in range(3):\n # get projections in each direction\n proj = ds.h.proj('C18O', i)\n frb = proj.to_frb(width, res, center = cntr, height = height)\n print 'done creating C18O frb ',i \n filename = fileprefix+'surface_density_C18O'+str(i)+'.hdf5'\n f = h5py.File(filename, 'w')\n dset = f.create_dataset('surface_density_C18O', data = np.log10(frb['C18O']), dtype='float32')\n print 'done creating HDF5 dset for C18O ',i\n f.close()\n del(proj)\n del(frb)\n del(f)\n del(dset)\n gc.collect() \n \n proj = ds.h.proj('N2Hplus', i)\n frb = proj.to_frb(width, res, center = cntr, height = height)\n print 'done creating N2Hplus frb ',i \n filename = fileprefix+'surface_density_N2Hplus'+str(i)+'.hdf5'\n f = h5py.File(filename, 'w')\n dset = f.create_dataset('surface_density_N2Hplus', data = np.log10(frb['N2Hplus']), dtype='float32')\n print 'done creating 
HDF5 dset for N2Hplus ',i\n f.close()\n del(proj)\n del(frb)\n del(f)\n del(dset)\n gc.collect()\n \n proj = ds.h.proj('Density', i)\n frb = proj.to_frb(width, res, center = cntr, height = height)\n print 'done creating Density frb ',i \n filename = fileprefix+'surface_density'+str(i)+'.hdf5'\n f = h5py.File(filename, 'w')\n dset = f.create_dataset('surface_density', data = np.log10(frb['Density']), dtype='float32')\n print 'done creating HDF5 dset for Density ',i\n f.close()\n del(proj)\n del(frb)\n del(f)\n del(dset)\n gc.collect() \n \n \n if MakeDensityPDF: \n ad = ds.h.all_data()\n nbins = 128\n dmin = 1.e-27\n dmax = 1.e-17\n profilename = fileprefix+'MassAndVolumeInDensityBins.dat'\n profile = BinnedProfile1D(ad,nbins,'Density',dmin,dmax,end_collect=True)\n profile.add_fields(\"CellMassMsun\", weight=None)\n profile.add_fields(\"CellVolume\", weight=None)\n profile.write_out(profilename)\n del(ad)\n del(profile)\n \n del(ds)\n gc.collect()\n \n"
},
{
"alpha_fraction": 0.5245046019554138,
"alphanum_fraction": 0.5451909303665161,
"avg_line_length": 35.91071319580078,
"blob_id": "32d48205baf1c41de47ac41ee4d26609461d3fda",
"content_id": "c393ae2d24e59e6f2d24e0870863eaebefc8e953",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10345,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 280,
"path": "/turbgravfilaments2/make_spectra_basic.py",
"repo_name": "nickolas1/ramses_plot_scripts",
"src_encoding": "UTF-8",
"text": "from __future__ import division\n\nfrom yt.mods import *\nimport numpy as np\nimport gc\nimport sys\nimport h5py\nimport shutil\nfrom os.path import expanduser\nfrom scipy import special\n\n\"\"\"\nusage: python make_spectra_basic.py <snap> <axis>\ne.g. python make_spectra_basic.py 18 2 will get create spectra along the z axis for\noutput_00018\n\nthis requires h5py. if you don't want to mess around with hdf5, save the data\nin some other format\n\nimportant comments about how things work, or things you might want to change, are in this \ntriple-quotes style\n\"\"\"\n\n\"\"\"\n these definitions are for looking at certain density ranges\n\"\"\"\ndef _C18O(field, data):\n mu = 2.33 # mean molecular weight\n mH = 1.6733e-24\n lolim = 1000.0 * mu * mH # not interested in anything below 10^3 / cm^3\n hilim = 31622.0 * mu * mH # not interested in anything above 10^4.5 / com^3\n newfield = data['Density']\n antiselection = (data['Density'] < lolim) | (data['Density'] >= hilim)\n newfield[antiselection] = 0.0\n return newfield\n \ndef _N2Hplus(field, data):\n mu = 2.33 # mean molecular weight\n mH = 1.6733e-24\n lolim = 31622.0 * mu * mH # not interested in anything below 10^4.5 / cm^3\n newfield = data['Density']\n antiselection = (data['Density'] < lolim)\n newfield[antiselection] = 0.0\n return newfield \n \n# grab the command line arguments\nsnap = int(sys.argv[1])\naxis = int(sys.argv[2])\n\nif axis == 0:\n los = 'x'\n dlos = 'dx'\n vlos = 'x-velocity'\n sliceax = 'z'\nif axis == 1:\n los = 'y'\n dlos = 'dy'\n vlos = 'y-velocity'\n sliceax = 'z'\nif axis == 2:\n los = 'z'\n dlos = 'dz'\n vlos = 'z-velocity'\n sliceax = 'y'\n\ninfoname = 'output_'+str(snap).zfill(5)+'/info_'+str(snap).zfill(5)+'.txt'\n\n\"\"\" \n create the directory you want to put the spectra files in\n\"\"\"\nspecdir = 'reduced_'+str(snap).zfill(5)+'/posvel_'+str(axis)+'/'\nif not os.path.exists(specdir):\n os.makedirs(specdir)\n\nds = load(infoname)\n\n\"\"\" \n comment both of these out if you aren't using a density range\n\"\"\"\n# add new density fields\nadd_field('C18O', function=_C18O)\n#add_field('N2Hplus', function=_N2Hplus)\n\n\"\"\"\n set the velocity limits of your spectra in cgs\n\"\"\"\nvmax = 2.5e5\nvmin = -2.5e5\n# roughly match hacar et al by takin 0.05 km/s bins\nbins = (vmax - vmin) / 1.e5 / 0.05\nbinvals = np.arange(vmin, 1.000001*vmax, (vmax - vmin) / bins)\nbinmids = 0.5 * (np.roll(binvals, -1) + binvals)\nbinmids = binmids[:len(binmids) - 1]\n# get a version of the bins in km/s instead of cgs\nbinmidskms = binmids / 1.e5\n \n# save the velocities to a file\nf = h5py.File(specdir+'spectrumvels.hdf5', 'w')\ndset = f.create_dataset('binmidskms', data = binmidskms, dtype='float32')\nf.close()\n\n\"\"\"\n to keep things manageable, make this output map on the 1024**3 base grid.\n outres: the output resolution.\n refinefac: how far down the refinement hierarchy to go. this choice creates:\n inres: the resolution we sample the grid at.\n\"\"\"\noutres = 1024\nrefinefac = 8\n\noutdres = 1.0 / outres\ninres = outres * refinefac\nindres = 1.0 / inres\n\n\"\"\"\n start at the bottom of the projection (y = 0), and create slices through the box in \n the middle of each grid cell. we'll create an output file for each slice. if we're \n interested in z velocities, the slices will be at given values of y, and will be of \n the x-z plane. 
\n \n march along the x direction of the slice, and create a spectrum for that (x,y) point\n by taking each value of v and rho along the z direction of the slice.\n\"\"\"\n\nfor j in xrange(outres):\n outpty = (j + 0.5) * outdres \n thesehists = []\n print j, outpty\n \n jj = 0\n for rj in xrange(refinefac):\n inpty = (j*refinefac + jj + 0.5) * indres\n print 'inpty: ',inpty\n # get a slice\n slc = ds.h.slice(sliceax, inpty)\n \n # get it into a frb\n frb = slc.to_frb(\n (1.0, 'unitary'), # get the whole extent of the box\n inres, # don't degrade anything\n center = [0.5, 0.5, 0.5], # centered in the box\n height = (1.0, 'unitary')) # get the whole extent of the box\n \n \"\"\"\n if you just want density weighted velocity rather than a density subrange,\n replace this with np.array(frb['rho'])\n \n replace sigma with some sort of line width to smooth the velocities\n \"\"\"\n rhoC18O = np.array(frb['C18O'])\n # rhoN2Hplus = np.array(frb['N2Hplus'])\n sigmaC18O = 0.0526 # thermal width of C18O line in km/s\n \n sigma = sigmaC18O * 1.e5 # convert to cm/s\n erfdenom = np.sqrt(2*sigma**2) #denominator for the error function involved in \n #calculating the cumulative distribution of a gaussian\n x = np.array(frb[los]) # the position along the line of sight\n vx = np.array(frb[vlos]) # the line of sight velocities\n dx = np.array(frb[dlos]) # the size of the cell at each point\n mindx = np.min(dx) # the minimum cell size in this slice\n print 'max(dx), min(dx), outdres: ',np.max(dx),np.min(dx),outdres\n \n weight = indres * rhoC18O \n # we need to grab rows from the slice differently depending on what axis we're projecting\n hist = np.zeros(len(binmids))\n erfvals = np.zeros(len(binvals))\n \"\"\"\n this if axis == 0 part is very wrong .... \n \"\"\"\n if axis == 0: \n for i in xrange(inres):\n hist, binedges = np.histogram(\n vx[i,:],\n range = (vmin, vmax),\n bins = binvals,\n weights = weight[i,:])\n thesehists.append(hist)\n \"\"\"\n this if axis > 0 part is up to date\n \"\"\"\n if axis > 0:\n i = 0\n for ii in xrange(inres):\n # for each point along the slice, march along the projecting dimension\n # and turn each detection into a gaussian. bin this gaussian into the \n # velbins.\n hist[:] = 0\n k = 0\n dkmin = 1 # keep track of the minimum cell size along this line of sight\n if(weight[:,i].sum() > 0): # if there isn't any interesting density in this slice, leave the spectra at zeros\n for ik in xrange(len(vx[:,i])):\n peak = vx[k,i]\n thisdx = dx[k,i]\n dkmin = min(dkmin, thisdx)\n \"\"\"\n this if elif elif else thing lets you skip needless computations\n if the cell we're looking at is unrefined. \n we do this in all three dimensions. 
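(added example: with refinefac = 8, an unrefined cell spanning outdres advances\n the sample index by 8 at once instead of visiting all 8 identical sub-samples)\n 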
this saves a great deal \n of time if the simulation is large\n \"\"\"\n if(thisdx == outdres): # this cell is unrefined\n kincr = refinefac\n elif(thisdx == outdres / 2):\n kincr = int(refinefac / 2)\n elif(thisdx == outdres / 4):\n kincr = int(refinefac / 4)\n else:\n kincr = 1\n # calculate the cumulative distribution of this line at each velocity bin edge\n cdfs = 0.5 * (1 + special.erf((binvals - peak) / erfdenom)) * weight[k,i] * kincr\n # subtract adjacent values to get the contribution to each bin\n hist = hist + np.diff(cdfs)\n k += kincr\n if(k == len(vx[:,i])):\n break\n if(dkmin == outdres):\n iincr = refinefac\n elif(dkmin == outdres / 2):\n iincr = int(refinefac / 2)\n elif(dkmin == outdres / 4):\n iincr = int(refinefac / 4)\n else:\n iincr = 1\n \"\"\"\n this next bit handles binning together a refinefac**2 patch into one output cell\n jj==0 handles glomming together the direction perpindicular to the slices\n i%refinefac==0 handles glomming to gether along the slice\n \"\"\"\n if(jj == 0 and i%refinefac == 0): \n thesehists.append(hist * iincr)\n else:\n thesehists[i//refinefac] += hist * iincr\n i += iincr\n if(i == inres):\n break\n # figure out if we can skip reading some of these slices\n if(mindx == outdres): # there are no refined cells in this slice\n # all the subslices are going to be the same\n jincr = refinefac\n elif (mindx == outdres / 2): # there is only one level of refinement in this slice\n # the first refinefac/2 subsclices are going to be the same\n jincr = int(refinefac / 2)\n elif (mindx == outdres / 4): # there are two levels of refinement in this slice\n # the first refinefac/4 subsclices are going to be the same\n jincr = int(refinefac / 4)\n else:\n jincr = 1\n if(jj == 0):\n thesehistsaccum = np.array(thesehists) * jincr\n else:\n thesehistsaccum += np.array(thesehists) * jincr\n jj += jincr\n if(jj == refinefac):\n break;\n \"\"\"\n once we have the histograms of mass-weighted velocity along each point for this\n row, save it. I use hdf5 files, you can save it however you want\n \"\"\"\n f = h5py.File(specdir+'spectra_C18O_'+str(j).zfill(4)+'.hdf5', 'w')\n dset = f.create_dataset('spectraC18O', data = thesehistsaccum, dtype='float32')\n dset.attrs['slowindex'] = j\n dset.attrs[sliceax] = outpty\n f.close()\n \n \"\"\"\n force deletions for memory preservation\n \"\"\"\n del(slc)\n del(frb)\n del(f)\n del(dset)\n del(x)\n del(vx)\n del(dx)\n del(rhoC18O)\n del(weight)\n del(hist)\n del(thesehists)\n del(thesehistsaccum)\n gc.collect()\n \n \n"
},
{
"alpha_fraction": 0.5901306867599487,
"alphanum_fraction": 0.6335060596466064,
"avg_line_length": 35.52263259887695,
"blob_id": "b3d113f8db5973c846454426e2b47a7b6a803c25",
"content_id": "c74c6f3fd24c8ae57c780f686e18708809ac6af8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8876,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 243,
"path": "/turbgravfilaments2/plot_CO_filament_finder_image.py",
"repo_name": "nickolas1/ramses_plot_scripts",
"src_encoding": "UTF-8",
"text": "from __future__ import division\n\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\nimport matplotlib.colors as col\nimport matplotlib.cm as cm\nimport gc\nimport sys\nimport h5py\nfrom astropy import constants as const\nfrom astropy import units as u\nfrom astropy.io import ascii\nfrom os.path import expanduser\nfrom matplotlib import rcParams\n\n\n\"\"\"\nusage: python plot_CO_filament_finder_image_turbgrav.py <snap> <axis>\n\"\"\"\n\n\n# import ramses helper functions and get figure directory\nhomedir = expanduser('~')+'/'\n\n# import ramses helper functions and get figure directory\nsys.path.append(homedir+'pythonhelpers/ramses/')\nfrom ramses_helpers import *\n#mpl.rc_file(homedir+'pythonhelpers/ramses/matplotlibrc')\n\n# set some fonts\nfontdir = homedir+'Documents/astronomy/macfontsforpython/'\ntfm = fm.FontProperties( # tick font\n fname=fontdir+'Gotham-Book.ttf', size=7)\nlfm = fm.FontProperties( # label font main\n fname=fontdir+'Gotham-BookItalic.ttf', size=12) \n\nrcParams['xtick.direction'] = 'out'\n\noutdir = get_output_path(homedir)\noutdir = './'\n\nsnapstr = str(int(sys.argv[1])).zfill(5)\ninfoname = 'reduced_'+snapstr+'/info_'+snapstr+'.txt'\n(boxlen, unit_l) = get_boxsize(infoname)\n\nsnap = int(sys.argv[1])\naxis = int(sys.argv[2])\n\n\n\nimshowmap = 'nickmapVD2'\n#imshowmap = 'bone_r'\n\n\"\"\"\nthis data is generated by making a surface density map taking into account only\ngas that is between 10^3 and 10^4.5 cm^-3. to convert to a crude approximation \nof a C18O map, use\nDavid S. Meier and Jean L. Turner ApJ 551:687 2001 equation 2\n\nN(H2)C18O = 2.42e14 cm^-2 [H2]/[C18O] * exp(5.27/Tex)/(exp(5.27/Tex)-1) IC18O K km/s\n[H2]/[C18O] = 2.94e6\n\nso first convert g cm^-2 to cm^-2 using mu = 2.33, then convert to ICO using Tex=10 K\n\"\"\"\ncdmin = 10**-3.3 \ncdmax = 10**-1.5\ncdmin = 0\ncdmax = 4\n\nfileprefix = 'reduced_'+str(snap).zfill(5)+'/'\nsinkname = fileprefix+'sink_'+str(snap).zfill(5)+'.csv'\n\n\nfile = fileprefix+'surface_density_C18O'+str(axis)+'.hdf5'\n\nif os.path.exists(file):\n print snap,file\n f = h5py.File(file, 'r')\n sd = f['surface_density_C18O']\n sd = 10**np.array(sd) # convert to linear units\n print np.mean(sd),np.max(sd)\n sd /= (2.33 * const.m_p.cgs.value) # convert to number density\n print np.mean(sd),np.max(sd)\n sd /= (2.42e14 * 2.94e6) # non-temperature factors of IC18O conversion\n sd /= (np.exp(5.27/10) / (np.exp(5.27/10) - 1)) # temperature part\n print np.mean(sd),np.max(sd)\n fig = plt.figure(figsize = (1.1111111*sd.shape[0]/200, 1.1111111*sd.shape[1]/200), dpi=200)\n axu = fig.add_axes([0.05, 0.95, .9, .9])\n axl = fig.add_axes([-0.85, 0.05, .9, .9])\n axr = fig.add_axes([0.95, 0.05, 0.9, 0.9])\n axd = fig.add_axes([0.05, -0.85, 0.9, 0.9])\n axul = fig.add_axes([-0.85, 0.95, 0.9, 0.9])\n axur = fig.add_axes([0.95, 0.95, 0.9, 0.9])\n axdl = fig.add_axes([-0.85, -0.85, 0.9, 0.9])\n axdr = fig.add_axes([0.95, -0.85, 0.9, 0.9])\n ax = fig.add_axes([0.05, 0.05, .9, .9])\n ax4 = fig.add_axes([0, 0, 1, 1])\n \n axu.imshow(sd,interpolation = 'nearest',origin = 'lower',vmin = cdmin,vmax = cdmax,cmap = imshowmap)\n axl.imshow(sd,interpolation = 'nearest',origin = 'lower',vmin = cdmin,vmax = cdmax,cmap = imshowmap)\n axr.imshow(sd,interpolation = 'nearest',origin = 'lower',vmin = cdmin,vmax = cdmax,cmap = imshowmap)\n axd.imshow(sd,interpolation = 'nearest',origin = 'lower',vmin = cdmin,vmax = cdmax,cmap = imshowmap)\n axul.imshow(sd,interpolation = 'nearest',origin 
= 'lower',vmin = cdmin,vmax = cdmax,cmap = imshowmap)\n axur.imshow(sd,interpolation = 'nearest',origin = 'lower',vmin = cdmin,vmax = cdmax,cmap = imshowmap)\n axdl.imshow(sd,interpolation = 'nearest',origin = 'lower',vmin = cdmin,vmax = cdmax,cmap = imshowmap)\n axdr.imshow(sd,interpolation = 'nearest',origin = 'lower',vmin = cdmin,vmax = cdmax,cmap = imshowmap)\n ax.imshow(sd,\n interpolation = 'nearest',\n origin = 'lower',\n vmin = cdmin,\n vmax = cdmax,\n cmap = imshowmap)\n \n \n \n \"\"\"\n xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n add a colorbar\n xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n \"\"\"\n #ax2 = fig.add_axes([0.45, 0.76, 0.4, 0.015])\n ax2 = fig.add_axes([.07, .12, .4, .015])\n a = np.outer(np.arange(cdmin, cdmax, (cdmax - cdmin)/255), np.ones(10)).T\n ax2.imshow(a, \n aspect = 'auto',\n interpolation = 'nearest',\n origin = 'lower',\n vmin = cdmin,\n vmax = cdmax,\n cmap = imshowmap,\n extent = [cdmin, cdmax, 0, 1])\n ax2.set_frame_on(False)\n ax2.axes.get_yaxis().set_visible(False)\n ax2.xaxis.set_ticks(np.arange(cdmin, cdmax+1, 1.0))\n ax2.set_xlabel(r'$\\mathdefault{I_{C^{18}O}}$ / K km s$\\mathdefault{^{-1}}$', fontproperties = tfm, size=10, color='0.15')\n \n set_ticks(ax2, '0.15')\n for label in ax2.get_xticklabels() + ax2.get_yticklabels():\n label.set_fontproperties(tfm)\n\n\n \"\"\"\n xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n add a scalebar\n xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n \"\"\"\n ax3 = fig.add_axes([0.27, 0.145, 0.2, 0.0015])\n a = np.outer(np.ones(100)*.8*cdmax, np.ones(10)).T\n ax3.imshow(a, \n aspect = 'auto',\n interpolation = 'nearest',\n origin = 'lower',\n vmin = cdmin,\n vmax = cdmax,\n cmap = imshowmap,\n extent = [cdmin, cdmax, 0, 1])\n ax3.set_frame_on(False)\n ax3.axes.get_yaxis().set_visible(False)\n ax3.axes.get_xaxis().set_visible(False)\n ax3.text(0.1, 0.85, r'2pc', transform = ax3.transAxes,\n va = 'bottom', ha = 'left', fontproperties = tfm, color='0.15', snap = False)\n set_ticks(ax3, '0.15')\n for label in ax3.get_xticklabels() + ax3.get_yticklabels():\n label.set_fontproperties(tfm)\n \n\n \n ax.autoscale(False)\n # define a rectangle by drawing a line on a filament and choosing a width\n rectdata = ascii.read(fileprefix+'filaments'+str(axis)+'_'+str(snap).zfill(5)+'.txt')\n for fil in rectdata:\n leftpoint = np.array([fil[1], fil[2]])\n rightpoint = np.array([fil[3], fil[4]])\n width = fil[5]\n vector = rightpoint - leftpoint\n orthovec = (-vector[1], vector[0])\n orthovec /= np.linalg.norm(orthovec)\n x = (leftpoint[0], rightpoint[0])\n y = (leftpoint[1], rightpoint[1])\n ul = leftpoint + orthovec * width/2\n ll = leftpoint - orthovec * width/2\n ur = rightpoint + orthovec * width/2\n lr = rightpoint - orthovec * width/2\n rectangle = np.transpose([ul, ll, lr, ur, ul])\n rectangle2 = rectangle\n #ax.plot(x,y,lw=.3,color=cred)\n #ax.plot(rectangle[0]*sd.shape[0], rectangle[1]*sd.shape[0], lw=.7,color='m', solid_joinstyle='miter') \n ax4.plot(0.05 + 0.9*rectangle2[0], 0.05 + 0.9*rectangle2[1], lw=.7,color='m', solid_joinstyle='miter') \n # turn off axes\n \n # outline the original periodic box\n ax.plot([0, sd.shape[0], sd.shape[0], 0, 0],[0, 0, sd.shape[0], sd.shape[0], 0],\n lw=0.5, ls='--', color = 'c', solid_joinstyle='miter')\n \n ax.set_frame_on(False)\n ax.axes.get_yaxis().set_visible(False)\n ax.axes.get_xaxis().set_visible(False)\n axu.set_frame_on(False)\n 
axu.axes.get_yaxis().set_visible(False)\n axu.axes.get_xaxis().set_visible(False)\n axl.set_frame_on(False)\n axl.axes.get_yaxis().set_visible(False)\n axl.axes.get_xaxis().set_visible(False)\n axr.set_frame_on(False)\n axr.axes.get_yaxis().set_visible(False)\n axr.axes.get_xaxis().set_visible(False)\n axd.set_frame_on(False)\n axd.axes.get_yaxis().set_visible(False)\n axd.axes.get_xaxis().set_visible(False)\n axul.set_frame_on(False)\n axul.axes.get_yaxis().set_visible(False)\n axul.axes.get_xaxis().set_visible(False)\n axur.set_frame_on(False)\n axur.axes.get_yaxis().set_visible(False)\n axur.axes.get_xaxis().set_visible(False)\n axdl.set_frame_on(False)\n axdl.axes.get_yaxis().set_visible(False)\n axdl.axes.get_xaxis().set_visible(False)\n axdr.set_frame_on(False)\n axdr.axes.get_yaxis().set_visible(False)\n axdr.axes.get_xaxis().set_visible(False)\n \n ax4.set_frame_on(False)\n ax4.axes.get_yaxis().set_visible(False)\n ax4.axes.get_xaxis().set_visible(False)\n ax4.set_xlim(0,1)\n ax4.set_ylim(0,1)\n \n framesdir = 'finderimage'+str(axis)+'/'\n if not os.path.exists(framesdir):\n os.makedirs(framesdir)\n\n framename = framesdir+'finderimage'+str(axis)+'_frame_'+str(snap).zfill(5)\n plt.savefig(framename+'.png', dpi = 400)\n plt.savefig(framename+'.pdf', rasterized = True)\n \n f.close() \n plt.close() \n del(f)\n del(sd)\n gc.collect()\n\n"
},
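The docstring above spells out the Meier & Turner (2001) C18O conversion that the script then applies inline. As a sanity check, here is a minimal standalone sketch of that same conversion, assuming the input is a log10 surface-density map in g cm^-2 as stored in the hdf5 files; mu, mH, Tex and the abundance ratio are the values quoted in the script.

import numpy as np

def log_sd_to_ic18o(log_sd, mu=2.33, mH=1.6733e-24, tex=10.0):
    # N(H2) = 2.42e14 * [H2]/[C18O] * exp(5.27/Tex)/(exp(5.27/Tex)-1) * I_C18O
    # with [H2]/[C18O] = 2.94e6; invert for I_C18O in K km/s.
    n_h2 = 10.0**np.asarray(log_sd) / (mu * mH)           # column density, cm^-2
    tfac = np.exp(5.27 / tex) / (np.exp(5.27 / tex) - 1.0)
    return n_h2 / (2.42e14 * 2.94e6 * tfac)

print(log_sd_to_ic18o(-2.0))  # roughly 1.5 K km/s at 10^-2 g cm^-2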
{
"alpha_fraction": 0.5551625490188599,
"alphanum_fraction": 0.5936902761459351,
"avg_line_length": 32.26837158203125,
"blob_id": "9e7c2334bf2cb17c4ff2c15a5392da9693f2e3c2",
"content_id": "3deb0c6d83847b900a331d442e99d11b352fd165",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10460,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 313,
"path": "/make_plots_from_reduced_data.py",
"repo_name": "nickolas1/ramses_plot_scripts",
"src_encoding": "UTF-8",
"text": "from __future__ import division\n\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\nimport matplotlib.colors as col\nimport matplotlib.cm as cm\nimport gc\nimport sys\nimport h5py\nfrom os.path import expanduser\nfrom matplotlib.collections import LineCollection\n\n# import ramses helper functions and get figure directory\nhomedir = expanduser('~')+'/'\n\n# import ramses helper functions and get figure directory\nsys.path.append(homedir+'pythonhelpers/ramses/')\nfrom ramses_helpers import *\nmpl.rc_file(homedir+'pythonhelpers/ramses/matplotlibrc')\noutdir = get_output_path(homedir)\n\n# set some fonts\nfontdir = homedir+'Documents/astronomy/macfontsforpython/'\ntfm = fm.FontProperties( # tick font\n fname=fontdir+'Gotham-Book.ttf', size=13)\nlfm = fm.FontProperties( # label font main\n fname=fontdir+'Gotham-BookItalic.ttf', size=12)\n\n# the limits on the surface density color map is set up for the compact clouds. here we\n# see if we're looking at diffuse clouds, and if so we adjust for that.\ninfoname = 'reduced_00001/info_00001.txt'\n(boxlen, unit_l) = get_boxsize(infoname)\nif boxlen > 7:\n sdoff = np.log10(4)\n vdoff = np.log10(8)\nelse:\n sdoff = 0.0\n vdoff = 0.0\n\n\ndef make_my_cmaps():\n # surface density color map\n x = [-4.11 - sdoff, -3.11 - sdoff, -.931 - sdoff, .069 - sdoff]\n # x[3] and x[0] are cdmax and cdmin below.\n beginx = (x[1] - x[0]) / (x[3] - x[0])\n begingray = 0.9\n transitionx = (x[2] - x[0]) / (x[3] - x[0])\n transitiongray = 0.35\n finishr = 37 / 256\n finishg = 49 / 256\n finishb = 111 / 256\n cdict = {'red': ((0.0, 1.0, 1.0),\n (beginx, begingray, begingray),\n (transitionx, transitiongray, transitiongray),\n (1.0, finishr, finishr)),\n 'green': ((0.0, 1.0, 1.0),\n (beginx, begingray, begingray),\n (transitionx, transitiongray, transitiongray),\n (1.0, finishg, finishg)),\n 'blue': ((0.0, 1.0, 1.0),\n (beginx, begingray, begingray),\n (transitionx, transitiongray, transitiongray),\n (1.0, finishb, finishb))} \n cmap1 = col.LinearSegmentedColormap('my_colormapSD', cdict, N=256, gamma=1.0)\n cm.register_cmap(name='nickmapSD', cmap=cmap1)\n \n \nmake_my_cmaps()\n\n\n# these two from http://nbviewer.ipython.org/urls/raw.github.com/dpsanders/matplotlib-examples/master/colorline.ipynb\ndef make_segments(x, y):\n '''\n Create list of line segments from x and y coordinates, in the correct format for LineCollection:\n an array of the form numlines x (points per line) x 2 (x and y) array\n '''\n \n points = np.array([x, y]).T.reshape(-1, 1, 2)\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n #segments2 = np.concatenate([0.999*points[:], 1.001*points[:]], axis=1)\n #segments = np.concatenate([segments1, segments2])\n \n # segments[:,0,0] *= 0.999\n # segments[:,1,0] *= 1.001\n \n return segments\n\n\n# Interface to LineCollection:\n\ndef colorline(x, y, z=None, cmap=plt.get_cmap('copper'), norm=plt.Normalize(0.0, 1.0), linewidth=6, alpha=1.0):\n '''\n Plot a colored line with coordinates x and y\n Optionally specify colors in the array z\n Optionally specify a colormap, a norm function and a line width\n '''\n \n # Default colors equally spaced on [0,1]:\n if z is None:\n z = np.linspace(0.0, 1.0, len(x))\n \n # Special case if a single number:\n if not hasattr(z, \"__iter__\"): # to check for numerical input -- this is a hack\n z = np.array([z])\n \n z = np.asarray(z)\n \n segments = make_segments(x, y)\n lc = LineCollection(segments, array=z, cmap=cmap, norm=norm, 
linewidth=linewidth,alpha=alpha)\n \n ax = plt.gca()\n ax.add_collection(lc)\n \n return lc\n \n\n\n\"\"\"\nThis first part makes plots of the surface density PDF\n\"\"\"\n\n# override the defaults for this movie plot\nmpl.rc('grid', color='0.15')\nmpl.rc('grid', linewidth='0.8')\nmpl.rc('axes', facecolor='0.0')\nmpl.rc('xtick', color='0.6')\nmpl.rc('ytick', color='0.6')\nmpl.rc('figure', facecolor='0.0')\nmpl.rc('savefig', facecolor='0.0')\n\nmu = 2.33 # mean molecular weight\nmH = 1.6733e-24\n\nsnapstart = int(sys.argv[1])\nsnapend = int(sys.argv[2])\nsnapiter = int(sys.argv[3])\n\nfor snap in range(snapstart, snapend, snapiter):\n \n fileprefix = 'reduced_'+str(snap).zfill(5)+'/'\n framesdir = outdir+'surfacedensity0pdfs/'\n if not os.path.exists(framesdir):\n os.makedirs(framesdir)\n \n fig = plt.figure(figsize = (5, 3.5))\n ax = fig.add_axes([0.2, 0.2, 0.75, 0.75])\n \n ax.set_xlim(0.99e19,1.01e24)\n ax.set_ylim(0.99e-5,1.01e-1)\n ax.set_yscale('log',nonposy='clip')\n ax.set_xscale('log')\n ax.set_axisbelow(False)\n \n cdensmax = np.log10(10**2.0 / (mu * mH)) # convert these from g to n\n cdensmin = np.log10(10**-6.0 / (mu * mH))\n bins = 128\n binvals = np.arange(cdensmin, 1.000001*cdensmax, (cdensmax - cdensmin) / (bins))\n binmids = 0.5 * (np.roll(binvals, -1) + binvals)\n binmids = binmids[:len(binmids) - 1]\n \n files = [\n 'surface_density_0.hdf5',\n 'surface_density_1.hdf5',\n 'surface_density_2.hdf5']\n files = ['surface_density_0.hdf5']\n colors = [c1,c2,c3]\n \n for i in xrange(len(files)):\n f = h5py.File('reduced_'+str(snap).zfill(5)+'/'+files[i], 'r')\n sd = f['surface_density']\n totalhist = np.zeros(bins)\n print snap\n for j in xrange(sd.shape[0]):\n coldensvals = sd[j]\n coldensvals -= np.log10(mu * mH)\n #if j == 600:\n # print coldensvals\n # print cdensmin,cdensmax\n hist, binedges = np.histogram(coldensvals, range = (cdensmin, cdensmax), bins = binvals)\n totalhist += hist \n f.close() \n ax.plot(10**binmids, totalhist/np.sum(totalhist), color = '1.0', linewidth = 1.5, alpha=0.8)\n \n plotlim = plt.xlim() + plt.ylim()\n ax.imshow([[0,0],[1,1]], cmap='bone_r', interpolation='bicubic', extent=plotlim,alpha=0.2,zorder=-1)\n ax.fill_between(10**binmids, totalhist/np.sum(totalhist), 10, facecolor='0.0')\n\n set_ticks(ax, '0.15')\n ax.xaxis.grid(False,which='minor')\n ax.yaxis.grid(False,which='minor')\n\n ax.set_xlabel(r'surface density / $\\mathdefault{cm^{-2}}$', fontproperties = tfm, size = 15, color='0.6')\n #ax.set_ylabel('d', fontproperties = tfm, size = 15)\n ax.set_ylabel(r'volume weighted PDF', fontproperties = tfm, size = 15, color='0.6')\n\n for label in ax.get_xticklabels() + ax.get_yticklabels():\n label.set_fontproperties(tfm)\n \n [line.set_zorder(30) for line in ax.lines]\n [line.set_zorder(30) for line in ax.lines]\n \n plt.savefig(framesdir+'SurfaceDensityPDF_'+str(snap).zfill(5)+'.png', dpi=400) \n plt.close() \n\n\n \n \n\"\"\"\nThis second part makes plots of the sink particle masses with time\n\"\"\"\n\n# override the defaults for this movie plot\nmpl.rc('grid', color='0.15')\nmpl.rc('grid', linewidth='0.8')\nmpl.rc('axes', facecolor='0.0')\nmpl.rc('xtick', color='0.6')\nmpl.rc('ytick', color='0.6')\nmpl.rc('figure', facecolor='0.0')\nmpl.rc('savefig', facecolor='0.0')\n\ntimes = [0.0]\nalltimes = []\nsinkmasses = [0.0]\n\nindivnames = []\nindivmasses = []\nindivtimes = []\n\n\nfor snap in range(int(sys.argv[1]),int(sys.argv[2]),int(sys.argv[3])):\n \n fileprefix = 'reduced_'+str(snap).zfill(5)+'/'\n framesdir = outdir+'sinkmasses/'\n if not 
os.path.exists(framesdir):\n os.makedirs(framesdir)\n \n infoname = fileprefix+'info_'+str(snap).zfill(5)+'.txt'\n sinkname = fileprefix+'sink_'+str(snap).zfill(5)+'.out'\n\n # see if we have any sink particles to plot\n try:\n with open(sinkname): \n sinks = get_sinks(sinkname)\n (time, unit_t) = get_time(infoname)\n timeMyr = time * unit_t / 31557600.0 / 1.e6\n alltimes.append(timeMyr)\n if len(sinks) > 0:\n sinkmass = sinks[:,1].sum() # total mass of sinks in Msun\n times.append(timeMyr)\n sinkmasses.append(sinkmass)\n timerow = np.ones((sinks.shape[0],1)) * timeMyr\n # nmt = name, mass, time\n nmt = np.hstack((sinks[:,[0,1]],timerow))\n indivnames.append(nmt[:,0])\n indivmasses.append(nmt[:,1])\n indivtimes.append(nmt[:,2])\n except IOError:\n pass \n\n# flatten lists of individual sink properties, stick in an array\nindivnames = [j for i in indivnames for j in i]\nindivmasses = [j for i in indivmasses for j in i] \nindivtimes = [j for i in indivtimes for j in i] \n# nmt is name mass time\nnmt = np.array([indivnames, indivmasses, indivtimes])\n \n# plot individual sink masses\nmmin = np.log10(1)\nmmax = np.log10(100)\n\nfor snap in range(snapstart, snapend, snapiter):\n fig = plt.figure(figsize = (5, 3.5))\n ax = fig.add_axes([0.2, 0.2, 0.75, 0.75])\n t = alltimes[snap-1]\n print 'plotting sink masses for time ',t\n for i in xrange(int(max(nmt[0,:]))):\n thisone = nmt[:,nmt[0,:] == i]\n if len(thisone[0]) == 0: continue\n x = thisone[2,:]\n if np.min(x) > t: continue\n y = thisone[1,:]\n # if len(x) < 50 and len(x) > 1:\n # xnew = np.linspace(np.min(x), np.max(x), 50)\n # ynew = np.interp(xnew, x, y) \n # x = xnew\n # y = ynew\n colorline(x, y, \n np.log10(y), \n color='0.8', \n norm=plt.Normalize(mmin, mmax),\n linewidth = 1)\n sinkmap = cm.get_cmap('nickmapSink')\n sinkcolors = sinkmap((np.log10(y) - mmin) / (mmax - mmin)) \n ax.scatter(x, y,marker='.',s=3,facecolor=sinkcolors, lw=0) \n\n ax.set_yscale('log')\n ax.set_xlim(0,2.0)\n ax.set_ylim(0.1,250)\n set_ticks(ax, '0.15')\n ax.xaxis.grid(False,which='minor')\n ax.yaxis.grid(False,which='minor')\n\n ax.set_xlabel('time / Myr', fontproperties = tfm, size = 15, color='0.6')\n ax.set_ylabel('total sink mass / '+r'M${_\\odot}$', fontproperties = tfm, size = 15, color='0.6')\n\n for label in ax.get_xticklabels() + ax.get_yticklabels():\n label.set_fontproperties(tfm)\n\n \n plt.savefig(framesdir+'sinkmasses_'+str(snap).zfill(5)+'.png', dpi=400) \n plt.close() \n \n \n"
},
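The make_segments/colorline pair above is a generic trick for drawing mass-colored tracks; the following is a self-contained sketch of the same LineCollection idea on made-up data, using a stock colormap so it runs without the custom 'nickmapSink' map.

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection

x = np.linspace(0, 2 * np.pi, 200)
y = np.sin(x)

# one segment per consecutive point pair: shape (npoints-1, 2, 2)
points = np.array([x, y]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)

# color each segment by its y value, as colorline() does with log mass
lc = LineCollection(segments, array=y[:-1], cmap='viridis',
                    norm=plt.Normalize(-1, 1), linewidth=2)
fig, ax = plt.subplots()
ax.add_collection(lc)
ax.set_xlim(x.min(), x.max())
ax.set_ylim(-1.1, 1.1)
fig.savefig('colorline_demo.png')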
{
"alpha_fraction": 0.6099680066108704,
"alphanum_fraction": 0.6268861293792725,
"avg_line_length": 31.567163467407227,
"blob_id": "3863786a70741245f354a693ea547f9f9b503541",
"content_id": "bcc4b01970cfe20e1c6780e5e0bb0205eb51f7f3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2187,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 67,
"path": "/make_reduced_ic_slices.py",
"repo_name": "nickolas1/ramses_plot_scripts",
"src_encoding": "UTF-8",
"text": "from __future__ import division\n\nfrom yt.mods import *\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\nimport gc\nimport sys\nimport h5py\nimport shutil\nfrom astropy.io import ascii\nfrom os.path import expanduser\n\n# import ramses helper functions and get figure directory\nhomedir = expanduser('~')+'/'\n\n# import ramses helper functions and get figure directory\nsys.path.append(homedir+'pythonhelpers/ramses/')\nfrom ramses_helpers import *\n\nfor snap in range(int(sys.argv[1]),int(sys.argv[2]),int(sys.argv[3])):\n infoname = 'output_'+str(snap).zfill(5)+'/info_'+str(snap).zfill(5)+'.txt'\n sinkname = 'output_'+str(snap).zfill(5)+'/sink_'+str(snap).zfill(5)+'.out'\n \n fileprefix = 'ic_slices_reduced_'+str(snap).zfill(5)+'/'\n\n if not os.path.exists(fileprefix):\n os.makedirs(fileprefix)\n \n # copy the infofile and sinkfile to the reduced directory \n shutil.copy(infoname, fileprefix)\n \n (lmin, lmax) = get_level_min_max(infoname)\n (boxlen, unit_l) = get_boxsize(infoname)\n\n pf = load(infoname, fields=['Density','x-velocity','y-velocity','z-velocity','Pressure'])\n \n # center on original center of cloud\n cntr = [0.5, 0.5, 0.5]\n\n wd = 0.625 # this is messed up- figure it out. yt might not get size right.\n wd = 0.5\n # res should be base resolution times 2**levels of refinement * wd\n resx = int(wd * 2**lmax)\n res = (resx,resx)\n ht = wd * res[0] / res[1]\n width = (wd, 'unitary')\n height = (ht, 'unitary')\n \n i = 0 # get slices along x axis\n dslice = wd/resx\n counter = 0\n for slice in np.arange(cntr[i] - wd/2, cntr[0] + wd/2 + dslice, dslice):\n print counter, slice\n counter += 1\n slc = pf.h.slice(i, slice)\n frb = slc.to_frb(width, res, center = cntr, height = height)\n filename = fileprefix+'density_slice_'+str(i)+'_'+str(counter).zfill(5)+'.hdf5'\n f = h5py.File(filename, 'w')\n dset = f.create_dataset('volume_density', data = np.log10(frb['Density']))\n f.close() \n \n del(slc)\n del(frb)\n gc.collect()\n if counter == 7: sys.exit()\n \n"
},
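One fragile spot in the slicing loop above is np.arange with a floating-point step: depending on round-off it can emit one slice coordinate more or fewer than intended. A sketch of a drift-free alternative, reusing the script's cntr/wd/resx names (the lmax value here is illustrative):

import numpy as np

cntr = [0.5, 0.5, 0.5]
wd = 0.5
resx = int(wd * 2**10)  # illustrative lmax = 10

# linspace pins the number of samples explicitly and covers both endpoints
# of the slab; each coord would feed pf.h.slice(0, coord) as in the script.
coords = np.linspace(cntr[0] - wd / 2, cntr[0] + wd / 2, resx + 1)
for counter, coord in enumerate(coords):
    pass  # take the slice at `coord` here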
{
"alpha_fraction": 0.5807504057884216,
"alphanum_fraction": 0.6117455363273621,
"avg_line_length": 33.60483932495117,
"blob_id": "01cf407342be88860ff2e3f5695dd483196ccc06",
"content_id": "26e7e71bc47f37060fc70d8e7ca30d46b2c616ba",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4291,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 124,
"path": "/turbgravfilaments2/make_reduced_data_CO_filaments_only_slices.py",
"repo_name": "nickolas1/ramses_plot_scripts",
"src_encoding": "UTF-8",
"text": "from __future__ import division\n\nfrom yt.mods import *\nfrom yt.config import ytcfg\nimport gc\nimport sys\nimport h5py\nimport shutil\nfrom os.path import expanduser\nfrom mpi4py import MPI\n\n# import ramses helper functions and get figure directory\nhomedir = expanduser('~')+'/'\n\n# import ramses helper functions and get figure directory\nsys.path.append(homedir+'pythonhelpers/ramses/')\nfrom ramses_helpers import *\n\ndef _CO(field, data):\n mu = 2.33 # mean molecular weight\n mH = 1.6733e-24\n lolim = 1000.0 * mu * mH # not interested in anything below 10^3 / cm^3\n hilim = 31622.0 * mu * mH # not interested in anything above 10^4.5 / com^3\n newfield = data['Density']\n antiselection = (data['Density'] < lolim) | (data['Density'] >= hilim)\n newfield[antiselection] = 0.0\n \n # cut out the filaments we're interested in\n leftpoint = np.array([0.795, 0.34])\n rightpoint = np.array([0.805, 0.05]) \n width = 0.05\n vector = rightpoint - leftpoint\n midpoint = leftpoint + 0.5 * vector\n # translate to midpoint\n transx = data['x'] - midpoint[0]\n transy = data['y'] - midpoint[1]\n length = np.linalg.norm(vector)\n orthovec = (-vector[1], vector[0])\n orthovec /= np.linalg.norm(orthovec)\n vector /= np.linalg.norm(vector)\n \n # rotate around midpoint. orthovec is already a unit vector now.\n beta = np.arccos(orthovec[1])\n rotx = transx * np.cos(beta) - transy * np.sin(beta)\n roty = transx * np.sin(beta) + transy * np.cos(beta)\n \n # cut based on width and length of box\n antiselection2 = (np.abs(rotx) > 0.5*length) | (np.abs(roty) > 0.5 * width)\n newfield[antiselection2] = 1.e-99\n \n return newfield\n\nfor snap in range(int(sys.argv[1]),int(sys.argv[2]),int(sys.argv[3])):\n infoname = 'output_'+str(snap).zfill(5)+'/info_'+str(snap).zfill(5)+'.txt'\n sinkname = 'output_'+str(snap).zfill(5)+'/sink_'+str(snap).zfill(5)+'.out'\n \n fileprefix = 'reduced_'+str(snap).zfill(5)+'/'\n\n if ytcfg.getint('yt', '__topcomm_parallel_rank') == 0:\n if not os.path.exists(fileprefix):\n os.makedirs(fileprefix) \n # copy the infofile and sinkfile to the reduced directory \n shutil.copy(infoname, fileprefix)\n if os.path.exists(sinkname):\n shutil.copy(sinkname, fileprefix)\n \n (lmin, lmax) = get_level_min_max(infoname)\n (boxlen, unit_l) = get_boxsize(infoname)\n\n ds = load(infoname, fields=['Density','x-velocity','y-velocity','z-velocity','Pressure'])\n \n add_field('CO', function=_CO)\n \n # center on original center of cloud\n cntr = [0.5, 0.5, 0.5]\n\n wd = 1.0\n # res should be base resolution times 2**levels of refinement * wd\n lmaxplot = min(11, lmax) \n resx = int(wd * 2**lmaxplot)\n\n # slice through the filaments we're interested in \n leftpoint = np.array([0.795, 0.34])\n rightpoint = np.array([0.805, 0.05]) \n width = 0.045\n \n # figure out how many steps to take\n dresx = 1 / resx\n nstep = int(np.round(width / dresx))\n dstep = width / nstep\n \n vector = rightpoint - leftpoint\n length = np.linalg.norm(vector)\n midpoint = leftpoint + 0.5 * vector\n orthovec = (-vector[1], vector[0], 0)\n orthovec /= np.linalg.norm(orthovec)\n print orthovec\n cntr = [midpoint[0], midpoint[1], 0.5]\n\n #get a projection orthogonal to the filament\n startpoint = cntr - orthovec * width/2\n print cntr\n print startpoint\n for s in xrange(nstep):\n print 'slice number %i out of %i' %(s, nstep)\n point = startpoint + s * orthovec * dstep\n print 'midpoint: ',point\n cut = ds.h.cutting(\n orthovec, # normal vector\n point, # center of slice\n north_vector = [0, 0, -1])\n frb = 
cut.to_frb((1.0, 'unitary'), resx)\n # we're going to march through and manually create a projection\n if s == 0:\n frbtotal = frb['CO'] * dstep * ds.units['cm']\n if s > 0:\n frbtotal += frb['CO'] * dstep * ds.units['cm']\n del(frb)\n del(cut)\n gc.collect()\n filename = fileprefix + 'surface_density_C18O_fil0_sliced.hdf5'\n f = h5py.File(filename, 'w')\n dset = f.create_dataset('surface_density_C18O', data = np.log10(frbtotal))\n f.close()\n"
},
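The rotate-and-cut block inside _CO is worth testing in isolation; this is a sketch of the same oriented-rectangle selection as a pure numpy predicate (point arrays instead of the yt data dict, names mirroring the field above). Like the original it uses arccos, which drops the sign of the rotation angle, so it assumes orthovec[0] >= 0 — true for the filament endpoints used here.

import numpy as np

def in_oriented_rect(x, y, leftpoint, rightpoint, width):
    leftpoint = np.asarray(leftpoint, dtype=float)
    rightpoint = np.asarray(rightpoint, dtype=float)
    vector = rightpoint - leftpoint
    length = np.linalg.norm(vector)
    midpoint = leftpoint + 0.5 * vector
    orthovec = np.array([-vector[1], vector[0]]) / length
    # translate to the midpoint, then rotate so orthovec lands on +y
    transx = np.asarray(x) - midpoint[0]
    transy = np.asarray(y) - midpoint[1]
    beta = np.arccos(orthovec[1])
    rotx = transx * np.cos(beta) - transy * np.sin(beta)
    roty = transx * np.sin(beta) + transy * np.cos(beta)
    return (np.abs(rotx) <= 0.5 * length) & (np.abs(roty) <= 0.5 * width)

mask = in_oriented_rect([0.8], [0.2], [0.795, 0.34], [0.805, 0.05], 0.05)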
{
"alpha_fraction": 0.4796902537345886,
"alphanum_fraction": 0.5052302479743958,
"avg_line_length": 39.79213333129883,
"blob_id": "24be938901fd8e5ad7f4e53e6c4577aa87c5b052",
"content_id": "9b2bb8775e29dc1201e100a3f0b53e27af03eb6e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7361,
"license_type": "no_license",
"max_line_length": 152,
"num_lines": 178,
"path": "/turbgravfilaments/plot_images_from_reduced_data_turbgrav.py",
"repo_name": "nickolas1/ramses_plot_scripts",
"src_encoding": "UTF-8",
"text": "from __future__ import division\n\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\nimport matplotlib.colors as col\nimport matplotlib.cm as cm\nimport gc\nimport sys\nimport h5py\nfrom astropy import constants as const\nfrom astropy import units as u\nfrom os.path import expanduser\nfrom matplotlib import rcParams\n\n\n# import ramses helper functions and get figure directory\nhomedir = expanduser('~')+'/'\n\n# import ramses helper functions and get figure directory\nsys.path.append(homedir+'pythonhelpers/ramses/')\nfrom ramses_helpers import *\n#mpl.rc_file(homedir+'pythonhelpers/ramses/matplotlibrc')\n\n# set some fonts\nfontdir = homedir+'Documents/astronomy/macfontsforpython/'\ntfm = fm.FontProperties( # tick font\n fname=fontdir+'Gotham-Book.ttf', size=13)\nlfm = fm.FontProperties( # label font main\n fname=fontdir+'Gotham-BookItalic.ttf', size=12) \n\nrcParams['xtick.direction'] = 'out'\n\noutdir = get_output_path(homedir)\noutdir = './'\n# the limits on the surface density color map is set up for the compact clouds. here we\n# see if we're looking at diffuse clouds, and if so we adjust for that.\nsnapstr = str(int(sys.argv[1])).zfill(5)\ninfoname = 'reduced_'+snapstr+'/info_'+snapstr+'.txt'\n(boxlen, unit_l) = get_boxsize(infoname)\n\nfor snap in range(int(sys.argv[1]),int(sys.argv[2]),int(sys.argv[3])):\n # first do projections\n imshowmap = 'nickmapSD'\n #imshowmap = 'bone_r'\n #cdmin = -3.3 + np.log10((u.g / u.cm**2).to(const.M_sun / u.pc**2))\n #cdmax = -0.3 + np.log10((u.g / u.cm**2).to(const.M_sun / u.pc**2))\n \n cdmin = 0.35\n cdmax = 3.35\n \n fileprefix = 'reduced_'+str(snap).zfill(5)+'/'\n sinkname = fileprefix+'sink_'+str(snap).zfill(5)+'.csv'\n \n for i in xrange(3):\n file = fileprefix+'surface_density_'+str(i)+'.hdf5'\n if os.path.exists(file):\n print snap,file\n f = h5py.File(file, 'r')\n sd = f['surface_density']\n sd = 10**np.array(sd) # convert to linear units, g / cm^2\n sd *= (u.g / u.cm**2).to(const.M_sun / u.pc**2) # convert to Msun / pc^2\n sd = np.log10(sd) # convert back to log\n \n fig = plt.figure(figsize = (sd.shape[0]/200, sd.shape[1]/200), dpi=200)\n ax = fig.add_axes([0., 0., 1., 1.])\n ax.imshow(sd,\n interpolation = 'nearest',\n origin = 'lower',\n vmin = cdmin,\n vmax = cdmax,\n cmap = imshowmap)\n # turn off axes\n ax.set_frame_on(False)\n ax.axes.get_yaxis().set_visible(False)\n ax.axes.get_xaxis().set_visible(False)\n \n # see if we have any sink particles to plot\n try:\n with open(sinkname): \n sinks = ascii.read(sinkname, names=sinkcolumnnames, converters=sinkconverters)\n if len(sinks['ID']) > 0: \n # figure out the size of the sinks in units of 0-1\n #mincell = 1.0/2**lmax\n #sinkrad = 1.5 * mincell\n sscale = sd.shape[0] / boxlen\n \n if i == 0:\n i0 = 'y'\n i1 = 'z'\n if i == 1:\n i0 = 'x'\n i1 = 'z'\n if i == 2:\n i0 = 'x'\n i1 = 'y'\n\n # convert to imshow scale\n #sinkpos *= res[1] / wd\n #sinkrad *= res[1] / wd\n\n # color by the log of mass. 
the minimum that we plot is 0.1 Msun,\n # max is a few hundred.\n mmin = np.log10(1)\n mmax = np.log10(100)\n sinkmap = cm.get_cmap('nickmapSink')\n sinkcolors = sinkmap((np.log10(sinks['mass']) - mmin) / (mmax - mmin)) \n ax.autoscale(False)\n #for s in xrange(len(sinks)):\n # ax.add_artist(Circle((sinkpos[s,0],sinkpos[s,1]),sinkrad,fc=csink))\n ax.scatter(sinks[i0]*sscale,sinks[i1]*sscale,marker='.',s=9,facecolor=sinkcolors,lw=0) \n except IOError:\n pass \n \n \n \"\"\"\n xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n add a colorbar\n xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n \"\"\"\n ax2 = fig.add_axes([0.1, 0.1, 0.4, 0.015])\n a = np.outer(np.arange(cdmin, cdmax, (cdmax - cdmin)/255), np.ones(10)).T\n ax2.imshow(a, \n aspect = 'auto',\n interpolation = 'nearest',\n origin = 'lower',\n vmin = cdmin,\n vmax = cdmax,\n cmap = imshowmap,\n extent = [cdmin, cdmax, 0, 1])\n ax2.set_frame_on(False)\n ax2.axes.get_yaxis().set_visible(False)\n ax2.xaxis.set_ticks(np.arange(cdmin, cdmax+1, 1.0))\n #ax2.set_xlabel(r'log$\\mathdefault{_{10}}$(column density / g cm$\\mathdefault{^{-2}}$)', fontproperties = tfm, size=15, color='0.15')\n ax2.set_xlabel(r'log$\\mathdefault{_{10}}$(column density / M$_\\odot$ pc$\\mathdefault{^{-2}}$)', fontproperties = tfm, size=15, color='0.15')\n set_ticks(ax2, '0.15')\n for label in ax2.get_xticklabels() + ax2.get_yticklabels():\n label.set_fontproperties(tfm)\n\n\n \"\"\"\n xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n add a scalebar\n xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n \"\"\"\n ax3 = fig.add_axes([0.7, 0.1, 0.2, 0.0015])\n a = np.outer(np.ones(100)*.8*cdmax, np.ones(10)).T\n ax3.imshow(a, \n aspect = 'auto',\n interpolation = 'nearest',\n origin = 'lower',\n vmin = cdmin,\n vmax = cdmax,\n cmap = imshowmap,\n extent = [cdmin, cdmax, 0, 1])\n ax3.set_frame_on(False)\n ax3.axes.get_yaxis().set_visible(False)\n ax3.axes.get_xaxis().set_visible(False)\n ax3.text(0.1, 0.75, r'2pc', transform = ax3.transAxes,\n va = 'bottom', ha = 'left', fontproperties = tfm, color='0.15', snap = False)\n set_ticks(ax3, '0.15')\n for label in ax3.get_xticklabels() + ax3.get_yticklabels():\n label.set_fontproperties(tfm)\n \n \n \n framesdir = outdir+'surfacedensity'+str(i)+'/'\n if not os.path.exists(framesdir):\n os.makedirs(framesdir)\n \n framename = framesdir+'sd'+str(i)+'_frame_'+str(snap).zfill(5)+'.png'\n plt.savefig(framename, dpi = 200)\n f.close() \n plt.close() \n del(f)\n del(sd)\n gc.collect()\n \n \n\n \n \n \n \n"
},
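The g cm^-2 to Msun pc^-2 conversion above is a single constant; a quick sketch of recovering it with astropy (u.M_sun is the plain-units spelling of the const.M_sun the script divides by):

from astropy import units as u

factor = (u.g / u.cm**2).to(u.M_sun / u.pc**2)
print(factor)  # about 4.79e3 Msun pc^-2 per g cm^-2

# in log space this is the additive offset applied to the maps:
# log10(sd [Msun pc^-2]) = log10(sd [g cm^-2]) + log10(factor)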
{
"alpha_fraction": 0.586056649684906,
"alphanum_fraction": 0.6089324355125427,
"avg_line_length": 32.938270568847656,
"blob_id": "cc88c6639076cb242348d1f3188373bbb6a00868",
"content_id": "a3fa01f05353ccbdc4e04b437106ee23bd07b69f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2754,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 81,
"path": "/turbgravfilaments/make_reduced_data_CO_turbgrav.py",
"repo_name": "nickolas1/ramses_plot_scripts",
"src_encoding": "UTF-8",
"text": "from __future__ import division\n\nfrom yt.mods import *\nfrom yt.config import ytcfg\nimport gc\nimport sys\nimport h5py\nimport shutil\nfrom os.path import expanduser\nfrom mpi4py import MPI\n\n# import ramses helper functions and get figure directory\nhomedir = expanduser('~')+'/'\n\n# import ramses helper functions and get figure directory\nsys.path.append(homedir+'pythonhelpers/ramses/')\nfrom ramses_helpers import *\n\ndef _CO(field, data):\n mu = 2.33 # mean molecular weight\n mH = 1.6733e-24\n lolim = 1000.0 * mu * mH # not interested in anything below 10^3 / cm^3\n hilim = 31622.0 * mu * mH # not interested in anything above 10^4.5 / com^3\n newfield = data['Density']\n antiselection = (data['Density'] < lolim) | (data['Density'] >= hilim)\n newfield[antiselection] = 1.e-99\n return newfield\n\nfor snap in range(int(sys.argv[1]),int(sys.argv[2]),int(sys.argv[3])):\n infoname = 'output_'+str(snap).zfill(5)+'/info_'+str(snap).zfill(5)+'.txt'\n sinkname = 'output_'+str(snap).zfill(5)+'/sink_'+str(snap).zfill(5)+'.out'\n \n fileprefix = 'reduced_'+str(snap).zfill(5)+'/'\n\n if ytcfg.getint('yt', '__topcomm_parallel_rank') == 0:\n if not os.path.exists(fileprefix):\n os.makedirs(fileprefix) \n # copy the infofile and sinkfile to the reduced directory \n shutil.copy(infoname, fileprefix)\n if os.path.exists(sinkname):\n shutil.copy(sinkname, fileprefix)\n \n (lmin, lmax) = get_level_min_max(infoname)\n (boxlen, unit_l) = get_boxsize(infoname)\n\n ds = load(infoname, fields=['Density','x-velocity','y-velocity','z-velocity','Pressure'])\n \n add_field('CO', function=_CO)\n \n # center on original center of cloud\n cntr = [0.5, 0.5, 0.5]\n\n wd = 1.0\n # res should be base resolution times 2**levels of refinement * wd\n lmaxplot = min(11, lmax) \n resx = int(wd * 2**lmaxplot)\n res = (resx,resx)\n ht = wd * res[0] / res[1]\n width = (wd, 'unitary')\n height = (ht, 'unitary')\n\n for i in range(3):\n # get projection in each direction\n proj = ds.h.proj('CO', i)\n MPI.COMM_WORLD.Barrier()\n frb = proj.to_frb(width, res, center = cntr, height = height)\n print 'done frb',ytcfg.getint('yt', '__topcomm_parallel_rank')\n MPI.COMM_WORLD.Barrier()\n filename = fileprefix+'surface_density_CO'+str(i)+'.hdf5'\n f = h5py.File(filename, 'w')\n dset = f.create_dataset('surface_density_CO', data = np.log10(frb['CO']))\n print 'done dset',ytcfg.getint('yt', '__topcomm_parallel_rank')\n MPI.COMM_WORLD.Barrier()\n f.close()\n del(f)\n del(dset)\n del(frb)\n del(proj)\n gc.collect() \n del(ds)\n gc.collect()\n \n"
},
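A caveat on the _CO derived field above: newfield = data['Density'] binds a reference, so newfield[antiselection] = 1.e-99 also writes into the array it came from, which can matter if yt reuses the cached field. A pure-numpy sketch of the same band-pass selection on an explicit copy:

import numpy as np

mu, mH = 2.33, 1.6733e-24
lolim = 1000.0 * mu * mH    # 10^3 cm^-3 as a mass density, g cm^-3
hilim = 31622.0 * mu * mH   # 10^4.5 cm^-3

def co_band(density):
    # copy first so the caller's array is left untouched
    newfield = np.array(density, dtype=float, copy=True)
    antiselection = (newfield < lolim) | (newfield >= hilim)
    newfield[antiselection] = 1.e-99
    return newfield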
{
"alpha_fraction": 0.5815302133560181,
"alphanum_fraction": 0.6092490553855896,
"avg_line_length": 31.07305908203125,
"blob_id": "04c3072135fcbad155a1cca5fafee18444214d96",
"content_id": "76f6b6f273e9cdb3efdce75e15513f41face4f72",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7071,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 219,
"path": "/plot_sinkplots_from_reduced_data.py",
"repo_name": "nickolas1/ramses_plot_scripts",
"src_encoding": "UTF-8",
"text": "from __future__ import division\n\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\nimport matplotlib.colors as col\nimport matplotlib.cm as cm\nimport gc\nimport sys\nimport h5py\nfrom os.path import expanduser\nfrom matplotlib.collections import LineCollection\n\n# import ramses helper functions and get figure directory\nhomedir = expanduser('~')+'/'\n\n# import ramses helper functions and get figure directory\nsys.path.append(homedir+'pythonhelpers/ramses/')\nfrom ramses_helpers import *\nmpl.rc_file(homedir+'pythonhelpers/ramses/matplotlibrc')\noutdir = get_output_path(homedir)\n\n# set some fonts\nfontdir = homedir+'Documents/astronomy/macfontsforpython/'\ntfm = fm.FontProperties( # tick font\n fname=fontdir+'Gotham-Book.ttf', size=13)\nlfm = fm.FontProperties( # label font main\n fname=fontdir+'Gotham-BookItalic.ttf', size=12)\n\n# the limits on the surface density color map is set up for the compact clouds. here we\n# see if we're looking at diffuse clouds, and if so we adjust for that.\ninfoname = 'reduced_00001/info_00001.txt'\n(boxlen, unit_l) = get_boxsize(infoname)\nif boxlen > 7:\n tlimmin = 3.5\n mlimmax = 1.e4\nelse:\n tlimmin = 0.5\n mlimmax = 1.e4\n\n\n# these two from http://nbviewer.ipython.org/urls/raw.github.com/dpsanders/matplotlib-examples/master/colorline.ipynb\ndef make_segments(x, y):\n '''\n Create list of line segments from x and y coordinates, in the correct format for LineCollection:\n an array of the form numlines x (points per line) x 2 (x and y) array\n '''\n \n points = np.array([x, y]).T.reshape(-1, 1, 2)\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n #segments2 = np.concatenate([0.999*points[:], 1.001*points[:]], axis=1)\n #segments = np.concatenate([segments1, segments2])\n \n # segments[:,0,0] *= 0.999\n # segments[:,1,0] *= 1.001\n \n return segments\n\n\n# Interface to LineCollection:\n\ndef colorline(x, y, z=None, cmap=plt.get_cmap('copper'), norm=plt.Normalize(0.0, 1.0), linewidth=6, alpha=1.0):\n '''\n Plot a colored line with coordinates x and y\n Optionally specify colors in the array z\n Optionally specify a colormap, a norm function and a line width\n '''\n \n # Default colors equally spaced on [0,1]:\n if z is None:\n z = np.linspace(0.0, 1.0, len(x))\n \n # Special case if a single number:\n if not hasattr(z, \"__iter__\"): # to check for numerical input -- this is a hack\n z = np.array([z])\n \n z = np.asarray(z)\n \n segments = make_segments(x, y)\n lc = LineCollection(segments, array=z, cmap=cmap, norm=norm, linewidth=linewidth,alpha=alpha)\n \n ax = plt.gca()\n ax.add_collection(lc)\n \n return lc\n \n \n\"\"\"\nThis second part makes plots of the sink particle masses with time\n\"\"\"\n\n# override the defaults for this movie plot\nmpl.rc('grid', color='0.15')\nmpl.rc('grid', linewidth='1.0')\nmpl.rc('axes', facecolor='0.0')\nmpl.rc('xtick', color='0.6')\nmpl.rc('ytick', color='0.6')\nmpl.rc('figure', facecolor='0.0')\nmpl.rc('savefig', facecolor='0.0')\n\ntimes = [0.0]\nalltimes = []\nsinkmasses = [0.0]\n\nindivnames = []\nindivmasses = []\nindivtimes = []\n\nsnapstart = int(sys.argv[1])\nsnapend = int(sys.argv[2])\nsnapiter = int(sys.argv[3])\n\nfor snap in range(snapstart, snapend, snapiter):\n \n fileprefix = 'reduced_'+str(snap).zfill(5)+'/'\n framesdir = outdir+'sinkmasses/'\n if not os.path.exists(framesdir):\n os.makedirs(framesdir)\n \n infoname = fileprefix+'info_'+str(snap).zfill(5)+'.txt'\n sinkname = 
fileprefix+'sink_'+str(snap).zfill(5)+'.out'\n\n    # see if we have any sink particles to plot\n    try:\n        with open(sinkname): \n            sinks = get_sinks(sinkname)\n            (time, unit_t) = get_time(infoname)\n            timeMyr = time * unit_t / 31557600.0 / 1.e6\n            alltimes.append(timeMyr)\n            if len(sinks) > 0:\n                sinkmass = sinks[:,1].sum() # total mass of sinks in Msun\n                times.append(timeMyr)\n                sinkmasses.append(sinkmass)\n                timerow = np.ones((sinks.shape[0],1)) * timeMyr\n                # nmt = name, mass, time\n                nmt = np.hstack((sinks[:,[0,1]],timerow))\n                indivnames.append(nmt[:,0])\n                indivmasses.append(nmt[:,1])\n                indivtimes.append(nmt[:,2])\n    except IOError:\n        pass \n\n# flatten lists of individual sink properties, stick in an array\nindivnames = [j for i in indivnames for j in i]\nindivmasses = [j for i in indivmasses for j in i] \nindivtimes = [j for i in indivtimes for j in i] \n# nmt is name mass time\nnmt = np.array([indivnames, indivmasses, indivtimes])\n \ntimes = np.array(times[1:])\nsinkmasses = np.array(sinkmasses[1:]) \n\nprint 'times = ',times\nprint 'total sink masses = ',sinkmasses\n\n# plot individual sink masses\nmmin = np.log10(1)\nmmax = np.log10(100)\n\nfor snap in range(snapstart, snapend, snapiter):\n    fig = plt.figure(figsize = (5, 3.5))\n    ax = fig.add_axes([0.248, 0.2, 0.652, 0.75])\n    t = alltimes[snap-1]\n    print 'plotting sink masses for time ',t\n    \n    nplotted = 0\n    for i in xrange(int(max(nmt[0,:]))):\n        thisone = nmt[:,nmt[0,:] == i]\n        if len(thisone[0]) == 0: continue\n        x = thisone[2,:]\n        if np.min(x) > t: continue\n        nplotted += 1\n        sel = (x <= t)\n        x = x[sel]\n        y = thisone[1,sel]\n       # if len(x) < 50 and len(x) > 1:\n       #     xnew = np.linspace(np.min(x), np.max(x), 50)\n       #     ynew = np.interp(xnew, x, y)    \n       #     x = xnew\n       #     y = ynew\n        colorline(x, y, \n            np.log10(y), \n            cmap='nickmapSink', \n            norm=plt.Normalize(mmin, mmax),\n            linewidth = 1)\n        sinkmap = cm.get_cmap('nickmapSink')\n        sinkcolors = sinkmap((np.log10(y) - mmin) / (mmax - mmin))    \n        ax.scatter(x, y,marker='.',s=3,facecolor=sinkcolors, lw=0)    \n    \n    # plot the total mass in sinks too\n    if nplotted > 1:\n        sel = (np.array(times) <= t)\n        x = times[sel]\n        y = sinkmasses[sel]\n        ax.plot(x, y, color='1.0', linewidth=1.25, alpha=0.6, zorder=1)\n    \n    ax.set_yscale('log')\n    #tmin = np.min(times)\n    tmax = np.max(times)\n    #tspan = tmax - tmin\n    #ax.set_xlim(tmin - 0.1 * tspan, tmax)\n    ax.set_xlim(tlimmin, tmax)\n    ax.set_ylim(1.0,mlimmax)\n    set_ticks(ax, '0.15')\n    ax.xaxis.grid(False,which='minor')\n    ax.yaxis.grid(False,which='minor')\n    \n    ax.set_xlabel('time / Myr', fontproperties = tfm, size = 15, color='0.6')\n    ax.set_ylabel('sink mass / '+r'M${_\\odot}$', fontproperties = tfm, size = 15, color='0.6')\n\n\n    for label in ax.get_xticklabels() + ax.get_yticklabels():\n        label.set_fontproperties(tfm)\n    \n    plt.savefig(framesdir+'sinkmasses_'+str(snap).zfill(5)+'.png', dpi=400)    \n    plt.close()    \n    \n    \n"
},
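The nmt array above packs every (name, mass, time) sample into three rows and recovers single-sink tracks with a boolean column mask; a tiny self-contained sketch of that indexing on made-up values:

import numpy as np

# row 0: sink name, row 1: mass [Msun], row 2: time [Myr]
nmt = np.array([[1.0, 2.0, 1.0, 2.0, 1.0],
                [0.5, 0.7, 0.9, 1.4, 1.6],
                [0.1, 0.1, 0.2, 0.2, 0.3]])

track = nmt[:, nmt[0, :] == 1]   # every sample belonging to sink 1
t, m = track[2, :], track[1, :]  # its time and mass histories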
{
"alpha_fraction": 0.542469322681427,
"alphanum_fraction": 0.5773496627807617,
"avg_line_length": 37.42696762084961,
"blob_id": "83c7c96e236be8c2681742f10c3af9cebb816cc1",
"content_id": "ca3e4ceb8388bc8534fca565c1199579507db7f6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6852,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 178,
"path": "/turbgravfilaments2/make_surface_density_json.py",
"repo_name": "nickolas1/ramses_plot_scripts",
"src_encoding": "UTF-8",
"text": "from __future__ import division\n\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\nimport matplotlib.cm as cm\nimport gc\nimport sys\nimport h5py\nfrom astropy import constants as const\nfrom astropy import units as u\nfrom os.path import expanduser\nfrom matplotlib import rcParams\nimport scipy.ndimage as ndimage\n\n\n\"\"\"\nplot column density from different density ranges. \n\nthe hdf5 files contain log surface density in g cm^-2\nfiles with 'C18O': surface density of gas with number density 10^3 < n < 10^4.5\nfiles with 'N2Hplus': surface density of gas with number density 10^4.5 < n\nfiles that just say 'surface_density': all the gas\n\nthis script plots column density in cm^-2, converting from g cm^-2 using mu=2.33\n\"\"\"\n\n\n# import ramses helper functions and get figure directory\nhomedir = expanduser('~')+'/'\nsys.path.append(homedir+'pythonhelpers/ramses/')\nfrom ramses_helpers import *\n\n#mpl.rc_file(homedir+'pythonhelpers/ramses/matplotlibrc')\n\n# set some fonts\nfontdir = homedir+'Documents/astronomy/macfontsforpython/'\ntfm = fm.FontProperties( # tick font\n fname=fontdir+'Gotham-Book.ttf', size=7)\nlfm = fm.FontProperties( # label font main\n fname=fontdir+'Gotham-BookItalic.ttf', size=6) \n\nrcParams['xtick.direction'] = 'out'\n\noutdir = get_output_path(homedir)\noutdir = './'\n\nsnapstr = str(int(sys.argv[1])).zfill(5)\ninfoname = 'reduced_'+snapstr+'/info_'+snapstr+'.txt'\n(boxlen, unit_l) = get_boxsize(infoname)\n\n\n\"\"\"\n xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n toggle these to plot a colorbar and length scale bar\n xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n\"\"\"\nPlotColorBars = False\nPlotScaleBar = False\nPlotSinks = False\n\noutputres = 256\n\nmu = 2.33\n\nfor snap in range(int(sys.argv[1]),int(sys.argv[2]),int(sys.argv[3])):\n # first do projections\n imshowmap = 'nickmapVD2'\n #imshowmap = 'bone_r'\n \n cdmin = 10**-3.3 \n cdmax = 10**-1.5\n cdmin = 0\n cdmax = 4\n \n cdmin = 0\n cdmax = 1.*10**22\n \n fileprefix = 'reduced_'+str(snap).zfill(5)+'/'\n sinkname = fileprefix+'sink_'+str(snap).zfill(5)+'.csv'\n \n \n # downsampled surface density\n dsSD = np.zeros([outputres, outputres])\n # downsampled C18O\n dsC18O = np.zeros([outputres, outputres])\n # downsampled N2Hplus\n dsN2Hplus = np.zeros([outputres, outputres])\n \n for i in xrange(2,3):\n fileSD = fileprefix+'surface_density'+str(i)+'.hdf5'\n fileCO = fileprefix+'surface_density_C18O'+str(i)+'.hdf5'\n fileN2Hplus = fileprefix+'surface_density_N2Hplus'+str(i)+'.hdf5'\n if os.path.exists(fileCO):\n print snap,fileCO\n fSD = h5py.File(fileSD, 'r')\n sd = fSD['surface_density']\n # convert to linear units, divide by mu * mH\n sd = 10**np.array(sd) / (mu * const.m_p.cgs.value)\n sd = sd.transpose()\n # there are a lot of very small values- cull them before getting some\n # info on the range of interesting values \n sdnonzero = sd[sd > 10**4]\n print 'mean non-zero column density: ',np.mean(sdnonzero),'cm^-2'\n print 'median non-zero column density: ',np.median(sdnonzero),'cm^-2'\n print 'max column density: ',np.max(sd),'cm^-2'\n \n fC18O = h5py.File(fileCO, 'r')\n sdC18O = fC18O['surface_density_C18O']\n # convert to linear units, divide by mu * mH\n sdC18O = 10**np.array(sdC18O) / (mu * const.m_p.cgs.value)\n sdC18O = sdC18O.transpose()\n # there are a lot of very small values- cull them before getting some\n # info on the range of interesting values 
\n sdnonzero = sdC18O[sdC18O > 10**4]\n print 'mean non-zero C18O column density: ',np.mean(sdnonzero),'cm^-2'\n print 'median non-zero C18O column density: ',np.median(sdnonzero),'cm^-2'\n print 'max C18O column density: ',np.max(sdC18O),'cm^-2'\n \n fN2Hplus = h5py.File(fileN2Hplus, 'r')\n sdN2Hplus = fN2Hplus['surface_density_N2Hplus']\n # convert to linear units, divide by mu * mH\n sdN2Hplus = 10**np.array(sdN2Hplus) / (mu * const.m_p.cgs.value)\n sdN2Hplus = sdN2Hplus.transpose()\n # there are a lot of very small values- cull them before getting some\n # info on the range of interesting values \n sdnonzero = sdN2Hplus[sdN2Hplus > 10**4]\n print ''\n print 'mean non-zero N2Hplus column density: ',np.mean(sdnonzero),'cm^-2'\n print 'median non-zero N2Hplus column density: ',np.median(sdnonzero),'cm^-2'\n print 'max N2Hplus column density: ',np.max(sdN2Hplus),'cm^-2'\n \n glom = sd.shape[0] / outputres\n for ii in xrange(dsSD.shape[0]):\n iindexlo = ii * glom\n iindexhi = (ii + 1) * glom\n for jj in xrange(dsSD.shape[1]):\n jindexlo = jj * glom\n jindexhi = (jj +1) * glom\n dsSD[ii, jj] = sd[iindexlo:iindexhi, jindexlo:jindexhi].sum() / glom**2\n dsC18O[ii, jj] = sdC18O[iindexlo:iindexhi, jindexlo:jindexhi].sum() / glom**2\n dsN2Hplus[ii, jj] = sdN2Hplus[iindexlo:iindexhi, jindexlo:jindexhi].sum() / glom**2\n \n # convert 0 to 10**-99\n dsSD[dsSD < 1.e-99] = 1.e-99\n dsC18O[dsC18O < 1.e-99] = 1.e-99\n dsN2Hplus[dsN2Hplus < 1.e-99] = 1.e-99\n \n dsSD = np.round(np.log10(dsSD),2)\n dsC18O = np.round(np.log10(dsC18O),2)\n dsN2Hplus = np.round(np.log10(dsN2Hplus),2)\n \n # convert 10**-99 to 0\n dsSD[dsSD < 1] = 0\n dsC18O[dsC18O < 1] = 0\n dsN2Hplus[dsN2Hplus < 1] = 0\n \n f = open('columndensities_'+str(snap).zfill(5)+'.json', 'w')\n f.write('[\\n')\n for ii in xrange(dsSD.shape[0]):\n f.write('{\"x\":'+str(ii)+',\\n\"SD\":['+str(dsSD[ii, 0]))\n for jj in xrange(1, dsSD.shape[1]):\n f.write(','+str(dsSD[ii, jj]))\n f.write('],\\n')\n f.write('\"C18O\":['+str(dsC18O[ii, 0]))\n for jj in xrange(1, dsC18O.shape[1]):\n f.write(','+str(dsC18O[ii, jj]))\n f.write('],\\n')\n f.write('\"N2Hplus\":['+str(dsN2Hplus[ii, 0]))\n for jj in xrange(1, dsN2Hplus.shape[1]):\n f.write(','+str(dsN2Hplus[ii, jj]))\n f.write(']\\n')\n f.write('}')\n if ii < dsSD.shape[0] - 1:\n f.write(',\\n')\n f.write('\\n]')\n f.close()\n "
},
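The nested glom loops above compute a block average; when the map size is an exact multiple of outputres (as for these power-of-two grids), a single reshape gives the same dsSD without Python-level loops. A sketch with an illustrative array:

import numpy as np

outputres = 256
sd = np.random.rand(1024, 1024)   # stand-in for the full-resolution map
glom = sd.shape[0] // outputres

# view the map as (outputres x outputres) blocks of glom x glom pixels,
# then average within each block
dsSD = sd.reshape(outputres, glom, outputres, glom).mean(axis=(1, 3))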
{
"alpha_fraction": 0.5729314684867859,
"alphanum_fraction": 0.5897071361541748,
"avg_line_length": 35.54166793823242,
"blob_id": "2c426ff89830876c91d6d441869a92c8f15a6ffe",
"content_id": "f2ce1da0ed9d91be9d41274a9437117af54ea496",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3517,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 96,
"path": "/prepare_sinks_for_d3.py",
"repo_name": "nickolas1/ramses_plot_scripts",
"src_encoding": "UTF-8",
"text": "from __future__ import division\n\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\nimport matplotlib.colors as col\nimport matplotlib.cm as cm\nimport gc\nimport sys\nimport h5py\nfrom os.path import expanduser\nfrom astropy.io import ascii\nfrom astropy.table import Table\n\n\n# import ramses helper functions and get figure directory\nhomedir = expanduser('~')+'/'\n\n# import ramses helper functions and get figure directory\nsys.path.append(homedir+'pythonhelpers/ramses/')\nfrom ramses_helpers import *\nmpl.rc_file(homedir+'pythonhelpers/ramses/matplotlibrc')\n\nnsinkmax = 1000\nsinktimes = np.zeros([nsinkmax, int(sys.argv[2]) - int(sys.argv[1])])\nsinkmasses = np.zeros([nsinkmax, int(sys.argv[2]) - int(sys.argv[1])])\n\nfor snap in range(int(sys.argv[1]),int(sys.argv[2]),int(sys.argv[3])):\n infoname = 'output_'+str(snap).zfill(5)+'/info_'+str(snap).zfill(5)+'.txt'\n sinkname = 'output_'+str(snap).zfill(5)+'/sink_'+str(snap).zfill(5)+'.csv'\n \n (time, unit_t) = get_time(infoname)\n # convert to Myr\n tmyr = time * unit_t / (31557600 * 1.e6)\n print 'time = ',tmyr\n # see if we have any sink particles to plot\n try:\n with open(sinkname): \n sinks = ascii.read(sinkname, names=sinkcolumnnames, converters=sinkconverters)\n sinkid = sinks['ID']\n sinkmass = sinks['mass']\n sinktimes[:, snap - int(sys.argv[1])] = tmyr\n for i in xrange(len(sinkid)):\n sinkmasses[sinkid[i], snap - int(sys.argv[1])] = sinkmass[i]\n except IOError:\n pass \n \nsinkdir = './sinkmasstime/' \nif not os.path.exists(sinkdir):\n os.makedirs(sinkdir) \n\nf = open(sinkdir+'sinkmasses.json', 'w')\nf.write('[\\n')\n# write the first sink\nif sinkmasses[1].sum() > 0:\n f.write('{\\n')\n f.write('\"name\": \"'+str(1)+'\",\\n')\n f.write('\"data\":[ ')\n for ii in xrange(0, len(sinkmasses[1])): # trim all leading zeros but one\n\tprint sinkmasses[1,ii]\n if sinkmasses[1, ii] > 0: \n break\n print \"for sink 1 start at \",ii-1,\" that's time \",sinktimes[1,ii-1]\n f.write('{\"time\":'+str(sinktimes[1,ii-1])+', \"mass\":'+str(sinkmasses[1,ii-1])+'}')\n for j in xrange(ii,len(sinkmasses[1])):\n if sinkmasses[1, j] == 0: # trim trailing zeros in case of merger\n break\n f.write(',{\"time\":'+str(sinktimes[1,j])+', \"mass\":'+str(sinkmasses[1,j])+'}')\n f.write(' ]\\n')\n f.write('}')\nfor i in xrange(2,nsinkmax):\n if sinkmasses[i].sum() > 0:\n f.write(', {\\n')\n f.write('\"name\": \"'+str(i)+'\",\\n')\n f.write('\"data\":[ ')\n for ii in xrange(0, len(sinkmasses[i])): # trim all leading zeros but one\n if sinkmasses[i, ii] > 0: \n break\n print \"for sink \",i,\" start at \",ii-1,\" that's time \",sinktimes[i,ii-1]\n f.write('{\"time\":'+str(sinktimes[i,ii-1])+', \"mass\":'+str(sinkmasses[i,ii-1])+'}')\n for j in xrange(ii,len(sinkmasses[i])):\n if sinkmasses[i, j] == 0: # trim trailing zeros in case of merger\n break\n f.write(',{\"time\":'+str(sinktimes[i,j])+', \"mass\":'+str(sinkmasses[i,j])+'}')\n f.write(' ]\\n')\n f.write('}')\nf.write(']\\n')\nf.close()\n\nfor i in xrange(nsinkmax):\n if sinkmasses[i].sum() > 0:\n sinkfile = sinkdir + 'sink'+str(i)+'.csv'\n print sinkfile\n data = Table([sinktimes[i], sinkmasses[i]], names=['t', 'm'])\n ascii.write(data, sinkfile, delimiter=',')\n \n"
},
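The hand-rolled f.write JSON above has to special-case the first sink to get the commas right; building the records as Python objects and letting the json module serialize them avoids that. A sketch with the same trimming idea (keeps one leading zero sample, drops trailing zeros after a merger; the helper name is made up):

import json
import numpy as np

def write_sink_json(sinktimes, sinkmasses, path):
    # sinktimes, sinkmasses: 2-D arrays indexed [sink id, snapshot]
    records = []
    for i in range(sinkmasses.shape[0]):
        nz = np.nonzero(sinkmasses[i])[0]
        if len(nz) == 0:
            continue
        start, stop = max(nz[0] - 1, 0), nz[-1] + 1
        data = [{'time': float(t), 'mass': float(m)}
                for t, m in zip(sinktimes[i, start:stop],
                                sinkmasses[i, start:stop])]
        records.append({'name': str(i), 'data': data})
    with open(path, 'w') as f:
        json.dump(records, f, indent=1)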
{
"alpha_fraction": 0.6275492906570435,
"alphanum_fraction": 0.6486124992370605,
"avg_line_length": 33.32183837890625,
"blob_id": "5ddb7ce172f3bdfa0fc356b91b192eebb344e7a4",
"content_id": "9c67d9e894ee8b2771d7dde416575d0a6ee59bf1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2991,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 87,
"path": "/make_surface_density_projections.py",
"repo_name": "nickolas1/ramses_plot_scripts",
"src_encoding": "UTF-8",
"text": "from __future__ import division\n\nfrom yt.mods import *\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\nimport gc\nimport sys\nimport h5py\nfrom astropy.io import ascii\nfrom os.path import expanduser\n\n# import ramses helper functions and get figure directory\nhomedir = expanduser('~')+'/'\n\n# import ramses helper functions and get figure directory\nsys.path.append(homedir+'pythonhelpers/ramses/')\nfrom ramses_helpers import *\n#mpl.rc_file(homedir+'pythonhelpers/ramses/matplotlibrc')\n\n# set some fonts\nfontdir = homedir+'Documents/astronomy/macfontsforpython/'\ntfm = fm.FontProperties( # tick font main\n fname=fontdir+'Gotham-Book.ttf', size=13) \nlfm = fm.FontProperties( # label font main\n fname=fontdir+'Gotham-BookItalic.ttf', size=11) \n\n\nfor snap in range(int(sys.argv[1]),int(sys.argv[2]),int(sys.argv[3])):\n infoname = 'output_'+str(snap).zfill(5)+'/info_'+str(snap).zfill(5)+'.txt'\n \n projaxis = 1\n\n (lmin, lmax) = get_level_min_max(infoname)\n (boxlen, unit_l) = get_boxsize(infoname)\n # set column density limits so that the images appear the same.\n # low dens cloud has boxsize 10, high dens cloud has boxsize 5.\n # offsets in log of column density limits are thus log10(8)\n if boxlen > 7:\n cdmin = -5.1\n cdmax = -2.0\n else:\n cdmin = -4.19\n cdmax = -1.09\n\n pf = load(infoname, fields=['Density','x-velocity','y-velocity','z-velocity','Pressure'])\n \n # center on original center of cloud\n cntr = [0.5, 0.5, 0.5]\n\n # get a projection of density along y axis\n proj = pf.h.proj('Density', 1)\n\n wd = 0.625 # this is messed up- figure it out. yt might not get size right.\n # res should be base resolution times 2**levels of refinement * wd\n resx = int(wd * 2**lmax)\n res = (resx,resx)\n ht = wd * res[0] / res[1]\n width = (wd, 'unitary')\n height = (ht, 'unitary')\n frb = proj.to_frb(width, res, center = cntr, height = height)\n filename = sinkname = 'output_'+str(snap).zfill(5)+'/surface_density_1.hdf5'\n f = h5py.File(filename, 'w')\n dset = f.create_dataset('surface_density', data = np.log10(frb['Density']))\n f.close()\n \n \n # get a projection of density along x axis\n proj = pf.h.proj('Density', 0)\n frb = proj.to_frb(width, res, center = cntr, height = height)\n filename = sinkname = 'output_'+str(snap).zfill(5)+'/surface_density_0.hdf5'\n f = h5py.File(filename, 'w')\n dset = f.create_dataset('surface_density', data = np.log10(frb['Density']))\n f.close()\n \n # get a projection of density along z axis\n proj = pf.h.proj('Density', 2)\n frb = proj.to_frb(width, res, center = cntr, height = height)\n filename = sinkname = 'output_'+str(snap).zfill(5)+'/surface_density_2.hdf5'\n f = h5py.File(filename, 'w')\n dset = f.create_dataset('surface_density', data = np.log10(frb['Density']))\n f.close() \n \n del(frb)\n del(pf)\n gc.collect()\n \n"
},
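The three per-axis projection blocks above differ only in the axis index and output name (and the x- and z-axis copies keep the stray filename = sinkname = double assignment). A sketch of the same yt calls folded into one loop; pf, width, res, cntr, height and snap are the script's own variables, assumed in scope:

import h5py
import numpy as np

for axis in range(3):
    proj = pf.h.proj('Density', axis)
    frb = proj.to_frb(width, res, center=cntr, height=height)
    filename = 'output_%05d/surface_density_%d.hdf5' % (snap, axis)
    f = h5py.File(filename, 'w')
    f.create_dataset('surface_density', data=np.log10(frb['Density']))
    f.close()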
{
"alpha_fraction": 0.5807999968528748,
"alphanum_fraction": 0.6139428615570068,
"avg_line_length": 31.574626922607422,
"blob_id": "e89ccebdfca61fbba847a816a2181cdaa2409f74",
"content_id": "01823f3b7224d5041cbdc454f73be7b042707fbc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4375,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 134,
"path": "/make_sfrtff_plot.py",
"repo_name": "nickolas1/ramses_plot_scripts",
"src_encoding": "UTF-8",
"text": "from __future__ import division\n\nfrom yt.mods import *\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\nimport gc\nimport sys\nfrom astropy.io import ascii\n\n# import ramses helper functions and get figure directory\nhomedir = '/home/moon/moeckel/'\nsys.path.append(homedir+'pythonhelpers/ramses/')\nfrom ramses_helpers import *\nmpl.rc_file(homedir+'pythonhelpers/ramses/matplotlibrc')\noutdir = get_output_path(homedir)\n\n# set some fonts\nfontdir = '/home/moon/moeckel/Documents/astronomy/macfontsforpython/'\ntfm = fm.FontProperties( # tick font\n fname=fontdir+'Gotham-Book.ttf', size=13)\nlfm = fm.FontProperties( # label font main\n fname=fontdir+'Gotham-BookItalic.ttf', size=12)\n \nfig = plt.figure(figsize = (5,3.5))\n\nax = fig.add_axes([0.2, 0.2, 0.75, 0.75])\n#ax = fig.add_axes([0., 0., 1., 1.])\n\ntimes = []\nmass1 = []\nmass2 = []\nmass3 = []\nsinkmasses = []\n\n\n# set limits for density that we're interested in\nndense1 = 1.e3\nndense2 = 1.e4\nndense3 = 1.e5\nmu = 2.33 # mean molecular weight\nmH = 1.6733e-24\n\nmdense1 = ndense1 * mu * mH\nmdense2 = ndense2 * mu * mH\nmdense3 = ndense3 * mu * mH\n\nfor snap in range(int(sys.argv[1]),int(sys.argv[2]),int(sys.argv[3])):\n dataname = 'output_'+str(snap).zfill(5)+'/MassAndVolumeInDensityBins.dat'\n infoname = 'output_'+str(snap).zfill(5)+'/info_'+str(snap).zfill(5)+'.txt'\n sinkname = 'output_'+str(snap).zfill(5)+'/sink_'+str(snap).zfill(5)+'.out'\n limitsname = './densegasplotlimits.dat'\n \n f = open(limitsname)\n line = f.readline()\n sl = line.split()\n alltlow = float(sl[0])\n allthi = float(sl[1])\n line = f.readline()\n sl = line.split()\n qtlow = float(sl[0])\n qthi = float(sl[1])\n f.close()\n \n if os.path.isfile(dataname):\n (time, unit_t) = get_time(infoname)\n timeMyr = time * unit_t / 31557600.0 / 1.e6\n times.append(timeMyr)\n \n data = ascii.read(dataname)\n \n # first get M(>rho) / tff(rho) \n cumulativemass = np.cumsum(data['CellMassMsun'])\n massabove = cumulativemass.max() - cumulativemass\n logdens = np.log10(data['Density'])\n G = 6.67e-8\n yr = 31557600.0\n msun = 1.99e33\n tffcgs = np.sqrt(3 * np.pi / (32 * G * data['Density']))\n tffMyr = tffcgs / 1.e6 / yr \n ndense = data['Density'] / (mu * mH)\n print massabove\n \n # add the mass in sinks to these massabove values\n sinks = get_sinks(sinkname)\n Msinks = np.sum(sinks[:,1])\n massabove[:] += Msinks\n rhoTffRatio = massabove / tffMyr\n \n # next approximate the instantaneous star formation rate\n infolast = 'output_'+str(snap-1).zfill(5)+'/info_'+str(snap-1).zfill(5)+'.txt'\n infonext = 'output_'+str(snap+1).zfill(5)+'/info_'+str(snap+1).zfill(5)+'.txt'\n sinklast = 'output_'+str(snap-1).zfill(5)+'/sink_'+str(snap-1).zfill(5)+'.out'\n sinknext = 'output_'+str(snap+1).zfill(5)+'/sink_'+str(snap+1).zfill(5)+'.out'\n sinkslast = get_sinks(sinklast)\n sinksnext = get_sinks(sinknext)\n Msinkslast = np.sum(sinkslast[:,1])\n Msinksnext = np.sum(sinksnext[:,1])\n (time, unit_t) = get_time(infolast)\n Tlast = time * unit_t / yr / 1.e6\n (time, unit_t) = get_time(infonext)\n Tnext = time * unit_t / yr / 1.e6\n sfeNow = (Msinksnext - Msinkslast) / (Tnext - Tlast)\n print Msinksnext, Msinkslast, Tnext, Tlast, sfeNow\n \n \nax.plot(ndense, sfeNow / rhoTffRatio, color = c2, linewidth = 2)\n\n(time, unit_t) = get_time(infoname)\ntimeMyr = time * unit_t / yr / 1.e6\nhoriz = 1.e4\nvert = 0.8\nax.text(horiz, vert, r'%.1f' %timeMyr, transform = ax.transData, \n ha = 'right',va = 'baseline', 
fontproperties = lfm, color = c1, snap = False)\nax.text(1.1*horiz, vert, r'Myr', transform = ax.transData,\n    ha = 'left', va = 'baseline', fontproperties = lfm, color = c1, snap = False) \nax.set_ylim(.001, 1.0)\nax.set_xscale('log')\nax.set_yscale('log') \n \n \nset_ticks(ax, '0.6')\n\nax.set_xlabel('n / cm'+r'$\\mathdefault{^{-3}}$', fontproperties = tfm, size = 15)\nax.set_ylabel(r'$\\mathdefault{SFR_{ff}}$', fontproperties = tfm, size = 15)\n\nfor label in ax.get_xticklabels() + ax.get_yticklabels():\n    label.set_fontproperties(tfm)\n\nplt.savefig(outdir+'SFRff.pdf')\n\n\nsys.exit()\n \n\n\n\n \n"
},
{
"alpha_fraction": 0.4787861704826355,
"alphanum_fraction": 0.5172801613807678,
"avg_line_length": 39.74809265136719,
"blob_id": "1a4562c0bef0fedd56395d7818a0aad7ca739f66",
"content_id": "c3276c458504108a21ed46647d849c1afcac735b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10677,
"license_type": "no_license",
"max_line_length": 136,
"num_lines": 262,
"path": "/turbgravfilaments2/plot_C18O_N2Hplus_map.py",
"repo_name": "nickolas1/ramses_plot_scripts",
"src_encoding": "UTF-8",
"text": "from __future__ import division\n\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\nimport matplotlib.cm as cm\nimport gc\nimport sys\nimport h5py\nfrom astropy import constants as const\nfrom astropy import units as u\nfrom os.path import expanduser\nfrom matplotlib import rcParams\nimport scipy.ndimage as ndimage\n\n\n\"\"\"\nplot column density from different density ranges. \n\nthe hdf5 files contain log surface density in g cm^-2\nfiles with 'C18O': surface density of gas with number density 10^3 < n < 10^4.5\nfiles with 'N2Hplus': surface density of gas with number density 10^4.5 < n\nfiles that just say 'surface_density': all the gas\n\nthis script plots column density in cm^-2, converting from g cm^-2 using mu=2.33\n\"\"\"\n\n\n# import ramses helper functions and get figure directory\nhomedir = expanduser('~')+'/'\nsys.path.append(homedir+'pythonhelpers/ramses/')\nfrom ramses_helpers import *\n\n#mpl.rc_file(homedir+'pythonhelpers/ramses/matplotlibrc')\n\n# set some fonts\nfontdir = homedir+'Documents/astronomy/macfontsforpython/'\ntfm = fm.FontProperties( # tick font\n fname=fontdir+'Gotham-Book.ttf', size=7)\nlfm = fm.FontProperties( # label font main\n fname=fontdir+'Gotham-BookItalic.ttf', size=6) \n\nrcParams['xtick.direction'] = 'out'\n\noutdir = get_output_path(homedir)\noutdir = './'\n\nsnapstr = str(int(sys.argv[1])).zfill(5)\ninfoname = 'reduced_'+snapstr+'/info_'+snapstr+'.txt'\n(boxlen, unit_l) = get_boxsize(infoname)\n\n\n\"\"\"\n xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n toggle these to plot a colorbar and length scale bar\n xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n\"\"\"\nPlotColorBars = False\nPlotScaleBar = False\nPlotSinks = False\n\noutputres = 512\n\nmu = 2.33\n\nfor snap in range(int(sys.argv[1]),int(sys.argv[2]),int(sys.argv[3])):\n # first do projections\n imshowmap = 'nickmapVD2'\n #imshowmap = 'bone_r'\n \n cdmin = 10**-3.3 \n cdmax = 10**-1.5\n cdmin = 0\n cdmax = 4\n \n cdmin = 0\n cdmax = 1.*10**22\n \n fileprefix = 'reduced_'+str(snap).zfill(5)+'/'\n sinkname = fileprefix+'sink_'+str(snap).zfill(5)+'.csv'\n\n \n for i in xrange(3):\n fileCO = fileprefix+'surface_density_C18O'+str(i)+'.hdf5'\n fileN2Hplus = fileprefix+'surface_density_N2Hplus'+str(i)+'.hdf5'\n if os.path.exists(fileCO):\n print snap,fileCO\n fC18O = h5py.File(fileCO, 'r')\n sdC18O = fC18O['surface_density_C18O']\n # convert to linear units, divide by mu * mH\n sdC18O = 10**np.array(sdC18O) / (mu * const.m_p.cgs.value)\n # there are a lot of very small values- cull them before getting some\n # info on the range of interesting values \n sdnonzero = sdC18O[sdC18O > 10**4]\n print 'mean non-zero C18O column density: ',np.mean(sdnonzero),'cm^-2'\n print 'median non-zero C18O column density: ',np.median(sdnonzero),'cm^-2'\n print 'max C18O column density: ',np.max(sdC18O),'cm^-2'\n \n\n \n fN2Hplus = h5py.File(fileN2Hplus, 'r')\n sdN2Hplus = fN2Hplus['surface_density_N2Hplus']\n # convert to linear units, divide by mu * mH\n sdN2Hplus = 10**np.array(sdN2Hplus) / (mu * const.m_p.cgs.value)\n # there are a lot of very small values- cull them before getting some\n # info on the range of interesting values \n sdnonzero = sdN2Hplus[sdN2Hplus > 10**4]\n print ''\n print 'mean non-zero N2Hplus column density: ',np.mean(sdnonzero),'cm^-2'\n print 'median non-zero N2Hplus column density: ',np.median(sdnonzero),'cm^-2'\n print 'max N2Hplus column density: 
',np.max(sdN2Hplus),'cm^-2'\n \n \n fig = plt.figure(figsize = (outputres/128, outputres/128), dpi=128)\n ax = fig.add_axes([0., 0., 1., 1.])\n #sdC18O = np.ma.masked_where(sdC18O < 10**20, sdC18O)\n ax.imshow(sdC18O,\n interpolation = 'nearest',\n origin = 'lower',\n vmin = cdmin,\n vmax = cdmax,\n cmap = imshowmap)\n # turn off axes\n ax.set_frame_on(False)\n ax.axes.get_yaxis().set_visible(False)\n ax.axes.get_xaxis().set_visible(False)\n \n \n # see if we have any sink particles to plot\n if PlotSinks:\n try:\n with open(sinkname): \n sinks = ascii.read(sinkname, names=sinkcolumnnames, converters=sinkconverters, data_start=0)\n if len(sinks['ID']) > 0: \n # figure out the size of the sinks in units of 0-1\n #mincell = 1.0/2**lmax\n #sinkrad = 1.5 * mincell\n sscale = sd.shape[0] / boxlen\n \n if i == 0:\n i0 = 'y'\n i1 = 'z'\n if i == 1:\n i0 = 'x'\n i1 = 'z'\n if i == 2:\n i0 = 'x'\n i1 = 'y'\n\n # convert to imshow scale\n #sinkpos *= res[1] / wd\n #sinkrad *= res[1] / wd\n\n # color by the log of mass. the minimum that we plot is 0.1 Msun,\n # max is a few hundred.\n mmin = np.log10(1)\n mmax = np.log10(100)\n sinkmap = cm.get_cmap('nickmapSink')\n sinkcolors = sinkmap((np.log10(sinks['mass']) - mmin) / (mmax - mmin)) \n ax.autoscale(False)\n #for s in xrange(len(sinks)):\n # ax.add_artist(Circle((sinkpos[s,0],sinkpos[s,1]),sinkrad,fc=csink))\n ax.scatter(sinks[i0]*sscale,sinks[i1]*sscale,marker='.',s=9,facecolor=sinkcolors,lw=0) \n except IOError:\n pass \n \n \"\"\"\n xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n add a colorbar\n xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n \"\"\"\n if PlotColorBars:\n ax2 = fig.add_axes([0.1, 0.1, 0.4, 0.015])\n a = np.outer(np.arange(cdmin, cdmax, (cdmax - cdmin)/255), np.ones(10)).T\n ax2.imshow(a, \n aspect = 'auto',\n interpolation = 'nearest',\n origin = 'lower',\n vmin = cdmin,\n vmax = cdmax,\n cmap = imshowmap,\n extent = [cdmin, cdmax, 0, 1])\n ax2.set_frame_on(False)\n ax2.axes.get_yaxis().set_visible(False)\n ax2.xaxis.set_ticks(np.arange(cdmin, cdmax+1, 1.0))\n ax2.set_xlabel(r'$\\mathdefault{I_{C^{18}O}}$ / K km s$\\mathdefault{^{-1}}$', fontproperties = tfm, size=8, color='0.15')\n \n set_ticks(ax2, '0.15')\n for label in ax2.get_xticklabels() + ax2.get_yticklabels():\n label.set_fontproperties(tfm)\n\n\n \"\"\"\n xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n add a scalebar\n xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n \"\"\"\n if PlotScaleBar:\n ax3 = fig.add_axes([0.7, 0.1, 0.2, 0.0015])\n a = np.outer(np.ones(100)*.8*cdmax, np.ones(10)).T\n ax3.imshow(a, \n aspect = 'auto',\n interpolation = 'nearest',\n origin = 'lower',\n vmin = cdmin,\n vmax = cdmax,\n cmap = imshowmap,\n extent = [cdmin, cdmax, 0, 1])\n ax3.set_frame_on(False)\n ax3.axes.get_yaxis().set_visible(False)\n ax3.axes.get_xaxis().set_visible(False)\n ax3.text(0.1, 0.75, r'2pc', transform = ax3.transAxes,\n va = 'bottom', ha = 'left', fontproperties = tfm, size=8, color='0.15', snap = False)\n set_ticks(ax3, '0.15')\n for label in ax3.get_xticklabels() + ax3.get_yticklabels():\n label.set_fontproperties(tfm)\n \n \n \n framesdir = outdir+'surfacedensityCO'+str(i)+'/'\n framesdir = './'\n if not os.path.exists(framesdir):\n os.makedirs(framesdir)\n framename = framesdir+'C18O_'+str(i)+'_frame_'+str(snap).zfill(5)+'.png'\n plt.savefig(framename, dpi = 128, transparent = True)\n \n \"\"\"\n 
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n contours of NH2Plus\n xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n \"\"\"\n \n fig = plt.figure(figsize = (2*outputres/128, 2*outputres/128), dpi=128)\n ax = fig.add_axes([0., 0., 1., 1.])\n contourcolors = [(252/255, 182/255, 252/255),\n (247/255, 141/255, 255/255),\n (246/255, 103/255, 255/255),\n (241/255, 56/255, 255/255),\n (236/255, 0/255, 255/255)]\n contourvals = [5.e20, 1.e21, 1.5e21, 2.e21, 2.5e21, 2.e21]\n ax.contour(ndimage.gaussian_filter(sdN2Hplus, sigma=0.5, order=0),contourvals,\n origin = 'lower',\n linewidths = 0.25,\n colors = contourcolors)\n \n # turn off axes\n ax.set_frame_on(False)\n ax.axes.get_yaxis().set_visible(False)\n ax.axes.get_xaxis().set_visible(False) \n \n framename = framesdir+'N2Hplus_'+str(i)+'_frame_'+str(snap).zfill(5)+'.png'\n plt.savefig(framename, dpi = 128, transparent = True)\n fC18O.close() \n fN2Hplus.close()\n plt.close() \n del(fC18O)\n del(fN2Hplus)\n del(sdC18O)\n del(sdN2Hplus)\n del(sdnonzero)\n gc.collect()\n\n"
},
{
"alpha_fraction": 0.48573416471481323,
"alphanum_fraction": 0.5168754458427429,
"avg_line_length": 38.095237731933594,
"blob_id": "c8815d3181b3789fee0735c38cdb363784299e4f",
"content_id": "4879e9ec5188eafd9e3aec686d882b0f9ea61680",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5748,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 147,
"path": "/turbgravfilaments/plot_CO_map_from_reduced_data_filament_only_sliced.py",
"repo_name": "nickolas1/ramses_plot_scripts",
"src_encoding": "UTF-8",
"text": "from __future__ import division\n\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\nimport matplotlib.colors as col\nimport matplotlib.cm as cm\nimport gc\nimport sys\nimport h5py\nfrom astropy import constants as const\nfrom astropy import units as u\nfrom os.path import expanduser\nfrom matplotlib import rcParams\n\n\n# import ramses helper functions and get figure directory\nhomedir = expanduser('~')+'/'\n\n# import ramses helper functions and get figure directory\nsys.path.append(homedir+'pythonhelpers/ramses/')\nfrom ramses_helpers import *\n#mpl.rc_file(homedir+'pythonhelpers/ramses/matplotlibrc')\n\n# set some fonts\nfontdir = homedir+'Documents/astronomy/macfontsforpython/'\ntfm = fm.FontProperties( # tick font\n fname=fontdir+'Gotham-Book.ttf', size=13)\nlfm = fm.FontProperties( # label font main\n fname=fontdir+'Gotham-BookItalic.ttf', size=12) \n\nrcParams['xtick.direction'] = 'out'\n\noutdir = get_output_path(homedir)\n#outdir = './'\n# the limits on the surface density color map is set up for the compact clouds. here we\n# see if we're looking at diffuse clouds, and if so we adjust for that.\nsnapstr = str(int(sys.argv[1])).zfill(5)\ninfoname = 'reduced_'+snapstr+'/info_'+snapstr+'.txt'\n(boxlen, unit_l) = get_boxsize(infoname)\n\nfor snap in range(int(sys.argv[1]),int(sys.argv[2]),int(sys.argv[3])):\n # first do projections\n imshowmap = 'nickmapVD2'\n #imshowmap = 'bone_r'\n \n \"\"\"\n this data is generated by making a surface density map taking into account only\n gas that is between 10^3 and 10^4.5 cm^-3. to convert to a crude approximation \n of a C18O map, use\n David S. Meier and Jean L. Turner ApJ 551:687 2001 equation 2\n \n N(H2)C18O = 2.42e14 cm^-2 [H2]/[C18O] * exp(5.27/Tex)/(exp(5.27/Tex)-1) IC18O K km/s\n [H2]/[C18O] = 2.94e6\n \n so first convert g cm^-2 to cm^-2 using mu = 2.33, then convert to ICO using Tex=10 K\n \"\"\"\n cdmin = 10**-3.3 \n cdmax = 10**-1.5\n cdmin = 0\n cdmax = 4\n \n fileprefix = 'reduced_'+str(snap).zfill(5)+'/'\n sinkname = fileprefix+'sink_'+str(snap).zfill(5)+'.out'\n \n for i in xrange(2):\n file = fileprefix+'surface_density_CO_fil'+str(i)+'_sliced.hdf5'\n if os.path.exists(file):\n print snap,file\n f = h5py.File(file, 'r')\n sd = f['surface_density_CO']\n sd = 10**np.array(sd) \n print sd[128]\n sd /= (2.33 * const.m_p.cgs.value) # convert to number density\n print sd[128]\n sd /= (2.42e14 * 2.94e6) # non-temperature factors of IC18O conversion\n sd /= (np.exp(5.27/10) / (np.exp(5.27/10) - 1)) # temperature part\n print sd[128]\n fig = plt.figure(figsize = (sd.shape[0]/200, sd.shape[1]/200), dpi=200)\n ax = fig.add_axes([0., 0., 1., 1.])\n ax.imshow(sd,\n interpolation = 'nearest',\n origin = 'lower',\n vmin = cdmin,\n vmax = cdmax,\n cmap = imshowmap)\n # turn off axes\n ax.set_frame_on(False)\n ax.axes.get_yaxis().set_visible(False)\n ax.axes.get_xaxis().set_visible(False)\n \n # see if we have any sink particles to plot\n try:\n with open(sinkname): \n sinks = get_sinks(sinkname)\n if len(sinks) > 0:\n # figure out the size of the sinks in units of 0-1\n #mincell = 1.0/2**lmax\n #sinkrad = 1.5 * mincell\n \n sinkpos = sinks[:,2:5]\n sinkpos[:] /= boxlen # shrink to 0-1 in all dimensions\n # get projected positions\n keep = np.array([1,1,1])\n keep[i] = 0\n keep = np.array(keep, dtype=bool)\n sinkpos = sinkpos[:,keep]\n \n # restrict to same region as density plot\n #ledge = cntr[0] - wd/2\n #bedge = cntr[2] - ht/2\n #sinkpos[:] -= np.array([ledge, 
bedge])\n # convert to imshow scale\n #sinkpos *= res[1] / wd\n #sinkrad *= res[1] / wd\n sinkpos *= sd.shape[0]\n print sinkpos \n sinkmass = sinks[:,1]\n # color by the log of mass. the minimum that we plot is 0.1 Msun,\n # max is a few hundred.\n mmin = np.log10(1)\n mmax = np.log10(100)\n sinkmap = cm.get_cmap('nickmapSink')\n sinkcolors = sinkmap((np.log10(sinkmass) - mmin) / (mmax - mmin)) \n ax.autoscale(False)\n #for s in xrange(len(sinks)):\n # ax.add_artist(Circle((sinkpos[s,0],sinkpos[s,1]),sinkrad,fc=csink))\n ax.scatter(sinkpos[:,0],sinkpos[:,1],marker='.',s=9,facecolor=sinkcolors,lw=0) \n except IOError:\n pass \n \n \n \n \n framesdir = outdir+'surfacedensityCOfils/'\n if not os.path.exists(framesdir):\n os.makedirs(framesdir)\n \n framename = framesdir+'sd_fil'+str(i)+'_frame_sliced_'+str(snap).zfill(5)+'.png'\n print framename\n\t plt.savefig(framename, dpi = 200)\n f.close() \n plt.close() \n del(f)\n del(sd)\n gc.collect()\n\n"
},
{
"alpha_fraction": 0.6041588187217712,
"alphanum_fraction": 0.626086950302124,
"avg_line_length": 29.34482765197754,
"blob_id": "698a75a5f2549b434995d124fd9e8413435fc853",
"content_id": "d51948bf0f4decab4db211b4bfb5e3f91ef4999f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2645,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 87,
"path": "/make_reduced_data.py",
"repo_name": "nickolas1/ramses_plot_scripts",
"src_encoding": "UTF-8",
"text": "from __future__ import division\n\nfrom yt.mods import *\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\nimport gc\nimport sys\nimport h5py\nimport shutil\nfrom astropy.io import ascii\nfrom os.path import expanduser\n\n# import ramses helper functions and get figure directory\nhomedir = expanduser('~')+'/'\n\n# import ramses helper functions and get figure directory\nsys.path.append(homedir+'pythonhelpers/ramses/')\nfrom ramses_helpers import *\n\nfor snap in range(int(sys.argv[1]),int(sys.argv[2]),int(sys.argv[3])):\n infoname = 'output_'+str(snap).zfill(5)+'/info_'+str(snap).zfill(5)+'.txt'\n sinkname = 'output_'+str(snap).zfill(5)+'/sink_'+str(snap).zfill(5)+'.out'\n sinkname2 = 'output_'+str(snap).zfill(5)+'/sink_'+str(snap).zfill(5)+'.csv'\n \n fileprefix = 'reduced_'+str(snap).zfill(5)+'/'\n\n if not os.path.exists(fileprefix):\n os.makedirs(fileprefix)\n \n # copy the infofile and sinkfile to the reduced directory \n shutil.copy(infoname, fileprefix)\n if os.path.exists(sinkname):\n shutil.copy(sinkname, fileprefix)\n shutil.copy(sinkname2, fileprefix)\n \n (lmin, lmax) = get_level_min_max(infoname)\n (boxlen, unit_l) = get_boxsize(infoname)\n\n ds = load(infoname)\n \n # center on original center of cloud\n cntr = [0.5, 0.5, 0.5]\n\n wd = 1.0\n # res should be base resolution times 2**levels of refinement * wd\n resx = int(wd * 2**lmax)\n res = (resx,resx)\n ht = wd * res[0] / res[1]\n width = (wd, 'unitary')\n height = (ht, 'unitary')\n \n \n for i in range(3):\n # get projection in each direction\n proj = ds.h.proj('Density', i)\n frb = proj.to_frb(width, res, center = cntr, height = height)\n filename = fileprefix+'surface_density_'+str(i)+'.hdf5'\n f = h5py.File(filename, 'w')\n dset = f.create_dataset('surface_density', data = np.log10(frb['Density']))\n f.close()\n del(proj)\n del(frb)\n del(dset)\n gc.collect()\n \n if boxlen > 7:\n sphererad = 27.0\n else:\n sphererad = 13.5\n spherevol = 4.0 * np.pi / 3.0 * (sphererad * 3.086e18)**3\n sp = ds.h.sphere(cntr, (sphererad, 'pc'))\n \n nbins = 128\n dmin = 1.e-25\n dmax = 1.e-17\n profilename = fileprefix+'MassAndVolumeInDensityBins.dat'\n profile = BinnedProfile1D(sp,nbins,'Density',dmin,dmax,end_collect=True)\n profile.add_fields(\"CellMassMsun\", weight=None)\n profile.add_fields(\"CellVolume\", weight=None)\n profile.write_out(profilename)\n \n del(ds)\n del(sp)\n del(profile)\n gc.collect()\n \n"
},
{
"alpha_fraction": 0.514665961265564,
"alphanum_fraction": 0.5616512894630432,
"avg_line_length": 32.88679122924805,
"blob_id": "55f07e789aa63a73ea90c88566f4fc651aa7a751",
"content_id": "c1526b844eeb69de0a0a5112e45c86f6bc6ec0c1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3682,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 106,
"path": "/make_frame_from_projection.py",
"repo_name": "nickolas1/ramses_plot_scripts",
"src_encoding": "UTF-8",
"text": "from __future__ import division\n\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\nimport matplotlib.colors as col\nimport matplotlib.cm as cm\nimport gc\nimport sys\nimport h5py\nfrom os.path import expanduser\n\n\ndef make_my_cmap():\n x = [-4.11, -3.11, -.931, .069]\n # x[3] and x[0] are cdmax and cdmin below.\n beginx = (x[1] - x[0]) / (x[3] - x[0])\n begingray = 0.9\n transitionx = (x[2] - x[0]) / (x[3] - x[0])\n transitiongray = 0.35\n finishr = 37.0 / 256\n finishg = 49.0 / 256\n finishb = 111. / 256\n cdict = {'red': ((0.0, 1.0, 1.0),\n (beginx, begingray, begingray),\n (transitionx, transitiongray, transitiongray),\n (1.0, finishr, finishr)),\n 'green': ((0.0, 1.0, 1.0),\n (beginx, begingray, begingray),\n (transitionx, transitiongray, transitiongray),\n (1.0, finishg, finishg)),\n 'blue': ((0.0, 1.0, 1.0),\n (beginx, begingray, begingray),\n (transitionx, transitiongray, transitiongray),\n (1.0, finishb, finishb))} \n cmap1 = col.LinearSegmentedColormap('my_colormap', cdict, N=256, gamma=1.0)\n cm.register_cmap(name='nickmap', cmap=cmap1)\n\n# import ramses helper functions and get figure directory\nhomedir = expanduser('~')+'/'\n\n# import ramses helper functions and get figure directory\nsys.path.append(homedir+'pythonhelpers/ramses/')\nfrom ramses_helpers import *\nmpl.rc_file(homedir+'pythonhelpers/ramses/matplotlibrc')\noutdir = get_output_path(homedir)\n\n# set some fonts\nfontdir = homedir+'Documents/astronomy/macfontsforpython/'\ntfm = fm.FontProperties( # tick font\n fname=fontdir+'Gotham-Book.ttf', size=13)\nlfm = fm.FontProperties( # label font main\n fname=fontdir+'Gotham-BookItalic.ttf', size=12)\n \nmake_my_cmap()\n\n# convert these to \nmu = 2.33 # mean molecular weight\nmH = 1.6733e-24\n\nfor snap in range(int(sys.argv[1]),int(sys.argv[2]),int(sys.argv[3])):\n infoname = 'output_'+str(snap).zfill(5)+'/info_'+str(snap).zfill(5)+'.txt'\n fig = plt.figure(figsize = (5, 3.5))\n ax = fig.add_axes([0.2, 0.2, 0.75, 0.75])\n cdensmax = np.log10(10**2.0 / (mu * mH)) # convert these from g to n\n cdensmin = np.log10(10**-6.0 / (mu * mH))\n bins = 128\n binvals = np.arange(cdensmin, 1.000001*cdensmax, (cdensmax - cdensmin) / (bins))\n binmids = 0.5 * (np.roll(binvals, -1) + binvals)\n binmids = binmids[:len(binmids) - 1]\n\n files = [\n 'surface_density_0.hdf5',\n 'surface_density_1.hdf5',\n 'surface_density_2.hdf5']\n colors = [c1,c2,c3]\n \n imshowmap = 'nickmap'\n cdmin = -4.11 \n cdmax = 0.069\n \n print snap\n \n for i in xrange(len(files)):\n f = h5py.File('output_'+str(snap).zfill(5)+'/'+files[i], 'r')\n sd = f['surface_density']\n print np.max(sd)\n fig = plt.figure(figsize = (sd.shape[0]/200, sd.shape[1]/200), dpi=200)\n ax = fig.add_axes([0., 0., 1., 1.])\n ax.imshow(sd,\n interpolation = 'nearest',\n origin = 'lower',\n vmin = cdmin,\n vmax = cdmax,\n cmap = imshowmap)\n # turn off axes\n ax.set_frame_on(False)\n ax.axes.get_yaxis().set_visible(False)\n ax.axes.get_xaxis().set_visible(False)\n \n framename = outdir+'frames'+str(i).zfill(1)+'/frame_'+str(snap).zfill(4)+'.png'\n \n plt.savefig(framename, dpi = 200)\n del(fig)\n gc.collect()\n \n\n \n \n \n "
},
{
"alpha_fraction": 0.5904762148857117,
"alphanum_fraction": 0.625551700592041,
"avg_line_length": 33.92683029174805,
"blob_id": "4ebf0aebcd3d27aa7166af4b71fc193d0f582870",
"content_id": "74e5f5763306a7d49736a4c9b12cf034f64807fd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4305,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 123,
"path": "/make_volume_density_pdf.py",
"repo_name": "nickolas1/ramses_plot_scripts",
"src_encoding": "UTF-8",
"text": "from __future__ import division\n\nfrom yt.mods import *\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\nimport gc\nimport sys\nfrom astropy.io import ascii\nfrom os.path import expanduser\n\n# import ramses helper functions and get figure directory\nhomedir = expanduser('~')+'/'\nsys.path.append(homedir+'pythonhelpers/ramses/')\nfrom ramses_helpers import *\nmpl.rc_file(homedir+'pythonhelpers/ramses/matplotlibrc')\noutdir = get_output_path(homedir)\n\n# set some fonts\nfontdir = homedir+'/Documents/astronomy/macfontsforpython/'\ntfm = fm.FontProperties( # tick font\n fname=fontdir+'Gotham-Book.ttf', size=13)\nlfm = fm.FontProperties( # label font main\n fname=fontdir+'Gotham-BookItalic.ttf', size=12)\n \nfig = plt.figure(figsize = (5,3.5))\n\nax = fig.add_axes([0.2, 0.2, 0.75, 0.75])\n#ax = fig.add_axes([0., 0., 1., 1.])\n\ntimes = []\nmass1 = []\nmass2 = []\nmass3 = []\nsinkmasses = []\n\n# volume of the sphere the profile comes from\nsphererad = 27.0\nspherevol = 4.0 * np.pi / 3.0 * (sphererad * 3.086e18)**3\n\nprint os.path.basename(os.path.normpath(os.getcwd()))\nif os.path.basename(os.path.normpath(os.getcwd())) == 'turbshock512k4gcl':\n snaps = [15, 82, 95, 109]\n myrstrings = ['1', '6', '7', '8']\nif os.path.basename(os.path.normpath(os.getcwd())) == 'turbshock512k4gcsl':\n snaps = [4, 28, 41, 55]\n myrstrings = ['0.1', '1.0', '1.5', '2.0']\ncolors = [c1, c2, c3, c4]\n\n#snaps=[1]\n#myrstrings=['0.0']\n\nfor i in range(len(snaps)):\n snap = snaps[i]\n dataname = 'output_'+str(snap).zfill(5)+'/MassAndVolumeInDensityBins.dat'\n infoname = 'output_'+str(snap).zfill(5)+'/info_'+str(snap).zfill(5)+'.txt'\n sinkname = 'output_'+str(snap).zfill(5)+'/sink_'+str(snap).zfill(5)+'.out'\n \n if os.path.isfile(dataname):\n (time, unit_t) = get_time(infoname)\n timeMyr = time * unit_t / 31557600.0 / 1.e6\n timeMyrRoundString = np.round(timeMyr)\n times.append(timeMyr)\n \n data = ascii.read(dataname)\n \n ax.plot(data['Density'], data['CellVolume']/spherevol, color = colors[i], linewidth = 1.5)\n \n # maximum likelihood fit to the first snap\n if i == 1:\n dens = np.array(data['Density'])\n vals = np.array(data['CellVolume'])\n # find the maximum value above 1.e-24 (want to avoid the ISM peak)\n cloudsel = (dens > 1.e-24)\n denssub = dens[cloudsel]\n valssub = vals[cloudsel]\n densmax = denssub[valssub.argmax()]\n logdensmax = np.log10(densmax)\n\n # select 3 decades surrounding the density maximum to fit to\n logdens = np.log10(dens)\n fitsel = logdens[np.abs(logdens - logdensmax) <= 1.5]\n print fitsel\n # get the maximum likelihood lognormal to those 3 decades\n muML = fitsel.sum() / len(fitsel)\n print len(fitsel)\n print muML\n\n sys.exit()\n # fit to three orders of magnitude surrounding peak density\n maxpt = data['Density'].argmax()\n maxdens = np.max(data['Density'])\n fitvals = np.max\n \nhoriz = 1.2e-24\nvertbase = 1.e-5\nax.text(horiz, vertbase, myrstrings[0]+' Myr', transform=ax.transData,\n va = 'baseline', fontproperties = lfm, color=c1, snap = False) \nif len(myrstrings) > 1:\n\tax.text(horiz, vertbase / 5, myrstrings[1]+' Myr', transform=ax.transData,\n \t va = 'baseline', fontproperties = lfm, color=c2, snap = False)\n\tax.text(horiz, vertbase / 5**2, myrstrings[2]+' Myr', transform=ax.transData,\n \t va = 'baseline', fontproperties = lfm, color=c3, snap = False) \n\tax.text(horiz, vertbase / 5**3, myrstrings[3]+' Myr', transform=ax.transData,\n \t va = 'baseline', fontproperties = lfm, color=c4, 
snap = False) \n\nax.set_xscale('log')\nax.set_yscale('log')\nax.set_xlim(1.e-25, 2.e-18)\nax.set_ylim(1.e-8,0.1)\nset_ticks(ax, '0.6')\n\nax.xaxis.grid(False,which='minor')\nax.yaxis.grid(False,which='minor')\n\nax.set_xlabel(r'density / g $\\mathdefault{cm^{-3}}$', fontproperties = tfm, size = 15)\n#ax.set_ylabel('d', fontproperties = tfm, size = 15)\n\nfor label in ax.get_xticklabels() + ax.get_yticklabels():\n label.set_fontproperties(tfm)\n\nplt.savefig(outdir+'VolumeDensityPDFs.pdf')\n\n\n\n\n \n"
},
{
"alpha_fraction": 0.734883725643158,
"alphanum_fraction": 0.7441860437393188,
"avg_line_length": 25.875,
"blob_id": "c58e1f62aad3557254f7ef38ad0ca764db4bf229",
"content_id": "2058905ca2030898a4f0b9d3c93eb0b01f232d8e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 215,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 8,
"path": "/README.md",
"repo_name": "nickolas1/ramses_plot_scripts",
"src_encoding": "UTF-8",
"text": "ramses_plot_scripts\n===================\n\nscripts for plotting and analysing ramses simulations using yt\n\nworking directory for my analysis scripts for different projects using ramses.\n\nmost of these require yt-3.0.\n"
},
{
"alpha_fraction": 0.5459305047988892,
"alphanum_fraction": 0.5687767863273621,
"avg_line_length": 28.072463989257812,
"blob_id": "1c02548ad388b0850c62ed4c9988e5534f9b46fb",
"content_id": "89bef2e32f828c1f78486e6527ad15b9a3eaff97",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2101,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 69,
"path": "/make_ic_slice_images.py",
"repo_name": "nickolas1/ramses_plot_scripts",
"src_encoding": "UTF-8",
"text": "from __future__ import division\n\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\nimport matplotlib.colors as col\nimport matplotlib.cm as cm\nimport gc\nimport sys\nimport h5py\nfrom os.path import expanduser\n\n\n# import ramses helper functions and get figure directory\nhomedir = expanduser('~')+'/'\n\n# import ramses helper functions and get figure directory\nsys.path.append(homedir+'pythonhelpers/ramses/')\nfrom ramses_helpers import *\nmpl.rc_file(homedir+'pythonhelpers/ramses/matplotlibrc')\noutdir = get_output_path(homedir)\n\n\nfor snap in range(int(sys.argv[1]),int(sys.argv[2]),int(sys.argv[3])):\n fileprefix = 'ic_slices_reduced_'+str(snap).zfill(5)+'/'\n \n framesdir = outdir+'ic_slices/'\n if not os.path.exists(framesdir):\n os.makedirs(framesdir)\n \n i = 0\n \n # now do slices\n imshowmap = 'bone_r'\n imshowmap = 'nickmapVD'\n vdoff = np.log10(8)\n cdmin = -24.5 - vdoff \n cdmax = -19. - vdoff \n # imshowmap = 'gray_r'\n cdmin = - 24\n cdmax = -20.25\n \n for j in xrange(256,512):\n file = fileprefix+'density_slice_'+str(i)+'_'+str(j).zfill(5)+'.hdf5'\n print snap,file\n f = h5py.File(file, 'r')\n sd = f['volume_density']\n print np.min(sd),np.max(sd), cdmin, cdmax\n fig = plt.figure(figsize = (sd.shape[0]/200, sd.shape[1]/200), dpi=200)\n ax = fig.add_axes([0., 0., 1., 1.])\n ax.imshow(sd,\n interpolation = 'nearest',\n origin = 'lower',\n vmin = cdmin,\n vmax = cdmax,\n cmap = imshowmap)\n # turn off axes\n ax.set_frame_on(False)\n ax.axes.get_yaxis().set_visible(False)\n ax.axes.get_xaxis().set_visible(False)\n\n framename = framesdir+'sl'+str(i)+'_'+str(j)+'_frame_'+str(snap).zfill(5)+'.png'\n plt.savefig(framename, dpi = 200)\n f.close() \n plt.close() \n del(f)\n del(sd)\n gc.collect() \n \n\n \n \n \n \n"
},
{
"alpha_fraction": 0.5757167339324951,
"alphanum_fraction": 0.6072733998298645,
"avg_line_length": 29.875,
"blob_id": "73f758c7e55fd661685e4a9f8b437f30b1b7ca1e",
"content_id": "1d966f3b4d91b7b96c1c4b7f3f6026493c93a26d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5197,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 168,
"path": "/turbgravfilaments2/make_N2Hplus_spectra.py",
"repo_name": "nickolas1/ramses_plot_scripts",
"src_encoding": "UTF-8",
"text": "from __future__ import division\n\nfrom yt.mods import *\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\nimport gc\nimport sys\nimport h5py\nimport shutil\nfrom astropy.io import ascii\nfrom os.path import expanduser\nfrom scipy import special\n\n# import ramses helper functions and get figure directory\nhomedir = expanduser('~')+'/'\n\n# import ramses helper functions and get figure directory\nsys.path.append(homedir+'pythonhelpers/ramses/')\nfrom ramses_helpers import *\n\ndef _C18O(field, data):\n mu = 2.33 # mean molecular weight\n mH = 1.6733e-24\n lolim = 1000.0 * mu * mH # not interested in anything below 10^3 / cm^3\n hilim = 31622.0 * mu * mH # not interested in anything above 10^4.5 / com^3\n newfield = data['Density']\n antiselection = (data['Density'] < lolim) | (data['Density'] >= hilim)\n newfield[antiselection] = 0.0\n return newfield\n \ndef _N2Hplus(field, data):\n mu = 2.33 # mean molecular weight\n mH = 1.6733e-24\n lolim = 31622.0 * mu * mH # not interested in anything below 10^4.5 / cm^3\n newfield = data['Density']\n antiselection = (data['Density'] < lolim)\n newfield[antiselection] = 0.0\n return newfield \n\n\nsnap = int(sys.argv[1])\naxis = int(sys.argv[2])\n\nif axis == 0:\n los = 'x'\n dlos = 'dx'\n vlos = 'x-velocity'\n sliceax = 'z'\nif axis == 1:\n los = 'y'\n dlos = 'dy'\n vlos = 'y-velocity'\n sliceax = 'z'\nif axis == 2:\n los = 'z'\n dlos = 'dz'\n vlos = 'z-velocity'\n sliceax = 'y'\n\ninfoname = 'output_'+str(snap).zfill(5)+'/info_'+str(snap).zfill(5)+'.txt'\n\nspecdir = 'reduced_'+str(snap).zfill(5)+'/posvel_'+str(axis)+'/'\nif not os.path.exists(specdir):\n os.makedirs(specdir)\n\n(lmin, lmax) = get_level_min_max(infoname)\n(boxlen, unit_l) = get_boxsize(infoname)\n\nds = load(infoname)\n\n# add new density fields\n#add_field('C18O', function=_C18O)\nadd_field('N2Hplus', function=_N2Hplus)\n\nvmax = 2.5e5\nvmin = -2.5e5\n# roughly match hacar et al by takin 0.05 km/s bins\nbins = (vmax - vmin) / 1.e5 / 0.05\nbinvals = np.arange(vmin, 1.000001*vmax, (vmax - vmin) / bins)\nbinmids = 0.5 * (np.roll(binvals, -1) + binvals)\nbinmids = binmids[:len(binmids) - 1]\n# get a version of the bins in km/s instead of cgs\nbinmidskms = binmids / 1.e5\n \n# save the velocities to a file\nf = h5py.File(specdir+'spectrumvels.hdf5', 'w')\ndset = f.create_dataset('binmidskms', data = binmidskms, dtype='float32')\nf.close()\n\n\"\"\"\n to keep things manageable, make this map on the 1024**3 base grid.\n since refinement is only in regions that are collapsing, and we're\n not interested in those dense regions for the C18O map anyway, this is fine.\n\"\"\"\nres = 2**lmin\ndres = 1.0 / res\n\nfor j in xrange(400):\n pty = (j + 0.5) * dres\n thesehists = []\n print j, pty\n # get a slice\n slc = ds.h.slice(sliceax, pty)\n \n # get it into a frb\n frb = slc.to_frb(\n (1.0, 'unitary'), # get the whole extent of the box\n res, # don't degrade anything\n center = [0.5, 0.5, 0.5], # centered in the box\n height = (1.0, 'unitary')) # get the whole extent of the box\n \n rhoN2Hplus = np.array(frb['N2Hplus'])\n # rhoN2Hplus = np.array(frb['N2Hplus'])\n sigmaN2Hplus = 0.0535 # thermal width of N2Hplus line in km/s\n \n sigma = sigmaN2Hplus * 1.e5 # convert to cm/s\n erfdenom = np.sqrt(2*sigma**2)\n \n x = np.array(frb[los])\n dx = np.array(frb[dlos])\n vx = np.array(frb[vlos])\n weight = dx * rhoN2Hplus\n # we need to grab rows from the slice differently depending on what axis we're projecting\n if axis == 
0:\n for i in xrange(res):\n hist, binedges = np.histogram(\n vx[i,:],\n range = (vmin, vmax),\n bins = binvals,\n weights = weight[i,:])\n thesehists.append(hist)\n if axis > 0:\n for i in xrange(res):\n # for each point along the slice, march along the projecting dimension\n # and turn each detection into a gaussian. bin this gaussian into the \n # velbins.\n hist = np.zeros(len(binmids))\n for k in xrange(len(vx[:,i])):\n peak = vx[k,i]\n # calculate the cumulative distribution of this line at each velocity bin edge\n cdfs = 0.5 * (1 + special.erf((binvals - peak) / erfdenom)) * weight[k,i]\n # subtract adjacent values to get the contribution to each bin\n thislinebinned = np.roll(cdfs, -1) - cdfs\n hist = hist + thislinebinned[:-1]\n thesehists.append(hist) \n\n # once we have the histograms of mass-weighted velocity along each point for this\n # row, save it to an hdf5 file\n f = h5py.File(specdir+'spectra_C18O_'+str(j).zfill(4)+'.hdf5', 'w')\n dset = f.create_dataset('spectraC18O', data = thesehists, dtype='float32')\n dset.attrs['slowindex'] = j\n dset.attrs[sliceax] = pty\n f.close()\n \n del(slc)\n del(frb)\n del(f)\n del(dset)\n del(x)\n del(vx)\n del(dx)\n del(rhoN2Hplus)\n del(weight)\n del(hist)\n del(thesehists)\n gc.collect()\n \n \n"
},
{
"alpha_fraction": 0.5962209105491638,
"alphanum_fraction": 0.625,
"avg_line_length": 32.32038879394531,
"blob_id": "8b93e22b54e4679367f9d1286130706e82d26837",
"content_id": "914fde5bded40d155e189fa659bd2064ba050bb9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3440,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 103,
"path": "/turbgravfilaments/make_reduced_data_CO_filaments_only_gridaligned_turbgrav.py",
"repo_name": "nickolas1/ramses_plot_scripts",
"src_encoding": "UTF-8",
"text": "from __future__ import division\n\nfrom yt.mods import *\nfrom yt.config import ytcfg\nimport gc\nimport sys\nimport h5py\nimport shutil\nfrom os.path import expanduser\nfrom mpi4py import MPI\n\n# import ramses helper functions and get figure directory\nhomedir = expanduser('~')+'/'\n\n# import ramses helper functions and get figure directory\nsys.path.append(homedir+'pythonhelpers/ramses/')\nfrom ramses_helpers import *\n\ndef _CO(field, data):\n mu = 2.33 # mean molecular weight\n mH = 1.6733e-24\n lolim = 1000.0 * mu * mH # not interested in anything below 10^3 / cm^3\n hilim = 31622.0 * mu * mH # not interested in anything above 10^4.5 / com^3\n newfield = data['Density']\n antiselection = (data['Density'] < lolim) | (data['Density'] >= hilim)\n newfield[antiselection] = 1.e-99\n \n # cut out the filaments we're interested in\n leftpoint = np.array([0.795, 0.34])\n rightpoint = np.array([0.805, 0.05]) \n width = 0.045\n vector = rightpoint - leftpoint\n midpoint = leftpoint + 0.5 * vector\n print midpoint\n # translate to midpoint\n transx = data['x'] - midpoint[0]\n transy = data['y'] - midpoint[1]\n length = np.linalg.norm(vector)\n orthovec = (-vector[1], vector[0])\n orthovec /= np.linalg.norm(orthovec)\n vector /= np.linalg.norm(vector)\n print vector\n print orthovec\n \n # rotate around midpoint. orthovec is already a unit vector now.\n beta = np.arccos(orthovec[1])\n print beta\n rotx = transx * np.cos(beta) - transy * np.sin(beta)\n roty = transx * np.sin(beta) + transy * np.cos(beta)\n \n # cut based on width and length of box\n antiselection2 = (np.abs(rotx) > 0.5*length) | (np.abs(roty) > 0.5 * width)\n newfield[antiselection2] = 1.e-99\n \n return newfield\n\nfor snap in range(int(sys.argv[1]),int(sys.argv[2]),int(sys.argv[3])):\n infoname = 'output_'+str(snap).zfill(5)+'/info_'+str(snap).zfill(5)+'.txt'\n sinkname = 'output_'+str(snap).zfill(5)+'/sink_'+str(snap).zfill(5)+'.out'\n \n fileprefix = 'reduced_'+str(snap).zfill(5)+'/'\n\n if ytcfg.getint('yt', '__topcomm_parallel_rank') == 0:\n if not os.path.exists(fileprefix):\n os.makedirs(fileprefix) \n # copy the infofile and sinkfile to the reduced directory \n shutil.copy(infoname, fileprefix)\n if os.path.exists(sinkname):\n shutil.copy(sinkname, fileprefix)\n \n (lmin, lmax) = get_level_min_max(infoname)\n (boxlen, unit_l) = get_boxsize(infoname)\n\n ds = load(infoname, fields=['Density','x-velocity','y-velocity','z-velocity','Pressure'])\n \n add_field('CO', function=_CO)\n \n # center on original center of cloud\n cntr = [0.5, 0.5, 0.5]\n\n wd = 1.0\n # res should be base resolution times 2**levels of refinement * wd\n lmaxplot = min(11, lmax) \n resx = int(wd * 2**lmaxplot)\n res = (resx,resx)\n ht = wd * res[0] / res[1]\n width = (wd, 'unitary')\n height = (ht, 'unitary')\n\n\n #get a projection orthogonal to the filament\n proj = ds.h.proj('Density', 0)\n frb = proj.to_frb(width, res, center = cntr, height = height)\n filename = fileprefix+'surface_density_CO_fil0_0.hdf5'\n f = h5py.File(filename, 'w')\n dset = f.create_dataset('surface_density_CO', data = np.log10(frb['CO']))\n print 'done dset',ytcfg.getint('yt', '__topcomm_parallel_rank')\n f.close()\n del(f)\n del(dset)\n del(frb)\n del(proj)\n gc.collect() \n\n \n"
},
{
"alpha_fraction": 0.6910994648933411,
"alphanum_fraction": 0.7100785374641418,
"avg_line_length": 21.308822631835938,
"blob_id": "29c6f621df55ec0c10e4962923c3eacd2cadb854",
"content_id": "e7d4550058ec91a7f28e5f3ee91daa4e402e1cfb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1528,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 68,
"path": "/turbgravfilaments2/make_spectra_fits.py",
"repo_name": "nickolas1/ramses_plot_scripts",
"src_encoding": "UTF-8",
"text": "from __future__ import division\n\n\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\nimport gc\nimport sys\nimport h5py\nimport shutil\nimport glob\nfrom astropy.io import ascii\nfrom astropy.io import fits\nfrom astropy import constants as const\nfrom astropy import units as u\nfrom os.path import expanduser\nfrom scipy import special\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\"\"\"\nusage:\npython fooppv.py N A F\nN: number of output to use. reduced_N needs to be here.\nA: axis of the projection (0, 1, 2)\n\"\"\"\n\n# import ramses helper functions and get figure directory\nhomedir = expanduser('~')+'/'\n\n# import ramses helper functions and get figure directory\nsys.path.append(homedir+'pythonhelpers/ramses/')\nfrom ramses_helpers import *\n\n\n# downsample the spectra to a 256 squared grid\ninres = 1024\n\nsnap = 18\naxis = 2\nfileprefix = 'reduced_'+str(snap).zfill(5)+'/'\n\nf = h5py.File(fileprefix+'posvel_'+str(axis)+'/spectrumvels.hdf5')\nvels = np.array(f['binmidskms'])\nf.close()\n\noutcube = np.zeros([inres, inres, len(vels)])\n\n#\n# j is image up\n# |\n# | \n# |_______i is image right\n#\n# spectra files march along j\n#\ntotnonzero = 0\nfor inj in xrange(inres):\n specfile = fileprefix+'posvel_'+str(axis)+'/spectra_C18O_'+str(inj).zfill(4)+'.hdf5'\n f = h5py.File(specfile)\n specs = f['spectraC18O']\n print specfile\n outcube[:, inj, :] = specs\n f.close()\n \nhdu = fits.PrimaryHDU(outcube)\nhdulist = fits.HDUList([hdu])\nhdulist.writeto('spectra_C18O.fits')\n\n \n\n \n"
},
{
"alpha_fraction": 0.5415162444114685,
"alphanum_fraction": 0.5635780096054077,
"avg_line_length": 27.284090042114258,
"blob_id": "8406d1eb5e150e5272cb15a907284c5f7f8feddb",
"content_id": "1e2144841874582913a4f99b198d070697ba2229",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2493,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 88,
"path": "/turbgravfilaments/make_detections_from_spectra.py",
"repo_name": "nickolas1/ramses_plot_scripts",
"src_encoding": "UTF-8",
"text": "from __future__ import division\n\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\nimport gc\nimport sys\nimport h5py\nimport shutil\nimport glob\nfrom astropy.io import ascii\nfrom os.path import expanduser\n\n\"\"\"\nusage:\nrun this in the posvel directory that contains all of the spectra.hdf5 files.\n\nthis goes through each spectra file and reduced the spectra down to a list of \n'detected' velocities.\n\ne.g. in posvel_0:\npython make_detections_from_spectra.py\n\"\"\"\n\nres = 1024\ndres = 1 / res\n\nf = h5py.File('spectrumvels.hdf5')\nvels = np.array(f['binmidskms'])\nf.close()\n\n# make empty array of detections\ndets = np.ndarray([res, res, 32])\ndets.fill(np.nan)\n\nfor i in xrange(231,res):\n print i\n ypt = (i + 0.5) * dres\n # read in this row's hdf5 spectra file\n specfile = 'spectra_'+str(i).zfill(4)+'.hdf5'\n f = h5py.File(specfile)\n specs = f['spectra']\n \n # check to make sure we are where we think we are\n if ypt != specs.attrs['z']:\n print 'PROBLEM ',i, ypt, specs.attrs['z']\n \n for s in xrange(res):\n spec = np.array(specs[s])\n # remove anything that was recorded by density = 1.e-99 material\n spec[spec < 1.e-90] = 0\n # normalize the spectrum by its rms\n if sum(spec) > 0:\n spec /= np.sqrt(np.mean(spec**2))\n # set non-'detected' vels equal to zero\n spec[spec < 2] = 0\n \n vcount = 0\n # march through and merge together touching detections\n runningn = 0\n runningv = 0\n if spec[0] >= 1:\n runningn = spec[0]\n runningv = vels[0] * spec[0]\n for v in xrange(1, len(spec)):\n if (spec[v] > 0) & (spec[v-1] == 0):\n # we are starting a new detection\n runningn = spec[v]\n runningv = vels[v] * spec[v]\n if (spec[v] > 0) & (spec[v-1] > 0):\n # we are continuing a detection\n runningn += spec[v]\n runningv += vels[v] * spec[v]\n if (spec[v] == 0) & (spec[v-1] > 0):\n # we are terminating a detection, log it\n dets[i,s,vcount] = runningv / runningn\n vcount += 1\n if vcount > 10:\n print s\n print dets[i,s]\n f.close()\n \n \n# NOTE this is [z, y, [vels]]\nf = h5py.File('detections.hdf5', 'w')\ndset = f.create_dataset('veldetections', data = dets)\nf.close()\n "
},
{
"alpha_fraction": 0.6052106022834778,
"alphanum_fraction": 0.6338309645652771,
"avg_line_length": 29.844573974609375,
"blob_id": "bc1cc8ded339b09a052b4418485e8a976b55385d",
"content_id": "d1b4b67289c07bfaf674a8cc7136326b628dc008",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10517,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 341,
"path": "/plot_fil_0.py",
"repo_name": "nickolas1/ramses_plot_scripts",
"src_encoding": "UTF-8",
"text": "from __future__ import division\n\nfrom yt.mods import *\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\nimport gc\nimport sys\nimport h5py\nimport shutil\nimport glob\nfrom astropy.io import ascii\nfrom os.path import expanduser\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\"\"\"\nusage:\npython fooppv.py N A F\nN: number of output to use. reduced_N needs to be here.\nA: axis of the projection (0, 1, 2)\nF: filament number \n\"\"\"\n\n# import ramses helper functions and get figure directory\nhomedir = expanduser('~')+'/'\n\n# import ramses helper functions and get figure directory\nsys.path.append(homedir+'pythonhelpers/ramses/')\nfrom ramses_helpers import *\nmpl.rc_file(homedir+'pythonhelpers/ramses/matplotlibrc')\noutdir = get_output_path(homedir)\n\nmpl.rcParams['xtick.major.size'] = 9\n\ntc = '0.5'\ntc1 = '0.9'\n\n# set some fonts\nfontdir = homedir+'Documents/astronomy/macfontsforpython/'\ntfm = fm.FontProperties( # tick font\n fname=fontdir+'Gotham-Book.ttf', size=13)\nlfm = fm.FontProperties( # label font main\n fname=fontdir+'Gotham-BookItalic.ttf', size=12)\n\nfig = plt.figure(figsize = (8, 7))\nax0 = fig.add_axes([0.1, 0.1, 0.75, 0.25])\nax1 = fig.add_axes([0.1, 0.3635, 0.75, 0.25])\nax2 = fig.add_axes([0.1, 0.615, 0.75, 0.25])\n\n#snap = int(sys.argv[1])\n#axis = int(sys.argv[2])\n#filnumber = int(sys.argv[3])\n# hardwire this\nsnap = 81\naxis = 0\nfilnumber = 0\n\nfildir = 'reduced_'+str(snap).zfill(5)+'/filaments'+str(axis)+'_'+str(filnumber)+'/'\n\nprefix = 'reduced_'+str(snap).zfill(5)+'/'\ninfoname = prefix+'info_'+str(snap).zfill(5)+'.txt'\nfilprefix = prefix+'filament_'+str(filnumber)+'/'\nfilname = prefix+'filaments'+str(axis)+'_'+str(snap).zfill(5)+'.txt'\n\n(boxlen, unit_l) = get_boxsize(infoname)\nif boxlen > 7:\n sdoff = np.log10(4)\n vdoff = np.log10(8)\nelse:\n sdoff = 0.0\n vdoff = 0.0\n \nimshowmap = 'nickmapSD'\n#imshowmap = 'bone_r'\ncdmin = -4.11 - sdoff + 1.2\ncdmax = 0.069 - sdoff \n\n(lmin, lmax) = get_level_min_max(infoname)\n(boxlen, unit_l) = get_boxsize(infoname)\n\nplotsurfacedensity = True\n\n# read in the rectangles that define the filaments we're interested in\n# these are in units of pixels in the finder image, so we will need to translate these\n# to unitary units!\nrectdata = ascii.read(filname)\nfil = rectdata[filnumber]\n\nleftpoint = np.array([fil[1], fil[2]])\nrightpoint = np.array([fil[3], fil[4]])\nwidth = fil[5]\nprint leftpoint\nprint rightpoint\nprint width\n\n# vector pointing along the filament box's long axis\nvec = rightpoint - leftpoint\nlength = np.linalg.norm(vec)\n# the orthogonal direction\northovec = (-vec[1], vec[0])\n# normalize them\northovec /= np.linalg.norm(orthovec)\nvec /= np.linalg.norm(vec)\nprint length\n\n# we will move along the lower-left line of the rectangle, starting from startpoint \n# and ending at endpoint \nstartpoint = leftpoint - orthovec * width/2\nendpoint = rightpoint - orthovec * width/2\nprint startpoint,endpoint\n# for the distance to move in each step, choose a step size closest to the number of\n# grid cells (at the finest refinement level) it would take to traverse the box \nexpandfac = 4 # resample the surface density map\nnl = int(length * expandfac)\nnw = int(width * expandfac)\ndl = length / nl\ndw = width / nw\n\nprint nl, nw\n\nif plotsurfacedensity:\n subbox = np.zeros([nw, nl])\n\n file = prefix+'surface_density_'+str(axis)+'.hdf5'\n print snap,file\n f = h5py.File(file, 'r')\n sd = f['surface_density']\n\n 
ny = sd.shape[1]\n for il in xrange(nl):\n l = startpoint + vec * dl * (il + 0.5)\n for iw in xrange(nw):\n pt = l + orthovec * dw * (iw + 0.5)\n # the sd array and imshow have different row-column ordering \n # convention than everything else in the world, including the \n # coordinates that we use to define the rectangles. so the \n # ordering of the points here is reversed.\n subbox[iw, il] = sd[int(pt[1]),int(pt[0])]\n f.close()\n to_unitary = 1 / 2**lmax\n lscale = length * to_unitary * 10 * boxlen #unit length is 10 pc\n ax0.imshow(subbox,\n origin='lower',\n extent = [0.001, lscale, 0.001, lscale*width/length],\n vmin = cdmin,\n vmax = cdmax,\n cmap = imshowmap,\n interpolation = 'nearest')\n \n # turn off axes\n #ax0.set_frame_on(False)\n #ax0.axes.get_yaxis().set_visible(False)\n #ax0.axes.get_xaxis().set_visible(False)\n ax0.get_yaxis().tick_left()\n for line in ax0.xaxis.get_ticklines():\n line.set_color(tc1)\n for line in ax0.yaxis.get_ticklines():\n line.set_color(tc1) \n for line in ax0.yaxis.get_ticklines():\n line.set_color(tc1) \n ax0.tick_params(which = 'minor', color = tc1) \n ax0.grid(False)\n \n ax0.set_xlabel(r'$\\mathdefault{L_{fil}}$ / pc', fontproperties = tfm, size = 15, color=tc)\n ax0.set_ylabel(r'$\\mathdefault{W_{fil}}$ / pc', fontproperties = tfm, size = 15, color=tc)\n for label in ax0.get_xticklabels() + ax0.get_yticklabels():\n label.set_fontproperties(tfm)\n label.set_color(tc)\n\n\"\"\"\nthis section plots the 'observed' density-weighted line of sight velocity.\nthis is plotted in the middle axis.\n\"\"\"\nspectra = []\n#for i in xrange(358):\nfor filename in glob.glob(fildir+'combinedspectrum_*.hdf5'): \n# filename = 'combinedspectrum_'+str(i).zfill(4)+'.hdf5'\n print filename\n f = h5py.File(filename, 'r')\n spectrum = np.array(f['spectra'])\n # normalize each slice by the rms of itself. this allows you to cut by 'detection' \n # rather than the 'intensity' of the line. 
comment this out to go back to intensity.\n # note: this is probably bogus\n #spectrum /= np.sqrt(np.mean(spectrum**2))\n spectra.append(spectrum)\n f.close()\n\nspectra /= np.mean(spectra)\n#spectra /= np.sqrt(np.mean(spectrum**2))\n\nprint np.min(spectra),np.mean(spectra),np.median(spectra),np.max(spectra)\n\nf = h5py.File(fildir+'spectrumvels.hdf5')\nvels = np.array(f['binmidskms'])\nf.close()\n\nax1.imshow(np.transpose(spectra),\n interpolation='nearest',\n origin = 'lower',\n extent = [0.001, lscale, 0.001, np.max(vels) - np.min(vels)],\n aspect = 0.13,\n #vmin = 3,\n vmax = 8,\n #cmap = 'gray_r',\n cmap = 'nickmapVD')\n\nax1.get_yaxis().tick_left()\nfor line in ax1.xaxis.get_ticklines():\n line.set_color(tc1)\nfor line in ax1.yaxis.get_ticklines():\n line.set_color(tc1) \nfor line in ax1.yaxis.get_ticklines():\n line.set_color(tc1) \nax1.tick_params(which = 'minor', color = tc1) \nax1.tick_params(labelbottom='off')\nax1.grid(color='0.0',alpha=0.1)\n\n\nax1.set_ylabel(r'v / km $\\mathdefault{s^{-1}}$', fontproperties = tfm, size = 15, color=tc)\nfor label in ax1.get_xticklabels() + ax1.get_yticklabels():\n label.set_fontproperties(tfm)\n label.set_color(tc) \n \n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\"\"\"\n\n\nf = h5py.File(fildir+'spectrumvels.hdf5')\nvels = np.array(f['binmidskms'])\nf.close()\n\nl_det = [] # detected l coordinates (length)\nw_det = [] # detected w coordinates (width)\nv_det = [] # detected v coordinates (velocity)\n#for i in xrange(358):\n\nl = 0 # keep track of l coordinate\nfor filename in glob.glob(fildir+'spectra_*.hdf5'): \n# filename = 'combinedspectrum_'+str(i).zfill(4)+'.hdf5'\n print filename\n f = h5py.File(filename, 'r')\n specs = f['spectra']\n # treat the spectrum of each point individually\n spectrum = np.zeros(specs.shape[1])\n \n for s in xrange(specs.shape[0]):\n spec = np.array(specs[s])\n # normalize the spectrum by its rms\n spec /= np.sqrt(np.mean(spec**2))\n \n # set 'detected' vels equal to one, the rest to zero\n spec[spec < 3] = 0.0\n spec[spec > 0] = 1.0\n \n # march through and glom together touching detections\n runningn = 0\n runningv = 0\n if spec[0] >= 1:\n runningn = spec[0]\n runningv = vels[0] * spec[0]\n for i in xrange(1,len(spec)):\n if (spec[i] >= 1) & (spec[i-1] == 0):\n # we are starting a new detection\n runningn = spec[i]\n runningv = vels[i] * spec[i]\n if (spec[i] >= 1) & (spec[i-1] >= 1):\n # we are continuing a new detection\n runningn += spec[i]\n runningv += vels[i] * spec[i]\n if (spec[i] == 0) & (spec[i-1] >= 1):\n # we are ending a detection, record it\n l_det.append(l*dl)\n w_det.append(s*dw)\n # need to change w to be an average.\n v_det.append(runningv / runningn)\n # check to see if this is really averaging- if runningn>1, print\n #if runningn > 1:\n # print runningv, runningv/runningn, runningn\n l += 1\n f.close()\n \n# v_det is already in km/s. 
\n# offset by vmin to match the middle plot\nv_det -= np.min(vels)\n\n#convert l_det and w_det to pc\nto_unitary = 1 / 2**lmax * expandfac\nlscale2 = length * to_unitary * 10 * boxlen / l #unit length is 10 pc\nl_det = np.array(l_det)*lscale2\nw_det = np.array(w_det)*lscale2\n#ax2.scatter(l_det,v_det,marker='.',s=13,facecolor='0.0',lw=0,alpha=.33)\n#ax2.set_xlim(0.001, lscale)\n#ax2.set_ylim(np.min(v_det)-.1, np.max(v_det)+.1)\n#ax2.set_axis_bgcolor('1.0')\n\ndvel = vels[1] - vels[0]\nvelmin = 2.1\nvelmax = 5.6\nvelbins = np.arange(velmin, velmax, dvel)\ndl = (np.max(l_det) - np.min(l_det))/l\nlbins = np.arange(np.min(l_det), np.max(l_det), dl)\n\nax2.tick_params(labelbottom='off')\n\n\nH, xedges, yedges = np.histogram2d(v_det, l_det, bins=(velbins, lbins))\nprint np.min(H), np.max(H), np.mean(H), np.median(H)\nextent = [yedges[0], yedges[-1], xedges[0], xedges[-1]]\nax2.imshow(H,\n extent = [0.001, lscale, velmin, velmax],\n origin = 'lower',\n interpolation = 'nearest',\n aspect=.25,\n cmap = 'nickmapVD',\n vmax = 8\n )\n\nax2.get_yaxis().tick_left()\nax2.get_xaxis().tick_bottom()\nfor line in ax2.xaxis.get_ticklines():\n line.set_color(tc1)\nfor line in ax2.yaxis.get_ticklines():\n line.set_color(tc1) \nfor line in ax2.yaxis.get_ticklines():\n line.set_color(tc1) \nax2.tick_params(which = 'minor', color = tc1) \nax2.tick_params(labelbottom='off')\nax2.grid(color='0.0',alpha=0.1)\n\nax2.set_ylabel(r'v / km $\\mathdefault{s^{-1}}$', fontproperties = tfm, size = 15, color=tc)\nfor label in ax2.get_xticklabels() + ax2.get_yticklabels():\n label.set_fontproperties(tfm)\n label.set_color(tc)\n\nplt.savefig('foofil1.pdf') \nsys.exit()\n\nplt.savefig('fooppv.png',dpi=200)\nplt.savefig('fooppv.pdf')\n#plt.show()"
},
{
"alpha_fraction": 0.5829235315322876,
"alphanum_fraction": 0.6113097667694092,
"avg_line_length": 30.443662643432617,
"blob_id": "d0986bb19429595a5e6f90224aaa32b0c615bd04",
"content_id": "99f456c92320b90e0b3857dac66dc21948457c39",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4474,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 142,
"path": "/plot_finder_image_from_reduced_data.py",
"repo_name": "nickolas1/ramses_plot_scripts",
"src_encoding": "UTF-8",
"text": "from __future__ import division\n\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\nimport matplotlib.colors as col\nimport matplotlib.cm as cm\nimport gc\nimport sys\nimport h5py\nfrom astropy.io import ascii\nfrom os.path import expanduser\n\n\n# import ramses helper functions and get figure directory\nhomedir = expanduser('~')+'/'\n\n# import ramses helper functions and get figure directory\nsys.path.append(homedir+'pythonhelpers/ramses/')\nfrom ramses_helpers import *\nmpl.rc_file(homedir+'pythonhelpers/ramses/matplotlibrc')\noutdir = get_output_path(homedir)\n\n# the limits on the surface density color map is set up for the compact clouds. here we\n# see if we're looking at diffuse clouds, and if so we adjust for that.\nsnapstr = str(int(sys.argv[1])).zfill(5)\ninfoname = 'reduced_'+snapstr+'/info_'+snapstr+'.txt'\n(boxlen, unit_l) = get_boxsize(infoname)\nif boxlen > 7:\n sdoff = np.log10(4)\n vdoff = np.log10(8)\nelse:\n sdoff = 0.0\n vdoff = 0.0\n\nsnap = int(sys.argv[1])\naxis = int(sys.argv[2])\n\n \nimshowmap = 'nickmapSD'\n#imshowmap = 'bone_r'\ncdmin = -4.11 - sdoff\ncdmax = 0.069 - sdoff\n\nfileprefix = 'reduced_'+str(snap).zfill(5)+'/'\nsinkname = fileprefix+'sink_'+str(snap).zfill(5)+'.out'\n\n\nfile = fileprefix+'surface_density_'+str(axis)+'.hdf5'\n\nprint snap,file\nf = h5py.File(file, 'r')\nsd = f['surface_density']\nfig = plt.figure(figsize = (sd.shape[0]/200, sd.shape[1]/200), dpi=200)\nax = fig.add_axes([0., 0., 1., 1.])\nax.imshow(sd,\n interpolation = 'nearest',\n origin = 'lower',\n vmin = cdmin,\n vmax = cdmax,\n cmap = imshowmap)\n# turn off axes\nax.set_frame_on(False)\nax.axes.get_yaxis().set_visible(False)\nax.axes.get_xaxis().set_visible(False)\n\n# see if we have any sink particles to plot\ntry:\n with open(sinkname): \n sinks = get_sinks(sinkname)\n if len(sinks) > 0:\n # figure out the size of the sinks in units of 0-1\n #mincell = 1.0/2**lmax\n #sinkrad = 1.5 * mincell\n \n sinkpos = sinks[:,2:5]\n sinkpos[:] /= boxlen # shrink to 0-1 in all dimensions\n # get projected positions\n keep = np.array([1,1,1])\n keep[axis] = 0\n keep = np.array(keep, dtype=bool)\n sinkpos = sinkpos[:,keep]\n\n # restrict to same region as density plot\n #ledge = cntr[0] - wd/2\n #bedge = cntr[2] - ht/2\n #sinkpos[:] -= np.array([ledge, bedge])\n # convert to imshow scale\n #sinkpos *= res[1] / wd\n #sinkrad *= res[1] / wd\n sinkpos *= sd.shape[0]\n print sinkpos \n sinkmass = sinks[:,1]\n # color by the log of mass. 
the minimum that we plot is 0.1 Msun,\n # max is a few hundred.\n mmin = np.log10(1)\n mmax = np.log10(100)\n sinkmap = cm.get_cmap('nickmapSink')\n sinkcolors = sinkmap((np.log10(sinkmass) - mmin) / (mmax - mmin)) \n ax.autoscale(False)\n #for s in xrange(len(sinks)):\n # ax.add_artist(Circle((sinkpos[s,0],sinkpos[s,1]),sinkrad,fc=csink))\n ax.scatter(sinkpos[:,0],sinkpos[:,1],marker='.',s=9,facecolor=sinkcolors,lw=0) \nexcept IOError:\n pass \n\nx = [1,10, 100,1000]\ny = [10,10,10,10]\n#ax.scatter(x, y, s=.5) \n# define a rectangle by drawing a line on a filament and choosing a width\nrectdata = ascii.read(fileprefix+'filaments'+str(axis)+'_'+str(snap).zfill(5)+'.txt')\nfor fil in rectdata:\n leftpoint = np.array([fil[1], fil[2]])\n rightpoint = np.array([fil[3], fil[4]])\n width = fil[5]\n vector = rightpoint - leftpoint\n orthovec = (-vector[1], vector[0])\n orthovec /= np.linalg.norm(orthovec)\n x = (leftpoint[0], rightpoint[0])\n y = (leftpoint[1], rightpoint[1])\n ul = leftpoint + orthovec * width/2\n ll = leftpoint - orthovec * width/2\n ur = rightpoint + orthovec * width/2\n lr = rightpoint - orthovec * width/2\n rectangle = np.transpose([ul, ll, lr, ur, ul])\n #ax.plot(x,y,lw=.3,color=cred)\n ax.plot(rectangle[0], rectangle[1], lw=.7,color='m', solid_joinstyle='miter')\n \n\nframesdir = 'finderimage'+str(axis)+'/'\nif not os.path.exists(framesdir):\n os.makedirs(framesdir)\n\nframename = 'finderimage'+str(axis)+'_frame_'+str(snap).zfill(5)+'.pdf'\nprint framename\nplt.savefig(framename, dpi = 200)\nf.close() \nplt.close() \ndel(f)\ndel(sd)\ngc.collect()\n \n"
},
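A note on the geometry used by the finder-image script above: each filament box is built from a centerline (leftpoint to rightpoint) and a width, by normalizing the orthogonal vector and offsetting both endpoints by half the width. A minimal standalone sketch of that construction; the point values here are made up for illustration:

import numpy as np

# hypothetical centerline endpoints and box width, in image pixel coordinates
leftpoint = np.array([100.0, 340.0])
rightpoint = np.array([260.0, 50.0])
width = 45.0

vector = rightpoint - leftpoint
# rotate the centerline direction by 90 degrees to get the box's short axis
orthovec = np.array([-vector[1], vector[0]])
orthovec /= np.linalg.norm(orthovec)  # unit vector across the filament

# offset each endpoint by half the width on either side to get the four corners
ul = leftpoint + orthovec * width / 2
ll = leftpoint - orthovec * width / 2
ur = rightpoint + orthovec * width / 2
lr = rightpoint - orthovec * width / 2

# closed polygon for plotting: transpose gives an x-list and a y-list
rectangle = np.transpose([ul, ll, lr, ur, ul])
print(rectangle[0], rectangle[1])  # x and y coordinates of the box outline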
{
"alpha_fraction": 0.577400267124176,
"alphanum_fraction": 0.6003053784370422,
"avg_line_length": 32.42948532104492,
"blob_id": "b057f112c0074c2142037cfb576cc932e63d310c",
"content_id": "bb449cbd7189c56f67ace5137e6f19d057c97774",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5239,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 156,
"path": "/make_filament_posvel_data.py",
"repo_name": "nickolas1/ramses_plot_scripts",
"src_encoding": "UTF-8",
"text": "from __future__ import division\n\nfrom yt.mods import *\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\nimport gc\nimport sys\nimport h5py\nimport shutil\nfrom astropy.io import ascii\nfrom os.path import expanduser\n\n# import ramses helper functions and get figure directory\nhomedir = expanduser('~')+'/'\n\n# import ramses helper functions and get figure directory\nsys.path.append(homedir+'pythonhelpers/ramses/')\nfrom ramses_helpers import *\nmpl.rc_file(homedir+'pythonhelpers/ramses/matplotlibrc')\noutdir = get_output_path(homedir)\n\ndef _CO13(field, data):\n mu = 2.33 # mean molecular weight\n mH = 1.6733e-24\n lolim = 1000.0 * mu * mH # not interested in anything below 10^3 / cm^3\n hilim = 31622.0 * mu * mH # not interested in anything above 10^4.5 / com^3\n newfield = data['Density']\n antiselection = (data['Density'] < lolim) | (data['Density'] >= hilim)\n newfield[antiselection] = 1.e-99\n return newfield\n\n\nsnap = int(sys.argv[1])\naxis = int(sys.argv[2])\n\ninfoname = 'output_'+str(snap).zfill(5)+'/info_'+str(snap).zfill(5)+'.txt'\nfilname = 'reduced_'+str(snap).zfill(5)+'/filaments'+str(axis)+'_'+str(snap).zfill(5)+'.txt'\n\n(lmin, lmax) = get_level_min_max(infoname)\n(boxlen, unit_l) = get_boxsize(infoname)\n\nds = load(infoname, fields=['Density','x-velocity','y-velocity','z-velocity','Pressure'])\n\nadd_field('CO13', function=_CO13)\n\n# make a projection of the CO13 density\nfor i in xrange(1):\n COmap = 'reduced_'+str(snap).zfill(5)+'/CO13sufacedensity_'+str(i)+'.hdf5'\n if not os.path_exists(COmap):\n proj = pf.h.proj('CO13', i)\n frb = proj.to_frb(width, res, center = cntr, height = height)\n f = h5py.File(COmap, 'w')\n dset = f.create_dataset('surface_density', data = np.log10(frb['CO13']))\n f.close()\n\n# read in the rectangles that define the filaments we're interested in\n# these are in units of pixels in the finder image, so we will need to translate these\n# to unitary units!\nrectdata = ascii.read(filname)\n\nfor fil in rectdata:\n fildir = 'reduced_'+str(snap).zfill(5)+'/filaments'+str(axis)+'_'+str(fil[0])+'/'\n if not os.path.exists(fildir):\n os.makedirs(fildir)\n \n leftpoint = np.array([fil[1], fil[2]])\n rightpoint = np.array([fil[3], fil[4]])\n width = fil[5]\n # translate to unitary units\n leftpoint /= 2**lmax\n rightpoint /= 2**lmax\n width /= 2**lmax\n print leftpoint\n print rightpoint\n print width\n \n # vector pointing along the filament box's long axis\n vec = rightpoint - leftpoint\n length = np.linalg.norm(vec)\n # the orthogonal direction\n orthovec = (-vec[1], vec[0])\n # normalize them\n orthovec /= np.linalg.norm(orthovec)\n vec /= np.linalg.norm(vec)\n \n # we will move along the lower-left line of the rectangle, starting from startpoint \n # and ending at endpoint \n startpoint = leftpoint - orthovec * width/2\n endpoint = rightpoint - orthovec * width/2\n \n # for the distance to move in each step, choose a step size closest to the number of\n # grid cells (at the finest refinement level) it would take to travers the box \n nl = int(length * 2**lmax)\n nw = int(width * 2**lmax)\n dl = length / nl\n dw = width / nw\n print nl, nw\n print dl, dw\n print length, width, 1/(2**lmax)\n \n print startpoint\n print endpoint\n print dl\n \n vmax = 4.e5\n vmin = -vmax\n vmax = 4.e5\n vmin = -4.e5\n # roughly match hacar et al by takin 0.075 km/s bins\n bins = (vmax - vmin) / 1.e5 / 0.1\n binvals = np.arange(vmin, 1.000001*vmax, (vmax - vmin) / bins)\n binmids = 0.5 
* (np.roll(binvals, -1) + binvals)\n binmids = binmids[:len(binmids) - 1]\n # get a version of the bins in km/s instead of cgs\n binmidskms = binmids / 1.e5\n \n f = h5py.File(fildir+'spectrumvels.hdf5', 'w')\n dset = f.create_dataset('binmidskms', data = binmidskms)\n f.close()\n histim = []\n \n for il in xrange(nl):\n print 'step %d of %d along the filament' % (il, nl)\n l = startpoint + vec * dl * (il + 0.5)\n totalhist = np.zeros(bins)\n thesehists = []\n for iw in xrange(nw):\n pt = l + orthovec * dw * (iw + 0.5)\n ray = ds.h.ortho_ray(axis, pt)\n x = ray['x']\n vx = ray['x-velocity']\n dx = ray.fwidth[:,0]\n rho = ray['CO13']\n weight = dx * rho\n #weight /= np.sum(weight)\n \"\"\"inds = np.argsort(x)\n xs = np.take(x, inds)\n vxs = np.take(vx, inds)\n dxs = np.take(dx, inds)\n rhos = np.take(rho, inds)\"\"\"\n hist, binedges = np.histogram(\n vx, \n range = (vmin, vmax), \n bins = binvals,\n weights = weight)\n thesehists.append(hist)\n totalhist += hist\n f = h5py.File(fildir+'spectra_'+str(il).zfill(4)+'.hdf5', 'w')\n dset = f.create_dataset('spectra', data = thesehists)\n f.close()\n \n f = h5py.File(fildir+'combinedspectrum_'+str(il).zfill(4)+'.hdf5', 'w')\n dset = f.create_dataset('spectra', data = totalhist)\n f.close()\n \n \n\n \n"
},
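The core of make_filament_posvel_data.py is a mass-weighted velocity histogram: each cell along a ray contributes its line-of-sight velocity, weighted by dx * rho (a column-density weight). A self-contained sketch of that binning step, with fabricated cell data:

import numpy as np

# fabricated cells along one ray: line-of-sight velocity (cm/s),
# cell size (code units) and density (g/cm^3)
vx = np.array([-1.2e5, 0.3e5, 0.9e5, 2.1e5])
dx = np.array([1/1024, 1/1024, 1/2048, 1/2048])
rho = np.array([1e-21, 5e-21, 2e-20, 8e-22])

vmin, vmax = -4.e5, 4.e5
nbins = int((vmax - vmin) / 1.e5 / 0.1)   # ~0.1 km/s bins
binvals = np.linspace(vmin, vmax, nbins + 1)

# weight each cell by dx * rho, i.e. by its contribution to the column density
weight = dx * rho
hist, edges = np.histogram(vx, bins=binvals, weights=weight)

# summing per-ray histograms across the filament width gives the spectrum;
# the two totals agree when no cell velocity falls outside the bin range
print(hist.sum(), weight.sum())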
{
"alpha_fraction": 0.5303441882133484,
"alphanum_fraction": 0.562839686870575,
"avg_line_length": 31.917909622192383,
"blob_id": "35d05a2727a7be2d8590b386f056971dc94b6cc5",
"content_id": "99969135ede4f94bb75ba3c5e931c6fbebfa3ca6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8832,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 268,
"path": "/turbgravfilaments2/make_C18O_spectra_new.py",
"repo_name": "nickolas1/ramses_plot_scripts",
"src_encoding": "UTF-8",
"text": "from __future__ import division\n\nfrom yt.mods import *\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\nimport gc\nimport sys\nimport h5py\nimport copy\nimport shutil\nfrom os.path import expanduser\ntry:\n from scipy import special\n scipy_available = True\nexcept ImportError:\n import math\n scipy_available = False\n\n# import ramses helper functions and get figure directory\nhomedir = expanduser('~')+'/'\n\n# import ramses helper functions and get figure directory\nsys.path.append(homedir+'pythonhelpers/ramses/')\nfrom ramses_helpers import *\n\ndef _C18O(field, data):\n mu = 2.33 # mean molecular weight\n mH = 1.6733e-24\n lolim = 1000.0 * mu * mH # not interested in anything below 10^3 / cm^3\n hilim = 31622.0 * mu * mH # not interested in anything above 10^4.5 / com^3\n newfield = data['Density']\n antiselection = (data['Density'] < lolim) | (data['Density'] >= hilim)\n newfield[antiselection] = 0.0\n return newfield\n \ndef _N2Hplus(field, data):\n mu = 2.33 # mean molecular weight\n mH = 1.6733e-24\n lolim = 31622.0 * mu * mH # not interested in anything below 10^4.5 / cm^3\n newfield = data['Density']\n antiselection = (data['Density'] < lolim)\n newfield[antiselection] = 0.0\n return newfield \n \n\nsnap = int(sys.argv[1])\naxis = int(sys.argv[2])\n\n\n\"\"\"\n read in the surface density map. we will use this to normalize the spectra\n so that they are in approximately physical units\n \n convert surface density to integrated line intensity using\n David S. Meier and Jean L. Turner ApJ 551:687 2001 equation 2\n\n N(H2)C18O = 2.42e14 cm^-2 [H2]/[C18O] * exp(5.27/Tex)/(exp(5.27/Tex)-1) IC18O K km/s\n [H2]/[C18O] = 2.94e6\n\"\"\"\nfile = 'reduced_'+str(snap).zfill(5)+'/surface_density_C18O'+str(axis)+'.hdf5'\nf = h5py.File(file, 'r')\nsd = f['surface_density_C18O']\nsd = 10**np.array(sd) # convert to linear units\nsd /= (2.33 * 1.672621777e-24) # convert to number density\nsd /= (2.42e14 * 2.94e6) # non-temperature factors of IC18O conversion\nsd /= (np.exp(5.27/10) / (np.exp(5.27/10) - 1)) # temperature part\nf.close()\n\n\nif axis == 0:\n los = 'x'\n dlos = 'dx'\n vlos = 'x-velocity'\n sliceax = 'z'\nif axis == 1:\n los = 'y'\n dlos = 'dy'\n vlos = 'y-velocity'\n sliceax = 'z'\nif axis == 2:\n los = 'z'\n dlos = 'dz'\n vlos = 'z-velocity'\n sliceax = 'y'\n\ninfoname = 'output_'+str(snap).zfill(5)+'/info_'+str(snap).zfill(5)+'.txt'\n\nspecdir = 'reduced_'+str(snap).zfill(5)+'/posvel_'+str(axis)+'/'\nif not os.path.exists(specdir):\n os.makedirs(specdir)\n\n(lmin, lmax) = get_level_min_max(infoname)\n(boxlen, unit_l) = get_boxsize(infoname)\n\nds = load(infoname)\n\n# add new density fields\nadd_field('C18O', function=_C18O)\n#add_field('N2Hplus', function=_N2Hplus)\n\nvmax = 2.5e5\nvmin = -2.5e5\n# roughly match hacar et al by takin 0.05 km/s bins\nbins = (vmax - vmin) / 1.e5 / 0.05\nbinvals = np.arange(vmin, 1.000001*vmax, (vmax - vmin) / bins)\nif not scipy_available:\n erfvals = np.arange(vmin, 1.000001*vmax, (vmax - vmin) / bins)\nbinmids = 0.5 * (np.roll(binvals, -1) + binvals)\nbinmids = binmids[:len(binmids) - 1]\n# get a version of the bins in km/s instead of cgs\nbinmidskms = binmids / 1.e5\n \n# save the velocities to a file\nf = h5py.File(specdir+'spectrumvelsC18O.hdf5', 'w')\ndset = f.create_dataset('binmidskms', data = binmidskms, dtype='float32')\nf.close()\n\n\"\"\"\n to keep things manageable, make this output map on the 1024**3 base grid.\n outres: the output resolution.\n inres: the 
resolution we sample the grid at.\n\"\"\"\nlmin = max(10, lmin)\noutres = 2**lmin\noutdres = 1.0 / outres\n\nrefinefac = 2**(lmax - lmin)\ninres = outres * refinefac\nindres = 1.0 / inres\n\nsigmaC18O = 0.0526 # thermal width of C18O line in km/s\nsigma = sigmaC18O * 1.e5 # convert to cm/s\nerfdenom = np.sqrt(2*sigma**2)\n\n\nfor sj in xrange(outres):\n outpty = (sj + 0.5) * outdres\n thesehistsaccum = np.zeros([outres, len(binmids)])\n print sj, outpty\n \n if(sd[sj,:].sum() == 0):\n print 'skipping ',sj\n continue\n \n j = 0\n for ij in xrange(refinefac):\n thesehists = np.zeros([outres, len(binmids)])\n inpty = (sj*refinefac + j + 0.5) * indres\n print 'inpty: ',inpty\n # get a slice\n slc = ds.h.slice(sliceax, inpty)\n \n # get it into a frb\n frb = slc.to_frb(\n (1.0, 'unitary'), # get the whole extent of the box\n inres, # don't degrade anything\n center = [0.5, 0.5, 0.5], # centered in the box\n height = (1.0, 'unitary')) # get the whole extent of the box\n \n #weight = np.array(frb['C18O'])\n \n #x = np.array(frb[los], dtype=np.float32)\n #vx = np.array(frb[vlos], dtype=np.float32)\n #dx = np.array(frb[dlos], dtype=np.float32)\n #del(slc)\n #del(frb)\n #gc.collect()\n \n weight = np.array(frb['C18O'], dtype=np.float32)\n del(frb['C18O'])\n gc.collect()\n vx = np.array(frb[vlos], dtype=np.float32)\n del(frb[vlos])\n gc.collect()\n dx = np.array(frb[dlos] / indres, dtype=np.int32)\n del(frb[dlos])\n gc.collect()\n \n \n mindx = np.min(dx)\n print 'max(dx), min(dx), outdres: ',np.max(dx),np.min(dx),outdres\n print 'max(rho), min(rho), outdres: ',np.max(weight),np.min(weight),outdres \n\n i = 0\n for ii in xrange(inres):\n # for each point along the slice, march along the projecting dimension\n # and turn each detection into a gaussian. bin this gaussian into the \n # velbins.\n hist = np.zeros(len(binmids))\n k = 0\n dkmin = np.min(dx[:,i])\n dkmin = min(dkmin, refinefac) # out outres is higher than min res, this is necessary\n \n if(weight[:,i].sum() > 0):\n # print 'whaoooooooo ',i, i//refinefac,weight[:,i].sum()\n for ik in xrange(len(vx[:,i])):\n kincr = 1\n if weight[ik,i] > 0:\n peak = vx[ik,i]\n thisdx = dx[ik,i]\n #kincr = int(thisdx / indres)\n kincr = thisdx\n # calculate the cumulative distribution of this line at each velocity bin edge\n if scipy_available:\n cdfs = 0.5 * (1 + special.erf((binvals - peak) / erfdenom)) * weight[ik,i] * kincr\n else:\n for ev in xrange(len(erfvals)):\n erfvals[ev] = math.erf((binvals[ev] - peak) / erfdenom)\n cdfs = 0.5 * (1 + erfvals) * weight[ik,i] * kincr\n # subtract adjacent values to get the contribution to each bin\n hist = hist + np.diff(cdfs)\n # print 'hist ',hist\n k += kincr\n if(k == len(vx[:,i])):\n break\n del(cdfs) \n iincr = dkmin\n # this next bit handles binning together a refinefac**2 patch into one output cell\n # ii//refinefac == 0 handles glomming to gether along the slice\n thesehists[i//refinefac] += hist * iincr\n # print 'incrimenting i by ',iincr\n # print i, i//refinefac,dkmin / indres,inres\n del(hist)\n del(dkmin)\n i += iincr \n if(i % refinefac) == 0:\n gc.collect() \n if(i == inres):\n break\n \n #jincr = int(mindx / indres)\n jincr = mindx\n thesehistsaccum += thesehists * jincr\n #print 'incrementing j by ',jincr\n j += jincr\n if(j == refinefac):\n break\n del(vx)\n del(dx)\n del(weight)\n del(slc)\n del(frb)\n gc.collect()\n \n # normalize to put into K\n integratedI = thesehistsaccum.sum(axis = 1)\n for m in xrange(outres):\n if integratedI[m] == 0 and sd[sj, m] > 0:\n print 'trouble! 
',m, integratedI[m], sd[sj, m]\n normalisations = sd[sj, :] / integratedI\n thesehistsaccum *= normalisations[:, np.newaxis]\n thesehistsaccum = np.nan_to_num(thesehistsaccum)\n \n # once we have the histograms of mass-weighted velocity along each point for this\n # row, save it to an hdf5 file\n f = h5py.File(specdir+'spectra_C18O_'+str(sj).zfill(4)+'.hdf5', 'w')\n dset = f.create_dataset('spectraC18O', data = thesehistsaccum, dtype='float32')\n dset.attrs['slowindex'] = sj\n dset.attrs[sliceax] = outpty\n f.close()\n \n del(f)\n del(dset)\n del(thesehists)\n del(thesehistsaccum)\n gc.collect()\n \n \n"
},
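Instead of depositing each cell's velocity into a single bin, make_C18O_spectra_new.py spreads it as a thermally broadened Gaussian: it evaluates the line's cumulative distribution (via scipy.special.erf) at every bin edge and differences adjacent values, so each bin receives exactly its share of the line regardless of bin width. A minimal sketch of that trick, with a fabricated line center and weight:

import numpy as np
from scipy import special

sigma = 0.0526 * 1.e5      # thermal width of the C18O line, converted to cm/s
peak = 0.35e5              # fabricated line-center velocity in cm/s
weight = 2.5e-21           # fabricated column-density weight of this cell

vmin, vmax = -2.5e5, 2.5e5
nbins = int((vmax - vmin) / 1.e5 / 0.05)  # 0.05 km/s bins
binvals = np.linspace(vmin, vmax, nbins + 1)

# CDF of a Gaussian of width sigma centered on the cell velocity,
# evaluated at every bin edge
cdfs = 0.5 * (1 + special.erf((binvals - peak) / np.sqrt(2 * sigma**2))) * weight

# adjacent differences give the integral of the line over each bin
hist = np.diff(cdfs)

# total flux is conserved up to the tails falling outside the velocity range
print(hist.sum(), weight)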
{
"alpha_fraction": 0.5818977355957031,
"alphanum_fraction": 0.6267432570457458,
"avg_line_length": 34.125,
"blob_id": "dda3f3b0b0032310ae60f48b662b30be796da0c6",
"content_id": "9cccc78b10ee0eac8d4d9fc04782ec335f009aa4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7314,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 208,
"path": "/plot_dense_gas_plot.py",
"repo_name": "nickolas1/ramses_plot_scripts",
"src_encoding": "UTF-8",
"text": "from __future__ import division\n\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\nimport gc\nimport sys\nimport h5py\nfrom astropy.io import ascii\nfrom os.path import expanduser\n\n# import ramses helper functions and get figure directory\nhomedir = expanduser('~')+'/'\nsys.path.append(homedir+'pythonhelpers/ramses/')\nfrom ramses_helpers import *\nmpl.rc_file(homedir+'pythonhelpers/ramses/matplotlibrc')\noutdir = get_output_path(homedir)\n\n# set some fonts\nfontdir = homedir+'Documents/astronomy/macfontsforpython/'\ntfm = fm.FontProperties( # tick font\n fname=fontdir+'Gotham-Book.ttf', size=13)\nlfm = fm.FontProperties( # label font main\n fname=fontdir+'Gotham-BookItalic.ttf', size=12)\n \nfig = plt.figure(figsize = (5,3.5))\n\nax = fig.add_axes([0.2, 0.2, 0.75, 0.75])\n#ax = fig.add_axes([0., 0., 1., 1.])\n\ntimes = []\nmass1 = []\nmass2 = []\nmass3 = []\nmass4 = []\nsinkmasses = []\n\n\n# set limits for density that we're interested in\nndense1 = 1.e3\nndense2 = 1.e4\nndense3 = 1.e5\nmu = 2.33 # mean molecular weight\nmH = 1.6733e-24\n\nmdense1 = ndense1 * mu * mH\nmdense2 = ndense2 * mu * mH\nmdense3 = ndense3 * mu * mH\n\nfor snap in range(int(sys.argv[1]),int(sys.argv[2]),int(sys.argv[3])):\n dataname = 'reduced_'+str(snap).zfill(5)+'/MassAndVolumeInDensityBins.dat'\n infoname = 'reduced_'+str(snap).zfill(5)+'/info_'+str(snap).zfill(5)+'.txt'\n sinkname = 'reduced_'+str(snap).zfill(5)+'/sink_'+str(snap).zfill(5)+'.out'\n limitsname = './densegasplotlimits.dat'\n \n f = open(limitsname)\n line = f.readline()\n sl = line.split()\n alltlow = float(sl[0])\n allthi = float(sl[1])\n line = f.readline()\n sl = line.split()\n qtlow = float(sl[0])\n qthi = float(sl[1])\n f.close()\n \n if os.path.isfile(dataname):\n (time, unit_t) = get_time(infoname)\n timeMyr = time * unit_t / 31557600.0 / 1.e6 + 1.e-10\n times.append(timeMyr)\n \n data = ascii.read(dataname)\n dense1sel = (data['Density'] > mdense1)\n dense2sel = (data['Density'] > mdense2)\n dense3sel = (data['Density'] > mdense3)\n \n cumulativemass = np.cumsum(data['CellMassMsun'])\n massabove = cumulativemass.max() - cumulativemass\n logdens = np.log10(data['Density'])\n \n # should interpolate this linearly in log density probably\n md1 = np.sum(data['CellMassMsun'][dense1sel])\n md2 = np.sum(data['CellMassMsun'][dense2sel])\n md3 = np.sum(data['CellMassMsun'][dense3sel])\n\n md1 = np.interp(np.log10(mdense1), logdens, massabove)\n md2 = np.interp(np.log10(mdense2), logdens, massabove)\n md3 = np.interp(np.log10(mdense3), logdens, massabove)\n \n mass1.append(md1)\n mass2.append(md2)\n mass3.append(md3)\n \n # see if we have any sink particles \n sinks = get_sinks(sinkname)\n if len(sinks) > 0:\n sinkmass = np.sum(sinks[:,1]) # total mass of sinks in Msun\n else:\n sinkmass = 0.0\n sinkmasses.append(sinkmass)\n\n \n print snap, timeMyr, md1, md2, md3, sinkmass\n \n # now get surface density 'dense gas' as well. \n files = [\n 'surface_density_0.hdf5',\n 'surface_density_1.hdf5',\n 'surface_density_2.hdf5'] \n (boxlen, unit_l) = get_boxsize(infoname)\n (levelmin, levelmax) = get_level_min_max(infoname)\n print boxlen, unit_l\n print levelmin, levelmax\n thism4 = []\n for i in xrange(len(files)):\n f = h5py.File('reduced_'+str(snap).zfill(5)+'/'+files[i], 'r')\n sd = f['surface_density']\n md4 = 0.0\n for j in xrange(sd.shape[0]):\n coldensvals = sd[j]\n # these are log10(g / cm^2). 
convert to msun / pc^2\n coldensvals = 10**coldensvals\n coldensvals *= 4785.63 # that's pc**2 / msun\n sdenscut = 120.0 # surface density cut in msun / pc^2\n # get total mass above the surface density cut\n md4 += np.sum(coldensvals[coldensvals >= sdenscut])\n # scale to the area of each pixel\n # is 10 pc, and we want this in pc.\n pixlenau = boxlen * unit_l / 2**levelmax / 3.086e18\n md4 *= pixlenau**2\n thism4.append(md4)\n f.close() \n print thism4\n mass4.append(thism4)\n\nmass4 = np.array(mass4)\n\n# plot mass above the density cuts as a function of time\nax.plot(times, mass1, color = c1, linewidth = 2)\nax.plot(times, mass2, color = c2, linewidth = 2)\n#ax.plot(times, mass3, color = '0.6', linewidth = 2)\nax.plot(times, sinkmasses, color = c4, linewidth = 2)\nhoriz = alltlow + 0.05 * (allthi - alltlow)\nvertbase = 1.5e4\nax.text(horiz, vertbase, r'n > $\\mathdefault{10^3 cm^{-3}}$', transform=ax.transData,\n va = 'baseline', fontproperties = lfm, color=c1, snap = False) \nax.text(horiz, vertbase / 2, r'n > $\\mathdefault{10^4 cm^{-3}}$', transform=ax.transData,\n va = 'baseline', fontproperties = lfm, color=c2, snap = False)\nax.text(horiz, vertbase / 4, r'sinks', transform=ax.transData,\n va = 'baseline', fontproperties = lfm, color=c4, snap = False) \n\nax.set_xscale('log')\nax.set_yscale('log')\nax.set_xlim(alltlow, allthi)\nax.set_ylim(10.0, 3.e4)\nset_ticks(ax, '0.6')\n\nax.set_xlabel('time / Myr', fontproperties = tfm, size = 15)\nax.set_ylabel('mass / '+r'M${_\\odot}$', fontproperties = tfm, size = 15)\n\nfor label in ax.get_xticklabels() + ax.get_yticklabels():\n label.set_fontproperties(tfm)\n\nplt.savefig(outdir+'DenseGasStarMasses_logtime.pdf')\nax.set_xscale('linear')\nplt.savefig(outdir+'DenseGasStarMasses_lintime.pdf')\nplt.clf()\n\n\n\n\n# plot q values\nfig = plt.figure(figsize = (5,3.5))\n\nax2 = fig.add_axes([0.2, 0.2, 0.75, 0.75])\n\nax2.plot(times, np.array(mass1) / (np.array(sinkmasses) + 1.e-10), color = c1, linewidth = 2)\nax2.plot(times, np.array(mass2) / (np.array(sinkmasses) + 1.e-10), color = c2, linewidth = 2)\nax2.plot(times, np.array(mass4[:,0]) / (np.array(sinkmasses) + 1.e-10), color = c3, alpha = 0.7, linewidth = 2)\nax2.plot(times, np.array(mass4[:,1]) / (np.array(sinkmasses) + 1.e-10), color = c3, alpha = 0.7, linewidth = 2)\nax2.plot(times, np.array(mass4[:,2]) / (np.array(sinkmasses) + 1.e-10), color = c3, alpha = 0.7, linewidth = 2)\n#ax2.plot(times, (np.array(mass1) + 0.7 * np.array(sinkmasses)) / (0.3 * np.array(sinkmasses)),\n# color = c1, linewidth = 0.75)\n#ax2.plot(times, (np.array(mass2) + 0.7 * np.array(sinkmasses)) / (0.3 * np.array(sinkmasses)),\n# color = c2, linewidth = 0.75)\n \nhoriz = qtlow + 0.65 * (qthi - qtlow) \nax2.text(horiz, 12.0, r'n > $\\mathdefault{10^3 cm^{-3}}$', transform=ax2.transData,\n va = 'baseline', fontproperties = lfm, color=c1, snap = False) \nax2.text(horiz, 10.0, r'n > $\\mathdefault{10^4 cm^{-3}}$', transform=ax2.transData,\n va = 'baseline', fontproperties = lfm, color=c2, snap = False)\n \n \nax2.set_xscale('log')\nax2.set_xlim(qtlow, qthi)\nax2.set_ylim(0,15.0)\nset_ticks(ax2, '0.6')\n\nax2.set_xlabel('time / Myr', fontproperties = tfm, size = 15)\nax2.set_ylabel('dense gas mass / sink mass', fontproperties = tfm, size = 15)\n\nfor label in ax2.get_xticklabels() + ax2.get_yticklabels():\n label.set_fontproperties(tfm)\n\nplt.savefig(outdir+'qRatio_logtime.pdf')\nax2.set_xscale('linear')\nplt.savefig(outdir+'qRatio_lintime.pdf')\n\n\n\n \n"
},
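plot_dense_gas_plot.py gets the mass above each density threshold by interpolating the reversed cumulative mass profile in log density, which behaves better than summing cells above a hard cut when the threshold falls between bins. Roughly, with fabricated density bins:

import numpy as np

# fabricated density bins (g/cm^3, ascending) and cell mass per bin (Msun)
dens = np.logspace(-23, -17, 30)
cellmass = np.exp(-0.5 * ((np.log10(dens) + 20.5) / 1.2)**2) * 1.e3

# mass residing above each bin: total minus the running cumulative sum
cumulativemass = np.cumsum(cellmass)
massabove = cumulativemass.max() - cumulativemass
logdens = np.log10(dens)

# interpolate in log density at the threshold (here n = 1e4 cm^-3, mu = 2.33)
mu, mH = 2.33, 1.6733e-24
mdense = 1.e4 * mu * mH
md = np.interp(np.log10(mdense), logdens, massabove)
print(md)  # mass above the cut, in Msun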
{
"alpha_fraction": 0.5823350548744202,
"alphanum_fraction": 0.624568521976471,
"avg_line_length": 33.63380432128906,
"blob_id": "836ff4b970a3314ba09637d0aa3e8483f95df01a",
"content_id": "4fb88fbdb6f77800824a172c435a6669d525bcf9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4925,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 142,
"path": "/plot_virial_ratios.py",
"repo_name": "nickolas1/ramses_plot_scripts",
"src_encoding": "UTF-8",
"text": "from __future__ import division\n\nimport numpy as np\nimport sys\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\nimport brewer2mpl\nfrom os.path import expanduser\nfrom matplotlib import cm\nfrom astropy.io import ascii\n \n\n# import ramses helper functions and get figure directory\nhomedir = expanduser('~')+'/'\n\n# import ramses helper functions and get figure directory\nsys.path.append(homedir+'pythonhelpers/ramses/')\nfrom ramses_helpers import *\nmpl.rc_file(homedir+'pythonhelpers/ramses/matplotlibrc')\noutdir = get_output_path(homedir)\n \n# set some fonts\nfontdir = homedir+'Documents/astronomy/macfontsforpython/'\ntfm = fm.FontProperties( # tick font\n fname=fontdir+'Gotham-Book.ttf', size=13)\nlfm = fm.FontProperties( # label font main\n fname=fontdir+'Gotham-BookItalic.ttf', size=12) \n \ndef get_energies_and_virial_ratio(egyfile):\n \"\"\" reads in the reduced energy data and returns it, as well as the virial ratio.\n this is all in code units.\n \"\"\"\n egy_data = ascii.read(egyfile)\n epot = np.array(egy_data['epot'])\n ekin = np.array(egy_data['ekin'])\n eint = np.array(egy_data['eint']) \n alphavir = 2*ekin/ np.abs(epot)\n return (epot, ekin, eint, alphavir)\n \ndef get_times(timefile, tff):\n \"\"\" reads in the reduced time data and returns it. this is in code units, \n also returns a converted version to Myr and a version in tff.\n input tff is the freefall time in Myr.\n \"\"\"\n time_data = ascii.read(timefile)\n times = np.array(time_data['time'])\n timesMyr = times * 14.909\n timesTff = timesMyr / tff\n return (times, timesMyr, timesTff)\n \n \ndef tff(density):\n \"\"\"\n arguments: in density in Msun / pc^3\n returns: free-fall time in Myr\n \"\"\"\n G = 6.67e-8\n pc = 3.086e18\n yr = 31.5576e6\n msun = 1.99e33\n denscgs = density * msun / pc**3\n tfreefall = np.sqrt(3 * np.pi / (32 * G * denscgs))\n return tfreefall / yr / 1.e6 \n \n \ndef main(): \n fig = plt.figure(figsize = (5,3.5))\n ax = fig.add_axes([.18, .2, .75, .75])\n \n diffusedir = 'turbshock1024k4ghc7/'\n compactdir = 'turbshock1024k4ghcs7/'\n \n (epot, ekin, eint, alphavir) = get_energies_and_virial_ratio(diffusedir+'energies.log')\n fftime = tff(5.e4 / (4 / 3 * np.pi * 25**3))\n t100 = 4.0625 / fftime # time when 100 msun of stars have formed in Myr / tff\n t1000 = 5.12 / fftime\n (times, timesMyr, timesTff) = get_times(diffusedir+'mainsteptimes.log', fftime)\n \n (epot, ekin, eint, alphavir) = get_energies_and_virial_ratio(compactdir+'energies.log')\n fftime = tff(5.e4 / (4 / 3 * np.pi * 12.5**3))\n t100 = 1.1 / fftime\n t1000 = 1.53 / fftime\n (times, timesMyr, timesTff) = get_times(compactdir+'mainsteptimes.log', fftime)\n \n etot = epot + ekin + eint\n e0 = np.abs(etot[0])\n \n tplot = timesTff\n tmax = tplot[-1]\n ax.plot(tplot, np.abs(epot)/e0, color=c2, linewidth=1.75) #blue\n ax.plot(tplot, ekin/e0, color=c4, linewidth=1.75) #red\n # internal energy is boring, don't plot it.\n #ax.plot(tplot, eint/e0, color=c3, linewidth=1.5) #green\n ax.plot(tplot, alphavir, color=c1, linewidth=1.75)\n \n ax.set_ylim(.1, 10)\n ax.set_xlim(0, tmax)\n ax.set_yscale('log')\n ax.xaxis.grid(False,which='minor')\n #ax.yaxis.grid(False,which='minor')\n \n ax.set_xlabel(r'time / t$\\mathdefault{_{ff}}$', fontproperties = tfm, size=15, color='0.15')\n ax.set_ylabel(r'$\\alpha$, energy / E$\\mathdefault{_{0}}$', fontproperties = tfm, size=15, color='0.15')\n \n # label the curves\n kinht = ekin[0] / e0\n ax.text(0.05*tmax, kinht, r'kinetic', 
transform=ax.transData,\n va = 'top', ha = 'left', fontproperties = lfm, color=c4, snap = False)\n potht = 1.05*(-epot[0] / e0)\n ax.text(0.05*tmax, potht, r'potential', transform=ax.transData,\n va = 'bottom', ha = 'left', fontproperties = lfm, color=c2, snap = False)\n alphaht = 1.3*alphavir[0]\n alphaht = alphavir[int(len(alphavir)/4)]\n ax.text(0.05*tmax, alphaht, r'virial ratio', transform=ax.transData,\n va = 'bottom', ha = 'left', fontproperties = lfm, color=c1, snap = False)\n \n # label the freefall time\n ax.text(0.7*tmax, 0.12, r't$\\mathdefault{_{ff}}$ = %.1f Myr' % fftime,\n va = 'baseline', ha = 'left', fontproperties = lfm, color=c1, snap = False)\n \n # label 100 and 1000 Msun in sinks\n ax.plot([t100,t100],[.25, 7], color='0.5', zorder = 1)\n ax.plot([t1000,t1000],[.25, 7], color='0.5')\n \n set_ticks(ax, '0.15')\n for label in ax.get_xticklabels() + ax.get_yticklabels():\n label.set_fontproperties(tfm)\n \n plt.savefig('foo.png')\n \n sys.exit()\n epot = np.array(egy_data_diffuse['epot'])\n ekin = np.array(egy_data_diffuse['ekin'])\n eint = np.array(egy_data_diffuse['eint'])\n \n\n \n \n \nif __name__ == '__main__':\n main() "
},
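The tff() helper in plot_virial_ratios.py implements t_ff = sqrt(3*pi / (32*G*rho)) with rho given in Msun/pc^3 and the result in Myr. As a quick numerical check (not part of the original script), the diffuse-cloud density used above, 5e4 Msun spread over a 25 pc sphere, gives:

import numpy as np

def tff(density):
    # free-fall time in Myr for a density in Msun / pc^3
    G = 6.67e-8                      # cgs gravitational constant
    pc = 3.086e18                    # cm per parsec
    yr = 31.5576e6                   # seconds per year
    msun = 1.99e33                   # grams per solar mass
    denscgs = density * msun / pc**3
    return np.sqrt(3 * np.pi / (32 * G * denscgs)) / yr / 1.e6

# mean density of 5e4 Msun spread over a 25 pc radius sphere
rho = 5.e4 / (4 / 3 * np.pi * 25**3)
print(rho, tff(rho))   # ~0.76 Msun/pc^3 -> t_ff of roughly 9.3 Myr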
{
"alpha_fraction": 0.6977108716964722,
"alphanum_fraction": 0.7331451773643494,
"avg_line_length": 29.028301239013672,
"blob_id": "5b7674b8c1b999824eaae01533d4456f5f5cacb3",
"content_id": "60238beb9a21195936d2e13bb3a13652a99bbf4a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3189,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 106,
"path": "/turbgravfilaments2/make_column_fits.py",
"repo_name": "nickolas1/ramses_plot_scripts",
"src_encoding": "UTF-8",
"text": "from __future__ import division\n\n\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\nimport gc\nimport sys\nimport h5py\nimport shutil\nimport glob\nfrom astropy.io import ascii\nfrom astropy.io import fits\nfrom astropy import constants as const\nfrom astropy import units as u\nfrom os.path import expanduser\nfrom scipy import special\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\"\"\"\nusage:\npython fooppv.py N A F\nN: number of output to use. reduced_N needs to be here.\nA: axis of the projection (0, 1, 2)\n\"\"\"\n\n# import ramses helper functions and get figure directory\nhomedir = expanduser('~')+'/'\n\n# import ramses helper functions and get figure directory\nsys.path.append(homedir+'pythonhelpers/ramses/')\nfrom ramses_helpers import *\n\n\n# downsample the spectra to a 256 squared grid\ninres = 1024#1024\noutres = 256#256\nstride = int(inres / outres)\nprint stride\n\nmu = 2.33\n\n#snap = int(sys.argv[1])\n#axis = int(sys.argv[2])\n\nsnap = 18\naxis = 1\n\nfileprefix = 'reduced_'+str(snap).zfill(5)+'/'\n\n\n# make the surface density fits files\nfileCO = fileprefix+'surface_density_C18O2.hdf5'\nfileSD = fileprefix+'surface_density2.hdf5'\nfileNH = fileprefix+'surface_density_N2Hplus2.hdf5'\n\nfC18O = h5py.File(fileCO, 'r')\nsdC18O = fC18O['surface_density_C18O']\n# convert to linear units, divide by mu * mH\nsdC18O = 10**np.array(sdC18O) / (mu * const.m_p.cgs.value)\n# there are a lot of very small values- cull them before getting some\n# info on the range of interesting values \nsdnonzero = sdC18O[sdC18O > 10**4]\nprint 'mean non-zero C18O column density: ',np.mean(sdnonzero),'cm^-2'\nprint 'median non-zero C18O column density: ',np.median(sdnonzero),'cm^-2'\nprint 'max C18O column density: ',np.max(sdC18O),'cm^-2'\n\nhdu = fits.PrimaryHDU(sdC18O)\nhdulist = fits.HDUList([hdu])\nhdulist.writeto('column_density_C18O.fits')\nfC18O.close()\n\n\nfN2Hplus = h5py.File(fileNH, 'r')\nsdN2Hplus = fN2Hplus['surface_density_N2Hplus']\n# convert to linear units, divide by mu * mH\nsdN2Hplus = 10**np.array(sdN2Hplus) / (mu * const.m_p.cgs.value)\n# there are a lot of very small values- cull them before getting some\n# info on the range of interesting values \nsdnonzero = sdN2Hplus[sdN2Hplus > 10**4]\nprint 'mean non-zero N2Hplus column density: ',np.mean(sdnonzero),'cm^-2'\nprint 'median non-zero N2Hplus column density: ',np.median(sdnonzero),'cm^-2'\nprint 'max N2Hplus column density: ',np.max(sdN2Hplus),'cm^-2'\n\nhdu = fits.PrimaryHDU(sdN2Hplus)\nhdulist = fits.HDUList([hdu])\nhdulist.writeto('column_density_N2Hplus.fits')\nfN2Hplus.close()\n\n\nfSD = h5py.File(fileSD, 'r')\nsdSD = fSD['surface_density']\n# convert to linear units, divide by mu * mH\nsdSD = 10**np.array(sdSD) / (mu * const.m_p.cgs.value)\n# there are a lot of very small values- cull them before getting some\n# info on the range of interesting values \nsdnonzero = sdSD[sdSD > 10**4]\nprint 'mean non-zero SD column density: ',np.mean(sdnonzero),'cm^-2'\nprint 'median non-zero SD column density: ',np.median(sdnonzero),'cm^-2'\nprint 'max SD column density: ',np.max(sdSD),'cm^-2'\n\nhdu = fits.PrimaryHDU(sdSD)\nhdulist = fits.HDUList([hdu])\nhdulist.writeto('column_density_total.fits')\nfSD.close()\n\n\n\n "
},
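make_column_fits.py converts the stored log10 surface density (g cm^-2) to an H2 column density by dividing by mu * m_H with mu = 2.33, using astropy's proton mass as the script does. The unit step in isolation, with a fabricated input value:

import numpy as np
from astropy import constants as const

mu = 2.33                          # mean molecular weight per H2 molecule
sigma_log = -2.0                   # fabricated log10 surface density in g/cm^2
sigma = 10**sigma_log              # linear surface density in g/cm^2
NH2 = sigma / (mu * const.m_p.cgs.value)
print(NH2)                         # column density in cm^-2, here ~2.6e21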
{
"alpha_fraction": 0.545836865901947,
"alphanum_fraction": 0.5872161388397217,
"avg_line_length": 35.44171905517578,
"blob_id": "ffab6e409d7a8b207fdddcbddc8133c306aeb315",
"content_id": "aafa8fa9733a9c1fbb134d997c7e13592eafc955",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5945,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 163,
"path": "/make_andi_frame.py",
"repo_name": "nickolas1/ramses_plot_scripts",
"src_encoding": "UTF-8",
"text": "from __future__ import division\n\nfrom yt.mods import *\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\nimport gc\nimport sys\nfrom astropy.io import ascii\nfrom os.path import expanduser\n\n# import ramses helper functions and get figure directory\nhomedir = expanduser('~')+'/'\n\n# import ramses helper functions and get figure directory\nsys.path.append(homedir+'pythonhelpers/ramses/')\nfrom ramses_helpers import *\n#mpl.rc_file(homedir+'pythonhelpers/ramses/matplotlibrc')\noutdir = get_output_path(homedir)\n\n# set some fonts\nfontdir = homedir+'Documents/astronomy/macfontsforpython/'\ntfm = fm.FontProperties( # tick font main\n fname=fontdir+'Gotham-Book.ttf', size=13) \nlfm = fm.FontProperties( # label font main\n fname=fontdir+'Gotham-BookItalic.ttf', size=11) \n\n\nfor snap in range(int(sys.argv[1]),int(sys.argv[2]),1):\n infoname = 'output_'+str(snap).zfill(5)+'/info_'+str(snap).zfill(5)+'.txt'\n sinkname = 'output_'+str(snap).zfill(5)+'/sink_'+str(snap).zfill(5)+'.out'\n framename = outdir+'framesD/frame_'+str(snap).zfill(4)+'.png'\n \n projaxis = 1\n\n (boxlen, unit_l) = get_boxsize(infoname)\n # set column density limits so that the images appear the same.\n # low dens cloud has boxsize 10, high dens cloud has boxsize 5.\n # offsets in log of column density limits are thus log10(8)\n if boxlen > 7:\n cdmin = -5.1\n cdmax = -2.0\n else:\n cdmin = -4.19\n cdmax = -1.09\n\n pf = load(infoname, fields=['Density','x-velocity','y-velocity','z-velocity','Pressure'])\n \n # center on original center of cloud\n cntr = [0.5, 0.5, 0.5]\n\n # get a projection of density along y axis\n proj = pf.h.proj('Density', 1)\n\n wd = 0.5 # this is messed up- figure it out. yt might not get size right.\n # res should be base resolution times 2**levels of refinement * wd\n res = (512,512)\n ht = wd * res[0] / res[1]\n width = (wd, 'unitary')\n height = (ht, 'unitary')\n frb = proj.to_frb(width, res, center = cntr, height = height)\n ascii.write(np.log10(frb['Density']),'snap72_surfacedensity_xz.ascii')\n \n \n # get a projection of density along x axis\n proj = pf.h.proj('Density', 0)\n frb = proj.to_frb(width, res, center = cntr, height = height)\n ascii.write(np.log10(frb['Density']),'snap72_surfacedensity_yz.ascii')\n \n \n # get a projection of density along z axis\n proj = pf.h.proj('Density', 2)\n frb = proj.to_frb(width, res, center = cntr, height = height)\n ascii.write(np.log10(frb['Density']),'snap72_surfacedensity_xy.ascii')\n \n \n sys.exit()\n \n fig = plt.figure(figsize = (res[1]/200, res[0]/200), dpi=200)\n\n #ax = fig.add_axes([0.1, 0.1, 0.85, 0.85])\n ax = fig.add_axes([0., 0., 1., 1.])\n \n # get log of data\n frb_logrho = np.log10(frb['Density'])\n \n ax.imshow(frb_logrho,\n interpolation = 'nearest',\n origin = 'lower',\n vmin = cdmin,\n vmax = cdmax,\n cmap = 'bone')\n \n # see if we have any sink particles to plot\n try:\n with open(sinkname): \n sinks = get_sinks(sinkname)\n if len(sinks) > 0:\n sinkpos = sinks[:,2:5]\n #sinkpos = sinks[:,1:4]\n sinkpos[:] /= boxlen # shrink to 0-1 in all dimensions\n # get projected positions\n keep = np.array([1,1,1])\n keep[projaxis] = 0\n keep = np.array(keep, dtype=bool)\n sinkpos = sinkpos[:,keep]\n \n # restrict to same region as density plot\n ledge = cntr[0] - wd/2\n bedge = cntr[2] - ht/2\n sinkpos[:] -= np.array([ledge, bedge])\n # convert to imshow scale\n sinkpos *= res[1] / wd\n print sinkpos \n ax.autoscale(False)\n 
ax.scatter(sinkpos[:,0],sinkpos[:,1],marker='.',s=6,facecolor='r',edgecolor='r') \n except IOError:\n pass \n \n # add a time counter\n (time, unit_t) = get_time(infoname)\n timeMyr = time * unit_t / 31557600.0 / 1.e6\n ax.text(.2, .9, r'%.1f' %timeMyr, transform=ax.transAxes,\n va = 'baseline',ha='right',fontproperties = lfm, color=c3l, alpha=0.5, snap = False, size=15)\n ax.text(.21, .9, r'Myr', transform=ax.transAxes,\n va = 'baseline',ha='left',fontproperties = lfm, color=c3l, alpha=0.5, snap = False, size=13)\n # add a scale bar\n # bar coordinates in units from 0 to 1\n nscales = 2.5 # length of scale bar in units of 0.1 of domain\n barleft = 0.5\n barright = barleft + nscales * 0.1\n barheight = 0.07\n \n #ax2 = ax.twinx() \n ax2 = fig.add_axes([0,0,.999,.999])\n ax2.plot([barleft, barright], [barheight, barheight], color=c3l, alpha=0.5, lw=1.25)\n ax2.patch.set_facecolor('None')\n # ax2.patch.set_alpha(.1)\n ax2.xaxis.set_visible(False)\n ax2.yaxis.set_visible(False)\n ax2.set_xlim([0,1])\n ax2.set_ylim([0,1])\n scalePc = nscales * unit_l / 3.086e18 * (boxlen / 10.0) # one unit of length is 10 pc.\n \n if scalePc % 1 > 0.1:\n ax2.text(barright - 0.01 - 0.005, barheight + 0.02, r'%.1f' %scalePc, transform=ax2.transAxes,\n va = 'baseline',ha='right',fontproperties = lfm, color=c3l, alpha=0.5, snap = False, size=15) \n else:\n ax2.text(barright - 0.01 - 0.005, barheight + 0.02, r'%.0f' %scalePc, transform=ax2.transAxes,\n va = 'baseline',ha='right',fontproperties = lfm, color=c3l, alpha=0.5, snap = False, size=15) \n ax2.text(barright - 0.01 + 0.005, barheight + 0.02, 'pc', transform=ax2.transAxes,\n va = 'baseline',ha='left',fontproperties = lfm, color=c3l, alpha=0.5, snap = False, size=13)\n \n \n \n \n plt.savefig(framename, dpi = 200)\n \n del(frb)\n del(frb_logrho)\n del(pf)\n gc.collect()\n \n"
},
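The sink overplotting in make_andi_frame.py maps 3D sink positions to imshow pixels: rescale to the unit box, drop the projected axis with a boolean mask, shift by the left/bottom edges of the plotted window, and multiply by pixels-per-unit-length. A compact sketch of that mapping with invented values:

import numpy as np

boxlen = 10.0
projaxis = 1                       # project along y
res = (512, 512)
wd, ht = 0.5, 0.5                  # plotted window size in box units
cntr = [0.5, 0.5, 0.5]

# fabricated sink positions in code units (0..boxlen)
sinkpos = np.array([[4.8, 5.1, 5.3], [5.6, 4.9, 4.4]])
sinkpos /= boxlen                  # to 0-1 in all dimensions

# keep the two coordinates orthogonal to the projection axis
keep = np.ones(3, dtype=bool)
keep[projaxis] = False
sinkpos = sinkpos[:, keep]

# shift into the plotted window, then scale to pixel coordinates
ledge = cntr[0] - wd / 2
bedge = cntr[2] - ht / 2
sinkpos -= np.array([ledge, bedge])
sinkpos *= res[1] / wd
print(sinkpos)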
{
"alpha_fraction": 0.5655984878540039,
"alphanum_fraction": 0.60654616355896,
"avg_line_length": 35.1435661315918,
"blob_id": "2f92496e57c8e0444bc6f1c0d8ef3a401db3bb94",
"content_id": "aa06e3e179d7535fa25f2a0e2627a37a1a226b8f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7302,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 202,
"path": "/turbgravfilaments2/plot_CO_filament_finder_image_noboxes.py",
"repo_name": "nickolas1/ramses_plot_scripts",
"src_encoding": "UTF-8",
"text": "from __future__ import division\n\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\nimport matplotlib.colors as col\nimport matplotlib.cm as cm\nimport gc\nimport sys\nimport h5py\nfrom astropy import constants as const\nfrom astropy import units as u\nfrom astropy.io import ascii\nfrom os.path import expanduser\nfrom matplotlib import rcParams\n\n\n\"\"\"\nusage: python plot_CO_filament_finder_image_turbgrav.py <snap> <axis>\n\"\"\"\n\n\n# import ramses helper functions and get figure directory\nhomedir = expanduser('~')+'/'\n\n# import ramses helper functions and get figure directory\nsys.path.append(homedir+'pythonhelpers/ramses/')\nfrom ramses_helpers import *\n#mpl.rc_file(homedir+'pythonhelpers/ramses/matplotlibrc')\n\n# set some fonts\nfontdir = homedir+'Documents/astronomy/macfontsforpython/'\ntfm = fm.FontProperties( # tick font\n fname=fontdir+'Gotham-Book.ttf', size=13)\nlfm = fm.FontProperties( # label font main\n fname=fontdir+'Gotham-BookItalic.ttf', size=12) \n\nrcParams['xtick.direction'] = 'out'\n\noutdir = get_output_path(homedir)\noutdir = './'\n\nsnapstr = str(int(sys.argv[1])).zfill(5)\ninfoname = 'reduced_'+snapstr+'/info_'+snapstr+'.txt'\n(boxlen, unit_l) = get_boxsize(infoname)\n\nsnap = int(sys.argv[1])\naxis = int(sys.argv[2])\n\n\n\nimshowmap = 'nickmapVD2'\n#imshowmap = 'bone_r'\n\n\"\"\"\nthis data is generated by making a surface density map taking into account only\ngas that is between 10^3 and 10^4.5 cm^-3. to convert to a crude approximation \nof a C18O map, use\nDavid S. Meier and Jean L. Turner ApJ 551:687 2001 equation 2\n\nN(H2)C18O = 2.42e14 cm^-2 [H2]/[C18O] * exp(5.27/Tex)/(exp(5.27/Tex)-1) IC18O K km/s\n[H2]/[C18O] = 2.94e6\n\nso first convert g cm^-2 to cm^-2 using mu = 2.33, then convert to ICO using Tex=10 K\n\"\"\"\ncdmin = 10**-3.3 \ncdmax = 10**-1.5\ncdmin = 0\ncdmax = 4\n\nfileprefix = 'reduced_'+str(snap).zfill(5)+'/'\nsinkname = fileprefix+'sink_'+str(snap).zfill(5)+'.csv'\n\n\nfile = fileprefix+'surface_density_C18O'+str(axis)+'.hdf5'\n\nif os.path.exists(file):\n print snap,file\n f = h5py.File(file, 'r')\n sd = f['surface_density_C18O']\n sd = 10**np.array(sd) # convert to linear units\n print np.mean(sd),np.max(sd)\n sd /= (2.33 * const.m_p.cgs.value) # convert to number density\n print np.mean(sd),np.max(sd)\n sd /= (2.42e14 * 2.94e6) # non-temperature factors of IC18O conversion\n sd /= (np.exp(5.27/10) / (np.exp(5.27/10) - 1)) # temperature part\n print np.mean(sd),np.max(sd)\n fig = plt.figure(figsize = (1.1111111*sd.shape[0]/200, 1.1111111*sd.shape[1]/200), dpi=200)\n ax = fig.add_axes([0.05, 0.05, .9, .9])\n axu = fig.add_axes([0.05, 0.95, .9, .9])\n axl = fig.add_axes([-0.85, 0.05, .9, .9])\n axr = fig.add_axes([0.95, 0.05, 0.9, 0.9])\n axd = fig.add_axes([0.05, -0.85, 0.9, 0.9])\n axul = fig.add_axes([-0.85, 0.95, 0.9, 0.9])\n axur = fig.add_axes([0.95, 0.95, 0.9, 0.9])\n axdl = fig.add_axes([-0.85, -0.85, 0.9, 0.9])\n axdr = fig.add_axes([0.95, -0.85, 0.9, 0.9])\n ax4 = fig.add_axes([0, 0, 1, 1])\n ax.imshow(sd,\n interpolation = 'nearest',\n origin = 'lower',\n vmin = cdmin,\n vmax = cdmax,\n cmap = imshowmap)\n axu.imshow(sd,interpolation = 'nearest',origin = 'lower',vmin = cdmin,vmax = cdmax,cmap = imshowmap)\n axl.imshow(sd,interpolation = 'nearest',origin = 'lower',vmin = cdmin,vmax = cdmax,cmap = imshowmap)\n axr.imshow(sd,interpolation = 'nearest',origin = 'lower',vmin = cdmin,vmax = cdmax,cmap = imshowmap)\n axd.imshow(sd,interpolation = 
'nearest',origin = 'lower',vmin = cdmin,vmax = cdmax,cmap = imshowmap)\n axul.imshow(sd,interpolation = 'nearest',origin = 'lower',vmin = cdmin,vmax = cdmax,cmap = imshowmap)\n axur.imshow(sd,interpolation = 'nearest',origin = 'lower',vmin = cdmin,vmax = cdmax,cmap = imshowmap)\n axdl.imshow(sd,interpolation = 'nearest',origin = 'lower',vmin = cdmin,vmax = cdmax,cmap = imshowmap)\n axdr.imshow(sd,interpolation = 'nearest',origin = 'lower',vmin = cdmin,vmax = cdmax,cmap = imshowmap)\n # see if we have any sink particles to plot\n try:\n with open(sinkname): \n sinks = ascii.read(sinkname, names=sinkcolumnnames, converters=sinkconverters)\n if len(sinks['ID']) > 0: \n # figure out the size of the sinks in units of 0-1\n #mincell = 1.0/2**lmax\n #sinkrad = 1.5 * mincell\n sscale = sd.shape[0] / boxlen\n \n if axis == 0:\n i0 = 'y'\n i1 = 'z'\n if axis == 1:\n i0 = 'x'\n i1 = 'z'\n if axis == 2:\n i0 = 'x'\n i1 = 'y'\n\n # convert to imshow scale\n #sinkpos *= res[1] / wd\n #sinkrad *= res[1] / wd\n\n # color by the log of mass. the minimum that we plot is 0.1 Msun,\n # max is a few hundred.\n mmin = np.log10(1)\n mmax = np.log10(30)\n sinkmap = cm.get_cmap('nickmapSink')\n sinkcolors = sinkmap((np.log10(sinks['mass']) - mmin) / (mmax - mmin)) \n ax.autoscale(False)\n #for s in xrange(len(sinks)):\n # ax.add_artist(Circle((sinkpos[s,0],sinkpos[s,1]),sinkrad,fc=csink))\n ax.scatter(sinks[i0]*sscale,sinks[i1]*sscale,marker='.',s=20,facecolor=sinkcolors,lw=0.25) \n \n except IOError:\n pass \n \n \n # outline the original periodic box\n ax.plot([0, sd.shape[0]-1, sd.shape[0]-1, 0, 0],[0, 0, sd.shape[0]-1, sd.shape[0]-1, 0],\n lw=0.5, ls='--', color = 'c', solid_joinstyle='miter',zorder=300)\n \n ax.set_frame_on(False)\n ax.axes.get_yaxis().set_visible(False)\n ax.axes.get_xaxis().set_visible(False)\n axu.set_frame_on(False)\n axu.axes.get_yaxis().set_visible(False)\n axu.axes.get_xaxis().set_visible(False)\n axl.set_frame_on(False)\n axl.axes.get_yaxis().set_visible(False)\n axl.axes.get_xaxis().set_visible(False)\n axr.set_frame_on(False)\n axr.axes.get_yaxis().set_visible(False)\n axr.axes.get_xaxis().set_visible(False)\n axd.set_frame_on(False)\n axd.axes.get_yaxis().set_visible(False)\n axd.axes.get_xaxis().set_visible(False)\n axul.set_frame_on(False)\n axul.axes.get_yaxis().set_visible(False)\n axul.axes.get_xaxis().set_visible(False)\n axur.set_frame_on(False)\n axur.axes.get_yaxis().set_visible(False)\n axur.axes.get_xaxis().set_visible(False)\n axdl.set_frame_on(False)\n axdl.axes.get_yaxis().set_visible(False)\n axdl.axes.get_xaxis().set_visible(False)\n axdr.set_frame_on(False)\n axdr.axes.get_yaxis().set_visible(False)\n axdr.axes.get_xaxis().set_visible(False)\n \n ax4.set_frame_on(False)\n ax4.axes.get_yaxis().set_visible(False)\n ax4.axes.get_xaxis().set_visible(False)\n ax4.set_xlim(0,1)\n ax4.set_ylim(0,1)\n \n framesdir = 'finderimage'+str(axis)+'/'\n if not os.path.exists(framesdir):\n os.makedirs(framesdir)\n\n framename = framesdir+'finderimage'+str(axis)+'_frame_'+str(snap).zfill(5)\n plt.savefig(framename+'_noboxes.png', dpi = 400)\n \n f.close() \n plt.close() \n del(f)\n del(sd)\n gc.collect()\n\n"
},
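The noboxes script fakes periodic boundary conditions by drawing the same image into a 3x3 grid of axes offset by +/-0.9 in figure coordinates. The nine hand-written add_axes/imshow calls could equally be generated in a loop; a minimal sketch of that tiling (the random array stands in for the surface density map):

import numpy as np
import matplotlib.pyplot as plt

sd = np.random.rand(64, 64)        # stand-in for the surface density map
fig = plt.figure(figsize=(4, 4))
for dx in (-0.9, 0.0, 0.9):
    for dy in (-0.9, 0.0, 0.9):
        # each copy occupies a 0.9-wide panel, shifted one panel per tile
        ax = fig.add_axes([0.05 + dx, 0.05 + dy, 0.9, 0.9])
        ax.imshow(sd, interpolation='nearest', origin='lower', cmap='bone_r')
        ax.set_frame_on(False)
        ax.axes.get_xaxis().set_visible(False)
        ax.axes.get_yaxis().set_visible(False)
plt.savefig('tiled.png', dpi=100)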
{
"alpha_fraction": 0.6025518178939819,
"alphanum_fraction": 0.6338118314743042,
"avg_line_length": 28.443395614624023,
"blob_id": "d73efef389e962567282cff46bca4cbfa7d46e45",
"content_id": "d1a6ac500ffdb2a53a984b3e6ba58891b7f5a4e7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3135,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 106,
"path": "/turbgravfilaments2/convert_hdf5_to_fits.py",
"repo_name": "nickolas1/ramses_plot_scripts",
"src_encoding": "UTF-8",
"text": "from __future__ import division\n\n\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\nimport gc\nimport sys\nimport h5py\nimport shutil\nimport glob\nfrom astropy.io import ascii\nfrom astropy.io import fits\nfrom astropy import constants as const\nfrom astropy import units as u\nfrom os.path import expanduser\nfrom scipy import special\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\"\"\"\nusage:\npython fooppv.py N A F\nN: number of output to use. reduced_N needs to be here.\nA: axis of the projection (0, 1, 2)\n\"\"\"\n\n# import ramses helper functions and get figure directory\nhomedir = expanduser('~')+'/'\n\n# import ramses helper functions and get figure directory\nsys.path.append(homedir+'pythonhelpers/ramses/')\nfrom ramses_helpers import *\n\n\n# downsample the spectra to a 256 squared grid\ninres = 1024 #1024\noutres = 1024 #256\nstride = int(inres / outres)\nprint stride\n\n\nsnapstart = int(sys.argv[1])\nsnapend = int(sys.argv[2])\n\nfor snap in xrange(snapstart, snapend, 1):\n fileprefix = 'reduced_'+str(snap).zfill(5)+'/'\n infofile = fileprefix+'info_'+str(snap).zfill(5)+'.txt'\n (time, unit_t) = get_time(infofile)\n time_kyr = time * unit_t / (3.15569e7 * 1000)\n\n timestr = str(int(round(time_kyr))).zfill(4)\n print timestr\n \n for axis in xrange(3):\n ax = str(axis) \n # make the surface density fits files\n fileCO = fileprefix+'surface_density_C18O'+ax+'.hdf5'\n fileSD = fileprefix+'surface_density'+ax+'.hdf5'\n fileNH = fileprefix+'surface_density_N2Hplus'+ax+'.hdf5'\n \n outCO = 'columndensity.total.'+timestr+'.'+ax+'.fits'\n outSD = 'columndensity.C18O.'+timestr+'.'+ax+'.fits'\n outNH = 'columndensity.N2H+.'+timestr+'.'+ax+'.fits'\n \n print outCO\n print outSD\n print outNH\n \n fC18O = h5py.File(fileCO, 'r')\n sdC18O = fC18O['surface_density_C18O']\n # convert to linear units\n sdC18O = 10**np.array(sdC18O)\n print 'max C18O column density: ',np.max(sdC18O),'cm^-2'\n \n hdu = fits.PrimaryHDU(sdC18O)\n hdulist = fits.HDUList([hdu])\n hdulist.writeto(outCO)\n fC18O.close()\n\n\n fN2Hplus = h5py.File(fileNH, 'r')\n sdN2Hplus = fN2Hplus['surface_density_N2Hplus']\n # convert to linear units\n sdN2Hplus = 10**np.array(sdN2Hplus)\n print 'max N2Hplus column density: ',np.max(sdN2Hplus),'cm^-2'\n\n hdu = fits.PrimaryHDU(sdN2Hplus)\n hdulist = fits.HDUList([hdu])\n hdulist.writeto(outNH)\n fN2Hplus.close()\n\n\n fSD = h5py.File(fileSD, 'r')\n sdSD = fSD['surface_density']\n # convert to linear units\n sdSD = 10**np.array(sdSD)\n sdnonzero = sdSD[sdSD > 10**4]\n print 'mean non-zero SD column density: ',np.mean(sdnonzero),'cm^-2'\n print 'median non-zero SD column density: ',np.median(sdnonzero),'cm^-2'\n print 'max SD column density: ',np.max(sdSD),'cm^-2'\n\n hdu = fits.PrimaryHDU(sdSD)\n hdulist = fits.HDUList([hdu])\n hdulist.writeto(outSD)\n fSD.close()\n \n\n\n "
},
{
"alpha_fraction": 0.5773453116416931,
"alphanum_fraction": 0.6122754216194153,
"avg_line_length": 32.358333587646484,
"blob_id": "25030979cef973795a6fb981d2471dd71f079ca3",
"content_id": "88a9d8f091bbe0724eca424c11517457aff82da4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4008,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 120,
"path": "/turbgravfilaments/make_reduced_data_CO_filaments_only_turbgrav.py",
"repo_name": "nickolas1/ramses_plot_scripts",
"src_encoding": "UTF-8",
"text": "from __future__ import division\n\nfrom yt.mods import *\nfrom yt.config import ytcfg\nimport gc\nimport sys\nimport h5py\nimport shutil\nfrom os.path import expanduser\nfrom mpi4py import MPI\n\n# import ramses helper functions and get figure directory\nhomedir = expanduser('~')+'/'\n\n# import ramses helper functions and get figure directory\nsys.path.append(homedir+'pythonhelpers/ramses/')\nfrom ramses_helpers import *\n\ndef _CO(field, data):\n mu = 2.33 # mean molecular weight\n mH = 1.6733e-24\n lolim = .001 * mu * mH # not interested in anything below 10^3 / cm^3\n hilim = 31622.0 * mu * mH # not interested in anything above 10^4.5 / com^3\n newfield = data['Density']\n antiselection = (data['Density'] < lolim) | (data['Density'] >= hilim)\n newfield[antiselection] = 1.e-99\n \n # cut out the filaments we're interested in\n leftpoint = np.array([0.795, 0.34])\n rightpoint = np.array([0.805, 0.05]) \n width = 0.045\n vector = rightpoint - leftpoint\n midpoint = leftpoint + 0.5 * vector\n # translate to midpoint\n transx = data['x'] - midpoint[0]\n transy = data['y'] - midpoint[1]\n length = np.linalg.norm(vector)\n orthovec = (-vector[1], vector[0])\n orthovec /= np.linalg.norm(orthovec)\n vector /= np.linalg.norm(vector)\n \n # rotate around midpoint. orthovec is already a unit vector now.\n beta = np.arccos(orthovec[1])\n rotx = transx * np.cos(beta) - transy * np.sin(beta)\n roty = transx * np.sin(beta) + transy * np.cos(beta)\n \n # cut based on width and length of box\n antiselection2 = (np.abs(rotx) > 0.5*length) | (np.abs(roty) > 0.5 * width)\n newfield[antiselection2] = 1.e-99\n \n return newfield\n\nfor snap in range(int(sys.argv[1]),int(sys.argv[2]),int(sys.argv[3])):\n infoname = 'output_'+str(snap).zfill(5)+'/info_'+str(snap).zfill(5)+'.txt'\n sinkname = 'output_'+str(snap).zfill(5)+'/sink_'+str(snap).zfill(5)+'.out'\n \n fileprefix = 'reduced_'+str(snap).zfill(5)+'/'\n\n if ytcfg.getint('yt', '__topcomm_parallel_rank') == 0:\n if not os.path.exists(fileprefix):\n os.makedirs(fileprefix) \n # copy the infofile and sinkfile to the reduced directory \n shutil.copy(infoname, fileprefix)\n if os.path.exists(sinkname):\n shutil.copy(sinkname, fileprefix)\n \n (lmin, lmax) = get_level_min_max(infoname)\n (boxlen, unit_l) = get_boxsize(infoname)\n\n ds = load(infoname, fields=['Density','x-velocity','y-velocity','z-velocity','Pressure'])\n \n add_field('CO', function=_CO)\n \n # center on original center of cloud\n cntr = [0.5, 0.5, 0.5]\n\n wd = 1.0\n # res should be base resolution times 2**levels of refinement * wd\n lmaxplot = min(11, lmax) \n resx = int(wd * 2**lmaxplot)\n expandfac = 1\n res = (expandfac * resx,expandfac * resx)\n ht = wd * res[0] / res[1]\n width = (wd, 'unitary')\n height = (ht, 'unitary')\n\n # cut out the filaments we're interested in\n leftpoint = np.array([0.795, 0.34])\n rightpoint = np.array([0.805, 0.05]) \n width = 0.045\n vector = rightpoint - leftpoint\n midpoint = leftpoint + 0.5 * vector\n orthovec = (-vector[1], vector[0], 0)\n orthovec /= np.linalg.norm(orthovec)\n print orthovec\n cntr = [midpoint[0], midpoint[1], 0.5]\n width = [length, length, 1.0]\n #get a projection orthogonal to the filament\n proj = off_axis_projection(ds,\n cntr, #center\n orthovec, #normal vector\n width, #width\n 256, #resolution\n 'CO',\n north_vector = [0, 0, -1] #make z the vertical direction on the plot\n )\n print proj.shape\n \"\"\"proj = off_axis_projection(ds,\n [0.5, 0.5, 0.5], #center\n orthovec, #normal vector\n 1.0, #width\n res, #resolution\n 
'CO',\n north_vector = [0, 0, 1] #make z the vertical direction on the plot\n )\"\"\"\n proj.write_hdf5(fileprefix+'surface_density_CO_fil0.hdf5')\n\n del(proj) \n del(ds)\n gc.collect()\n \n"
},
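The _CO cut in the script above selects cells inside a tilted rectangle by translating to the box midpoint and rotating by the angle beta between the box's short axis and the y axis, after which the inclusion test is a simple axis-aligned comparison. The rotation step in isolation, with two fabricated test points:

import numpy as np

leftpoint = np.array([0.795, 0.34])
rightpoint = np.array([0.805, 0.05])
width = 0.045

vector = rightpoint - leftpoint
midpoint = leftpoint + 0.5 * vector
length = np.linalg.norm(vector)
orthovec = np.array([-vector[1], vector[0]]) / np.linalg.norm(vector)  # unit short axis

# fabricated cell coordinates to test
x = np.array([0.80, 0.60])
y = np.array([0.20, 0.20])

# translate to the midpoint, then rotate so the box is axis-aligned
transx, transy = x - midpoint[0], y - midpoint[1]
beta = np.arccos(orthovec[1])
rotx = transx * np.cos(beta) - transy * np.sin(beta)
roty = transx * np.sin(beta) + transy * np.cos(beta)

inside = (np.abs(rotx) <= 0.5 * length) & (np.abs(roty) <= 0.5 * width)
print(inside)   # the first point lies in the box, the second does not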
{
"alpha_fraction": 0.6543547511100769,
"alphanum_fraction": 0.6739625930786133,
"avg_line_length": 26.9743595123291,
"blob_id": "3b25e5aa0c9c5216360259c957cfd3dba8305872",
"content_id": "397c386b48592ad069dcdf2b1285b1e4ffaebf34",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2193,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 78,
"path": "/turbgravfilaments2/make_ppp.py",
"repo_name": "nickolas1/ramses_plot_scripts",
"src_encoding": "UTF-8",
"text": "from __future__ import division\n\nfrom yt.mods import *\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\nimport gc\nimport sys\nimport h5py\nfrom os.path import expanduser\nfrom astropy.io import fits\n\n\n\"\"\"\nusage:\npython fooppv.py N A F\nN: number of output to use. reduced_N needs to be here.\nA: axis of the projection (0, 1, 2)\n\"\"\"\n\n# import ramses helper functions and get figure directory\nhomedir = expanduser('~')+'/'\n\n# import ramses helper functions and get figure directory\nsys.path.append(homedir+'pythonhelpers/ramses/')\nfrom ramses_helpers import *\n\n\n# if you were outputting the entire cube, what would the resolution be?\ninres = 1024 #64\n\nsnap = int(sys.argv[1])\nfileprefix = 'output_'+str(snap).zfill(5)+'/'\n\ninfofile = fileprefix+'info_'+str(snap).zfill(5)+'.txt'\n\n# left corner of the output box, in units of the cube length\nleft_corner = np.array([0.5, 0.5, 0.85])\nleft_corner -= np.mod(left_corner, 1/inres) #align to grid spacing\n\n# output size of the cube in pixels\noutput_cube_size = 512 #32\n\noutcube = np.zeros([output_cube_size, output_cube_size, output_cube_size], dtype=np.float32)\n\nslicewidth = 1 / inres\n\nds = load(infofile)\n\nxcenter = left_corner[0] + output_cube_size / 2 / inres\nycenter = left_corner[1] + output_cube_size / 2 / inres\n\nfor iz in xrange(output_cube_size):\n zlow = left_corner[2] + iz * slicewidth\n if zlow >= 1:\n zlow -= 1\n zhigh = zlow + slicewidth\n print zlow, zhigh\n \n cr = ds.h.all_data().cut_region(\n [\"obj['z'] > \"+str(zlow),\n \"obj['z'] < \"+str(zhigh)])\n \n prj = ds.h.proj('Density', 2, data_source=cr, \n center = [xcenter, ycenter, (zlow+zhigh)/2])\n\n frb = prj.to_frb(output_cube_size / inres, output_cube_size) \n # when storing the projection, get the mean density by dividing by the pixel length\n outcube[:,:,iz] = frb['Density'] / (slicewidth * ds.h.parameter_file.units['cm'])\n\nf = h5py.File('ppp.hdf5', 'w')\ndset = f.create_dataset('density', data = outcube, dtype='float32')\nf.close()\n \n#hdu = fits.PrimaryHDU(outcube)\n#hdulist = fits.HDUList([hdu])\n#hdulist.writeto('spectra.N2H+.'+timestr+'.fits')\n\n \n\n \n"
},
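make_ppp.py rebuilds a position-position-position cube by projecting thin slabs and dividing each column by the slab thickness in cm to recover a mean density. The unit bookkeeping in isolation; the cm-per-code-length value is an assumption here (a 10 pc box), and the column value is fabricated:

# a projection through a slab returns column density in g/cm^2;
# dividing by the slab thickness in cm gives the mean density in g/cm^3
slicewidth = 1.0 / 1024                 # slab thickness in code units
unit_cm = 3.086e19                      # assumed cm per code length (10 pc box)
column = 3.2e-3                         # fabricated column density, g/cm^2
mean_density = column / (slicewidth * unit_cm)
print(mean_density)                     # -> ~1.06e-19 g/cm^3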
{
"alpha_fraction": 0.5581514835357666,
"alphanum_fraction": 0.5934531688690186,
"avg_line_length": 33.464603424072266,
"blob_id": "ef4a7092d263116278e3c9de94aee150bdb88ffa",
"content_id": "21fa8864123b7b285ae4dc902589eae89be0956d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7790,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 226,
"path": "/turbgravfilaments/plot_CO_filament_finder_image_turbgrav.py",
"repo_name": "nickolas1/ramses_plot_scripts",
"src_encoding": "UTF-8",
"text": "from __future__ import division\n\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\nimport matplotlib.colors as col\nimport matplotlib.cm as cm\nimport gc\nimport sys\nimport h5py\nfrom astropy import constants as const\nfrom astropy import units as u\nfrom astropy.io import ascii\nfrom os.path import expanduser\nfrom matplotlib import rcParams\n\n\n\"\"\"\nusage: python plot_CO_filament_finder_image_turbgrav.py <snap> <axis>\n\"\"\"\n\n\n# import ramses helper functions and get figure directory\nhomedir = expanduser('~')+'/'\n\n# import ramses helper functions and get figure directory\nsys.path.append(homedir+'pythonhelpers/ramses/')\nfrom ramses_helpers import *\n#mpl.rc_file(homedir+'pythonhelpers/ramses/matplotlibrc')\n\n# set some fonts\nfontdir = homedir+'Documents/astronomy/macfontsforpython/'\ntfm = fm.FontProperties( # tick font\n fname=fontdir+'Gotham-Book.ttf', size=13)\nlfm = fm.FontProperties( # label font main\n fname=fontdir+'Gotham-BookItalic.ttf', size=12) \n\nrcParams['xtick.direction'] = 'out'\n\noutdir = get_output_path(homedir)\noutdir = './'\n\nsnapstr = str(int(sys.argv[1])).zfill(5)\ninfoname = 'reduced_'+snapstr+'/info_'+snapstr+'.txt'\n(boxlen, unit_l) = get_boxsize(infoname)\n\nsnap = int(sys.argv[1])\naxis = int(sys.argv[2])\n\n\n\nimshowmap = 'nickmapVD2'\n#imshowmap = 'bone_r'\n\n\"\"\"\nthis data is generated by making a surface density map taking into account only\ngas that is between 10^3 and 10^4.5 cm^-3. to convert to a crude approximation \nof a C18O map, use\nDavid S. Meier and Jean L. Turner ApJ 551:687 2001 equation 2\n\nN(H2)C18O = 2.42e14 cm^-2 [H2]/[C18O] * exp(5.27/Tex)/(exp(5.27/Tex)-1) IC18O K km/s\n[H2]/[C18O] = 2.94e6\n\nso first convert g cm^-2 to cm^-2 using mu = 2.33, then convert to ICO using Tex=10 K\n\"\"\"\ncdmin = 10**-3.3 \ncdmax = 10**-1.5\ncdmin = 0\ncdmax = 5\n\nfileprefix = 'reduced_'+str(snap).zfill(5)+'/'\nsinkname = fileprefix+'sink_'+str(snap).zfill(5)+'.out'\n\n\nfile = fileprefix+'surface_density_CO_'+str(axis)+'.hdf5'\nif os.path.exists(file):\n print snap,file\n f = h5py.File(file, 'r')\n sd = f['surface_density_CO']\n sd = 10**np.array(sd) # convert to linear units\n print np.mean(sd),np.max(sd)\n sd /= (2.33 * const.m_p.cgs.value) # convert to number density\n print np.mean(sd),np.max(sd)\n sd /= (2.42e14 * 2.94e6) # non-temperature factors of IC18O conversion\n sd /= (np.exp(5.27/10) / (np.exp(5.27/10) - 1)) # temperature part\n print np.mean(sd),np.max(sd)\n fig = plt.figure(figsize = (sd.shape[0]/200, sd.shape[1]/200), dpi=200)\n ax = fig.add_axes([0., 0., 1., 1.])\n ax.imshow(sd,\n interpolation = 'nearest',\n origin = 'lower',\n vmin = cdmin,\n vmax = cdmax,\n cmap = imshowmap)\n \n # see if we have any sink particles to plot\n try:\n with open(sinkname): \n sinks = get_sinks(sinkname)\n if len(sinks) > 0:\n # figure out the size of the sinks in units of 0-1\n #mincell = 1.0/2**lmax\n #sinkrad = 1.5 * mincell\n \n sinkpos = sinks[:,2:5]\n sinkpos[:] /= boxlen # shrink to 0-1 in all dimensions\n # get projected positions\n keep = np.array([1,1,1])\n keep[axis] = 0\n keep = np.array(keep, dtype=bool)\n sinkpos = sinkpos[:,keep]\n \n # restrict to same region as density plot\n #ledge = cntr[0] - wd/2\n #bedge = cntr[2] - ht/2\n #sinkpos[:] -= np.array([ledge, bedge])\n # convert to imshow scale\n #sinkpos *= res[1] / wd\n #sinkrad *= res[1] / wd\n sinkpos *= sd.shape[0]\n print sinkpos \n sinkmass = sinks[:,1]\n # color by the log of mass. 
the minimum that we plot is 0.1 Msun,\n # max is a few hundred.\n mmin = np.log10(1)\n mmax = np.log10(100)\n sinkmap = cm.get_cmap('nickmapSink')\n sinkcolors = sinkmap((np.log10(sinkmass) - mmin) / (mmax - mmin)) \n ax.autoscale(False)\n #for s in xrange(len(sinks)):\n # ax.add_artist(Circle((sinkpos[s,0],sinkpos[s,1]),sinkrad,fc=csink))\n ax.scatter(sinkpos[:,0],sinkpos[:,1],marker='.',s=9,facecolor=sinkcolors,lw=0) \n except IOError:\n pass \n \n \n \"\"\"\n xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n add a colorbar\n xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n \"\"\"\n ax2 = fig.add_axes([0.45, 0.8, 0.4, 0.015])\n a = np.outer(np.arange(cdmin, cdmax, (cdmax - cdmin)/255), np.ones(10)).T\n ax2.imshow(a, \n aspect = 'auto',\n interpolation = 'nearest',\n origin = 'lower',\n vmin = cdmin,\n vmax = cdmax,\n cmap = imshowmap,\n extent = [cdmin, cdmax, 0, 1])\n ax2.set_frame_on(False)\n ax2.axes.get_yaxis().set_visible(False)\n ax2.xaxis.set_ticks(np.arange(cdmin, cdmax+1, 1.0))\n ax2.set_xlabel(r'$\\mathdefault{I_{C^{18}O}}$ / K km s$\\mathdefault{^{-1}}$', fontproperties = tfm, size=18, color='0.15')\n \n set_ticks(ax2, '0.15')\n for label in ax2.get_xticklabels() + ax2.get_yticklabels():\n label.set_fontproperties(tfm)\n\n\n \"\"\"\n xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n add a scalebar\n xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n \"\"\"\n ax3 = fig.add_axes([0.65, 0.825, 0.2, 0.0015])\n a = np.outer(np.ones(100)*.8*cdmax, np.ones(10)).T\n ax3.imshow(a, \n aspect = 'auto',\n interpolation = 'nearest',\n origin = 'lower',\n vmin = cdmin,\n vmax = cdmax,\n cmap = imshowmap,\n extent = [cdmin, cdmax, 0, 1])\n ax3.set_frame_on(False)\n ax3.axes.get_yaxis().set_visible(False)\n ax3.axes.get_xaxis().set_visible(False)\n ax3.text(0.1, 0.85, r'2pc', transform = ax3.transAxes,\n va = 'bottom', ha = 'left', fontproperties = tfm, color='0.15', snap = False)\n set_ticks(ax3, '0.15')\n for label in ax3.get_xticklabels() + ax3.get_yticklabels():\n label.set_fontproperties(tfm)\n \n\n \n ax.autoscale(False)\n # define a rectangle by drawing a line on a filament and choosing a width\n rectdata = ascii.read(fileprefix+'filaments'+str(axis)+'_'+str(snap).zfill(5)+'.txt')\n for fil in rectdata:\n leftpoint = np.array([fil[1], fil[2]])\n rightpoint = np.array([fil[3], fil[4]])\n width = fil[5]\n vector = rightpoint - leftpoint\n orthovec = (-vector[1], vector[0])\n orthovec /= np.linalg.norm(orthovec)\n x = (leftpoint[0], rightpoint[0])\n y = (leftpoint[1], rightpoint[1])\n ul = leftpoint + orthovec * width/2\n ll = leftpoint - orthovec * width/2\n ur = rightpoint + orthovec * width/2\n lr = rightpoint - orthovec * width/2\n rectangle = np.transpose([ul, ll, lr, ur, ul])\n #ax.plot(x,y,lw=.3,color=cred)\n ax.plot(rectangle[0]*sd.shape[0], rectangle[1]*sd.shape[0], lw=.7,color='m', solid_joinstyle='miter') \n \n # turn off axes\n ax.set_frame_on(False)\n ax.axes.get_yaxis().set_visible(False)\n ax.axes.get_xaxis().set_visible(False)\n \n framesdir = 'finderimage'+str(axis)+'/'\n if not os.path.exists(framesdir):\n os.makedirs(framesdir)\n\n framename = framesdir+'finderimage'+str(axis)+'_frame_'+str(snap).zfill(5)\n plt.savefig(framename+'.png', dpi = 200)\n plt.savefig(framename+'.pdf')\n \n f.close() \n plt.close() \n del(f)\n del(sd)\n gc.collect()\n\n"
},
{
"alpha_fraction": 0.6193406581878662,
"alphanum_fraction": 0.6514285802841187,
"avg_line_length": 30.901409149169922,
"blob_id": "4c590765d566680ef5b34e87c0dd269c8c50ae46",
"content_id": "d2bb1aa91f7b47d396c83cbbdcc4ce0308ad4eca",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2275,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 71,
"path": "/make_density_powerspectrum.py",
"repo_name": "nickolas1/ramses_plot_scripts",
"src_encoding": "UTF-8",
"text": "from __future__ import division\n\nfrom yt.mods import *\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\nimport gc\nimport sys\n\n# import ramses helper functions and get figure directory\nhomedir = '/home/moon/moeckel/'\nsys.path.append(homedir+'pythonhelpers/ramses/')\nfrom ramses_helpers import *\nmpl.rc_file(homedir+'pythonhelpers/ramses/matplotlibrc')\noutdir = get_output_path(homedir)\n\n# set some fonts\nfontdir = homedir+'Documents/astronomy/macfontsforpython/'\ntfm = fm.FontProperties( # tick font main\n fname=fontdir+'Gotham-Book.ttf', size=13) \nlfm = fm.FontProperties( # label font main\n fname=fontdir+'Gotham-BookItalic.ttf', size=11) \n\n\nfor snap in range(int(sys.argv[1]),int(sys.argv[2]),int(sys.argv[3])):\n infoname = 'output_'+str(snap).zfill(5)+'/info_'+str(snap).zfill(5)+'.txt'\n sinkname = 'output_'+str(snap).zfill(5)+'/sink_'+str(snap).zfill(5)+'.out'\n framename = outdir+'densityPDF'+str(snap).zfill(4)+'.png'\n \n (boxlen, unit_l) = get_boxsize(infoname)\n\n ds = load(infoname,fields=['Density','x-velocity','y-velocity','z-velocity','Pressure'])\n \n # center on original center of cloud\n cntr = [0.5, 0.5, 0.5]\n \n sphererad = 27.0\n spherevol = 4.0 * np.pi / 3.0 * (sphererad * 3.086e18)**3\n sp = ds.h.sphere(cntr, (sphererad, 'pc'))\n \n gasmass = sp.quantities['TotalQuantity']('CellMassMsun')\n print gasmass\n gasmass = sp.quantities['TotalQuantity']('CellMassCode')\n print gasmass\n \n nbins = 128\n dmin = 1.e-25\n dmax = 1.e-18\n\n profile = BinnedProfile1D(sp,nbins,'Density',dmin,dmax)\n profile.add_fields(\"CellVolume\", weight=None)\n \n fig = plt.figure(figsize = (5,3.5))\n\n ax = fig.add_axes([0.2, 0.15, 0.75, 0.8])\n \n # plot density pdf\n ax.plot(profile['Density'], profile['CellVolume']/spherevol, color = '0.2', linewidth = 1.5)\n ax.yaxis.grid(False,which='minor') \n ax.xaxis.grid(False,which='minor')\n ax.set_yscale('log')\n ax.set_xscale('log')\n #ax.set_xlim(-.05,7.05)\n ax.set_ylim(1.e-8,0.1)\n set_ticks(ax, '0.6')\n \n for label in ax.get_xticklabels() + ax.get_yticklabels():\n label.set_fontproperties(tfm)\n \n plt.savefig(framename, dpi = 200)\n \n \n"
},
{
"alpha_fraction": 0.5971161127090454,
"alphanum_fraction": 0.6199162006378174,
"avg_line_length": 29.05555534362793,
"blob_id": "dd8987ccf8f99fd6887d79ea2bc180cd0d4d090f",
"content_id": "fc2333c5e4034cc12a8541e736ba883f3f56b619",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8114,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 270,
"path": "/turbgravfilaments/plot_CO_vel_detections.py",
"repo_name": "nickolas1/ramses_plot_scripts",
"src_encoding": "UTF-8",
"text": "from __future__ import division\n\nfrom yt.mods import *\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\nimport gc\nimport sys\nimport h5py\nimport shutil\nimport glob\nfrom astropy.io import ascii\nfrom os.path import expanduser\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\"\"\"\nusage:\npython fooppv.py N A F\nN: number of output to use. reduced_N needs to be here.\nA: axis of the projection (0, 1, 2)\nF: filament number \n\"\"\"\n\n# import ramses helper functions and get figure directory\nhomedir = expanduser('~')+'/'\n\n# import ramses helper functions and get figure directory\nsys.path.append(homedir+'pythonhelpers/ramses/')\nfrom ramses_helpers import *\nmpl.rc_file(homedir+'pythonhelpers/ramses/matplotlibrc')\noutdir = get_output_path(homedir)\n\nmpl.rcParams['xtick.major.size'] = 9\n\ntc = '0.5'\ntc1 = '0.9'\n\n# set some fonts\nfontdir = homedir+'Documents/astronomy/macfontsforpython/'\ntfm = fm.FontProperties( # tick font\n fname=fontdir+'Gotham-Book.ttf', size=13)\nlfm = fm.FontProperties( # label font main\n fname=fontdir+'Gotham-BookItalic.ttf', size=12)\n\nfig = plt.figure(figsize = (5, 3))\nax = fig.add_axes([.2, .2, .75, .75])\n\nsnap = int(sys.argv[1])\naxis = int(sys.argv[2])\nfileprefix = 'reduced_'+str(snap).zfill(5)+'/'\n\n\n# read in the rectangle from the filament definition\nrectdata = ascii.read(fileprefix+'filaments'+str(axis)+'_'+str(snap).zfill(5)+'.txt')\nfor fil in rectdata:\n leftpoint = np.array([fil[1], fil[2]])\n rightpoint = np.array([fil[3], fil[4]])\n width = fil[5]\n vector = rightpoint - leftpoint\n orthovec = (-vector[1], vector[0])\n orthovec /= np.linalg.norm(orthovec)\n x = (leftpoint[0], rightpoint[0])\n y = (leftpoint[1], rightpoint[1])\n ul = leftpoint + orthovec * width/2\n ll = leftpoint - orthovec * width/2\n ur = rightpoint + orthovec * width/2\n lr = rightpoint - orthovec * width/2\n rectangle = np.transpose([ul, ll, lr, ur, ul])\n\n # this rectangle is in unitary units.\n \n print ll\n print ur\n \n # read in the detections file\n # NOTE this is [z, y, [vels]]\n f = h5py.File(fileprefix+'posvel_'+str(axis)+'/detections.hdf5')\n dets = np.array(f['veldetections'])\n f.close()\n res = dets.shape[0]\n dres = 1 / res\n \n # get index limits of the box\n zlo = int(np.floor(ll[1] / dres))\n zhi = int(np.ceil(ur[1] / dres))\n ylo = int(np.floor(ll[0] / dres))\n yhi = int(np.ceil(ur[0] / dres))\n \n # march along y and collect detections over z\n ys = []\n collecteddets = []\n for y in xrange(ylo, yhi+1, 1):\n zsumdets = (dets[zlo:zhi+1, y]).flatten()\n zsumdets = zsumdets[np.logical_not(np.isnan(zsumdets))]\n collecteddets.append(zsumdets.tolist())\n ys.append((np.ones(len(zsumdets)) * (y + 0.5) * dres).tolist())\n \nys = np.array(reduce(lambda x,y: x+y,ys))\ncollecteddets = np.array(reduce(lambda x,y: x+y,collecteddets))\n\n# plot a 2D histogram of the detected vels as a function of y\nf = h5py.File(fileprefix+'posvel_'+str(axis)+'/spectrumvels.hdf5')\nvels = np.array(f['binmidskms'])\nf.close()\n\ndvel = vels[1] - vels[0]\ndvel *= 1\nvelmin = -2\nvelmax = 2\nvelbins = np.arange(velmin, velmax, dvel)\nprint len(ys)\nprint np.min(ys)\nprint (yhi+1 - ylo)\ndy = (np.max(ys) - np.min(ys))/(yhi+1 - ylo)\nybins = np.arange(np.min(ys), np.max(ys), dy)\n\n#ax.tick_params(labelbottom='off')\n\nH, xedges, yedges = np.histogram2d(collecteddets, ys, bins=(velbins, ybins))\nprint np.min(H), np.max(H), np.mean(H), np.median(H)\nextent = [yedges[0], yedges[-1], xedges[0], 
xedges[-1]]\nax.imshow(H,\n extent = [leftpoint[0], rightpoint[1], velmin, velmax],\n origin = 'lower',\n interpolation = 'nearest',\n aspect = .03,\n cmap = 'gray_r',\n vmax = 30\n )\n\n\nax.get_yaxis().tick_left()\nax.get_xaxis().tick_bottom()\nfor line in ax.xaxis.get_ticklines():\n line.set_color(tc1)\nfor line in ax.yaxis.get_ticklines():\n line.set_color(tc1) \nfor line in ax.yaxis.get_ticklines():\n line.set_color(tc1) \nax.tick_params(which = 'minor', color = tc1) \nax.tick_params(labelbottom='off')\nax.grid(color='0.0',alpha=0.1)\n\nax.set_ylabel(r'$\\mathdefault{v_{los}}$ / km $\\mathdefault{s^{-1}}$', fontproperties = tfm, size = 15, color=tc)\nfor label in ax.get_xticklabels() + ax.get_yticklabels():\n label.set_fontproperties(tfm)\n label.set_color(tc)\n\nplt.savefig('fil0.pdf') \nsys.exit()\n\n \n \n \n \n\nf = h5py.File(fildir+'spectrumvels.hdf5')\nvels = np.array(f['binmidskms'])\nf.close()\n\nl_det = [] # detected l coordinates (length)\nw_det = [] # detected w coordinates (width)\nv_det = [] # detected v coordinates (velocity)\n#for i in xrange(358):\n\nl = 0 # keep track of l coordinate\nfor filename in glob.glob(fildir+'spectra_*.hdf5'): \n# filename = 'combinedspectrum_'+str(i).zfill(4)+'.hdf5'\n print filename\n f = h5py.File(filename, 'r')\n specs = f['spectra']\n # treat the spectrum of each point individually\n spectrum = np.zeros(specs.shape[1])\n \n for s in xrange(specs.shape[0]):\n spec = np.array(specs[s])\n # normalize the spectrum by its rms\n spec /= np.sqrt(np.mean(spec**2))\n \n # set 'detected' vels equal to one, the rest to zero\n spec[spec < 3] = 0.0\n spec[spec > 0] = 1.0\n \n # march through and glom together touching detections\n runningn = 0\n runningv = 0\n if spec[0] >= 1:\n runningn = spec[0]\n runningv = vels[0] * spec[0]\n for i in xrange(1,len(spec)):\n if (spec[i] >= 1) & (spec[i-1] == 0):\n # we are starting a new detection\n runningn = spec[i]\n runningv = vels[i] * spec[i]\n if (spec[i] >= 1) & (spec[i-1] >= 1):\n # we are continuing a new detection\n runningn += spec[i]\n runningv += vels[i] * spec[i]\n if (spec[i] == 0) & (spec[i-1] >= 1):\n # we are ending a detection, record it\n l_det.append(l*dl)\n w_det.append(s*dw)\n # need to change w to be an average.\n v_det.append(runningv / runningn)\n # check to see if this is really averaging- if runningn>1, print\n #if runningn > 1:\n # print runningv, runningv/runningn, runningn\n l += 1\n f.close()\n \n# v_det is already in km/s. 
\n# offset by vmin to match the middle plot\nv_det -= np.min(vels)\n\n#convert l_det and w_det to pc\nto_unitary = 1 / 2**lmax * expandfac\nlscale2 = length * to_unitary * 10 * boxlen / l #unit length is 10 pc\nl_det = np.array(l_det)*lscale2\nw_det = np.array(w_det)*lscale2\n#ax.scatter(l_det,v_det,marker='.',s=13,facecolor='0.0',lw=0,alpha=.33)\n#ax.set_xlim(0.001, lscale)\n#ax.set_ylim(np.min(v_det)-.1, np.max(v_det)+.1)\n#ax.set_axis_bgcolor('1.0')\n\ndvel = vels[1] - vels[0]\nvelmin = 2.1\nvelmax = 5.6\nvelbins = np.arange(velmin, velmax, dvel)\ndl = (np.max(l_det) - np.min(l_det))/l\nlbins = np.arange(np.min(l_det), np.max(l_det), dl)\n\nax.tick_params(labelbottom='off')\n\n\nH, xedges, yedges = np.histogram2d(v_det, l_det, bins=(velbins, lbins))\nprint np.min(H), np.max(H), np.mean(H), np.median(H)\nextent = [yedges[0], yedges[-1], xedges[0], xedges[-1]]\nax.imshow(H,\n extent = [0.001, lscale, velmin, velmax],\n origin = 'lower',\n interpolation = 'nearest',\n aspect=.25,\n cmap = 'gray_r',\n vmax = 8\n )\n\nax.get_yaxis().tick_left()\nax.get_xaxis().tick_bottom()\nfor line in ax.xaxis.get_ticklines():\n line.set_color(tc1)\nfor line in ax.yaxis.get_ticklines():\n line.set_color(tc1) \nfor line in ax.yaxis.get_ticklines():\n line.set_color(tc1) \nax.tick_params(which = 'minor', color = tc1) \nax.tick_params(labelbottom='off')\nax.grid(color='0.0',alpha=0.1)\n\nax.set_ylabel(r'$\\mathdefault{v_{los}}$ / km $\\mathdefault{s^{-1}}$', fontproperties = tfm, size = 15, color=tc)\nfor label in ax.get_xticklabels() + ax.get_yticklabels():\n label.set_fontproperties(tfm)\n label.set_color(tc)\n\nplt.savefig('fil0.pdf') \nsys.exit()\n\nplt.savefig('fooppv.png',dpi=200)\nplt.savefig('fooppv.pdf')\n#plt.show()"
},
{
"alpha_fraction": 0.5726903676986694,
"alphanum_fraction": 0.615182638168335,
"avg_line_length": 33.570247650146484,
"blob_id": "ea3d3e3b7f6a65b7bed084a330dc23be187f8a4b",
"content_id": "6462e7d0393e74885ad4baa3b24c64aa255fb6d3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4189,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 121,
"path": "/make_surface_density_pdf.py",
"repo_name": "nickolas1/ramses_plot_scripts",
"src_encoding": "UTF-8",
"text": "from __future__ import division\n\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\nimport gc\nimport sys\nimport h5py\nfrom astropy.io import ascii\nfrom os.path import expanduser\n\n# import ramses helper functions and get figure directory\nhomedir = expanduser('~')+'/'\n\n# import ramses helper functions and get figure directory\nsys.path.append(homedir+'pythonhelpers/ramses/')\nfrom ramses_helpers import *\nmpl.rc_file(homedir+'pythonhelpers/ramses/matplotlibrc')\noutdir = get_output_path(homedir)\n\n# set some fonts\nfontdir = homedir+'Documents/astronomy/macfontsforpython/'\ntfm = fm.FontProperties( # tick font\n fname=fontdir+'Gotham-Book.ttf', size=13)\nlfm = fm.FontProperties( # label font main\n fname=fontdir+'Gotham-BookItalic.ttf', size=12)\n \n\n\n# convert these to \nmu = 2.33 # mean molecular weight\nmH = 1.6733e-24\n\nfor snap in range(int(sys.argv[1]),int(sys.argv[2]),int(sys.argv[3])):\n infoname = 'output_'+str(snap).zfill(5)+'/info_'+str(snap).zfill(5)+'.txt'\n fig = plt.figure(figsize = (5, 3.5))\n ax = fig.add_axes([0.2, 0.2, 0.75, 0.75])\n cdensmax = np.log10(10**2.0 / (mu * mH)) # convert these from g to n\n cdensmin = np.log10(10**-6.0 / (mu * mH))\n bins = 128\n binvals = np.arange(cdensmin, 1.000001*cdensmax, (cdensmax - cdensmin) / (bins))\n binmids = 0.5 * (np.roll(binvals, -1) + binvals)\n binmids = binmids[:len(binmids) - 1]\n\n files = [\n 'surface_density_0.hdf5',\n 'surface_density_1.hdf5',\n 'surface_density_2.hdf5']\n colors = [c1,c2,c3]\n\n for i in xrange(len(files)):\n f = h5py.File('output_'+str(snap).zfill(5)+'/'+files[i], 'r')\n sd = f['surface_density']\n totalhist = np.zeros(bins)\n for j in xrange(sd.shape[0]):\n coldensvals = sd[j]\n coldensvals -= np.log10(mu * mH)\n if j == 600:\n print coldensvals\n print cdensmin,cdensmax\n hist, binedges = np.histogram(coldensvals, range = (cdensmin, cdensmax), bins = binvals)\n totalhist += hist \n f.close() \n print totalhist\n\n ax.plot(10**binmids, totalhist, color = colors[i], linewidth = 1.5, alpha=0.8)\n ax.set_yscale('log')\n ax.set_xscale('log')\n\n ax.set_xlim(5.e18,1.e24)\n ax.set_ylim(1,1.e5)\n\n set_ticks(ax, '0.6')\n ax.xaxis.grid(False,which='minor')\n ax.yaxis.grid(False,which='minor')\n\n plotlim = mpl.xlim() + mpl.ylim()\n print plotlim\n ax.imshow([0,0],[1,1], cmap=mp.cm.Grays, interpolation='bicubic', extent=plotlim)\n\n ax.set_xlabel(r'surface density / $\\mathdefault{cm^{-2}}$', fontproperties = tfm, size = 15)\n #ax.set_ylabel('d', fontproperties = tfm, size = 15)\n ax.set_ylabel(r'volume weighted PDF', fontproperties = tfm, size = 15)\n\n (time, unit_t) = get_time(infoname)\n timeMyr = time * unit_t / 31557600.0 / 1.e6\n horiz = 5.e22\n vert = 2.e4\n ax.text(horiz, vert, r'%.1f' %timeMyr, transform = ax.transData, \n ha = 'right',va = 'baseline', fontproperties = lfm, color = c1, snap = False)\n ax.text(1.1*horiz, vert, r'Myr', transform = ax.transData,\n ha = 'left', va = 'baseline', fontproperties = lfm, color = c1, snap = False)\n\n for label in ax.get_xticklabels() + ax.get_yticklabels():\n label.set_fontproperties(tfm)\n\n plt.savefig(outdir+'SurfaceDensities/SurfaceDensityPDF_'+str(snap).zfill(5)+'.png') \n plt.clf()\n \n res = (1080,1920)\n fig = plt.figure(figsize = (res[1]/200, res[0]/200), dpi=200)\n\n i = 0\n f = h5py.File('output_'+str(snap).zfill(5)+'/'+files[i], 'r')\n sd = f['surface_density']\n fig = plt.figure(figsize = (sd.shape[0]/200,sd.shape[0]/200), dpi=200)\n ax = 
fig.add_axes([0.,0.,1.,1.])\n cdmin = -2.797 #-4.097\n cdmax = -.193\n ax.imshow(sd,\n interpolation = 'nearest',\n origin = 'lower',\n vmin = cdmin,\n vmax = cdmax,\n cmap = 'gray_r')\n ax.set_frame_on(False)\n ax.axes.get_yaxis().set_visible(False)\n ax.axes.get_xaxis().set_visible(False)\n framename = outdir+'framesD/framefoo_'+str(snap).zfill(4)+'.png'\n plt.savefig(framename, dpi = 200)\n\n \n"
},
{
"alpha_fraction": 0.6071716547012329,
"alphanum_fraction": 0.6240934729576111,
"avg_line_length": 29.962499618530273,
"blob_id": "5c59c5d5b99688a376f08bae9f46762344d1621b",
"content_id": "b2d391c2d398add315e4d0bd3c948c7a5e92ad7c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2482,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 80,
"path": "/turbgravfilaments/make_reduced_data_turbgrav.py",
"repo_name": "nickolas1/ramses_plot_scripts",
"src_encoding": "UTF-8",
"text": "from __future__ import division\n\nfrom yt.mods import *\nfrom yt.config import ytcfg\nimport gc\nimport sys\nimport h5py\nimport shutil\nfrom os.path import expanduser\n\n# import ramses helper functions and get figure directory\nhomedir = expanduser('~')+'/'\n\n# import ramses helper functions and get figure directory\nsys.path.append(homedir+'pythonhelpers/ramses/')\nfrom ramses_helpers import *\n\nfor snap in range(int(sys.argv[1]),int(sys.argv[2]),int(sys.argv[3])):\n infoname = 'output_'+str(snap).zfill(5)+'/info_'+str(snap).zfill(5)+'.txt'\n sinkname = 'output_'+str(snap).zfill(5)+'/sink_'+str(snap).zfill(5)+'.out'\n sinkname2 = 'output_'+str(snap).zfill(5)+'/sink_'+str(snap).zfill(5)+'.csv'\n \n fileprefix = 'reduced_'+str(snap).zfill(5)+'/'\n\n if not os.path.exists(fileprefix):\n os.makedirs(fileprefix)\n \n # copy the infofile and sinkfile to the reduced directory \n shutil.copy(infoname, fileprefix)\n if os.path.exists(sinkname):\n shutil.copy(sinkname, fileprefix)\n shutil.copy(sinkname2, fileprefix)\n \n (lmin, lmax) = get_level_min_max(infoname)\n (boxlen, unit_l) = get_boxsize(infoname)\n\n ds = load(infoname, fields=['Density','x-velocity','y-velocity','z-velocity','Pressure'])\n \n # center on original center of cloud\n cntr = [0.5, 0.5, 0.5]\n\n wd = 1.0\n # res should be base resolution times 2**levels of refinement * wd\n lmaxplot = min(11, lmax) \n resx = int(wd * 2**lmaxplot)\n res = (resx,resx)\n ht = wd * res[0] / res[1]\n width = (wd, 'unitary')\n height = (ht, 'unitary')\n \n \n for i in range(3):\n # get projection in each direction\n proj = ds.h.proj('Density', i)\n frb = proj.to_frb(width, res, center = cntr, height = height)\n filename = fileprefix+'surface_density_'+str(i)+'.hdf5'\n f = h5py.File(filename, 'w')\n dset = f.create_dataset('surface_density', data = np.log10(frb['Density']))\n f.close()\n del(f)\n del(dset)\n del(frb)\n del(proj)\n\tgc.collect()\n \n ad = ds.h.all_data()\n \n nbins = 128\n dmin = 1.e-27\n dmax = 1.e-17\n profilename = fileprefix+'MassAndVolumeInDensityBins.dat'\n profile = BinnedProfile1D(ad,nbins,'Density',dmin,dmax,end_collect=True)\n profile.add_fields(\"CellMassMsun\", weight=None)\n profile.add_fields(\"CellVolume\", weight=None)\n profile.write_out(profilename)\n \n del(ds)\n del(ad)\n del(profile)\n gc.collect()\n \n"
},
{
"alpha_fraction": 0.5996350646018982,
"alphanum_fraction": 0.6291970610618591,
"avg_line_length": 31.547618865966797,
"blob_id": "33022631ab5edfec137a2c29a1997086f351f8f9",
"content_id": "60ae427f23b34cf2d2337fe7377f83b7760f7b12",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2740,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 84,
"path": "/make_sink_mass_plot.py",
"repo_name": "nickolas1/ramses_plot_scripts",
"src_encoding": "UTF-8",
"text": "from __future__ import division\n\nfrom yt.mods import *\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\nimport gc\nimport sys\n\n# import ramses helper functions and get figure directory\nhomedir = '/home/moon/moeckel/'\nsys.path.append(homedir+'pythonhelpers/ramses/')\nfrom ramses_helpers import *\nmpl.rc_file(homedir+'pythonhelpers/ramses/matplotlibrc')\noutdir = get_output_path(homedir)\n\n# set some fonts\nfontdir = '/home/moon/moeckel/Documents/astronomy/macfontsforpython/'\ntfm = fm.FontProperties( # tick font\n fname=fontdir+'Gotham-Book.ttf', size=13)\n\nfig = plt.figure(figsize = (5,3.5))\n\nax = fig.add_axes([0.2, 0.15, 0.75, 0.8])\n#ax = fig.add_axes([0., 0., 1., 1.])\n\ntimes = [0.0]\nsinkmasses = [0.0]\n\nindivnames = []\nindivmasses = []\nindivtimes = []\n\nfor snap in range(int(sys.argv[1]),int(sys.argv[2]),1):\n sinkname = 'output_'+str(snap).zfill(5)+'/sink_'+str(snap).zfill(5)+'.out'\n infoname = 'output_'+str(snap).zfill(5)+'/info_'+str(snap).zfill(5)+'.txt'\n # see if we have any sink particles to plot\n try:\n with open(sinkname): \n sinks = get_sinks(sinkname)\n if len(sinks) > 0:\n sinkmass = sinks[:,1].sum() # total mass of sinks in Msun\n (time, unit_t) = get_time(infoname)\n\t\ttimeMyr = time * unit_t / 31557600.0 / 1.e6\n\t\tprint snap, timeMyr, sinkmass\n times.append(timeMyr)\n sinkmasses.append(sinkmass)\n timerow = np.ones((sinks.shape[0],1)) * timeMyr\n # nmt = name, mass, time\n nmt = np.hstack((sinks[:,[0,1]],timerow))\n indivnames.append(nmt[:,0])\n indivmasses.append(nmt[:,1])\n indivtimes.append(nmt[:,2])\n except IOError:\n pass \n\n# flatten lists of individual sink properties, stick in an array\nindivnames = [j for i in indivnames for j in i]\nindivmasses = [j for i in indivmasses for j in i] \nindivtimes = [j for i in indivtimes for j in i] \nnmt = np.array([indivnames, indivmasses, indivtimes])\n\n# plot total sink mass\nax.plot(times, sinkmasses, color = '0.2', linewidth = 1.5)\n\n# plot individual sink masses\nfor i in xrange(int(max(nmt[0,:]))):\n thisone = nmt[:,nmt[0,:] == i]\n ax.plot(thisone[2,:], thisone[1,:], color = '0.2', linewidth = 1.25, alpha = 0.5)\n \nax.set_yscale('log')\nax.set_xlim(-.05,7.05)\nax.set_ylim(1,5000)\nset_ticks(ax, '0.6')\n\nax.set_xlabel('time / Myr', fontproperties = tfm, size = 15)\nax.set_ylabel('total sink mass / '+r'M${_\\odot}$', fontproperties = tfm, size = 15)\n\nfor label in ax.get_xticklabels() + ax.get_yticklabels():\n label.set_fontproperties(tfm)\n\n \nplt.savefig(outdir+'totalsinkmass.pdf')\n\n \n"
},
{
"alpha_fraction": 0.6563257575035095,
"alphanum_fraction": 0.6820164918899536,
"avg_line_length": 22.86046600341797,
"blob_id": "be5c5e8cb7545d49e1ed27693d4980a5af73daa4",
"content_id": "3920d99d9ef8dc363623a8208233e686c42cbf1b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2063,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 86,
"path": "/turbgravfilaments2/make_spectra_N2Hplus_fits.py",
"repo_name": "nickolas1/ramses_plot_scripts",
"src_encoding": "UTF-8",
"text": "from __future__ import division\n\n\nimport numpy as np\nimport sys\nimport h5py\nimport shutil\nimport glob\nfrom astropy.io import fits\nfrom os.path import expanduser\n\n\"\"\"\nusage:\npython fooppv.py N A F\nN: number of output to use. reduced_N needs to be here.\nA: axis of the projection (0, 1, 2)\n\"\"\"\n\n# import ramses helper functions and get figure directory\nhomedir = expanduser('~')+'/'\n\n# import ramses helper functions and get figure directory\nsys.path.append(homedir+'pythonhelpers/ramses/')\nfrom ramses_helpers import *\n\n\n# downsample the spectra to a 256 squared grid\ninres = 1024\n\nsnap = int(sys.argv[1])\naxis = 2\nfileprefix = 'reduced_'+str(snap).zfill(5)+'/'\n\ninfofile = fileprefix+'info_'+str(snap).zfill(5)+'.txt'\n(time, unit_t) = get_time(infofile)\ntime_kyr = time * unit_t / (3.15569e7 * 1000)\ntimestr = str(int(round(time_kyr))).zfill(4)\nprint timestr\n\n\nf = h5py.File(fileprefix+'posvel_'+str(axis)+'/spectrumvelsN2Hplus.hdf5')\nvels = np.array(f['binmidskms'])\nf.close()\n\noutcube = np.zeros([inres, inres, len(vels)], dtype=np.float32)\n\n#\n# j is image up\n# |\n# | \n# |_______i is image right\n#\n# spectra files march along j\n#\n\n### NOTE\n# the hdf5 files were created by summing along velocity bins rather than integratig\n# along them. we need to multiply them all by 1 / bin width, which is 0.05 km/s!\n\nbinwidth = 0.05\n\nfilelist = glob.glob(fileprefix+'posvel_'+str(axis)+'/spectra_N2Hplus_*.hdf5')\nprint filelist[0]\n\ntotnonzero = 0\nspecfile = filelist[0]\nf = h5py.File(specfile)\nspecs = f['spectraN2Hplus']\nzerospec = np.zeros(specs.shape)\nf.close()\n\nfor inj in xrange(inres):\n specfile = fileprefix+'posvel_'+str(axis)+'/spectra_N2Hplus_'+str(inj).zfill(4)+'.hdf5'\n if os.path.isfile(specfile):\n f = h5py.File(specfile)\n specs = np.array(f['spectraN2Hplus']) / binwidth\n print specfile\n else:\n specs = zerospec\n outcube[:, inj, :] = specs\n if os.path.isfile(specfile):\n f.close()\n \nhdu = fits.PrimaryHDU(outcube)\nhdulist = fits.HDUList([hdu])\nhdulist.writeto('spectra.N2H+.'+timestr+'.fits')\n\n \n\n \n"
},
{
"alpha_fraction": 0.6173800230026245,
"alphanum_fraction": 0.6383484601974487,
"avg_line_length": 32.244606018066406,
"blob_id": "f05e4006a57b3bdd03883b1021f5bb59d1af1723",
"content_id": "1ff4e60129980ed51cfc2faf488e8229a1398d78",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4626,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 139,
"path": "/make_density_breakdown.py",
"repo_name": "nickolas1/ramses_plot_scripts",
"src_encoding": "UTF-8",
"text": "from __future__ import division\n\nfrom yt.mods import *\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\nimport gc\nimport sys\nimport h5py\nimport shutil\nfrom astropy.io import ascii\nfrom os.path import expanduser\n\n# import ramses helper functions and get figure directory\nhomedir = expanduser('~')+'/'\n\n# import ramses helper functions and get figure directory\nsys.path.append(homedir+'pythonhelpers/ramses/')\nfrom ramses_helpers import *\n\nhilim = 10**-19.5\nlolim = 10**-21.5\n\nhilim = 10**-21.5\nlolim = 10**-22.5\n\ndef _Highdens(field, data):\n newfield = data['Density']\n antiselection = data['Density'] < hilim\n newfield[antiselection] = 1.e-99\n return newfield\n\ndef _Middens(field, data):\n newfield = data['Density']\n antiselection = (data['Density'] < lolim) | (data['Density'] >= hilim)\n newfield[antiselection] = 1.e-99\n return newfield\n\ndef _Lowdens(field, data):\n newfield = data['Density']\n antiselection = data['Density'] >= lolim\n newfield[antiselection] = 1.e-99\n return newfield\n\nfor snap in range(int(sys.argv[1]),int(sys.argv[2]),int(sys.argv[3])):\n infoname = 'output_'+str(snap).zfill(5)+'/info_'+str(snap).zfill(5)+'.txt'\n sinkname = 'output_'+str(snap).zfill(5)+'/sink_'+str(snap).zfill(5)+'.out'\n \n fileprefix = 'reduced_'+str(snap).zfill(5)+'/'\n\n if not os.path.exists(fileprefix):\n os.makedirs(fileprefix)\n \n # copy the infofile and sinkfile to the reduced directory \n shutil.copy(infoname, fileprefix)\n if os.path.exists(sinkname):\n shutil.copy(sinkname, fileprefix)\n \n (lmin, lmax) = get_level_min_max(infoname)\n (boxlen, unit_l) = get_boxsize(infoname)\n\n pf = load(infoname, fields=['Density','x-velocity','y-velocity','z-velocity','Pressure'])\n \n add_field('Highdens', function=_Highdens)\n add_field('Middens', function=_Middens)\n add_field('Lowdens', function=_Lowdens)\n \n # center on original center of cloud\n cntr = [0.5, 0.5, 0.5]\n\n wd = 0.625 # this is messed up- figure it out. 
yt might not get size right.\n wd = 1.0\n # res should be base resolution times 2**levels of refinement * wd\n resx = int(wd * 2**lmax)\n res = (resx,resx)\n ht = wd * res[0] / res[1]\n width = (wd, 'unitary')\n height = (ht, 'unitary')\n \n for i in range(1):\n # get projection in each direction\n #proj = pf.h.proj('Density', i)\n proj = pf.h.proj('Highdens', i)\n frb = proj.to_frb(width, res, center = cntr, height = height)\n filename = fileprefix+'surface_density_high_'+str(i)+'.hdf5'\n f = h5py.File(filename, 'w')\n dset = f.create_dataset('surface_density', data = np.log10(frb['Highdens']))\n f.close()\n \n proj = pf.h.proj('Middens', i)\n frb = proj.to_frb(width, res, center = cntr, height = height)\n filename = fileprefix+'surface_density_mid_'+str(i)+'.hdf5'\n f = h5py.File(filename, 'w')\n dset = f.create_dataset('surface_density', data = np.log10(frb['Middens']))\n f.close()\n \n proj = pf.h.proj('Lowdens', i)\n frb = proj.to_frb(width, res, center = cntr, height = height)\n filename = fileprefix+'surface_density_low_'+str(i)+'.hdf5'\n f = h5py.File(filename, 'w')\n dset = f.create_dataset('surface_density', data = np.log10(frb['Lowdens']))\n f.close()\n \n \n if boxlen > 7:\n sphererad = 27.0\n else:\n sphererad = 13.5\n spherevol = 4.0 * np.pi / 3.0 * (sphererad * 3.086e18)**3\n sp = pf.h.sphere(cntr, (sphererad, 'pc'))\n \n nbins = 128\n dmin = 1.e-25\n dmax = 1.e-17\n profilename = fileprefix+'MassAndVolumeInDensityBins_High.dat'\n profile = BinnedProfile1D(sp,nbins,'Highdens',dmin,dmax,end_collect=True)\n profile.add_fields(\"CellMassMsun\", weight=None)\n profile.add_fields(\"CellVolume\", weight=None)\n profile.write_out(profilename)\n del(profile)\n \n profilename = fileprefix+'MassAndVolumeInDensityBins_Mid.dat'\n profile = BinnedProfile1D(sp,nbins,'Middens',dmin,dmax,end_collect=True)\n profile.add_fields(\"CellMassMsun\", weight=None)\n profile.add_fields(\"CellVolume\", weight=None)\n profile.write_out(profilename)\n del(profile)\n \n profilename = fileprefix+'MassAndVolumeInDensityBins_Low.dat'\n profile = BinnedProfile1D(sp,nbins,'Lowdens',dmin,dmax,end_collect=True)\n profile.add_fields(\"CellMassMsun\", weight=None)\n profile.add_fields(\"CellVolume\", weight=None)\n profile.write_out(profilename)\n \n del(frb)\n del(pf)\n del(profile)\n gc.collect()\n \n"
},
{
"alpha_fraction": 0.5863776803016663,
"alphanum_fraction": 0.6076956987380981,
"avg_line_length": 34.97452163696289,
"blob_id": "dcbc54301c83220b19e9faf61ad2d0611d475fc5",
"content_id": "87fb828c97e301a53712efbf68cc3e994d7c16de",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11305,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 314,
"path": "/turbgravfilaments/plot_CO_vel_detections2.py",
"repo_name": "nickolas1/ramses_plot_scripts",
"src_encoding": "UTF-8",
"text": "from __future__ import division\n\nfrom yt.mods import *\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\nimport gc\nimport sys\nimport h5py\nimport shutil\nimport glob\nfrom astropy.io import ascii\nfrom astropy import constants as const\nfrom astropy import units as u\nfrom os.path import expanduser\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\"\"\"\nusage:\npython fooppv.py N A F\nN: number of output to use. reduced_N needs to be here.\nA: axis of the projection (0, 1, 2)\nF: filament number \n\"\"\"\n\n# import ramses helper functions and get figure directory\nhomedir = expanduser('~')+'/'\n\n# import ramses helper functions and get figure directory\nsys.path.append(homedir+'pythonhelpers/ramses/')\nfrom ramses_helpers import *\nmpl.rc_file(homedir+'pythonhelpers/ramses/matplotlibrc')\noutdir = get_output_path(homedir)\n\nmpl.rcParams['xtick.major.size'] = 9\nmpl.rcParams['axes.unicode_minus'] = False\n\ntc = '0.5'\ntc1 = '0.9'\n\n# set some fonts\nfontdir = homedir+'Documents/astronomy/macfontsforpython/'\ntfm = fm.FontProperties( # tick font\n fname=fontdir+'Gotham-Book.ttf', size=13)\nlfm = fm.FontProperties( # label font main\n fname=fontdir+'Gotham-BookItalic.ttf', size=12)\n\nfig = plt.figure(figsize = (7, 3))\n\nsnap = int(sys.argv[1])\naxis = int(sys.argv[2])\nfileprefix = 'reduced_'+str(snap).zfill(5)+'/'\n\ninfoname = fileprefix+'info_'+str(snap).zfill(5)+'.txt'\n(boxlen, unit_l) = get_boxsize(infoname)\nprint boxlen, unit_l\nunitarytopc = boxlen * unit_l / const.pc.cgs.value\n\n# read in the rectangle from the filament definition\nrectdata = ascii.read(fileprefix+'filaments'+str(axis)+'_'+str(snap).zfill(5)+'.txt')\nfor fil in rectdata:\n filnum = fil[0]\n leftpoint = np.array([fil[1], fil[2]])\n rightpoint = np.array([fil[3], fil[4]]) \n width = fil[5] \n vector = rightpoint - leftpoint\n length = np.linalg.norm(vector)\n orthovec = (-vector[1], vector[0])\n orthovec /= np.linalg.norm(orthovec)\n vector /= np.linalg.norm(vector)\n x = (leftpoint[0], rightpoint[0])\n y = (leftpoint[1], rightpoint[1])\n ul = leftpoint + orthovec * width/2\n ll = leftpoint - orthovec * width/2\n ur = rightpoint + orthovec * width/2\n lr = rightpoint - orthovec * width/2\n rectangle = np.transpose([ul, ll, lr, ur, ul])\n # this rectangle is in unitary units.\n \n velmin = 1.0 * fil[6]\n velmax = 1.0 * fil[7]\n veltickspace = 1.0 * fil[8]\n print velmin, velmax, veltickspace\n \n print 'lower left corner: ',ll\n print 'upper right corner: ',ur\n\n # we will move along the lower-left line of the rectangle, starting from startpoint \n # and ending at endpoint \n startpoint = leftpoint - orthovec * width/2\n endpoint = rightpoint - orthovec * width/2\n print 'startpoint, endpoint: ',startpoint,endpoint\n \n # the box size is 10 pc. 
convert the length to physical scale\n lscale = length * unitarytopc\n \n if length * unitarytopc < 3:\n xtickspan = 0.5\n else:\n xtickspan = 1.0\n xtickvals = np.arange(0, length * unitarytopc, xtickspan)\n\n \"\"\"\n this section makes a resampled, rotated column density map of the filament\n that we're dealing with.\n \"\"\"\n plotSurfaceDensity = False\n if plotSurfaceDensity:\n ax0 = fig.add_axes([0.15, 0.1, 0.8, 0.8])\n # read in the surface density file and figure out the spatial resolution\n file = fileprefix+'surface_density_CO_'+str(axis)+'.hdf5'\n print snap,file\n f = h5py.File(file, 'r')\n sd = f['surface_density_CO']\n\n sd = 10**np.array(sd) # convert to linear units\n print np.mean(sd),np.max(sd)\n sd /= (2.33 * const.m_p.cgs.value) # convert to number density\n print np.mean(sd),np.max(sd)\n sd /= (2.42e14 * 2.94e6) # non-temperature factors of IC18O conversion\n sd /= (np.exp(5.27/10) / (np.exp(5.27/10) - 1)) # temperature part\n print np.mean(sd),np.max(sd)\n cdmin = 0\n cdmax = 5\n\n res_sd = sd.shape[1]\n dres_sd = 1 / res_sd\n \n expandfac = 12 # resample the surface density map to make off-axis pixels look nice\n nl = int(length * res_sd * expandfac)\n nw = int(width * res_sd * expandfac)\n dl = length / nl\n dw = width / nw\n print 'nl, nw: ',nl, nw\n \n # set up empty array for the resampled surface density map\n subbox = np.zeros([nw, nl])\n \n for il in xrange(nl):\n l = startpoint + vector * dl * (il + 0.5)\n for iw in xrange(nw):\n pt = l + orthovec * dw * (iw + 0.5)\n pt /= dres_sd\n # the sd array and imshow have different row-column ordering \n # convention than everything else in the world, including the \n # coordinates that we use to define the rectangles. so the \n # ordering of the points here is reversed.\n subbox[iw, il] = sd[int(pt[1]),int(pt[0])]\n f.close() # close the surface density file\n print np.min(subbox), np.max(subbox)\n \n imshowmap = 'nickmapVD2'\n ax0.imshow(subbox,\n origin='lower',\n extent = [0, lscale, 0, lscale*width/length],\n vmin = cdmin,\n vmax = cdmax,\n cmap = imshowmap,\n interpolation = 'nearest')\n \n if length * unitarytopc < 3:\n xtickspan = 0.5\n else:\n xtickspan = 1.0\n ax0.xaxis.set_ticks(xtickvals)\n ax0.yaxis.set_ticks(np.arange(0, width * unitarytopc, 0.2))\n\n ax0.get_yaxis().tick_left()\n for line in ax0.xaxis.get_ticklines():\n line.set_color(tc1)\n for line in ax0.yaxis.get_ticklines():\n line.set_color(tc1) \n for line in ax0.yaxis.get_ticklines():\n line.set_color(tc1) \n ax0.tick_params(which = 'minor', color = tc1) \n ax0.grid(False)\n \n ax0.set_xlabel(r'$\\mathdefault{L_{fil}}$ / pc', fontproperties = tfm, size = 15, color=tc)\n ax0.set_ylabel(r'$\\mathdefault{W_{fil}}$ / pc', fontproperties = tfm, size = 15, color=tc)\n for label in ax0.get_xticklabels() + ax0.get_yticklabels():\n label.set_fontproperties(tfm)\n label.set_color(tc)\n \n plt.savefig('filsd'+str(filnum)+'.png',dpi=400) \n plt.savefig('filsd'+str(filnum)+'.pdf',dpi=400) \n plt.clf()\n \n \"\"\"\n this section makes a plot of the density-weighted line of site velocity\n along the filament. 
the velocity is summed along the W filament direction.\n \"\"\"\n ax0 = fig.add_axes([0.15, 0.04, 0.8, 0.8])\n f = h5py.File(fileprefix+'posvel_'+str(axis)+'/spectrumvels.hdf5')\n vels = np.array(f['binmidskms'])\n f.close()\n print 'length of spectra: ',len(vels)\n specfile = fileprefix+'posvel_'+str(axis)+'/spectra_0000.hdf5'\n f = h5py.File(specfile)\n specs = f['spectra']\n res_spec = specs.shape[0]\n dres_spec = 1 / res_spec\n f.close()\n \n # read in the detections file and figure out the spatial resolution of the spectra data\n # NOTE this is [z, y, [vels]]\n #f = h5py.File(fileprefix+'posvel_'+str(axis)+'/detections.hdf5')\n #dets = np.array(f['veldetections'])\n #f.close()\n #res_spec = dets.shape[0]\n #dres_spec = 1 / res_spec\n \n # get index limits of the box\n zlo = int(np.floor(min(ll[1], lr[1]) / dres_spec))\n zhi = int(np.ceil(max(ul[1], ur[1]) / dres_spec))\n ylo = int(np.floor(min(ll[0], ul[0]) / dres_spec))\n yhi = int(np.ceil(max(lr[0], ur[0]) / dres_spec))\n spany = yhi - ylo\n spanz = zhi - zlo\n print 'untransformed box spany, spanz: ',spany, spanz\n print zlo, zhi\n \n \n # for the distance to move in each step, choose a step size closest to the number of\n # grid cells (at the finest refinement level) it would take to traverse the box \n expandfac = 4 # resample the surface density map\n nl = int(length * res_spec * expandfac)\n nw = int(width * res_spec * expandfac)\n dl = length / nl\n dw = width / nw \n print 'nl, nw: ',nl, nw\n \n \n # set up the array for the untransformed spectra\n specfield = np.zeros([spany, spanz, len(vels)]) \n \n # get all of the spectra in this box into an array for easy access\n for i in xrange(zlo, zhi, 1):\n specfile = fileprefix+'posvel_'+str(axis)+'/spectra_'+str(i).zfill(4)+'.hdf5'\n f = h5py.File(specfile)\n specs = f['spectra']\n \n for s in xrange(ylo, yhi, 1):\n spec = np.array(specs[s])\n specfield[s-ylo, i-zlo] = spec\n f.close() \n \n # set up an empty array of spectra along the length of the filament\n transspec = np.zeros([nl, len(vels)])\n \n # march along the length and collect spectra along width\n for il in xrange(nl):\n l = startpoint + vector * dl * (il + 0.5)\n for iw in xrange(nw):\n pt = l + orthovec * dw * (iw + 0.5)\n # pt is now a y-z position. convert this to coordinates \n pt /= dres_spec\n iy = int(pt[0])\n iz = int(pt[1])\n #print specfield[iy-ylo][iz-zlo]\n transspec[il,:] += specfield[iy-ylo][iz-zlo]\n # print specfield[iy-ylo][iz-zlo]\n #print transspec[il,:]\n # set up the length and velocity bins, and make the histogram\n # use length / dres to make length bins. i.e. the subsampling \n # used to march along l only serves to weight each pixel appropriately\n # in the transformed space- plot at the same resolution as the simulation.\n \n dvel = vels[1] - vels[0]\n dvel *= 1\n # velmin = -2\n # velmax = 2\n transspecsub = transspec[:,(vels >= velmin) & (vels <= velmax)] \n\n\n transspecsub /= np.max(transspecsub)\n print np.min(transspecsub), np.max(transspecsub), np.median(transspecsub), np.mean(transspecsub)\n transspecsub_nonzero = transspecsub[transspecsub > 0.01]\n print np.min(transspecsub_nonzero), np.median(transspecsub_nonzero), np.max(transspecsub_nonzero)\n ax0.imshow(transspecsub.T,\n extent = [0, lscale, velmin, velmax],\n origin = 'lower',\n interpolation = 'nearest',\n aspect = 'auto',\n cmap = 'nickmapSD2',\n vmin = 0.01,\n vmax = min(1.0, 8. 
* np.median(transspecsub_nonzero))\n # cmap = 'gray_r',\n # vmin = -1,\n # vmax = 0\n )\n ax0.xaxis.set_ticks(xtickvals)\n velticksvals = np.union1d(np.arange(0, velmin, -veltickspace), np.arange(0, velmax, veltickspace))\n ax0.yaxis.set_ticks(velticksvals)\n \n ax0.get_yaxis().tick_left()\n ax0.get_xaxis().tick_bottom()\n for line in ax0.xaxis.get_ticklines():\n line.set_color(tc1)\n for line in ax0.yaxis.get_ticklines():\n line.set_color(tc1) \n for line in ax0.yaxis.get_ticklines():\n line.set_color(tc1) \n ax0.tick_params(which = 'minor', color = tc1) \n ax0.tick_params(labelbottom='off')\n ax0.grid(color='0.0',alpha=0.1)\n \n ax0.set_ylabel(r'$\\mathdefault{v_{los}}$ / km $\\mathdefault{s^{-1}}$', fontproperties = tfm, size = 15, color=tc)\n for label in ax0.get_xticklabels() + ax0.get_yticklabels():\n label.set_fontproperties(tfm)\n label.set_color(tc)\n \n plt.savefig('filvel'+str(filnum)+'.png',dpi=400)\n plt.savefig('filvel'+str(filnum)+'.pdf')\n \n\n "
},
{
"alpha_fraction": 0.4983270466327667,
"alphanum_fraction": 0.5512337684631348,
"avg_line_length": 37.44355010986328,
"blob_id": "35ccd6d54c703cb8323574e0ef1a78323bdd66e3",
"content_id": "74702a1fa625562a4338284cb764a6db4b2f746d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4782,
"license_type": "no_license",
"max_line_length": 148,
"num_lines": 124,
"path": "/make_intercluster_energy_plot.py",
"repo_name": "nickolas1/ramses_plot_scripts",
"src_encoding": "UTF-8",
"text": "from __future__ import division\n\nfrom yt.mods import *\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\nimport gc\nimport sys\nfrom os.path import expanduser\n\n# import ramses helper functions and get figure directory\nhomedir = '/home/moon/moeckel/'\nhomedir = expanduser('~')+'/'\nprint homedir\nsys.path.append(homedir+'pythonhelpers/ramses/')\nfrom ramses_helpers import *\n#mpl.rc_file(homedir+'pythonhelpers/ramses/matplotlibrc')\noutdir = get_output_path(homedir)\n\n# set some fonts\nfontdir = homedir+'Documents/astronomy/macfontsforpython/'\ntfm = fm.FontProperties( # tick font main\n fname=fontdir+'Gotham-Book.ttf', size=13) \nlfm = fm.FontProperties( # label font main\n fname=fontdir+'Gotham-BookItalic.ttf', size=11) \n\n# choose dark or light background\nbackground = 'light'\n\nif background == 'dark':\n imshowmap = 'gray'\n textcolor = c3l\n textalpha = 0.5\nif background == 'light':\n imshowmap = 'gray_r'\n textcolor = textlightbg\n textalpha = 0.7\n \ndef get_total_energies(sinks):\n grav = 6.67e-8\n nsink = len(sinks)\n pot = 0.0\n kin = 0.5 * sinks[nsink - 1, 1] * np.sum(sinks[nsink - 1, 5:8]**2)\n for i in xrange(nsink - 1):\n imass = sinks[i, 1]\n ipos = sinks[i, 2:5]\n ivel = sinks[i, 5:8]\n kin += 0.5 * imass * np.sum(ivel**2) \n for j in xrange(i+1, nsink):\n jmass = sinks[j, 1]\n jpos = sinks[j, 2:5]\n dr = np.sqrt(np.sum((ipos - jpos)**2))\n pot -= grav * imass * jmass / dr\n return(kin, pot)\n\ndef get_individual_energies(sinks):\n grav = 6.67e-8\n nsink = len(sinks)\n pots = 0.0\n kins = 0.5 * sinks[nsink - 1, 1] * np.sum(sinks[nsink - 1, 5:8]**2)\n for i in xrange(nsink - 1):\n imass = sinks[i, 1]\n ipos = sinks[i, 2:5]\n ivel = sinks[i, 5:8]\n kin += 0.5 * imass * np.sum(ivel**2) \n for j in xrange(i+1, nsink):\n jmass = sinks[j, 1]\n jpos = sinks[j, 2:5]\n dr = np.sqrt(np.sum((ipos - jpos)**2))\n pot -= grav * imass * jmass / dr\n return(kin, pot)\n \nfig = plt.figure(figsize = (9, 3))\nax1 = fig.add_axes([.05, .05, .27, .9])\nax2 = fig.add_axes([.37, .05, .27, .9])\nax3 = fig.add_axes([.69, .05, .27, .9])\n\nfor snap in range(int(sys.argv[1]),int(sys.argv[2]),int(sys.argv[3])):\n infoname = 'output_'+str(snap).zfill(5)+'/info_'+str(snap).zfill(5)+'.txt'\n sinkname = 'output_'+str(snap).zfill(5)+'/sink_'+str(snap).zfill(5)+'.out'\n framename = outdir+'framesD/frame_'+str(snap).zfill(4)+'.png'\n \n # see if we have any sink particles\n try:\n with open(sinkname): \n sinks = get_sinks(sinkname)\n if len(sinks) > 0: \n # move everything to center of momentum\n rcm = np.sum(sinks[:,1,np.newaxis] * sinks[:,2:5], axis=0) / np.sum(sinks[:,1])\n vcm = np.sum(sinks[:,1,np.newaxis] * sinks[:,5:8], axis=0) / np.sum(sinks[:,1]) \n sinks[:,2:5] -= rcm\n sinks[:,5:8] -= vcm\n \n # convert everything to cgs\n # masses are in Msun, distances in 10pc, vels in kms\n sinks[:,1] *= 1.99e33\n sinks[:,2:5] *= 10 * 3.086e18\n sinks[:,5:8] *= 1.e5\n \n (totalkin, totalpot) = get_total_energies(sinks)\n print totalkin, totalpot, totalkin / np.abs(totalpot)\n ax1.scatter(sinks[:,2],sinks[:,3],marker='.',s=5,facecolor=csink,edgecolor=csink)\n ax2.scatter(sinks[:,2],sinks[:,4],marker='.',s=5,facecolor=csink,edgecolor=csink)\n ax3.scatter(sinks[:,3],sinks[:,4],marker='.',s=5,facecolor=csink,edgecolor=csink)\n \n # add one Myr tracks\n sinks2 = sinks.copy()\n Myrs = 1\n sinks2[:,2:5] += sinks[:,5:8] * Myrs * 1.e6 * 31557600.0\n for i in xrange(len(sinks)):\n 
ax1.arrow(sinks[i,2],sinks[i,3],sinks2[i,2]-sinks[i,2],sinks2[i,3]-sinks[i,3],lw=0.4,head_width=3.e17,length_includes_head=True)\n ax2.arrow(sinks[i,2],sinks[i,4],sinks2[i,2]-sinks[i,2],sinks2[i,4]-sinks[i,4],lw=0.4,head_width=3.e17,length_includes_head=True)\n ax3.arrow(sinks[i,3],sinks[i,4],sinks2[i,3]-sinks[i,3],sinks2[i,4]-sinks[i,4],lw=0.4,head_width=3.e17,length_includes_head=True)\n except IOError:\n pass \n \n ax1.set_xlim(-3.e19,3.e19)\n ax1.set_ylim(-3.e19,3.e19) \n ax2.set_xlim(-3.e19,3.e19)\n ax2.set_ylim(-3.e19,3.e19) \n ax3.set_xlim(-3.e19,3.e19)\n ax3.set_ylim(-3.e19,3.e19) \n plt.savefig(outdir+'sinksfoo.png', dpi = 200)\n \n\n \n"
},
{
"alpha_fraction": 0.6641221642494202,
"alphanum_fraction": 0.6820980310440063,
"avg_line_length": 25.376623153686523,
"blob_id": "b2b5af3c0c686b200e008ee489e0799d30d0b199",
"content_id": "304de9a391d54d4b0ad831958f048f053013751f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4061,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 154,
"path": "/foo.py",
"repo_name": "nickolas1/ramses_plot_scripts",
"src_encoding": "UTF-8",
"text": "from __future__ import division\n\nfrom yt.mods import *\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\nimport gc\nimport sys\nimport h5py\nimport shutil\nimport glob\nfrom astropy.io import ascii\nfrom os.path import expanduser\n\n# import ramses helper functions and get figure directory\nhomedir = expanduser('~')+'/'\n\n# import ramses helper functions and get figure directory\nsys.path.append(homedir+'pythonhelpers/ramses/')\nfrom ramses_helpers import *\nmpl.rc_file(homedir+'pythonhelpers/ramses/matplotlibrc')\noutdir = get_output_path(homedir)\n\n\nfig = plt.figure(figsize = (5, 3.5))\nax = fig.add_axes([0.2, 0.2, 0.75, 0.75])\n\nsnap = int(sys.argv[1])\naxis = int(sys.argv[2])\nfilnumber = int(sys.argv[3])-1\n\nprefix = 'reduced_'+str(snap).zfill(5)+'/'\ninfoname = prefix+'info_'+str(snap).zfill(5)+'.txt'\nfilprefix = prefix+'filament_'+str(filnumber)+'/'\nfilname = prefix+'filaments'+str(axis)+'_'+str(snap).zfill(5)+'.txt'\n\n(boxlen, unit_l) = get_boxsize(infoname)\nif boxlen > 7:\n sdoff = np.log10(4)\n vdoff = np.log10(8)\nelse:\n sdoff = 0.0\n vdoff = 0.0\n \nimshowmap = 'nickmapSD'\n#imshowmap = 'bone_r'\ncdmin = -4.11 - sdoff\ncdmax = 0.069 - sdoff\n\n(lmin, lmax) = get_level_min_max(infoname)\n(boxlen, unit_l) = get_boxsize(infoname)\n\n# read in the rectangles that define the filaments we're interested in\n# these are in units of pixels in the finder image, so we will need to translate these\n# to unitary units!\nrectdata = ascii.read(filname)\nfil = rectdata[filnumber]\n\nleftpoint = np.array([fil[1], fil[2]])\nrightpoint = np.array([fil[3], fil[4]])\nwidth = fil[5]\nprint leftpoint\nprint rightpoint\nprint width\n\n# vector pointing along the filament box's long axis\nvec = rightpoint - leftpoint\nlength = np.linalg.norm(vec)\n# the orthogonal direction\northovec = (-vec[1], vec[0])\n# normalize them\northovec /= np.linalg.norm(orthovec)\nvec /= np.linalg.norm(vec)\n\n# we will move along the lower-left line of the rectangle, starting from startpoint \n# and ending at endpoint \nstartpoint = leftpoint - orthovec * width/2\nendpoint = rightpoint - orthovec * width/2\nprint startpoint,endpoint\n# for the distance to move in each step, choose a step size closest to the number of\n# grid cells (at the finest refinement level) it would take to traverse the box \nexpandfac = 4 # resample the surface density map\nnl = int(length * expandfac)\nnw = int(width * expandfac)\ndl = length / nl\ndw = width / nw\n\nprint nl, nw\nsubbox = np.zeros([nw, nl])\n\nfile = prefix+'surface_density_'+str(axis)+'.hdf5'\nprint snap,file\nf = h5py.File(file, 'r')\nsd = f['surface_density']\n\nny = sd.shape[1]\nfor il in xrange(nl):\n l = startpoint + vec * dl * (il + 0.5)\n for iw in xrange(nw):\n pt = l + orthovec * dw * (iw + 0.5)\n # the sd array and imshow have different row-column ordering \n # convention than everything else in the world, including the \n # coordinates that we use to define the rectangles. 
so the \n # ordering of the points here is reversed.\n subbox[iw, il] = sd[int(pt[1]),int(pt[0])]\nf.close()\n\nax.imshow(subbox,\n origin='lower',\n vmin = cdmin,\n vmax = cdmax,\n cmap = imshowmap)\n \n# turn off axes\nax.set_frame_on(False)\nax.axes.get_yaxis().set_visible(False)\nax.axes.get_xaxis().set_visible(False)\n\nplt.show()\n \nsys.exit()\n\nspectra = []\n#for i in xrange(358):\nfor filename in glob.glob('combinedspectrum_*.hdf5'): \n# filename = 'combinedspectrum_'+str(i).zfill(4)+'.hdf5'\n f = h5py.File(filename, 'r')\n spectrum = np.array(f['spectrum'])\n spectra.append(spectrum)\n f.close()\n\nspectra /= np.median(spectra)\n \nf = h5py.File('spectrumvels.hdf5')\nvels = np.array(f['binmidskms'])\nf.close()\n\nax.imshow(np.transpose(spectra),\n interpolation='nearest',\n origin = 'lower',\n extent = [0, 1, np.min(vels), np.max(vels)],\n aspect = 0.1,\n vmin = 3,\n vmax = 6,\n cmap = 'gray_r')\n\nprint np.min(spectra)\nprint np.max(spectra)\nprint np.mean(spectra)\nprint np.median(spectra) \n\nplt.savefig('foo.png')\n#plt.show()"
}
] | 51 |
Vedha286/NewsArticlesClassifier
|
https://github.com/Vedha286/NewsArticlesClassifier
|
1f74d77969d7a2d8e02220c9a662c6f735eb2b09
|
eac9f9bc73e9a26c2f881f1929d4fba5bfc5971c
|
364c819460c56c0ce5b0d87aac37259e0afc466b
|
refs/heads/main
| 2023-09-05T02:31:31.510829 | 2021-11-05T17:52:06 | 2021-11-05T17:52:06 | 412,728,097 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7472731471061707,
"alphanum_fraction": 0.7573080062866211,
"avg_line_length": 78.03448486328125,
"blob_id": "35ec5694b4bb798979797dd9bd6573ee7dd546bf",
"content_id": "ae529731b6140b53c8e80fc18cd54b22c46be2e9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9183,
"license_type": "no_license",
"max_line_length": 656,
"num_lines": 116,
"path": "/modelPredictionService/modelPrediction.py",
"repo_name": "Vedha286/NewsArticlesClassifier",
"src_encoding": "UTF-8",
"text": "from pyspark import SparkContext\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql import functions as sf\nfrom pyspark.ml import PipelineModel\nimport pandas as pd\nimport numpy as np\nfrom pyspark.ml.classification import NaiveBayesModel, RandomForestClassificationModel, OneVsRestModel\nimport os\nimport nltk\nnltk.download('stopwords')\nfrom nltk.corpus import stopwords\nfrom nltk.stem.porter import PorterStemmer\nimport re\nps = PorterStemmer()\n\nnews_topics = {0: \"General News\", 1: \"Sport\", 2: \"Tech\", 3: \"Entertainment\", 4: \"Finance\", 5: \"Politics\", 6: \"Business\", 7: \"economics\", \n 8: \"World\", 9: \"Beauty\", 10: \"Gaming\", 11:\"Science\", 12:\"Travel\", 13:\"Energy\", 14:\"Music\", 15:\"Food\"}\n\nr_news_topics = {y: x for x, y in news_topics.items()}\n\n\ndef start_tag(bg):\n \treturn \"<div class='alert alert-\" + bg + \" alert-dismissible fade show' role='alert'><strong>\"\n\nend_tag = \"</strong> <button type='button' class='close' data-dismiss='alert' aria-label='Close'><span aria-hidden='true'>×</span></button></div>\"\n\n\nmodel_dir = 'models/model-test'\n\ndef RemoveNonEnglishWords(text):\n text = str(text)\n text = re.sub('[^a-zA-Z]', ' ', text)\n text = text.lower()\n text = text.split()\n text = [ps.stem(word) for word in text if not word in stopwords.words('english')]\n text = ' '.join(text)\n return text\n\ndef predict(sentence):\n\tsentence = RemoveNonEnglishWords(sentence)\n\tspark = SparkSession.builder.master(\"local[0]\").appName(\"newsClassifierPredictor\").getOrCreate()\n\n\tspark.sparkContext.setLogLevel('WARN')\n\n\tprint(\"Article: \" + sentence)\n\tdf_test = pd.DataFrame(np.array([[\"test\"]]), columns=['sen'])\n\tdf_test = spark.createDataFrame([(0, sentence)], [\"id\", \"sen\"])\n#\tdf_test.show()\n\n\ttest1 = PipelineModel.load(model_dir+\"pipeline\").transform(df_test).select(\"features\")\n#\ttest1.show()\n\tif os.path.exists(model_dir+\"ovr\"):\n\t\tm = OneVsRestModel.load(model_dir+\"ovr\")\n\t\tprint(\"model loaded\")\n\t\trr = m.transform(test1)\n\t\tpred = (news_topics[rr.collect()[0][\"prediction\"]])\n\n\t\treturn start_tag(\"info\") + pred + end_tag\n\n\telif os.path.exists(model_dir+\"nb\"):\n\t\tm = NaiveBayesModel.load(model_dir+\"nb\")\n\t\tprint(\"model loaded\")\n\t\trr = m.transform(test1)\n\t\tpred = (news_topics[rr.collect()[0][\"prediction\"]])\n\n\t\treturn start_tag(\"info\") + pred + end_tag\n\telif os.path.exists(model_dir+\"rf\"):\n\t\tm = RandomForestClassificationModel.load(model_dir+\"rf\")\n\t\tprint(\"model loaded\")\n\t\trr = m.transform(test1)\n\t\tpred = (news_topics[rr.collect()[0][\"prediction\"]])\n\n\t\treturn start_tag(\"info\") + pred + end_tag\n\telse:\n\t\tpred = \"Error: <span class='font-weight-normal'>There is no model! Please train model again or try again after a while</span>\"\n\t\t\n\t\treturn start_tag(\"danger\") + pred + end_tag\n\t\n\n# y = predict(\"festivals of India Pictures: festivals of India Photos / Images The country's largest public sector bank, the State Bank of India (SBI) has announced that as part of its festive season scheme, it will be offering credit score linked home loans at 6.7%, irrespective of the loan amount. SBI has also waived processing fees on home loans. 
Click here to know how to avail SBI home loan.more23 Sep, 2021, 02.10 PM IST21 Sep, 2021, 10.25 AM ISTThe first prototype train of the Kanpur and Agra Metro projects has been inaugurated by Uttar Pradesh Chief Minister Yogi Adi\")\n# print(y)\n\n# y = predict(\"air india: Govt begins Air India bid evaluation NEW DELHI: The government has begun evaluation of financial bids received from Tata Group and SpiceJet founder for the acquisition of Air India , sources said.With this, the privatisation process of the national flag carrier has moved to the next phase as the government looks to expeditiously conclude the deal.The financial bids are being evaluated against an undisclosed reserve price and the bid offering the highest price above that benchmark would be accepted.If successful, this will mark the \")\n# print(y)\n\n\n# # entertainment\n# y = predict(\"Country Spotlight: India India is the third-largest emitter of greenhouse gases globally. With a population of almost 1.4 billion people, many of whom are still without access to electricity and clean cooking fuels. In addition, agriculture is the largest source of livelihoods in the country and is the home of the world's second largest cattle population. Consequently, India remains a strong proponent of the Paris Agreement principle of 'common but differentiated responsibilities.'India's Nationally Determined Contribut\")\n# print(y)\n\n# y = predict(\"Hrithik Roshan celebrates two years of 'War', says 'Miss everything about being on this set' Miss everything about being on this set - co-working, collaborating, CREATING. #2YearsOfWar @iTIGERSHROFF… https://t.co/0SF5dlxrj3 — Hrithik Roshan (@iHrithik) 1633154487000 Read Also Read Also Siddharth Anand's ‘War', starring Hrithik Roshan and Tiger Shroff completed two glorious years today. Commemorating the occasion, Hrithik took to his Twitter handle to share a poster of the film along with a heartfelt note celebrating the film's success.Along with the poster, he wrote, ‘Miss everything\")\n# print(y)\n\n# y = predict(\"20 movie ideas to help put you in the Halloween spirit What would Halloween be without the wonderful witches, ghosts, vampires, and zombies of the big screen? It's no trick that one of the best Halloween treats is sitting down with a big bowl of popcorn to watch your favourite campy flick, family classic, or terrifying horror movie. Here's a list of 20 movie ideas sure to tickle your funny bone or scare you silly. Microsoft and partners may be compensated if you purchase something through recommended links in this article. 
Microsoft and partners may be compensated if you purchase something through recommended links in this article.\")\n# print(y)\n\n# y = predict(\"Charlize Theron prefers impressing kids to critics Country United States of America US Virgin Islands United States Minor Outlying Islands Canada Mexico, United Mexican States Bahamas, Commonwealth of the Cuba, Republic of Dominican Republic Haiti, Republic of Jamaica Afghanistan Albania, People's Socialist Republic of Algeria, People's Democratic Republic of American Samoa Andorra, Principality of Angola, Republic of Anguilla Antarctica (the territory South of 60 deg S) Antigua and Barbuda Argentina, Argentine Republic Armenia Aruba Australia,\")\n# print(y)\n\n# y = predict(\"Read Also Choreographer Remo D'Souza and host Raghav Juyal were seen engaging in a fun workout challenge where Raghav tried to woo Shakti Mohan with his quirky style and funny banter with Remo on 'Dance+ 6'.Humorously teasing Raghav for his constant attempts to impress Shakti, Remo initiated a challenge saying: 'Raghav, I have seen you trying to woo Shakti for a few years, but eventually nothing comes out of it and that hurts me since I consider you my own. But then I wondered why should Shakti even consider you; can't put my finger on that one good thing that would impress her.\")\n# print(y)\n\n# y = predict(\"John Lennon ‘shattered, totally devastated' by Brian Epstein's death ‘Like a little child' Brian Epstein was the manager of The Beatles from 1962 until his sudden death of an accidental drug overdose in August 1967 at the age of just 32. At the time, John Lennon, Paul McCartney, George Harrison and Ringo Starr had been attending a seminar on Transcendental Meditation in Bangor, Wales, led by Indian guru Maharishi Mahesh Yogi. However, their visit was cut short by the news of Brian's death on August 27.Ajoy Bose, the director of new documentary film The Beatles and India, spoke exclusi\")\n# print(y)\n\n# #world\n# y = predict(\"India News Video caption: India building collapses hours after it was evacuatedIndia building collapses hours after it was evacuatedNo loss of life was reported from the incident.By Geeta PandeyBBC News, HathrasRajini VaidyanathanBBC South Asia Correspondent By Angie BrownBBC Scotland, Edinburgh and East reporterBy Vikas PandeyBBC News, DelhiBy Soutik BiswasIndia correspondent\")\n# print(y)\n\n# y = predict(\"Biden eager to push benefits of spending plan, visiting Michigan Tuesday HOWELL, Mich. — President Joe Biden is shifting strategy to sell his ambitious social spending plans by traveling outside Washington and courting moderate Democrats who are key to hopes for any deal. With his agenda in jeopardy on Capitol Hill, Biden on Tuesday is visiting the Michigan district of a moderate Democratic lawmaker who has urged him to promote his proposals more aggressively to the public. Back in Washington, negotiations continue on a pair of bills to boost spending on safety net, \")\n# print(y)\n\n\n# #food\n# y = predict(\"We all love a good roast on a Sunday but while many of us might think we are dab hands in the kitchen, it's far more enjoyable to get others to do all the cooking - not to mention the washing up afterwards. As we head deeper into autumn, there are few things better than a slap up Sunday lunch in a gastropub or restaurant with a roaring fire, and Bristol is blessed with such places. Below is a round-up of the best roast-serving pubs in Bristol. 
READ MORE:New bars and restaurants in Bristol to visit this month If we've missed out your favourite, tell us your recommendations in the comments below.\")\n# print(y)\n"
},
{
"alpha_fraction": 0.6529411673545837,
"alphanum_fraction": 0.7529411911964417,
"avg_line_length": 10.266666412353516,
"blob_id": "45e855a50ea50dde1f5c35f11dd9a3933293e423",
"content_id": "9614140b8d8c9f25e0db855ae928da3a0c300550",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 170,
"license_type": "no_license",
"max_line_length": 19,
"num_lines": 15,
"path": "/requirements.txt",
"repo_name": "Vedha286/NewsArticlesClassifier",
"src_encoding": "UTF-8",
"text": "kafka-python==2.0.2\nschedule==1.1.0\nrequests==2.26.0\nkq==2.0.0\npymongo==3.12.0\npandas\nfindspark\nnltk\nsklearn\nscipy\nnumpy\nfastapi\nuvicorn\n\"pymongo[srv]\"\npython-multipart\n\n"
},
{
"alpha_fraction": 0.6614509224891663,
"alphanum_fraction": 0.6728307008743286,
"avg_line_length": 18.52777862548828,
"blob_id": "d690474ffff7b9e0d134bc598d9bd10624076468",
"content_id": "d2e378c0ee3e2a7dd0898be9fad15604b0b4abe4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 703,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 36,
"path": "/README.md",
"repo_name": "Vedha286/NewsArticlesClassifier",
"src_encoding": "UTF-8",
"text": "# NewsArticlesClassifier\n\n## Hack Elite(Group 10) - **Vedha Krishna Velthapu and P Manish**\n\n## Report: **report.pdf**\n\n### What the report contains:\n\n- Milestones 1-4 details\n- Environment setup steps\n- Architectural design\n- Planning Document\n- Screenshots of database and frontend\n\n### What is done:\n\n- Whole of milestone 1\n\n - Kafka producer to get data\n - Kafka consuer to save data to db\n\n- Whole of milestone 2\n\n - Clean data\n - Prepare data for the model\n\n- Whole of milestone 3\n\n - Training model\n - Save model\n\n- Milestone 4\n\n - Frontend for the user to interact with\n - Predict news article category\n - Trigger model training from frontend by the click of a button\n"
},
{
"alpha_fraction": 0.5482954382896423,
"alphanum_fraction": 0.5551947951316833,
"avg_line_length": 40.75141143798828,
"blob_id": "c5002cb687275b837af93ad820a81d59ca0f7016",
"content_id": "31185ba203c93fc60f9c66b4bc001a5360baf6bb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7392,
"license_type": "no_license",
"max_line_length": 161,
"num_lines": 177,
"path": "/modelTrainingService/training.py",
"repo_name": "Vedha286/NewsArticlesClassifier",
"src_encoding": "UTF-8",
"text": "\nfrom pymongo import MongoClient, errors\nimport nltk\nnltk.download('stopwords')\nfrom nltk.corpus import stopwords\nfrom nltk.stem.porter import PorterStemmer\nimport re\nfrom pyspark import SparkContext\nfrom pyspark.ml.feature import Tokenizer\nfrom pyspark.ml.feature import CountVectorizer\nfrom pyspark.ml.feature import IDF\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql import Row\nfrom pyspark.ml.classification import NaiveBayes, RandomForestClassifier, LogisticRegression, OneVsRest\nfrom pyspark.ml.evaluation import MulticlassClassificationEvaluator\nfrom pyspark.ml.tuning import CrossValidator, ParamGridBuilder\nimport numpy as np\nimport pandas as pd\nfrom pyspark.ml import Pipeline\nimport os\nimport shutil\n\nps = PorterStemmer()\ndef RemoveNonEnglishWords(text):\n text = str(text)\n text = re.sub('[^a-zA-Z]', ' ', text)\n text = text.lower()\n text = text.split()\n text = [ps.stem(word) for word in text if not word in stopwords.words('english')]\n text = ' '.join(text)\n return text\n\nnews_topics = {0: \"general news\", 1: \"sport\", 2: \"tech\", 3: \"entertainment\", 4: \"finance\", 5: \"politics\", 6: \"business\", 7: \"economics\", \n 8: \"world\", 9: \"beauty\", 10: \"gaming\", 11:\"science\", 12:\"travel\", 13:\"energy\", 14:\"music\", 15:\"food\"}\nr_news_topics = {y: x for x, y in news_topics.items()}\nmongodb_connection_string = \"mongodb+srv://IIITH-group10:[email protected]/news?retryWrites=true&w=majority\"\n\nmodel_dir = 'models/model-test'\nspark = SparkSession.builder.master(\"local\").appName(\"newsClassifier\").getOrCreate()\nspark.conf.set(\"spark.driver.allowMultipleContexts\", \"true\")\nspark.sparkContext.setLogLevel('WARN')\ntokenizer = Tokenizer(inputCol=\"sen\", outputCol=\"words\")\ncount = CountVectorizer(inputCol=\"words\", outputCol=\"rawFeatures\")\nidf = IDF(inputCol=\"rawFeatures\", outputCol=\"features\")\npipeline = Pipeline(stages=[tokenizer, count, idf])\naccuracies = [] \nbest_models = []\n\ndef load_model():\n if not os.path.exists(model_dir+\"ovr\") and not os.path.exists(model_dir+\"nb\") and not os.path.exists(model_dir+\"rf\"):\n train()\ndef train():\n client = MongoClient(mongodb_connection_string)\n db = client.news\n print('Getting data')\n newsArticles = db.newsArticles.find({}, {\"_id\":0, \"date\":0, \"source\":0})\n client.close()\n newsArticlesArr = []\n for newsArticle in newsArticles:\n newsArticlesArr.append(newsArticle)\n print(\"Got \" + str(len(newsArticlesArr)) + \" records\")\n\n print(\"=================================\")\n print(\"=================================\")\n \n df = spark.createDataFrame(Row(RemoveNonEnglishWords(str(x['title']) + \" \" + str(x['summary'])), r_news_topics[x[\"category\"]] ) for x in newsArticlesArr)\n df = df.withColumnRenamed(\"_1\", \"sen\")\n df = df.withColumnRenamed(\"_2\", \"label\")\n df = df.na.fill(\"test\")\n \n \n transformer = pipeline.fit(df)\n transformer.write().overwrite().save(model_dir+\"pipeline\")\n\n rescaledData =transformer.transform(df).select(\"features\", \"label\")\n\n print(\"=================================\\n\")\n train, test = rescaledData.randomSplit([0.7, 0.3])\n print(\"train data\")\n train.show(2)\n print(\"=================================\\n\")\n print(\"=================================\\n\")\n \n print(\"test data\")\n test.show(2)\n print(\"=================================\\n\")\n nb = NaiveBayes()\n rf = RandomForestClassifier(numTrees=5)\n lr = LogisticRegression(maxIter = 4)\n ovr = OneVsRest(classifier=lr)\n 
print(\"=================================\\n\")\n print(\"=================================\\n\")\n numFolds = 5\n evaluator = MulticlassClassificationEvaluator(labelCol=\"label\", predictionCol=\"prediction\", metricName=\"accuracy\")\n paramGrid_nb = ParamGridBuilder().addGrid(nb.smoothing, np.linspace(5,3, 1)).build()\n \n #print(\"paramGrid_nb built\")\n paramGrid_ovr = ParamGridBuilder().addGrid(lr.maxIter, [1, 3, 2]).build()\n #print(\"paramGrid_ovr built\")\n paramGrid_rf = ParamGridBuilder().addGrid(rf.numTrees, [2, 3, 1]).build()\n #print(\"paramGrid_rf built\")\n paramGrids = [\n paramGrid_nb, \n paramGrid_rf, \n paramGrid_ovr\n ]\n models = [\n nb, \n rf, \n ovr\n ]\n models_names = [\n \"nb\",\n \"rf\",\n \"ovr\"\n ]\n \n for i in range(0, len(models_names)):\n crossval_model = CrossValidator(estimator=models[i], estimatorParamMaps=paramGrids[i], evaluator=evaluator, numFolds=numFolds) \n print(\"cv Model built: \" + models_names[i])\n \n print(\"Training model: \" + models_names[i])\n model = crossval_model.fit(train)\n \n print(\"Got the best model...\")\n best_model = model.bestModel\n \n print(best_model.explainParams())\n best_models.append(best_model)\n\n print(\"Predicting model: \" + models_names[i])\n preds = model.transform(test)\n preds.select(\"prediction\", \"label\").show(2)\n \n accuracy = evaluator.evaluate(preds.select(\"prediction\", \"label\"))\n accuracies.append(accuracy)\n\n print(\"Accuracy of \" + models_names[i] + \" = %g\" % accuracy)\n print(\"=================================\\n\")\n \n\n max_accuracy = max(accuracies)\n model_index = accuracies.index(max_accuracy)\n print(\"index: \" + str(model_index))\n print(\"Using model \" + models_names[model_index])\n print(\"Accuracy %g\" % accuracies[model_index])\n\n print(\"=================================\\n\") \n print(\"=================================\\n\")\n print(\"=================================\\n\")\n\n filename = model_dir+models_names[model_index]\n if os.path.exists(filename):\n shutil.rmtree(filename, ignore_errors=True)\n else:\n print(\"Can not delete the file as it doesn't exists\")\n best_models[model_index].save(filename)\n \n if os.path.exists(model_dir+\"ovr\"):\n if os.path.exists(model_dir+\"nb\"):\n shutil.rmtree(model_dir+\"nb\", ignore_errors=True)\n if os.path.exists(model_dir+\"rf\"):\n shutil.rmtree(model_dir+\"rf\", ignore_errors=True)\n elif os.path.exists(model_dir+\"nb\"):\n if os.path.exists(model_dir+\"ovr\"):\n shutil.rmtree(model_dir+\"ovr\", ignore_errors=True)\n if os.path.exists(model_dir+\"rf\"):\n shutil.rmtree(model_dir+\"rf\", ignore_errors=True)\n elif os.path.exists(model_dir+\"rf\"):\n if os.path.exists(model_dir+\"ovr\"):\n shutil.rmtree(model_dir+\"ovr\", ignore_errors=True)\n if os.path.exists(model_dir+\"nb\"):\n shutil.rmtree(model_dir+\"nb\", ignore_errors=True)\n\n print(\"Saved model...\")\n return models_names[model_index], str(accuracies[model_index])\n\n#train()\n\n"
},
{
"alpha_fraction": 0.717577338218689,
"alphanum_fraction": 0.727452278137207,
"avg_line_length": 28.784313201904297,
"blob_id": "89f18cf5a234d23faca838739ea9baa979d35877",
"content_id": "44fdaa373441f3b88e8589908818e05484559f66",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1519,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 51,
"path": "/main.py",
"repo_name": "Vedha286/NewsArticlesClassifier",
"src_encoding": "UTF-8",
"text": "import uvicorn\nfrom pydantic import BaseModel\nfrom modelTrainingService.training import train, load_model\nfrom modelPredictionService.modelPrediction import predict\nfrom starlette import requests\nfrom fastapi.templating import Jinja2Templates\nfrom fastapi import FastAPI, Request, Form\n\ntemplates = Jinja2Templates(directory=\"views\")\napp = FastAPI(title=\"News Classifier\", docs_url=\"/docs\")\n#app.add_event_handler(\"startup\", load_model)\n\nclass NewsClassifierRetrainStatusOut(BaseModel):\n Status: str\n Accuracy: str\n Classifier: str\n\nclass NewsClassifierQueryIn(BaseModel):\n News: str\n\nclass NewsClassifierQueryOut(BaseModel):\n Category: str\n\[email protected](\"/ping\")\n\ndef ping():\n return {\"ping\": \"pong\"}\n\[email protected](\"/\")\ndef load_Home(request: Request):\n return templates.TemplateResponse(\"index.html\", {\"request\": request})\n\[email protected](\"/retrain_news_category\")\ndef retrain_news_category(request: Request):\n classifier, accuracy = train()\n return templates.TemplateResponse(\"index.html\", {\"request\": request,\"classifier\": classifier,\"accuracy\" : accuracy})\n\[email protected](\"/predict_news_category\", status_code=200)\n\n\ndef predict_news_category(request: Request,newsText: str = Form(...)):\n query_data = NewsClassifierQueryIn(News = newsText)\n category = predict(query_data.News)\n return templates.TemplateResponse(\"index.html\", {\n \"request\": request,\n \"prediction\": category\n })\n\n\nif __name__ == \"__main__\":\n uvicorn.run(\"main:app\", host=\"127.0.0.1\", port=8888, reload=True)\n"
},
{
"alpha_fraction": 0.7536231875419617,
"alphanum_fraction": 0.772946834564209,
"avg_line_length": 51,
"blob_id": "b8c86a928489101d34a74a0a4ff649f6c2990efd",
"content_id": "8be8630f11cecb5e3a4e51c39f75441dd44979e8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 207,
"license_type": "no_license",
"max_line_length": 134,
"num_lines": 4,
"path": "/dataIngestionService/keys.py",
"repo_name": "Vedha286/NewsArticlesClassifier",
"src_encoding": "UTF-8",
"text": "# put your API keys here\nmongodb_connection_string = \"mongodb+srv://IIITH-group10:[email protected]/news?retryWrites=true&w=majority\"\nrapidapi_key = \"6965fe9a47msh414443209370cd3p188965jsn75040c6b3a4f\"\nnewscather_key = \"HDKy1I5lee16dLsWghX220ThukgRgfi7OdQZpyzL8Vo\""
}
] | 6 |
aceshine/Gilgamish
|
https://github.com/aceshine/Gilgamish
|
99d732037d6441adebedbece5e1dd87fb9aa16c5
|
964a8e5ae52bd2f4a50e367be25f1eac62b81a48
|
1b7fe3c053036a44de693773488614f24088b512
|
refs/heads/master
| 2016-08-12T08:08:03.633815 | 2016-02-04T13:23:19 | 2016-02-04T13:23:19 | 45,182,938 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6863905191421509,
"alphanum_fraction": 0.6962524652481079,
"avg_line_length": 17.10714340209961,
"blob_id": "649d8b0e4e8bca4380d32e3e59b38c4825870a47",
"content_id": "904c14dc927b23957035eb73503c3fbed0ca7211",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 507,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 28,
"path": "/unp/temp.hpp",
"repo_name": "aceshine/Gilgamish",
"src_encoding": "UTF-8",
"text": "#include <sys/socket.h>\n\nstruct sockaddr\n{\n\tuint8_t sa_len;\n\tsa_family_t sa_family;\n\tchar sa_data[14];\n};\n\nstruct in_addr\n{\n\tin_addr_t s_addr;\n};\n\nstruct sockaddr_in\n{\n\tuint8_t sin_len;\n\tsa_family_t sin_family;\n\tin_port_t sin_port;\n\tstruct in_addr sin_addr;\n\tchar sin_zero[8];\n};\n\n#include <arpa/inet.h>\nint inet_aton(const char* strptr, struct in_addr* addrptr);\nin_addr_t inet_addr(const char* strptr);\nchar* inet_ntoa(struct in_addr inaddr);\nint inet_pton(int family, const char* strptr, void* addrptr);\n"
},
{
"alpha_fraction": 0.5743589997291565,
"alphanum_fraction": 0.6256410479545593,
"avg_line_length": 12.928571701049805,
"blob_id": "9071a1ad8f1aa2e3dc7f0a415005fc284bfaf3e4",
"content_id": "db95c0686635dbc01ea59572ea62573c9df3ff00",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 196,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 14,
"path": "/nil/server.hpp",
"repo_name": "aceshine/Gilgamish",
"src_encoding": "UTF-8",
"text": "//\n// server.hpp\n// nil\n//\n// Created by nil on 11/21/15.\n// Copyright © 2015 nil. All rights reserved.\n//\n\n#ifndef server_hpp\n#define server_hpp\n\n#include <stdio.h>\n\n#endif /* server_hpp */\n"
},
{
"alpha_fraction": 0.5743589997291565,
"alphanum_fraction": 0.6256410479545593,
"avg_line_length": 12.928571701049805,
"blob_id": "f597a32a137873834461d83e27e603bab64e8679",
"content_id": "e98de03900adf56295ff5bd426a7da011e71582c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 196,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 14,
"path": "/nil/client.hpp",
"repo_name": "aceshine/Gilgamish",
"src_encoding": "UTF-8",
"text": "//\n// client.hpp\n// nil\n//\n// Created by nil on 11/21/15.\n// Copyright © 2015 nil. All rights reserved.\n//\n\n#ifndef client_hpp\n#define client_hpp\n\n#include <stdio.h>\n\n#endif /* client_hpp */\n"
},
{
"alpha_fraction": 0.5743589997291565,
"alphanum_fraction": 0.6256410479545593,
"avg_line_length": 12.928571701049805,
"blob_id": "e3528cbc4e1659c378c26361b08dc14369aae12e",
"content_id": "02aca0282d8c45fd66aef6216f62cec9c177f7de",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 196,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 14,
"path": "/nil/kqueue.hpp",
"repo_name": "aceshine/Gilgamish",
"src_encoding": "UTF-8",
"text": "//\n// kqueue.hpp\n// nil\n//\n// Created by nil on 11/22/15.\n// Copyright © 2015 nil. All rights reserved.\n//\n\n#ifndef kqueue_hpp\n#define kqueue_hpp\n\n#include <stdio.h>\n\n#endif /* kqueue_hpp */\n"
},
{
"alpha_fraction": 0.6577380895614624,
"alphanum_fraction": 0.663690447807312,
"avg_line_length": 17.66666603088379,
"blob_id": "2b7361c235afb09181b3ab14b8fe25090ffe5553",
"content_id": "fcee9a36f4a83a95fb4c8b5dc60982e2998c8ca7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 336,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 18,
"path": "/interface/makefile",
"repo_name": "aceshine/Gilgamish",
"src_encoding": "UTF-8",
"text": "flags = -g -lstdc++ -std=c++11\n\nall: server client\n\nserver: server.o\n\tgcc $(flags) -o server server.o -lpthread\n\nserver.o: server.cpp\n\tgcc $(flags) -o server.o -c server.cpp\n\nclient: client.o\n\tgcc $(flags) -o client client.o -lpthread\n\nclient.o: client.cpp\n\tgcc $(flags) -o client.o -c client.cpp\n\nclean:\n\trm -f ./server ./client ./*.o\n"
},
{
"alpha_fraction": 0.5909591913223267,
"alphanum_fraction": 0.6284454464912415,
"avg_line_length": 21.674999237060547,
"blob_id": "eb5e3c1b4df9fcd330719709e56dc86ebdd5f97a",
"content_id": "35f5c623b3e88000ff7f3d0fafe471634402cd85",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 947,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 40,
"path": "/interface/client.cpp",
"repo_name": "aceshine/Gilgamish",
"src_encoding": "UTF-8",
"text": "#include <iostream>\n\n#include <pthread.h>\n#include <unistd.h>\n\n#include <sys/types.h>\n#include <sys/socket.h>\n#include <netinet/in.h>\n#include <arpa/inet.h>\n\nint main(int argc, char* argv[])\n{\n\tint sock_fd = socket(AF_INET, SOCK_STREAM, 0);\n\tstruct sockaddr_in their_addr;\n\ttheir_addr.sin_family = AF_INET;\n\ttheir_addr.sin_port = htons(8080);\n\t// their_addr.sin_addr = inet_addr(\"127.0.0.1\");\n\tinet_pton(AF_INET, \"127.0.0.1\", &their_addr.sin_addr);\n\tbzero(&(their_addr.sin_zero), 8);\n\n\tconnect(sock_fd, (struct sockaddr*)(&their_addr), sizeof(their_addr));\n\n\tchar msg[] = \"hello world\";\n\tchar buf[1024] = { 0 };\n\t//向服务器发送字符串msg\n\tif (send(sock_fd, msg, strlen(msg), 0) ==- 1) {\n\t\tperror(\"send\");\n\t\texit(1);\n\t}\n\t//接受从服务器返回的信息\n\tint numbytes;\n\tif ((numbytes = recv(sock_fd, buf, 100, 0)) == -1) {\n\t\tperror(\"recv\");\n\t\texit(1);\n\t}\n\t// buf[numbytes] = '/0';\n\tprintf(\"result:%s\",buf);\n\tclose(sock_fd);\n\treturn 0;\n}\n"
},
{
"alpha_fraction": 0.6041666865348816,
"alphanum_fraction": 0.6145833134651184,
"avg_line_length": 12.714285850524902,
"blob_id": "ecdc27903dfeb099cd5afaf6dfc08a0dee4127ed",
"content_id": "983bb332c98ada66886094cafa698c5620f29a6d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 96,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 7,
"path": "/static.cpp",
"repo_name": "aceshine/Gilgamish",
"src_encoding": "UTF-8",
"text": "#include <iostream>\n\nint static_main()\n{\n\tstd::cout << \"static_main\" << std::endl;\n\treturn 0;\n}\n"
},
{
"alpha_fraction": 0.6711111068725586,
"alphanum_fraction": 0.7288888692855835,
"avg_line_length": 17.75,
"blob_id": "7288604323f364579c46b974869ee4ea6d16dd54",
"content_id": "1d8050315318bdc5a064883f0dad4743f1638c27",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 225,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 12,
"path": "/becomeDaemon.hpp",
"repo_name": "aceshine/Gilgamish",
"src_encoding": "UTF-8",
"text": "#ifndef _become_daemon_hpp_\n#define _become_daemon_hpp_\n\n#define BD_NO_CHDIR 01\n#define BD_NO_CLODE_FILES 02\n#define BD_NO_REOPEN_STD_FDS 04\n\n#define BD_NO_UMASKO 010\n\n#define BD_MAX_CLOSE 8192\n\n#endif // _become_daemon_hpp_\n"
},
{
"alpha_fraction": 0.6047618985176086,
"alphanum_fraction": 0.6142857074737549,
"avg_line_length": 14.071428298950195,
"blob_id": "2415ac0218cbd7a5de723c3221934aa9f0d13f2f",
"content_id": "660986b4f0e29531ada16a44a0724b4c78a071cc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 210,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 14,
"path": "/freebsd/makefile",
"repo_name": "aceshine/Gilgamish",
"src_encoding": "UTF-8",
"text": "cc = gcc\nflags = -g -lstdc++ -std=c++11\n\nkqueue: kqueue.o\n\t$(cc) $(flags) -o kqueue kqueue.o -lpthread\n\nkqueue.o: kqueue.cpp\n\t$(cc) $(flags) -o kqueue.o -c kqueue.cpp\n\nclean:\n\trm -f kqueue ./*.o\n\nrun:\n\t./kqueue"
},
{
"alpha_fraction": 0.5246913433074951,
"alphanum_fraction": 0.5864197611808777,
"avg_line_length": 11.461538314819336,
"blob_id": "0437ab7eda96380880b9e2669442babd0e81b3b1",
"content_id": "159cae8c5f5d305ddb649b9927244a373e2e8e2b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 163,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 13,
"path": "/nil/main.hpp",
"repo_name": "aceshine/Gilgamish",
"src_encoding": "UTF-8",
"text": "//\n// main.hpp\n// nil\n//\n// Created by nil on 11/21/15.\n// Copyright © 2015 nil. All rights reserved.\n//\n\n#ifndef main_h\n#define main_h\n\n\n#endif /* main_h */\n"
},
{
"alpha_fraction": 0.6814285516738892,
"alphanum_fraction": 0.6842857003211975,
"avg_line_length": 13.91489315032959,
"blob_id": "bdb399caf0825fda4137633818c07d76ed7bdd9e",
"content_id": "ff8c102d68e6d8e83c0704b8dd84058e83bb0856",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 700,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 47,
"path": "/freebsd/kqueue.cpp",
"repo_name": "aceshine/Gilgamish",
"src_encoding": "UTF-8",
"text": "#include <stdio.h>\n#include <unistd.h>\n#include <pthread.h>\n\n#include <sys/event.h>\n\n\nstatic pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;\n\n\nstatic void server_init()\n{\n\t\n}\n\n\nstatic void* server(void* argv)\n{\n\tpthread_mutex_lock(&mtx);\n\tsleep(1);\n\tprintf(\"server start\\n\");\n\n\n\n\tpthread_mutex_unlock(&mtx);\n\treturn NULL;\n}\n\nstatic void* client(void* argv)\n{\n\tpthread_mutex_lock(&mtx);\n\tprintf(\"client start\\n\");\n\tpthread_mutex_unlock(&mtx);\n\treturn NULL;\n}\n\nint main(int argc, char* argv[])\n{\n\tpthread_t server_id;\n\tpthread_t client_id;\n\tpthread_create(&server_id, NULL, server, NULL);\n\tpthread_create(&client_id, NULL, client, NULL);\n\n\tvoid* result;\n\tpthread_join(server_id, &result);\n\treturn 0;\n}"
},
{
"alpha_fraction": 0.562011182308197,
"alphanum_fraction": 0.5810055732727051,
"avg_line_length": 14.982142448425293,
"blob_id": "253b28f57cb630a28e3962b4c5bd9ea03ab3587d",
"content_id": "06a778e85bdb8a95c7ccc4cea38f6bdbee807c17",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 895,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 56,
"path": "/becomeDaemon.cpp",
"repo_name": "aceshine/Gilgamish",
"src_encoding": "UTF-8",
"text": "#include <sys/stat.h>\n#include <fcntl.h>\n\n#include \"become_daemon.hpp\"\n\n\nint becomeDaemon(int flags)\n{\n\tint maxfd, fd;\n\n\tswitch (fork())\n\t{\n\t\tcase -1: return -1;\n\t\tcase 0: break;\n\t\tdefault: exit(EXIT_SUCCESS);\n\t}\n\n\tif (setsid() == -1)\n\t\treturn -1;\n\n\tswitch (fork())\n\t{\n\t\tcase -1: return -1;\n\t\tcase 0: break;\n\t\tdefault: exit(EXIT_SUCCESS);\n\t}\n\n\tif (!(flags & BD_NO_UMASKO))\n\t\tumask(0);\n\n\tif (!(flags & BD_NO_CHDIR))\n\t\tchdir(\"/\");\n\n\tif (!(flags & BD_NO_CLOSE_FILES))\n\t{\n\t\tmaxfd = sysconf(_SC_OPEN_MAX);\n\t\tif (maxfd == -1)\n\t\t\tmaxfd = BD_MAX_CLOSE;\n\n\t\tfor (fd = 0; fd < maxfd; fd++)\n\t\t\tclose(fd);\n\t}\n\n\tif (!(flags & BD_NO_REOPEN_STD_FDS))\n\t{\n\t\tclose(STDIN_FILENO);\n\t\tfd = open(\"/dev/null\", O_RDWR);\n\t\tif (fd != STDIN_FILENO)\n\t\t\treturn -1;\n\t\tif (dup2(STDIN_FILENO, STDOUT_FILENO) != STDOUT_FILENO)\n\t\t\treturn -1;\n\t\tif (dup2(STDIN_FILENO, STDERR_FILENO) != STDERR_FILENO)\n\t\t\treturn -1;\n\t}\n\treturn 0;\n}\n"
},
{
"alpha_fraction": 0.57485032081604,
"alphanum_fraction": 0.5868263244628906,
"avg_line_length": 12.916666984558105,
"blob_id": "ae45e53200802cc36bd14a948da198921f0410aa",
"content_id": "3c83de7522725517a5612342942e171bad3c0abe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 167,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 12,
"path": "/algorithm/makefile",
"repo_name": "aceshine/Gilgamish",
"src_encoding": "UTF-8",
"text": "flags = -g -lstdc++ -std=c++11\n\nall: sort\n\nsort: sort.o\n\tgcc $(flags) -o sort sort.o\n\nsort.o: sort.cpp\n\tgcc $(flags) -o sort.o -c sort.cpp\n\nclean:\n\trm -f ./sort ./*.o\n"
},
{
"alpha_fraction": 0.672897219657898,
"alphanum_fraction": 0.6760124564170837,
"avg_line_length": 21.928571701049805,
"blob_id": "58b4bb68f00036e3826085bbe08a8b5a18c82692",
"content_id": "25f50717cdef628eeef5d9abddc465c28f9bdd0a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 642,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 28,
"path": "/makefile",
"repo_name": "aceshine/Gilgamish",
"src_encoding": "UTF-8",
"text": "flags = -g -lstdc++ -std=c++11\n\nall: main libstatic\n\nmain: main.o libstatic libdynamic\n\t$(MAKE) -C ./algorithm\n\t#gcc $(flags) -o main main.o libstatic.a\n\tgcc $(flags) -o main main.o -L. -lstatic -ldynamic\n\nmain.o: main.cpp main.hpp\n\tgcc $(flags) -o main.o -c main.cpp\n\nlibstatic: static.o\n\tar rcs libstatic.a static.o\n\nstatic.o: static.cpp\n\tgcc $(flags) -o static.o -c static.cpp\n\nlibdynamic: dynamic.o\n\tgcc $(flags) -shared -o libdynamic.so dynamic.o\n\ndynamic.o: dynamic.cpp\n\tgcc $(flags) -fPIC -o dynamic.o -c dynamic.cpp\n\nclean:\n\trm -f ./main ./libstatic.a ./libdynamic.so ./*.o\n\t$(MAKE) -C ./algorithm clean\n\t$(MAKE) -C ./interface clean\n"
},
{
"alpha_fraction": 0.5628571510314941,
"alphanum_fraction": 0.5942857265472412,
"avg_line_length": 18.44444465637207,
"blob_id": "6159ea8425a8e292deaed56fd05744b128784237",
"content_id": "e0f32e34bf590140068697f77b685efb1e85ad58",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 350,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 18,
"path": "/test.cpp",
"repo_name": "aceshine/Gilgamish",
"src_encoding": "UTF-8",
"text": "#include <errno.h>\n#include <stdio.h>\n#include <unistd.h>\n\nint main(int argc, char* argv[])\n{\n\tchar cwd[1024] = { 0 };\n\tchar path[1024] = { 0 };\n\tif (getcwd(cwd, sizeof(cwd)/sizeof(char)) != NULL)\n\t{\n\t\tfprintf(stdout, \"%s\\n\", cwd);\n\t\tsprintf(path, \"%s/test.txt\", cwd);\n\t\tfprintf(stdout, \"%s\\n\", path);\n\t}\n\telse\n\t\tperror(\"getcwd error\");\n\treturn 0;\n}\n"
},
{
"alpha_fraction": 0.5077568292617798,
"alphanum_fraction": 0.5263451933860779,
"avg_line_length": 27.850807189941406,
"blob_id": "d020d6de5482907dee38e7db24fb7875d04d92fe",
"content_id": "f9e886372d5ec3a15d4376b2e88465fef72697f6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 7156,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 248,
"path": "/nil/server.cpp",
"repo_name": "aceshine/Gilgamish",
"src_encoding": "UTF-8",
"text": "//\n// server.cpp\n// nil\n//\n// Created by nil on 11/21/15.\n// Copyright © 2015 nil. All rights reserved.\n//\n\n#include \"server.hpp\"\n\n#include <pthread.h>\n#include <fcntl.h>\n#include <poll.h>\n\n#include <sys/socket.h>\n#include <arpa/inet.h>\n#include <netinet/in.h>\n\n#include <sys/event.h>\n\n#include <iostream>\n\nstatic int server_init()\n{\n int fd = socket(AF_INET, SOCK_STREAM, 0);\n return fd;\n}\n\nstatic void* normal_server(void* argv)\n{\n std::cout << \"main\" << std::endl;\n int server_fd = socket(AF_INET, SOCK_STREAM, 0);\n \n struct sockaddr_in server_addr;\n bzero(&server_addr, sizeof(server_addr));\n server_addr.sin_family = AF_INET;\n inet_pton(AF_INET, \"127.0.0.1\", &server_addr.sin_addr);\n server_addr.sin_addr.s_addr = htonl(INADDR_ANY);\n server_addr.sin_port = htons(8080);\n if (int temp = bind(server_fd, (struct sockaddr*)&server_addr, sizeof(server_addr)) < 0)\n {\n printf(\"bind error: %d \\n\", temp);\n }\n \n if (int temp = listen(server_fd, 1024) < 0)\n {\n printf(\"listen error: %d \\n\", temp);\n }\n \n for ( ; ; ) {\n struct sockaddr_in client_addr;\n socklen_t len = sizeof(client_addr);\n int client_fd = accept(server_fd, (struct sockaddr*)&client_addr, &len);\n if (client_fd < 0) break;\n \n char buff[1024] = { 0 };\n ssize_t count = recv(client_fd, buff, sizeof(buff), 0);\n printf(\"server recv count %zd, %s \\n\", count, buff);\n \n count = send(client_fd, buff, sizeof(buff), 0);\n printf(\"server send count %zd, %s \\n\", count, buff);\n \n break;\n }\n \n pthread_exit(nullptr);\n return nullptr;\n}\n\nstatic void* select_server(void* argv)\n{\n std::cout << \"main\" << std::endl;\n int server_fd = socket(AF_INET, SOCK_STREAM, 0);\n \n int flags = fcntl(server_fd, F_GETFL);\n flags |= O_NONBLOCK;\n fcntl(server_fd, F_SETFL, flags);\n \n struct sockaddr_in server_addr;\n bzero(&server_addr, sizeof(server_addr));\n server_addr.sin_family = AF_INET;\n // inet_pton(AF_INET, \"127.0.0.1\", &server_addr.sin_addr);\n server_addr.sin_addr.s_addr = htonl(INADDR_ANY);\n server_addr.sin_port = htons(8080);\n if (int temp = bind(server_fd, (struct sockaddr*)&server_addr, sizeof(server_addr)) < 0)\n {\n printf(\"bind error: %d \\n\", temp);\n }\n \n if (int temp = listen(server_fd, 1024) < 0)\n {\n printf(\"listen error: %d \\n\", temp);\n }\n \n struct timeval timeout;\n timeout.tv_sec = 10;\n timeout.tv_usec = 0;\n \n int fd_array[1024] = { 0 };\n int max_count = 0;\n int max_fd = server_fd;\n fd_set fdsr;\n FD_ZERO(&fdsr);\n FD_SET(server_fd, &fdsr);\n \n for ( ; ; ) {\n int ret = select(max_fd+1, &fdsr, nullptr, nullptr, &timeout);\n if (ret < 0)\n break;\n else if (ret == 0)\n continue;\n \n for (int i = 0; i < max_count; i++)\n {\n if (FD_ISSET(fd_array[i], &fdsr))\n {\n int client_fd = fd_array[i];\n char buff[1024] = { 0 };\n ssize_t count = recv(client_fd, buff, sizeof(buff), 0);\n printf(\"server recv count %zd, %s \\n\", count, buff);\n if (count <= 0)\n {\n close(client_fd);\n FD_CLR(fd_array[i], &fdsr);\n fd_array[i] = 0;\n max_count -= 1;\n std::cout << \"one client close\" << std::endl;\n goto over;\n// continue;\n }\n count = send(client_fd, buff, sizeof(buff), 0);\n printf(\"server send count %zd, %s \\n\", count, buff);\n }\n }\n \n if (FD_ISSET(server_fd, &fdsr))\n {\n struct sockaddr_in client_addr;\n socklen_t len = sizeof(client_addr);\n int client_fd = accept(server_fd, (struct sockaddr*)&client_addr, &len);\n// if (client_fd < 0) break;\n fd_array[max_count] = client_fd;\n FD_SET(fd_array[max_count], &fdsr);\n max_count 
+= 1;\n if (client_fd > max_fd)\n max_fd = client_fd;\n\n// char buff[1024] = { 0 };\n// ssize_t count = recv(client_fd, buff, sizeof(buff), 0);\n// printf(\"select server recv count %zd, %s \\n\", count, buff);\n// \n// count = send(client_fd, buff, sizeof(buff), 0);\n// printf(\"select server send count %zd, %s \\n\", count, buff);\n\n }\n// struct sockaddr_in client_addr;\n// socklen_t len = sizeof(client_addr);\n// int client_fd = accept(server_fd, (struct sockaddr*)&client_addr, &len);\n// if (client_fd < 0) break;\n// \n// char buff[1024] = { 0 };\n// ssize_t count = recv(client_fd, buff, sizeof(buff), 0);\n// printf(\"server recv count %zd, %s \\n\", count, buff);\n// \n// count = send(client_fd, buff, sizeof(buff), 0);\n// printf(\"server send count %zd, %s \\n\", count, buff);\n//\n// break;\n }\nover:\n pthread_exit(nullptr);\n return nullptr;\n}\n\nstatic void* poll_server(void* argv)\n{\n std::cout << \"main\" << std::endl;\n int server_fd = socket(AF_INET, SOCK_STREAM, 0);\n \n int flags = fcntl(server_fd, F_GETFL);\n flags |= O_NONBLOCK;\n fcntl(server_fd, F_SETFL, flags);\n \n struct sockaddr_in server_addr;\n bzero(&server_addr, sizeof(server_addr));\n server_addr.sin_family = AF_INET;\n // inet_pton(AF_INET, \"127.0.0.1\", &server_addr.sin_addr);\n server_addr.sin_addr.s_addr = htonl(INADDR_ANY);\n server_addr.sin_port = htons(8080);\n if (int temp = bind(server_fd, (struct sockaddr*)&server_addr, sizeof(server_addr)) < 0)\n {\n printf(\"bind error: %d \\n\", temp);\n }\n \n if (int temp = listen(server_fd, 1024) < 0)\n {\n printf(\"listen error: %d \\n\", temp);\n }\n \n struct timeval timeout;\n timeout.tv_sec = 10;\n timeout.tv_usec = 0;\n \n struct pollfd client[OPEN_MAX];\n for (int i = 0; i < OPEN_MAX; i++) {\n client[i].fd = -1;\n }\n client[0].fd = server_fd;\n client[0].events = POLLRDNORM;\n int max_count = 0;\n for ( ; ; ) {\n int ready = poll(client, max_count + 1, -1);\n if (client[0].revents & POLLRDNORM)\n {\n ready -= 1;\n if (ready <= 0) goto over;\n }\n for (int i = 0; i < max_count; i++) {\n \n }\n }\nover:\n pthread_exit(nullptr);\n return nullptr;\n}\n\nstatic void* kqueue_server(void* argv)\n{\n\tstruct kevent change;\n\tstruct kevent event;\n\tint kq = kqueue();\n\tEV_SET(&change, 1, EVFILT_TIMER, EV_ADD | EV_ENABLE, 0, 5000, 0);\n\tfor ( ; ; )\n\t{\n\t\tint nev = kevent(kq, &change, 1, &event, 1, NULL);\n\t}\n pthread_exit(nullptr);\n return nullptr;\n}\n\nvoid* server(void* argv)\n{\n // setsockopt( socket, SOL_SOCKET, SO_SNDTIMEO, ( char * )&nNetTimeout, sizeof( int ) );\n // setsockopt( socket, SOL_SOCKET, SO_RCVTIMEO, ( char * )&nNetTimeout, sizeof( int ) );\n return poll_server(argv);\n return select_server(argv);\n return normal_server(argv);\n}\n"
},
{
"alpha_fraction": 0.5463108420372009,
"alphanum_fraction": 0.5714285969734192,
"avg_line_length": 23.056604385375977,
"blob_id": "32d62278fa1285acd754a31f09036f3bfca056fd",
"content_id": "4bd6f8cf878423c4df36c6a27c50a013ae9da581",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1275,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 53,
"path": "/nil/client.cpp",
"repo_name": "aceshine/Gilgamish",
"src_encoding": "UTF-8",
"text": "//\n// client.cpp\n// nil\n//\n// Created by nil on 11/21/15.\n// Copyright © 2015 nil. All rights reserved.\n//\n\n#include \"client.hpp\"\n\n#include <pthread.h>\n#include <unistd.h>\n#include <sys/socket.h>\n#include <arpa/inet.h>\n#include <netinet/in.h>\n\n#include <iostream>\n\nvoid* client(void* argv)\n{\n sleep(8);\n std::cout << \"client main\" << std::endl;\n int fd = socket(AF_INET, SOCK_STREAM, 0);\n \n struct sockaddr_in addr;\n bzero(&addr, sizeof(addr));\n addr.sin_family = AF_INET;\n // inet_pton(AF_INET, \"127.0.0.1\", &addr.sin_addr);\n addr.sin_addr.s_addr = htonl(INADDR_ANY);\n addr.sin_port = htons(8080);\n \n if (int temp = connect(fd, (struct sockaddr*)&addr, sizeof(addr)) < 0)\n {\n printf(\"connect error: %d \\n\", temp);\n }\n \n char text[] = \"<this is test>\";\n ssize_t count = send(fd, text, sizeof(text), 0);\n printf(\"send count %zd, %s \\n\", count, text);\n \n char buff[1024] = { 0 };\n count = recv(fd, buff, sizeof(buff), 0);\n printf(\"recv count %zd, %s \\n\", count, buff);\n \n count = send(fd, text, sizeof(text), 0);\n printf(\"send count %zd, %s \\n\", count, text);\n \n sleep(8);\n close(fd);\n std::cout << \"client over\" << std::endl;\n pthread_exit(nullptr);\n return nullptr;\n}"
},
{
"alpha_fraction": 0.4708879292011261,
"alphanum_fraction": 0.5109170079231262,
"avg_line_length": 23,
"blob_id": "7d70c37afc52f8d70e1c5a384ddcd6a020f0b368",
"content_id": "2b6dc95def9e7029a163e5050ebc97535ea077b4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1374,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 57,
"path": "/CollectiveIntelligence/recommandations.py",
"repo_name": "aceshine/Gilgamish",
"src_encoding": "UTF-8",
"text": "from math import sqrt\n\n# A dictionary of movie critics add their ratings of a small\n# set of movies\ncritics = {\n 'Lisa Rose': {'Lady in the Water': 2.5,\n 'Snakes on a Plane': 3.5,\n 'Just My Luck': 3.0,\n 'Superman Returns': 3.5,\n 'You, Me and Dupree': 2.5,\n 'The Night Listener': 3.0},\n 'Gene Seymour': {},\n\n}\n\ndef sim_distance(prefs, person1, person2):\n si = {}\n for item in prefs[person1]:\n if item in prefs[person2]:\n si[item] = 1\n\n if len(si) == 0:\n return 0\n\n sum_of_squares = sum([\n pow(prefs[person1][item] - prefs[person2][item], 2)\n for item in prefs[person1] if item in prefs[person2]\n ])\n\n return 1 / (1 + sum_of_squares)\n\ndef sim_pearson(prefs, p1, p2):\n si = {}\n for item in prefs[p1]:\n if item in prefs[p2]:\n si[item] = 1\n\n n = len(si)\n\n if n == 0:\n return 0\n\n sum1 = sum([prefs[p1][it] for it in si])\n sum2 = sum([prefs[p2][it] for it in si])\n\n sum1Sq = sum([pow(prefs[p1][it],2) for it in si])\n sum2Sq = sum([pow(prefs[p2][it],2) for it in si])\n\n pSum = sum([prefs[p1][it]*prefs[p2][it] for it in si])\n\n num = pSum-(sum1*sum2/n)\n den = sqrt((sum1Sq-pow(sum1,2)/n)*(sum2Sq-pow(sum2,2)/n))\n if den == 0:\n return 0\n\n r = num/den\n return r\n\n \n"
},
{
"alpha_fraction": 0.6122449040412903,
"alphanum_fraction": 0.6224489808082581,
"avg_line_length": 13,
"blob_id": "2066735b39225db16c9d39e45fc7360f252cbdd9",
"content_id": "db67501e4ab5fb9cb8997bce024851f9e2daaa0c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 98,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 7,
"path": "/dynamic.cpp",
"repo_name": "aceshine/Gilgamish",
"src_encoding": "UTF-8",
"text": "#include <iostream>\n\nint dynamic_main()\n{\n\tstd::cout << \"dynamic_main\" << std::endl;\n\treturn 0;\n}\n"
},
{
"alpha_fraction": 0.5610465407371521,
"alphanum_fraction": 0.5784883499145508,
"avg_line_length": 18.657142639160156,
"blob_id": "4ae9cb2c550938c956f32ee7f36215ce5d51125d",
"content_id": "b8968f232f7b61d0b905a32a96334dd16cfedfc5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 689,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 35,
"path": "/nil/main.cpp",
"repo_name": "aceshine/Gilgamish",
"src_encoding": "UTF-8",
"text": "//\n// main.cpp\n// nil\n//\n// Created by nil on 11/21/15.\n// Copyright © 2015 nil. All rights reserved.\n//\n\n#include <pthread.h>\n#include <unistd.h>\n\n#include <iostream>\n\nextern void* server(void* argv);\nextern void* client(void* argv);\n\nint main(int argc, const char * argv[]) {\n // insert code here...\n std::cout << \"Hello, World!\\n\";\n pthread_t server_id, client_id;\n pthread_create(&server_id, nullptr, server, nullptr);\n pthread_create(&client_id, nullptr, client, nullptr);\n \n void* result;\n pthread_join(server_id, &result);\n \n for ( ; ; )\n {\n sleep(8);\n break;\n }\n \n std::cout << \"main over\" << std::endl;\n return 0;\n}\n"
},
{
"alpha_fraction": 0.48764175176620483,
"alphanum_fraction": 0.5013085007667542,
"avg_line_length": 26.08661460876465,
"blob_id": "4ad961b75952f229362e226ccfa2b642cb12fc29",
"content_id": "650d397833dd75c3265f33d2004ea9c2dc56d1c6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 3440,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 127,
"path": "/nil/kqueue.cpp",
"repo_name": "aceshine/Gilgamish",
"src_encoding": "UTF-8",
"text": "//\n// kqueue.cpp\n// nil\n//\n// Created by nil on 11/22/15.\n// Copyright © 2015 nil. All rights reserved.\n//\n\n#include \"kqueue.hpp\"\n\n#include <sys/event.h>\n#include <sys/socket.h>\n#include <sys/time.h>\n#include <netdb.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <unistd.h>\n\n#define BUFSIZE 1024\n\n/* function prototypes */\nvoid diep(const char *s);\nint tcpopen(const char *host, int port);\nvoid sendbuftosck(int sckfd, const char *buf, int len);\n\nint main(int argc, char *argv[])\n{\n struct kevent chlist[2]; /* events we want to monitor */\n struct kevent evlist[2]; /* events that were triggered */\n char buf[BUFSIZE];\n int sckfd, kq, nev, i;\n \n /* check argument count */\n if (argc != 3) {\n fprintf(stderr, \"usage: %s host port\\n\", argv[0]);\n exit(EXIT_FAILURE);\n }\n \n /* open a connection to a host:port pair */\n sckfd = tcpopen(argv[1], atoi(argv[2]));\n \n /* create a new kernel event queue */\n if ((kq = kqueue()) == -1)\n diep(\"kqueue()\");\n \n /* initialise kevent structures */\n EV_SET(&chlist[0], sckfd, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, 0);\n EV_SET(&chlist[1], fileno(stdin), EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, 0);\n \n /* loop forever */\n for (;;) {\n nev = kevent(kq, chlist, 2, evlist, 2, NULL);\n \n if (nev < 0)\n diep(\"kevent()\");\n \n else if (nev > 0) {\n if (evlist[0].flags & EV_EOF) /* read direction of socket has shutdown */\n exit(EXIT_FAILURE);\n \n for (i = 0; i < nev; i++) {\n if (evlist[i].flags & EV_ERROR) { /* report errors */\n fprintf(stderr, \"EV_ERROR: %s\\n\", strerror(evlist[i].data));\n exit(EXIT_FAILURE);\n }\n \n if (evlist[i].ident == sckfd) { /* we have data from the host */\n memset(buf, 0, BUFSIZE);\n if (read(sckfd, buf, BUFSIZE) < 0)\n diep(\"read()\");\n fputs(buf, stdout);\n }\n \n else if (evlist[i].ident == fileno(stdin)) { /* we have data from stdin */\n memset(buf, 0, BUFSIZE);\n fgets(buf, BUFSIZE, stdin);\n sendbuftosck(sckfd, buf, strlen(buf));\n }\n }\n }\n }\n \n close(kq);\n return EXIT_SUCCESS;\n}\n\nvoid diep(const char *s)\n{\n perror(s);\n exit(EXIT_FAILURE);\n}\n\nint tcpopen(const char *host, int port)\n{\n struct sockaddr_in server;\n struct hostent *hp;\n int sckfd;\n \n if ((hp = gethostbyname(host)) == NULL)\n diep(\"gethostbyname()\");\n \n if ((sckfd = socket(PF_INET, SOCK_STREAM, 0)) < 0)\n diep(\"socket()\");\n \n server.sin_family = AF_INET;\n server.sin_port = htons(port);\n server.sin_addr = *((struct in_addr *)hp->h_addr);\n memset(&(server.sin_zero), 0, 8);\n \n if (connect(sckfd, (struct sockaddr *)&server, sizeof(struct sockaddr)) < 0)\n diep(\"connect()\");\n \n return sckfd;\n}\n\nvoid sendbuftosck(int sckfd, const char *buf, int len)\n{\n int bytessent, pos;\n \n pos = 0;\n do {\n if ((bytessent = send(sckfd, buf + pos, len - pos, 0)) < 0)\n diep(\"send()\");\n pos += bytessent;\n } while (bytessent > 0);\n}"
},
{
"alpha_fraction": 0.6192660331726074,
"alphanum_fraction": 0.6238532066345215,
"avg_line_length": 14.571428298950195,
"blob_id": "d0707b78c5d0dfc7b9a69e88b081d5160c388e2e",
"content_id": "d20ae123c62dbb63a5208c9fdcdd5a3b9bdb6034",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 218,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 14,
"path": "/main.cpp",
"repo_name": "aceshine/Gilgamish",
"src_encoding": "UTF-8",
"text": "#include <iostream>\n\n#include \"main.hpp\"\n\nextern int static_main();\nextern int dynamic_main();\n\nint main(int argc, char* argv[])\n{\n std::cout << \"main\" << std::endl;\n\tstatic_main();\n\tdynamic_main();\n return 0;\n}\n"
},
{
"alpha_fraction": 0.6063829660415649,
"alphanum_fraction": 0.6453900933265686,
"avg_line_length": 19.14285659790039,
"blob_id": "3cc678ed82a7fb0fe9de095c120409fe080bfad6",
"content_id": "596ef89e05aeb35f74e7fde56ffb7a3f5242ce71",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 282,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 14,
"path": "/unp/server.cpp",
"repo_name": "aceshine/Gilgamish",
"src_encoding": "UTF-8",
"text": "#include <arpa/inet.h>\n#include <netinet/in.h>\n\n#include <stdio.h>\n\nint main(int argc, char* argv[])\n{\n\tsockaddr_in addr;\n\taddr.sin_family = AF_INET;\n\taddr.sin_port = 8080;\n\tinet_pton(AF_INET, \"127.0.0.1\", (void*)&addr.sin_addr);\n\tprintf(\"%d\\n\", addr.sin_addr.s_addr);\n\treturn 0;\n}\n"
},
{
"alpha_fraction": 0.5972222089767456,
"alphanum_fraction": 0.613811731338501,
"avg_line_length": 20.781513214111328,
"blob_id": "4c29119e4a89f2a23291fdef0a891f5f9cf6bd8e",
"content_id": "fba71f2af6c427151e282127daa7cc86d40519d8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2592,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 119,
"path": "/interface/server.cpp",
"repo_name": "aceshine/Gilgamish",
"src_encoding": "UTF-8",
"text": "#include <iostream>\n\n#include <pthread.h>\n#include <unistd.h>\n\n#include <fcntl.h>\n\n#include <sys/types.h>\n#include <sys/socket.h>\n#include <netinet/in.h>\n#include <arpa/inet.h>\n\nstatic void* thread_function(void* args)\n{\n\tstd::cout << \"thread_function\" << std::endl;\n\tstd::string test(static_cast<char*>(args));\n\tprintf(\"args is %s\\n\", (char*)args);\n\t// pthread_exit((void*)0);\n\tpthread_exit(NULL);\n\treturn NULL;\n}\n\nstatic void* server_thread(void* args)\n{\n\tint sock_fd = socket(AF_INET, SOCK_STREAM, 0);\n\n\tint flags = fcntl(sock_fd, F_GETFL);\n\tflags |= O_NONBLOCK;\n\tfcntl(sock_fd, F_SETFL, flags);\n\n\tstruct sockaddr_in server_addr;\n\t// memset(server_addr, 0, sizeof(server_addr));\n\tserver_addr.sin_family = AF_INET;\n\tserver_addr.sin_port = htons(8080);\n\tserver_addr.sin_addr.s_addr = INADDR_ANY;\n\tmemset(server_addr.sin_zero, 0, sizeof(server_addr.sin_zero));\n\n\tbind(sock_fd, (struct sockaddr*)(&server_addr), sizeof(server_addr));\n\n\tlisten(sock_fd, 1024);\n\n\tstruct timeval timeout;\n\ttimeout.tv_sec = 10;\n\ttimeout.tv_usec = 0;\n\n\tint fd_A[1024] = { 0 };\n\tint conn_amount = 0;\n\tfd_set fdsr;\n\tint maxsock = sock_fd;\n\tFD_ZERO(&fdsr);\n\tFD_SET(sock_fd, &fdsr);\n\t\n\tfor (;;)\n\t{\n\t\tint ret = select(maxsock + 1, &fdsr, NULL, NULL, &timeout);\n\t\tif (ret < 0)\n\t\t\tbreak;\n\t\telse if (ret == 0)\n\t\t\tcontinue;\n\n\t\tfor (int i = 0; i < conn_amount; i++)\n\t\t{\n\t\t\tif (FD_ISSET(fd_A[i], &fdsr))\n\t\t\t{\n\t\t\t\tchar buf[1024] = { 0 };\n\t\t\t\tchar str[] = \"Good,very nice!\\n\";\n\t\t\t\tint ret = recv(fd_A[i], buf, sizeof(buf)/sizeof(char), 0);\n\t\t\t\tsend(fd_A[i], str, sizeof(str)/sizeof(char)+1, 0);\n\t\t\t\tif (ret <= 0)\n\t\t\t\t{\n\t\t\t\t\tclose(fd_A[i]);\n\t\t\t\t\tFD_CLR(fd_A[i], &fdsr);\n\t\t\t\t\tfd_A[i] = 0;\n\t\t\t\t\tconn_amount -= 1;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif (FD_ISSET(sock_fd, &fdsr))\n\t\t{\n\t\t\tstruct sockaddr_in client_addr;\n\t\t\tsocklen_t s = sizeof(client_addr);\n\t\t\tint client = accept(sock_fd, (struct sockaddr*)(&client_addr), &s);\n\t\t\tif (conn_amount < 1024)\n\t\t\t{\n\t\t\t\tconn_amount += 1;\n\t\t\t\tfd_A[conn_amount] = client;\n\t\t\t\tFD_SET(fd_A[conn_amount], &fdsr);\n\t\t\t}\n\t\t}\n\t}\n\n\tpthread_exit(NULL);\n\treturn NULL;\n}\n\nint main(int argc, char* argv[])\n{\n\tstd::cout << \"server main\" << std::endl;\n\n\tpthread_t tid;\n\tvoid* thread_result;\n\n\tchar args[] = \"test\";\n\tpthread_create(&tid, NULL, thread_function, (void*)args);\n\t// sleep(1);\n\tpthread_join(tid, &thread_result);\n\tif (thread_result == NULL)\n\t\tprintf(\"thread over\\n\");\n\t// printf(\"thread result is %d\\n\", *((int*)thread_result));\n\n\tpthread_create(&tid, NULL, server_thread, NULL);\n\tpthread_join(tid, &thread_result);\n\tif (thread_result == NULL)\n\t\tprintf(\"server over\\n\");\n\n\tprintf(\"server over\\n\");\n\treturn 0;\n}\n"
}
] | 24 |
advatar/indy-sdk
|
https://github.com/advatar/indy-sdk
|
16ef447b14c82aa3291e32200779711111024741
|
5be07611886e4a8f4062ed2c4275af2f67478547
|
c478a5054f4a34335684159a434ca5f1d70b5558
|
refs/heads/master
| 2021-07-10T11:33:36.417126 | 2017-07-07T19:51:56 | 2017-07-07T19:51:56 | 96,631,745 | 0 | 0 |
Apache-2.0
| 2017-07-08T16:48:53 | 2017-07-08T16:48:57 | 2021-03-10T17:46:33 |
Rust
|
[
{
"alpha_fraction": 0.8487805128097534,
"alphanum_fraction": 0.8487805128097534,
"avg_line_length": 28.285715103149414,
"blob_id": "756cd17420972ea666cd004ae3aac190dadd1ef2",
"content_id": "a4c3e3c060c812272ff7d6d36a453a7721a025c8",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 205,
"license_type": "permissive",
"max_line_length": 38,
"num_lines": 7,
"path": "/wrappers/python/sovrin/__init__.py",
"repo_name": "advatar/indy-sdk",
"src_encoding": "UTF-8",
"text": "from sovrin.error import SovrinError\n\nfrom sovrin.anoncreds import Anoncreds\nfrom sovrin.ledger import Ledger\nfrom sovrin.pool import Pool\nfrom sovrin.signus import Signus\nfrom sovrin.wallet import Wallet\n"
},
{
"alpha_fraction": 0.4910337030887604,
"alphanum_fraction": 0.620833694934845,
"avg_line_length": 34.095550537109375,
"blob_id": "bd15deaa9e3a44d757d0d232d5a2635da8ebc74b",
"content_id": "ecc238e545c6ad80ae42d6737cc095b670c1f430",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Rust",
"length_bytes": 21302,
"license_type": "permissive",
"max_line_length": 874,
"num_lines": 607,
"path": "/src/utils/crypto/pair/milagro.rs",
"repo_name": "advatar/indy-sdk",
"src_encoding": "UTF-8",
"text": "extern crate milagro_crypto;\nextern crate serde;\n\nuse self::milagro_crypto::big::wrappers::{\n CURVE_Gx,\n CURVE_Gy,\n CURVE_Order,\n CURVE_Pxa,\n CURVE_Pya,\n CURVE_Pxb,\n CURVE_Pyb,\n BIG\n};\nuse self::milagro_crypto::ecp::wrappers::ECP;\nuse self::milagro_crypto::ecp2::wrappers::ECP2;\nuse self::milagro_crypto::fp12::wrappers::FP12;\nuse self::milagro_crypto::fp2::wrappers::FP2;\nuse self::milagro_crypto::pair::PAIR;\n\nuse errors::common::CommonError;\nuse services::anoncreds::helpers::BytesView;\n\nuse self::milagro_crypto::randapi::Random;\n\nextern crate rand;\n\nuse self::rand::os::{OsRng};\nuse self::rand::Rng;\nuse self::serde::ser::{Serialize, Serializer, Error as SError};\nuse self::serde::de::{Deserialize, Deserializer, Visitor, Error as DError};\nuse std::fmt;\n\nfn random_mod_order() -> Result<BIG, CommonError> {\n let mut seed = vec![0; 32];\n let mut os_rng = OsRng::new().unwrap();\n os_rng.fill_bytes(&mut seed.as_mut_slice());\n let mut rng = Random::new(&seed);\n Ok(BIG::randomnum(&unsafe { CURVE_Order }.clone(), &mut rng))\n}\n\n#[derive(Copy, Clone, PartialEq, Debug)]\npub struct PointG1 {\n point: ECP\n}\n\nimpl PointG1 {\n pub fn new() -> Result<PointG1, CommonError> {\n // generate random point from the group G1\n let mut gen_g1: ECP = ECP::new_bigs(&unsafe { CURVE_Gx }.clone(), &unsafe { CURVE_Gy }.clone());\n\n ECP::mul(&mut gen_g1, &random_mod_order()?);\n Ok(PointG1 {\n point: gen_g1\n })\n }\n\n pub fn new_inf() -> Result<PointG1, CommonError> {\n let mut r = ECP::default();\n ECP::inf(&mut r);\n Ok(PointG1 {\n point: r\n })\n }\n\n pub fn mul(&self, e: &GroupOrderElement) -> Result<PointG1, CommonError> {\n let mut r = self.point;\n ECP::mul(&mut r, &e.bn);\n Ok(PointG1 {\n point: r\n })\n }\n\n pub fn add(&self, q: &PointG1) -> Result<PointG1, CommonError> {\n let mut r = self.point;\n ECP::add(&mut r, &q.point);\n Ok(PointG1 {\n point: r\n })\n }\n\n pub fn sub(&self, q: &PointG1) -> Result<PointG1, CommonError> {\n let mut r = self.point;\n ECP::sub(&mut r, &q.point);\n Ok(PointG1 {\n point: r\n })\n }\n\n pub fn neg(&self) -> Result<PointG1, CommonError> {\n let mut r = self.point;\n ECP::neg(&mut r);\n Ok(PointG1 {\n point: r\n })\n }\n\n pub fn to_string(&self) -> Result<String, CommonError> {\n Ok(ECP::to_hex(&self.point))\n }\n\n pub fn from_string(str: &str) -> Result<PointG1, CommonError> {\n Ok(PointG1 {\n point: ECP::from_hex(str.to_string())\n })\n }\n\n pub fn to_bytes(&self) -> Result<Vec<u8>, CommonError> {\n let str = self.to_string()?;\n\n Ok(str.into_bytes())\n }\n\n pub fn from_bytes(b: &[u8]) -> Result<PointG1, CommonError> {\n unimplemented!();\n }\n}\n\nimpl BytesView for PointG1 {\n fn to_bytes(&self) -> Result<Vec<u8>, CommonError> {\n Ok(self.to_bytes()?)\n }\n}\n\nimpl Serialize for PointG1 {\n fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer {\n serializer.serialize_newtype_struct(\"PointG1\", &self.to_string().map_err(SError::custom)?)\n }\n}\n\nimpl<'a> Deserialize<'a> for PointG1 {\n fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'a> {\n struct PointG1Visitor;\n\n impl<'a> Visitor<'a> for PointG1Visitor {\n type Value = PointG1;\n\n fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {\n formatter.write_str(\"expected PointG1\")\n }\n\n fn visit_str<E>(self, value: &str) -> Result<PointG1, E>\n where E: DError\n {\n Ok(PointG1::from_string(value).map_err(DError::custom)?)\n }\n }\n\n deserializer.deserialize_str(PointG1Visitor)\n 
}\n}\n\n#[derive(Copy, Clone, PartialEq, Debug)]\npub struct PointG2 {\n point: ECP2\n}\n\nimpl PointG2 {\n pub fn new() -> Result<PointG2, CommonError> {\n let mut point_x = FP2::default();\n let mut point_y = FP2::default();\n let mut point_z = FP2::default();\n FP2::from_BIGs(&mut point_x, &unsafe { CURVE_Pxa }.clone(), &unsafe { CURVE_Pxb }.clone());\n FP2::from_BIGs(&mut point_y, &unsafe { CURVE_Pya }.clone(), &unsafe { CURVE_Pyb }.clone());\n FP2::from_BIGs(&mut point_z, &BIG::from_hex(\"1\".to_string()), &BIG::from_hex(\"0\".to_string()));\n let mut gen_g2: ECP2 = ECP2::new_fp2s(point_x, point_y, point_z);\n\n ECP2::mul(&mut gen_g2, &random_mod_order()?);\n Ok(PointG2 {\n point: gen_g2\n })\n }\n\n pub fn new_inf() -> Result<PointG2, CommonError> {\n let mut point = ECP2::default();\n ECP2::inf(&mut point);\n\n Ok(PointG2 {\n point: point\n })\n }\n\n pub fn add(&self, q: &PointG2) -> Result<PointG2, CommonError> {\n let mut r = self.point;\n ECP2::add(&mut r, &q.point);\n Ok(PointG2 {\n point: r\n })\n }\n\n pub fn sub(&self, q: &PointG2) -> Result<PointG2, CommonError> {\n let mut r = self.point;\n ECP2::sub(&mut r, &q.point);\n Ok(PointG2 {\n point: r\n })\n }\n\n pub fn mul(&self, e: &GroupOrderElement) -> Result<PointG2, CommonError> {\n let mut r = self.point;\n ECP2::mul(&mut r, &e.bn);\n Ok(PointG2 {\n point: r\n })\n }\n\n pub fn to_string(&self) -> Result<String, CommonError> {\n Ok(ECP2::to_hex(&self.point))\n }\n\n pub fn from_string(str: &str) -> Result<PointG2, CommonError> {\n Ok(PointG2 {\n point: ECP2::from_hex(str.to_string())\n })\n }\n\n pub fn to_bytes(&self) -> Result<Vec<u8>, CommonError> {\n let str = self.to_string()?;\n\n Ok(str.into_bytes())\n }\n\n pub fn from_bytes(b: &[u8]) -> Result<PointG2, CommonError> {\n unimplemented!();\n }\n}\n\nimpl Serialize for PointG2 {\n fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer {\n serializer.serialize_newtype_struct(\"PointG2\", &self.to_string().map_err(SError::custom)?)\n }\n}\n\nimpl<'a> Deserialize<'a> for PointG2 {\n fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'a> {\n struct PointG2Visitor;\n\n impl<'a> Visitor<'a> for PointG2Visitor {\n type Value = PointG2;\n\n fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {\n formatter.write_str(\"expected PointG2\")\n }\n\n fn visit_str<E>(self, value: &str) -> Result<PointG2, E>\n where E: DError\n {\n Ok(PointG2::from_string(value).map_err(DError::custom)?)\n }\n }\n\n deserializer.deserialize_str(PointG2Visitor)\n }\n}\n\n#[derive(Debug, Copy, Clone, PartialEq)]\npub struct GroupOrderElement {\n bn: BIG\n}\n\nimpl GroupOrderElement {\n pub fn new() -> Result<GroupOrderElement, CommonError> {\n // returns random element in 0, ..., GroupOrder-1\n Ok(GroupOrderElement {\n bn: random_mod_order()?\n })\n }\n\n pub fn pow_mod(&self, e: &GroupOrderElement) -> Result<GroupOrderElement, CommonError> {\n let mut base = self.bn;\n let mut pow = e.bn;\n Ok(GroupOrderElement {\n bn: BIG::powmod(&mut base, &mut pow, &unsafe { CURVE_Order }.clone())\n })\n }\n\n pub fn add_mod(&self, r: &GroupOrderElement) -> Result<GroupOrderElement, CommonError> {\n let mut sum = BIG::add(&self.bn, &r.bn);\n BIG::rmod(&mut sum, &unsafe { CURVE_Order }.clone());\n Ok(GroupOrderElement {\n bn: sum\n })\n }\n\n pub fn sub_mod(&self, r: &GroupOrderElement) -> Result<GroupOrderElement, CommonError> {\n // need to use modneg if sub is negative\n let mut sub = BIG::sub(&self.bn, &r.bn);\n if sub < BIG::default() 
{\n let mut r: BIG = BIG::default();\n BIG::modneg(&mut r, &mut sub, &unsafe { CURVE_Order }.clone());\n Ok(GroupOrderElement {\n bn: r\n })\n } else {\n Ok(GroupOrderElement {\n bn: sub\n })\n }\n }\n\n pub fn mul_mod(&self, r: &GroupOrderElement) -> Result<GroupOrderElement, CommonError> {\n Ok(GroupOrderElement {\n bn: BIG::modmul(&self.bn, &r.bn, &unsafe { CURVE_Order }.clone())\n })\n }\n\n pub fn inverse(&self) -> Result<GroupOrderElement, CommonError> {\n Ok(GroupOrderElement {\n bn: BIG::invmodp(&self.bn, &unsafe { CURVE_Order }.clone())\n })\n }\n\n pub fn mod_neg(&self) -> Result<GroupOrderElement, CommonError> {\n let mut r: BIG = BIG::default();\n let mut bn = self.bn;\n BIG::modneg(&mut r, &mut bn, &unsafe { CURVE_Order }.clone());\n Ok(GroupOrderElement {\n bn: r\n })\n }\n\n pub fn to_string(&self) -> Result<String, CommonError> {\n Ok(BIG::to_hex(&self.bn))\n }\n\n pub fn from_string(str: &str) -> Result<GroupOrderElement, CommonError> {\n Ok(GroupOrderElement {\n bn: BIG::from_hex(str.to_string())\n })\n }\n\n pub fn to_bytes(&self) -> Result<Vec<u8>, CommonError> {\n let mut vec: [u8; 32] = [0; 32];\n BIG::toBytes(&mut vec, &self.bn);\n Ok(vec.to_vec())\n }\n\n pub fn from_bytes(b: &[u8]) -> Result<GroupOrderElement, CommonError> {\n Ok(\n GroupOrderElement {\n bn: BIG::fromBytes(b)\n }\n )\n }\n}\n\nimpl Serialize for GroupOrderElement {\n fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer {\n serializer.serialize_newtype_struct(\"GroupOrderElement\", &self.to_string().map_err(SError::custom)?)\n }\n}\n\nimpl<'a> Deserialize<'a> for GroupOrderElement {\n fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'a> {\n struct GroupOrderElementVisitor;\n\n impl<'a> Visitor<'a> for GroupOrderElementVisitor {\n type Value = GroupOrderElement;\n\n fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {\n formatter.write_str(\"expected GroupOrderElement\")\n }\n\n fn visit_str<E>(self, value: &str) -> Result<GroupOrderElement, E>\n where E: DError\n {\n Ok(GroupOrderElement::from_string(value).map_err(DError::custom)?)\n }\n }\n\n deserializer.deserialize_str(GroupOrderElementVisitor)\n }\n}\n\n#[derive(Debug, Copy, Clone, PartialEq)]\npub struct Pair {\n pair: FP12\n}\n\nimpl Pair {\n pub fn pair(p: &PointG1, q: &PointG2) -> Result<Pair, CommonError> {\n let mut pair = FP12::default();\n let mut p_new = *p;\n let mut q_new = *q;\n\n PAIR::ate(&mut pair, &mut q_new.point, &mut p_new.point);\n Ok(Pair {\n pair: pair\n })\n }\n\n pub fn mul(&self, b: &Pair) -> Result<Pair, CommonError> {\n let mut pair = self.pair;\n FP12::mul(&mut pair, &b.pair);\n Ok(Pair {\n pair: pair\n })\n }\n\n pub fn pow(&self, b: &GroupOrderElement) -> Result<Pair, CommonError> {\n let mut r = FP12::default();\n FP12::pow(&mut r, &self.pair, &b.bn);\n Ok(Pair {\n pair: r\n })\n }\n\n pub fn inverse(&self) -> Result<Pair, CommonError> {\n let mut r = FP12::default();\n FP12::inv(&mut r, &self.pair);\n Ok(Pair {\n pair: r\n })\n }\n\n pub fn to_string(&self) -> Result<String, CommonError> {\n Ok(FP12::to_hex(&self.pair))\n }\n\n pub fn from_string(str: &str) -> Result<Pair, CommonError> {\n Ok(Pair {\n pair: FP12::from_hex(str.to_string())\n })\n }\n\n pub fn to_bytes(&self) -> Result<Vec<u8>, CommonError> {\n unimplemented!();\n }\n\n pub fn from_bytes(b: &[u8]) -> Result<Pair, CommonError> {\n unimplemented!();\n }\n}\n\nimpl BytesView for Pair {\n fn to_bytes(&self) -> Result<Vec<u8>, CommonError> {\n Ok(self.to_bytes()?)\n 
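// Added note: self.to_bytes() above resolves to the inherent Pair::to_bytes,\n        // which is unimplemented!(), so calling this trait method currently panics\n        // instead of returning an Err.\n    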
}\n}\n\nimpl Serialize for Pair {\n fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer {\n serializer.serialize_newtype_struct(\"Pair\", &self.to_string().map_err(SError::custom)?)\n }\n}\n\nimpl BytesView for GroupOrderElement {\n fn to_bytes(&self) -> Result<Vec<u8>, CommonError> {\n Ok(self.to_bytes()?)\n }\n}\n\nimpl<'a> Deserialize<'a> for Pair {\n fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'a> {\n struct PairVisitor;\n\n impl<'a> Visitor<'a> for PairVisitor {\n type Value = Pair;\n\n fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {\n formatter.write_str(\"expected Pair\")\n }\n\n fn visit_str<E>(self, value: &str) -> Result<Pair, E>\n where E: DError\n {\n Ok(Pair::from_string(value).map_err(DError::custom)?)\n }\n }\n\n deserializer.deserialize_str(PairVisitor)\n }\n}\n\n#[cfg(test)]\nmod tests {\n use super::*;\n\n extern crate serde_json;\n\n #[derive(Serialize, Deserialize, Debug, PartialEq)]\n struct TestGroupOrderElementStructure {\n field: GroupOrderElement\n }\n\n #[derive(Serialize, Deserialize, Debug, PartialEq)]\n struct TestPointG1Structure {\n field: PointG1\n }\n\n #[derive(Serialize, Deserialize, Debug, PartialEq)]\n struct TestPointG2Structure {\n field: PointG2\n }\n\n #[derive(Serialize, Deserialize, Debug, PartialEq)]\n struct TestPairStructure {\n field: Pair\n }\n\n #[test]\n fn serialize_works_for_group_order_element() {\n let structure = TestGroupOrderElementStructure {\n field: GroupOrderElement::from_string(\"C4D05C20EC7BAC 2FBB155341552D 6AA4C1EA344257 E84BFFBF1408B3 194D3FBA\").unwrap()\n };\n let str = r#\"{\"field\":\"C4D05C20EC7BAC 2FBB155341552D 6AA4C1EA344257 E84BFFBF1408B3 194D3FBA\"}\"#;\n\n let serialized = serde_json::to_string(&structure).unwrap();\n assert_eq!(str, serialized);\n }\n\n #[test]\n fn deserialize_works_for_group_order_element() {\n let structure = TestGroupOrderElementStructure {\n field: GroupOrderElement::from_string(\"C4D05C20EC7BAC 2FBB155341552D 6AA4C1EA344257 E84BFFBF1408B3 194D3FBA\").unwrap()\n };\n let str = r#\"{\"field\":\"C4D05C20EC7BAC 2FBB155341552D 6AA4C1EA344257 E84BFFBF1408B3 194D3FBA\"}\"#;\n let deserialized: TestGroupOrderElementStructure = serde_json::from_str(&str).unwrap();\n\n assert_eq!(structure, deserialized);\n }\n\n #[test]\n fn serialize_works_for_point_g1() {\n let structure = TestPointG1Structure {\n field: PointG1::from_string(\"1 0 0 0 0 0 1 0 0 0 0 1 0 0 0 0\").unwrap()\n };\n let str = r#\"{\"field\":\"1 0 0 0 0 0 1 0 0 0 0 1 0 0 0 0\"}\"#;\n\n let serialized = serde_json::to_string(&structure).unwrap();\n assert_eq!(str, serialized);\n }\n\n #[test]\n fn deserialize_works_for_point_g1() {\n let structure = TestPointG1Structure {\n field: PointG1::from_string(\"1 0 0 0 0 0 1 0 0 0 0 1 0 0 0 0\").unwrap()\n };\n\n let str = r#\"{\"field\":\"1 0 0 0 0 0 1 0 0 0 0 1 0 0 0 0\"}\"#;\n let deserialized: TestPointG1Structure = serde_json::from_str(&str).unwrap();\n\n assert_eq!(structure, deserialized);\n }\n\n #[test]\n fn serialize_works_for_point_g2() {\n let structure = TestPointG2Structure {\n field: PointG2::from_string(\"0 53104BD1A92BE9 4CBF937B44DAA 1D191B0496A14B 276529199F4D1B 4A996C2 3B2712E2EC37FF CF7C4390E8071C EF8C973AD5EDAA 547DD84375861 169CBAC9 5224321CF032B7 B9D2063515A045 9833D500F6EEBE DB9D00AED36ED2 7916166 22D7513761F614 4CD0E53D855FC3 950F3C38908717 A0261AC49D33A0 1B221531 A96F211585EDB F2942F28DB526F 2FF74229029FCD F4EABE779E75E4 3C3FED4 0 0 0 0 0\").unwrap()\n };\n\n let str = 
r#\"{\"field\":\"0 53104BD1A92BE9 4CBF937B44DAA 1D191B0496A14B 276529199F4D1B 4A996C2 3B2712E2EC37FF CF7C4390E8071C EF8C973AD5EDAA 547DD84375861 169CBAC9 5224321CF032B7 B9D2063515A045 9833D500F6EEBE DB9D00AED36ED2 7916166 22D7513761F614 4CD0E53D855FC3 950F3C38908717 A0261AC49D33A0 1B221531 A96F211585EDB F2942F28DB526F 2FF74229029FCD F4EABE779E75E4 3C3FED4 0 0 0 0 0\"}\"#;\n let serialized = serde_json::to_string(&structure).unwrap();\n\n assert_eq!(str, serialized);\n }\n\n #[test]\n fn deserialize_works_for_point_g2() {\n let structure = TestPointG2Structure {\n field: PointG2::from_string(\"0 53104BD1A92BE9 4CBF937B44DAA 1D191B0496A14B 276529199F4D1B 4A996C2 3B2712E2EC37FF CF7C4390E8071C EF8C973AD5EDAA 547DD84375861 169CBAC9 5224321CF032B7 B9D2063515A045 9833D500F6EEBE DB9D00AED36ED2 7916166 22D7513761F614 4CD0E53D855FC3 950F3C38908717 A0261AC49D33A0 1B221531 A96F211585EDB F2942F28DB526F 2FF74229029FCD F4EABE779E75E4 3C3FED4 0 0 0 0 0\").unwrap()\n };\n let str = r#\"{\"field\":\"0 53104BD1A92BE9 4CBF937B44DAA 1D191B0496A14B 276529199F4D1B 4A996C2 3B2712E2EC37FF CF7C4390E8071C EF8C973AD5EDAA 547DD84375861 169CBAC9 5224321CF032B7 B9D2063515A045 9833D500F6EEBE DB9D00AED36ED2 7916166 22D7513761F614 4CD0E53D855FC3 950F3C38908717 A0261AC49D33A0 1B221531 A96F211585EDB F2942F28DB526F 2FF74229029FCD F4EABE779E75E4 3C3FED4 0 0 0 0 0\"}\"#;\n let deserialized: TestPointG2Structure = serde_json::from_str(&str).unwrap();\n\n assert_eq!(structure, deserialized);\n }\n #[test]\n fn serialize_works_for_pair() {\n let point_g1 = PointG1 {\n point: PointG1::from_string(\"1 0 0 0 0 0 1 0 0 0 0 1 0 0 0 0\").unwrap().point\n };\n let point_g2 = PointG2 {\n point: PointG2::from_string(\"0 53104BD1A92BE9 4CBF937B44DAA 1D191B0496A14B 276529199F4D1B 4A996C2 3B2712E2EC37FF CF7C4390E8071C EF8C973AD5EDAA 547DD84375861 169CBAC9 5224321CF032B7 B9D2063515A045 9833D500F6EEBE DB9D00AED36ED2 7916166 22D7513761F614 4CD0E53D855FC3 950F3C38908717 A0261AC49D33A0 1B221531 A96F211585EDB F2942F28DB526F 2FF74229029FCD F4EABE779E75E4 3C3FED4 0 0 0 0 0\").unwrap().point\n };\n let pair = TestPairStructure {\n field: Pair::pair(&point_g1, &point_g2).unwrap()\n };\n let str = r#\"{\"field\":\"70CC65D2371808 FD4F75244E5B72 40359FC95F7204 FA308613F34F1D 551BB55FA 7CB294CCE69B4A AE3C7228995A41 F27CD79430990 DE04BB58428296 5303A03BD 7FFA8A31C72E82 A8E1AA2E51D4B2 87B33F735B7ADF 19EADA0B95227E 392C800DC 74571A20806B80 ABDB72819D0A70 D4B1EDF5A54E6F FAF8EA4B2EFC2D 3CE6F2507 1EF2EE10541C8A 7C8B52D128C803 9D4A4954550B73 922CD02BD9DA10 AF8000002 1EF2EE10541C8A 7C8B52D128C803 9D4A4954550B73 922CD02BD9DA10 AF8000002 1EF2EE10541C8A 7C8B52D128C803 9D4A4954550B73 922CD02BD9DA10 AF8000002 1EF2EE10541C8A 7C8B52D128C803 9D4A4954550B73 922CD02BD9DA10 AF8000002 5543B5BF0C1826 69623262363316 E78DA0826F5875 2CEAD78790F397 948000002 5543B5BF0C1826 69623262363316 E78DA0826F5875 2CEAD78790F397 948000002 5543B5BF0C1826 69623262363316 E78DA0826F5875 2CEAD78790F397 948000002 5543B5BF0C1826 69623262363316 E78DA0826F5875 2CEAD78790F397 948000002\"}\"#;\n let serialized = serde_json::to_string(&pair).unwrap();\n\n assert_eq!(str, serialized);\n }\n\n #[test]\n fn deserialize_works_for_pair() {\n let point_g1 = PointG1 {\n point: PointG1::from_string(\"1 0 0 0 0 0 1 0 0 0 0 1 0 0 0 0\").unwrap().point\n };\n let point_g2 = PointG2 {\n point: PointG2::from_string(\"0 53104BD1A92BE9 4CBF937B44DAA 1D191B0496A14B 276529199F4D1B 4A996C2 3B2712E2EC37FF CF7C4390E8071C EF8C973AD5EDAA 547DD84375861 169CBAC9 5224321CF032B7 B9D2063515A045 9833D500F6EEBE DB9D00AED36ED2 
7916166 22D7513761F614 4CD0E53D855FC3 950F3C38908717 A0261AC49D33A0 1B221531 A96F211585EDB F2942F28DB526F 2FF74229029FCD F4EABE779E75E4 3C3FED4 0 0 0 0 0\").unwrap().point\n };\n let pair = TestPairStructure {\n field: Pair::pair(&point_g1, &point_g2).unwrap()\n };\n let str = r#\"{\"field\":\"70CC65D2371808 FD4F75244E5B72 40359FC95F7204 FA308613F34F1D 551BB55FA 7CB294CCE69B4A AE3C7228995A41 F27CD79430990 DE04BB58428296 5303A03BD 7FFA8A31C72E82 A8E1AA2E51D4B2 87B33F735B7ADF 19EADA0B95227E 392C800DC 74571A20806B80 ABDB72819D0A70 D4B1EDF5A54E6F FAF8EA4B2EFC2D 3CE6F2507 1EF2EE10541C8A 7C8B52D128C803 9D4A4954550B73 922CD02BD9DA10 AF8000002 1EF2EE10541C8A 7C8B52D128C803 9D4A4954550B73 922CD02BD9DA10 AF8000002 1EF2EE10541C8A 7C8B52D128C803 9D4A4954550B73 922CD02BD9DA10 AF8000002 1EF2EE10541C8A 7C8B52D128C803 9D4A4954550B73 922CD02BD9DA10 AF8000002 5543B5BF0C1826 69623262363316 E78DA0826F5875 2CEAD78790F397 948000002 5543B5BF0C1826 69623262363316 E78DA0826F5875 2CEAD78790F397 948000002 5543B5BF0C1826 69623262363316 E78DA0826F5875 2CEAD78790F397 948000002 5543B5BF0C1826 69623262363316 E78DA0826F5875 2CEAD78790F397 948000002\"}\"#;\n let deserialized: TestPairStructure = serde_json::from_str(&str).unwrap();\n\n assert_eq!(pair, deserialized);\n }\n\n #[test] //TODO: remove it\n #[ignore]\n fn stack_smashing_detected() {\n let point = PointG2::new().unwrap();\n println!(\"pstr: {}\", point.to_string().unwrap());\n }\n}"
},
{
"alpha_fraction": 0.6837944388389587,
"alphanum_fraction": 0.7035573124885559,
"avg_line_length": 20.08333396911621,
"blob_id": "080b8dce4d3c6af7e4f3c3029670d3c0662ead4f",
"content_id": "548b43bd116d9a9e73e1b039bc65744fc435304e",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 253,
"license_type": "permissive",
"max_line_length": 52,
"num_lines": 12,
"path": "/wrappers/python/sovrin/error.py",
"repo_name": "advatar/indy-sdk",
"src_encoding": "UTF-8",
"text": "from enum import Enum\n\nclass SovrinErrorCode(Enum):\n Success = 0\n CommonInvalidParam1 = 100\n\n\nclass SovrinError(Exception):\n error_code: SovrinErrorCode\n\n def __init__(self, error_code: SovrinErrorCode):\n self.error_code = error_code\n"
},
{
"alpha_fraction": 0.4538695514202118,
"alphanum_fraction": 0.4653599262237549,
"avg_line_length": 30.411890029907227,
"blob_id": "d7f92fc9cb85c9a70ecc4586713615699c028bf0",
"content_id": "35c07a3960229923937c50961bf152b621983029",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 14795,
"license_type": "permissive",
"max_line_length": 102,
"num_lines": 471,
"path": "/wrappers/ios/Tests/milagro-ios-test-app/milagro-test-app/tests/test_fp_arithmetics.c",
"repo_name": "advatar/indy-sdk",
"src_encoding": "UTF-8",
"text": "/**\n * @file test_fp_arithmetics.c\n * @author Alessandro Budroni\n * @brief Test for aritmetics with FP\n *\n * LICENSE\n *\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements. See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership. The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License. You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing,\n * software distributed under the License is distributed on an\n * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n * KIND, either express or implied. See the License for the\n * specific language governing permissions and limitations\n * under the License.\n */\n\n\n#include \"arch.h\"\n#include \"amcl.h\"\n#include \"utils.h\"\n#include <stdio.h>\n#include <string.h>\n#include <stdlib.h>\n\n#define LINE_LEN 10000\n#define MAX_STRING 300\n\nstatic void read_BIG(BIG A, char* string)\n{\n int len;\n char support[LINE_LEN];\n BIG_zero(A);\n len = strlen(string)+1;\n amcl_hex2bin(string,support,len);\n len = (len-1)/2;;\n BIG_fromBytesLen(A,support,len);\n BIG_norm(A);\n}\n\n\nint test_fp_arithmetics(int argc, char** argv)\n{\n printf(\"test_fp_arithmetics() started\\n\");\n if (argc != 2)\n {\n printf(\"usage: ./test_fp_arithmetics [path to test vector file]\\n\");\n exit(EXIT_FAILURE);\n }\n\n int i = 0, len = 0, j = 0, k = 0;\n FILE *fp;\n\n char line[LINE_LEN];\n char * linePtr = NULL;\n\n BIG M,supp, supp1, supp2, supp3;\n\n BIG FP_1;\n const char* FP_1line = \"FP_1 = \";\n BIG FP_2;\n const char* FP_2line = \"FP_2 = \";\n BIG FPadd;\n const char* FPaddline = \"FPadd = \";\n BIG FPsub;\n const char* FPsubline = \"FPsub = \";\n BIG FP_1nres;\n const char* FP_1nresline = \"FP_1nres = \";\n BIG FP_2nres;\n const char* FP_2nresline = \"FP_2nres = \";\n BIG FPmulmod;\n const char* FPmulmodline = \"FPmulmod = \";\n BIG FPsmallmul;\n const char* FPsmallmulline = \"FPsmallmul = \";\n BIG FPsqr;\n const char* FPsqrline = \"FPsqr = \";\n BIG FPreduce;\n const char* FPreduceline = \"FPreduce = \";\n BIG FPneg;\n const char* FPnegline = \"FPneg = \";\n BIG FPdiv2;\n const char* FPdiv2line = \"FPdiv2 = \";\n BIG FPinv;\n const char* FPinvline = \"FPinv = \";\n BIG FPexp;\n const char* FPexpline = \"FPexp = \";\n\n// Set to zero\n BIG_zero(FP_1);\n BIG_zero(FP_2);\n BIG_rcopy(M,Modulus);\n\n// Testing equal function and set zero function\n if(BIG_comp(FP_1,FP_2) || !FP_iszilch(FP_1) || !FP_iszilch(FP_2))\n {\n printf(\"ERROR comparing FPs or setting FP to zero FP\\n\");\n exit(EXIT_FAILURE);\n }\n\n fp = fopen(argv[1], \"r\");\n if (fp == NULL)\n {\n printf(\"ERROR opening test vector file\\n\");\n exit(EXIT_FAILURE);\n }\n\n while (fgets(line, LINE_LEN, fp) != NULL)\n {\n i++;\n// Read first FP\n if (!strncmp(line,FP_1line, strlen(FP_1line)))\n {\n len = strlen(FP_1line);\n linePtr = line + len;\n read_BIG(FP_1,linePtr);\n }\n// Read second FP\n if (!strncmp(line,FP_2line, strlen(FP_2line)))\n {\n len = strlen(FP_2line);\n linePtr = line + len;\n read_BIG(FP_2,linePtr);\n }\n// Addition test\n if (!strncmp(line,FPaddline, strlen(FPaddline)))\n {\n len = strlen(FPaddline);\n linePtr = line + len;\n read_BIG(FPadd,linePtr);\n BIG_copy(supp1,FP_2);\n BIG_copy(supp,FP_1);\n BIG_copy(supp2,FP_1);\n FP_add(supp,supp,supp1);\n 
FP_add(supp2,supp2,supp1); // test commutativity P+Q = Q+P\n BIG_norm(supp);\n FP_reduce(supp);\n BIG_norm(supp2);\n FP_reduce(supp2);\n if(BIG_comp(supp,FPadd) || BIG_comp(supp2,FPadd))\n {\n printf(\"ERROR adding two FPs, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n BIG_copy(supp,FP_1); // test associativity (P+Q)+R = P+(Q+R)\n BIG_copy(supp2,FP_1);\n BIG_copy(supp1,FP_2);\n BIG_copy(supp3,FPadd);\n FP_add(supp,supp,supp1);\n FP_add(supp,supp,supp3);\n FP_add(supp1,supp1,supp3);\n FP_add(supp1,supp1,supp2);\n FP_reduce(supp);\n FP_reduce(supp1);\n BIG_norm(supp);\n BIG_norm(supp1);\n if(BIG_comp(supp,supp1))\n {\n printf(\"ERROR testing associativity between three FPs, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n// Subtraction test\n if (!strncmp(line,FPsubline, strlen(FPsubline)))\n {\n len = strlen(FPsubline);\n linePtr = line + len;\n read_BIG(FPsub,linePtr);\n BIG_copy(supp,FP_1);\n BIG_copy(supp1,FP_2);\n FP_sub(supp,supp,supp1);\n FP_redc(supp);\n FP_nres(supp);\n BIG_sub(supp1,supp,M); // in case of lazy reduction\n BIG_norm(supp1);\n if((BIG_comp(supp,FPsub) != 0) && (BIG_comp(supp1,FPsub) != 0))\n {\n printf(\"ERROR subtraction between two FPs, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n// Reduce first FP\n if (!strncmp(line,FP_1nresline, strlen(FP_1nresline)))\n {\n len = strlen(FP_1nresline);\n linePtr = line + len;\n read_BIG(FP_1nres,linePtr);\n BIG_copy(supp,FP_1);\n FP_nres(supp);\n FP_redc(supp);\n if(BIG_comp(supp,FP_1nres))\n {\n printf(\"comp \");\n BIG_output(supp);\n printf(\"\\n\\n\");\n printf(\"read \");\n BIG_output(FP_1nres);\n printf(\"\\n\\n\");\n printf(\"ERROR Converts from BIG integer to n-residue form, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n// Reduce second FP\n if (!strncmp(line,FP_2nresline, strlen(FP_2nresline)))\n {\n len = strlen(FP_2nresline);\n linePtr = line + len;\n read_BIG(FP_2nres,linePtr);\n BIG_copy(supp,FP_2);\n FP_nres(supp);\n FP_redc(supp);\n if(BIG_comp(supp,FP_2nres))\n {\n printf(\"comp \");\n BIG_output(supp);\n printf(\"\\n\\n\");\n printf(\"read \");\n BIG_output(FP_2nres);\n printf(\"\\n\\n\");\n printf(\"ERROR Converts from BIG integer to n-residue form, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n// Multiplication modulo\n if (!strncmp(line,FPmulmodline, strlen(FPmulmodline)))\n {\n len = strlen(FPmulmodline);\n linePtr = line + len;\n read_BIG(FPmulmod,linePtr);\n BIG_copy(supp,FP_1);\n BIG_copy(supp1,FP_2);\n FP_nres(supp);\n FP_nres(supp1);\n FP_mul(supp,supp,supp1);\n FP_redc(supp);\n if(BIG_comp(supp,FPmulmod))\n {\n printf(\"comp \");\n BIG_output(supp);\n printf(\"\\n\\n\");\n printf(\"read \");\n BIG_output(FPmulmod);\n printf(\"\\n\\n\");\n printf(\"ERROR in multiplication and reduction by Modulo, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n// Small multiplication\n if (!strncmp(line,FPsmallmulline, strlen(FPsmallmulline)))\n {\n len = strlen(FPsmallmulline);\n linePtr = line + len;\n read_BIG(FPsmallmul,linePtr);\n FP_imul(supp,FP_1,0);\n BIG_norm(supp);\n if (!FP_iszilch(supp))\n {\n printf(\"ERROR in multiplication by 0, line %d\\n\",i);\n }\n for (j = 1; j <= 10; ++j)\n {\n FP_imul(supp,FP_1,j);\n BIG_copy(supp1,FP_1);\n for (k = 1; k < j; ++k)\n {\n BIG_norm(supp1);\n FP_add(supp1,supp1,FP_1);\n }\n BIG_norm(supp1);\n if(BIG_comp(supp,supp1) != 0)\n {\n printf(\"comp1 \");\n BIG_output(supp);\n printf(\"\\n\\n\");\n printf(\"comp2 \");\n BIG_output(supp1);\n printf(\"\\n\\n\");\n printf(\"ERROR in small multiplication or addition, line %d, multiplier %d\\n\",i,j);\n exit(EXIT_FAILURE);\n 
}\n }\n FP_reduce(supp);\n FP_reduce(supp1);\n if(BIG_comp(supp,FPsmallmul) | BIG_comp(supp1,supp))\n {\n printf(\"comp1 \");\n BIG_output(supp);\n printf(\"\\n\\n\");\n printf(\"comp2 \");\n BIG_output(supp1);\n printf(\"\\n\\n\");\n printf(\"read \");\n BIG_output(FPsmallmul);\n printf(\"\\n\\n\");\n printf(\"ERROR in small multiplication, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n// Square and square root\n if (!strncmp(line,FPsqrline, strlen(FPsqrline)))\n {\n len = strlen(FPsqrline);\n linePtr = line + len;\n read_BIG(FPsqr,linePtr);\n BIG_copy(supp,FP_1);\n FP_nres(supp);\n FP_sqr(supp,supp);\n FP_redc(supp);\n if(BIG_comp(supp,FPsqr))\n {\n printf(\"supp \");\n BIG_output(supp);\n printf(\"\\n\\n\");\n printf(\"read \");\n BIG_output(FPsqr);\n printf(\"\\n\\n\");\n printf(\"ERROR in squaring FP, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n /*FP_nres(supp);\n FP_sqrt(supp,supp);\n FP_redc(supp);\n if(BIG_comp(supp,FP_1))\n {\n printf(\"supp \");BIG_output(supp);printf(\"\\n\\n\");\n printf(\"read \");BIG_output(FP_1);printf(\"\\n\\n\");\n printf(\"ERROR square/square root consistency FP, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }*/\n }\n// Reducing Modulo\n if (!strncmp(line,FPreduceline, strlen(FPreduceline)))\n {\n len = strlen(FPreduceline);\n linePtr = line + len;\n read_BIG(FPreduce,linePtr);\n BIG_copy(supp,FP_1);\n FP_reduce(supp);\n if(BIG_comp(supp,FPreduce))\n {\n printf(\"comp \");\n BIG_output(supp);\n printf(\"\\n\\n\");\n printf(\"read \");\n BIG_output(FPreduce);\n printf(\"\\n\\n\");\n printf(\"ERROR in reducing FP, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n// Negative an FP\n if (!strncmp(line,FPnegline, strlen(FPnegline)))\n {\n len = strlen(FPnegline);\n linePtr = line + len;\n read_BIG(FPneg,linePtr);\n BIG_copy(supp,FP_1);\n FP_nres(supp);\n FP_neg(supp,supp);\n FP_redc(supp);\n BIG_sub(supp1,supp,M); // in case of lazy reduction\n BIG_norm(supp1);\n if((BIG_comp(supp,FPneg) != 0) && (BIG_comp(supp1,FPneg) != 0))\n {\n printf(\"comp \");\n BIG_output(supp);\n printf(\"\\n\\n\");\n printf(\"read \");\n BIG_output(FPneg);\n printf(\"\\n\\n\");\n printf(\"ERROR in computing FP_neg, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n// Division by 2\n if (!strncmp(line,FPdiv2line, strlen(FPdiv2line)))\n {\n len = strlen(FPdiv2line);\n linePtr = line + len;\n read_BIG(FPdiv2,linePtr);\n BIG_copy(supp,FP_1);\n FP_redc(supp);\n FP_nres(supp);\n FP_div2(supp,supp);\n if(BIG_comp(supp,FPdiv2))\n {\n printf(\"comp \");\n BIG_output(supp);\n printf(\"\\n\\n\");\n printf(\"read \");\n BIG_output(FPdiv2);\n printf(\"\\n\\n\");\n printf(\"ERROR in division by 2, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n// Inverse Modulo and FP_one\n if (!strncmp(line,FPinvline, strlen(FPinvline)))\n {\n len = strlen(FPinvline);\n linePtr = line + len;\n read_BIG(FPinv,linePtr);\n BIG_copy(supp,FP_1);\n BIG_copy(supp1,FP_1);\n FP_nres(supp);\n FP_inv(supp,supp);\n FP_redc(supp);\n if(BIG_comp(supp,FPinv))\n {\n printf(\"comp \");\n BIG_output(supp);\n printf(\"\\n\\n\");\n printf(\"read \");\n BIG_output(FPinv);\n printf(\"\\n\\n\");\n printf(\"ERROR computing inverse modulo, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n FP_mul(supp,supp,supp1);\n FP_nres(supp);\n FP_reduce(supp);\n FP_one(supp1);\n FP_redc(supp1);\n if(BIG_comp(supp,supp1))\n {\n printf(\"comp1 \");\n BIG_output(supp);\n printf(\"\\n\\n\");\n printf(\"comp2 \");\n BIG_output(supp1);\n printf(\"\\n\\n\");\n printf(\"ERROR multipling FP and its inverse, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n// modular 
exponentiation\n if (!strncmp(line,FPexpline, strlen(FPexpline)))\n {\n len = strlen(FPexpline);\n linePtr = line + len;\n read_BIG(FPexp,linePtr);\n BIG_copy(supp,FP_1);\n BIG_copy(supp1,FP_2);\n FP_nres(supp);\n FP_pow(supp,supp,supp1);\n FP_redc(supp);\n if(BIG_comp(supp,FPexp))\n {\n printf(\"supp \");\n BIG_output(supp);\n printf(\"\\n\\n\");\n printf(\"read \");\n BIG_output(FPexp);\n printf(\"\\n\\n\");\n printf(\"ERROR in modular exponentiation, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n }\n fclose(fp);\n\n printf(\"test_fp_arithmetics() SUCCESS TEST ARITHMETIC OF FP PASSED\\n\");\n return EXIT_SUCCESS;\n}\n"
},
{
"alpha_fraction": 0.6990740895271301,
"alphanum_fraction": 0.6990740895271301,
"avg_line_length": 18.636363983154297,
"blob_id": "56c3dd6178708790a71601b664049b5f564eabc0",
"content_id": "365b78142df96fb44036663175dcfd2580dd7a11",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 216,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 11,
"path": "/wrappers/python/tests/test_wallet.py",
"repo_name": "advatar/indy-sdk",
"src_encoding": "UTF-8",
"text": "import asyncio\n\nfrom sovrin import *\n\nasync def main():\n await Wallet.create_wallet(None, \"wallet\", \"wallet-type\", \"config\", \"creds\")\n\n\nloop = asyncio.get_event_loop()\nloop.run_until_complete(main())\nloop.close()\n"
},
{
"alpha_fraction": 0.5020663142204285,
"alphanum_fraction": 0.5187804102897644,
"avg_line_length": 29.163434982299805,
"blob_id": "3b37c269d9ee3fb4b876d6cd9c1c71281dccd691",
"content_id": "1f80e7cc9c3e6f5f0d96c8001afe9c2975132444",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 10889,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 361,
"path": "/wrappers/ios/Tests/milagro-ios-test-app/milagro-test-app/tests/test_big_arithmetics.c",
"repo_name": "advatar/indy-sdk",
"src_encoding": "UTF-8",
"text": "/**\n * @file test_big_consistency.c\n * @author Alessandro Budroni\n * @brief Test for aritmetics with BIG\n *\n * LICENSE\n *\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements. See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership. The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License. You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing,\n * software distributed under the License is distributed on an\n * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n * KIND, either express or implied. See the License for the\n * specific language governing permissions and limitations\n * under the License.\n */\n\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include \"arch.h\"\n#include \"amcl.h\"\n#include \"utils.h\"\n#include \"utils.h\"\n\n#define LINE_LEN 10000\n#define MAX_STRING 300\n\nstatic void read_BIG(BIG A, char* string)\n{\n int len;\n char support[LINE_LEN];\n BIG_zero(A);\n len = strlen(string)+1;\n amcl_hex2bin(string,support,len);\n len = (len-1)/2;;\n BIG_fromBytesLen(A,support,len);\n BIG_norm(A);\n}\n\nstatic void read_DBIG(DBIG A, char* string)\n{\n int len;\n char support[LINE_LEN];\n BIG_dzero(A);\n len = strlen(string)+1;\n amcl_hex2bin(string,support,len);\n len = (len-1)/2;\n BIG_dfromBytesLen(A,support,len);\n BIG_dnorm(A);\n}\n\nint test_big_arithmetics(int argc, char** argv)\n{\n printf(\"test_big_arithmetics() started\\n\");\n if (argc != 2)\n {\n printf(\"usage: ./test_BIG_arithmetics [path to test vector file]\\n\");\n exit(EXIT_FAILURE);\n }\n\n int i=0, len=0, bitlen=0;\n\n FILE *fp;\n\n char line[LINE_LEN];\n char * linePtr = NULL;\n\n BIG supp,mod,div;\n DBIG dsupp;\n\n BIG BIG1;\n const char* BIG1line = \"BIG1 = \";\n BIG BIG2;\n const char* BIG2line = \"BIG2 = \";\n BIG BIGsum;\n const char* BIGsumline = \"BIGsum = \";\n BIG BIGsub;\n const char* BIGsubline = \"BIGsub = \";\n BIG BIG1mod2;\n const char* BIG1mod2line = \"BIG1mod2 = \";\n BIG BIG2mod1;\n const char* BIG2mod1line = \"BIG2mod1 = \";\n DBIG BIGmul;\n const char* BIGmulline = \"BIGmul = \";\n DBIG BIG1sqr;\n const char* BIG1sqrline = \"BIG1sqr = \";\n DBIG BIG2sqr;\n const char* BIG2sqrline = \"BIG2sqr = \";\n BIG BIG1sqrmod2;\n const char* BIG1sqrmod2line = \"BIG1sqrmod2 = \";\n BIG BIG1modneg2;\n const char* BIG1modneg2line = \"BIG1modneg2 = \";\n int nbitBIG = 0;\n const char* nbitBIGline = \"nbitBIG = \";\n int nbitDBIG = 0;\n const char* nbitDBIGline = \"nbitDBIG = \";\n BIG BIGdiv;\n const char* BIGdivline = \"BIGdiv = \";\n BIG BIGdivmod;\n const char* BIGdivmodline = \"BIGdivmod = \";\n DBIG BIGpxmul;\n const char* BIGpxmulline = \"BIGpxmul = \";\n\n fp = fopen(argv[1], \"r\");\n if (fp == NULL)\n {\n printf(\"ERROR opening test vector file\\n\");\n exit(EXIT_FAILURE);\n }\n\n while (fgets(line, LINE_LEN, fp) != NULL)\n {\n i++;\n if (!strncmp(line, BIG1line, strlen(BIG1line)))\n {\n len = strlen(BIG1line);\n linePtr = line + len;\n read_BIG(BIG1,linePtr);\n }\n// test comparison\n if (!strncmp(line,BIG2line, strlen(BIG2line)))\n {\n len = strlen(BIG2line);\n linePtr = line + len;\n read_BIG(BIG2,linePtr);\n if (BIG_comp(BIG1,BIG2) < 0)\n {\n printf(\"ERROR comparing two BIGs, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n// test 
addition\n if (!strncmp(line,BIGsumline, strlen(BIGsumline)))\n {\n BIG_zero(supp);\n BIG_add(supp,BIG1,BIG2);\n len = strlen(BIGsumline);\n linePtr = line + len;\n read_BIG(BIGsum,linePtr);\n BIG_norm(supp);\n if (BIG_comp(BIGsum,supp) != 0)\n {\n printf(\"ERROR adding two BIGs, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n// test subtraction\n if (!strncmp(line,BIGsubline, strlen(BIGsubline)))\n {\n BIG_zero(supp);\n BIG_sub(supp,BIG1,BIG2);\n len = strlen(BIGsubline);\n linePtr = line + len;\n read_BIG(BIGsub,linePtr);\n BIG_norm(supp);\n if (BIG_comp(BIGsub,supp) != 0)\n {\n printf(\"ERROR subtracting two BIGs, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n// test modulo 1\n if (!strncmp(line,BIG1mod2line, strlen(BIG1mod2line)))\n {\n BIG_zero(supp);\n BIG_copy(supp,BIG1);\n BIG_mod(supp,BIG2);\n len = strlen(BIG1mod2line);\n linePtr = line + len;\n read_BIG(BIG1mod2,linePtr);\n BIG_norm(supp);\n if (BIG_comp(BIG1mod2,supp) != 0)\n {\n printf(\"ERROR reducing modulo BIG, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n// test modulo 2\n if (!strncmp(line,BIG2mod1line, strlen(BIG2mod1line)))\n {\n BIG_zero(supp);\n BIG_copy(supp,BIG2);\n BIG_mod(supp,BIG1);\n len = strlen(BIG2mod1line);\n linePtr = line + len;\n read_BIG(BIG2mod1,linePtr);\n BIG_norm(supp);\n if (BIG_comp(BIG2mod1,supp) != 0)\n {\n printf(\"ERROR reducing modulo BIG, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n// test multiplication\n if (!strncmp(line,BIGmulline, strlen(BIGmulline)))\n {\n BIG_dzero(dsupp);\n BIG_mul(dsupp,BIG1,BIG2);\n len = strlen(BIGmulline);\n linePtr = line + len;\n read_DBIG(BIGmul,linePtr);\n BIG_dnorm(dsupp);\n if (BIG_dcomp(BIGmul,dsupp) != 0)\n {\n printf(\"ERROR multiplication BIG, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n// test square 1\n if (!strncmp(line,BIG1sqrline, strlen(BIG1sqrline)))\n {\n BIG_dzero(dsupp);\n BIG_sqr(dsupp,BIG1);\n len = strlen(BIG1sqrline);\n linePtr = line + len;\n read_DBIG(BIG1sqr,linePtr);\n BIG_dnorm(dsupp);\n if (BIG_dcomp(BIG1sqr,dsupp) != 0)\n {\n printf(\"ERROR squaring BIG 1, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n// test square 2\n if (!strncmp(line,BIG2sqrline, strlen(BIG2sqrline)))\n {\n BIG_dzero(dsupp);\n BIG_sqr(dsupp,BIG2);\n len = strlen(BIG2sqrline);\n linePtr = line + len;\n read_DBIG(BIG2sqr,linePtr);\n BIG_dnorm(dsupp);\n if (BIG_dcomp(BIG2sqr,dsupp) != 0)\n {\n printf(\"ERROR squaring BIG 2, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n// test square mod\n if (!strncmp(line,BIG1sqrmod2line, strlen(BIG1sqrmod2line)))\n {\n BIG_zero(supp);\n BIG_copy(supp,BIG1);\n BIG_modsqr(supp,supp,BIG2);\n len = strlen(BIG1sqrmod2line);\n linePtr = line + len;\n read_BIG(BIG1sqrmod2,linePtr);\n BIG_norm(supp);\n if (BIG_comp(BIG1sqrmod2,supp) != 0)\n {\n printf(\"ERROR reducing squaring modulo BIG, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n// test negative and modulo\n if (!strncmp(line,BIG1modneg2line, strlen(BIG1modneg2line)))\n {\n BIG_zero(supp);\n BIG_copy(supp,BIG1);\n BIG_modneg(supp,supp,BIG2);\n len = strlen(BIG1modneg2line);\n linePtr = line + len;\n read_BIG(BIG1modneg2,linePtr);\n BIG_norm(supp);\n if (BIG_comp(BIG1modneg2,supp) != 0)\n {\n printf(\"ERROR negative reduced modulo BIG, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n// test counting bit BIG\n if (!strncmp(line,nbitBIGline, strlen(nbitBIGline)))\n {\n len = strlen(nbitBIGline);\n nbitBIG = BIG_nbits(BIG1);\n linePtr = line + len;\n sscanf(linePtr,\"%d\\n\",&bitlen);\n if (nbitBIG != bitlen)\n {\n printf(\"ERROR counting bit BIG, line %d\\n\",i);\n 
exit(EXIT_FAILURE);\n }\n }\n// test counting bit DBIG\n if (!strncmp(line,nbitDBIGline, strlen(nbitDBIGline)))\n {\n len = strlen(nbitDBIGline);\n nbitDBIG = BIG_dnbits(BIGmul);\n linePtr = line + len;\n sscanf(linePtr,\"%d\\n\",&bitlen);\n if (nbitDBIG != bitlen)\n {\n printf(\"ERROR counting bit DBIG, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n// test division\n if (!strncmp(line,BIGdivline, strlen(BIGdivline)))\n {\n BIG_dzero(dsupp);\n BIG_zero(supp);\n BIG_dcopy(dsupp,BIGmul);\n BIG_ddiv(supp,dsupp,BIGsum);\n len = strlen(BIGdivline);\n linePtr = line + len;\n read_BIG(BIGdiv,linePtr);\n BIG_norm(supp);\n if (BIG_comp(BIGdiv,supp) != 0)\n {\n printf(\"ERROR division BIG, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n// test division with modulo\n if (!strncmp(line,BIGdivmodline, strlen(BIGdivmodline)))\n {\n read_BIG(mod,\"E186EB30EF\");\n read_BIG(div,\"0ED5066C6815047425DF\");\n BIG_zero(supp);\n BIG_copy(supp,BIG1);\n BIG_moddiv(supp,supp,div,mod);\n len = strlen(BIGdivmodline);\n linePtr = line + len;\n read_BIG(BIGdivmod,linePtr);\n BIG_norm(supp);\n if (BIG_comp(BIGdivmod,supp) != 0)\n {\n printf(\"ERROR division modulo BIG, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n// test small multiplication\n if (!strncmp(line,BIGpxmulline, strlen(BIGpxmulline)))\n {\n BIG_dzero(dsupp);\n BIG_pxmul(dsupp,BIG1,nbitDBIG);\n len = strlen(BIGpxmulline);\n linePtr = line + len;\n read_DBIG(BIGpxmul,linePtr);\n BIG_dnorm(dsupp);\n if (BIG_dcomp(BIGpxmul,dsupp) != 0)\n {\n printf(\"ERROR small multiplication BIG, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n }\n\n fclose(fp);\n printf(\"test_big_arithmetics() SUCCESS TEST ARITHMETIC OF BIG PASSED\\n\");\n return 0;\n}\n"
},
{
"alpha_fraction": 0.47904667258262634,
"alphanum_fraction": 0.525732159614563,
"avg_line_length": 29.854442596435547,
"blob_id": "64422fe33adf0b7c723536edb2f9bca4a4e56dea",
"content_id": "35c11b7a3db1831ee918750609c24697f92d2a79",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 16322,
"license_type": "permissive",
"max_line_length": 140,
"num_lines": 529,
"path": "/wrappers/ios/Tests/milagro-ios-test-app/milagro-test-app/tests/test_fp4_arithmetics.c",
"repo_name": "advatar/indy-sdk",
"src_encoding": "UTF-8",
"text": "/**\n * @file test_fp_arithmetics.c\n * @author Alessandro Budroni\n * @brief Test for aritmetics with FP\n *\n * LICENSE\n *\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements. See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership. The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License. You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing,\n * software distributed under the License is distributed on an\n * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n * KIND, either express or implied. See the License for the\n * specific language governing permissions and limitations\n * under the License.\n */\n\n\n#include \"arch.h\"\n#include \"amcl.h\"\n#include \"utils.h\"\n#include <stdio.h>\n#include <string.h>\n#include <stdlib.h>\n\n#define LINE_LEN 10000\n#define MAX_STRING 300\n\n\nstatic void read_BIG(BIG A, char* string)\n{\n int len;\n char support[LINE_LEN];\n BIG_zero(A);\n len = strlen(string)+1;\n amcl_hex2bin(string,support,len);\n len = (len-1)/2;\n BIG_fromBytesLen(A,support,len);\n BIG_norm(A);\n}\n\nstatic void read_FP2(FP2 *fp2, char* stringx)\n{\n char *stringy, *end;\n BIG x,y;\n\n stringx++;\n stringy = strchr(stringx,',');\n if (stringy == NULL)\n {\n printf(\"ERROR unexpected test vector\\n\");\n exit(EXIT_FAILURE);\n }\n stringy[0] = '\\0';\n stringy++;\n end = strchr(stringy,']');\n if (end == NULL)\n {\n printf(\"ERROR unexpected test vector\\n\");\n exit(EXIT_FAILURE);\n }\n end[0] = '\\0';\n\n read_BIG(x,stringx);\n read_BIG(y,stringy);\n\n FP2_from_BIGs(fp2,x,y);\n}\n\nstatic void read_FP4(FP4 *fp4, char* stringx1)\n{\n char *stringx2, *stringy1, *stringy2, *end;\n BIG x1,x2,y1,y2;\n FP2 x,y;\n\n stringx1 += 2;\n stringx2 = strchr(stringx1,',');\n if (stringx2 == NULL)\n {\n printf(\"ERROR unexpected test vector\\n\");\n exit(EXIT_FAILURE);\n }\n stringx2[0] = '\\0';\n stringx2 ++;\n stringy1 = strchr(stringx2,']');\n if (stringy1 == NULL)\n {\n printf(\"ERROR unexpected test vector\\n\");\n exit(EXIT_FAILURE);\n }\n stringy1[0] = '\\0';\n stringy1 += 3;\n stringy2 = strchr(stringy1,',');\n if (stringy2 == NULL)\n {\n printf(\"ERROR unexpected test vector\\n\");\n exit(EXIT_FAILURE);\n }\n stringy2[0] = '\\0';\n stringy2++;\n end = strchr(stringy2,']');\n if (end == NULL)\n {\n printf(\"ERROR unexpected test vector\\n\");\n exit(EXIT_FAILURE);\n }\n end[0] = '\\0';\n\n read_BIG(x1,stringx1);\n read_BIG(x2,stringx2);\n read_BIG(y1,stringy1);\n read_BIG(y2,stringy2);\n\n FP2_from_BIGs(&x,x1,x2);\n FP2_from_BIGs(&y,y1,y2);\n\n FP4_from_FP2s(fp4,&x,&y);\n}\n\nint test_fp4_arithmetics(int argc, char** argv)\n{\n printf(\"test_fp4_arithmetics() started\\n\");\n if (argc != 2)\n {\n printf(\"usage: ./test_fp4_arithmetics [path to test vector file]\\n\");\n exit(EXIT_FAILURE);\n }\n\n int i = 0, len = 0, j = 0;\n FILE *fp;\n\n char line[LINE_LEN];\n char * linePtr = NULL;\n\n BIG M, Fr_a, Fr_b;\n FP2 Frob;\n FP4 FP4aux1, FP4aux2, FP4aux3, FP4aux4;\n\n FP4 FP4_1;\n const char* FP4_1line = \"FP4_1 = \";\n FP4 FP4_2;\n const char* FP4_2line = \"FP4_2 = \";\n FP4 FP4add;\n const char* FP4addline = \"FP4add = \";\n FP4 FP4neg;\n const char* FP4negline = \"FP4neg = \";\n FP4 FP4sub;\n const char* FP4subline = \"FP4sub = \";\n FP4 
FP4conj;\n const char* FP4conjline = \"FP4conj = \";\n FP4 FP4nconj;\n const char* FP4nconjline = \"FP4nconj = \";\n FP2 FP2sc;\n const char* FP2scline = \"FP2sc = \";\n FP4 FP4pmul;\n const char* FP4pmulline = \"FP4pmul = \";\n FP4 FP4imul;\n const char* FP4imulline = \"FP4imul = \";\n FP4 FP4sqr;\n const char* FP4sqrline = \"FP4sqr = \";\n FP4 FP4mul;\n const char* FP4mulline = \"FP4mul = \";\n FP4 FP4inv;\n const char* FP4invline = \"FP4inv = \";\n FP4 FP4mulj;\n const char* FP4muljline = \"FP4mulj = \";\n BIG BIGsc;\n const char* BIGscline = \"BIGsc = \";\n FP4 FP4pow;\n const char* FP4powline = \"FP4pow = \";\n FP4 FP4frob;\n const char* FP4frobline = \"FP4frob = \";\n FP4 FP4_xtrA;\n const char* FP4_xtrAline = \"FP4_xtrA = \";\n FP4 FP4_xtrD;\n const char* FP4_xtrDline = \"FP4_xtrD = \";\n\n BIG_rcopy(M,Modulus);\n BIG_rcopy(Fr_a,CURVE_Fra);\n BIG_rcopy(Fr_b,CURVE_Frb);\n FP2_from_BIGs(&Frob,Fr_a,Fr_b);\n\n// Set to zero\n FP4_zero(&FP4aux1);\n FP4_zero(&FP4aux2);\n\n// Testing equal function and set zero function\n if(!FP4_equals(&FP4aux1,&FP4aux2) || !FP4_iszilch(&FP4aux1) || !FP4_iszilch(&FP4aux2) || !FP4_isreal(&FP4aux1))\n {\n printf(\"ERROR comparing FP4s or setting FP4 to zero FP\\n\");\n exit(EXIT_FAILURE);\n }\n\n// Set to one\n FP4_one(&FP4aux1);\n FP4_one(&FP4aux2);\n\n// Testing equal function and set one function\n if(!FP4_equals(&FP4aux1,&FP4aux2) || !FP4_isunity(&FP4aux1) || !FP4_isunity(&FP4aux2) || !FP4_isreal(&FP4aux1) || FP4_iszilch(&FP4aux1))\n {\n printf(\"ERROR comparing FP4s or setting FP4 to unity FP\\n\");\n exit(EXIT_FAILURE);\n }\n\n\n fp = fopen(argv[1], \"r\");\n if (fp == NULL)\n {\n printf(\"ERROR opening test vector file\\n\");\n exit(EXIT_FAILURE);\n }\n\n while (fgets(line, LINE_LEN, fp) != NULL)\n {\n i++;\n// Read first FP4 and perform some tests\n if (!strncmp(line,FP4_1line, strlen(FP4_1line)))\n {\n len = strlen(FP4_1line);\n linePtr = line + len;\n read_FP4(&FP4_1,linePtr);\n// test FP4_from_FP2s\n FP4_from_FP2s(&FP4aux1,&FP4_1.a,&FP4_1.b);\n if(!FP4_equals(&FP4aux1,&FP4_1))\n {\n printf(\"ERROR in generating FP4 from two FP2s, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n// test FP4_from_FP2 and FP4_isreal\n FP4_from_FP2(&FP4aux1,&FP4_1.a);\n FP4_copy(&FP4aux2,&FP4_1);\n FP2_zero(&FP4aux2.b);\n if(!FP4_equals(&FP4aux1,&FP4aux2) || !FP4_isreal(&FP4aux1))\n {\n printf(\"ERROR in generating FP4 from one FP2, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n// Read second FP4\n if (!strncmp(line,FP4_2line, strlen(FP4_2line)))\n {\n len = strlen(FP4_2line);\n linePtr = line + len;\n read_FP4(&FP4_2,linePtr);\n }\n// Addition test\n if (!strncmp(line,FP4addline, strlen(FP4addline)))\n {\n len = strlen(FP4addline);\n linePtr = line + len;\n read_FP4(&FP4add,linePtr);\n FP4_copy(&FP4aux1,&FP4_1);\n FP4_copy(&FP4aux2,&FP4_2);\n FP4_add(&FP4aux1,&FP4aux1,&FP4aux2);\n// test commutativity P+Q = Q+P\n FP4_copy(&FP4aux3,&FP4_1);\n FP4_add(&FP4aux2,&FP4aux2,&FP4aux3);\n FP4_reduce(&FP4aux1);\n FP4_norm(&FP4aux1);\n FP4_reduce(&FP4aux2);\n FP4_norm(&FP4aux2);\n if(!FP4_equals(&FP4aux1,&FP4add) || !FP4_equals(&FP4aux2,&FP4add))\n {\n printf(\"ERROR adding two FP4, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n// test associativity (P+Q)+R = P+(Q+R)\n FP4_copy(&FP4aux1,&FP4_1);\n FP4_copy(&FP4aux3,&FP4_1);\n FP4_copy(&FP4aux2,&FP4_2);\n FP4_copy(&FP4aux4,&FP4add);\n FP4_add(&FP4aux1,&FP4aux1,&FP4aux2);\n FP4_add(&FP4aux1,&FP4aux1,&FP4aux4);\n FP4_add(&FP4aux2,&FP4aux2,&FP4aux4);\n FP4_add(&FP4aux2,&FP4aux2,&FP4aux3);\n FP4_reduce(&FP4aux1);\n 
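// Added note: FP4_reduce pulls each coefficient back below the modulus and\n // FP4_norm normalises the underlying BIG limbs; both sides are canonicalised\n // this way so that FP4_equals is a meaningful comparison.\n 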
FP4_reduce(&FP4aux2);\n FP4_norm(&FP4aux1);\n FP4_norm(&FP4aux2);\n if(!FP4_equals(&FP4aux1,&FP4aux2))\n {\n printf(\"ERROR testing associativity between three FP4s, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n// Test negative of an FP4\n if (!strncmp(line,FP4negline, strlen(FP4negline)))\n {\n len = strlen(FP4negline);\n linePtr = line + len;\n read_FP4(&FP4neg,linePtr);\n FP4_copy(&FP4aux1,&FP4_1);\n FP4_neg(&FP4aux1,&FP4aux1);\n FP4_reduce(&FP4aux1);\n FP4_norm(&FP4aux1);\n if(!FP4_equals(&FP4aux1,&FP4neg))\n {\n printf(\"ERROR in computing negative of FP4, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n// Subtraction test\n if (!strncmp(line,FP4subline, strlen(FP4subline)))\n {\n len = strlen(FP4subline);\n linePtr = line + len;\n read_FP4(&FP4sub,linePtr);\n FP4_copy(&FP4aux1,&FP4_1);\n FP4_copy(&FP4aux2,&FP4_2);\n FP4_sub(&FP4aux1,&FP4aux1,&FP4aux2);\n FP4_reduce(&FP4aux1);\n FP4_norm(&FP4aux1);\n if(FP4_equals(&FP4aux1,&FP4sub) == 0)\n {\n printf(\"ERROR subtraction between two FP4, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n// Test conjugate\n if (!strncmp(line,FP4conjline, strlen(FP4conjline)))\n {\n len = strlen(FP4conjline);\n linePtr = line + len;\n read_FP4(&FP4conj,linePtr);\n FP4_copy(&FP4aux1,&FP4_1);\n FP4_conj(&FP4aux1,&FP4aux1);\n FP4_reduce(&FP4aux1);\n FP4_norm(&FP4aux1);\n if(!FP4_equals(&FP4aux1,&FP4conj))\n {\n printf(\"ERROR computing conjugate of FP4, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n// Test negative conjugate\n if (!strncmp(line,FP4nconjline, strlen(FP4nconjline)))\n {\n len = strlen(FP4nconjline);\n linePtr = line + len;\n read_FP4(&FP4nconj,linePtr);\n FP4_copy(&FP4aux1,&FP4_1);\n FP4_nconj(&FP4aux1,&FP4aux1);\n FP4_reduce(&FP4aux1);\n FP4_norm(&FP4aux1);\n if(!FP4_equals(&FP4aux1,&FP4nconj))\n {\n printf(\"ERROR computing negative conjugate of FP4, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n// Read multiplicator\n if (!strncmp(line,FP2scline, strlen(FP2scline)))\n {\n len = strlen(FP2scline);\n linePtr = line + len;\n read_FP2(&FP2sc,linePtr);\n }\n// Multiplication by FP2\n if (!strncmp(line,FP4pmulline, strlen(FP4pmulline)))\n {\n len = strlen(FP4pmulline);\n linePtr = line + len;\n read_FP4(&FP4pmul,linePtr);\n FP4_pmul(&FP4aux1,&FP4_1,&FP2sc);\n FP4_reduce(&FP4aux1);\n FP4_norm(&FP4aux1);\n if(!FP4_equals(&FP4aux1,&FP4pmul))\n {\n printf(\"ERROR in multiplication by FP2, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n// Multiplication by j = 0..10\n if (!strncmp(line,FP4imulline, strlen(FP4imulline)))\n {\n len = strlen(FP4imulline);\n linePtr = line + len;\n read_FP4(&FP4imul,linePtr);\n FP4_imul(&FP4aux1,&FP4_1,j);\n j++;\n FP4_reduce(&FP4aux1);\n FP4_norm(&FP4aux1);\n if(!FP4_equals(&FP4aux1,&FP4imul))\n {\n printf(\"ERROR in multiplication by small integer, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n// Square test\n if (!strncmp(line,FP4sqrline, strlen(FP4sqrline)))\n {\n len = strlen(FP4sqrline);\n linePtr = line + len;\n read_FP4(&FP4sqr,linePtr);\n FP4_copy(&FP4aux1,&FP4_1);\n FP4_sqr(&FP4aux1,&FP4aux1);\n FP4_reduce(&FP4aux1);\n FP4_norm(&FP4aux1);\n if(!FP4_equals(&FP4aux1,&FP4sqr))\n {\n printf(\"ERROR in squaring FP4, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n// Multiplication between two FP4s\n if (!strncmp(line,FP4mulline, strlen(FP4mulline)))\n {\n len = strlen(FP4mulline);\n linePtr = line + len;\n read_FP4(&FP4mul,linePtr);\n FP4_mul(&FP4aux1,&FP4_1,&FP4_2);\n FP4_reduce(&FP4aux1);\n FP4_norm(&FP4aux1);\n if(!FP4_equals(&FP4aux1,&FP4mul))\n {\n printf(\"ERROR in multiplication between two FP4s, line 
%d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n// Inverse\n if (!strncmp(line,FP4invline, strlen(FP4invline)))\n {\n len = strlen(FP4invline);\n linePtr = line + len;\n read_FP4(&FP4inv,linePtr);\n FP4_copy(&FP4aux1,&FP4_1);\n FP4_inv(&FP4aux1,&FP4aux1);\n FP4_reduce(&FP4aux1);\n FP4_norm(&FP4aux1);\n if(!FP4_equals(&FP4aux1,&FP4inv))\n {\n printf(\"ERROR in computing inverse of FP4, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n// Test multiplication of an FP4 instance by sqrt(1+sqrt(-1))\n if (!strncmp(line,FP4muljline, strlen(FP4muljline)))\n {\n len = strlen(FP4muljline);\n linePtr = line + len;\n read_FP4(&FP4mulj,linePtr);\n FP4_copy(&FP4aux1,&FP4_1);\n FP4_times_i(&FP4aux1);\n FP4_reduce(&FP4aux1);\n FP4_norm(&FP4aux1);\n if(!FP4_equals(&FP4aux1,&FP4mulj))\n {\n printf(\"ERROR in multiplication of an FP4 instance by sqrt(1+sqrt(-1)), line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n// Read exponent\n if (!strncmp(line,BIGscline, strlen(BIGscline)))\n {\n len = strlen(BIGscline);\n linePtr = line + len;\n read_BIG(BIGsc,linePtr);\n }\n// Raise FP4 by BIG power\n if (!strncmp(line,FP4powline, strlen(FP4powline)))\n {\n len = strlen(FP4powline);\n linePtr = line + len;\n read_FP4(&FP4pow,linePtr);\n FP4_pow(&FP4aux1,&FP4_1,BIGsc);\n FP4_reduce(&FP4aux1);\n if(!FP4_equals(&FP4aux1,&FP4pow))\n {\n printf(\"ERROR in raising FP4 by BIG power, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n// Raises an FP4 to the power of the internal modulus p, using the Frobenius constant f\n if (!strncmp(line,FP4frobline, strlen(FP4frobline)))\n {\n len = strlen(FP4frobline);\n linePtr = line + len;\n read_FP4(&FP4frob,linePtr);\n FP4_copy(&FP4aux1,&FP4_1);\n FP4_frob(&FP4aux1,&Frob);\n FP4_reduce(&FP4aux1);\n if(!FP4_equals(&FP4aux1,&FP4frob))\n {\n printf(\"ERROR in raising FP4 by an internal modulus p, using the Frobenius constant f, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n// Test the XTR addition function r=w*x-conj(x)*y+z\n if (!strncmp(line,FP4_xtrAline, strlen(FP4_xtrAline)))\n {\n len = strlen(FP4_xtrAline);\n linePtr = line + len;\n read_FP4(&FP4_xtrA,linePtr);\n FP4_xtr_A(&FP4aux1,&FP4_1,&FP4_2,&FP4add,&FP4sub);\n FP4_reduce(&FP4aux1);\n if(!FP4_equals(&FP4aux1,&FP4_xtrA))\n {\n printf(\"ERROR in testing the XTR addition function r=w*x-conj(x)*y+z, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n// Test the XTR doubling function r=x^2-2*conj(x)\n if (!strncmp(line,FP4_xtrDline, strlen(FP4_xtrDline)))\n {\n len = strlen(FP4_xtrDline);\n linePtr = line + len;\n read_FP4(&FP4_xtrD,linePtr);\n FP4_xtr_D(&FP4aux1,&FP4_1);\n FP4_reduce(&FP4aux1);\n if(!FP4_equals(&FP4aux1,&FP4_xtrD))\n {\n printf(\"ERROR in testing the XTR doubling function r=x^2-2*conj(x), line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n }\n fclose(fp);\n\n printf(\"test_fp4_arithmetics() SUCCESS TEST ARITMETIC OF FP PASSED\\n\");\n return EXIT_SUCCESS;\n}\n"
},
{
"alpha_fraction": 0.518459677696228,
"alphanum_fraction": 0.5310513377189636,
"avg_line_length": 25.387096405029297,
"blob_id": "9c5b6d55750f39757ede7434630546d23afd6d18",
"content_id": "ce8f5a71a34593d580c7515b46a853eeebfd2a69",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 8180,
"license_type": "permissive",
"max_line_length": 152,
"num_lines": 310,
"path": "/wrappers/ios/Tests/milagro-ios-test-app/milagro-test-app/tests/test_ecdh.c",
"repo_name": "advatar/indy-sdk",
"src_encoding": "UTF-8",
"text": "/**\n * @file test_ecdh.c\n * @author Kealan McCusker\n * @brief Test function for ECDH\n *\n * LICENSE\n *\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements. See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership. The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License. You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing,\n * software distributed under the License is distributed on an\n * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n * KIND, either express or implied. See the License for the\n * specific language governing permissions and limitations\n * under the License.\n */\n\n/* Build executible after installation:\n\n gcc -std=c99 -g ./test_ecdh.c -I/opt/amcl/include -L/opt/amcl/lib -lamcl -lecdh -o test_ecdh\n\n*/\n\n#include \"ecdh.h\"\n#include \"utils.h\"\n#include <stdio.h>\n#include <string.h>\n#include <stdlib.h>\n\ntypedef enum { false, true } bool;\n\n#define LINE_LEN 500\n//#define DEBUG\n\nint test_ecdh(int argc, char** argv)\n{\n if (argc != 2)\n {\n printf(\"usage: ./test_ecdh [path to test vector file]\\n\");\n exit(EXIT_FAILURE);\n }\n int rc;\n FILE * fp = NULL;\n char line[LINE_LEN];\n char * linePtr = NULL;\n int l1=0, l2=0, i=0;\n\n char raw[256], key[EAS], ciphertext[EAS*2], res[EAS*2], plaintext[EAS*2];\n octet Key= {0,sizeof(key),key}, Ciphertext= {0,sizeof(ciphertext),ciphertext}, Plaintext= {0,sizeof(plaintext),plaintext}, Res= {0,sizeof(res),res};\n csprng rng;\n\n /* Fake random source */\n RAND_clean(&rng);\n for (i=0; i<256; i++) raw[i]=(char)i;\n RAND_seed(&rng,256,raw);\n\n\n char QCAVSx[EGS];\n const char* QCAVSxStr = \"QCAVSx = \";\n octet QCAVSxOct = {EGS,EGS,QCAVSx};\n\n#if CURVETYPE!=MONTGOMERY\n char QCAVSy[EGS];\n const char* QCAVSyStr = \"QCAVSy = \";\n octet QCAVSyOct = {EGS,EGS,QCAVSy};\n#endif\n\n char * dIUT = NULL;\n const char* dIUTStr = \"dIUT = \";\n octet dIUTOct;\n\n char QIUTx[EGS];\n const char* QIUTxStr = \"QIUTx = \";\n octet QIUTxOct = {EGS,EGS,QIUTx};\n\n#if CURVETYPE!=MONTGOMERY\n char QIUTy[EGS];\n const char* QIUTyStr = \"QIUTy = \";\n octet QIUTyOct = {EGS,EGS,QIUTy};\n#endif\n\n char * ZIUT = NULL;\n const char* ZIUTStr = \"ZIUT = \";\n octet ZIUTOct;\n\n char q[2*EFS+1];\n octet QOct= {0,sizeof(q),q};\n char z[EFS];\n octet ZOct= {0,sizeof(z),z};\n\n fp = fopen(argv[1], \"r\");\n if (fp == NULL)\n {\n printf(\"ERROR opening test vector file\\n\");\n exit(EXIT_FAILURE);\n }\n\n bool readLine = false;\n while (fgets(line, LINE_LEN, fp) != NULL)\n {\n i++;\n readLine = true;\n if (!strncmp(line, QCAVSxStr, strlen(QCAVSxStr)))\n {\n#ifdef DEBUG\n printf(\"line %d %s\\n\", i,line);\n#endif\n // Find hex value in string\n linePtr = line + strlen(QCAVSxStr);\n\n // Allocate data\n l1 = strlen(linePtr)-1;\n\n // QCAVSx binary value\n amcl_hex2bin(linePtr, QCAVSx, l1);\n }\n\n#if CURVETYPE!=MONTGOMERY\n if (!strncmp(line, QCAVSyStr, strlen(QCAVSyStr)))\n {\n#ifdef DEBUG\n printf(\"line %d %s\\n\", i,line);\n#endif\n // Find hex value in string\n linePtr = line + strlen(QCAVSyStr);\n\n // Allocate data\n l1 = strlen(linePtr)-1;\n\n // QCAVSy binary value\n amcl_hex2bin(linePtr, QCAVSy, l1);\n }\n#endif\n\n if (!strncmp(line, dIUTStr, strlen(dIUTStr)))\n {\n#ifdef DEBUG\n printf(\"line %d 
%s\\n\", i,line);\n#endif\n // Find hex value in string\n linePtr = line + strlen(dIUTStr);\n\n // Allocate memory\n l1 = strlen(linePtr)-1;\n l2 = l1/2;\n dIUT = (char*) malloc (l2);\n if (dIUT==NULL)\n exit(EXIT_FAILURE);\n\n // dIUT binary value\n amcl_hex2bin(linePtr, dIUT, l1);\n\n dIUTOct.len=l2;\n dIUTOct.max=l2;\n dIUTOct.val=dIUT;\n }\n\n if (!strncmp(line, QIUTxStr, strlen(QIUTxStr)))\n {\n#ifdef DEBUG\n printf(\"line %d %s\\n\", i,line);\n#endif\n // Find hex value in string\n linePtr = line + strlen(QIUTxStr);\n\n // Allocate data\n l1 = strlen(linePtr)-1;\n\n // QIUTx binary value\n amcl_hex2bin(linePtr, QIUTx, l1);\n }\n\n#if CURVETYPE!=MONTGOMERY\n if (!strncmp(line, QIUTyStr, strlen(QIUTyStr)))\n {\n#ifdef DEBUG\n printf(\"line %d %s\\n\", i,line);\n#endif\n // Find hex value in string\n linePtr = line + strlen(QIUTyStr);\n\n // Allocate data\n l1 = strlen(linePtr)-1;\n\n // QIUTy binary value\n amcl_hex2bin(linePtr, QIUTy, l1);\n }\n#endif\n\n if (!strncmp(line, ZIUTStr, strlen(ZIUTStr)))\n {\n#ifdef DEBUG\n printf(\"line %d %s\\n\", i,line);\n#endif\n // Find hex value in string\n linePtr = line + strlen(ZIUTStr);\n\n // Allocate memory\n l1 = strlen(linePtr)-1;\n l2 = l1/2;\n ZIUT = (char*) malloc (l2);\n if (ZIUT==NULL)\n exit(EXIT_FAILURE);\n\n // ZIUT binary value\n amcl_hex2bin(linePtr, ZIUT, l1);\n\n ZIUTOct.len=l2;\n ZIUTOct.max=l2;\n ZIUTOct.val=ZIUT;\n\n // Assign QIUT\n char q1[2*EFS+1];\n octet QIUTOct= {0,sizeof(q1),q1};\n#if CURVETYPE!=MONTGOMERY\n QIUTOct.val[0]=4;\n QIUTOct.len=1;\n OCT_joctet(&QIUTOct,&QIUTxOct);\n OCT_joctet(&QIUTOct,&QIUTyOct);\n#else\n QIUTOct.val[0]=2;\n QIUTOct.len=1;\n OCT_joctet(&QIUTOct,&QIUTxOct);\n#endif\n\n // Assign QCAVS\n char q2[2*EFS+1];\n octet QCAVSOct= {0,sizeof(q2),q2};\n#if CURVETYPE!=MONTGOMERY\n QCAVSOct.val[0]=4;\n QCAVSOct.len=1;\n OCT_joctet(&QCAVSOct,&QCAVSxOct);\n OCT_joctet(&QCAVSOct,&QCAVSyOct);\n#else\n QCAVSOct.val[0]=2;\n QCAVSOct.len=1;\n OCT_joctet(&QCAVSOct,&QCAVSxOct);\n#endif\n // Check correct public key generated\n ECP_KEY_PAIR_GENERATE(NULL,&dIUTOct,&QOct);\n rc = OCT_comp(&QOct,&QIUTOct);\n if (!rc)\n {\n printf(\"ERROR: TEST ECDH KEYPAIR FAILED LINE %d\\n\",i);\n#ifdef DEBUG\n printf(\"\\nline %d QOct: \",i);\n OCT_output(&QOct);\n printf(\"\\nline %d QIUTOct: \",i);\n OCT_output(&QIUTOct);\n printf(\"\\n\");\n#endif\n exit(EXIT_FAILURE);\n }\n\n // Check correct shared value generated\n ECPSVDP_DH(&dIUTOct,&QCAVSOct,&ZOct);\n rc = OCT_comp(&ZOct,&ZIUTOct);\n if (!rc)\n {\n\n printf(\"TEST ECDH Z FAILED LINE %d\\n\",i);\n#ifdef DEBUG\n printf(\"\\nline %d ZOct: \",i);\n OCT_output(&ZOct);\n printf(\"\\nline %dZIUTOct: \",i);\n OCT_output(&ZIUTOct);\n printf(\"\\n\");\n#endif\n exit(EXIT_FAILURE);\n }\n free(dIUT);\n dIUT = NULL;\n free(ZIUT);\n ZIUT = NULL;\n }\n }\n fclose(fp);\n if (!readLine)\n {\n printf(\"ERROR Empty test vector file\\n\");\n exit(EXIT_FAILURE);\n }\n\n// Self test AES-CBC\n for(i=0; i<20; i++)\n {\n OCT_rand(&Key,&rng,EAS*2);\n OCT_rand(&Plaintext,&rng,EAS);\n OCT_copy(&Res,&Plaintext);\n\n AES_CBC_IV0_ENCRYPT(&Key,&Plaintext,&Ciphertext);\n rc = AES_CBC_IV0_DECRYPT(&Key,&Ciphertext,&Plaintext);\n if (!rc || !OCT_comp(&Plaintext,&Res))\n {\n printf(\"ERROR AES_CBC decryption failed\\n\");\n exit(EXIT_FAILURE);\n }\n }\n\n printf(\"SUCCESS TEST ECDH KEYPAIR PASSED\\n\");\n exit(EXIT_SUCCESS);\n}\n"
},
{
"alpha_fraction": 0.7796284556388855,
"alphanum_fraction": 0.7975656390190125,
"avg_line_length": 32.934783935546875,
"blob_id": "4b76ffa4900c800a962d1e0f079807ae2997b506",
"content_id": "08367380e6dee682e950982172af4713cbab9dc9",
"detected_licenses": [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 1561,
"license_type": "permissive",
"max_line_length": 143,
"num_lines": 46,
"path": "/wrappers/java/src/test/java/org/hyperledger/indy/sdk/LedgerTest.java",
"repo_name": "advatar/indy-sdk",
"src_encoding": "UTF-8",
"text": "package org.hyperledger.indy.sdk;\n\nimport java.io.File;\n\nimport org.hyperledger.indy.sdk.LibSovrin;\nimport org.hyperledger.indy.sdk.ledger.Ledger;\nimport org.hyperledger.indy.sdk.ledger.LedgerResults.BuildGetDdoRequestResult;\nimport org.hyperledger.indy.sdk.ledger.LedgerResults.BuildGetNymRequestResult;\nimport org.hyperledger.indy.sdk.pool.Pool;\nimport org.hyperledger.indy.sdk.pool.PoolJSONParameters.OpenPoolLedgerJSONParameter;\nimport org.junit.Assert;\n\nimport junit.framework.TestCase;\n\npublic class LedgerTest extends TestCase {\n\n\tprivate Pool pool;\n\n\t@Override\n\tprotected void setUp() throws Exception {\n\n\t\tif (! LibSovrin.isInitialized()) LibSovrin.init(new File(\"./lib/libsovrin.so\"));\n\n\t\tOpenPoolLedgerJSONParameter openPoolLedgerOptions = new OpenPoolLedgerJSONParameter(null, null, null);\n\t\tthis.pool = Pool.openPoolLedger(\"myconfig\", openPoolLedgerOptions).get().getPool();\n\t}\n\n\t@Override\n\tprotected void tearDown() throws Exception {\n\n\t\tthis.pool.closePoolLedger();\n\t}\n\n\tpublic void testLedger() throws Exception {\n\n\t\tBuildGetDdoRequestResult result1 = Ledger.buildGetDdoRequest(\"did:sov:21tDAKCERh95uGgKbJNHYp\", \"did:sov:1yvXbmgPoUm4dl66D7KhyD\", \"{}\").get();\n\t\tAssert.assertNotNull(result1);\n\t\tString requestJson1 = result1.getRequestJson();\n\t\tAssert.assertNotNull(requestJson1);\n\n\t\tBuildGetNymRequestResult result2 = Ledger.buildGetNymRequest(\"did:sov:21tDAKCERh95uGgKbJNHYp\", \"did:sov:1yvXbmgPoUm4dl66D7KhyD\").get();\n\t\tAssert.assertNotNull(result2);\n\t\tString requestJson2 = result2.getRequestJson();\n\t\tAssert.assertNotNull(requestJson2);\n\t}\n}\n"
},
{
"alpha_fraction": 0.5625,
"alphanum_fraction": 0.5625,
"avg_line_length": 15.333333015441895,
"blob_id": "a785f0bbd723d57ebb8497d7054e2db3e3ffd163",
"content_id": "429e0600a048f3bdd6da939d8da3e0dcf90db5e3",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Rust",
"length_bytes": 48,
"license_type": "permissive",
"max_line_length": 30,
"num_lines": 3,
"path": "/src/main.rs",
"repo_name": "advatar/indy-sdk",
"src_encoding": "UTF-8",
"text": "pub fn main() {\n println!(\"Implement me!\");\n}"
},
{
"alpha_fraction": 0.7979010343551636,
"alphanum_fraction": 0.7979010343551636,
"avg_line_length": 33.38144302368164,
"blob_id": "a2729f95906cb127191853844d1e9950c91e831b",
"content_id": "a28ae72ea494731c5abc6d2f1dfbd08f4742b378",
"detected_licenses": [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 3335,
"license_type": "permissive",
"max_line_length": 102,
"num_lines": 97,
"path": "/wrappers/java/src/main/java/org/hyperledger/indy/sdk/ledger/LedgerResults.java",
"repo_name": "advatar/indy-sdk",
"src_encoding": "UTF-8",
"text": "package org.hyperledger.indy.sdk.ledger;\n\nimport org.hyperledger.indy.sdk.SovrinJava;\n\n/**\n * ledger.rs results\n */\npublic final class LedgerResults {\n\n\tprivate LedgerResults() {\n\n\t}\n\n\tpublic static class SignAndSubmitRequestResult extends SovrinJava.Result {\n\n\t\tprivate String requestResultJson;\n\t\tSignAndSubmitRequestResult(String requestResultJson) { this.requestResultJson = requestResultJson; }\n\t\tpublic String getRequestResultJson() { return this.requestResultJson; }\n\t}\n\n\tpublic static class SubmitRequestResult extends SovrinJava.Result {\n\n\t\tprivate String requestResultJson;\n\t\tSubmitRequestResult(String requestResultJson) { this.requestResultJson = requestResultJson; }\n\t\tpublic String getRequestResultJson() { return this.requestResultJson; }\n\t}\n\n\tpublic static class BuildGetDdoRequestResult extends SovrinJava.Result {\n\n\t\tprivate String requestJson;\n\t\tBuildGetDdoRequestResult(String requestJson) { this.requestJson = requestJson; }\n\t\tpublic String getRequestJson() { return this.requestJson; }\n\t}\n\n\tpublic static class BuildNymRequestResult extends SovrinJava.Result {\n\n\t\tprivate String requestJson;\n\t\tBuildNymRequestResult(String requestJson) { this.requestJson = requestJson; }\n\t\tpublic String getRequestJson() { return this.requestJson; }\n\t}\n\n\tpublic static class BuildAttribRequestResult extends SovrinJava.Result {\n\n\t\tprivate String requestJson;\n\t\tBuildAttribRequestResult(String requestJson) { this.requestJson = requestJson; }\n\t\tpublic String getRequestJson() { return this.requestJson; }\n\t}\n\n\tpublic static class BuildGetAttribRequestResult extends SovrinJava.Result {\n\n\t\tprivate String requestJson;\n\t\tBuildGetAttribRequestResult(String requestJson) { this.requestJson = requestJson; }\n\t\tpublic String getRequestJson() { return this.requestJson; }\n\t}\n\n\tpublic static class BuildGetNymRequestResult extends SovrinJava.Result {\n\n\t\tprivate String requestJson;\n\t\tBuildGetNymRequestResult(String requestJson) { this.requestJson = requestJson; }\n\t\tpublic String getRequestJson() { return this.requestJson; }\n\t}\n\n\tpublic static class BuildSchemaRequestResult extends SovrinJava.Result {\n\n\t\tprivate String requestJson;\n\t\tBuildSchemaRequestResult(String requestJson) { this.requestJson = requestJson; }\n\t\tpublic String getRequestJson() { return this.requestJson; }\n\t}\n\n\tpublic static class BuildGetSchemaRequestResult extends SovrinJava.Result {\n\n\t\tprivate String requestJson;\n\t\tBuildGetSchemaRequestResult(String requestJson) { this.requestJson = requestJson; }\n\t\tpublic String getRequestJson() { return this.requestJson; }\n\t}\n\n\tpublic static class BuildClaimDefTxnResult extends SovrinJava.Result {\n\n\t\tprivate String requestJson;\n\t\tBuildClaimDefTxnResult(String requestJson) { this.requestJson = requestJson; }\n\t\tpublic String getRequestJson() { return this.requestJson; }\n\t}\n\n\tpublic static class BuildGetClaimDefTxnResult extends SovrinJava.Result {\n\n\t\tprivate String requestResultJson;\n\t\tBuildGetClaimDefTxnResult(String requestResultJson) { this.requestResultJson = requestResultJson; }\n\t\tpublic String getRequestResultJson() { return this.requestResultJson; }\n\t}\n\n\tpublic static class BuildNodeRequestResult extends SovrinJava.Result {\n\n\t\tprivate String requestJson;\n\t\tBuildNodeRequestResult(String requestJson) { this.requestJson = requestJson; }\n\t\tpublic String getRequestJson() { return this.requestJson; }\n\t}\n}\n"
},
{
"alpha_fraction": 0.372538298368454,
"alphanum_fraction": 0.372538298368454,
"avg_line_length": 32.23636245727539,
"blob_id": "d1288ba5e1f2b252f18d9d9038804745719f6b32",
"content_id": "aa55156eb4382ef6e046e4b95284dbdeb3ac1474",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1828,
"license_type": "permissive",
"max_line_length": 58,
"num_lines": 55,
"path": "/wrappers/python/sovrin/signus.py",
"repo_name": "advatar/indy-sdk",
"src_encoding": "UTF-8",
"text": "from typing import Callable\nfrom . import SovrinError\n\nclass Signus(object):\n\n \"\"\"TODO: document it\"\"\"\n async def create_and_store_my_did(command_handle: int,\n wallet_handle: int,\n did_json: str,\n did: str,\n verkey: str,\n pk: str) -> None:\n pass\n\n async def replace_keys(command_handle: int,\n wallet_handle: int,\n did: str,\n identity_json: str,\n verkey: str,\n pk: str) -> None:\n pass\n\n async def store_their_did(command_handle: int,\n wallet_handle: int,\n identity_json: str) -> None:\n pass\n\n async def sign(command_handle: int,\n wallet_handle: int,\n did: str,\n msg: str,\n signature: str) -> None:\n pass\n\n async def verify_signature(command_handle: int,\n wallet_handle: int,\n did: str,\n msg: str,\n signature: str,\n valid: bool) -> None:\n pass\n\n async def encrypt(command_handle: int,\n wallet_handle: int,\n did: str,\n msg: str,\n encrypted_msg: str) -> None:\n pass\n\n async def decrypt(command_handle: int,\n wallet_handle: int,\n did: str,\n encrypted_msg: str,\n decrypted_msg: str) -> None:\n pass\n"
},
{
"alpha_fraction": 0.5059085488319397,
"alphanum_fraction": 0.5193243622779846,
"avg_line_length": 31.54751205444336,
"blob_id": "9a382f89c904527ff29347db9921b30c800234c0",
"content_id": "44621b9ad8f89661d92f27852884be89fbf73f85",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 14386,
"license_type": "permissive",
"max_line_length": 123,
"num_lines": 442,
"path": "/wrappers/ios/Tests/milagro-ios-test-app/milagro-test-app/tests/test_ecp_arithmetics.c",
"repo_name": "advatar/indy-sdk",
"src_encoding": "UTF-8",
"text": "/**\n * @file test_ecp_consistency.c\n * @author Alessandro Budroni\n * @brief Test for aritmetics with ECP\n *\n * LICENSE\n *\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements. See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership. The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License. You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing,\n * software distributed under the License is distributed on an\n * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n * KIND, either express or implied. See the License for the\n * specific language governing permissions and limitations\n * under the License.\n */\n\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include \"arch.h\"\n#include \"amcl.h\"\n#include \"utils.h\"\n\n#define LINE_LEN 1000\n#define MAX_STRING 400\n#define PIN 1234\n\nstatic void read_BIG(BIG A, char* string)\n{\n int len;\n char bin[LINE_LEN];\n BIG_zero(A);\n len = strlen(string)+1;\n amcl_hex2bin(string,bin,len);\n len = (len-1)/2;;\n BIG_fromBytesLen(A,bin,len);\n BIG_norm(A);\n}\n\nstatic int read_ECP(ECP *ecp, char* string)\n{\n BIG x;\n#if CURVETYPE!=MONTGOMERY\n BIG y;\n#endif\n char *stringy = strchr(string,':');\n stringy[0] = '\\0';\n read_BIG(x,string);\n#if CURVETYPE==MONTGOMERY\n return ECP_set(ecp,x);\n#else\n stringy++;\n read_BIG(y,stringy);\n return ECP_set(ecp,x,y);\n#endif\n}\n\nint test_ecp_arithmetics(int argc, char** argv)\n{\n printf(\"test_ecp_arithmetics() started\\n\");\n if (argc != 2)\n {\n printf(\"usage: ./test_ecp_arithmetics [path to test vector file]\\n\");\n exit(EXIT_FAILURE);\n }\n\n int i=0, len=0;\n\n char line[LINE_LEN];\n char * linePtr = NULL;\n\n ECP inf, ECPaux1;\n BIG BIGaux1, Mod;\n\n char oct[LINE_LEN];\n octet OCTaux = {0,sizeof(oct),oct};\n#if CURVETYPE!=MONTGOMERY\n BIG BIGaux2;\n ECP ECPaux2;\n#endif\n ECP ecp1;\n const char* ECP1line = \"ECP1 = \";\n#if CURVETYPE!=MONTGOMERY\n ECP ecp2;\n const char* ECP2line = \"ECP2 = \";\n ECP ecpsum;\n const char* ECPsumline = \"ECPsum = \";\n ECP ecpneg;\n const char* ECPnegline = \"ECPneg = \";\n ECP ecpsub;\n const char* ECPsubline = \"ECPsub = \";\n#endif\n ECP ecpdbl;\n const char* ECPdblline = \"ECPdbl = \";\n BIG BIGscalar1;\n const char* BIGscalar1line = \"BIGscalar1 = \";\n ECP ecpmul;\n const char* ECPmulline = \"ECPmul = \";\n ECP ecpwrong;\n const char* ECPwrongline = \"ECPwrong = \";\n ECP ecpinf;\n const char* ECPinfline = \"ECPinf = \";\n#if CURVETYPE!=MONTGOMERY\n ECP ecppinmul;\n const char* ECPpinmulline = \"ECPpinmul = \";\n BIG BIGscalar2;\n const char* BIGscalar2line = \"BIGscalar2 = \";\n ECP ecpmul2;\n const char* ECPmul2line = \"ECPmul2 = \";\n ECP ecpeven;\n const char* ECPevenline = \"ECPeven = \";\n ECP ecpodd;\n const char* ECPoddline = \"ECPodd = \";\n#endif\n#if CURVETYPE==MONTGOMERY\n ECP ecpmul3;\n const char* ECPmul3line = \"ECPmul3 = \";\n#endif\n\n ECP_inf(&inf);\n BIG_rcopy(Mod,Modulus);\n\n if(!ECP_isinf(&inf))\n {\n printf(\"ERROR setting ECP to infinity\\n\");\n exit(EXIT_FAILURE);\n }\n\n FILE *fp;\n fp = fopen(argv[1],\"r\");\n if (fp == NULL)\n {\n printf(\"ERROR opening test vector file\\n\");\n exit(EXIT_FAILURE);\n }\n\n while (fgets(line, LINE_LEN, fp) != NULL)\n {\n i++;\n if 
(!strncmp(line, ECP1line, strlen(ECP1line))) // get first test vector\n {\n len = strlen(ECP1line);\n linePtr = line + len;\n if(!read_ECP(&ecp1,linePtr) || ECP_isinf(&ecp1))\n {\n printf(\"ERROR getting test vector input ECP, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n#if CURVETYPE!=MONTGOMERY\n ECP_get(BIGaux1,BIGaux2,&ecp1);\n FP_nres(BIGaux1);\n FP_nres(BIGaux2);\n FP_sqr(BIGaux2,BIGaux2);\n ECP_rhs(BIGaux1,BIGaux1);\n FP_reduce(BIGaux1); // in case of lazy reduction\n FP_reduce(BIGaux2); // in case of lazy reduction\n if ((BIG_comp(BIGaux1,BIGaux2)!=0)) // test if y^2=f(x)\n {\n printf(\"ERROR computing right hand side of equation ECP, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n#endif\n ECP_toOctet(&OCTaux,&ecp1);\n ECP_fromOctet(&ECPaux1,&OCTaux);\n if(!ECP_equals(&ECPaux1,&ecp1)) // test octet conversion\n {\n printf(\"ERROR converting ECP to/from OCTET, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n#if CURVETYPE!=MONTGOMERY\n if (!strncmp(line, ECP2line, strlen(ECP2line))) //get second test vector\n {\n len = strlen(ECP2line);\n linePtr = line + len;\n if(!read_ECP(&ecp2,linePtr) || ECP_isinf(&ecp2))\n {\n printf(\"ERROR getting test vector input ECP, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n if (!strncmp(line, ECPsumline, strlen(ECPsumline)))\n {\n len = strlen(ECPsumline);\n linePtr = line + len;\n if(!read_ECP(&ecpsum,linePtr) || ECP_isinf(&ecpsum))\n {\n printf(\"ERROR getting test vector input ECP, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n ECP_copy(&ECPaux1,&ecp1);\n ECP_add(&ECPaux1,&ecp2);\n ECP_affine(&ECPaux1);\n ECP_copy(&ECPaux2,&ecp2);\n ECP_add(&ECPaux2,&ecp1);\n ECP_affine(&ECPaux2);\n if(!ECP_equals(&ECPaux1,&ecpsum) || !ECP_equals(&ECPaux2,&ecpsum)) // test addition P+Q and Q+P (commutativity)\n {\n printf(\"ERROR adding two ECPs, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n ECP_copy(&ECPaux1,&ecp1); // test associativity\n ECP_add(&ECPaux1,&ecp2);\n ECP_add(&ECPaux1,&ecpsum);\n ECP_copy(&ECPaux2,&ecpsum);\n ECP_add(&ECPaux2,&ecp2);\n ECP_add(&ECPaux2,&ecp1);\n if(!ECP_equals(&ECPaux1,&ECPaux2)) // test associativity (P+Q)+R = P+(Q+R)\n {\n printf(\"ERROR testing associativity between three ECPs, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n if (!strncmp(line, ECPsubline, strlen(ECPsubline)))\n {\n len = strlen(ECPsubline);\n linePtr = line + len;\n if(!read_ECP(&ecpsub,linePtr) || ECP_isinf(&ecpsub))\n {\n printf(\"ERROR getting test vector input ECP, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n ECP_copy(&ECPaux1,&ecp1);\n ECP_sub(&ECPaux1,&ecp2);\n ECP_affine(&ECPaux1);\n if(!ECP_equals(&ECPaux1,&ecpsub)) // test subtraction P-Q\n {\n printf(\"ERROR computing subtraction of two ECPs, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n if (!strncmp(line, ECPnegline, strlen(ECPnegline)))\n {\n len = strlen(ECPnegline);\n linePtr = line + len;\n if(!read_ECP(&ecpneg,linePtr) || ECP_isinf(&ecpneg))\n {\n printf(\"ERROR getting test vector input ECP, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n ECP_copy(&ECPaux1,&ecp1);\n ECP_neg(&ECPaux1);\n ECP_affine(&ECPaux1);\n if(!ECP_equals(&ECPaux1,&ecpneg))\n {\n printf(\"ERROR computing negative of ECP, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n#endif\n if (!strncmp(line, ECPdblline, strlen(ECPdblline)))\n {\n len = strlen(ECPdblline);\n linePtr = line + len;\n if(!read_ECP(&ecpdbl,linePtr) || ECP_isinf(&ecpdbl))\n {\n printf(\"ERROR getting test vector input ECP, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n ECP_copy(&ECPaux1,&ecp1);\n ECP_dbl(&ECPaux1);\n ECP_affine(&ECPaux1);\n 
if(!ECP_equals(&ECPaux1,&ecpdbl))\n {\n ECP_outputxyz(&ECPaux1);\n ECP_outputxyz(&ecpdbl);\n printf(\"ERROR computing double of ECP, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n#if CURVETYPE==MONTGOMERY\n if (!strncmp(line, ECPmul3line, strlen(ECPmul3line)))\n {\n len = strlen(ECPmul3line);\n linePtr = line + len;\n if(!read_ECP(&ecpmul3,linePtr) || ECP_isinf(&ecpmul3))\n {\n printf(\"ERROR getting test vector input ECP, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n BIG_one(BIGaux1);\n BIG_inc(BIGaux1,2);\n BIG_norm(BIGaux1);\n ECP_copy(&ECPaux1,&ecp1);\n ECP_mul(&ECPaux1,BIGaux1);\n ECP_affine(&ECPaux1);\n if(!ECP_equals(&ECPaux1,&ecpmul3))\n {\n printf(\"ERROR computing multiplication of ECP by 3, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n ECP_copy(&ECPaux1,&ecpdbl);\n ECP_add(&ECPaux1,&ecp1,&ecp1);\n if(!ECP_equals(&ECPaux1,&ecpmul3))\n {\n printf(\"ERROR computing multiplication of ECP by 3, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n#endif\n if (!strncmp(line, BIGscalar1line, strlen(BIGscalar1line)))\n {\n len = strlen(BIGscalar1line);\n linePtr = line + len;\n read_BIG(BIGscalar1,linePtr);\n }\n if (!strncmp(line, ECPmulline, strlen(ECPmulline)))\n {\n len = strlen(ECPmulline);\n linePtr = line + len;\n if(!read_ECP(&ecpmul,linePtr))\n {\n printf(\"ERROR getting test vector input ECP, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n ECP_copy(&ECPaux1,&ecp1);\n ECP_mul(&ECPaux1,BIGscalar1);\n ECP_affine(&ECPaux1);\n if(!ECP_equals(&ECPaux1,&ecpmul))\n {\n ECP_outputxyz(&ECPaux1);\n ECP_outputxyz(&ecpmul);\n printf(\"ERROR computing multiplication of ECP by a scalar, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n#if CURVETYPE!=MONTGOMERY\n if (!strncmp(line, ECPpinmulline, strlen(ECPpinmulline)))\n {\n len = strlen(ECPpinmulline);\n linePtr = line + len;\n if(!read_ECP(&ecppinmul,linePtr))\n {\n printf(\"ERROR getting test vector input ECP, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n ECP_copy(&ECPaux1,&ecp1);\n ECP_pinmul(&ECPaux1,PIN,14);\n ECP_affine(&ECPaux1);\n if(!ECP_equals(&ECPaux1,&ecppinmul))\n {\n printf(\"ERROR computing multiplication of ECP by small integer, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n if (!strncmp(line, BIGscalar2line, strlen(BIGscalar2line)))\n {\n len = strlen(BIGscalar2line);\n linePtr = line + len;\n read_BIG(BIGscalar2,linePtr);\n }\n if (!strncmp(line, ECPmul2line, strlen(ECPmul2line)))\n {\n len = strlen(ECPmul2line);\n linePtr = line + len;\n if(!read_ECP(&ecpmul2,linePtr))\n {\n printf(\"ERROR getting test vector input ECP, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n ECP_copy(&ECPaux1,&ecp1);\n ECP_copy(&ECPaux2,&ecp2);\n ECP_mul2(&ECPaux1,&ECPaux2,BIGscalar1,BIGscalar2);\n ECP_affine(&ECPaux1);\n if(!ECP_equals(&ECPaux1,&ecpmul2))\n {\n printf(\"ERROR computing linear combination of 2 ECPs, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n#endif\n if (!strncmp(line, ECPwrongline, strlen(ECPwrongline)))\n {\n len = strlen(ECPwrongline);\n linePtr = line + len;\n if(read_ECP(&ecpwrong,linePtr) || !ECP_isinf(&ecpwrong) || !ECP_equals(&ecpwrong,&inf))\n {\n printf(\"ERROR identifying wrong ECP, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n if (!strncmp(line, ECPinfline, strlen(ECPinfline)))\n {\n len = strlen(ECPinfline);\n linePtr = line + len;\n if(read_ECP(&ecpinf,linePtr) || !ECP_isinf(&ecpinf) || !ECP_equals(&ecpinf,&inf))\n {\n printf(\"ERROR identifying infinite point ECP, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n#if CURVETYPE!=MONTGOMERY\n if (!strncmp(line, ECPevenline, strlen(ECPevenline)))\n {\n len = 
strlen(ECPevenline);\n linePtr = line + len;\n if(!read_ECP(&ecpeven,linePtr))\n {\n printf(\"ERROR getting test vector input ECP, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n ECP_get(BIGaux1,BIGaux2,&ecp1);\n BIG_norm(BIGaux1);\n ECP_setx(&ECPaux1,BIGaux1,0);\n if(!ECP_equals(&ECPaux1,&ecpeven))\n {\n printf(\"ERROR computing ECP from coordinate x and with y even, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n if (!strncmp(line, ECPoddline, strlen(ECPoddline)))\n {\n len = strlen(ECPoddline);\n linePtr = line + len;\n if(!read_ECP(&ecpodd,linePtr))\n {\n printf(\"ERROR getting test vector input ECP, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n ECP_setx(&ECPaux1,BIGaux1,1);\n if(!ECP_equals(&ECPaux1,&ecpodd))\n {\n printf(\"ERROR computing ECP from coordinate x and with y odd, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n#endif\n }\n fclose(fp);\n\n printf(\"test_ecp_arithmetics() SUCCESS TEST ARITMETIC OF ECP PASSED\\n\");\n return EXIT_SUCCESS;\n}\n"
},
{
"alpha_fraction": 0.35632839798927307,
"alphanum_fraction": 0.35632839798927307,
"avg_line_length": 38.41572952270508,
"blob_id": "bb49791f178022720c7d34f175fb0886db5a0561",
"content_id": "25692b5590792c6f760e99aeac734dc153ae214b",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3508,
"license_type": "permissive",
"max_line_length": 72,
"num_lines": 89,
"path": "/wrappers/python/sovrin/ledger.py",
"repo_name": "advatar/indy-sdk",
"src_encoding": "UTF-8",
"text": "from typing import Callable\nfrom . import SovrinError\n\nclass Ledger(object):\n\n \"\"\"TODO: document it\"\"\"\n\n async def sign_and_submit_request(command_handle: int,\n wallet_handle: int,\n submitter_did: str,\n request_json: str,\n request_result_json: str) -> None:\n pass\n\n async def submit_request(command_handle: int,\n pool_handle: int,\n request_json: str,\n request_result_json: str) -> None:\n pass\n\n async def build_get_ddo_request(command_handle: int,\n submitter_did: str,\n target_did: str,\n request_json: str) -> None:\n pass\n\n async def build_nym_request(command_handle: int,\n submitter_did: str,\n target_did: str,\n verkey: str,\n xref: str,\n data: str,\n role: str,\n request_json: str) -> None:\n pass\n\n async def build_attrib_request(command_handle: int,\n submitter_did: str,\n target_did: str,\n hash: str,\n raw: str,\n enc: str,\n request_json: str) -> None:\n pass\n\n async def build_get_attrib_request(command_handle: int,\n submitter_did: str,\n target_did: str,\n data: str,\n request_json: str) -> None:\n pass\n\n async def build_get_nym_request(command_handle: int,\n submitter_did: str,\n target_did: str,\n request_json: str) -> None:\n pass\n\n async def build_schema_request(command_handle: int,\n submitter_did: str,\n data: str,\n request_json: str) -> None:\n pass\n\n async def build_get_schema_request(command_handle: int,\n submitter_did: str,\n data: str,\n request_json: str) -> None:\n pass\n\n async def build_claim_def_txn(command_handle: int,\n submitter_did: str,\n xref: str,\n data: str,\n request_result_json: str) -> None:\n pass\n\n async def build_get_claim_def_txn(command_handle: int,\n submitter_did: str,\n xref: str,\n request_json: str) -> None:\n pass\n\n async def build_node_request(command_handle: int,\n submitter_did: str,\n target_did: str,\n data: str,\n request_json: str) -> None:\n pass\n"
},
{
"alpha_fraction": 0.559289813041687,
"alphanum_fraction": 0.637920081615448,
"avg_line_length": 27.405405044555664,
"blob_id": "6eafde16a3440b796b0471bd8e8adcad156e40c4",
"content_id": "8bd89990013a6fa36598600074bbee2829a25080",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 3154,
"license_type": "permissive",
"max_line_length": 136,
"num_lines": 111,
"path": "/wrappers/ios/Tests/milagro-ios-test-app/milagro-test-app/tests/test_wcc_gcm.c",
"repo_name": "advatar/indy-sdk",
"src_encoding": "UTF-8",
"text": "/**\n * @file test_wcc_gcm.c\n * @author Kealan McCusker\n * @brief Test WCC protocol with GCM\n *\n * LICENSE\n *\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements. See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership. The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License. You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing,\n * software distributed under the License is distributed on an\n * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n * KIND, either express or implied. See the License for the\n * specific language governing permissions and limitations\n * under the License.\n */\n\n#include <stdlib.h>\n#include <string.h>\n#include \"amcl.h\"\n#include \"wcc.h\"\n#include \"utils.h\"\n\n\nint test_wcc_gcm()\n{\n printf(\"test_wcc_gcm() started\\n\");\n char* KT=\"feffe9928665731c6d6a8f9467308308\";\n char* MT=\"d9313225f88406e5a55909c5aff5269a86a7a9531534f7da2e4c303d8a318a721c3c0c95956809532fcf0e2449a6b525b16aedf5aa0de657ba637b39\";\n char* HT=\"feedfacedeadbeeffeedfacedeadbeefabaddad2\";\n char* NT=\"9313225df88406e555909c5aff5269aa6a7a9538534f7da1e4c303d2a318a728c3c0c95156809539fcf0e2429a6b525416aedbf5a0de6a57a637b39b\";\n // Tag should be 619cc5aefffe0bfa462af43c1699d050\n\n int lenM=strlen(MT)/2;\n int lenH=strlen(HT)/2;\n int lenK=strlen(KT)/2;\n int lenIV=strlen(NT)/2;\n\n char t1[PTAG]; // Tag\n char t2[PTAG]; // Tag\n char k[PAS]; // AES Key\n char h[64]; // Header - to be included in Authentication, but not encrypted\n char iv[100]; // IV - Initialisation vector\n char m[100]; // Plaintext to be encrypted/authenticated\n char c[100]; // Ciphertext\n char p[100]; // Recovered Plaintext\n octet T1= {sizeof(t1),sizeof(t1),t1};\n octet T2= {sizeof(t2),sizeof(t2),t2};\n octet K= {0,sizeof(k),k};\n octet H= {0,sizeof(h),h};\n octet IV= {0,sizeof(iv),iv};\n octet M= {0,sizeof(m),m};\n octet C= {0,sizeof(c),c};\n octet P= {0,sizeof(p),p};\n M.len=lenM;\n K.len=lenK;\n H.len=lenH;\n IV.len=lenIV;\n\n OCT_fromHex(&M, MT);\n OCT_fromHex(&H, HT);\n OCT_fromHex(&IV, NT);\n OCT_fromHex(&K, KT);\n\n// printf(\"Plaintext: \");\n// OCT_output(&M);\n// printf(\"\\n\");\n\n WCC_AES_GCM_ENCRYPT(&K, &IV, &H, &M, &C, &T1);\n\n// printf(\"Ciphertext: \");\n// OCT_output(&C);\n// printf(\"\\n\");\n\n// printf(\"Encryption Tag: \");\n// OCT_output(&T1);\n// printf(\"\\n\");\n\n WCC_AES_GCM_DECRYPT(&K, &IV, &H, &C, &P, &T2);\n\n// printf(\"Plaintext: \");\n// OCT_output(&P);\n// printf(\"\\n\");\n\n// printf(\"Decryption Tag: \");\n// OCT_output(&T2);\n// printf(\"\\n\");\n\n if (!OCT_comp(&M,&P))\n {\n printf(\"test_wcc_gcm() FAILURE Decryption\\n\");\n return 1;\n }\n\n if (!OCT_comp(&T1,&T2))\n {\n printf(\"test_wcc_gcm() FAILURE TAG mismatch\\n\");\n return 1;\n }\n\n printf(\"test_wcc_gcm() SUCCESS\\n\");\n return 0;\n}\n\n"
},
{
"alpha_fraction": 0.48016998171806335,
"alphanum_fraction": 0.48158639669418335,
"avg_line_length": 25.148147583007812,
"blob_id": "e83292db05e3341de20308690a877786df00c281",
"content_id": "b7786b198bafdd816222edf4e836a648be60362b",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 706,
"license_type": "permissive",
"max_line_length": 74,
"num_lines": 27,
"path": "/wrappers/python/sovrin/wallet.py",
"repo_name": "advatar/indy-sdk",
"src_encoding": "UTF-8",
"text": "from typing import Callable\nfrom . import SovrinError\n\nclass Wallet(object):\n\n \"\"\"TODO: document it\"\"\"\n\n async def create_wallet(pool_name: str,\n name: str,\n xtype: str,\n config: str,\n credentials: str) -> None:\n pass\n\n async def open_wallet(pool_handle: int,\n name: str,\n config: str) -> int:\n return -1\n\n async def close_wallet(wallet_handle: int) -> None:\n pass\n\n async def delete_wallet(name:str) -> None:\n pass\n\n async def set_seq_no_for_value(wallet_key: str, seq_num: str) -> None:\n pass\n"
},
{
"alpha_fraction": 0.80033278465271,
"alphanum_fraction": 0.80033278465271,
"avg_line_length": 34.70296859741211,
"blob_id": "8b622165f69d89e468f30900de3adbd79c5023e7",
"content_id": "23e327454b0bc6c69484c4c2c1393ade1589afff",
"detected_licenses": [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 3606,
"license_type": "permissive",
"max_line_length": 150,
"num_lines": 101,
"path": "/wrappers/java/src/main/java/org/hyperledger/indy/sdk/anoncreds/AnoncredsResults.java",
"repo_name": "advatar/indy-sdk",
"src_encoding": "UTF-8",
"text": "package org.hyperledger.indy.sdk.anoncreds;\n\nimport org.hyperledger.indy.sdk.SovrinJava;\n\n/**\n * anoncreds.rs results\n */\npublic final class AnoncredsResults {\n\n\tprivate AnoncredsResults() {\n\n\t}\n\n\tpublic static class IssuerCreateAndStoreClaimDefResult extends SovrinJava.Result {\n\n\t\tprivate String claimDefJson, claimDefUuid;\n\t\tIssuerCreateAndStoreClaimDefResult(String claimDefJson, String claimDefUuid) { this.claimDefJson = claimDefJson; this.claimDefUuid = claimDefUuid; }\n\t\tpublic String getClaimDefJson() { return this.claimDefJson; }\n\t\tpublic String getClaimDefUuid() { return this.claimDefUuid; }\n\t}\n\n\tpublic static class IssuerCreateAndStoreRevocRegResult extends SovrinJava.Result {\n\n\t\tprivate String revocRegJson, revocRegUuid;\n\t\tIssuerCreateAndStoreRevocRegResult(String revocRegJson, String revocRegUuid) { this.revocRegJson = revocRegJson; this.revocRegUuid = revocRegUuid; }\n\t\tpublic String getRevocRegJson() { return this.revocRegJson; }\n\t\tpublic String getRevocRegUuid() { return this.revocRegUuid; }\n\t}\n\n\tpublic static class IssuerCreateClaimResult extends SovrinJava.Result {\n\n\t\tprivate String revocRegUpdateJson, claimJson;\n\t\tIssuerCreateClaimResult(String revocRegUpdateJson, String claimJson) { this.revocRegUpdateJson = revocRegUpdateJson; this.claimJson = claimJson; }\n\t\tpublic String getRevocRegUpdateJson() { return this.revocRegUpdateJson; }\n\t\tpublic String getClaimJson() { return this.claimJson; }\n\t}\n\n\tpublic static class IssuerRevokeClaimResult extends SovrinJava.Result {\n\n\t\tprivate String revocRegUpdateJson;\n\t\tIssuerRevokeClaimResult(String revocRegUpdateJson) { this.revocRegUpdateJson = revocRegUpdateJson; }\n\t\tpublic String getRevocRegUpdateJson() { return this.revocRegUpdateJson; }\n\t}\n\n\tpublic static class ProverStoreClaimOfferResult extends SovrinJava.Result {\n\n\t\tProverStoreClaimOfferResult() { }\n\t}\n\n\tpublic static class ProverGetClaimOffersResult extends SovrinJava.Result {\n\n\t\tprivate String claimOffersJson;\n\t\tProverGetClaimOffersResult(String claimOffersJson) { this.claimOffersJson = claimOffersJson; }\n\t\tpublic String getClaimOffersJson() { return this.claimOffersJson; }\n\t}\n\n\tpublic static class ProverCreateMasterSecretResult extends SovrinJava.Result {\n\n\t\tProverCreateMasterSecretResult() { }\n\t}\n\n\tpublic static class ProverCreateAndStoreClaimReqResult extends SovrinJava.Result {\n\n\t\tprivate String claimReqJson;\n\t\tProverCreateAndStoreClaimReqResult(String claimReqJson) { this.claimReqJson = claimReqJson; }\n\t\tpublic String getClaimReqJson() { return this.claimReqJson; }\n\t}\n\n\tpublic static class ProverStoreClaimResult extends SovrinJava.Result {\n\n\t\tProverStoreClaimResult() { }\n\t}\n\n\tpublic static class ProverGetClaimsResult extends SovrinJava.Result {\n\n\t\tprivate String claimsJson;\n\t\tProverGetClaimsResult(String claimsJson) { this.claimsJson = claimsJson; }\n\t\tpublic String getClaimsJson() { return this.claimsJson; }\n\t}\n\n\tpublic static class ProverGetClaimsForProofReqResult extends SovrinJava.Result {\n\n\t\tprivate String claimsJson;\n\t\tProverGetClaimsForProofReqResult(String claimsJson) { this.claimsJson = claimsJson; }\n\t\tpublic String getClaimsJson() { return this.claimsJson; }\n\t}\n\n\tpublic static class ProverCreateProofResult extends SovrinJava.Result {\n\n\t\tprivate String proofJson;\n\t\tProverCreateProofResult(String proofJson) { this.proofJson = proofJson; }\n\t\tpublic String getProofJson() { return 
this.proofJson; }\n\t}\n\n\tpublic static class VerifierVerifyProofResult extends SovrinJava.Result {\n\n\t\tprivate boolean valid;\n\t\tVerifierVerifyProofResult(boolean valid) { this.valid = valid; }\n\t\tpublic boolean isValid() { return this.valid; }\n\t}\n}\n"
},
{
"alpha_fraction": 0.7453051805496216,
"alphanum_fraction": 0.7570422291755676,
"avg_line_length": 46.33333206176758,
"blob_id": "5f7c1b511921385645887e5de1299d819cfa1ec6",
"content_id": "34864a8daa3772e7f23325d1c16bdbb2315f1374",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 852,
"license_type": "permissive",
"max_line_length": 118,
"num_lines": 18,
"path": "/README.md",
"repo_name": "advatar/indy-sdk",
"src_encoding": "UTF-8",
"text": "# Indy SDK\n\nThis is the official SDK for [Hyperledger Indy](https://www.hyperledger.org/projects),\nwhich provides a distributed-ledger-based foundation for [self-sovereign identity](https://sovrin.org).\nThe major artifact of the SDK is a c-callable\nlibrary; there are also convenience wrappers for various programming languages.\n\nAll bugs, stories, and backlog for this project are managed through [Hyperledger's Jira](https://jira.hyperledger.org)\nin project IS (note that regular Indy tickets are in the INDY project instead...). Also, join\nus on [Jira's Rocket.Chat](chat.hyperledger.org) at #indy-sdk to discuss.\n\n## Building Indy SDK\n\n* [Ubuntu based distro (Ubuntu 16.04)](doc/ubuntu-build.md)\n* [RHEL based distro (Amazon Linux 2017.03)](doc/rhel-build.md)\n* [Windows](doc/windows-build.md)\n* [iOS](doc/ios-build.md)\n* [MacOS](doc/mac-build.md)\n"
},
{
"alpha_fraction": 0.6269185543060303,
"alphanum_fraction": 0.6564344763755798,
"avg_line_length": 23.22857093811035,
"blob_id": "8cbf97153b68ffe9e6624d45b0d13443a2b51c51",
"content_id": "9513e436a9bc80528fb7eab19b7611dcd7e9895c",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 847,
"license_type": "permissive",
"max_line_length": 88,
"num_lines": 35,
"path": "/ci/ubuntu.dockerfile",
"repo_name": "advatar/indy-sdk",
"src_encoding": "UTF-8",
"text": "FROM ubuntu:16.04\n\nARG uid=1000\n\nRUN apt-get update && \\\n apt-get install -y \\\n pkg-config \\\n libzmq3-dev \\\n libssl-dev \\\n curl \\\n build-essential \\\n libsqlite3-dev \\\n libsodium-dev \\\n cmake\n\nENV RUST_ARCHIVE=rust-1.16.0-x86_64-unknown-linux-gnu.tar.gz\nENV RUST_DOWNLOAD_URL=https://static.rust-lang.org/dist/$RUST_ARCHIVE\n\nRUN mkdir -p /rust\nWORKDIR /rust\n\nRUN curl -fsOSL $RUST_DOWNLOAD_URL \\\n && curl -s $RUST_DOWNLOAD_URL.sha256 | sha256sum -c - \\\n && tar -C /rust -xzf $RUST_ARCHIVE --strip-components=1 \\\n && rm $RUST_ARCHIVE \\\n && ./install.sh\n\nENV PATH=\"/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/root/.cargo/bin\"\n\nRUN useradd -ms /bin/bash -u $uid sovrin\nUSER sovrin\n\nRUN cargo install --git https://github.com/DSRCorporation/cargo-test-xunit\n\nWORKDIR /home/sorvin"
},
{
"alpha_fraction": 0.646789014339447,
"alphanum_fraction": 0.6743119359016418,
"avg_line_length": 20.899999618530273,
"blob_id": "ac0f6ba9163ca442b3e832d570ee080f9c686c90",
"content_id": "6df7912913c87be230ad5cb38f5a43f8ea38e8af",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 218,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 10,
"path": "/include/sovrin_types.h",
"repo_name": "advatar/indy-sdk",
"src_encoding": "UTF-8",
"text": "#ifndef __sovrin__types__included__\n#define __sovrin__types__included__\n\n#include <stdint.h>\n\ntypedef int32_t sovrin_i32_t; \ntypedef int32_t sovrin_handle_t;\ntypedef unsigned int sovrin_bool_t;\n\n#endif"
},
{
"alpha_fraction": 0.6750524044036865,
"alphanum_fraction": 0.7148846983909607,
"avg_line_length": 28.8125,
"blob_id": "4ca6332bc8bd1e35677ef0bcfd97f48abb3be675",
"content_id": "a2fc3addd39d03b9af8b5a1ca398b7854141cc71",
"detected_licenses": [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 477,
"license_type": "permissive",
"max_line_length": 174,
"num_lines": 16,
"path": "/wrappers/java/src/main/java/org/hyperledger/indy/sdk/SovrinException.java",
"repo_name": "advatar/indy-sdk",
"src_encoding": "UTF-8",
"text": "package org.hyperledger.indy.sdk;\n\npublic class SovrinException extends Exception {\n\n\tprivate static final long serialVersionUID = 2650355290834266477L;\n\n\tpublic SovrinException(String message) {\n\n\t\tsuper(message);\n\t}\n\n\tpublic static SovrinException fromErrorCode(ErrorCode errorCode, int err) {\n\n\t\treturn new SovrinException(\"\" + (errorCode == null ? null : errorCode.name()) + \": \" + (errorCode == null ? null : errorCode.value()) + \" (\" + Integer.toString(err) + \")\");\n\t}\n}\n"
},
{
"alpha_fraction": 0.6470588445663452,
"alphanum_fraction": 0.6764705777168274,
"avg_line_length": 10.333333015441895,
"blob_id": "f12d5492acb353e915ab57467b9cf91d4f2fd28e",
"content_id": "011a284731754540bc8f2e3084426b1522795f0c",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 68,
"license_type": "permissive",
"max_line_length": 28,
"num_lines": 6,
"path": "/wrappers/ios/libsovrin-pod/libsovrin/SovrinTypes.h",
"repo_name": "advatar/indy-sdk",
"src_encoding": "UTF-8",
"text": "//\n// SovrinTypes.h\n// libsovrin\n//\n\ntypedef SInt32 SovrinHandle;\n"
},
{
"alpha_fraction": 0.48161041736602783,
"alphanum_fraction": 0.5216436982154846,
"avg_line_length": 31.952617645263672,
"blob_id": "afe416aeb08fec80f68c05c60311d3f0c10c73f1",
"content_id": "0fbf3038904857799b2913bd531dbed2dd4fac2c",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 13214,
"license_type": "permissive",
"max_line_length": 105,
"num_lines": 401,
"path": "/wrappers/ios/Tests/milagro-ios-test-app/milagro-test-app/tests/test_ecp2_arithmetics.c",
"repo_name": "advatar/indy-sdk",
"src_encoding": "UTF-8",
"text": "/**\n * @file test_ecp2_consistency.c\n * @author Alessandro Budroni\n * @brief Test for aritmetics with ECP2\n *\n * LICENSE\n *\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements. See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership. The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License. You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing,\n * software distributed under the License is distributed on an\n * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n * KIND, either express or implied. See the License for the\n * specific language governing permissions and limitations\n * under the License.\n*/\n\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include \"arch.h\"\n#include \"amcl.h\"\n#include \"utils.h\"\n\n#define LINE_LEN 1000\n#define MAX_STRING 1000\n\nstatic void read_BIG(BIG A, char* string)\n{\n int len;\n char bin[LINE_LEN];\n BIG_zero(A);\n len = strlen(string)+1;\n amcl_hex2bin(string,bin,len);\n len = (len-1)/2;;\n BIG_fromBytesLen(A,bin,len);\n BIG_norm(A);\n}\n\n\nstatic int read_ECP2(ECP2 *ecp2, char* stringx1)\n{\n char *stringx2, *stringy1, *stringy2;\n BIG x1,x2,y1,y2;\n FP2 x,y;\n\n stringx2 = strchr(stringx1,':');\n stringx2[0] = '\\0';\n stringx2++;\n stringy1 = strchr(stringx2,'&');\n stringy1[0] = '\\0';\n stringy1++;\n stringy2 = strchr(stringy1,':');\n stringy2[0] = '\\0';\n stringy2++;\n\n read_BIG(x1,stringx1);\n read_BIG(x2,stringx2);\n read_BIG(y1,stringy1);\n read_BIG(y2,stringy2);\n\n FP2_from_BIGs(&x,x1,x2);\n FP2_from_BIGs(&y,y1,y2);\n\n return ECP2_set(ecp2,&x,&y);\n}\n\nint test_ecp2_arithmetics(int argc, char** argv)\n{\n printf(\"test_ecp2_arithmetics() started\\n\");\n\n if (argc != 2)\n {\n printf(\"usage: ./test_ecp2_arithmetics [path to test vector file]\\n\");\n exit(EXIT_FAILURE);\n }\n\n int i=0, len=0;\n\n char line[LINE_LEN];\n char * linePtr = NULL;\n\n ECP2 ECP2aux1, ECP2aux2, inf;\n FP2 FP2aux1,FP2aux2;\n\n char oct[LINE_LEN];\n octet OCTaux= {0,sizeof(oct),oct};\n\n ECP2 ecp2[4];\n const char* ECP21line = \"ECP21 = \";\n const char* ECP22line = \"ECP22 = \";\n const char* ECP23line = \"ECP23 = \";\n const char* ECP24line = \"ECP24 = \";\n ECP2 ecp2sum;\n const char* ECP2sumline = \"ECP2sum = \";\n ECP2 ecp2neg;\n const char* ECP2negline = \"ECP2neg = \";\n ECP2 ecp2sub;\n const char* ECP2subline = \"ECP2sub = \";\n ECP2 ecp2dbl;\n const char* ECP2dblline = \"ECP2dbl = \";\n BIG BIGscalar[4];\n const char* BIGscalar1line = \"BIGscalar1 = \";\n const char* BIGscalar2line = \"BIGscalar2 = \";\n const char* BIGscalar3line = \"BIGscalar3 = \";\n const char* BIGscalar4line = \"BIGscalar4 = \";\n ECP2 ecp2mul;\n const char* ECP2mulline = \"ECP2mul = \";\n ECP2 ecp2mul4;\n const char* ECP2mul4line = \"ECP2mul4 = \";\n ECP2 ecp2wrong;\n const char* ECP2wrongline = \"ECP2wrong = \";\n ECP2 ecp2inf;\n const char* ECP2infline = \"ECP2inf = \";\n ECP2 ecp2set1;\n const char* ECP2set1line = \"ECP2set1 = \";\n ECP2 ecp2set2;\n const char* ECP2set2line = \"ECP2set2 = \";\n\n ECP2_inf(&inf);\n\n if(!ECP2_isinf(&inf))\n {\n printf(\"ERROR setting ECP2 to infinity\\n\");\n exit(EXIT_FAILURE);\n }\n\n FILE *fp;\n fp = fopen(argv[1],\"r\");\n if (fp == NULL)\n {\n printf(\"ERROR opening test 
vector file\\n\");\n exit(EXIT_FAILURE);\n }\n\n while (fgets(line, LINE_LEN, fp) != NULL)\n {\n i++;\n if (!strncmp(line, ECP21line, strlen(ECP21line)))\n {\n len = strlen(ECP21line);\n linePtr = line + len;\n if(!read_ECP2(&ecp2[0],linePtr) || ECP2_isinf(&ecp2[0]))\n {\n printf(\"ERROR getting test vector input ECP2, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n ECP2_get(&FP2aux1,&FP2aux2,&ecp2[0]);\n FP2_sqr(&FP2aux2,&FP2aux2);\n ECP2_rhs(&FP2aux1,&FP2aux1);\n if (!FP2_equals(&FP2aux1,&FP2aux2))\n {\n printf(\"ERROR computing right hand side of equation ECP, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n ECP2_toOctet(&OCTaux,&ecp2[0]);\n ECP2_fromOctet(&ECP2aux1,&OCTaux);\n if(!ECP2_equals(&ECP2aux1,&ecp2[0]))\n {\n printf(\"ERROR converting ECP2 to/from OCTET, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n if (!strncmp(line, ECP22line, strlen(ECP22line)))\n {\n len = strlen(ECP22line);\n linePtr = line + len;\n if(!read_ECP2(&ecp2[1],linePtr) || ECP2_isinf(&ecp2[1]))\n {\n printf(\"ERROR getting test vector input ECP2, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n if (!strncmp(line, ECP23line, strlen(ECP23line)))\n {\n len = strlen(ECP23line);\n linePtr = line + len;\n if(!read_ECP2(&ecp2[2],linePtr) || ECP2_isinf(&ecp2[2]))\n {\n printf(\"ERROR getting test vector input ECP2, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n if (!strncmp(line, ECP24line, strlen(ECP24line)))\n {\n len = strlen(ECP24line);\n linePtr = line + len;\n if(!read_ECP2(&ecp2[3],linePtr) || ECP2_isinf(&ecp2[3]))\n {\n printf(\"ERROR getting test vector input ECP2, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n if (!strncmp(line, ECP2sumline, strlen(ECP2sumline)))\n {\n len = strlen(ECP2sumline);\n linePtr = line + len;\n if(!read_ECP2(&ecp2sum,linePtr))\n {\n printf(\"ERROR reading test vector input ECP2s, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n ECP2_copy(&ECP2aux1,&ecp2[0]);\n ECP2_add(&ECP2aux1,&ecp2[1]);\n ECP2_affine(&ECP2aux1);\n ECP2_copy(&ECP2aux2,&ecp2[1]); // testing commutativity P+Q = Q+P\n ECP2_add(&ECP2aux2,&ecp2[0]);\n ECP2_affine(&ECP2aux2);\n if(!ECP2_equals(&ECP2aux1,&ecp2sum) || !ECP2_equals(&ECP2aux2,&ecp2sum))\n {\n printf(\"ERROR adding two ECP2s, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n ECP2_copy(&ECP2aux1,&ecp2[0]); // testing associativity (P+Q)+R = P+(Q+R)\n ECP2_add(&ECP2aux1,&ecp2[1]);\n ECP2_add(&ECP2aux1,&ecp2[2]);\n ECP2_affine(&ECP2aux1);\n ECP2_copy(&ECP2aux2,&ecp2[2]);\n ECP2_add(&ECP2aux2,&ecp2[1]);\n ECP2_add(&ECP2aux2,&ecp2[0]);\n ECP2_affine(&ECP2aux2);\n if(!ECP2_equals(&ECP2aux1,&ECP2aux2))\n {\n printf(\"ERROR testing associativity bewtween three ECP2s, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n if (!strncmp(line, ECP2negline, strlen(ECP2negline)))\n {\n len = strlen(ECP2negline);\n linePtr = line + len;\n if(!read_ECP2(&ecp2neg,linePtr))\n {\n printf(\"ERROR getting test vector input ECP2, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n ECP2_copy(&ECP2aux1,&ecp2[0]);\n ECP2_neg(&ECP2aux1);\n ECP2_affine(&ECP2aux1);\n if(!ECP2_equals(&ECP2aux1,&ecp2neg))\n {\n printf(\"ERROR computing negative of ECP2, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n if (!strncmp(line, ECP2subline, strlen(ECP2subline)))\n {\n len = strlen(ECP2subline);\n linePtr = line + len;\n if(!read_ECP2(&ecp2sub,linePtr))\n {\n printf(\"ERROR getting test vector input ECP2, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n ECP2_copy(&ECP2aux1,&ecp2[0]);\n ECP2_sub(&ECP2aux1,&ecp2[1]);\n ECP2_affine(&ECP2aux1);\n if(!ECP2_equals(&ECP2aux1,&ecp2sub))\n {\n printf(\"ERROR performing 
subtraction bewtween two ECP2s, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n if (!strncmp(line, ECP2dblline, strlen(ECP2dblline)))\n {\n len = strlen(ECP2dblline);\n linePtr = line + len;\n if(!read_ECP2(&ecp2dbl,linePtr))\n {\n printf(\"ERROR getting test vector input ECP2, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n ECP2_copy(&ECP2aux1,&ecp2[0]);\n ECP2_dbl(&ECP2aux1);\n ECP2_affine(&ECP2aux1);\n if(!ECP2_equals(&ECP2aux1,&ecp2dbl))\n {\n printf(\"ERROR computing double of ECP2, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n if (!strncmp(line, BIGscalar1line, strlen(BIGscalar1line)))\n {\n len = strlen(BIGscalar1line);\n linePtr = line + len;\n read_BIG(BIGscalar[0],linePtr);\n }\n if (!strncmp(line, BIGscalar2line, strlen(BIGscalar2line)))\n {\n len = strlen(BIGscalar2line);\n linePtr = line + len;\n read_BIG(BIGscalar[1],linePtr);\n }\n if (!strncmp(line, BIGscalar3line, strlen(BIGscalar3line)))\n {\n len = strlen(BIGscalar3line);\n linePtr = line + len;\n read_BIG(BIGscalar[2],linePtr);\n }\n if (!strncmp(line, BIGscalar4line, strlen(BIGscalar4line)))\n {\n len = strlen(BIGscalar4line);\n linePtr = line + len;\n read_BIG(BIGscalar[3],linePtr);\n }\n if (!strncmp(line, ECP2mulline, strlen(ECP2mulline)))\n {\n len = strlen(ECP2mulline);\n linePtr = line + len;\n if(!read_ECP2(&ecp2mul,linePtr))\n {\n printf(\"ERROR getting test vector input ECP2, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n ECP2_copy(&ECP2aux1,&ecp2[0]);\n ECP2_mul(&ECP2aux1,BIGscalar[0]);\n ECP2_affine(&ECP2aux1);\n if(!ECP2_equals(&ECP2aux1,&ecp2mul))\n {\n printf(\"ERROR computing multiplication of ECP2 by a scalar, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n if (!strncmp(line, ECP2mul4line, strlen(ECP2mul4line)))\n {\n len = strlen(ECP2mul4line);\n linePtr = line + len;\n if(!read_ECP2(&ecp2mul4,linePtr))\n {\n printf(\"ERROR getting test vector input ECP2, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n ECP2_mul4(&ECP2aux1,ecp2,BIGscalar);\n ECP2_affine(&ECP2aux1);\n if(!ECP2_equals(&ECP2aux1,&ecp2mul4))\n {\n printf(\"ERROR computing linear combination of 4 ECP2s, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n if (!strncmp(line, ECP2wrongline, strlen(ECP2wrongline)))\n {\n len = strlen(ECP2wrongline);\n linePtr = line + len;\n if(read_ECP2(&ecp2wrong,linePtr) || !ECP2_isinf(&ecp2wrong) || !ECP2_equals(&ecp2wrong,&inf))\n {\n printf(\"ERROR identifying a wrong ECP2, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n if (!strncmp(line, ECP2infline, strlen(ECP2infline)))\n {\n len = strlen(ECP2infline);\n linePtr = line + len;\n if(read_ECP2(&ecp2inf,linePtr) || !ECP2_isinf(&ecp2inf) || !ECP2_equals(&ecp2inf,&inf))\n {\n printf(\"ERROR identifying infinite point ECP2, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n if (!strncmp(line, ECP2set1line, strlen(ECP2set1line)))\n {\n len = strlen(ECP2set1line);\n linePtr = line + len;\n if(!read_ECP2(&ecp2set1,linePtr))\n {\n printf(\"ERROR getting test vector input ECP2, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n ECP2_get(&FP2aux1,&FP2aux2,&ecp2[0]);\n ECP2_setx(&ECP2aux1,&FP2aux1);\n }\n if (!strncmp(line, ECP2set2line, strlen(ECP2set2line)))\n {\n len = strlen(ECP2set2line);\n linePtr = line + len;\n if(!read_ECP2(&ecp2set2,linePtr))\n {\n printf(\"ERROR getting test vector input ECP2, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n if((!ECP2_equals(&ECP2aux1,&ecp2set2)) && (!ECP2_equals(&ECP2aux1,&ecp2set1)))\n {\n printf(\"ERROR computing ECP2 from coordinate x and with y set2, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n }\n fclose(fp);\n\n 
printf(\"test_ecp2_arithmetics() SUCCESS TEST ARITMETIC OF ECP2 PASSED\\n\");\n return EXIT_SUCCESS;\n}\n"
},
{
"alpha_fraction": 0.5797551870346069,
"alphanum_fraction": 0.5812768936157227,
"avg_line_length": 32.14693069458008,
"blob_id": "7798795a548e5074cf307f4460eb136987bfbcc7",
"content_id": "824612d9a285ffc0d956a9ea3af61d97d44032de",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Rust",
"length_bytes": 15115,
"license_type": "permissive",
"max_line_length": 135,
"num_lines": 456,
"path": "/tests/wallet.rs",
"repo_name": "advatar/indy-sdk",
"src_encoding": "UTF-8",
"text": "extern crate sovrin;\n\n// Workaround to share some utils code based on indy sdk types between tests and indy sdk\nuse sovrin::api as api;\n\n#[macro_use]\nextern crate serde_derive;\nextern crate serde_json;\n#[macro_use]\nextern crate lazy_static;\n#[macro_use]\nextern crate log;\n\n#[macro_use]\nmod utils;\n\nuse utils::inmem_wallet::InmemWallet;\nuse utils::wallet::WalletUtils;\nuse utils::signus::SignusUtils;\nuse utils::test::TestUtils;\n\nuse sovrin::api::ErrorCode;\n\nmod high_cases {\n use super::*;\n\n mod register_wallet_type {\n use super::*;\n\n #[test]\n fn sovrin_register_wallet_type_works() {\n TestUtils::cleanup_storage();\n InmemWallet::cleanup();\n\n WalletUtils::register_wallet_type(\"inmem\").unwrap();\n\n TestUtils::cleanup_storage();\n InmemWallet::cleanup();\n }\n }\n\n mod create_wallet {\n use super::*;\n\n #[test]\n fn sovrin_create_wallet_works() {\n TestUtils::cleanup_storage();\n\n let pool_name = \"sovrin_create_wallet_works\";\n let wallet_name = \"sovrin_create_wallet_works\";\n let xtype = \"default\";\n\n WalletUtils::create_wallet(pool_name, wallet_name, Some(xtype), None).unwrap();\n\n TestUtils::cleanup_storage();\n }\n\n #[test]\n fn sovrin_create_wallet_works_for_plugged() {\n TestUtils::cleanup_storage();\n InmemWallet::cleanup();\n\n let pool_name = \"sovrin_create_wallet_works\";\n let wallet_name = \"sovrin_create_wallet_works\";\n let xtype = \"inmem\";\n\n WalletUtils::register_wallet_type(\"inmem\").unwrap();\n WalletUtils::create_wallet(pool_name, wallet_name, Some(xtype), None).unwrap();\n\n TestUtils::cleanup_storage();\n InmemWallet::cleanup();\n }\n\n #[test]\n fn sovrin_create_wallet_works_for_unknown_type() {\n TestUtils::cleanup_storage();\n\n let pool_name = \"sovrin_create_wallet_works_for_unknown_type\";\n let wallet_name = \"sovrin_create_wallet_works_for_unknown_type\";\n let xtype = \"type\";\n\n let res = WalletUtils::create_wallet(pool_name, wallet_name, Some(xtype), None);\n assert_eq!(res.unwrap_err(), ErrorCode::WalletUnknownTypeError);\n\n TestUtils::cleanup_storage();\n }\n\n #[test]\n fn sovrin_create_wallet_works_for_empty_type() {\n TestUtils::cleanup_storage();\n\n let pool_name = \"sovrin_create_wallet_works_for_empty_type\";\n let wallet_name = \"sovrin_create_wallet_works_for_empty_type\";\n\n WalletUtils::create_wallet(pool_name, wallet_name, None, None).unwrap();\n\n TestUtils::cleanup_storage();\n }\n\n #[test]\n fn sovrin_create_wallet_works_for_config() {\n TestUtils::cleanup_storage();\n\n let pool_name = \"sovrin_create_wallet_works\";\n let wallet_name = \"sovrin_create_wallet_works\";\n let xtype = \"default\";\n let config = r#\"{\"freshness_time\":1000}\"#;\n\n WalletUtils::create_wallet(pool_name, wallet_name, Some(xtype), Some(config)).unwrap();\n\n TestUtils::cleanup_storage();\n }\n }\n\n mod delete_wallet {\n use super::*;\n\n #[test]\n fn sovrin_delete_wallet_works() {\n TestUtils::cleanup_storage();\n\n let pool_name = \"sovrin_delete_wallet_works\";\n let wallet_name = \"sovrin_delete_wallet_works\";\n\n WalletUtils::create_wallet(pool_name, wallet_name, None, None).unwrap();\n WalletUtils::delete_wallet(wallet_name).unwrap();\n WalletUtils::create_wallet(pool_name, wallet_name, None, None).unwrap();\n\n TestUtils::cleanup_storage();\n }\n\n #[test]\n fn sovrin_delete_wallet_works_for_plugged() {\n TestUtils::cleanup_storage();\n InmemWallet::cleanup();\n\n let pool_name = \"sovrin_delete_wallet_works_for_plugged\";\n let wallet_name = \"sovrin_delete_wallet_works_for_plugged\";\n let 
xtype = \"inmem\";\n\n WalletUtils::register_wallet_type(xtype).unwrap();\n WalletUtils::create_wallet(pool_name, wallet_name, Some(xtype), None).unwrap();\n WalletUtils::delete_wallet(wallet_name).unwrap();\n WalletUtils::create_wallet(pool_name, wallet_name, Some(xtype), None).unwrap();\n\n TestUtils::cleanup_storage();\n InmemWallet::cleanup();\n }\n }\n\n mod open_wallet {\n use super::*;\n\n #[test]\n fn sovrin_open_wallet_works() {\n TestUtils::cleanup_storage();\n\n let pool_name = \"sovrin_open_wallet_works\";\n let wallet_name = \"sovrin_open_wallet_works\";\n\n WalletUtils::create_wallet(pool_name, wallet_name, None, None).unwrap();\n WalletUtils::open_wallet(wallet_name, None).unwrap();\n\n TestUtils::cleanup_storage();\n }\n\n #[test]\n fn sovrin_open_wallet_works_for_plugged() {\n TestUtils::cleanup_storage();\n InmemWallet::cleanup();\n\n let pool_name = \"sovrin_open_wallet_works_for_plugged\";\n let wallet_name = \"sovrin_open_wallet_works_for_plugged\";\n let xtype = \"inmem\";\n\n WalletUtils::register_wallet_type(xtype).unwrap();\n WalletUtils::create_wallet(pool_name, wallet_name, Some(xtype), None).unwrap();\n WalletUtils::open_wallet(wallet_name, None).unwrap();\n\n TestUtils::cleanup_storage();\n InmemWallet::cleanup();\n }\n\n #[test]\n fn sovrin_open_wallet_works_for_config() {\n TestUtils::cleanup_storage();\n\n let pool_name = \"sovrin_open_wallet_works_for_config\";\n let wallet_name = \"sovrin_open_wallet_works_for_config\";\n let config = r#\"{\"freshness_time\":1000}\"#;\n\n WalletUtils::create_wallet(pool_name, wallet_name, None, None).unwrap();\n WalletUtils::open_wallet(wallet_name, Some(config)).unwrap();\n\n TestUtils::cleanup_storage();\n }\n }\n\n mod close_wallet {\n use super::*;\n\n #[test]\n fn sovrin_close_wallet_works() {\n TestUtils::cleanup_storage();\n\n let pool_name = \"sovrin_close_wallet_works\";\n let wallet_name = \"sovrin_close_wallet_works\";\n\n WalletUtils::create_wallet(pool_name, wallet_name, None, None).unwrap();\n\n let wallet_handle = WalletUtils::open_wallet(wallet_name, None).unwrap();\n WalletUtils::close_wallet(wallet_handle).unwrap();\n WalletUtils::open_wallet(wallet_name, None).unwrap();\n\n TestUtils::cleanup_storage();\n }\n\n #[test]\n fn sovrin_close_wallet_works_for_plugged() {\n TestUtils::cleanup_storage();\n InmemWallet::cleanup();\n\n let pool_name = \"sovrin_close_wallet_works_for_plugged\";\n let wallet_name = \"sovrin_close_wallet_works_for_plugged\";\n let xtype = \"inmem\";\n\n WalletUtils::register_wallet_type(xtype).unwrap();\n WalletUtils::create_wallet(pool_name, wallet_name, Some(xtype), None).unwrap();\n\n let wallet_handle = WalletUtils::open_wallet(wallet_name, None).unwrap();\n WalletUtils::close_wallet(wallet_handle).unwrap();\n WalletUtils::open_wallet(wallet_name, None).unwrap();\n\n TestUtils::cleanup_storage();\n InmemWallet::cleanup();\n }\n }\n\n mod set_seqno_wallet {\n use super::*;\n\n #[test]\n fn sovrin_wallet_set_seqno_works() {\n TestUtils::cleanup_storage();\n\n let wallet_handle = WalletUtils::create_and_open_wallet(\"sovrin_wallet_set_seqno_works\", None).unwrap();\n\n let (did, _, _) = SignusUtils::create_my_did(wallet_handle, \"{}\").unwrap();\n\n WalletUtils::wallet_set_seq_no_for_value(wallet_handle, &did, 1).unwrap();\n\n TestUtils::cleanup_storage();\n }\n\n #[test]\n fn sovrin_wallet_set_seqno_works_for_plugged() {\n TestUtils::cleanup_storage();\n InmemWallet::cleanup();\n\n let xtype = \"inmem\";\n\n WalletUtils::register_wallet_type(xtype).unwrap();\n let wallet_handle = 
WalletUtils::create_and_open_wallet(\"sovrin_wallet_set_seqno_works_for_plugged\", Some(xtype)).unwrap();\n\n let (did, _, _) = SignusUtils::create_my_did(wallet_handle, \"{}\").unwrap();\n\n WalletUtils::wallet_set_seq_no_for_value(wallet_handle, &did, 1).unwrap();\n\n TestUtils::cleanup_storage();\n InmemWallet::cleanup();\n }\n }\n}\n\nmod medium_cases {\n use super::*;\n\n mod create_wallet {\n use super::*;\n\n #[test]\n fn sovrin_create_wallet_works_for_duplicate_name() {\n TestUtils::cleanup_storage();\n\n let pool_name = \"sovrin_create_wallet_works_for_duplicate_name\";\n let wallet_name = \"sovrin_create_wallet_works_for_duplicate_name\";\n\n WalletUtils::create_wallet(pool_name, wallet_name, None, None).unwrap();\n let res = WalletUtils::create_wallet(pool_name, wallet_name, None, None);\n assert_eq!(res.unwrap_err(), ErrorCode::WalletAlreadyExistsError);\n\n TestUtils::cleanup_storage();\n }\n\n #[test]\n fn sovrin_create_wallet_works_for_empty_name() {\n TestUtils::cleanup_storage();\n\n let pool_name = \"sovrin_create_wallet_works_for_empty_name\";\n let wallet_name = \"\";\n\n let res = WalletUtils::create_wallet(pool_name, wallet_name, None, None);\n assert_eq!(res.unwrap_err(), ErrorCode::CommonInvalidParam3);\n\n TestUtils::cleanup_storage();\n }\n }\n\n mod delete_wallet {\n use super::*;\n\n #[test]\n fn sovrin_delete_wallet_works_for_invalid_wallet_name() {\n TestUtils::cleanup_storage();\n\n let res = WalletUtils::delete_wallet(\"sovrin_delete_wallet_works_for_invalid_wallet_name\");\n assert_eq!(res.unwrap_err(), ErrorCode::CommonIOError);\n\n TestUtils::cleanup_storage();\n }\n\n #[test]\n fn sovrin_delete_wallet_works_for_twice() {\n TestUtils::cleanup_storage();\n\n let pool_name = \"sovrin_delete_wallet_works_for_deleted_wallet\";\n let wallet_name = \"sovrin_delete_wallet_works_for_deleted_wallet\";\n\n WalletUtils::create_wallet(pool_name, wallet_name, None, None).unwrap();\n WalletUtils::delete_wallet(wallet_name).unwrap();\n let res = WalletUtils::delete_wallet(wallet_name);\n assert_eq!(res.unwrap_err(), ErrorCode::CommonIOError);\n\n TestUtils::cleanup_storage();\n }\n }\n\n mod open_wallet {\n use super::*;\n\n #[test]\n fn sovrin_open_wallet_works_for_not_created_wallet() {\n TestUtils::cleanup_storage();\n\n let res = WalletUtils::open_wallet(\"sovrin_open_wallet_works_for_not_created_wallet\", None);\n assert_eq!(res.unwrap_err(), ErrorCode::CommonIOError);\n\n TestUtils::cleanup_storage();\n }\n\n #[test]\n #[ignore] //TODO Check is not implemented\n fn sovrin_open_wallet_works_for_twice() {\n TestUtils::cleanup_storage();\n\n let pool_name = \"sovrin_create_wallet_works\";\n let wallet_name = \"sovrin_open_wallet_works_for_twice\";\n\n WalletUtils::create_wallet(pool_name, wallet_name, None, None).unwrap();\n\n WalletUtils::open_wallet(wallet_name, None).unwrap();\n let res = WalletUtils::open_wallet(wallet_name, None);\n assert_eq!(res.unwrap_err(), ErrorCode::CommonIOError);\n\n TestUtils::cleanup_storage();\n }\n\n #[test]\n fn sovrin_open_wallet_works_for_two_wallets() {\n TestUtils::cleanup_storage();\n\n let pool_name = \"sovrin_open_wallet_works_for_two_wallets\";\n let wallet_name_1 = \"sovrin_open_wallet_works_for_two_wallets1\";\n let wallet_name_2 = \"sovrin_open_wallet_works_for_two_wallets2\";\n\n WalletUtils::create_wallet(pool_name, wallet_name_1, None, None).unwrap();\n WalletUtils::create_wallet(pool_name, wallet_name_2, None, None).unwrap();\n WalletUtils::open_wallet(wallet_name_1, None).unwrap();\n 
WalletUtils::open_wallet(wallet_name_2, None).unwrap();\n\n TestUtils::cleanup_storage();\n }\n\n #[test]\n fn sovrin_open_wallet_works_for_invalid_config() {\n TestUtils::cleanup_storage();\n\n let pool_name = \"sovrin_open_wallet_works_for_invalid_config\";\n let wallet_name = \"sovrin_open_wallet_works_for_invalid_config\";\n let config = r#\"{\"field\":\"value\"}\"#;\n\n WalletUtils::create_wallet(pool_name, wallet_name, None, None).unwrap();\n let res = WalletUtils::open_wallet(wallet_name, Some(config));\n assert_eq!(res.unwrap_err(), ErrorCode::CommonInvalidStructure);\n\n TestUtils::cleanup_storage();\n }\n }\n\n mod close_wallet {\n use super::*;\n\n #[test]\n fn sovrin_close_wallet_works_for_invalid_handle() {\n TestUtils::cleanup_storage();\n\n let res = WalletUtils::close_wallet(1);\n assert_eq!(res.unwrap_err(), ErrorCode::WalletInvalidHandle);\n\n TestUtils::cleanup_storage();\n }\n\n #[test]\n fn sovrin_close_wallet_works_for_twice() {\n TestUtils::cleanup_storage();\n\n let wallet_handle = WalletUtils::create_and_open_wallet(\"sovrin_close_wallet_works_for_twice\", None).unwrap();\n\n WalletUtils::close_wallet(wallet_handle).unwrap();\n let res = WalletUtils::close_wallet(wallet_handle);\n assert_eq!(res.unwrap_err(), ErrorCode::WalletInvalidHandle);\n\n TestUtils::cleanup_storage();\n }\n }\n\n mod set_seqno {\n use super::*;\n\n #[test]\n fn sovrin_wallet_set_seqno_works_for_not_exists_key() {\n TestUtils::cleanup_storage();\n\n let wallet_handle = WalletUtils::create_and_open_wallet(\"sovrin_wallet_set_seqno_works_for_not_exists_key\", None).unwrap();\n\n //TODO maybe we should return WalletNotFound if the key does not exist in the wallet\n WalletUtils::wallet_set_seq_no_for_value(wallet_handle, \"key\", 1).unwrap();\n\n TestUtils::cleanup_storage();\n }\n\n #[test]\n fn sovrin_wallet_set_seqno_works_for_invalid_wallet() {\n TestUtils::cleanup_storage();\n\n let wallet_handle = WalletUtils::create_and_open_wallet(\"sovrin_wallet_set_seqno_works_for_invalid_wallet\", None).unwrap();\n\n let invalid_wallet_handle = wallet_handle + 1;\n let res = WalletUtils::wallet_set_seq_no_for_value(invalid_wallet_handle, \"key\", 1);\n assert_eq!(res.unwrap_err(), ErrorCode::WalletInvalidHandle);\n\n TestUtils::cleanup_storage();\n }\n }\n}\n"
},
{
"alpha_fraction": 0.5966587066650391,
"alphanum_fraction": 0.6133651733398438,
"avg_line_length": 26.933332443237305,
"blob_id": "ade111b007b96de03de5bcf69c9844efafe305ed",
"content_id": "c98dc2f8f420dcc809b09ecef76daed859a4c6e6",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 419,
"license_type": "permissive",
"max_line_length": 158,
"num_lines": 15,
"path": "/ci/rpm-build.sh",
"repo_name": "advatar/indy-sdk",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\ncommit=$1\nversion=$(wget -q https://raw.githubusercontent.com/hyperledger/indy-sdk/$commit/Cargo.toml -O - | grep -E '^version =' | head -n1 | cut -f2 -d= | tr -d '\" ')\n\n[ -z $version ] && exit 1\n[ -z $commit ] && exit 2\n\nsed \\\n\t-e \"s|@commit@|$commit|g\" \\\n\t-e \"s|@version@|$version.$commit|g\" \\\n\tindy-sdk.spec.in >indy-sdk.spec\n\nspectool -g -R indy-sdk.spec || exit 3\nrpmbuild -ba indy-sdk.spec || exit 4\n"
},
{
"alpha_fraction": 0.47985246777534485,
"alphanum_fraction": 0.49812182784080505,
"avg_line_length": 26.783681869506836,
"blob_id": "39e241907649b03971e31fdb8a045ee19a416876",
"content_id": "462c14a4a063ffe7f4add262966b55afbbdb1cd8",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 29284,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 1054,
"path": "/wrappers/ios/Tests/milagro-ios-test-app/milagro-test-app/tests/test_x509.c",
"repo_name": "advatar/indy-sdk",
"src_encoding": "UTF-8",
"text": "/**\n * @file test_x509.c\n * @author Kealan McCusker\n * @brief Test x509 functions\n *\n * LICENSE\n *\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements. See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership. The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License. You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing,\n * software distributed under the License is distributed on an\n * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n * KIND, either express or implied. See the License for the\n * specific language governing permissions and limitations\n * under the License.\n */\n\n#include \"rsa.h\"\n#include \"ecdh.h\"\n#include \"x509.h\"\n#include \"utils.h\"\n#include <stdio.h>\n#include <string.h>\n#include <stdlib.h>\n\ntypedef enum { false, true } bool;\n\n//#define DEBUG\n\n#define ECC 1\n#define RSA 2\n#define H160 1\n#define H256 2\n#define H384 3\n#define H512 4\n\n#define LINE_LEN 10000\n#define MAX_STRING 300\n\n#define MAXMODBYTES 72\n#define MAXFFLEN 16\n\nstatic char sig[MAXMODBYTES*MAXFFLEN];\nstatic octet SIG= {0,sizeof(sig),sig};\n\nstatic char r[MAXMODBYTES];\nstatic octet R= {0,sizeof(r),r};\n\nstatic char s[MAXMODBYTES];\nstatic octet S= {0,sizeof(s),s};\n\nstatic char cakey[MAXMODBYTES*MAXFFLEN];\nstatic octet CAKEY= {0,sizeof(cakey),cakey};\n\nstatic char certkey[MAXMODBYTES*MAXFFLEN];\nstatic octet CERTKEY= {0,sizeof(certkey),certkey};\n\nstatic char h[5000];\nstatic octet H= {0,sizeof(h),h};\n\nstatic char hh[5000];\nstatic octet HH= {0,sizeof(hh),hh};\n\nstatic char hp[RFS];\nstatic octet HP= {0,sizeof(hp),hp};\n\n// countryName\nstatic char cn[3]= {0x55,0x04,0x06};\nstatic octet CN= {3,sizeof(cn),cn};\n\n// stateName\nstatic char sn[3]= {0x55,0x04,0x08};\nstatic octet SN= {3,sizeof(sn),sn};\n\n// localName\nstatic char ln[3]= {0x55,0x04,0x07};\nstatic octet LN= {3,sizeof(ln),ln};\n\n// orgName\nstatic char on[3]= {0x55,0x04,0x0A};\nstatic octet ON= {3,sizeof(on),on};\n\n// unitName\nstatic char un[3]= {0x55,0x04,0x0B};\nstatic octet UN= {3,sizeof(un),un};\n\n// myName\nstatic char mn[3]= {0x55,0x04,0x03};\nstatic octet MN= {3,sizeof(mn),mn};\n\n// emailName\nstatic char en[9]= {0x2a,0x86,0x48,0x86,0xf7,0x0d,0x01,0x09,0x01};\nstatic octet EN= {9,sizeof(en),en};\n\nextern void print_out(char *des,octet *c,int index,int len);\nextern void print_date(char *des,octet *c,int index);\n\nstatic int compare_data(octet *cert,octet *data,int index)\n{\n int i;\n for (i=0; i<data->len; i++)\n {\n if (cert->val[index+i]!=data->val[i])\n {\n return 0;\n }\n }\n return 1;\n}\n\nint test_x509(int argc, char** argv)\n{\n if (argc != 2)\n {\n printf(\"usage: ./test_x509 [path to test vector file]\\n\");\n exit(EXIT_FAILURE);\n }\n int sha;\n pktype st,pt;\n pktype ca = {0,0,0};\n FILE * fp = NULL;\n char line[LINE_LEN];\n char * linePtr = NULL;\n int l1=0;\n const char* CAStr = \"CA = \";\n const char* CERTStr = \"CERT = \";\n\n char issuerc[MAX_STRING];\n octet IssuerCOct= {0,MAX_STRING,issuerc};\n const char* IssuerCStr = \"IssuerC = \";\n\n char issuerst[MAX_STRING];\n octet IssuerSTOct= {0,MAX_STRING,issuerst};\n const char* IssuerSTStr = \"IssuerST = \";\n\n char issuerl[MAX_STRING];\n octet IssuerLOct= {0,MAX_STRING,issuerl};\n 
const char* IssuerLStr = \"IssuerL = \";\n\n char issuero[MAX_STRING];\n octet IssuerOOct= {0,MAX_STRING,issuero};\n const char* IssuerOStr = \"IssuerO = \";\n\n char issuerou[MAX_STRING];\n octet IssuerOUOct= {0,MAX_STRING,issuerou};\n const char* IssuerOUStr = \"IssuerOU = \";\n\n char issuercn[MAX_STRING];\n octet IssuerCNOct= {0,MAX_STRING,issuercn};\n const char* IssuerCNStr = \"IssuerCN = \";\n\n char issueremailaddress[MAX_STRING];\n octet IssuerEmailAddressOct= {0,MAX_STRING,issueremailaddress};\n const char* IssuerEmailAddressStr = \"IssuerEmailAddress = \";\n\n\n char subjectc[MAX_STRING];\n octet SubjectCOct= {0,MAX_STRING,subjectc};\n const char* SubjectCStr = \"SubjectC = \";\n\n char subjectst[MAX_STRING];\n octet SubjectSTOct= {0,MAX_STRING,subjectst};\n const char* SubjectSTStr = \"SubjectST = \";\n\n char subjectl[MAX_STRING];\n octet SubjectLOct= {0,MAX_STRING,subjectl};\n const char* SubjectLStr = \"SubjectL = \";\n\n char subjecto[MAX_STRING];\n octet SubjectOOct= {0,MAX_STRING,subjecto};\n const char* SubjectOStr = \"SubjectO = \";\n\n char subjectou[MAX_STRING];\n octet SubjectOUOct= {0,MAX_STRING,subjectou};\n const char* SubjectOUStr = \"SubjectOU = \";\n\n char subjectcn[MAX_STRING];\n octet SubjectCNOct= {0,MAX_STRING,subjectcn};\n const char* SubjectCNStr = \"SubjectCN = \";\n\n char subjectemailaddress[MAX_STRING];\n octet SubjectEmailAddressOct= {0,MAX_STRING,subjectemailaddress};\n const char* SubjectEmailAddressStr = \"SubjectEmailAddress = \";\n\n char vf[MAX_STRING];\n octet vfOct= {0,MAX_STRING,vf};\n const char* vfStr = \"vf = \";\n\n char vt[MAX_STRING];\n octet vtOct= {0,MAX_STRING,vt};\n const char* vtStr = \"vt = \";\n\n char cert_pk[512];\n octet CERT_PKOct= {0,sizeof(cert_pk),cert_pk};\n const char* CERT_PKStr = \"CERT_PK = \";\n\n fp = fopen(argv[1], \"r\");\n if (fp == NULL)\n {\n printf(\"ERROR opening test vector file\\n\");\n exit(EXIT_FAILURE);\n }\n\n rsa_public_key PK;\n\n bool readLine = false;\n int i=0;\n while (fgets(line, LINE_LEN, fp) != NULL)\n {\n i++;\n readLine = true;\n\n if (!strncmp(line, IssuerCStr, strlen(IssuerCStr)))\n {\n#ifdef DEBUG\n printf(\"line %d %s\\n\", i,line);\n#endif\n linePtr = line + strlen(IssuerCStr);\n OCT_clear(&IssuerCOct);\n OCT_jstring(&IssuerCOct,linePtr);\n IssuerCOct.len -= 1;\n\n#ifdef DEBUG\n printf(\"IssuerCOct Hex: \");\n OCT_output(&IssuerCOct);\n printf(\"IssuerCOct ASCII: \");\n OCT_output_string(&IssuerCOct);\n printf(\"\\n\");\n#endif\n }\n\n if (!strncmp(line, IssuerSTStr, strlen(IssuerSTStr)))\n {\n#ifdef DEBUG\n printf(\"line %d %s\\n\", i,line);\n#endif\n linePtr = line + strlen(IssuerSTStr);\n OCT_clear(&IssuerSTOct);\n OCT_jstring(&IssuerSTOct,linePtr);\n IssuerSTOct.len -= 1;\n\n#ifdef DEBUG\n printf(\"IssuerSTOct Hex: \");\n OCT_output(&IssuerSTOct);\n printf(\"IssuerSTOct ASCII: \");\n OCT_output_string(&IssuerSTOct);\n printf(\"\\n\");\n#endif\n }\n\n if (!strncmp(line, IssuerLStr, strlen(IssuerLStr)))\n {\n#ifdef DEBUG\n printf(\"line %d %s\\n\", i,line);\n#endif\n linePtr = line + strlen(IssuerLStr);\n OCT_clear(&IssuerLOct);\n OCT_jstring(&IssuerLOct,linePtr);\n IssuerLOct.len -= 1;\n\n#ifdef DEBUG\n printf(\"IssuerLOct Hex: \");\n OCT_output(&IssuerLOct);\n printf(\"IssuerLOct ASCII: \");\n OCT_output_string(&IssuerLOct);\n printf(\"\\n\");\n#endif\n }\n\n if (!strncmp(line, IssuerOStr, strlen(IssuerOStr)))\n {\n#ifdef DEBUG\n printf(\"line %d %s\\n\", i,line);\n#endif\n linePtr = line + strlen(IssuerOStr);\n OCT_clear(&IssuerOOct);\n OCT_jstring(&IssuerOOct,linePtr);\n 
IssuerOOct.len -= 1;\n\n#ifdef DEBUG\n printf(\"IssuerOOct Hex: \");\n OCT_output(&IssuerOOct);\n printf(\"IssuerOOct ASCII: \");\n OCT_output_string(&IssuerOOct);\n printf(\"\\n\");\n#endif\n }\n\n if (!strncmp(line, IssuerOUStr, strlen(IssuerOUStr)))\n {\n#ifdef DEBUG\n printf(\"line %d %s\\n\", i,line);\n#endif\n linePtr = line + strlen(IssuerOUStr);\n OCT_clear(&IssuerOUOct);\n OCT_jstring(&IssuerOUOct,linePtr);\n IssuerOUOct.len -= 1;\n\n#ifdef DEBUG\n printf(\"IssuerOUOct Hex: \");\n OCT_output(&IssuerOUOct);\n printf(\"IssuerOUOct ASCII: \");\n OCT_output_string(&IssuerOUOct);\n printf(\"\\n\");\n#endif\n }\n\n if (!strncmp(line, IssuerCNStr, strlen(IssuerCNStr)))\n {\n#ifdef DEBUG\n printf(\"line %d %s\\n\", i,line);\n#endif\n linePtr = line + strlen(IssuerCNStr);\n OCT_clear(&IssuerCNOct);\n OCT_jstring(&IssuerCNOct,linePtr);\n IssuerCNOct.len -= 1;\n\n#ifdef DEBUG\n printf(\"IssuerCNOct Hex: \");\n OCT_output(&IssuerCNOct);\n printf(\"IssuerCNOct ASCII: \");\n OCT_output_string(&IssuerCNOct);\n printf(\"\\n\");\n#endif\n }\n\n if (!strncmp(line, IssuerEmailAddressStr, strlen(IssuerEmailAddressStr)))\n {\n#ifdef DEBUG\n printf(\"line %d %s\\n\", i,line);\n#endif\n linePtr = line + strlen(IssuerEmailAddressStr);\n OCT_clear(&IssuerEmailAddressOct);\n OCT_jstring(&IssuerEmailAddressOct,linePtr);\n IssuerEmailAddressOct.len -= 1;\n\n#ifdef DEBUG\n printf(\"IssuerEmailAddressOct Hex: \");\n OCT_output(&IssuerEmailAddressOct);\n printf(\"IssuerEmailAddressOct ASCII: \");\n OCT_output_string(&IssuerEmailAddressOct);\n printf(\"\\n\");\n#endif\n }\n\n if (!strncmp(line, SubjectCStr, strlen(SubjectCStr)))\n {\n#ifdef DEBUG\n printf(\"line %d %s\\n\", i,line);\n#endif\n linePtr = line + strlen(SubjectCStr);\n OCT_clear(&SubjectCOct);\n OCT_jstring(&SubjectCOct,linePtr);\n SubjectCOct.len -= 1;\n\n#ifdef DEBUG\n printf(\"SubjectCOct Hex: \");\n OCT_output(&SubjectCOct);\n printf(\"SubjectCOct ASCII: \");\n OCT_output_string(&SubjectCOct);\n printf(\"\\n\");\n#endif\n }\n\n if (!strncmp(line, SubjectSTStr, strlen(SubjectSTStr)))\n {\n#ifdef DEBUG\n printf(\"line %d %s\\n\", i,line);\n#endif\n linePtr = line + strlen(SubjectSTStr);\n OCT_clear(&SubjectSTOct);\n OCT_jstring(&SubjectSTOct,linePtr);\n SubjectSTOct.len -= 1;\n\n#ifdef DEBUG\n printf(\"SubjectSTOct Hex: \");\n OCT_output(&SubjectSTOct);\n printf(\"SubjectSTOct ASCII: \");\n OCT_output_string(&SubjectSTOct);\n printf(\"\\n\");\n#endif\n }\n\n if (!strncmp(line, SubjectLStr, strlen(SubjectLStr)))\n {\n#ifdef DEBUG\n printf(\"line %d %s\\n\", i,line);\n#endif\n linePtr = line + strlen(SubjectLStr);\n OCT_clear(&SubjectLOct);\n OCT_jstring(&SubjectLOct,linePtr);\n SubjectLOct.len -= 1;\n\n#ifdef DEBUG\n printf(\"SubjectLOct Hex: \");\n OCT_output(&SubjectLOct);\n printf(\"SubjectLOct ASCII: \");\n OCT_output_string(&SubjectLOct);\n printf(\"\\n\");\n#endif\n }\n\n if (!strncmp(line, SubjectOStr, strlen(SubjectOStr)))\n {\n#ifdef DEBUG\n printf(\"line %d %s\\n\", i,line);\n#endif\n linePtr = line + strlen(SubjectOStr);\n OCT_clear(&SubjectOOct);\n OCT_jstring(&SubjectOOct,linePtr);\n SubjectOOct.len -= 1;\n\n#ifdef DEBUG\n printf(\"SubjectOOct Hex: \");\n OCT_output(&SubjectOOct);\n printf(\"SubjectOOct ASCII: \");\n OCT_output_string(&SubjectOOct);\n printf(\"\\n\");\n#endif\n }\n\n if (!strncmp(line, SubjectOUStr, strlen(SubjectOUStr)))\n {\n#ifdef DEBUG\n printf(\"line %d %s\\n\", i,line);\n#endif\n linePtr = line + strlen(SubjectOUStr);\n OCT_clear(&SubjectOUOct);\n OCT_jstring(&SubjectOUOct,linePtr);\n SubjectOUOct.len -= 
1;\n\n#ifdef DEBUG\n printf(\"SubjectOUOct Hex: \");\n OCT_output(&SubjectOUOct);\n printf(\"SubjectOUOct ASCII: \");\n OCT_output_string(&SubjectOUOct);\n printf(\"\\n\");\n#endif\n }\n\n if (!strncmp(line, SubjectCNStr, strlen(SubjectCNStr)))\n {\n#ifdef DEBUG\n printf(\"line %d %s\\n\", i,line);\n#endif\n linePtr = line + strlen(SubjectCNStr);\n OCT_clear(&SubjectCNOct);\n OCT_jstring(&SubjectCNOct,linePtr);\n SubjectCNOct.len -= 1;\n\n#ifdef DEBUG\n printf(\"SubjectCNOct Hex: \");\n OCT_output(&SubjectCNOct);\n printf(\"SubjectCNOct ASCII: \");\n OCT_output_string(&SubjectCNOct);\n printf(\"\\n\");\n#endif\n }\n\n if (!strncmp(line, SubjectEmailAddressStr, strlen(SubjectEmailAddressStr)))\n {\n#ifdef DEBUG\n printf(\"line %d %s\\n\", i,line);\n#endif\n linePtr = line + strlen(SubjectEmailAddressStr);\n OCT_clear(&SubjectEmailAddressOct);\n OCT_jstring(&SubjectEmailAddressOct,linePtr);\n SubjectEmailAddressOct.len -= 1;\n\n#ifdef DEBUG\n printf(\"SubjectEmailAddressOct Hex: \");\n OCT_output(&SubjectEmailAddressOct);\n printf(\"SubjectEmailAddressOct ASCII: \");\n OCT_output_string(&SubjectEmailAddressOct);\n printf(\"\\n\");\n#endif\n }\n\n if (!strncmp(line, vfStr, strlen(vfStr)))\n {\n#ifdef DEBUG\n printf(\"line %d %s\\n\", i,line);\n#endif\n linePtr = line + strlen(vfStr);\n OCT_clear(&vfOct);\n OCT_jstring(&vfOct,linePtr);\n vfOct.len -= 1;\n\n#ifdef DEBUG\n printf(\"vfOct Hex: \");\n OCT_output(&vfOct);\n printf(\"vfOct ASCII: \");\n OCT_output_string(&vfOct);\n printf(\"\\n\");\n#endif\n }\n\n if (!strncmp(line, vtStr, strlen(vtStr)))\n {\n#ifdef DEBUG\n printf(\"line %d %s\\n\", i,line);\n#endif\n linePtr = line + strlen(vtStr);\n OCT_clear(&vtOct);\n OCT_jstring(&vtOct,linePtr);\n vtOct.len -= 1;\n\n#ifdef DEBUG\n printf(\"vtOct Hex: \");\n OCT_output(&vtOct);\n printf(\"vtOct ASCII: \");\n OCT_output_string(&vtOct);\n printf(\"\\n\");\n#endif\n }\n\n if (!strncmp(line, CERT_PKStr, strlen(CERT_PKStr)))\n {\n#ifdef DEBUG\n printf(\"line %d %s\\n\", i,line);\n#endif\n // Find hex value in string\n linePtr = line + strlen(CERT_PKStr);\n\n // p binary value\n l1 = strlen(linePtr)-1;\n amcl_hex2bin(linePtr, CERT_PKOct.val, l1);\n CERT_PKOct.len = l1/2;\n\n#ifdef DEBUG\n printf(\"CERT_PKOct Hex: \");\n OCT_output(&CERT_PKOct);\n printf(\"\\n\");\n#endif\n }\n\n\n // Self-Signed CA cert\n if (!strncmp(line, CAStr, strlen(CAStr)))\n {\n#ifdef DEBUG\n printf(\"line %d %s\\n\", i,line);\n#endif\n // Find base64 value in string\n char io[5000];\n octet IO= {0,sizeof(io),io};\n linePtr = line + strlen(CAStr);\n l1 = strlen(linePtr);\n char* ca_b64 = (char*) calloc (l1,sizeof(char));\n strncpy(ca_b64,linePtr,l1-1);\n OCT_frombase64(&IO,ca_b64);\n\n#ifdef DEBUG\n printf(\"CA Self-Signed Cert: \\n\");\n OCT_output(&IO);\n printf(\"\\n\");\n#endif\n\n free(ca_b64);\n ca_b64 = NULL;\n\n // returns signature type\n st=X509_extract_cert_sig(&IO,&SIG);\n\n if (st.type==0)\n {\n printf(\"Unable to extract self-signed cert signature\\r\\n\");\n }\n\n if (st.type==ECC)\n {\n OCT_chop(&SIG,&S,SIG.len/2);\n OCT_copy(&R,&SIG);\n // printf(\"SIG: \");\n // OCT_output(&R);\n // printf(\"\\r\\n\");\n // OCT_output(&S);\n // printf(\"\\r\\n\");\n }\n\n if (st.type==RSA)\n {\n //printf(\"SIG: \");\n //OCT_output(&SIG);\n //printf(\"\\r\\n\");\n }\n\n // Extract Cert from signed Cert\n X509_extract_cert(&IO,&H);\n\n ca=X509_extract_public_key(&H,&CAKEY);\n\n if (ca.type==0)\n {\n printf(\"Not supported by library\\n\");\n exit(EXIT_FAILURE);\n }\n if (ca.type!=st.type)\n {\n printf(\"Not self-signed\\n\");\n 
exit(EXIT_FAILURE);\n }\n\n if (ca.type==ECC)\n {\n // printf(\"EXTRACTED ECC CA PUBLIC KEY: \");\n // OCT_output(&CAKEY);\n // printf(\"\\n\");\n }\n if (ca.type==RSA)\n {\n // printf(\"EXTRACTED RSA CA PUBLIC KEY: \");\n // OCT_output(&CAKEY);\n // printf(\"\\n\");\n }\n\n // Cert is self-signed - so check signature\n // printf(\"Checking Self-Signed Signature\\r\\n\");\n if (ca.type==ECC)\n {\n if (ca.curve!=CHOICE)\n {\n printf(\"TEST X509 ERROR CURVE IS NOT SUPPORTED LINE %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n int res=ECP_PUBLIC_KEY_VALIDATE(1,&CAKEY);\n if (res!=0)\n {\n printf(\"TEST X509 ERROR PUBLIC KEY IS INVALID LINE %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n\n sha=0;\n if (st.hash==H256) sha=SHA256;\n if (st.hash==H384) sha=SHA384;\n if (st.hash==H512) sha=SHA512;\n if (st.hash==0)\n {\n printf(\"TEST X509 ERROR HASH FUNCTION NOT SUPPORTED LINE %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n\n if (ECPVP_DSA(sha,&CAKEY,&H,&R,&S)!=0)\n {\n printf(\"X509 ERROR ECDSA VERIFICATION FAILED LINE %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n\n if (ca.type==RSA)\n {\n PK.e=65537; // assuming this!\n FF_fromOctet(PK.n,&CAKEY,FFLEN);\n\n sha=0;\n if (st.hash==H256) sha=SHA256;\n if (st.hash==H384) sha=SHA384;\n if (st.hash==H512) sha=SHA512;\n if (st.hash==0)\n {\n printf(\"TEST X509 ERROR Hash Function not supported LINE %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n PKCS15(sha,&H,&HP);\n\n RSA_ENCRYPT(&PK,&SIG,&HH);\n if (!OCT_comp(&HP,&HH))\n {\n printf(\"TEST X509 ERROR RSA VERIFICATION FAILED LINE %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n }\n\n /////////// CA Signed cert /////////////////\n if (!strncmp(line, CERTStr, strlen(CERTStr)))\n {\n#ifdef DEBUG\n printf(\"line %d %s\\n\", i,line);\n#endif\n // Find base64 value in string\n char io[5000];\n octet IO= {0,sizeof(io),io};\n linePtr = line + strlen(CERTStr);\n l1 = strlen(linePtr);\n char* cert_b64 = (char*) calloc (l1,sizeof(char));\n strncpy(cert_b64,linePtr,l1-1);\n OCT_frombase64(&IO,cert_b64);\n\n#ifdef DEBUG\n printf(\"CA Signed Cert: \\n\");\n OCT_output(&IO);\n printf(\"\\n\");\n#endif\n\n free(cert_b64);\n cert_b64 = NULL;\n\n // returns signature type\n st=X509_extract_cert_sig(&IO,&SIG);\n\n if (st.type==0)\n {\n printf(\"TEST X509 ERROR UNABLE TO CHECK CERT SIGNATURE LINE %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n\n if (st.type==ECC)\n {\n OCT_chop(&SIG,&S,SIG.len/2);\n OCT_copy(&R,&SIG);\n#ifdef DEBUG\n printf(\"ECC SIG: \");\n OCT_output(&R);\n printf(\"\\r\\n\");\n OCT_output(&S);\n printf(\"\\r\\n\");\n#endif\n }\n\n#ifdef DEBUG\n if (st.type==RSA)\n {\n printf(\"RSA SIG: \");\n OCT_output(&SIG);\n printf(\"\\r\\n\");\n }\n#endif\n\n // Extract Cert\n int c;\n c=X509_extract_cert(&IO,&H);\n\n#ifdef DEBUG\n printf(\"Cert: \");\n OCT_output(&H);\n printf(\"\\n\");\n#endif\n\n // Check Details\n int ic,len;\n // Issuer Details\n ic=X509_find_issuer(&H);\n\n c=X509_find_entity_property(&H,&CN,ic,&len);\n#ifdef DEBUG\n print_out(\"countryName: \",&H,c,len);\n printf(\"\\n\");\n#endif\n\n if (!compare_data(&H,&IssuerCOct,c))\n {\n printf(\"TEST X509 ERROR IssuerC LINE %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n\n c=X509_find_entity_property(&H,&SN,ic,&len);\n#ifdef DEBUG\n print_out(\"stateOrProvinceName: \",&H,c,len);\n printf(\"\\n\");\n#endif\n\n if (!compare_data(&H,&IssuerSTOct,c))\n {\n printf(\"TEST X509 ERROR IssuerST LINE %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n\n c=X509_find_entity_property(&H,&LN,ic,&len);\n#ifdef DEBUG\n print_out(\"localityName: \",&H,c,len);\n printf(\"\\n\");\n#endif\n\n if (!compare_data(&H,&IssuerLOct,c))\n {\n 
printf(\"TEST X509 ERROR IssuerL LINE %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n\n c=X509_find_entity_property(&H,&ON,ic,&len);\n#ifdef DEBUG\n print_out(\"organizationName: \",&H,c,len);\n printf(\"\\n\");\n#endif\n\n if (!compare_data(&H,&IssuerOOct,c))\n {\n printf(\"TEST X509 ERROR IssuerO LINE %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n\n c=X509_find_entity_property(&H,&UN,ic,&len);\n#ifdef DEBUG\n print_out(\"organizationalUnitName: \",&H,c,len);\n printf(\"\\n\");\n#endif\n\n if (!compare_data(&H,&IssuerOUOct,c))\n {\n printf(\"TEST X509 ERROR IssuerOU LINE %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n\n c=X509_find_entity_property(&H,&MN,ic,&len);\n#ifdef DEBUG\n print_out(\"commonName: \",&H,c,len);\n printf(\"\\n\");\n#endif\n\n if (!compare_data(&H,&IssuerCNOct,c))\n {\n printf(\"TEST X509 ERROR IssuerCN LINE %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n\n c=X509_find_entity_property(&H,&EN,ic,&len);\n#ifdef DEBUG\n print_out(\"emailAddress: \",&H,c,len);\n printf(\"\\n\");\n#endif\n\n if (!compare_data(&H,&IssuerEmailAddressOct,c))\n {\n printf(\"TEST X509 ERROR IssuerEmailAddress LINE %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n\n // Subject details\n#ifdef DEBUG\n printf(\"Subject Details\\n\");\n#endif\n ic=X509_find_subject(&H);\n\n c=X509_find_entity_property(&H,&CN,ic,&len);\n#ifdef DEBUG\n print_out(\"countryName: \",&H,c,len);\n printf(\"\\n\");\n#endif\n\n if (!compare_data(&H,&SubjectCOct,c))\n {\n printf(\"TEST X509 ERROR SubjectC LINE %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n\n c=X509_find_entity_property(&H,&SN,ic,&len);\n#ifdef DEBUG\n print_out(\"stateOrProvinceName: \",&H,c,len);\n printf(\"\\n\");\n#endif\n\n if (!compare_data(&H,&SubjectSTOct,c))\n {\n printf(\"TEST X509 ERROR SubjectST LINE %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n\n c=X509_find_entity_property(&H,&LN,ic,&len);\n#ifdef DEBUG\n print_out(\"localityName: \",&H,c,len);\n printf(\"\\n\");\n#endif\n\n if (!compare_data(&H,&SubjectLOct,c))\n {\n printf(\"TEST X509 ERROR SubjectL LINE %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n\n c=X509_find_entity_property(&H,&ON,ic,&len);\n#ifdef DEBUG\n print_out(\"organizationName: \",&H,c,len);\n printf(\"\\n\");\n#endif\n\n if (!compare_data(&H,&SubjectOOct,c))\n {\n printf(\"TEST X509 ERROR SubjectO LINE %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n\n c=X509_find_entity_property(&H,&UN,ic,&len);\n#ifdef DEBUG\n print_out(\"organizationalUnitName: \",&H,c,len);\n printf(\"\\n\");\n#endif\n\n if (!compare_data(&H,&SubjectOUOct,c))\n {\n printf(\"TEST X509 ERROR SubjectOU LINE %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n\n c=X509_find_entity_property(&H,&MN,ic,&len);\n#ifdef DEBUG\n print_out(\"commonName: \",&H,c,len);\n printf(\"\\n\");\n#endif\n\n if (!compare_data(&H,&SubjectCNOct,c))\n {\n printf(\"TEST X509 ERROR SubjectCN LINE %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n\n c=X509_find_entity_property(&H,&EN,ic,&len);\n#ifdef DEBUG\n print_out(\"emailAddress: \",&H,c,len);\n printf(\"\\n\");\n#endif\n\n if (!compare_data(&H,&SubjectEmailAddressOct,c))\n {\n printf(\"TEST X509 ERROR SubjectEmailAddress LINE %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n\n ic=X509_find_validity(&H);\n c=X509_find_start_date(&H,ic);\n#ifdef DEBUG\n print_date(\"start date: \",&H,c);\n printf(\"\\n\");\n#endif\n\n if (!compare_data(&H,&vfOct,c))\n {\n printf(\"TEST X509 ERROR VALID FROM LINE %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n\n c=X509_find_expiry_date(&H,ic);\n#ifdef DEBUG\n print_date(\"expiry date: \",&H,c);\n printf(\"\\n\");\n#endif\n\n if (!compare_data(&H,&vtOct,c))\n {\n printf(\"TEST X509 ERROR VALID TO LINE 
%d\\n\",i);\n exit(EXIT_FAILURE);\n }\n\n pt=X509_extract_public_key(&H,&CERTKEY);\n\n if (pt.type==0)\n {\n printf(\"TEST X509 ERROR NOT SUPPORTED BY LIBRARY LINE %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n\n#ifdef DEBUG\n if (pt.type==ECC)\n {\n printf(\"EXTRACTED ECC PUBLIC KEY: \");\n OCT_output(&CERTKEY);\n printf(\"\\n\");\n }\n if (pt.type==RSA)\n {\n printf(\"EXTRACTED RSA PUBLIC KEY: \");\n OCT_output(&CERTKEY);\n printf(\"\\n\");\n }\n#endif\n if (!compare_data(&CERTKEY,&CERT_PKOct,0))\n {\n printf(\"TEST X509 ERROR CERT PUBLIC KEY LINE %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n\n // Check CA signature\n // printf(\"Checking CA Signed Signature\\n\");\n\n#ifdef DEBUG\n printf(\"CA PUBLIC KEY: \");\n OCT_output(&CAKEY);\n printf(\"\\n\");\n#endif\n\n if (ca.type==ECC)\n {\n if (ca.curve!=CHOICE)\n {\n printf(\"TEST X509 ERROR CURVE IS NOT SUPPORTED LINE %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n int res=ECP_PUBLIC_KEY_VALIDATE(1,&CAKEY);\n if (res!=0)\n {\n printf(\"TEST X509 ERROR PUBLIC KEY IS INVALID LINE %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n\n sha=0;\n if (st.hash==H256) sha=SHA256;\n if (st.hash==H384) sha=SHA384;\n if (st.hash==H512) sha=SHA512;\n if (st.hash==0)\n {\n printf(\"TEST X509 ERROR HASH FUNCTION NOT SUPPORTED LINE %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n\n if (ECPVP_DSA(sha,&CAKEY,&H,&R,&S)!=0)\n {\n printf(\"X509 ERROR ECDSA VERIFICATION FAILED LINE %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n\n if (ca.type==RSA)\n {\n PK.e=65537; // assuming this!\n FF_fromOctet(PK.n,&CAKEY,FFLEN);\n\n sha=0;\n if (st.hash==H256) sha=SHA256;\n if (st.hash==H384) sha=SHA384;\n if (st.hash==H512) sha=SHA512;\n if (st.hash==0)\n {\n printf(\"TEST X509 ERROR Hash Function not supported LINE %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n PKCS15(sha,&H,&HP);\n\n RSA_ENCRYPT(&PK,&SIG,&HH);\n if (!OCT_comp(&HP,&HH))\n {\n printf(\"TEST X509 ERROR RSA VERIFICATION FAILED LINE %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n }\n\n }\n fclose(fp);\n if (!readLine)\n {\n printf(\"X509 ERROR Empty test vector file\\n\");\n exit(EXIT_FAILURE);\n }\n printf(\"SUCCESS TEST X509 PASSED\\n\");\n exit(EXIT_SUCCESS);\n}\n"
},
{
"alpha_fraction": 0.4557522237300873,
"alphanum_fraction": 0.4557522237300873,
"avg_line_length": 30.172412872314453,
"blob_id": "67e2c2347fe2e9c082bef2eb98f6a566c44b5364",
"content_id": "e35f41e593e3078474593e46e3dedd133025c012",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 904,
"license_type": "permissive",
"max_line_length": 66,
"num_lines": 29,
"path": "/wrappers/python/sovrin/pool.py",
"repo_name": "advatar/indy-sdk",
"src_encoding": "UTF-8",
"text": "from typing import Callable\nfrom . import SovrinError\n\nclass Pool(object):\n\n \"\"\"TODO: document it\"\"\"\n\n async def create_pool_ledger_config(command_handle: int,\n config_name: str,\n config: str) -> None:\n pass\n\n async def open_pool_ledger(command_handle: int,\n config_name: str,\n config: str,\n pool_handle: int) -> None:\n pass\n\n async def refresh_pool_ledger(command_handle: int,\n handle: int) -> None:\n pass\n\n async def close_pool_ledger(command_handle: int,\n handle: int) -> None:\n pass\n\n async def delete_pool_ledger_config(command_handle: int,\n config_name: str) -> None:\n pass\n"
},
{
"alpha_fraction": 0.5013006329536438,
"alphanum_fraction": 0.5019676089286804,
"avg_line_length": 34.11475372314453,
"blob_id": "10e7370b18c3512c54f320ba3dd8239180bf84f8",
"content_id": "71ba5f802e1bd4e7add073b5322eb8d52838b437",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Rust",
"length_bytes": 14993,
"license_type": "permissive",
"max_line_length": 159,
"num_lines": 427,
"path": "/tests/utils/ledger.rs",
"repo_name": "advatar/indy-sdk",
"src_encoding": "UTF-8",
"text": "extern crate time;\n\nuse sovrin::api::ErrorCode;\nuse sovrin::api::ledger::{\n sovrin_sign_and_submit_request,\n sovrin_submit_request,\n sovrin_build_get_ddo_request,\n sovrin_build_attrib_request,\n sovrin_build_get_attrib_request,\n sovrin_build_get_nym_request,\n sovrin_build_schema_request,\n sovrin_build_get_schema_request,\n sovrin_build_claim_def_txn,\n sovrin_build_get_claim_def_txn,\n sovrin_build_node_request,\n sovrin_build_nym_request\n};\n\nuse utils::callback::CallbackUtils;\nuse utils::timeout::TimeoutUtils;\n\nuse std::ffi::CString;\nuse std::ptr::null;\nuse std::sync::mpsc::channel;\n\npub struct LedgerUtils {}\n\nimpl LedgerUtils {\n pub fn sign_and_submit_request(pool_handle: i32, wallet_handle: i32, submitter_did: &str, request_json: &str) -> Result<String, ErrorCode> {\n let (sender, receiver) = channel();\n\n let cb = Box::new(move |err, request_result_json| {\n sender.send((err, request_result_json)).unwrap();\n });\n\n let (command_handle, cb) = CallbackUtils::closure_to_sign_and_submit_request_cb(cb);\n\n let submitter_did = CString::new(submitter_did).unwrap();\n let request_json = CString::new(request_json).unwrap();\n\n let err =\n sovrin_sign_and_submit_request(command_handle,\n pool_handle,\n wallet_handle,\n submitter_did.as_ptr(),\n request_json.as_ptr(),\n cb);\n\n if err != ErrorCode::Success {\n return Err(err);\n }\n\n let (err, request_result_json) = receiver.recv_timeout(TimeoutUtils::long_timeout()).unwrap();\n\n if err != ErrorCode::Success {\n return Err(err);\n }\n\n Ok(request_result_json)\n }\n\n pub fn submit_request(pool_handle: i32, request_json: &str) -> Result<String, ErrorCode> {\n let (sender, receiver) = channel();\n\n let cb = Box::new(move |err, request_result_json| {\n sender.send((err, request_result_json)).unwrap();\n });\n\n let (command_handle, cb) = CallbackUtils::closure_to_submit_request_cb(cb);\n\n let request_json = CString::new(request_json).unwrap();\n\n let err =\n sovrin_submit_request(command_handle,\n pool_handle,\n request_json.as_ptr(),\n cb);\n\n if err != ErrorCode::Success {\n return Err(err);\n }\n\n let (err, request_result_json) = receiver.recv_timeout(TimeoutUtils::long_timeout()).unwrap();\n\n if err != ErrorCode::Success {\n return Err(err);\n }\n\n Ok(request_result_json)\n }\n\n pub fn build_get_ddo_request(submitter_did: &str, target_did: &str) -> Result<String, ErrorCode> {\n let (sender, receiver) = channel();\n\n let cb = Box::new(move |err, request_json| {\n sender.send((err, request_json)).unwrap();\n });\n\n let (command_handle, cb) = CallbackUtils::closure_to_build_request_cb(cb);\n\n let submitter_did = CString::new(submitter_did).unwrap();\n let target_did = CString::new(target_did).unwrap();\n\n let err =\n sovrin_build_get_ddo_request(command_handle,\n submitter_did.as_ptr(),\n target_did.as_ptr(),\n cb);\n\n if err != ErrorCode::Success {\n return Err(err);\n }\n\n let (err, request_json) = receiver.recv_timeout(TimeoutUtils::long_timeout()).unwrap();\n\n if err != ErrorCode::Success {\n return Err(err);\n }\n\n Ok(request_json)\n }\n\n pub fn build_nym_request(submitter_did: &str, target_did: &str, verkey: Option<&str>,\n data: Option<&str>, role: Option<&str>) -> Result<String, ErrorCode> {\n let (sender, receiver) = channel();\n\n let cb = Box::new(move |err, request_json| {\n sender.send((err, request_json)).unwrap();\n });\n\n let (command_handle, cb) = CallbackUtils::closure_to_build_request_cb(cb);\n\n let submitter_did = CString::new(submitter_did).unwrap();\n let target_did = 
CString::new(target_did).unwrap();\n\n // keep each optional CString in a named binding so its buffer outlives the as_ptr() calls below\n let verkey_str = verkey.map(|s| CString::new(s).unwrap()).unwrap_or(CString::new(\"\").unwrap());\n let data_str = data.map(|s| CString::new(s).unwrap()).unwrap_or(CString::new(\"\").unwrap());\n let role_str = role.map(|s| CString::new(s).unwrap()).unwrap_or(CString::new(\"\").unwrap());\n let err =\n sovrin_build_nym_request(command_handle,\n submitter_did.as_ptr(),\n target_did.as_ptr(),\n if verkey.is_some() { verkey_str.as_ptr() } else { null() },\n if data.is_some() { data_str.as_ptr() } else { null() },\n if role.is_some() { role_str.as_ptr() } else { null() },\n cb);\n\n if err != ErrorCode::Success {\n return Err(err);\n }\n\n let (err, request_json) = receiver.recv_timeout(TimeoutUtils::long_timeout()).unwrap();\n\n if err != ErrorCode::Success {\n return Err(err);\n }\n\n Ok(request_json)\n }\n\n pub fn build_attrib_request(submitter_did: &str, target_did: &str, hash: Option<&str>, raw: Option<&str>, enc: Option<&str>) -> Result<String, ErrorCode> {\n let (sender, receiver) = channel();\n\n let cb = Box::new(move |err, request_json| {\n sender.send((err, request_json)).unwrap();\n });\n\n let (command_handle, cb) = CallbackUtils::closure_to_build_request_cb(cb);\n\n let submitter_did = CString::new(submitter_did).unwrap();\n let target_did = CString::new(target_did).unwrap();\n\n let hash_str = hash.map(|s| CString::new(s).unwrap()).unwrap_or(CString::new(\"\").unwrap());\n let raw_str = raw.map(|s| CString::new(s).unwrap()).unwrap_or(CString::new(\"\").unwrap());\n let enc_str = enc.map(|s| CString::new(s).unwrap()).unwrap_or(CString::new(\"\").unwrap());\n\n let err =\n sovrin_build_attrib_request(command_handle,\n submitter_did.as_ptr(),\n target_did.as_ptr(),\n if hash.is_some() { hash_str.as_ptr() } else { null() },\n if raw.is_some() { raw_str.as_ptr() } else { null() },\n if enc.is_some() { enc_str.as_ptr() } else { null() },\n cb);\n\n if err != ErrorCode::Success {\n return Err(err);\n }\n\n let (err, request_json) = receiver.recv_timeout(TimeoutUtils::long_timeout()).unwrap();\n\n if err != ErrorCode::Success {\n return Err(err);\n }\n\n Ok(request_json)\n }\n\n pub fn build_get_attrib_request(submitter_did: &str, target_did: &str, data: &str) -> Result<String, ErrorCode> {\n let (sender, receiver) = channel();\n\n let cb = Box::new(move |err, request_json| {\n sender.send((err, request_json)).unwrap();\n });\n\n let (command_handle, cb) = CallbackUtils::closure_to_build_request_cb(cb);\n\n let submitter_did = CString::new(submitter_did).unwrap();\n let target_did = CString::new(target_did).unwrap();\n let data = CString::new(data).unwrap();\n\n let err =\n sovrin_build_get_attrib_request(command_handle,\n submitter_did.as_ptr(),\n target_did.as_ptr(),\n data.as_ptr(),\n cb);\n\n if err != ErrorCode::Success {\n return Err(err);\n }\n\n let (err, request_json) = receiver.recv_timeout(TimeoutUtils::long_timeout()).unwrap();\n\n if err != ErrorCode::Success {\n return Err(err);\n }\n\n Ok(request_json)\n }\n\n pub fn build_get_nym_request(submitter_did: &str, target_did: &str) -> Result<String, ErrorCode> {\n let (sender, receiver) = channel();\n\n let cb = Box::new(move |err, request_json| {\n sender.send((err, request_json)).unwrap();\n });\n\n let (command_handle, cb) = CallbackUtils::closure_to_build_request_cb(cb);\n\n let submitter_did = CString::new(submitter_did).unwrap();\n let target_did = CString::new(target_did).unwrap();\n\n let err =\n sovrin_build_get_nym_request(command_handle,\n submitter_did.as_ptr(),\n 
target_did.as_ptr(),\n cb);\n\n if err != ErrorCode::Success {\n return Err(err);\n }\n\n let (err, request_json) = receiver.recv_timeout(TimeoutUtils::long_timeout()).unwrap();\n\n if err != ErrorCode::Success {\n return Err(err);\n }\n\n Ok(request_json)\n }\n\n pub fn build_schema_request(submitter_did: &str, data: &str) -> Result<String, ErrorCode> {\n let (sender, receiver) = channel();\n\n let cb = Box::new(move |err, request_json| {\n sender.send((err, request_json)).unwrap();\n });\n\n let (command_handle, cb) = CallbackUtils::closure_to_build_request_cb(cb);\n\n let submitter_did = CString::new(submitter_did).unwrap();\n let data = CString::new(data).unwrap();\n\n let err =\n sovrin_build_schema_request(command_handle,\n submitter_did.as_ptr(),\n data.as_ptr(),\n cb);\n\n if err != ErrorCode::Success {\n return Err(err);\n }\n\n let (err, request_json) = receiver.recv_timeout(TimeoutUtils::long_timeout()).unwrap();\n\n if err != ErrorCode::Success {\n return Err(err);\n }\n\n Ok(request_json)\n }\n\n pub fn build_get_schema_request(submitter_did: &str, dest: &str, data: &str) -> Result<String, ErrorCode> {\n let (sender, receiver) = channel();\n\n let cb = Box::new(move |err, request_json| {\n sender.send((err, request_json)).unwrap();\n });\n\n let (command_handle, cb) = CallbackUtils::closure_to_build_request_cb(cb);\n\n let submitter_did = CString::new(submitter_did).unwrap();\n let dest = CString::new(dest).unwrap();\n let data = CString::new(data).unwrap();\n\n let err =\n sovrin_build_get_schema_request(command_handle,\n submitter_did.as_ptr(),\n dest.as_ptr(),\n data.as_ptr(),\n cb);\n\n if err != ErrorCode::Success {\n return Err(err);\n }\n\n let (err, request_json) = receiver.recv_timeout(TimeoutUtils::long_timeout()).unwrap();\n\n if err != ErrorCode::Success {\n return Err(err);\n }\n\n Ok(request_json)\n }\n\n pub fn build_claim_def_txn(submitter_did: &str, xref: i32, signature_type: &str, data: &str) -> Result<String, ErrorCode> {\n let (sender, receiver) = channel();\n\n let cb = Box::new(move |err, request_json| {\n sender.send((err, request_json)).unwrap();\n });\n\n let (command_handle, cb) = CallbackUtils::closure_to_build_request_cb(cb);\n\n let submitter_did = CString::new(submitter_did).unwrap();\n let signature_type = CString::new(signature_type).unwrap();\n let data = CString::new(data).unwrap();\n\n let err =\n sovrin_build_claim_def_txn(command_handle,\n submitter_did.as_ptr(),\n xref,\n signature_type.as_ptr(),\n data.as_ptr(),\n cb);\n\n if err != ErrorCode::Success {\n return Err(err);\n }\n\n let (err, request_json) = receiver.recv_timeout(TimeoutUtils::long_timeout()).unwrap();\n\n if err != ErrorCode::Success {\n return Err(err);\n }\n\n Ok(request_json)\n }\n\n pub fn build_get_claim_def_txn(submitter_did: &str, xref: i32, signature_type: &str, origin: &str) -> Result<String, ErrorCode> {\n let (sender, receiver) = channel();\n\n let cb = Box::new(move |err, request_json| {\n sender.send((err, request_json)).unwrap();\n });\n\n let (command_handle, cb) = CallbackUtils::closure_to_build_request_cb(cb);\n\n let submitter_did = CString::new(submitter_did).unwrap();\n let signature_type = CString::new(signature_type).unwrap();\n let origin = CString::new(origin).unwrap();\n\n let err =\n sovrin_build_get_claim_def_txn(command_handle,\n submitter_did.as_ptr(),\n xref,\n signature_type.as_ptr(),\n origin.as_ptr(),\n cb);\n\n if err != ErrorCode::Success {\n return Err(err);\n }\n\n let (err, request_json) = 
receiver.recv_timeout(TimeoutUtils::long_timeout()).unwrap();\n\n if err != ErrorCode::Success {\n return Err(err);\n }\n\n Ok(request_json)\n }\n\n pub fn build_node_request(submitter_did: &str, target_did: &str, data: &str) -> Result<String, ErrorCode> {\n let (sender, receiver) = channel();\n\n let cb = Box::new(move |err, request_json| {\n sender.send((err, request_json)).unwrap();\n });\n\n let (command_handle, cb) = CallbackUtils::closure_to_build_request_cb(cb);\n\n let submitter_did = CString::new(submitter_did).unwrap();\n let target_did = CString::new(target_did).unwrap();\n let data = CString::new(data).unwrap();\n\n let err =\n sovrin_build_node_request(command_handle,\n submitter_did.as_ptr(),\n target_did.as_ptr(),\n data.as_ptr(),\n cb);\n\n if err != ErrorCode::Success {\n return Err(err);\n }\n\n let (err, request_json) = receiver.recv_timeout(TimeoutUtils::long_timeout()).unwrap();\n\n if err != ErrorCode::Success {\n return Err(err);\n }\n\n Ok(request_json)\n }\n}"
},
{
"alpha_fraction": 0.46621572971343994,
"alphanum_fraction": 0.5151130557060242,
"avg_line_length": 30.8228702545166,
"blob_id": "bd5cb2f67c409c550887526b0ba20cba06d9cb6b",
"content_id": "d862b848bea9a9b594c7ac517ba30995f2d8bb93",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 14193,
"license_type": "permissive",
"max_line_length": 96,
"num_lines": 446,
"path": "/wrappers/ios/Tests/milagro-ios-test-app/milagro-test-app/tests/test_fp2_arithmetics.c",
"repo_name": "advatar/indy-sdk",
"src_encoding": "UTF-8",
"text": "/**\n * @file test_fp_arithmetics.c\n * @author Alessandro Budroni\n * @brief Test for aritmetics with FP\n *\n * LICENSE\n *\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements. See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership. The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License. You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing,\n * software distributed under the License is distributed on an\n * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n * KIND, either express or implied. See the License for the\n * specific language governing permissions and limitations\n * under the License.\n */\n\n\n#include \"arch.h\"\n#include \"amcl.h\"\n#include \"utils.h\"\n#include <stdio.h>\n#include <string.h>\n#include <stdlib.h>\n\n#define LINE_LEN 10000\n#define MAX_STRING 300\n\nstatic void read_BIG(BIG A, char* string)\n{\n int len;\n char support[LINE_LEN];\n BIG_zero(A);\n len = strlen(string)+1;\n amcl_hex2bin(string,support,len);\n len = (len-1)/2;;\n BIG_fromBytesLen(A,support,len);\n BIG_norm(A);\n}\n\nvoid read_FP2(FP2 *fp2, char* stringx)\n{\n char *stringy;\n BIG x,y;\n\n stringy = strchr(stringx,',');\n stringy[0] = '\\0';\n stringy++;\n\n read_BIG(x,stringx);\n read_BIG(y,stringy);\n\n FP2_from_BIGs(fp2,x,y);\n}\n\nint test_fp2_arithmetics(int argc, char** argv)\n{\n printf(\"test_fp2_arithmetics() started\\n\");\n if (argc != 2)\n {\n printf(\"usage: ./test_fp2_arithmetics [path to test vector file]\\n\");\n exit(EXIT_FAILURE);\n }\n\n int i = 0, len = 0, j = 0;\n FILE *fp;\n\n char line[LINE_LEN];\n char * linePtr = NULL;\n\n BIG M;\n FP2 FP2aux1, FP2aux2, FP2aux3, FP2aux4;\n\n FP2 FP2_1;\n const char* FP2_1line = \"FP2_1 = \";\n FP2 FP2_2;\n const char* FP2_2line = \"FP2_2 = \";\n FP2 FP2add;\n const char* FP2addline = \"FP2add = \";\n FP2 FP2neg;\n const char* FP2negline = \"FP2neg = \";\n FP2 FP2sub;\n const char* FP2subline = \"FP2sub = \";\n FP2 FP2conj;\n const char* FP2conjline = \"FP2conj = \";\n BIG BIGsc;\n const char* BIGscline = \"BIGsc = \";\n FP2 FP2pmul;\n const char* FP2pmulline = \"FP2pmul = \";\n FP2 FP2imul;\n const char* FP2imulline = \"FP2imul = \";\n FP2 FP2sqr;\n const char* FP2sqrline = \"FP2sqr = \";\n FP2 FP2mul;\n const char* FP2mulline = \"FP2mul = \";\n FP2 FP2inv;\n const char* FP2invline = \"FP2inv = \";\n FP2 FP2div2;\n const char* FP2div2line = \"FP2div2 = \";\n FP2 FP2_mulip;\n const char* FP2_mulipline = \"FP2_mulip = \";\n FP2 FP2_divip;\n const char* FP2_divipline = \"FP2_divip = \";\n FP2 FP2pow;\n const char* FP2powline = \"FP2pow = \";\n\n BIG_rcopy(M,Modulus);\n\n// Set to zero\n FP2_zero(&FP2aux1);\n FP2_zero(&FP2aux2);\n\n// Testing equal function and set zero function\n if(!FP2_equals(&FP2aux1,&FP2aux2) || !FP2_iszilch(&FP2aux1) || !FP2_iszilch(&FP2aux2))\n {\n printf(\"ERROR comparing FP2s or setting FP2 to zero FP\\n\");\n exit(EXIT_FAILURE);\n }\n\n// Set to one\n FP2_one(&FP2aux1);\n FP2_one(&FP2aux2);\n\n// Testing equal function and set one function\n if(!FP2_equals(&FP2aux1,&FP2aux2) || !FP2_isunity(&FP2aux1) || !FP2_isunity(&FP2aux2))\n {\n printf(\"ERROR comparing FP2s or setting FP2 to unity FP\\n\");\n exit(EXIT_FAILURE);\n }\n\n\n fp = fopen(argv[1], \"r\");\n if (fp == NULL)\n 
{\n printf(\"ERROR opening test vector file\\n\");\n exit(EXIT_FAILURE);\n }\n\n while (fgets(line, LINE_LEN, fp) != NULL)\n {\n i++;\n// Read first FP2 and perform some tests\n if (!strncmp(line,FP2_1line, strlen(FP2_1line)))\n {\n len = strlen(FP2_1line);\n linePtr = line + len;\n read_FP2(&FP2_1,linePtr);\n FP2_cmove(&FP2aux1,&FP2_1,0);\n if(FP2_equals(&FP2aux1,&FP2_1) != 0)\n {\n printf(\"ERROR in conditional copy of FP2, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n FP2_cmove(&FP2aux1,&FP2_1,1);\n if(FP2_equals(&FP2aux1,&FP2_1) != 1)\n {\n printf(\"ERROR in conditional copy of FP2, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n FP2_from_FPs(&FP2aux1,FP2_1.a,FP2_1.b);\n if(FP2_equals(&FP2aux1,&FP2_1) != 1)\n {\n printf(\"ERROR in generating FP2 from two FPs, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n FP2_from_BIGs(&FP2aux1,FP2_1.a,FP2_1.b);\n FP_redc(FP2aux1.a);\n FP_redc(FP2aux1.b);\n if(FP2_equals(&FP2aux1,&FP2_1) != 1)\n {\n printf(\"ERROR in generating FP2 from two BIGs, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n FP2_from_FP(&FP2aux1,FP2_1.a);\n FP2_copy(&FP2aux2,&FP2_1);\n BIG_zero(FP2aux2.b);\n if(FP2_equals(&FP2aux1,&FP2aux2) != 1)\n {\n printf(\"ERROR in generating FP2 from one FP, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n FP2_from_BIG(&FP2aux1,FP2_1.a);\n FP_redc(FP2aux1.a);\n FP2_copy(&FP2aux2,&FP2_1);\n BIG_zero(FP2aux2.b);\n if(FP2_equals(&FP2aux1,&FP2aux2) != 1)\n {\n printf(\"ERROR in generating FP2 from one BIG, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n// Read second FP2\n if (!strncmp(line,FP2_2line, strlen(FP2_2line)))\n {\n len = strlen(FP2_2line);\n linePtr = line + len;\n read_FP2(&FP2_2,linePtr);\n }\n// Addition tests\n if (!strncmp(line,FP2addline, strlen(FP2addline)))\n {\n len = strlen(FP2addline);\n linePtr = line + len;\n read_FP2(&FP2add,linePtr);\n FP2_copy(&FP2aux1,&FP2_1);\n FP2_copy(&FP2aux2,&FP2_2);\n FP2_add(&FP2aux1,&FP2aux1,&FP2aux2);\n// test commutativity P+Q = Q+P\n FP2_copy(&FP2aux3,&FP2_1);\n FP2_add(&FP2aux2,&FP2aux2,&FP2aux3);\n FP2_reduce(&FP2aux1);\n FP2_norm(&FP2aux1);\n FP2_reduce(&FP2aux2);\n FP2_norm(&FP2aux2);\n if(!FP2_equals(&FP2aux1,&FP2add) || !FP2_equals(&FP2aux2,&FP2add))\n {\n printf(\"ERROR adding two FP2, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n// test associativity (P+Q)+R = P+(Q+R)\n FP2_copy(&FP2aux1,&FP2_1);\n FP2_copy(&FP2aux3,&FP2_1);\n FP2_copy(&FP2aux2,&FP2_2);\n FP2_copy(&FP2aux4,&FP2add);\n FP2_add(&FP2aux1,&FP2aux1,&FP2aux2);\n FP2_add(&FP2aux1,&FP2aux1,&FP2aux4);\n FP2_add(&FP2aux2,&FP2aux2,&FP2aux4);\n FP2_add(&FP2aux2,&FP2aux2,&FP2aux3);\n FP2_reduce(&FP2aux1);\n FP2_reduce(&FP2aux2);\n FP2_norm(&FP2aux1);\n FP2_norm(&FP2aux2);\n if(!FP2_equals(&FP2aux1,&FP2aux2))\n {\n printf(\"ERROR testing associativity between three FP2s, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n// Negative an FP2\n if (!strncmp(line,FP2negline, strlen(FP2negline)))\n {\n len = strlen(FP2negline);\n linePtr = line + len;\n read_FP2(&FP2neg,linePtr);\n FP2_copy(&FP2aux1,&FP2_1);\n FP2_neg(&FP2aux1,&FP2aux1);\n FP2_reduce(&FP2aux1);\n FP2_norm(&FP2aux1);\n if(!FP2_equals(&FP2aux1,&FP2neg))\n {\n printf(\"ERROR in computing negative of FP2, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n// Subtraction test\n if (!strncmp(line,FP2subline, strlen(FP2subline)))\n {\n len = strlen(FP2subline);\n linePtr = line + len;\n read_FP2(&FP2sub,linePtr);\n FP2_copy(&FP2aux1,&FP2_1);\n FP2_copy(&FP2aux2,&FP2_2);\n FP2_sub(&FP2aux1,&FP2aux1,&FP2aux2);\n FP2_reduce(&FP2aux1);\n FP2_norm(&FP2aux1);\n if(FP2_equals(&FP2aux1,&FP2sub) == 0)\n 
{\n printf(\"ERROR subtraction between two FP2, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n// Compute conjugate\n if (!strncmp(line,FP2conjline, strlen(FP2conjline)))\n {\n len = strlen(FP2conjline);\n linePtr = line + len;\n read_FP2(&FP2conj,linePtr);\n FP2_copy(&FP2aux1,&FP2_1);\n FP2_conj(&FP2aux1,&FP2aux1);\n FP2_reduce(&FP2aux1);\n FP2_norm(&FP2aux1);\n if(!FP2_equals(&FP2aux1,&FP2conj))\n {\n printf(\"ERROR computing conjugate of FP2, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n// Read multiplicator\n if (!strncmp(line,BIGscline, strlen(BIGscline)))\n {\n len = strlen(BIGscline);\n linePtr = line + len;\n read_BIG(BIGsc,linePtr);\n }\n// Multiplication by BIGsc\n if (!strncmp(line,FP2pmulline, strlen(FP2pmulline)))\n {\n len = strlen(FP2pmulline);\n linePtr = line + len;\n read_FP2(&FP2pmul,linePtr);\n FP2_pmul(&FP2aux1,&FP2_1,BIGsc);\n FP_nres(FP2aux1.a);\n FP_nres(FP2aux1.b);\n if(!FP2_equals(&FP2aux1,&FP2pmul))\n {\n printf(\"ERROR in multiplication by BIG, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n// Raise FP2 by power BIGsc\n if (!strncmp(line,FP2powline, strlen(FP2powline)))\n {\n len = strlen(FP2powline);\n linePtr = line + len;\n read_FP2(&FP2pow,linePtr);\n FP2_pow(&FP2aux1,&FP2_1,BIGsc);\n FP2_reduce(&FP2aux1);\n if(!FP2_equals(&FP2aux1,&FP2pow))\n {\n printf(\"ERROR in raising FP2 by power BIG, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n// Multiplication by j = 1..10\n if (!strncmp(line,FP2imulline, strlen(FP2imulline)))\n {\n len = strlen(FP2imulline);\n linePtr = line + len;\n read_FP2(&FP2imul,linePtr);\n FP2_imul(&FP2aux1,&FP2_1,j);\n j++;\n FP2_reduce(&FP2aux1);\n FP2_norm(&FP2aux1);\n if(!FP2_equals(&FP2aux1,&FP2imul))\n {\n printf(\"ERROR in multiplication by small integer, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n// Square and square root\n if (!strncmp(line,FP2sqrline, strlen(FP2sqrline)))\n {\n len = strlen(FP2sqrline);\n linePtr = line + len;\n read_FP2(&FP2sqr,linePtr);\n FP2_copy(&FP2aux1,&FP2_1);\n FP2_sqr(&FP2aux1,&FP2aux1);\n FP2_reduce(&FP2aux1);\n FP2_norm(&FP2aux1);\n if(!FP2_equals(&FP2aux1,&FP2sqr))\n {\n printf(\"ERROR in squaring FP2, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n FP2_sqrt(&FP2aux1,&FP2aux1);\n FP2_neg(&FP2aux2,&FP2aux1);\n if(!FP2_equals(&FP2aux1,&FP2_1) && !FP2_equals(&FP2aux2,&FP2_1))\n {\n printf(\"ERROR square/square root consistency FP2, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n// Multiplication between two FP2s\n if (!strncmp(line,FP2mulline, strlen(FP2mulline)))\n {\n len = strlen(FP2mulline);\n linePtr = line + len;\n read_FP2(&FP2mul,linePtr);\n FP2_mul(&FP2aux1,&FP2_1,&FP2_2);\n FP2_reduce(&FP2aux1);\n FP2_norm(&FP2aux1);\n if(!FP2_equals(&FP2aux1,&FP2mul))\n {\n printf(\"ERROR in multiplication between two FP2s, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n// Inverse\n if (!strncmp(line,FP2invline, strlen(FP2invline)))\n {\n len = strlen(FP2invline);\n linePtr = line + len;\n read_FP2(&FP2inv,linePtr);\n FP2_copy(&FP2aux1,&FP2_1);\n FP2_inv(&FP2aux1,&FP2aux1);\n if(!FP2_equals(&FP2aux1,&FP2inv))\n {\n printf(\"ERROR in computing inverse of FP2, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n// Divide an FP2 by 2\n if (!strncmp(line,FP2div2line, strlen(FP2div2line)))\n {\n len = strlen(FP2div2line);\n linePtr = line + len;\n read_FP2(&FP2div2,linePtr);\n FP2_div2(&FP2aux1,&FP2_1);\n if(!FP2_equals(&FP2aux1,&FP2div2))\n {\n printf(\"ERROR in computing division FP2 by 2, line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n// Multiply an FP2 by (1+sqrt(-1))\n if (!strncmp(line,FP2_mulipline, 
strlen(FP2_mulipline)))\n {\n len = strlen(FP2_mulipline);\n linePtr = line + len;\n read_FP2(&FP2_mulip,linePtr);\n FP2_copy(&FP2aux1,&FP2_1);\n FP2_mul_ip(&FP2aux1);\n if(!FP2_equals(&FP2aux1,&FP2_mulip))\n {\n printf(\"ERROR in computing multiplication of FP2 by (1+sqrt(-1)), line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n// Divide an FP2 by (1+sqrt(-1))\n if (!strncmp(line,FP2_divipline, strlen(FP2_divipline)))\n {\n len = strlen(FP2_divipline);\n linePtr = line + len;\n read_FP2(&FP2_divip,linePtr);\n FP2_copy(&FP2aux1,&FP2_1);\n FP2_div_ip(&FP2aux1);\n if(!FP2_equals(&FP2aux1,&FP2_divip))\n {\n printf(\"ERROR in computing division of FP2 by (1+sqrt(-1)), line %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n }\n }\n fclose(fp);\n\n printf(\"test_fp2_arithmetics() SUCCESS TEST ARITHMETIC OF FP2 PASSED\\n\");\n return EXIT_SUCCESS;\n}\n"
},
{
"alpha_fraction": 0.48281553387641907,
"alphanum_fraction": 0.4973735511302948,
"avg_line_length": 24.143396377563477,
"blob_id": "02234eef06a4de56dc76137c47d2fddefa613571",
"content_id": "09a0bcd34d8dd1bb580368e075e0262fec6213d9",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 6663,
"license_type": "permissive",
"max_line_length": 110,
"num_lines": 265,
"path": "/wrappers/ios/Tests/milagro-ios-test-app/milagro-test-app/tests/test_ecdsa_verify.c",
"repo_name": "advatar/indy-sdk",
"src_encoding": "UTF-8",
"text": "/**\n * @file test_ecdsa_verify.c\n * @author Kealan McCusker\n * @brief Test function for ECDSA verification,\n *\n * LICENSE\n *\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements. See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership. The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License. You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing,\n * software distributed under the License is distributed on an\n * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n * KIND, either express or implied. See the License for the\n * specific language governing permissions and limitations\n * under the License.\n */\n\n/* Build executible after installation:\n\n gcc -std=c99 -g ./test_ecdsa_verify.c -I/opt/amcl/include -L/opt/amcl/lib -lamcl -lecdh -o test_ecdsa_verify\n\n*/\n\n#include \"ecdh.h\"\n#include \"utils.h\"\n#include <stdio.h>\n#include <string.h>\n#include <stdlib.h>\n\ntypedef enum { false, true } bool;\n\n#define LINE_LEN 300\n// #define DEBUG\n\nint test_ecdsa_verify(int argc, char** argv)\n{\n if (argc != 3)\n {\n printf(\"usage: ./test_ecdsa_sign [path to test vector file] [hash type: sha256||sha512] \\n\");\n exit(EXIT_FAILURE);\n }\n int rc;\n bool pass;\n FILE * fp = NULL;\n char line[LINE_LEN];\n char * linePtr = NULL;\n int l1=0;\n int l2=0;\n char * Msg = NULL;\n const char* MsgStr = \"Msg = \";\n octet MsgOct;\n char Qx[EGS];\n const char* QxStr = \"Qx = \";\n octet QxOct = {EGS,EGS,Qx};\n char Qy[EGS];\n const char* QyStr = \"Qy = \";\n octet QyOct = {EGS,EGS,Qy};\n char * R = NULL;\n const char* RStr = \"R = \";\n octet ROct;\n char * S = NULL;\n const char* SStr = \"S = \";\n octet SOct;\n const char* ResultStr = \"Result = \";\n\n // Assign hash type\n int hash_type;\n if (!strcmp(argv[2], \"sha256\"))\n {\n hash_type = 32;\n }\n else if (!strcmp(argv[2], \"sha384\"))\n {\n hash_type = 48;\n }\n else if (!strcmp(argv[2], \"sha512\"))\n {\n hash_type = 64;\n }\n else\n {\n hash_type = 32;\n }\n\n fp = fopen(argv[1], \"r\");\n if (fp == NULL)\n {\n printf(\"ERROR opening test vector file\\n\");\n exit(EXIT_FAILURE);\n }\n\n bool readLine = false;\n\n int i=0;\n while (fgets(line, LINE_LEN, fp) != NULL)\n {\n i++;\n readLine = true;\n if (!strncmp(line, MsgStr, strlen(MsgStr)))\n {\n#ifdef DEBUG\n printf(\"line %d %s\\n\", i,line);\n#endif\n // Find hex value in string\n linePtr = line + strlen(MsgStr);\n\n // Allocate memory\n l1 = strlen(linePtr)-1;\n l2 = l1/2;\n Msg = (char*) malloc (l2);\n if (Msg==NULL)\n exit(EXIT_FAILURE);\n\n // Msg binary value\n amcl_hex2bin(linePtr, Msg, l1);\n\n MsgOct.len=l2;\n MsgOct.max=l2;\n MsgOct.val=Msg;\n }\n\n if (!strncmp(line, QxStr, strlen(QxStr)))\n {\n#ifdef DEBUG\n printf(\"line %d %s\\n\", i,line);\n#endif\n // Find hex value in string\n linePtr = line + strlen(QxStr);\n\n // Allocate data\n l1 = strlen(linePtr)-1;\n\n // Qx binary value\n amcl_hex2bin(linePtr, Qx, l1);\n }\n\n if (!strncmp(line, QyStr, strlen(QyStr)))\n {\n#ifdef DEBUG\n printf(\"line %d %s\\n\", i,line);\n#endif\n // Find hex value in string\n linePtr = line + strlen(QyStr);\n\n // Allocate data\n l1 = strlen(linePtr)-1;\n\n // Qy binary value\n amcl_hex2bin(linePtr, Qy, l1);\n }\n\n if (!strncmp(line, RStr, 
strlen(RStr)))\n {\n#ifdef DEBUG\n printf(\"line %d %s\\n\", i,line);\n#endif\n // Find hex value in string\n linePtr = line + strlen(RStr);\n\n // Allocate memory\n l1 = strlen(linePtr)-1;\n l2 = l1/2;\n R = (char*) malloc (l2);\n if (R==NULL)\n exit(EXIT_FAILURE);\n\n // R binary value\n amcl_hex2bin(linePtr, R, l1);\n\n ROct.len=l2;\n ROct.max=l2;\n ROct.val=R;\n }\n\n if (!strncmp(line, SStr, strlen(SStr)))\n {\n#ifdef DEBUG\n printf(\"line %d %s\\n\", i,line);\n#endif\n // Find hex value in string\n linePtr = line + strlen(SStr);\n\n // Allocate memory\n l1 = strlen(linePtr)-1;\n l2 = l1/2;\n S = (char*) malloc (l2);\n if (S==NULL)\n exit(EXIT_FAILURE);\n\n // S binary value\n amcl_hex2bin(linePtr, S, l1);\n\n SOct.len=l2;\n SOct.max=l2;\n SOct.val=S;\n }\n\n if (!strncmp(line, ResultStr, strlen(ResultStr)))\n {\n#ifdef DEBUG\n printf(\"line %d %s\\n\", i,line);\n#endif\n linePtr = line + strlen(ResultStr);\n char r1[1];\n char r2[1] = {'P'};\n strncpy(r1,linePtr,1);\n if (r1[0] == r2[0])\n {\n pass = true;\n }\n else\n {\n pass = false;\n }\n\n // Assign Public Key to EC\n BIG qx, qy;\n char q[2*EFS+1];\n BIG_fromBytes(qx,QxOct.val);\n BIG_fromBytes(qy,QyOct.val);\n octet QOct= {sizeof(q),sizeof(q),q};\n QOct.val[0]=4;\n BIG_toBytes(&(QOct.val[1]),qx);\n BIG_toBytes(&(QOct.val[EFS+1]),qy);\n\n rc = ECPVP_DSA(hash_type,&QOct,&MsgOct,&ROct,&SOct);\n // Test expected to pass. rc is true for fail\n if ( pass && rc )\n {\n printf(\"TEST ECDSA VERIFY FAILED LINE %d pass %d rc %d\\n\",i,pass,rc);\n exit(EXIT_FAILURE);\n }\n\n // Test expected to fail\n if ( !pass && !rc )\n {\n printf(\"TEST ECDSA VERIFY FAILED LINE %d pass %d rc %d\\n\",i,pass,rc);\n exit(EXIT_FAILURE);\n }\n\n free(Msg);\n Msg = NULL;\n free(R);\n R = NULL;\n free(S);\n S = NULL;\n }\n }\n fclose(fp);\n if (!readLine)\n {\n printf(\"ERROR Empty test vector file\\n\");\n exit(EXIT_FAILURE);\n }\n printf(\"SUCCESS TEST ECDSA %s VERIFY PASSED\\n\", argv[2]);\n exit(EXIT_SUCCESS);\n}\n"
},
{
"alpha_fraction": 0.5074929594993591,
"alphanum_fraction": 0.5174835920333862,
"avg_line_length": 23.5440616607666,
"blob_id": "47a74bb539ab76d753cdf30659fd08f82bd590fc",
"content_id": "1df9a408076382a1b19749fdbc2de2c39d4fe7eb",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 6406,
"license_type": "permissive",
"max_line_length": 116,
"num_lines": 261,
"path": "/wrappers/ios/Tests/milagro-ios-test-app/milagro-test-app/tests/test_big_consistency.c",
"repo_name": "advatar/indy-sdk",
"src_encoding": "UTF-8",
"text": "/**\n * @file test_big_consistency.c\n * @author Alessandro Budroni\n * @brief Test for consistency with BIG\n *\n * LICENSE\n *\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements. See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership. The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License. You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing,\n * software distributed under the License is distributed on an\n * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n * KIND, either express or implied. See the License for the\n * specific language governing permissions and limitations\n * under the License.\n */\n\n\n#include \"arch.h\"\n#include \"amcl.h\"\n#include \"utils.h\"\n#include <stdio.h>\n#include <string.h>\n#include <stdlib.h>\n\ntypedef enum { false, true } bool;\n\nint test_big_consistency()\n{\n\n int i,j;\n char raw[256], bytes[MODBYTES];\n csprng rng;\n\n BIG F,G,H,I,Z;\n DBIG DF,DG;\n\n /* Fake random source */\n RAND_clean(&rng);\n for (i=0; i<256; i++) raw[i]=(char)i;\n RAND_seed(&rng,256,raw);\n\n /* Set to zero */\n BIG_zero(F);\n BIG_zero(G);\n BIG_dzero(DF);\n BIG_dzero(DG);\n\n /* Testing equal function and set zero function */\n if(BIG_comp(G,F) | !BIG_iszilch(F) | !BIG_iszilch(G) | BIG_dcomp(DG,DF) | !BIG_diszilch(DF) | !BIG_diszilch(DG))\n {\n printf(\"ERROR comparing or setting zero BIG\\n\");\n exit(EXIT_FAILURE);\n }\n\n /* Testing coping and equal function */\n BIG_random(F,&rng);\n BIG_random(DF,&rng);\n BIG_copy(G,F);\n BIG_dcopy(DG,DF);\n if(BIG_comp(G,F) | BIG_dcomp(DG,DF))\n {\n printf(\"ERROR testing coping and equal BIG\\n\");\n exit(EXIT_FAILURE);\n }\n\n /* Testing addition, subtraction */\n for (i=0; i<100; i++)\n {\n BIG_random(F,&rng);\n BIG_random(H,&rng);\n BIG_copy(G,F);\n BIG_add(G,G,H);\n BIG_sub(G,G,H);\n BIG_sub(H,H,H);\n if(BIG_comp(G,F) | !BIG_iszilch(H))\n {\n printf(\"ERROR testing addition/subtraction BIG\\n\");\n exit(EXIT_FAILURE);\n }\n }\n BIG_one(I);\n BIG_zero(Z);\n BIG_zero(F);\n BIG_add(F,F,F);\n BIG_add(Z,I,Z);\n if(BIG_comp(Z,I) | !BIG_iszilch(F))\n {\n printf(\"ERROR testing addition/subtraction BIG\\n\");\n exit(EXIT_FAILURE);\n }\n\n /* Testing small multiplication and division by 3 */\n for (i=0; i<100; i++)\n {\n BIG_random(F,&rng);\n BIG_copy(G,F);\n BIG_imul(G,G,3);\n BIG_div3(G);\n if(BIG_comp(G,F))\n {\n printf(\"ERROR testing small multiplication and division by 3 BIG\\n\");\n exit(EXIT_FAILURE);\n }\n }\n\n /* Testing small multiplication and addition */\n BIG_random(F,&rng);\n for (j = 1; j <= 20; ++j)\n {\n BIG_imul(H,F,j);\n BIG_copy(G,F);\n for (i = 1; i < j; ++i)\n {\n BIG_add(G,G,F);\n }\n BIG_norm(G);\n BIG_norm(H);\n if(BIG_comp(H,G) != 0)\n {\n printf(\"\\nH \");\n BIG_output(H);\n printf(\"\\nG \");\n BIG_output(G);\n printf(\"\\n\\n\");\n printf(\"ERROR testing small multiplication and addition BIG, %d\\n\",j);\n exit(EXIT_FAILURE);\n }\n }\n\n /* Testing square */\n for (i=0; i<100; i++)\n {\n BIG_random(F,&rng);\n BIG_copy(G,F);\n BIG_sqr(DG,G);\n BIG_mul(DF,F,F);\n if(BIG_dcomp(DG,DF))\n {\n printf(\"ERROR testing square BIG\\n\");\n exit(EXIT_FAILURE);\n }\n }\n\n /* Testing square mod */\n for (i=0; i<100; i++)\n {\n BIG_random(H,&rng);\n BIG_randomnum(F,H,&rng);\n 
BIG_copy(G,F);\n BIG_modsqr(G,G,H);\n BIG_sqr(DF,F);\n BIG_dmod(F,DF,H);\n if(BIG_comp(G,F))\n {\n printf(\"ERROR testing mod square BIG\\n\");\n exit(EXIT_FAILURE);\n }\n }\n\n /* Testing from and to bytes conversion */\n for (i=0; i<100; i++)\n {\n BIG_random(F,&rng);\n BIG_copy(G,F);\n BIG_toBytes(bytes,G);\n BIG_fromBytes(G,bytes);\n if(BIG_comp(G,F))\n {\n printf(\"ERROR testing from and to bytes conversion BIG\\n\");\n exit(EXIT_FAILURE);\n }\n }\n BIG_toBytes(bytes,G);\n BIG_fromBytesLen(G,bytes,MODBYTES);\n if(BIG_comp(G,F))\n {\n printf(\"ERROR testing from and to bytes conversion BIG\\n\");\n exit(EXIT_FAILURE);\n }\n\n /* Testing small increment and decrement */\n for (i=0; i<100; i++)\n {\n BIG_random(F,&rng);\n BIG_copy(G,F);\n BIG_inc(G,i);\n BIG_dec(G,i);\n if(BIG_comp(G,F))\n {\n printf(\"ERROR testing small increment and decrement BIG\\n\");\n exit(EXIT_FAILURE);\n }\n }\n\n /* Testing small increment and decrement */\n for (i=0; i<100; i++)\n {\n BIG_random(F,&rng);\n BIG_copy(G,F);\n if(BIG_comp(G,F))\n {\n printf(\"ERROR testing small increment and decrement BIG\\n\");\n exit(EXIT_FAILURE);\n }\n }\n\n /* Testing random with modulo */\n for (i=0; i<100; i++)\n {\n BIG_random(G,&rng);\n BIG_randomnum(F,G,&rng);\n if(BIG_comp(F,G)>0)\n {\n printf(\"ERROR testing random with modulo BIG\\n\");\n exit(EXIT_FAILURE);\n }\n }\n\n /* Testing mod neg */\n for (i=0; i<100; i++)\n {\n BIG_random(H,&rng);\n BIG_randomnum(F,H,&rng);\n BIG_modneg(G,F,H);\n BIG_modneg(G,G,H);\n BIG_norm(G);\n BIG_norm(F);\n if(BIG_comp(F,G))\n {\n printf(\"ERROR testing mod neg BIG\\n\");\n exit(EXIT_FAILURE);\n }\n }\n\n /* Testing copy from/to BIG/DBIG */\n for (i=0; i<100; i++)\n {\n BIG_random(F,&rng);\n BIG_copy(G,F);\n BIG_dzero(DF);\n BIG_dsucopy(DF,F);\n BIG_sducopy(F,DF);\n if(BIG_comp(F,G))\n {\n printf(\"ERROR testing copy from/to BIG/DBIG\\n\");\n exit(EXIT_FAILURE);\n }\n }\n\n printf(\"SUCCESS TEST CONSISTENCY OF BIG PASSED\\n\");\n exit(EXIT_SUCCESS);\n}\n"
},
{
"alpha_fraction": 0.39837029576301575,
"alphanum_fraction": 0.39837029576301575,
"avg_line_length": 52.82926940917969,
"blob_id": "510a37c4738cc2a1ecc36f7043ad9d889e1adec9",
"content_id": "ab1f1ba78007a145127d58b865a15aa4e09dc5c0",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 2209,
"license_type": "permissive",
"max_line_length": 153,
"num_lines": 41,
"path": "/include/sovrin_pool.h",
"repo_name": "advatar/indy-sdk",
"src_encoding": "UTF-8",
"text": "#ifndef __sovrin__pool_included__\n#define __sovrin__pool_included__\n\n#include \"sovrin_mod.h\"\n#include \"sovrin_types.h\"\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n extern sovrin_error_t sovrin_create_pool_ledger_config(sovrin_handle_t command_handle,\n const char * config_name,\n const char * config,\n void (*cb)(sovrin_handle_t xcommand_handle, sovrin_error_t err)\n );\n \n extern sovrin_error_t sovrin_open_pool_ledger(sovrin_handle_t command_handle,\n const char * config_name,\n const char * config,\n void (*cb)(sovrin_handle_t xcommand_handle, sovrin_error_t err, sovrin_handle_t pool_handle)\n );\n \n extern sovrin_error_t sovrin_refresh_pool_ledger(sovrin_handle_t command_hangle,\n sovrin_handle_t handle,\n void (*cb)(sovrin_handle_t xcommand_handle, sovrin_error_t err)\n );\n \n extern sovrin_error_t sovrin_close_pool_ledger(sovrin_handle_t command_hangle,\n sovrin_handle_t handle,\n void (*cb)(sovrin_handle_t xcommand_handle, sovrin_error_t err)\n );\n \n extern sovrin_error_t sovrin_delete_pool_ledger_config(sovrin_handle_t command_handle,\n const char * config_name,\n void (*cb)(sovrin_handle_t xcommand_handle, sovrin_error_t err)\n );\n#ifdef __cplusplus\n}\n#endif\n\n#endif /* __sovrin__pool_included__ */\n\n\n"
},
{
"alpha_fraction": 0.524044930934906,
"alphanum_fraction": 0.5355055928230286,
"avg_line_length": 24.722543716430664,
"blob_id": "fee118b921fa531a018bb953b39e7130154a840f",
"content_id": "ec03eb54231e97a5e8d7115470d70312641ce307",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 4450,
"license_type": "permissive",
"max_line_length": 112,
"num_lines": 173,
"path": "/wrappers/ios/Tests/milagro-ios-test-app/milagro-test-app/tests/test_ecdsa_keypair.c",
"repo_name": "advatar/indy-sdk",
"src_encoding": "UTF-8",
"text": "/**\n * @file test_ecdsa_keypair.c\n * @author Kealan McCusker\n * @brief Test function for ECDSA keypair,\n *\n * LICENSE\n *\n * Licensed to the Apache Software Foundation (ASF) under one\n * or more contributor license agreements. See the NOTICE file\n * distributed with this work for additional information\n * regarding copyright ownership. The ASF licenses this file\n * to you under the Apache License, Version 2.0 (the\n * \"License\"); you may not use this file except in compliance\n * with the License. You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing,\n * software distributed under the License is distributed on an\n * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n * KIND, either express or implied. See the License for the\n * specific language governing permissions and limitations\n * under the License.\n */\n\n/* Build executible after installation:\n\n gcc -std=c99 -g ./test_ecdsa_keypair.c -I/opt/amcl/include -L/opt/amcl/lib -lamcl -lecdh -o test_ecdsa_keypair\n\n*/\n\n#include \"ecdh.h\"\n#include \"utils.h\"\n#include <stdio.h>\n#include <string.h>\n#include <stdlib.h>\n\ntypedef enum { false, true } bool;\n\n#define LINE_LEN 300\n// #define DEBUG\n\nint test_ecdsa_keypair(int argc, char** argv)\n{\n if (argc != 2)\n {\n printf(\"usage: ./test_ecdsa_sign [path to test vector file]\\n\");\n exit(EXIT_FAILURE);\n }\n int rc;\n FILE * fp = NULL;\n char line[LINE_LEN];\n char * linePtr = NULL;\n int l1=0;\n int l2=0;\n char * d = NULL;\n const char* dStr = \"d = \";\n octet dOct;\n char Qx[EGS];\n const char* QxStr = \"Qx = \";\n octet QxOct = {EGS,EGS,Qx};\n char Qy[EGS];\n const char* QyStr = \"Qy = \";\n octet QyOct = {EGS,EGS,Qy};\n\n char q2[2*EFS+1];\n octet Q2Oct= {0,sizeof(q2),q2};\n\n fp = fopen(argv[1], \"r\");\n if (fp == NULL)\n {\n printf(\"ERROR opening test vector file\\n\");\n exit(EXIT_FAILURE);\n }\n\n bool readLine = false;\n int i=0;\n while (fgets(line, LINE_LEN, fp) != NULL)\n {\n i++;\n readLine = true;\n if (!strncmp(line, dStr, strlen(dStr)))\n {\n#ifdef DEBUG\n printf(\"line %d %s\\n\", i,line);\n#endif\n // Find hex value in string\n linePtr = line + strlen(dStr);\n\n // Allocate memory\n l1 = strlen(linePtr)-1;\n l2 = l1/2;\n d = (char*) malloc (l2);\n if (d==NULL)\n exit(EXIT_FAILURE);\n\n // d binary value\n amcl_hex2bin(linePtr, d, l1);\n\n dOct.len=l2;\n dOct.max=l2;\n dOct.val=d;\n }\n\n if (!strncmp(line, QxStr, strlen(QxStr)))\n {\n#ifdef DEBUG\n printf(\"line %d %s\\n\", i,line);\n#endif\n // Find hex value in string\n linePtr = line + strlen(QxStr);\n\n // Allocate data\n l1 = strlen(linePtr)-1;\n\n // Qx binary value\n amcl_hex2bin(linePtr, Qx, l1);\n }\n\n if (!strncmp(line, QyStr, strlen(QyStr)))\n {\n#ifdef DEBUG\n printf(\"line %d %s\\n\", i,line);\n#endif\n // Find hex value in string\n linePtr = line + strlen(QyStr);\n\n // Allocate data\n l1 = strlen(linePtr)-1;\n\n // Qy binary value\n amcl_hex2bin(linePtr, Qy, l1);\n\n // Assign Public Key\n BIG qx, qy;\n char q[2*EFS+1];\n BIG_fromBytes(qx,QxOct.val);\n BIG_fromBytes(qy,QyOct.val);\n octet QOct= {sizeof(q),sizeof(q),q};\n QOct.val[0]=4;\n BIG_toBytes(&(QOct.val[1]),qx);\n BIG_toBytes(&(QOct.val[EFS+1]),qy);\n\n // Generate Key pair\n ECP_KEY_PAIR_GENERATE(NULL,&dOct,&Q2Oct);\n\n#ifdef DEBUG\n printf(\"QOct: \");\n OCT_output(&QOct);\n printf(\"\\r\\n\");\n printf(\"Q2Oct: \");\n OCT_output(&Q2Oct);\n printf(\"\\r\\n\");\n#endif\n rc = 
OCT_comp(&QOct,&Q2Oct);\n if (!rc)\n {\n printf(\"TEST ECDSA KEYPAIR FAILED LINE %d\\n\",i);\n exit(EXIT_FAILURE);\n }\n free(d);\n d = NULL;\n }\n }\n fclose(fp);\n if (!readLine)\n {\n printf(\"ERROR Empty test vector file\\n\");\n exit(EXIT_FAILURE);\n }\n printf(\"SUCCESS TEST ECDSA KEYPAIR PASSED\\n\");\n exit(EXIT_SUCCESS);\n}\n"
}
] | 33 |